Diffstat (limited to 'src')
 463 files changed, 14817 insertions(+), 13218 deletions(-)
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 97aa50855f..c4bbd5923f 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.93 2004/08/29 04:12:17 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.94 2004/08/29 05:06:39 momjian Exp $
  *
  * NOTES
  *	  The old interface functions have been converted to macros
@@ -468,17 +468,19 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
 			break;
 
 			/*
-			 * If the attribute number is 0, then we are supposed to return
-			 * the entire tuple as a row-type Datum.  (Using zero for this
-			 * purpose is unclean since it risks confusion with "invalid attr"
-			 * result codes, but it's not worth changing now.)
+			 * If the attribute number is 0, then we are supposed to
+			 * return the entire tuple as a row-type Datum.  (Using zero
+			 * for this purpose is unclean since it risks confusion with
+			 * "invalid attr" result codes, but it's not worth changing
+			 * now.)
 			 *
-			 * We have to make a copy of the tuple so we can safely insert the
-			 * Datum overhead fields, which are not set in on-disk tuples.
+			 * We have to make a copy of the tuple so we can safely insert
+			 * the Datum overhead fields, which are not set in on-disk
+			 * tuples.
 			 */
 		case InvalidAttrNumber:
 			{
-				HeapTupleHeader	dtup;
+				HeapTupleHeader dtup;
 
 				dtup = (HeapTupleHeader) palloc(tup->t_len);
 				memcpy((char *) dtup, (char *) tup->t_data, tup->t_len);
@@ -555,7 +557,7 @@ heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
  *		construct a tuple from the given values[] and nulls[] arrays
  *
  *		Null attributes are indicated by a 'n' in the appropriate byte
- *		of nulls[]. Non-null attributes are indicated by a ' ' (space).
+ *		of nulls[].  Non-null attributes are indicated by a ' ' (space).
  * ----------------
  */
 HeapTuple
@@ -580,7 +582,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
 
 	/*
 	 * Check for nulls and embedded tuples; expand any toasted attributes
-	 * in embedded tuples. This preserves the invariant that toasting can
+	 * in embedded tuples.  This preserves the invariant that toasting can
 	 * only go one level deep.
 	 *
 	 * We can skip calling toast_flatten_tuple_attribute() if the attribute
@@ -620,7 +622,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
 	len += ComputeDataSize(tupleDescriptor, values, nulls);
 
 	/*
-	 * Allocate and zero the space needed. Note that the tuple body and
+	 * Allocate and zero the space needed.  Note that the tuple body and
 	 * HeapTupleData management structure are allocated in one chunk.
 	 */
 	tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
@@ -683,9 +685,9 @@ heap_modifytuple(HeapTuple tuple,
 	 * allocate and fill values and nulls arrays from either the tuple or
 	 * the repl information, as appropriate.
 	 *
-	 * NOTE: it's debatable whether to use heap_deformtuple() here or
-	 * just heap_getattr() only the non-replaced colums.  The latter could
-	 * win if there are many replaced columns and few non-replaced ones.
+	 * NOTE: it's debatable whether to use heap_deformtuple() here or just
+	 * heap_getattr() only the non-replaced colums.  The latter could win
+	 * if there are many replaced columns and few non-replaced ones.
 	 * However, heap_deformtuple costs only O(N) while the heap_getattr
 	 * way would cost O(N^2) if there are many non-replaced columns, so it
 	 * seems better to err on the side of linear cost.
@@ -763,10 +765,11 @@ heap_deformtuple(HeapTuple tuple,
 	bool		slow = false;	/* can we use/set attcacheoff? */
 
 	natts = tup->t_natts;
+
 	/*
-	 * In inheritance situations, it is possible that the given tuple actually
-	 * has more fields than the caller is expecting.  Don't run off the end
-	 * of the caller's arrays.
+	 * In inheritance situations, it is possible that the given tuple
+	 * actually has more fields than the caller is expecting.  Don't run
+	 * off the end of the caller's arrays.
 	 */
 	natts = Min(natts, tdesc_natts);
@@ -787,9 +790,7 @@ heap_deformtuple(HeapTuple tuple,
 		nulls[attnum] = ' ';
 
 		if (!slow && att[attnum]->attcacheoff >= 0)
-		{
 			off = att[attnum]->attcacheoff;
-		}
 		else
 		{
 			off = att_align(off, att[attnum]->attalign);
@@ -807,8 +808,8 @@ heap_deformtuple(HeapTuple tuple,
 	}
 
 	/*
-	 * If tuple doesn't have all the atts indicated by tupleDesc, read
-	 * the rest as null
+	 * If tuple doesn't have all the atts indicated by tupleDesc, read the
+	 * rest as null
 	 */
 	for (; attnum < tdesc_natts; attnum++)
 	{
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index d6191a2cfe..d305734c3e 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.70 2004/08/29 04:12:17 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.71 2004/08/29 05:06:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -162,9 +162,9 @@ index_formtuple(TupleDesc tupleDescriptor,
 	if ((size & INDEX_SIZE_MASK) != size)
 		ereport(ERROR,
 				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
-				 errmsg("index row requires %lu bytes, maximum size is %lu",
-						(unsigned long) size,
-						(unsigned long) INDEX_SIZE_MASK)));
+			 errmsg("index row requires %lu bytes, maximum size is %lu",
+					(unsigned long) size,
+					(unsigned long) INDEX_SIZE_MASK)));
 
 	infomask |= size;
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 98dc37a76e..4477a65bb2 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.84 2004/08/29 04:12:17 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.85 2004/08/29 05:06:39 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -356,7 +356,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
 			outputstr = DatumGetCString(FunctionCall3(&thisState->finfo,
 													  attr,
-									ObjectIdGetDatum(thisState->typioparam),
+							   ObjectIdGetDatum(thisState->typioparam),
 							Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
 			pq_sendcountedtext(&buf, outputstr, strlen(outputstr), false);
 			pfree(outputstr);
@@ -368,7 +368,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
 			outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
 													   attr,
-									ObjectIdGetDatum(thisState->typioparam)));
+							  ObjectIdGetDatum(thisState->typioparam)));
 			/* We assume the result will not have been toasted */
 			pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
 			pq_sendbytes(&buf, VARDATA(outputbytes),
@@ -458,7 +458,7 @@ printtup_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
 			outputstr = DatumGetCString(FunctionCall3(&thisState->finfo,
 													  attr,
-									ObjectIdGetDatum(thisState->typioparam),
+							   ObjectIdGetDatum(thisState->typioparam),
 							Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
 			pq_sendcountedtext(&buf, outputstr, strlen(outputstr), true);
 			pfree(outputstr);
@@ -579,7 +579,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
 			value = DatumGetCString(OidFunctionCall3(typoutput,
 													 attr,
-											ObjectIdGetDatum(typioparam),
+										ObjectIdGetDatum(typioparam),
 							Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
 
 			printatt((unsigned) i + 1, typeinfo->attrs[i], value);
@@ -672,7 +672,7 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
 			outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
 													   attr,
-									ObjectIdGetDatum(thisState->typioparam)));
+							  ObjectIdGetDatum(thisState->typioparam)));
 			/* We assume the result will not have been toasted */
 			pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
 			pq_sendbytes(&buf, VARDATA(outputbytes),
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 66403a1b6b..ed932d35ab 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.105 2004/08/29 04:12:17 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.106 2004/08/29 05:06:39 momjian Exp $
  *
  * NOTES
  *	  some of the executor utility code such as "ExecTypeFromTL" should be
@@ -52,8 +52,8 @@ CreateTemplateTupleDesc(int natts, bool hasoid)
 
 	/*
 	 * Allocate enough memory for the tuple descriptor, and zero the
-	 * attrs[] array since TupleDescInitEntry assumes that the array
-	 * is filled with NULL pointers.
+	 * attrs[] array since TupleDescInitEntry assumes that the array is
+	 * filled with NULL pointers.
 	 */
 	desc = (TupleDesc) palloc(sizeof(struct tupleDesc));
@@ -420,8 +420,8 @@ TupleDescInitEntry(TupleDesc desc,
 	/*
 	 * Note: attributeName can be NULL, because the planner doesn't always
-	 * fill in valid resname values in targetlists, particularly for resjunk
-	 * attributes.
+	 * fill in valid resname values in targetlists, particularly for
+	 * resjunk attributes.
 	 */
 	if (attributeName != NULL)
 		namestrcpy(&(att->attname), attributeName);
@@ -464,7 +464,7 @@ TupleDescInitEntry(TupleDesc desc,
  * Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
  *
  * Note: the default assumption is no OIDs; caller may modify the returned
- * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
+ * TupleDesc if it wants OIDs.  Also, tdtypeid will need to be filled in
  * later on.
  */
 TupleDesc
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index d3fde5fbc5..75e8800277 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.110 2004/08/29 04:12:17 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.111 2004/08/29 05:06:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -667,7 +667,7 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
 	Datum		attr[INDEX_MAX_KEYS];
 	bool		whatfree[INDEX_MAX_KEYS];
 	char		isnull[INDEX_MAX_KEYS];
-	GistEntryVector	*evec;
+	GistEntryVector *evec;
 	Datum		datum;
 	int			datumsize,
 				i,
@@ -715,8 +715,8 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
 		{
 			evec->n = 2;
 			gistentryinit(evec->vector[1],
-						  evec->vector[0].key, r, NULL,
-						  (OffsetNumber) 0, evec->vector[0].bytes, FALSE);
+					  evec->vector[0].key, r, NULL,
+					  (OffsetNumber) 0, evec->vector[0].bytes, FALSE);
 		}
 		else
@@ -763,7 +763,7 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
 static IndexTuple
 gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
 {
-	GistEntryVector	*evec;
+	GistEntryVector *evec;
 	Datum		datum;
 	int			datumsize;
 	bool		result,
@@ -879,7 +879,7 @@ gistunionsubkey(Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITV
 	int			len,
 			   *attrsize;
 	OffsetNumber *entries;
-	GistEntryVector	*evec;
+	GistEntryVector *evec;
 	Datum		datum;
 	int			datumsize;
 	int			reallen;
@@ -940,8 +940,8 @@ gistunionsubkey(Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITV
 		else
 		{
 			/*
-			 * evec->vector[0].bytes may be not
-			 * defined, so form union with itself
+			 * evec->vector[0].bytes may be not defined, so form union
+			 * with itself
 			 */
 			if (reallen == 1)
 			{
@@ -1056,7 +1056,7 @@ gistadjsubkey(Relation r,
 			   *ev1p;
 	float		lpenalty,
 				rpenalty;
-	GistEntryVector	*evec;
+	GistEntryVector *evec;
 	int			datumsize;
 	bool		isnull[INDEX_MAX_KEYS];
 	int			i,
@@ -1222,7 +1222,7 @@ gistSplit(Relation r,
 				rbknum;
 	GISTPageOpaque opaque;
 	GIST_SPLITVEC v;
-	GistEntryVector	*entryvec;
+	GistEntryVector *entryvec;
 	bool	   *decompvec;
 	int			i,
 				j,
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index e3a267ba67..3580f1f3e6 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.41 2004/08/29 04:12:17 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.42 2004/08/29 05:06:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -250,9 +250,10 @@ gistindex_keytest(IndexTuple tuple,
 								   FALSE, isNull);
 
 		/*
-		 * Call the Consistent function to evaluate the test.  The arguments
-		 * are the index datum (as a GISTENTRY*), the comparison datum, and
-		 * the comparison operator's strategy number and subtype from pg_amop.
+		 * Call the Consistent function to evaluate the test.  The
+		 * arguments are the index datum (as a GISTENTRY*), the comparison
+		 * datum, and the comparison operator's strategy number and
+		 * subtype from pg_amop.
 		 *
 		 * (Presently there's no need to pass the subtype since it'll always
 		 * be zero, but might as well pass it for possible future use.)
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index 02af1ef53e..822b97e8e9 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.54 2004/08/29 04:12:17 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.55 2004/08/29 05:06:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -115,9 +115,7 @@ gistrescan(PG_FUNCTION_ARGS)
 		 * the sk_subtype field.
 		 */
 		for (i = 0; i < s->numberOfKeys; i++)
-		{
 			s->keyData[i].sk_func = p->giststate->consistentFn[s->keyData[i].sk_attno - 1];
-		}
 	}
 
 	PG_RETURN_VOID();
@@ -266,9 +264,9 @@ ReleaseResources_gist(void)
 	GISTScanList next;
 
 	/*
-	 * Note: this should be a no-op during normal query shutdown.
-	 * However, in an abort situation ExecutorEnd is not called and so
-	 * there may be open index scans to clean up.
+	 * Note: this should be a no-op during normal query shutdown. However,
+	 * in an abort situation ExecutorEnd is not called and so there may be
+	 * open index scans to clean up.
 	 */
 	prev = NULL;
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index db00490e58..40b05720fb 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.72 2004/08/29 04:12:17 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.73 2004/08/29 05:06:40 momjian Exp $
  *
  * NOTES
  *	  This file contains only the public interface routines.
@@ -210,8 +210,8 @@ hashgettuple(PG_FUNCTION_ARGS)
 	bool		res;
 
 	/*
-	 * We hold pin but not lock on current buffer while outside the hash AM.
-	 * Reacquire the read lock here.
+	 * We hold pin but not lock on current buffer while outside the hash
+	 * AM.  Reacquire the read lock here.
 	 */
 	if (BufferIsValid(so->hashso_curbuf))
 		_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
@@ -470,7 +470,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
 	/*
 	 * Read the metapage to fetch original bucket and tuple counts. Also,
 	 * we keep a copy of the last-seen metapage so that we can use its
-	 * hashm_spares[] values to compute bucket page addresses. This is a
+	 * hashm_spares[] values to compute bucket page addresses.  This is a
 	 * bit hokey but perfectly safe, since the interesting entries in the
 	 * spares array cannot change under us; and it beats rereading the
 	 * metapage for each bucket.
@@ -532,7 +532,7 @@ loop_top:
 				ItemPointer htup;
 
 				hitem = (HashItem) PageGetItem(page,
-											   PageGetItemId(page, offno));
+										   PageGetItemId(page, offno));
 				htup = &(hitem->hash_itup.t_tid);
 				if (callback(htup, callback_state))
 				{
@@ -595,8 +595,8 @@ loop_top:
 		orig_ntuples == metap->hashm_ntuples)
 	{
 		/*
-		 * No one has split or inserted anything since start of scan,
-		 * so believe our count as gospel.
+		 * No one has split or inserted anything since start of scan, so
+		 * believe our count as gospel.
 		 */
 		metap->hashm_ntuples = num_index_tuples;
 	}
@@ -604,7 +604,7 @@ loop_top:
 	{
 		/*
 		 * Otherwise, our count is untrustworthy since we may have
-		 * double-scanned tuples in split buckets. Proceed by
+		 * double-scanned tuples in split buckets.  Proceed by
 		 * dead-reckoning.
 		 */
 		if (metap->hashm_ntuples > tuples_removed)
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index b1c303f8d0..91ae559e3a 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.33 2004/08/29 04:12:18 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.34 2004/08/29 05:06:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -20,7 +20,7 @@
 
 static OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf,
-				Size itemsize, HashItem hitem);
+			   Size itemsize, HashItem hitem);
 
 /*
@@ -81,7 +81,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
 	/*
 	 * Check whether the item can fit on a hash page at all. (Eventually,
-	 * we ought to try to apply TOAST methods if not.) Note that at this
+	 * we ought to try to apply TOAST methods if not.)  Note that at this
 	 * point, itemsz doesn't include the ItemId.
 	 */
 	if (itemsz > HashMaxItemSize((Page) metap))
@@ -105,7 +105,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
 	_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
 
 	/*
-	 * Acquire share lock on target bucket; then we can release split lock.
+	 * Acquire share lock on target bucket; then we can release split
+	 * lock.
 	 */
 	_hash_getlock(rel, blkno, HASH_SHARE);
@@ -124,7 +125,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
 		/*
 		 * no space on this page; check for an overflow page
 		 */
-		BlockNumber	nextblkno = pageopaque->hasho_nextblkno;
+		BlockNumber nextblkno = pageopaque->hasho_nextblkno;
 
 		if (BlockNumberIsValid(nextblkno))
 		{
@@ -169,8 +170,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
 	_hash_droplock(rel, blkno, HASH_SHARE);
 
 	/*
-	 * Write-lock the metapage so we can increment the tuple count.
-	 * After incrementing it, check to see if it's time for a split.
+	 * Write-lock the metapage so we can increment the tuple count. After
+	 * incrementing it, check to see if it's time for a split.
 	 */
 	_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 740f119bc7..c02da93dc1 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.43 2004/08/29 04:12:18 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.44 2004/08/29 05:06:40 momjian Exp $
  *
  * NOTES
  *	  Overflow pages look like ordinary relation pages.
@@ -41,11 +41,11 @@ bitno_to_blkno(HashMetaPage metap, uint32 ovflbitnum)
 	for (i = 1;
 		 i < splitnum && ovflbitnum > metap->hashm_spares[i];
 		 i++)
-		/* loop */ ;
+		 /* loop */ ;
 
 	/*
-	 * Convert to absolute page number by adding the number of bucket pages
-	 * that exist before this split point.
+	 * Convert to absolute page number by adding the number of bucket
+	 * pages that exist before this split point.
 	 */
 	return (BlockNumber) ((1 << i) + ovflbitnum);
 }
@@ -79,7 +79,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
  *
  *	Add an overflow page to the bucket whose last page is pointed to by 'buf'.
  *
- *	On entry, the caller must hold a pin but no lock on 'buf'. The pin is
+ *	On entry, the caller must hold a pin but no lock on 'buf'.  The pin is
  *	dropped before exiting (we assume the caller is not interested in 'buf'
  *	anymore).  The returned overflow page will be pinned and write-locked;
 *	it is guaranteed to be empty.
@@ -88,12 +88,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
  *	That buffer is returned in the same state.
  *
  *	The caller must hold at least share lock on the bucket, to ensure that
- *	no one else tries to compact the bucket meanwhile. This guarantees that
+ *	no one else tries to compact the bucket meanwhile.  This guarantees that
  *	'buf' won't stop being part of the bucket while it's unlocked.
  *
  *	NB: since this could be executed concurrently by multiple processes,
  *	one should not assume that the returned overflow page will be the
- *	immediate successor of the originally passed 'buf'. Additional overflow
+ *	immediate successor of the originally passed 'buf'.  Additional overflow
 *	pages might have been added to the bucket chain in between.
 */
Buffer
@@ -197,7 +197,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
	/* outer loop iterates once per bitmap page */
	for (;;)
	{
-		BlockNumber	mapblkno;
+		BlockNumber mapblkno;
		Page		mappage;
		uint32		last_inpage;
@@ -274,9 +274,9 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
	blkno = bitno_to_blkno(metap, bit);

	/*
-	 * Adjust hashm_firstfree to avoid redundant searches.  But don't
-	 * risk changing it if someone moved it while we were searching
-	 * bitmap pages.
+	 * Adjust hashm_firstfree to avoid redundant searches.  But don't risk
+	 * changing it if someone moved it while we were searching bitmap
+	 * pages.
	 */
	if (metap->hashm_firstfree == orig_firstfree)
		metap->hashm_firstfree = bit + 1;
@@ -304,9 +304,9 @@ found:
	blkno = bitno_to_blkno(metap, bit);

	/*
-	 * Adjust hashm_firstfree to avoid redundant searches.  But don't
-	 * risk changing it if someone moved it while we were searching
-	 * bitmap pages.
+	 * Adjust hashm_firstfree to avoid redundant searches.  But don't risk
+	 * changing it if someone moved it while we were searching bitmap
+	 * pages.
	 */
	if (metap->hashm_firstfree == orig_firstfree)
	{
@@ -381,7 +381,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
	Bucket		bucket;

	/* Get information from the doomed page */
-	ovflblkno =	BufferGetBlockNumber(ovflbuf);
+	ovflblkno = BufferGetBlockNumber(ovflbuf);
	ovflpage = BufferGetPage(ovflbuf);
	_hash_checkpage(rel, ovflpage, LH_OVERFLOW_PAGE);
	ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
@@ -396,7 +396,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
	/*
	 * Fix up the bucket chain.  this is a doubly-linked list, so we must
	 * fix up the bucket chain members behind and ahead of the overflow
-	 * page being deleted. No concurrency issues since we hold exclusive
+	 * page being deleted.  No concurrency issues since we hold exclusive
	 * lock on the entire bucket.
	 */
	if (BlockNumberIsValid(prevblkno))
@@ -488,7 +488,8 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
	/*
	 * It is okay to write-lock the new bitmap page while holding metapage
-	 * write lock, because no one else could be contending for the new page.
+	 * write lock, because no one else could be contending for the new
+	 * page.
	 *
	 * There is some loss of concurrency in possibly doing I/O for the new
	 * page while holding the metapage lock, but this path is taken so
@@ -654,8 +655,8 @@ _hash_squeezebucket(Relation rel,
			/*
			 * delete the tuple from the "read" page. PageIndexTupleDelete
-			 * repacks the ItemId array, so 'roffnum' will be "advanced" to
-			 * the "next" ItemId.
+			 * repacks the ItemId array, so 'roffnum' will be "advanced"
+			 * to the "next" ItemId.
			 */
			PageIndexTupleDelete(rpage, roffnum);
		}
@@ -667,8 +668,9 @@ _hash_squeezebucket(Relation rel,
		 * Tricky point here: if our read and write pages are adjacent in the
		 * bucket chain, our write lock on wbuf will conflict with
		 * _hash_freeovflpage's attempt to update the sibling links of the
-		 * removed page. However, in that case we are done anyway, so we can
-		 * simply drop the write lock before calling _hash_freeovflpage.
+		 * removed page.  However, in that case we are done anyway, so we
+		 * can simply drop the write lock before calling
+		 * _hash_freeovflpage.
		 */
		if (PageIsEmpty(rpage))
		{
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 787bb9bf62..d3088f50ce 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.45 2004/08/29 04:12:18 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.46 2004/08/29 05:06:40 momjian Exp $
  *
  * NOTES
  *	  Postgres hash pages look like ordinary relation pages.  The opaque
@@ -35,11 +35,11 @@
 
 static void _hash_splitbucket(Relation rel, Buffer metabuf,
-				  Bucket obucket, Bucket nbucket,
-				  BlockNumber start_oblkno,
-				  BlockNumber start_nblkno,
-				  uint32 maxbucket,
-				  uint32 highmask, uint32 lowmask);
+				 Bucket obucket, Bucket nbucket,
+				 BlockNumber start_oblkno,
+				 BlockNumber start_nblkno,
+				 uint32 maxbucket,
+				 uint32 highmask, uint32 lowmask);
 
 /*
@@ -47,7 +47,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
  * of the locking rules).  However, we can skip taking lmgr locks when the
  * index is local to the current backend (ie, either temp or new in the
  * current transaction).  No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks.  We still take buffer-level locks, but not lmgr locks.
 */
#define USELOCKING(rel)		(!RELATION_IS_LOCAL(rel))
@@ -239,13 +239,13 @@ _hash_metapinit(Relation rel)
			 RelationGetRelationName(rel));

	/*
-	 * Determine the target fill factor (tuples per bucket) for this index.
-	 * The idea is to make the fill factor correspond to pages about 3/4ths
-	 * full.  We can compute it exactly if the index datatype is fixed-width,
-	 * but for var-width there's some guessing involved.
+	 * Determine the target fill factor (tuples per bucket) for this
+	 * index.  The idea is to make the fill factor correspond to pages
+	 * about 3/4ths full.  We can compute it exactly if the index datatype
+	 * is fixed-width, but for var-width there's some guessing involved.
	 */
	data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid,
-								 RelationGetDescr(rel)->attrs[0]->atttypmod);
+							 RelationGetDescr(rel)->attrs[0]->atttypmod);
	item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) +
		sizeof(ItemIdData);		/* include the line pointer */
	ffactor = (BLCKSZ * 3 / 4) / item_width;
@@ -288,8 +288,9 @@ _hash_metapinit(Relation rel)
	metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);

	/*
-	 * We initialize the index with two buckets, 0 and 1, occupying physical
-	 * blocks 1 and 2.  The first freespace bitmap page is in block 3.
+	 * We initialize the index with two buckets, 0 and 1, occupying
+	 * physical blocks 1 and 2.  The first freespace bitmap page is in
+	 * block 3.
	 */
	metap->hashm_maxbucket = metap->hashm_lowmask = 1;	/* nbuckets - 1 */
	metap->hashm_highmask = 3;	/* (nbuckets << 1) - 1 */
@@ -297,7 +298,7 @@ _hash_metapinit(Relation rel)
	MemSet((char *) metap->hashm_spares, 0, sizeof(metap->hashm_spares));
	MemSet((char *) metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));

-	metap->hashm_spares[1] = 1; /* the first bitmap page is only spare */
+	metap->hashm_spares[1] = 1;	/* the first bitmap page is only spare */
	metap->hashm_ovflpoint = 1;
	metap->hashm_firstfree = 0;
@@ -319,8 +320,8 @@ _hash_metapinit(Relation rel)
	}

	/*
-	 * Initialize first bitmap page.  Can't do this until we
-	 * create the first two buckets, else smgr will complain.
+	 * Initialize first bitmap page.  Can't do this until we create the
+	 * first two buckets, else smgr will complain.
	 */
	_hash_initbitmap(rel, metap, 3);
@@ -362,17 +363,18 @@ _hash_expandtable(Relation rel, Buffer metabuf)
	uint32		lowmask;

	/*
-	 * Obtain the page-zero lock to assert the right to begin a split
-	 * (see README).
+	 * Obtain the page-zero lock to assert the right to begin a split (see
+	 * README).
	 *
	 * Note: deadlock should be impossible here. Our own backend could only
-	 * be holding bucket sharelocks due to stopped indexscans; those will not
-	 * block other holders of the page-zero lock, who are only interested in
-	 * acquiring bucket sharelocks themselves.  Exclusive bucket locks are
-	 * only taken here and in hashbulkdelete, and neither of these operations
-	 * needs any additional locks to complete.  (If, due to some flaw in this
-	 * reasoning, we manage to deadlock anyway, it's okay to error out; the
-	 * index will be left in a consistent state.)
+	 * be holding bucket sharelocks due to stopped indexscans; those will
+	 * not block other holders of the page-zero lock, who are only
+	 * interested in acquiring bucket sharelocks themselves.  Exclusive
+	 * bucket locks are only taken here and in hashbulkdelete, and neither
+	 * of these operations needs any additional locks to complete.  (If,
+	 * due to some flaw in this reasoning, we manage to deadlock anyway,
+	 * it's okay to error out; the index will be left in a consistent
+	 * state.)
	 */
	_hash_getlock(rel, 0, HASH_EXCLUSIVE);
@@ -383,8 +385,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
	_hash_checkpage(rel, (Page) metap, LH_META_PAGE);

	/*
-	 * Check to see if split is still needed; someone else might have already
-	 * done one while we waited for the lock.
+	 * Check to see if split is still needed; someone else might have
+	 * already done one while we waited for the lock.
	 *
	 * Make sure this stays in sync with_hash_doinsert()
	 */
@@ -394,16 +396,16 @@ _hash_expandtable(Relation rel, Buffer metabuf)
	/*
	 * Determine which bucket is to be split, and attempt to lock the old
-	 * bucket. If we can't get the lock, give up.
+	 * bucket.  If we can't get the lock, give up.
	 *
	 * The lock protects us against other backends, but not against our own
	 * backend.  Must check for active scans separately.
	 *
-	 * Ideally we would lock the new bucket too before proceeding, but if
-	 * we are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
+	 * Ideally we would lock the new bucket too before proceeding, but if we
+	 * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
	 * isn't correct yet.  For simplicity we update the metapage first and
-	 * then lock. This should be okay because no one else should be trying
-	 * to lock the new bucket yet...
+	 * then lock.  This should be okay because no one else should be
+	 * trying to lock the new bucket yet...
	 */
	new_bucket = metap->hashm_maxbucket + 1;
	old_bucket = (new_bucket & metap->hashm_lowmask);
@@ -417,7 +419,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
		goto fail;

	/*
-	 * Okay to proceed with split. Update the metapage bucket mapping info.
+	 * Okay to proceed with split.  Update the metapage bucket mapping
+	 * info.
	 */
	metap->hashm_maxbucket = new_bucket;
@@ -431,11 +434,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
	/*
	 * If the split point is increasing (hashm_maxbucket's log base 2
	 * increases), we need to adjust the hashm_spares[] array and
-	 * hashm_ovflpoint so that future overflow pages will be created beyond
-	 * this new batch of bucket pages.
+	 * hashm_ovflpoint so that future overflow pages will be created
+	 * beyond this new batch of bucket pages.
	 *
-	 * XXX should initialize new bucket pages to prevent out-of-order
-	 * page creation? Don't wanna do it right here though.
+	 * XXX should initialize new bucket pages to prevent out-of-order page
+	 * creation?  Don't wanna do it right here though.
	 */
	spare_ndx = _hash_log2(metap->hashm_maxbucket + 1);
	if (spare_ndx > metap->hashm_ovflpoint)
@@ -456,9 +459,10 @@ _hash_expandtable(Relation rel, Buffer metabuf)
	/*
	 * Copy bucket mapping info now; this saves re-accessing the meta page
	 * inside _hash_splitbucket's inner loop.  Note that once we drop the
-	 * split lock, other splits could begin, so these values might be out of
-	 * date before _hash_splitbucket finishes.  That's okay, since all it
-	 * needs is to tell which of these two buckets to map hashkeys into.
+	 * split lock, other splits could begin, so these values might be out
+	 * of date before _hash_splitbucket finishes.  That's okay, since all
+	 * it needs is to tell which of these two buckets to map hashkeys
+	 * into.
	 */
	maxbucket = metap->hashm_maxbucket;
	highmask = metap->hashm_highmask;
@@ -539,8 +543,8 @@ _hash_splitbucket(Relation rel,
	/*
	 * It should be okay to simultaneously write-lock pages from each
-	 * bucket, since no one else can be trying to acquire buffer lock
-	 * on pages of either bucket.
+	 * bucket, since no one else can be trying to acquire buffer lock on
+	 * pages of either bucket.
	 */
	oblkno = start_oblkno;
	nblkno = start_nblkno;
@@ -562,9 +566,9 @@ _hash_splitbucket(Relation rel,
	nopaque->hasho_filler = HASHO_FILL;

	/*
-	 * Partition the tuples in the old bucket between the old bucket and the
-	 * new bucket, advancing along the old bucket's overflow bucket chain
-	 * and adding overflow pages to the new bucket as needed.
+	 * Partition the tuples in the old bucket between the old bucket and
+	 * the new bucket, advancing along the old bucket's overflow bucket
+	 * chain and adding overflow pages to the new bucket as needed.
	 */
	ooffnum = FirstOffsetNumber;
	omaxoffnum = PageGetMaxOffsetNumber(opage);
@@ -582,9 +586,10 @@ _hash_splitbucket(Relation rel,
			oblkno = oopaque->hasho_nextblkno;
			if (!BlockNumberIsValid(oblkno))
				break;
+
			/*
-			 * we ran out of tuples on this particular page, but we
-			 * have more overflow pages; advance to next page.
+			 * we ran out of tuples on this particular page, but we have
+			 * more overflow pages; advance to next page.
			 */
			_hash_wrtbuf(rel, obuf);
@@ -600,8 +605,8 @@ _hash_splitbucket(Relation rel,
		/*
		 * Re-hash the tuple to determine which bucket it now belongs in.
		 *
-		 * It is annoying to call the hash function while holding locks,
-		 * but releasing and relocking the page for each tuple is unappealing
+		 * It is annoying to call the hash function while holding locks, but
+		 * releasing and relocking the page for each tuple is unappealing
		 * too.
		 */
		hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
@@ -666,10 +671,11 @@ _hash_splitbucket(Relation rel,
	}

	/*
-	 * We're at the end of the old bucket chain, so we're done partitioning
-	 * the tuples.  Before quitting, call _hash_squeezebucket to ensure the
-	 * tuples remaining in the old bucket (including the overflow pages) are
-	 * packed as tightly as possible.  The new bucket is already tight.
+	 * We're at the end of the old bucket chain, so we're done
+	 * partitioning the tuples.  Before quitting, call _hash_squeezebucket
+	 * to ensure the tuples remaining in the old bucket (including the
+	 * overflow pages) are packed as tightly as possible.  The new bucket
+	 * is already tight.
	 */
	_hash_wrtbuf(rel, obuf);
	_hash_wrtbuf(rel, nbuf);
diff --git a/src/backend/access/hash/hashscan.c b/src/backend/access/hash/hashscan.c
index 2fc24dd9e1..16d2a77d49 100644
--- a/src/backend/access/hash/hashscan.c
+++ b/src/backend/access/hash/hashscan.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.36 2004/08/29 04:12:18 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.37 2004/08/29 05:06:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -44,9 +44,9 @@ ReleaseResources_hash(void)
	HashScanList next;

	/*
-	 * Note: this should be a no-op during normal query shutdown.
-	 * However, in an abort situation ExecutorEnd is not called and so
-	 * there may be open index scans to clean up.
+	 * Note: this should be a no-op during normal query shutdown. However,
+	 * in an abort situation ExecutorEnd is not called and so there may be
+	 * open index scans to clean up.
	 */
	prev = NULL;
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 76ad5d3184..daaff4adc5 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.36 2004/08/29 04:12:18 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.37 2004/08/29 05:06:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -137,12 +137,13 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
	 * We do not support hash scans with no index qualification, because
	 * we would have to read the whole index rather than just one bucket.
	 * That creates a whole raft of problems, since we haven't got a
-	 * practical way to lock all the buckets against splits or compactions.
+	 * practical way to lock all the buckets against splits or
+	 * compactions.
	 */
	if (scan->numberOfKeys < 1)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("hash indexes do not support whole-index scans")));
+			errmsg("hash indexes do not support whole-index scans")));

	/*
	 * If the constant in the index qual is NULL, assume it cannot match
@@ -182,7 +183,8 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
	_hash_relbuf(rel, metabuf);

	/*
-	 * Acquire share lock on target bucket; then we can release split lock.
+	 * Acquire share lock on target bucket; then we can release split
+	 * lock.
	 */
	_hash_getlock(rel, blkno, HASH_SHARE);
@@ -287,9 +289,8 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
				while (offnum > maxoff)
				{
					/*
-					 * either this page is empty
-					 * (maxoff == InvalidOffsetNumber)
-					 * or we ran off the end.
+					 * either this page is empty (maxoff ==
+					 * InvalidOffsetNumber) or we ran off the end.
					 */
					_hash_readnext(rel, &buf, &page, &opaque);
					if (BufferIsValid(buf))
@@ -315,15 +316,12 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
				while (offnum < FirstOffsetNumber)
				{
					/*
-					 * either this page is empty
-					 * (offnum == InvalidOffsetNumber)
-					 * or we ran off the end.
+					 * either this page is empty (offnum ==
+					 * InvalidOffsetNumber) or we ran off the end.
					 */
					_hash_readprev(rel, &buf, &page, &opaque);
					if (BufferIsValid(buf))
-					{
						maxoff = offnum = PageGetMaxOffsetNumber(page);
-					}
					else
					{
						/* end of bucket */
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 3fb04e77d8..bf9999dc92 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.39 2004/08/29 04:12:18 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.40 2004/08/29 05:06:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -113,6 +113,7 @@ void
 _hash_checkpage(Relation rel, Page page, int flags)
 {
	Assert(page);
+
	/*
	 * When checking the metapage, always verify magic number and version.
	 */
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 4f965eb2bf..6dd0c357fb 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.172 2004/08/29 04:12:20 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.173 2004/08/29 05:06:40 momjian Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -75,9 +75,9 @@ initscan(HeapScanDesc scan, ScanKey key)
	/*
	 * Determine the number of blocks we have to scan.
	 *
-	 * It is sufficient to do this once at scan start, since any tuples
-	 * added while the scan is in progress will be invisible to my
-	 * transaction anyway...
+	 * It is sufficient to do this once at scan start, since any tuples added
+	 * while the scan is in progress will be invisible to my transaction
+	 * anyway...
	 */
	scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
@@ -1141,12 +1141,13 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
	tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
	HeapTupleHeaderSetXmin(tup->t_data, GetCurrentTransactionId());
	HeapTupleHeaderSetCmin(tup->t_data, cid);
-	HeapTupleHeaderSetCmax(tup->t_data, 0); /* zero out Datum fields */
+	HeapTupleHeaderSetCmax(tup->t_data, 0);		/* zero out Datum fields */
	tup->t_tableOid = relation->rd_id;

	/*
	 * If the new tuple is too big for storage or contains already toasted
-	 * out-of-line attributes from some other relation, invoke the toaster.
+	 * out-of-line attributes from some other relation, invoke the
+	 * toaster.
	 */
	if (HeapTupleHasExternal(tup) ||
		(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
@@ -1273,7 +1274,7 @@ simple_heap_insert(Relation relation, HeapTuple tup)
 */
int
heap_delete(Relation relation, ItemPointer tid,
-			ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
+		ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
{
	ItemId		lp;
	HeapTupleData tp;
@@ -1404,9 +1405,9 @@ l1:

	/*
	 * If the tuple has toasted out-of-line attributes, we need to delete
-	 * those items too. We have to do this before WriteBuffer because we need
-	 * to look at the contents of the tuple, but it's OK to release the
-	 * context lock on the buffer first.
+	 * those items too.  We have to do this before WriteBuffer because we
+	 * need to look at the contents of the tuple, but it's OK to release
+	 * the context lock on the buffer first.
	 */
	if (HeapTupleHasExternal(&tp))
		heap_tuple_toast_attrs(relation, NULL, &tp);
@@ -1443,7 +1444,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
	result = heap_delete(relation, tid,
						 &ctid,
						 GetCurrentCommandId(), SnapshotAny,
-						 true /* wait for commit */);
+						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
@@ -1490,7 +1491,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
 */
int
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
-			ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
+		ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
{
	ItemId		lp;
	HeapTupleData oldtup;
@@ -1804,7 +1805,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
	result = heap_update(relation, otid, tup,
						 &ctid,
						 GetCurrentCommandId(), SnapshotAny,
-						 true /* wait for commit */);
+						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
@@ -2198,8 +2199,8 @@ heap_xlog_newpage(bool redo, XLogRecPtr lsn, XLogRecord *record)
	Page		page;

	/*
-	 * Note: the NEWPAGE log record is used for both heaps and indexes,
-	 * so do not do anything that assumes we are touching a heap.
+	 * Note: the NEWPAGE log record is used for both heaps and indexes, so
+	 * do not do anything that assumes we are touching a heap.
	 */
	if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
@@ -2668,7 +2669,7 @@ static void
 out_target(char *buf, xl_heaptid *target)
 {
	sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u",
-			target->node.spcNode, target->node.dbNode, target->node.relNode,
+		 target->node.spcNode, target->node.dbNode, target->node.relNode,
			ItemPointerGetBlockNumber(&(target->tid)),
			ItemPointerGetOffsetNumber(&(target->tid)));
 }
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 1a3b4ef896..fe38999141 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.44 2004/08/29 04:12:20 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.45 2004/08/29 05:06:40 momjian Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -288,13 +288,13 @@ toast_delete(Relation rel, HeapTuple oldtup)
	/*
	 * Get the tuple descriptor and break down the tuple into fields.
	 *
-	 * NOTE: it's debatable whether to use heap_deformtuple() here or
-	 * just heap_getattr() only the varlena columns. The latter could
-	 * win if there are few varlena columns and many non-varlena ones.
-	 * However, heap_deformtuple costs only O(N) while the heap_getattr
-	 * way would cost O(N^2) if there are many varlena columns, so it
-	 * seems better to err on the side of linear cost. (We won't even
-	 * be here unless there's at least one varlena column, by the way.)
+	 * NOTE: it's debatable whether to use heap_deformtuple() here or just
+	 * heap_getattr() only the varlena columns.  The latter could win if
+	 * there are few varlena columns and many non-varlena ones.  However,
+	 * heap_deformtuple costs only O(N) while the heap_getattr way would
+	 * cost O(N^2) if there are many varlena columns, so it seems better
+	 * to err on the side of linear cost.  (We won't even be here unless
+	 * there's at least one varlena column, by the way.)
	 */
	tupleDesc = rel->rd_att;
	att = tupleDesc->attrs;
@@ -311,7 +311,7 @@ toast_delete(Relation rel, HeapTuple oldtup)
	{
		if (att[i]->attlen == -1)
		{
-			Datum value = toast_values[i];
+			Datum		value = toast_values[i];

			if (toast_nulls[i] != 'n' && VARATT_IS_EXTERNAL(value))
				toast_delete_datum(rel, value);
@@ -791,7 +791,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
 *
 *	If a Datum is of composite type, "flatten" it to contain no toasted fields.
 *	This must be invoked on any potentially-composite field that is to be
- *	inserted into a tuple. Doing this preserves the invariant that toasting
+ *	inserted into a tuple.  Doing this preserves the invariant that toasting
 *	goes only one level deep in a tuple.
 * ----------
 */
@@ -1105,7 +1105,7 @@ toast_delete_datum(Relation rel, Datum value)
	ScanKeyInit(&toastkey,
				(AttrNumber) 1,
				BTEqualStrategyNumber, F_OIDEQ,
-				ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+			ObjectIdGetDatum(attr->va_content.va_external.va_valueid));

	/*
	 * Find the chunks by index
@@ -1176,7 +1176,7 @@ toast_fetch_datum(varattrib *attr)
	ScanKeyInit(&toastkey,
				(AttrNumber) 1,
				BTEqualStrategyNumber, F_OIDEQ,
-				ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+			ObjectIdGetDatum(attr->va_content.va_external.va_valueid));

	/*
	 * Read the chunks by index
@@ -1330,7 +1330,7 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length)
	ScanKeyInit(&toastkey[0],
				(AttrNumber) 1,
				BTEqualStrategyNumber, F_OIDEQ,
-				ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+			ObjectIdGetDatum(attr->va_content.va_external.va_valueid));

	/*
	 * Use equality condition for one chunk, a range condition otherwise:
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 71266a5bfb..815e207fb2 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.115 2004/08/29 04:12:21 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.116 2004/08/29 05:06:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -200,26 +200,26 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
			 * We can skip items that are marked killed.
			 *
			 * Formerly, we applied _bt_isequal() before checking the kill
-			 * flag, so as to fall out of the item loop as soon as possible.
-			 * However, in the presence of heavy update activity an index
-			 * may contain many killed items with the same key; running
-			 * _bt_isequal() on each killed item gets expensive. Furthermore
-			 * it is likely that the non-killed version of each key appears
-			 * first, so that we didn't actually get to exit any sooner anyway.
-			 * So now we just advance over killed items as quickly as we can.
-			 * We only apply _bt_isequal() when we get to a non-killed item or
-			 * the end of the page.
+			 * flag, so as to fall out of the item loop as soon as
+			 * possible.  However, in the presence of heavy update activity
+			 * an index may contain many killed items with the same key;
+			 * running _bt_isequal() on each killed item gets expensive.
+			 * Furthermore it is likely that the non-killed version of
+			 * each key appears first, so that we didn't actually get to
+			 * exit any sooner anyway.  So now we just advance over killed
+			 * items as quickly as we can.  We only apply _bt_isequal()
+			 * when we get to a non-killed item or the end of the page.
			 */
			if (!ItemIdDeleted(curitemid))
			{
				/*
-				 * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
-				 * how we handling NULLs - and so we must not use _bt_compare
-				 * in real comparison, but only for ordering/finding items on
-				 * pages. - vadim 03/24/97
+				 * _bt_compare returns 0 for (1,NULL) and (1,NULL) -
+				 * this's how we handling NULLs - and so we must not use
+				 * _bt_compare in real comparison, but only for
+				 * ordering/finding items on pages. - vadim 03/24/97
				 */
				if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
-					break;	/* we're past all the equal tuples */
+					break;		/* we're past all the equal tuples */

				/* okay, we gotta fetch the heap tuple ... */
				cbti = (BTItem) PageGetItem(page, curitemid);
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 7175fd5b2c..fcdb45d952 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.79 2004/08/29 04:12:21 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.80 2004/08/29 05:06:40 momjian Exp $
  *
  * NOTES
  *	  Postgres btree pages look like ordinary relation pages.  The opaque
@@ -276,8 +276,8 @@ _bt_getroot(Relation rel, int access)
		rootlevel = metad->btm_fastlevel;

		/*
-		 * We are done with the metapage; arrange to release it via
-		 * first _bt_relandgetbuf call
+		 * We are done with the metapage; arrange to release it via first
+		 * _bt_relandgetbuf call
		 */
		rootbuf = metabuf;
@@ -368,8 +368,8 @@ _bt_gettrueroot(Relation rel)
	rootlevel = metad->btm_level;

	/*
-	 * We are done with the metapage; arrange to release it via
-	 * first _bt_relandgetbuf call
+	 * We are done with the metapage; arrange to release it via first
+	 * _bt_relandgetbuf call
	 */
	rootbuf = metabuf;
@@ -433,21 +433,22 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
		 * page could have been re-used between the time the last VACUUM
		 * scanned it and the time the VACUUM made its FSM updates.)
		 *
-		 * In fact, it's worse than that: we can't even assume that it's
-		 * safe to take a lock on the reported page.  If somebody else
-		 * has a lock on it, or even worse our own caller does, we could
+		 * In fact, it's worse than that: we can't even assume that it's safe
+		 * to take a lock on the reported page.  If somebody else has a
+		 * lock on it, or even worse our own caller does, we could
		 * deadlock.  (The own-caller scenario is actually not improbable.
		 * Consider an index on a serial or timestamp column.  Nearly all
		 * splits will be at the rightmost page, so it's entirely likely
-		 * that _bt_split will call us while holding a lock on the page most
-		 * recently acquired from FSM. A VACUUM running concurrently with
-		 * the previous split could well have placed that page back in FSM.)
+		 * that _bt_split will call us while holding a lock on the page
+		 * most recently acquired from FSM.  A VACUUM running concurrently
+		 * with the previous split could well have placed that page back
+		 * in FSM.)
		 *
		 * To get around that, we ask for only a conditional lock on the
-		 * reported page. If we fail, then someone else is using the page,
-		 * and we may reasonably assume it's not free. (If we happen to be
-		 * wrong, the worst consequence is the page will be lost to use till
-		 * the next VACUUM, which is no big problem.)
+		 * reported page.  If we fail, then someone else is using the
+		 * page, and we may reasonably assume it's not free.  (If we
+		 * happen to be wrong, the worst consequence is the page will be
+		 * lost to use till the next VACUUM, which is no big problem.)
		 */
		for (;;)
		{
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 15dc433d11..f75fde4c9f 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.88 2004/08/29 04:12:21 momjian Exp $
+ *	  $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.89 2004/08/29 05:06:40 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -155,15 +155,16 @@ _bt_moveright(Relation rel,
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	/*
-	 * When nextkey = false (normal case): if the scan key that brought us to
-	 * this page is > the high key stored on the page, then the page has split
-	 * and we need to move right.  (If the scan key is equal to the high key,
-	 * we might or might not need to move right; have to scan the page first
-	 * anyway.)
+	 * When nextkey = false (normal case): if the scan key that brought us
+	 * to this page is > the high key stored on the page, then the page
+	 * has split and we need to move right.  (If the scan key is equal to
+	 * the high key, we might or might not need to move right; have to
+	 * scan the page first anyway.)
	 *
	 * When nextkey = true: move right if the scan key is >= page's high key.
	 *
-	 * The page could even have split more than once, so scan as far as needed.
+	 * The page could even have split more than once, so scan as far as
+	 * needed.
	 *
	 * We also have to move right if we followed a link that brought us to a
	 * dead page.
@@ -253,13 +254,11 @@ _bt_binsrch(Relation rel,
	 * Binary search to find the first key on the page >= scan key, or
	 * first key > scankey when nextkey is true.
	 *
-	 * For nextkey=false (cmpval=1), the loop invariant is: all slots
-	 * before 'low' are < scan key, all slots at or after 'high'
-	 * are >= scan key.
+	 * For nextkey=false (cmpval=1), the loop invariant is: all slots before
+	 * 'low' are < scan key, all slots at or after 'high' are >= scan key.
	 *
-	 * For nextkey=true (cmpval=0), the loop invariant is: all slots
-	 * before 'low' are <= scan key, all slots at or after 'high'
-	 * are > scan key.
+	 * For nextkey=true (cmpval=0), the loop invariant is: all slots before
+	 * 'low' are <= scan key, all slots at or after 'high' are > scan key.
	 *
	 * We can fall out when high == low.
*/ @@ -285,15 +284,15 @@ _bt_binsrch(Relation rel, * At this point we have high == low, but be careful: they could point * past the last slot on the page. * - * On a leaf page, we always return the first key >= scan key (resp. - * > scan key), which could be the last slot + 1. + * On a leaf page, we always return the first key >= scan key (resp. > + * scan key), which could be the last slot + 1. */ if (P_ISLEAF(opaque)) return low; /* - * On a non-leaf page, return the last key < scan key (resp. <= scan key). - * There must be one if _bt_compare() is playing by the rules. + * On a non-leaf page, return the last key < scan key (resp. <= scan + * key). There must be one if _bt_compare() is playing by the rules. */ Assert(low > P_FIRSTDATAKEY(opaque)); @@ -382,10 +381,10 @@ _bt_compare(Relation rel, { /* * The sk_func needs to be passed the index value as left arg - * and the sk_argument as right arg (they might be of different - * types). Since it is convenient for callers to think of - * _bt_compare as comparing the scankey to the index item, - * we have to flip the sign of the comparison result. + * and the sk_argument as right arg (they might be of + * different types). Since it is convenient for callers to + * think of _bt_compare as comparing the scankey to the index + * item, we have to flip the sign of the comparison result. * * Note: curious-looking coding is to avoid overflow if * comparison function returns INT_MIN. There is no risk of @@ -497,7 +496,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) bool goback; bool continuescan; ScanKey scankeys; - ScanKey *startKeys = NULL; + ScanKey *startKeys = NULL; int keysCount = 0; int i; StrategyNumber strat_total; @@ -521,7 +520,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * We want to identify the keys that can be used as starting boundaries; * these are =, >, or >= keys for a forward scan or =, <, <= keys for * a backwards scan. We can use keys for multiple attributes so long as - * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept + * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept * a > or < boundary or find an attribute with no boundary (which can be * thought of as the same as "> -infinity"), we can't use keys for any * attributes to its right, because it would break our simplistic notion @@ -554,13 +553,15 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) ScanKey cur; startKeys = (ScanKey *) palloc(so->numberOfKeys * sizeof(ScanKey)); + /* - * chosen is the so-far-chosen key for the current attribute, if any. - * We don't cast the decision in stone until we reach keys for the - * next attribute. + * chosen is the so-far-chosen key for the current attribute, if + * any. We don't cast the decision in stone until we reach keys + * for the next attribute. */ curattr = 1; chosen = NULL; + /* * Loop iterates from 0 to numberOfKeys inclusive; we use the last * pass to handle after-last-key processing. Actual exit from the @@ -578,8 +579,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) if (chosen == NULL) break; startKeys[keysCount++] = chosen; + /* - * Adjust strat_total, and quit if we have stored a > or < key. + * Adjust strat_total, and quit if we have stored a > or < + * key. */ strat = chosen->sk_strategy; if (strat != BTEqualStrategyNumber) @@ -589,11 +592,13 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) strat == BTLessStrategyNumber) break; } + /* * Done if that was the last attribute. 
*/ if (i >= so->numberOfKeys) break; + /* * Reset for next attr, which should be in sequence. */ @@ -646,8 +651,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) ScanKey cur = startKeys[i]; /* - * _bt_preprocess_keys disallows it, but it's place to add some code - * later + * _bt_preprocess_keys disallows it, but it's place to add some + * code later */ if (cur->sk_flags & SK_ISNULL) { @@ -656,10 +661,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) elog(ERROR, "btree doesn't support is(not)null, yet"); return false; } + /* * If scankey operator is of default subtype, we can use the - * cached comparison procedure; otherwise gotta look it up in - * the catalogs. + * cached comparison procedure; otherwise gotta look it up in the + * catalogs. */ if (cur->sk_subtype == InvalidOid) { @@ -695,43 +701,46 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) /* * Examine the selected initial-positioning strategy to determine - * exactly where we need to start the scan, and set flag variables - * to control the code below. + * exactly where we need to start the scan, and set flag variables to + * control the code below. * - * If nextkey = false, _bt_search and _bt_binsrch will locate the - * first item >= scan key. If nextkey = true, they will locate the - * first item > scan key. + * If nextkey = false, _bt_search and _bt_binsrch will locate the first + * item >= scan key. If nextkey = true, they will locate the first + * item > scan key. * - * If goback = true, we will then step back one item, while if - * goback = false, we will start the scan on the located item. + * If goback = true, we will then step back one item, while if goback = + * false, we will start the scan on the located item. * * it's yet other place to add some code later for is(not)null ... */ switch (strat_total) { case BTLessStrategyNumber: + /* - * Find first item >= scankey, then back up one to arrive at last - * item < scankey. (Note: this positioning strategy is only used - * for a backward scan, so that is always the correct starting - * position.) + * Find first item >= scankey, then back up one to arrive at + * last item < scankey. (Note: this positioning strategy is + * only used for a backward scan, so that is always the + * correct starting position.) */ nextkey = false; goback = true; break; case BTLessEqualStrategyNumber: + /* - * Find first item > scankey, then back up one to arrive at last - * item <= scankey. (Note: this positioning strategy is only used - * for a backward scan, so that is always the correct starting - * position.) + * Find first item > scankey, then back up one to arrive at + * last item <= scankey. (Note: this positioning strategy is + * only used for a backward scan, so that is always the + * correct starting position.) */ nextkey = true; goback = true; break; case BTEqualStrategyNumber: + /* * If a backward scan was specified, need to start with last * equal item not first one. @@ -739,8 +748,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) if (ScanDirectionIsBackward(dir)) { /* - * This is the same as the <= strategy. We will check - * at the end whether the found item is actually =. + * This is the same as the <= strategy. We will check at + * the end whether the found item is actually =. */ nextkey = true; goback = true; @@ -748,8 +757,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) else { /* - * This is the same as the >= strategy. We will check - * at the end whether the found item is actually =. + * This is the same as the >= strategy. 
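The (nextkey, goback) pairs this switch assigns can be summarized in one standalone helper. The strategy enum below is a local stand-in for the BT*StrategyNumber constants, and the equality case follows the comment exactly: behave like <= for a backward scan, like >= for a forward one.

#include <stdbool.h>

typedef enum { LT, LE, EQ, GE, GT } Strategy;  /* stand-ins, not btree's values */

/* Sketch: where does the scan start, relative to the first item >= key
 * (nextkey=false) or the first item > key (nextkey=true)? */
static void
choose_position(Strategy strat, bool backward, bool *nextkey, bool *goback)
{
    switch (strat)
    {
        case LT: *nextkey = false; *goback = true;  break; /* last item < key */
        case LE: *nextkey = true;  *goback = true;  break; /* last item <= key */
        case EQ: /* backward: same as <=; forward: same as >= */
            *nextkey = backward; *goback = backward; break;
        case GE: *nextkey = false; *goback = false; break; /* first item >= key */
        case GT: *nextkey = true;  *goback = false; break; /* first item > key */
    }
}

int
main(void)
{
    bool nk, gb;

    choose_position(EQ, true, &nk, &gb);  /* backward equality scan */
    return (nk && gb) ? 0 : 1;            /* behaves like the <= case */
}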
We will check at + * the end whether the found item is actually =. */ nextkey = false; goback = false; @@ -757,18 +766,20 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) break; case BTGreaterEqualStrategyNumber: + /* - * Find first item >= scankey. (This is only used for - * forward scans.) + * Find first item >= scankey. (This is only used for forward + * scans.) */ nextkey = false; goback = false; break; case BTGreaterStrategyNumber: + /* - * Find first item > scankey. (This is only used for - * forward scans.) + * Find first item > scankey. (This is only used for forward + * scans.) */ nextkey = true; goback = false; @@ -814,23 +825,23 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) pfree(scankeys); /* - * If nextkey = false, we are positioned at the first item >= scan key, - * or possibly at the end of a page on which all the existing items are - * less than the scan key and we know that everything on later pages - * is greater than or equal to scan key. + * If nextkey = false, we are positioned at the first item >= scan + * key, or possibly at the end of a page on which all the existing + * items are less than the scan key and we know that everything on + * later pages is greater than or equal to scan key. * - * If nextkey = true, we are positioned at the first item > scan key, - * or possibly at the end of a page on which all the existing items are + * If nextkey = true, we are positioned at the first item > scan key, or + * possibly at the end of a page on which all the existing items are * less than or equal to the scan key and we know that everything on * later pages is greater than scan key. * * The actually desired starting point is either this item or the prior - * one, or in the end-of-page case it's the first item on the next page - * or the last item on this page. We apply _bt_step if needed to get to - * the right place. + * one, or in the end-of-page case it's the first item on the next + * page or the last item on this page. We apply _bt_step if needed to + * get to the right place. * - * If _bt_step fails (meaning we fell off the end of the index in - * one direction or the other), then there are no matches so we just + * If _bt_step fails (meaning we fell off the end of the index in one + * direction or the other), then there are no matches so we just * return false. */ if (goback) @@ -1292,7 +1303,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) itup = &(btitem->bti_itup); /* - * Okay, we are on the first or last tuple. Does it pass all the quals? + * Okay, we are on the first or last tuple. Does it pass all the + * quals? */ if (_bt_checkkeys(scan, itup, dir, &continuescan)) { diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index ac251a4ee4..98cdccb3ba 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -41,11 +41,11 @@ * * Since the index will never be used unless it is completely built, * from a crash-recovery point of view there is no need to WAL-log the - * steps of the build. After completing the index build, we can just sync + * steps of the build. After completing the index build, we can just sync * the whole file to disk using smgrimmedsync() before exiting this module. * This can be seen to be sufficient for crash recovery by considering that * it's effectively equivalent to what would happen if a CHECKPOINT occurred - * just after the index build. However, it is clearly not sufficient if the + * just after the index build. 
However, it is clearly not sufficient if the * DBA is using the WAL log for PITR or replication purposes, since another * machine would not be able to reconstruct the index from WAL. Therefore, * we log the completed index pages to WAL if and only if WAL archiving is @@ -56,7 +56,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.87 2004/08/29 04:12:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.88 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -98,7 +98,7 @@ struct BTSpool typedef struct BTPageState { Page btps_page; /* workspace for page building */ - BlockNumber btps_blkno; /* block # to write this page at */ + BlockNumber btps_blkno; /* block # to write this page at */ BTItem btps_minkey; /* copy of minimum key (first item) on * page */ OffsetNumber btps_lastoff; /* last item offset loaded */ @@ -114,10 +114,10 @@ typedef struct BTPageState typedef struct BTWriteState { Relation index; - bool btws_use_wal; /* dump pages to WAL? */ - BlockNumber btws_pages_alloced; /* # pages allocated */ - BlockNumber btws_pages_written; /* # pages written out */ - Page btws_zeropage; /* workspace for filling zeroes */ + bool btws_use_wal; /* dump pages to WAL? */ + BlockNumber btws_pages_alloced; /* # pages allocated */ + BlockNumber btws_pages_written; /* # pages written out */ + Page btws_zeropage; /* workspace for filling zeroes */ } BTWriteState; @@ -136,7 +136,7 @@ static void _bt_sortaddtup(Page page, Size itemsize, static void _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti); static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state); static void _bt_load(BTWriteState *wstate, - BTSpool *btspool, BTSpool *btspool2); + BTSpool *btspool, BTSpool *btspool2); /* @@ -157,12 +157,12 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead) btspool->isunique = isunique; /* - * We size the sort area as maintenance_work_mem rather than work_mem to - * speed index creation. This should be OK since a single backend can't - * run multiple index creations in parallel. Note that creation of a - * unique index actually requires two BTSpool objects. We expect that the - * second one (for dead tuples) won't get very full, so we give it only - * work_mem. + * We size the sort area as maintenance_work_mem rather than work_mem + * to speed index creation. This should be OK since a single backend + * can't run multiple index creations in parallel. Note that creation + * of a unique index actually requires two BTSpool objects. We expect + * that the second one (for dead tuples) won't get very full, so we + * give it only work_mem. */ btKbytes = isdead ? work_mem : maintenance_work_mem; btspool->sortstate = tuplesort_begin_index(index, isunique, @@ -205,7 +205,7 @@ _bt_spool(BTItem btitem, BTSpool *btspool) void _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) { - BTWriteState wstate; + BTWriteState wstate; #ifdef BTREE_BUILD_STATS if (log_btree_build_stats) @@ -220,6 +220,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) tuplesort_performsort(btspool2->sortstate); wstate.index = btspool->index; + /* * We need to log index creation in WAL iff WAL archiving is enabled * AND it's not a temp index. 
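The rule just stated ("WAL iff archiving is enabled and it's not a temp index") is a single conjunction. In a sketch with placeholder booleans (archiving_enabled and is_temp are assumptions of the sketch, not the backend's variable names):

#include <stdbool.h>
#include <stdio.h>

static bool
want_wal_log(bool archiving_enabled, bool is_temp)
{
    /* log completed index pages iff they must survive for PITR/replication */
    return archiving_enabled && !is_temp;
}

int
main(void)
{
    printf("%d %d\n", want_wal_log(true, false), want_wal_log(true, true));
    /* prints "1 0": temp indexes are never WAL-logged */
    return 0;
}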
@@ -229,7 +230,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) /* reserve the metapage */ wstate.btws_pages_alloced = BTREE_METAPAGE + 1; wstate.btws_pages_written = 0; - wstate.btws_zeropage = NULL; /* until needed */ + wstate.btws_zeropage = NULL; /* until needed */ _bt_load(&wstate, btspool, btspool2); } @@ -246,7 +247,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) static Page _bt_blnewpage(uint32 level) { - Page page; + Page page; BTPageOpaque opaque; page = (Page) palloc(BLCKSZ); @@ -313,8 +314,8 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) * If we have to write pages nonsequentially, fill in the space with * zeroes until we come back and overwrite. This is not logically * necessary on standard Unix filesystems (unwritten space will read - * as zeroes anyway), but it should help to avoid fragmentation. - * The dummy pages aren't WAL-logged though. + * as zeroes anyway), but it should help to avoid fragmentation. The + * dummy pages aren't WAL-logged though. */ while (blkno > wstate->btws_pages_written) { @@ -326,9 +327,9 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) } /* - * Now write the page. We say isTemp = true even if it's not a - * temp index, because there's no need for smgr to schedule an fsync - * for this write; we'll do it ourselves before ending the build. + * Now write the page. We say isTemp = true even if it's not a temp + * index, because there's no need for smgr to schedule an fsync for + * this write; we'll do it ourselves before ending the build. */ smgrwrite(wstate->index->rd_smgr, blkno, (char *) page, true); @@ -468,7 +469,7 @@ static void _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) { Page npage; - BlockNumber nblkno; + BlockNumber nblkno; OffsetNumber last_off; Size pgspc; Size btisz; @@ -506,7 +507,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) * already. Finish off the page and write it out. */ Page opage = npage; - BlockNumber oblkno = nblkno; + BlockNumber oblkno = nblkno; ItemId ii; ItemId hii; BTItem obti; @@ -539,8 +540,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) ((PageHeader) opage)->pd_lower -= sizeof(ItemIdData); /* - * Link the old page into its parent, using its minimum key. If - * we don't have a parent, we have to create one; this adds a new + * Link the old page into its parent, using its minimum key. If we + * don't have a parent, we have to create one; this adds a new * btree level. */ if (state->btps_next == NULL) @@ -572,8 +573,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) } /* - * Write out the old page. We never need to touch it again, - * so we can free the opage workspace too. + * Write out the old page. We never need to touch it again, so we + * can free the opage workspace too. */ _bt_blwritepage(wstate, opage, oblkno); @@ -613,7 +614,7 @@ static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) { BTPageState *s; - BlockNumber rootblkno = P_NONE; + BlockNumber rootblkno = P_NONE; uint32 rootlevel = 0; Page metapage; @@ -663,9 +664,9 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) /* * As the last step in the process, construct the metapage and make it - * point to the new root (unless we had no data at all, in which case it's - * set to point to "P_NONE"). This changes the index to the "valid" - * state by filling in a valid magic number in the metapage. 
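The zero-filling behavior _bt_blwritepage describes (pad the gap when a page lands beyond the last block written, so the file has no holes) can be sketched with plain POSIX I/O. BLCKSZ_DEMO, the file name, and the bookkeeping variable are assumptions of the sketch, not btree constants:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define BLCKSZ_DEMO 8192

/* Write page "blkno"; if it lies past the last page written so far,
 * fill the intervening blocks with zeroes first. */
static int
write_page(int fd, long *pages_written, long blkno, const char *page)
{
    static const char zeropage[BLCKSZ_DEMO];    /* zero-initialized */

    while (blkno > *pages_written)
    {
        if (pwrite(fd, zeropage, BLCKSZ_DEMO,
                   (off_t) (*pages_written) * BLCKSZ_DEMO) != BLCKSZ_DEMO)
            return -1;
        (*pages_written)++;
    }
    if (pwrite(fd, page, BLCKSZ_DEMO,
               (off_t) blkno * BLCKSZ_DEMO) != BLCKSZ_DEMO)
        return -1;
    if (blkno == *pages_written)
        (*pages_written)++;
    return 0;
}

int
main(void)
{
    char buf[BLCKSZ_DEMO];
    long written = 0;
    int fd = open("demo_index", O_RDWR | O_CREAT | O_TRUNC, 0600);

    if (fd < 0)
        return 1;
    memset(buf, 'x', sizeof(buf));
    if (write_page(fd, &written, 3, buf) != 0)  /* zero-fills blocks 0..2 */
        return 1;
    return close(fd);
}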
+ * point to the new root (unless we had no data at all, in which case + * it's set to point to "P_NONE"). This changes the index to the + * "valid" state by filling in a valid magic number in the metapage. */ metapage = (Page) palloc(BLCKSZ); _bt_initmetapage(metapage, rootblkno, rootlevel); @@ -744,7 +745,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) compare = DatumGetInt32(FunctionCall2(&entry->sk_func, attrDatum1, - attrDatum2)); + attrDatum2)); if (compare > 0) { load1 = false; @@ -768,7 +769,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) if (should_free) pfree((void *) bti); bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, - true, &should_free); + true, &should_free); } else { @@ -776,7 +777,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) if (should_free2) pfree((void *) bti2); bti2 = (BTItem) tuplesort_getindextuple(btspool2->sortstate, - true, &should_free2); + true, &should_free2); } } _bt_freeskey(indexScanKey); @@ -785,7 +786,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) { /* merge is unnecessary */ while ((bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, - true, &should_free)) != NULL) + true, &should_free)) != NULL) { /* When we see first tuple, create first index page */ if (state == NULL) @@ -802,18 +803,18 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) /* * If the index isn't temp, we must fsync it down to disk before it's - * safe to commit the transaction. (For a temp index we don't care + * safe to commit the transaction. (For a temp index we don't care * since the index will be uninteresting after a crash anyway.) * - * It's obvious that we must do this when not WAL-logging the build. - * It's less obvious that we have to do it even if we did WAL-log the - * index pages. The reason is that since we're building outside - * shared buffers, a CHECKPOINT occurring during the build has no way - * to flush the previously written data to disk (indeed it won't know - * the index even exists). A crash later on would replay WAL from the + * It's obvious that we must do this when not WAL-logging the build. It's + * less obvious that we have to do it even if we did WAL-log the index + * pages. The reason is that since we're building outside shared + * buffers, a CHECKPOINT occurring during the build has no way to + * flush the previously written data to disk (indeed it won't know the + * index even exists). A crash later on would replay WAL from the * checkpoint, therefore it wouldn't replay our earlier WAL entries. - * If we do not fsync those pages here, they might still not be on disk - * when the crash occurs. + * If we do not fsync those pages here, they might still not be on + * disk when the crash occurs. 
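The fsync requirement argued above is independent of WAL: data written through private buffers must be forced down before the transaction that created it can commit, because no checkpoint will do it on the file's behalf. A standalone sketch using POSIX calls (file name and payload are placeholders):

#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
    int fd = open("demo_index", O_WRONLY | O_CREAT | O_TRUNC, 0600);

    if (fd < 0)
        return 1;
    if (write(fd, "built outside shared buffers", 28) != 28)
        return 1;

    /* A checkpoint elsewhere cannot flush this file for us, so force it
     * to disk before declaring the build durable. */
    if (fsync(fd) != 0)
        return 1;
    return close(fd);
}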
*/ if (!wstate->index->rd_istemp) smgrimmedsync(wstate->index->rd_smgr); diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 425e3b057e..bd640f6f8b 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.59 2004/08/29 04:12:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.60 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -48,8 +48,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup) bool null; /* - * We can use the cached (default) support procs since no cross-type - * comparison can be needed. + * We can use the cached (default) support procs since no + * cross-type comparison can be needed. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); arg = index_getattr(itup, i + 1, itupdesc, &null); @@ -68,7 +68,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup) /* * _bt_mkscankey_nodata * Build a scan key that contains comparator routines appropriate to - * the key datatypes, but no comparison data. The comparison data + * the key datatypes, but no comparison data. The comparison data * ultimately used must match the key datatypes. * * The result cannot be used with _bt_compare(). Currently this @@ -93,8 +93,8 @@ _bt_mkscankey_nodata(Relation rel) FmgrInfo *procinfo; /* - * We can use the cached (default) support procs since no cross-type - * comparison can be needed. + * We can use the cached (default) support procs since no + * cross-type comparison can be needed. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); ScanKeyEntryInitializeWithInfo(&skey[i], @@ -163,12 +163,12 @@ _bt_formitem(IndexTuple itup) * _bt_preprocess_keys() -- Preprocess scan keys * * The caller-supplied keys (in scan->keyData[]) are copied to - * so->keyData[] with possible transformation. scan->numberOfKeys is + * so->keyData[] with possible transformation. scan->numberOfKeys is * the number of input keys, so->numberOfKeys gets the number of output * keys (possibly less, never greater). * * The primary purpose of this routine is to discover how many scan keys - * must be satisfied to continue the scan. It also attempts to eliminate + * must be satisfied to continue the scan. It also attempts to eliminate * redundant keys and detect contradictory keys. At present, redundant and * contradictory keys can only be detected for same-data-type comparisons, * but that's the usual case so it seems worth doing. @@ -198,7 +198,7 @@ _bt_formitem(IndexTuple itup) * or one or two boundary-condition keys for each attr.) However, we can * only detect redundant keys when the right-hand datatypes are all equal * to the index datatype, because we do not know suitable operators for - * comparing right-hand values of two different datatypes. (In theory + * comparing right-hand values of two different datatypes. (In theory * we could handle comparison of a RHS of the index datatype with a RHS of * another type, but that seems too much pain for too little gain.) So, * keys whose operator has a nondefault subtype (ie, its RHS is not of the @@ -285,9 +285,9 @@ _bt_preprocess_keys(IndexScanDesc scan) * * xform[i] points to the currently best scan key of strategy type i+1, * if any is found with a default operator subtype; it is NULL if we - * haven't yet found such a key for this attr. 
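A stripped-down version of the redundancy test _bt_preprocess_keys performs: when the comparison semantics are known (same-type integer bounds here), "x < a AND x <= b" keeps only the tighter upper bound, and a contradictory combination with a lower bound is detected up front. The key representation is an assumption of the sketch:

#include <stdbool.h>
#include <stdio.h>

/* Combine "x < lt" and "x <= le" into one upper bound, then test it
 * against a lower bound "x > gt"; returns false for a contradiction. */
static bool
combine_bounds(int lt, int le, int gt, int *bound, bool *inclusive)
{
    /* for integers, "x < lt" is equivalent to "x <= lt - 1" */
    if (lt - 1 <= le)
    {
        *bound = lt;
        *inclusive = false;
    }
    else
    {
        *bound = le;
        *inclusive = true;
    }
    /* contradictory if nothing can exceed gt and still fit under the bound */
    return *inclusive ? (*bound > gt) : (*bound - 1 > gt);
}

int
main(void)
{
    int bound;
    bool incl;
    bool ok = combine_bounds(10, 7, 8, &bound, &incl);

    printf("%s x %s %d\n", ok ? "scan" : "empty", incl ? "<=" : "<", bound);
    /* "x < 10 AND x <= 7 AND x > 8" keeps "x <= 7" and is contradictory */
    return 0;
}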
Scan keys of nondefault - * subtypes are transferred to the output with no processing except for - * noting if they are of "=" type. + * haven't yet found such a key for this attr. Scan keys of + * nondefault subtypes are transferred to the output with no + * processing except for noting if they are of "=" type. */ attno = 1; memset(xform, 0, sizeof(xform)); @@ -361,7 +361,7 @@ _bt_preprocess_keys(IndexScanDesc scan) /* * If no "=" for this key, we're done with required keys */ - if (! hasOtherTypeEqual) + if (!hasOtherTypeEqual) allEqualSoFar = false; } @@ -369,8 +369,8 @@ _bt_preprocess_keys(IndexScanDesc scan) if (xform[BTLessStrategyNumber - 1] && xform[BTLessEqualStrategyNumber - 1]) { - ScanKey lt = xform[BTLessStrategyNumber - 1]; - ScanKey le = xform[BTLessEqualStrategyNumber - 1]; + ScanKey lt = xform[BTLessStrategyNumber - 1]; + ScanKey le = xform[BTLessEqualStrategyNumber - 1]; test = FunctionCall2(&le->sk_func, lt->sk_argument, @@ -385,8 +385,8 @@ _bt_preprocess_keys(IndexScanDesc scan) if (xform[BTGreaterStrategyNumber - 1] && xform[BTGreaterEqualStrategyNumber - 1]) { - ScanKey gt = xform[BTGreaterStrategyNumber - 1]; - ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1]; + ScanKey gt = xform[BTGreaterStrategyNumber - 1]; + ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1]; test = FunctionCall2(&ge->sk_func, gt->sk_argument, @@ -545,21 +545,23 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple, { /* * Tuple fails this qual. If it's a required qual, then we - * may be able to conclude no further tuples will pass, either. - * We have to look at the scan direction and the qual type. + * may be able to conclude no further tuples will pass, + * either. We have to look at the scan direction and the qual + * type. * * Note: the only case in which we would keep going after failing - * a required qual is if there are partially-redundant quals that - * _bt_preprocess_keys() was unable to eliminate. For example, - * given "x > 4 AND x > 10" where both are cross-type comparisons - * and so not removable, we might start the scan at the x = 4 - * boundary point. The "x > 10" condition will fail until we - * pass x = 10, but we must not stop the scan on its account. + * a required qual is if there are partially-redundant quals + * that _bt_preprocess_keys() was unable to eliminate. For + * example, given "x > 4 AND x > 10" where both are cross-type + * comparisons and so not removable, we might start the scan + * at the x = 4 boundary point. The "x > 10" condition will + * fail until we pass x = 10, but we must not stop the scan on + * its account. * - * Note: because we stop the scan as soon as any required equality - * qual fails, it is critical that equality quals be used for the - * initial positioning in _bt_first() when they are available. - * See comments in _bt_first(). + * Note: because we stop the scan as soon as any required + * equality qual fails, it is critical that equality quals be + * used for the initial positioning in _bt_first() when they + * are available. See comments in _bt_first(). 
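The "required qual" rule in _bt_checkkeys, in miniature: the first failure of a qual that bounds the scan in its own direction ends the scan, while other failures merely skip the tuple. The sketch below models the clean terminating case, a forward scan with a required "x < bound" qual; the "x > 4 AND x > 10" case in the comment is precisely one where the failing qual must not be treated as terminating. The array layout is a stand-in for the scan-key data:

#include <stdbool.h>
#include <stdio.h>

typedef struct { int bound; bool required; } Qual;  /* sketch: "x < bound" */

/* Forward-scan sketch: tuples arrive in ascending x. When a required
 * "x < bound" qual fails, no later tuple can pass, so stop the scan. */
static bool
check_quals(int x, const Qual *quals, int nquals, bool *continuescan)
{
    *continuescan = true;
    for (int i = 0; i < nquals; i++)
    {
        if (x < quals[i].bound)
            continue;               /* qual satisfied */
        if (quals[i].required)
            *continuescan = false;  /* later tuples only get larger */
        return false;
    }
    return true;
}

int
main(void)
{
    Qual q[] = {{10, true}};        /* required: x < 10 */
    bool cont;

    for (int x = 8;; x++)
    {
        bool pass = check_quals(x, q, 1, &cont);

        printf("x=%d pass=%d continue=%d\n", x, pass, cont);
        if (!cont)
            break;                  /* scan stops at x = 10 */
    }
    return 0;
}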
*/ if (ikey < so->numberOfRequiredKeys) { diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index edb1ff8d9b..03f09e3fa2 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.17 2004/08/29 04:12:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.18 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -770,7 +770,7 @@ static void out_target(char *buf, xl_btreetid *target) { sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u", - target->node.spcNode, target->node.dbNode, target->node.relNode, + target->node.spcNode, target->node.dbNode, target->node.relNode, ItemPointerGetBlockNumber(&(target->tid)), ItemPointerGetOffsetNumber(&(target->tid))); } diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c index 1765cef28d..4ec3a5da63 100644 --- a/src/backend/access/rtree/rtscan.c +++ b/src/backend/access/rtree/rtscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.54 2004/08/29 04:12:22 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.55 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -123,7 +123,7 @@ rtrescan(PG_FUNCTION_ARGS) Oid int_oper; RegProcedure int_proc; - opclass = s->indexRelation->rd_index->indclass[attno-1]; + opclass = s->indexRelation->rd_index->indclass[attno - 1]; int_strategy = RTMapToInternalOperator(s->keyData[i].sk_strategy); int_oper = get_opclass_member(opclass, s->keyData[i].sk_subtype, @@ -280,14 +280,14 @@ rtdropscan(IndexScanDesc s) void ReleaseResources_rtree(void) { - RTScanList l; - RTScanList prev; - RTScanList next; + RTScanList l; + RTScanList prev; + RTScanList next; /* - * Note: this should be a no-op during normal query shutdown. - * However, in an abort situation ExecutorEnd is not called and so - * there may be open index scans to clean up. + * Note: this should be a no-op during normal query shutdown. However, + * in an abort situation ExecutorEnd is not called and so there may be + * open index scans to clean up. 
*/ prev = NULL; diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index b26807f9af..fb490e4137 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -24,7 +24,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.24 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.25 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,6 +62,7 @@ * Link to shared-memory data structures for CLOG control */ static SlruCtlData ClogCtlData; + #define ClogCtl (&ClogCtlData) diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index c695013ed1..c87b38a792 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -48,7 +48,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.20 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.21 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -79,7 +79,7 @@ * segment and page numbers in SimpleLruTruncate (see PagePrecedes()). * * Note: this file currently assumes that segment file names will be four - * hex digits. This sets a lower bound on the segment size (64K transactions + * hex digits. This sets a lower bound on the segment size (64K transactions * for 32-bit TransactionIds). */ #define SLRU_PAGES_PER_SEGMENT 32 @@ -96,9 +96,9 @@ */ typedef struct SlruFlushData { - int num_files; /* # files actually open */ - int fd[NUM_SLRU_BUFFERS]; /* their FD's */ - int segno[NUM_SLRU_BUFFERS]; /* their log seg#s */ + int num_files; /* # files actually open */ + int fd[NUM_SLRU_BUFFERS]; /* their FD's */ + int segno[NUM_SLRU_BUFFERS]; /* their log seg#s */ } SlruFlushData; /* @@ -132,7 +132,7 @@ static int slru_errno; static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno); static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, - SlruFlush fdata); + SlruFlush fdata); static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid); static int SlruSelectLRUPage(SlruCtl ctl, int pageno); @@ -385,7 +385,7 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata) /* If we failed, and we're in a flush, better close the files */ if (!ok && fdata) { - int i; + int i; for (i = 0; i < fdata->num_files; i++) close(fdata->fd[i]); @@ -511,7 +511,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) */ if (fdata) { - int i; + int i; for (i = 0; i < fdata->num_files; i++) { @@ -527,16 +527,17 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) { /* * If the file doesn't already exist, we should create it. It is - * possible for this to need to happen when writing a page that's not - * first in its segment; we assume the OS can cope with that. - * (Note: it might seem that it'd be okay to create files only when - * SimpleLruZeroPage is called for the first page of a segment. 
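The SlruFlushData bookkeeping above amounts to a tiny open-file cache keyed by segment number, so a single flush opens each segment file at most once. A sketch with placeholder naming (segment files as four-hex-digit names, as the slru.c comment notes):

#include <fcntl.h>
#include <stdio.h>

#define MAX_OPEN 16

typedef struct
{
    int num_files;              /* # files actually open */
    int fd[MAX_OPEN];
    int segno[MAX_OPEN];
} FlushData;

/* Return an fd for segment "segno", opening the file only on first use. */
static int
flush_get_fd(FlushData *fdata, int segno)
{
    char path[32];
    int fd;

    for (int i = 0; i < fdata->num_files; i++)
        if (fdata->segno[i] == segno)
            return fdata->fd[i];

    snprintf(path, sizeof(path), "%04X", segno);    /* four hex digits */
    fd = open(path, O_RDWR | O_CREAT, 0600);
    if (fd >= 0 && fdata->num_files < MAX_OPEN)
    {
        fdata->fd[fdata->num_files] = fd;
        fdata->segno[fdata->num_files] = segno;
        fdata->num_files++;
    }
    return fd;
}

int
main(void)
{
    FlushData fdata = {0};
    int a = flush_get_fd(&fdata, 3);
    int b = flush_get_fd(&fdata, 3);

    return (a >= 0 && a == b) ? 0 : 1;  /* second lookup hits the cache */
}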
- * However, if after a crash and restart the REDO logic elects to - * replay the log from a checkpoint before the latest one, then it's - * possible that we will get commands to set transaction status of - * transactions that have already been truncated from the commit log. - * Easiest way to deal with that is to accept references to - * nonexistent files here and in SlruPhysicalReadPage.) + * possible for this to need to happen when writing a page that's + * not first in its segment; we assume the OS can cope with that. + * (Note: it might seem that it'd be okay to create files only + * when SimpleLruZeroPage is called for the first page of a + * segment. However, if after a crash and restart the REDO logic + * elects to replay the log from a checkpoint before the latest + * one, then it's possible that we will get commands to set + * transaction status of transactions that have already been + * truncated from the commit log. Easiest way to deal with that is + * to accept references to nonexistent files here and in + * SlruPhysicalReadPage.) */ SlruFileName(ctl, path, segno); fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); @@ -648,36 +649,36 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid) ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not seek in file \"%s\" to offset %u: %m", - path, offset))); + errdetail("could not seek in file \"%s\" to offset %u: %m", + path, offset))); break; case SLRU_READ_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not read from file \"%s\" at offset %u: %m", - path, offset))); + errdetail("could not read from file \"%s\" at offset %u: %m", + path, offset))); break; case SLRU_WRITE_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not write to file \"%s\" at offset %u: %m", - path, offset))); + errdetail("could not write to file \"%s\" at offset %u: %m", + path, offset))); break; case SLRU_FSYNC_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not fsync file \"%s\": %m", - path))); + errdetail("could not fsync file \"%s\": %m", + path))); break; case SLRU_CLOSE_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not close file \"%s\": %m", - path))); + errdetail("could not close file \"%s\": %m", + path))); break; default: /* can't get here, we trust */ @@ -841,8 +842,8 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage) /* * Scan shared memory and remove any pages preceding the cutoff page, * to ensure we won't rewrite them later. (Since this is normally - * called in or just after a checkpoint, any dirty pages should - * have been flushed already ... we're just being extra careful here.) + * called in or just after a checkpoint, any dirty pages should have + * been flushed already ... we're just being extra careful here.) 
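The "accept references to nonexistent files" policy described above boils down to retrying the open with O_CREAT when the first attempt fails with ENOENT; anything else remains a real error. Standalone, with a placeholder path:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

/* Open an SLRU-style segment file; create it if it has been truncated
 * away, since redo may legitimately reference such a segment. */
static int
open_segment(const char *path)
{
    int fd = open(path, O_RDWR, 0600);

    if (fd < 0 && errno == ENOENT)
        fd = open(path, O_RDWR | O_CREAT, 0600);
    return fd;
}

int
main(void)
{
    int fd = open_segment("0000");

    if (fd < 0)
        return 1;
    return close(fd);
}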
*/ LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE); @@ -952,8 +953,11 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions) errno = 0; } #ifdef WIN32 - /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but - not in released version */ + + /* + * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but + * not in released version + */ if (GetLastError() == ERROR_NO_MORE_FILES) errno = 0; #endif diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index 7976de2300..93a586148b 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -5,7 +5,7 @@ * * The pg_subtrans manager is a pg_clog-like manager that stores the parent * transaction Id for each transaction. It is a fundamental part of the - * nested transactions implementation. A main transaction has a parent + * nested transactions implementation. A main transaction has a parent * of InvalidTransactionId, and each subtransaction has its immediate parent. * The tree can easily be walked from child to parent, but not in the * opposite direction. @@ -22,7 +22,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.4 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.5 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -57,6 +57,7 @@ * Link to shared-memory data structures for SUBTRANS control */ static SlruCtlData SubTransCtlData; + #define SubTransCtl (&SubTransCtlData) @@ -101,7 +102,7 @@ SubTransGetParent(TransactionId xid) int entryno = TransactionIdToEntry(xid); int slotno; TransactionId *ptr; - TransactionId parent; + TransactionId parent; /* Can't ask about stuff that might not be around anymore */ Assert(TransactionIdFollowsOrEquals(xid, RecentXmin)); @@ -139,7 +140,7 @@ TransactionId SubTransGetTopmostTransaction(TransactionId xid) { TransactionId parentXid = xid, - previousXid = xid; + previousXid = xid; /* Can't ask about stuff that might not be around anymore */ Assert(TransactionIdFollowsOrEquals(xid, RecentXmin)); @@ -185,7 +186,7 @@ SUBTRANSShmemInit(void) * must have been called already.) * * Note: it's not really necessary to create the initial segment now, - * since slru.c would create it on first write anyway. But we may as well + * since slru.c would create it on first write anyway. But we may as well * do it to be sure the directory is set up correctly. */ void @@ -229,10 +230,11 @@ StartupSUBTRANS(void) int startPage; /* - * Since we don't expect pg_subtrans to be valid across crashes, - * we initialize the currently-active page to zeroes during startup. + * Since we don't expect pg_subtrans to be valid across crashes, we + * initialize the currently-active page to zeroes during startup. * Whenever we advance into a new page, ExtendSUBTRANS will likewise - * zero the new page without regard to whatever was previously on disk. + * zero the new page without regard to whatever was previously on + * disk. */ LWLockAcquire(SubtransControlLock, LW_EXCLUSIVE); @@ -251,8 +253,8 @@ ShutdownSUBTRANS(void) /* * Flush dirty SUBTRANS pages to disk * - * This is not actually necessary from a correctness point of view. - * We do it merely as a debugging aid. + * This is not actually necessary from a correctness point of view. 
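The child-to-parent walk is essentially the whole pg_subtrans data structure: a mapping from each xid to its immediate parent, with an invalid id at the top. A sketch of the topmost-transaction lookup, with an array standing in for the SLRU pages and 0 standing in for InvalidTransactionId:

#include <stdio.h>

#define NXIDS 8
#define INVALID 0

/* parent[x] is x's immediate parent; a main transaction's parent is 0 */
static const int parent[NXIDS] = {0, 0, 1, 2, 0, 4, 0, 6};

static int
topmost(int xid)
{
    int prev = xid;

    while (xid != INVALID)
    {
        prev = xid;
        xid = parent[xid];      /* one pg_subtrans lookup per level */
    }
    return prev;
}

int
main(void)
{
    printf("%d %d\n", topmost(3), topmost(5));  /* prints "1 4" */
    return 0;
}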
We do + * it merely as a debugging aid. */ SimpleLruFlush(SubTransCtl, false); } @@ -266,8 +268,8 @@ CheckPointSUBTRANS(void) /* * Flush dirty SUBTRANS pages to disk * - * This is not actually necessary from a correctness point of view. - * We do it merely to improve the odds that writing of dirty pages is done + * This is not actually necessary from a correctness point of view. We do + * it merely to improve the odds that writing of dirty pages is done * by the checkpoint process and not by backends. */ SimpleLruFlush(SubTransCtl, true); diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c index fd5a1619a7..f82168be5b 100644 --- a/src/backend/access/transam/transam.c +++ b/src/backend/access/transam/transam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.60 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.61 2004/08/29 05:06:40 momjian Exp $ * * NOTES * This file contains the high level access-method interface to the @@ -126,7 +126,7 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */ static void TransactionLogMultiUpdate(int nxids, TransactionId *xids, XidStatus status) { - int i; + int i; Assert(nxids != 0); @@ -199,9 +199,10 @@ TransactionIdDidCommit(TransactionId transactionId) return true; /* - * If it's marked subcommitted, we have to check the parent recursively. - * However, if it's older than RecentXmin, we can't look at pg_subtrans; - * instead assume that the parent crashed without cleaning up its children. + * If it's marked subcommitted, we have to check the parent + * recursively. However, if it's older than RecentXmin, we can't look + * at pg_subtrans; instead assume that the parent crashed without + * cleaning up its children. */ if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED) { @@ -214,7 +215,7 @@ TransactionIdDidCommit(TransactionId transactionId) return TransactionIdDidCommit(parentXid); } - /* + /* * It's not committed. */ return false; @@ -247,9 +248,10 @@ TransactionIdDidAbort(TransactionId transactionId) return true; /* - * If it's marked subcommitted, we have to check the parent recursively. - * However, if it's older than RecentXmin, we can't look at pg_subtrans; - * instead assume that the parent crashed without cleaning up its children. + * If it's marked subcommitted, we have to check the parent + * recursively. However, if it's older than RecentXmin, we can't look + * at pg_subtrans; instead assume that the parent crashed without + * cleaning up its children. */ if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED) { diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index cfd7893742..84926ac415 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -6,7 +6,7 @@ * Copyright (c) 2000-2004, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.58 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.59 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -47,9 +47,9 @@ GetNewTransactionId(bool isSubXact) xid = ShmemVariableCache->nextXid; /* - * If we are allocating the first XID of a new page of the commit - * log, zero out that commit-log page before returning. 
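The recursion TransactionIdDidCommit describes only triggers on the SUB_COMMITTED marking, and the RecentXmin cutoff turns an unreachable parent into a presumed crash. A sketch with toy status and parent tables (the cutoff is represented by a simple threshold):

#include <stdbool.h>
#include <stdio.h>

enum { IN_PROGRESS, COMMITTED, ABORTED, SUB_COMMITTED };

#define NXIDS 6
static const int status[NXIDS] = {0, COMMITTED, SUB_COMMITTED, SUB_COMMITTED, 0, 0};
static const int parent[NXIDS] = {0, 0, 1, 4, 0, 0};
static const int recent_xmin = 1;   /* xids below this may be truncated away */

static bool
did_commit(int xid)
{
    if (status[xid] == COMMITTED)
        return true;
    if (status[xid] == SUB_COMMITTED)
    {
        if (xid < recent_xmin)
            return false;   /* can't consult pg_subtrans; assume a crash */
        return did_commit(parent[xid]);
    }
    return false;
}

int
main(void)
{
    printf("%d %d\n", did_commit(2), did_commit(3));
    /* prints "1 0": xid 2's parent committed, xid 3's is still running */
    return 0;
}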
We must do - * this while holding XidGenLock, else another xact could acquire and + * If we are allocating the first XID of a new page of the commit log, + * zero out that commit-log page before returning. We must do this + * while holding XidGenLock, else another xact could acquire and * commit a later XID before we zero the page. Fortunately, a page of * the commit log holds 32K or more transactions, so we don't have to * do this very often. @@ -61,17 +61,18 @@ GetNewTransactionId(bool isSubXact) /* * Now advance the nextXid counter. This must not happen until after - * we have successfully completed ExtendCLOG() --- if that routine fails, - * we want the next incoming transaction to try it again. We cannot - * assign more XIDs until there is CLOG space for them. + * we have successfully completed ExtendCLOG() --- if that routine + * fails, we want the next incoming transaction to try it again. We + * cannot assign more XIDs until there is CLOG space for them. */ TransactionIdAdvance(ShmemVariableCache->nextXid); /* - * We must store the new XID into the shared PGPROC array before releasing - * XidGenLock. This ensures that when GetSnapshotData calls + * We must store the new XID into the shared PGPROC array before + * releasing XidGenLock. This ensures that when GetSnapshotData calls * ReadNewTransactionId, all active XIDs before the returned value of - * nextXid are already present in PGPROC. Else we have a race condition. + * nextXid are already present in PGPROC. Else we have a race + * condition. * * XXX by storing xid into MyProc without acquiring SInvalLock, we are * relying on fetch/store of an xid to be atomic, else other backends @@ -86,19 +87,19 @@ GetNewTransactionId(bool isSubXact) * * A solution to the atomic-store problem would be to give each PGPROC * its own spinlock used only for fetching/storing that PGPROC's xid - * and related fields. (SInvalLock would then mean primarily that + * and related fields. (SInvalLock would then mean primarily that * PGPROCs couldn't be added/removed while holding the lock.) * * If there's no room to fit a subtransaction XID into PGPROC, set the * cache-overflowed flag instead. This forces readers to look in - * pg_subtrans to map subtransaction XIDs up to top-level XIDs. - * There is a race-condition window, in that the new XID will not - * appear as running until its parent link has been placed into - * pg_subtrans. However, that will happen before anyone could possibly - * have a reason to inquire about the status of the XID, so it seems - * OK. (Snapshots taken during this window *will* include the parent - * XID, so they will deliver the correct answer later on when someone - * does have a reason to inquire.) + * pg_subtrans to map subtransaction XIDs up to top-level XIDs. There + * is a race-condition window, in that the new XID will not appear as + * running until its parent link has been placed into pg_subtrans. + * However, that will happen before anyone could possibly have a + * reason to inquire about the status of the XID, so it seems OK. + * (Snapshots taken during this window *will* include the parent XID, + * so they will deliver the correct answer later on when someone does + * have a reason to inquire.) 
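The cache-overflow trick above trades precision for bounded shared memory: a fixed number of subxid slots per backend, plus a sticky flag that, once set, tells readers to fall back to pg_subtrans. A sketch (the slot count is an assumption, not PGPROC's actual size):

#include <stdbool.h>
#include <stdio.h>

#define MAX_CACHED_SUBXIDS 4    /* stand-in for the PGPROC cache size */

typedef struct
{
    int nxids;
    int xids[MAX_CACHED_SUBXIDS];
    bool overflowed;
} SubxidCache;

static void
cache_subxid(SubxidCache *c, int xid)
{
    if (c->nxids < MAX_CACHED_SUBXIDS)
        c->xids[c->nxids++] = xid;
    else
        c->overflowed = true;   /* readers must consult pg_subtrans */
}

int
main(void)
{
    SubxidCache c = {0};

    for (int xid = 100; xid < 106; xid++)
        cache_subxid(&c, xid);
    printf("%d %d\n", c.nxids, c.overflowed);   /* prints "4 1" */
    return 0;
}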
*/ if (MyProc != NULL) { @@ -112,9 +113,7 @@ GetNewTransactionId(bool isSubXact) MyProc->subxids.nxids++; } else - { MyProc->subxids.overflowed = true; - } } } diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index fd5d6b5168..3bb38e4227 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -10,7 +10,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.182 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.183 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -89,19 +89,20 @@ typedef enum TBlockState */ typedef struct TransactionStateData { - TransactionId transactionIdData; /* my XID */ - char *name; /* savepoint name, if any */ - int savepointLevel; /* savepoint level */ - CommandId commandId; /* current CID */ - TransState state; /* low-level state */ - TBlockState blockState; /* high-level state */ - int nestingLevel; /* nest depth */ - MemoryContext curTransactionContext; /* my xact-lifetime context */ - ResourceOwner curTransactionOwner; /* my query resources */ - List *childXids; /* subcommitted child XIDs */ - AclId currentUser; /* subxact start current_user */ - bool prevXactReadOnly; /* entry-time xact r/o state */ - struct TransactionStateData *parent; /* back link to parent */ + TransactionId transactionIdData; /* my XID */ + char *name; /* savepoint name, if any */ + int savepointLevel; /* savepoint level */ + CommandId commandId; /* current CID */ + TransState state; /* low-level state */ + TBlockState blockState; /* high-level state */ + int nestingLevel; /* nest depth */ + MemoryContext curTransactionContext; /* my xact-lifetime + * context */ + ResourceOwner curTransactionOwner; /* my query resources */ + List *childXids; /* subcommitted child XIDs */ + AclId currentUser; /* subxact start current_user */ + bool prevXactReadOnly; /* entry-time xact r/o state */ + struct TransactionStateData *parent; /* back link to parent */ } TransactionStateData; typedef TransactionStateData *TransactionState; @@ -180,8 +181,8 @@ static TransactionState CurrentTransactionState = &TopTransactionStateData; * This does not change as we enter and exit subtransactions, so we don't * keep it inside the TransactionState stack. */ -static AbsoluteTime xactStartTime; /* integer part */ -static int xactStartTimeUsec; /* microsecond part */ +static AbsoluteTime xactStartTime; /* integer part */ +static int xactStartTimeUsec; /* microsecond part */ /* @@ -261,7 +262,7 @@ IsAbortedTransactionBlockState(void) { TransactionState s = CurrentTransactionState; - if (s->blockState == TBLOCK_ABORT || + if (s->blockState == TBLOCK_ABORT || s->blockState == TBLOCK_SUBABORT) return true; @@ -362,15 +363,15 @@ TransactionIdIsCurrentTransactionId(TransactionId xid) } /* - * We will return true for the Xid of the current subtransaction, - * any of its subcommitted children, any of its parents, or any of - * their previously subcommitted children. However, a transaction - * being aborted is no longer "current", even though it may still - * have an entry on the state stack. + * We will return true for the Xid of the current subtransaction, any + * of its subcommitted children, any of its parents, or any of their + * previously subcommitted children. However, a transaction being + * aborted is no longer "current", even though it may still have an + * entry on the state stack. 
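The membership test described above is a walk up the parent links, skipping aborted levels and, at each level, checking both the level's own xid and its subcommitted children. A sketch with explicit structs, using a fixed zero-terminated array where the backend keeps a List:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct Xact
{
    int xid;
    bool aborted;
    int childxids[4];           /* subcommitted children; 0-terminated */
    struct Xact *parent;
} Xact;

static bool
is_current_xid(const Xact *s, int xid)
{
    for (; s != NULL; s = s->parent)
    {
        if (s->aborted)
            continue;           /* an aborting level is no longer "current" */
        if (s->xid == xid)
            return true;
        for (int i = 0; s->childxids[i] != 0; i++)
            if (s->childxids[i] == xid)
                return true;
    }
    return false;
}

int
main(void)
{
    Xact top = {100, false, {101, 0}, NULL};    /* 101 is subcommitted */
    Xact sub = {102, false, {0}, &top};

    printf("%d %d %d\n", is_current_xid(&sub, 102),
           is_current_xid(&sub, 101), is_current_xid(&sub, 103));
    /* prints "1 1 0" */
    return 0;
}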
*/ for (s = CurrentTransactionState; s != NULL; s = s->parent) { - ListCell *cell; + ListCell *cell; if (s->state == TRANS_ABORT) continue; @@ -502,15 +503,16 @@ AtSubStart_Memory(void) Assert(CurTransactionContext != NULL); /* - * Create a CurTransactionContext, which will be used to hold data that - * survives subtransaction commit but disappears on subtransaction abort. - * We make it a child of the immediate parent's CurTransactionContext. + * Create a CurTransactionContext, which will be used to hold data + * that survives subtransaction commit but disappears on + * subtransaction abort. We make it a child of the immediate parent's + * CurTransactionContext. */ CurTransactionContext = AllocSetContextCreate(CurTransactionContext, "CurTransactionContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); s->curTransactionContext = CurTransactionContext; /* Make the CurTransactionContext active. */ @@ -528,8 +530,8 @@ AtSubStart_ResourceOwner(void) Assert(s->parent != NULL); /* - * Create a resource owner for the subtransaction. We make it a - * child of the immediate parent's resource owner. + * Create a resource owner for the subtransaction. We make it a child + * of the immediate parent's resource owner. */ s->curTransactionOwner = ResourceOwnerCreate(s->parent->curTransactionOwner, @@ -560,10 +562,11 @@ RecordTransactionCommit(void) nchildren = xactGetCommittedChildren(&children); /* - * If we made neither any XLOG entries nor any temp-rel updates, - * and have no files to be deleted, we can omit recording the transaction + * If we made neither any XLOG entries nor any temp-rel updates, and + * have no files to be deleted, we can omit recording the transaction * commit at all. (This test includes the effects of subtransactions, - * so the presence of committed subxacts need not alone force a write.) + * so the presence of committed subxacts need not alone force a + * write.) */ if (MyXactMadeXLogEntry || MyXactMadeTempRelUpdate || nrels > 0) { @@ -577,17 +580,18 @@ RecordTransactionCommit(void) START_CRIT_SECTION(); /* - * If our transaction made any transaction-controlled XLOG entries, - * we need to lock out checkpoint start between writing our XLOG - * record and updating pg_clog. Otherwise it is possible for the - * checkpoint to set REDO after the XLOG record but fail to flush the - * pg_clog update to disk, leading to loss of the transaction commit - * if we crash a little later. Slightly klugy fix for problem - * discovered 2004-08-10. + * If our transaction made any transaction-controlled XLOG + * entries, we need to lock out checkpoint start between writing + * our XLOG record and updating pg_clog. Otherwise it is possible + * for the checkpoint to set REDO after the XLOG record but fail + * to flush the pg_clog update to disk, leading to loss of the + * transaction commit if we crash a little later. Slightly klugy + * fix for problem discovered 2004-08-10. * * (If it made no transaction-controlled XLOG entries, its XID - * appears nowhere in permanent storage, so no one else will ever care - * if it committed; so it doesn't matter if we lose the commit flag.) + * appears nowhere in permanent storage, so no one else will ever + * care if it committed; so it doesn't matter if we lose the + * commit flag.) * * Note we only need a shared lock. 
*/ @@ -798,21 +802,21 @@ static void RecordSubTransactionCommit(void) { /* - * We do not log the subcommit in XLOG; it doesn't matter until - * the top-level transaction commits. + * We do not log the subcommit in XLOG; it doesn't matter until the + * top-level transaction commits. * * We must mark the subtransaction subcommitted in clog if its XID * appears either in permanent rels or in local temporary rels. We - * test this by seeing if we made transaction-controlled entries - * *OR* local-rel tuple updates. (The test here actually covers the - * entire transaction tree so far, so it may mark subtransactions that - * don't really need it, but it's probably not worth being tenser. - * Note that if a prior subtransaction dirtied these variables, then + * test this by seeing if we made transaction-controlled entries *OR* + * local-rel tuple updates. (The test here actually covers the entire + * transaction tree so far, so it may mark subtransactions that don't + * really need it, but it's probably not worth being tenser. Note that + * if a prior subtransaction dirtied these variables, then * RecordTransactionCommit will have to do the full pushup anyway...) */ if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate) { - TransactionId xid = GetCurrentTransactionId(); + TransactionId xid = GetCurrentTransactionId(); /* XXX does this really need to be a critical section? */ START_CRIT_SECTION(); @@ -837,8 +841,8 @@ RecordTransactionAbort(void) { int nrels; RelFileNode *rptr; - int nchildren; - TransactionId *children; + int nchildren; + TransactionId *children; /* Get data needed for abort record */ nrels = smgrGetPendingDeletes(false, &rptr); @@ -846,13 +850,13 @@ RecordTransactionAbort(void) /* * If we made neither any transaction-controlled XLOG entries nor any - * temp-rel updates, and are not going to delete any files, we can omit - * recording the transaction abort at all. No one will ever care that - * it aborted. (These tests cover our whole transaction tree.) + * temp-rel updates, and are not going to delete any files, we can + * omit recording the transaction abort at all. No one will ever care + * that it aborted. (These tests cover our whole transaction tree.) */ if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate || nrels > 0) { - TransactionId xid = GetCurrentTransactionId(); + TransactionId xid = GetCurrentTransactionId(); /* * Catch the scenario where we aborted partway through @@ -867,13 +871,13 @@ RecordTransactionAbort(void) * We only need to log the abort in XLOG if the transaction made * any transaction-controlled XLOG entries or will delete files. * (If it made no transaction-controlled XLOG entries, its XID - * appears nowhere in permanent storage, so no one else will ever care - * if it committed.) + * appears nowhere in permanent storage, so no one else will ever + * care if it committed.) * * We do not flush XLOG to disk unless deleting files, since the - * default assumption after a crash would be that we aborted, anyway. - * For the same reason, we don't need to worry about interlocking - * against checkpoint start. + * default assumption after a crash would be that we aborted, + * anyway. For the same reason, we don't need to worry about + * interlocking against checkpoint start. 
*/ if (MyLastRecPtr.xrecoff != 0 || nrels > 0) { @@ -990,9 +994,9 @@ RecordSubTransactionAbort(void) { int nrels; RelFileNode *rptr; - TransactionId xid = GetCurrentTransactionId(); - int nchildren; - TransactionId *children; + TransactionId xid = GetCurrentTransactionId(); + int nchildren; + TransactionId *children; /* Get data needed for abort record */ nrels = smgrGetPendingDeletes(false, &rptr); @@ -1000,10 +1004,10 @@ RecordSubTransactionAbort(void) /* * If we made neither any transaction-controlled XLOG entries nor any - * temp-rel updates, and are not going to delete any files, we can omit - * recording the transaction abort at all. No one will ever care that - * it aborted. (These tests cover our whole transaction tree, and - * therefore may mark subxacts that don't really need it, but it's + * temp-rel updates, and are not going to delete any files, we can + * omit recording the transaction abort at all. No one will ever care + * that it aborted. (These tests cover our whole transaction tree, + * and therefore may mark subxacts that don't really need it, but it's * probably not worth being tenser.) * * In this case we needn't worry about marking subcommitted children as @@ -1021,9 +1025,9 @@ RecordSubTransactionAbort(void) if (MyLastRecPtr.xrecoff != 0 || nrels > 0) { XLogRecData rdata[3]; - int lastrdata = 0; + int lastrdata = 0; xl_xact_abort xlrec; - XLogRecPtr recptr; + XLogRecPtr recptr; xlrec.xtime = time(NULL); xlrec.nrels = nrels; @@ -1071,8 +1075,8 @@ RecordSubTransactionAbort(void) /* * We can immediately remove failed XIDs from PGPROC's cache of * running child XIDs. It's easiest to do it here while we have the - * child XID array at hand, even though in the main-transaction - * case the equivalent work happens just after return from + * child XID array at hand, even though in the main-transaction case + * the equivalent work happens just after return from * RecordTransactionAbort. */ XidCacheRemoveRunningXids(xid, nchildren, children); @@ -1169,7 +1173,8 @@ StartTransaction(void) s->state = TRANS_START; /* - * Make sure we've freed any old snapshot, and reset xact state variables + * Make sure we've freed any old snapshot, and reset xact state + * variables */ FreeXactSnapshot(); XactIsoLevel = DefaultXactIsoLevel; @@ -1323,9 +1328,9 @@ CommitTransaction(void) * want to release locks at the point where any backend waiting for us * will see our transaction as being fully cleaned up. * - * Resources that can be associated with individual queries are - * handled by the ResourceOwner mechanism. The other calls here - * are for backend-wide state. + * Resources that can be associated with individual queries are handled + * by the ResourceOwner mechanism. The other calls here are for + * backend-wide state. */ smgrDoPendingDeletes(true); @@ -1342,7 +1347,8 @@ CommitTransaction(void) * after relcache references are dropped (see comments for * AtEOXact_RelationCache), but before locks are released (if anyone * is waiting for lock on a relation we've modified, we want them to - * know about the catalog change before they start using the relation). + * know about the catalog change before they start using the + * relation). */ AtEOXact_Inval(true); @@ -1428,11 +1434,12 @@ AbortTransaction(void) /* * Reset user id which might have been changed transiently. We cannot - * use s->currentUser, but must get the session userid from miscinit.c. + * use s->currentUser, but must get the session userid from + * miscinit.c. 
* * (Note: it is not necessary to restore session authorization here * because that can only be changed via GUC, and GUC will take care of - * rolling it back if need be. However, an error within a SECURITY + * rolling it back if need be. However, an error within a SECURITY * DEFINER function could send control here with the wrong current * userid.) */ @@ -1443,7 +1450,7 @@ AbortTransaction(void) */ DeferredTriggerAbortXact(); AtAbort_Portals(); - AtEOXact_LargeObject(false); /* 'false' means it's abort */ + AtEOXact_LargeObject(false); /* 'false' means it's abort */ AtAbort_Notify(); AtEOXact_UpdatePasswordFile(false); @@ -1523,7 +1530,7 @@ CleanupTransaction(void) */ AtCleanup_Portals(); /* now safe to release portal memory */ - CurrentResourceOwner = NULL; /* and resource owner */ + CurrentResourceOwner = NULL; /* and resource owner */ ResourceOwnerDelete(TopTransactionResourceOwner); s->curTransactionOwner = NULL; CurTransactionResourceOwner = NULL; @@ -1561,9 +1568,10 @@ StartTransactionCommand(void) break; /* - * This is the case when we are somewhere in a transaction block - * and about to start a new command. For now we do nothing - * but someday we may do command-local resource initialization. + * This is the case when we are somewhere in a transaction + * block and about to start a new command. For now we do + * nothing but someday we may do command-local resource + * initialization. */ case TBLOCK_INPROGRESS: case TBLOCK_SUBINPROGRESS: @@ -1616,8 +1624,8 @@ CommitTransactionCommand(void) /* * This shouldn't happen, because it means the previous * StartTransactionCommand didn't set the STARTED state - * appropriately, or we didn't manage previous pending - * abort states. + * appropriately, or we didn't manage previous pending abort + * states. */ case TBLOCK_DEFAULT: case TBLOCK_SUBABORT_PENDING: @@ -1689,19 +1697,21 @@ CommitTransactionCommand(void) break; /* - * Ditto, but in a subtransaction. AbortOutOfAnyTransaction + * Ditto, but in a subtransaction. AbortOutOfAnyTransaction * will do the dirty work. */ case TBLOCK_SUBENDABORT_ALL: AbortOutOfAnyTransaction(); - s = CurrentTransactionState; /* changed by AbortOutOfAnyTransaction */ + s = CurrentTransactionState; /* changed by + * AbortOutOfAnyTransaction + * */ /* AbortOutOfAnyTransaction sets the blockState */ break; /* * We were just issued a SAVEPOINT inside a transaction block. - * Start a subtransaction. (DefineSavepoint already - * did PushTransaction, so as to have someplace to put the + * Start a subtransaction. (DefineSavepoint already did + * PushTransaction, so as to have someplace to put the * SUBBEGIN state.) */ case TBLOCK_SUBBEGIN: @@ -1720,14 +1730,15 @@ CommitTransactionCommand(void) * We were issued a RELEASE command, so we end the current * subtransaction and return to the parent transaction. * - * Since RELEASE can exit multiple levels of subtransaction, - * we must loop here until we get out of all SUBEND'ed levels. + * Since RELEASE can exit multiple levels of subtransaction, we + * must loop here until we get out of all SUBEND'ed levels. */ case TBLOCK_SUBEND: - do { + do + { CommitSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ } while (s->blockState == TBLOCK_SUBEND); break; @@ -1738,25 +1749,26 @@ CommitTransactionCommand(void) break; /* - * The current subtransaction is ending. Do the equivalent - * of a ROLLBACK TO followed by a RELEASE command. + * The current subtransaction is ending. 
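The RELEASE handling above pops one stack level per iteration until the current level is no longer marked SUBEND. A sketch with a minimal state stack, the block-state enum trimmed to the two values the loop cares about:

#include <stdio.h>
#include <stdlib.h>

typedef enum { INPROGRESS, SUBEND } BlockState;

typedef struct Level
{
    BlockState blockState;
    struct Level *parent;
} Level;

static Level *
pop(Level *s)
{
    Level *p = s->parent;

    free(s);            /* commit-and-discard one subtransaction level */
    return p;
}

int
main(void)
{
    /* top <- sub1 <- sub2, where RELEASE marked both subs SUBEND */
    Level *top = calloc(1, sizeof(Level));
    Level *sub1 = calloc(1, sizeof(Level));
    Level *sub2 = calloc(1, sizeof(Level));
    Level *s = sub2;
    int popped = 0;

    if (!top || !sub1 || !sub2)
        return 1;
    sub1->parent = top;
    sub1->blockState = SUBEND;
    sub2->parent = sub1;
    sub2->blockState = SUBEND;

    do
    {
        s = pop(s);     /* one commit-subtransaction-and-pop step */
        popped++;
    } while (s->blockState == SUBEND);

    printf("popped %d levels\n", popped);   /* prints "popped 2 levels" */
    free(s);
    return 0;
}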
Do the equivalent of + * a ROLLBACK TO followed by a RELEASE command. */ case TBLOCK_SUBENDABORT_RELEASE: CleanupAbortedSubTransactions(false); break; /* - * The current subtransaction is ending due to a ROLLBACK - * TO command, so close all savepoints up to the target - * level. When finished, recreate the savepoint. + * The current subtransaction is ending due to a ROLLBACK TO + * command, so close all savepoints up to the target level. + * When finished, recreate the savepoint. */ case TBLOCK_SUBENDABORT: { - char *name = CleanupAbortedSubTransactions(true); + char *name = CleanupAbortedSubTransactions(true); Assert(PointerIsValid(name)); DefineSavepoint(name); - s = CurrentTransactionState; /* changed by DefineSavepoint */ + s = CurrentTransactionState; /* changed by + * DefineSavepoint */ pfree(name); /* This is the same as TBLOCK_SUBBEGIN case */ @@ -1780,8 +1792,8 @@ static char * CleanupAbortedSubTransactions(bool returnName) { TransactionState s = CurrentTransactionState; - char *name = NULL; - + char *name = NULL; + AssertState(PointerIsValid(s->parent)); Assert(s->parent->blockState == TBLOCK_SUBINPROGRESS || s->parent->blockState == TBLOCK_INPROGRESS || @@ -1798,7 +1810,7 @@ CleanupAbortedSubTransactions(bool returnName) CleanupSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ while (s->blockState == TBLOCK_SUBABORT_PENDING) { @@ -1827,9 +1839,9 @@ AbortCurrentTransaction(void) switch (s->blockState) { - /* - * we aren't in a transaction, so we do nothing. - */ + /* + * we aren't in a transaction, so we do nothing. + */ case TBLOCK_DEFAULT: break; @@ -1856,10 +1868,10 @@ AbortCurrentTransaction(void) break; /* - * This is the case when we are somewhere in a transaction block - * and we've gotten a failure, so we abort the transaction and - * set up the persistent ABORT state. We will stay in ABORT - * until we get an "END TRANSACTION". + * This is the case when we are somewhere in a transaction + * block and we've gotten a failure, so we abort the + * transaction and set up the persistent ABORT state. We will + * stay in ABORT until we get an "END TRANSACTION". */ case TBLOCK_INPROGRESS: AbortTransaction(); @@ -1900,8 +1912,8 @@ AbortCurrentTransaction(void) break; /* - * If we are just starting a subtransaction, put it - * in aborted state. + * If we are just starting a subtransaction, put it in aborted + * state. */ case TBLOCK_SUBBEGIN: StartAbortedSubTransaction(); @@ -1914,8 +1926,8 @@ AbortCurrentTransaction(void) break; /* - * If we are aborting an ending transaction, - * we have to abort the parent transaction too. + * If we are aborting an ending transaction, we have to abort + * the parent transaction too. */ case TBLOCK_SUBEND: case TBLOCK_SUBABORT_PENDING: @@ -1924,7 +1936,7 @@ AbortCurrentTransaction(void) PopTransaction(); s = CurrentTransactionState; /* changed by pop */ Assert(s->blockState != TBLOCK_SUBEND && - s->blockState != TBLOCK_SUBENDABORT); + s->blockState != TBLOCK_SUBENDABORT); AbortCurrentTransaction(); break; @@ -1937,13 +1949,13 @@ AbortCurrentTransaction(void) PopTransaction(); s = CurrentTransactionState; /* changed by pop */ Assert(s->blockState != TBLOCK_SUBEND && - s->blockState != TBLOCK_SUBENDABORT); + s->blockState != TBLOCK_SUBENDABORT); AbortCurrentTransaction(); break; /* - * We are already aborting the whole transaction tree. - * Do nothing, CommitTransactionCommand will call + * We are already aborting the whole transaction tree. 
Do + * nothing, CommitTransactionCommand will call * AbortOutOfAnyTransaction and set things straight. */ case TBLOCK_SUBENDABORT_ALL: @@ -2068,8 +2080,8 @@ bool IsInTransactionChain(void *stmtNode) { /* - * Return true on same conditions that would make PreventTransactionChain - * error out + * Return true on same conditions that would make + * PreventTransactionChain error out */ if (IsTransactionBlock()) return true; @@ -2097,8 +2109,8 @@ IsInTransactionChain(void *stmtNode) * (mainly because it's easier to control the order that way, where needed). * * At transaction end, the callback occurs post-commit or post-abort, so the - * callback functions can only do noncritical cleanup. At subtransaction - * start, the callback is called when the subtransaction has finished + * callback functions can only do noncritical cleanup. At subtransaction + * start, the callback is called when the subtransaction has finished * initializing. */ void @@ -2141,9 +2153,7 @@ CallXactCallbacks(XactEvent event, TransactionId parentXid) XactCallbackItem *item; for (item = Xact_callbacks; item; item = item->next) - { (*item->callback) (event, parentXid, item->arg); - } } @@ -2164,8 +2174,8 @@ BeginTransactionBlock(void) switch (s->blockState) { /* - * We are not inside a transaction block, so allow one - * to begin. + * We are not inside a transaction block, so allow one to + * begin. */ case TBLOCK_STARTED: s->blockState = TBLOCK_BEGIN; @@ -2180,7 +2190,7 @@ BeginTransactionBlock(void) case TBLOCK_SUBABORT: ereport(WARNING, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("there is already a transaction in progress"))); + errmsg("there is already a transaction in progress"))); break; /* These cases are invalid. Reject them altogether. */ @@ -2215,12 +2225,13 @@ EndTransactionBlock(void) switch (s->blockState) { - /* - * We are in a transaction block which should commit when we - * get to the upcoming CommitTransactionCommand() so we set the - * state to "END". CommitTransactionCommand() will recognize this - * and commit the transaction and return us to the default state. - */ + /* + * We are in a transaction block which should commit when we + * get to the upcoming CommitTransactionCommand() so we set + * the state to "END". CommitTransactionCommand() will + * recognize this and commit the transaction and return us to + * the default state. + */ case TBLOCK_INPROGRESS: case TBLOCK_SUBINPROGRESS: s->blockState = TBLOCK_END; @@ -2229,30 +2240,31 @@ EndTransactionBlock(void) /* * We are in a transaction block which aborted. Since the - * AbortTransaction() was already done, we need only - * change to the special "END ABORT" state. The upcoming - * CommitTransactionCommand() will recognise this and then put us - * back in the default state. + * AbortTransaction() was already done, we need only change to + * the special "END ABORT" state. The upcoming + * CommitTransactionCommand() will recognise this and then put + * us back in the default state. */ case TBLOCK_ABORT: s->blockState = TBLOCK_ENDABORT; break; /* - * Here we are inside an aborted subtransaction. Go to the "abort - * the whole tree" state so that CommitTransactionCommand() calls - * AbortOutOfAnyTransaction. + * Here we are inside an aborted subtransaction. Go to the + * "abort the whole tree" state so that + * CommitTransactionCommand() calls AbortOutOfAnyTransaction. */ case TBLOCK_SUBABORT: s->blockState = TBLOCK_SUBENDABORT_ALL; break; case TBLOCK_STARTED: + /* - * here, the user issued COMMIT when not inside a - * transaction. 
Issue a WARNING and go to abort state. The - * upcoming call to CommitTransactionCommand() will then put us - * back into the default state. + * here, the user issued COMMIT when not inside a transaction. + * Issue a WARNING and go to abort state. The upcoming call + * to CommitTransactionCommand() will then put us back into + * the default state. */ ereport(WARNING, (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), @@ -2303,11 +2315,10 @@ UserAbortTransactionBlock(void) break; /* - * We are inside a failed subtransaction and we got an - * abort command from the user. Abort processing is already - * done, so go to the "abort all" state and - * CommitTransactionCommand will call AbortOutOfAnyTransaction - * to set things straight. + * We are inside a failed subtransaction and we got an abort + * command from the user. Abort processing is already done, + * so go to the "abort all" state and CommitTransactionCommand + * will call AbortOutOfAnyTransaction to set things straight. */ case TBLOCK_SUBABORT: s->blockState = TBLOCK_SUBENDABORT_ALL; @@ -2325,7 +2336,7 @@ UserAbortTransactionBlock(void) break; /* - * We are inside a subtransaction. Abort the current + * We are inside a subtransaction. Abort the current * subtransaction and go to the "abort all" state, so * CommitTransactionCommand will call AbortOutOfAnyTransaction * to set things straight. @@ -2373,7 +2384,7 @@ UserAbortTransactionBlock(void) void DefineSavepoint(char *name) { - TransactionState s = CurrentTransactionState; + TransactionState s = CurrentTransactionState; switch (s->blockState) { @@ -2381,11 +2392,12 @@ DefineSavepoint(char *name) case TBLOCK_SUBINPROGRESS: /* Normal subtransaction start */ PushTransaction(); - s = CurrentTransactionState; /* changed by push */ + s = CurrentTransactionState; /* changed by push */ + /* * Note that we are allocating the savepoint name in the - * parent transaction's CurTransactionContext, since we - * don't yet have a transaction context for the new guy. + * parent transaction's CurTransactionContext, since we don't + * yet have a transaction context for the new guy. */ s->name = MemoryContextStrdup(CurTransactionContext, name); s->blockState = TBLOCK_SUBBEGIN; @@ -2413,16 +2425,16 @@ DefineSavepoint(char *name) /* * ReleaseSavepoint - * This executes a RELEASE command. + * This executes a RELEASE command. */ void ReleaseSavepoint(List *options) { - TransactionState s = CurrentTransactionState; + TransactionState s = CurrentTransactionState; TransactionState target, - xact; - ListCell *cell; - char *name = NULL; + xact; + ListCell *cell; + char *name = NULL; /* * Check valid block state transaction status. @@ -2437,8 +2449,8 @@ ReleaseSavepoint(List *options) break; /* - * We are in a non-aborted subtransaction. This is - * the only valid case. + * We are in a non-aborted subtransaction. This is the only + * valid case. */ case TBLOCK_SUBINPROGRESS: break; @@ -2461,9 +2473,9 @@ ReleaseSavepoint(List *options) break; } - foreach (cell, options) + foreach(cell, options) { - DefElem *elem = lfirst(cell); + DefElem *elem = lfirst(cell); if (strcmp(elem->defname, "savepoint_name") == 0) name = strVal(elem->arg); @@ -2490,8 +2502,8 @@ ReleaseSavepoint(List *options) /* * Mark "commit pending" all subtransactions up to the target - * subtransaction. The actual commits will happen when control - * gets to CommitTransactionCommand. + * subtransaction. The actual commits will happen when control gets + * to CommitTransactionCommand. 
 	 */
 	xact = CurrentTransactionState;
 	for (;;)
@@ -2507,23 +2519,23 @@ ReleaseSavepoint(List *options)
 
 /*
  * RollbackToSavepoint
- * 	This executes a ROLLBACK TO <savepoint> command.
+ *		This executes a ROLLBACK TO <savepoint> command.
  */
 void
 RollbackToSavepoint(List *options)
 {
 	TransactionState s = CurrentTransactionState;
 	TransactionState target,
-					xact;
-	ListCell   *cell;
-	char	   *name = NULL;
+				xact;
+	ListCell   *cell;
+	char	   *name = NULL;
 
 	switch (s->blockState)
 	{
-		/*
-		 * We can't roll back to a savepoint if there is no savepoint
-		 * defined.
-		 */
+			/*
+			 * We can't roll back to a savepoint if there is no savepoint
+			 * defined.
+			 */
 		case TBLOCK_ABORT:
 		case TBLOCK_INPROGRESS:
 			ereport(ERROR,
@@ -2536,9 +2548,10 @@ RollbackToSavepoint(List *options)
 		 */
 		case TBLOCK_SUBABORT:
 		case TBLOCK_SUBINPROGRESS:
+
 			/*
-			 * Have to do AbortSubTransaction, but first check
-			 * if this is the right subtransaction
+			 * Have to do AbortSubTransaction, but first check if this is
+			 * the right subtransaction
 			 */
 			break;
 
@@ -2559,9 +2572,9 @@ RollbackToSavepoint(List *options)
 			break;
 	}
 
-	foreach (cell, options)
+	foreach(cell, options)
 	{
-		DefElem *elem = lfirst(cell);
+		DefElem    *elem = lfirst(cell);
 
 		if (strcmp(elem->defname, "savepoint_name") == 0)
 			name = strVal(elem->arg);
@@ -2597,7 +2610,7 @@ RollbackToSavepoint(List *options)
 
 	/*
 	 * Mark "abort pending" all subtransactions up to the target
-	 * subtransaction.  (Except the current subtransaction!)
+	 * subtransaction.	(Except the current subtransaction!)
 	 */
 	xact = CurrentTransactionState;
 
@@ -2623,7 +2636,7 @@ RollbackToSavepoint(List *options)
 void
 BeginInternalSubTransaction(char *name)
 {
-	TransactionState s = CurrentTransactionState;
+	TransactionState s = CurrentTransactionState;
 
 	switch (s->blockState)
 	{
@@ -2632,11 +2645,12 @@ BeginInternalSubTransaction(char *name)
 		case TBLOCK_SUBINPROGRESS:
 			/* Normal subtransaction start */
 			PushTransaction();
-			s = CurrentTransactionState;	/* changed by push */
+			s = CurrentTransactionState;		/* changed by push */
+
 			/*
 			 * Note that we are allocating the savepoint name in the
-			 * parent transaction's CurTransactionContext, since we
-			 * don't yet have a transaction context for the new guy.
+			 * parent transaction's CurTransactionContext, since we don't
+			 * yet have a transaction context for the new guy.
 			 */
 			if (name)
 				s->name = MemoryContextStrdup(CurTransactionContext, name);
@@ -2698,7 +2712,7 @@ RollbackAndReleaseCurrentSubTransaction(void)
 
 	switch (s->blockState)
 	{
-		/* Must be in a subtransaction */
+			/* Must be in a subtransaction */
 		case TBLOCK_SUBABORT:
 		case TBLOCK_SUBINPROGRESS:
 			break;
@@ -2748,7 +2762,8 @@ AbortOutOfAnyTransaction(void)
 	/*
 	 * Get out of any transaction or nested transaction
 	 */
-	do {
+	do
+	{
 		switch (s->blockState)
 		{
 			case TBLOCK_DEFAULT:
@@ -2770,21 +2785,26 @@ AbortOutOfAnyTransaction(void)
 				s->blockState = TBLOCK_DEFAULT;
 				break;
 			case TBLOCK_SUBBEGIN:
+
 				/*
-				 * We didn't get as far as starting the subxact, so there's
-				 * nothing to abort.  Just pop back to parent.
+				 * We didn't get as far as starting the subxact, so
+				 * there's nothing to abort.  Just pop back to parent.
*/ PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ break; case TBLOCK_SUBINPROGRESS: case TBLOCK_SUBEND: case TBLOCK_SUBABORT_PENDING: - /* In a subtransaction, so clean it up and abort parent too */ + + /* + * In a subtransaction, so clean it up and abort parent + * too + */ AbortSubTransaction(); CleanupSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ break; case TBLOCK_SUBABORT: case TBLOCK_SUBENDABORT_ALL: @@ -2793,7 +2813,7 @@ AbortOutOfAnyTransaction(void) /* As above, but AbortSubTransaction already done */ CleanupSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ break; } } while (s->blockState != TBLOCK_DEFAULT); @@ -2819,7 +2839,7 @@ CommitTransactionToLevel(int level) { CommitSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ Assert(s->state == TRANS_INPROGRESS); } } @@ -2840,7 +2860,7 @@ IsTransactionBlock(void) /* * IsTransactionOrTransactionBlock --- are we within either a transaction - * or a transaction block? (The backend is only really "idle" when this + * or a transaction block? (The backend is only really "idle" when this * returns false.) * * This should match up with IsTransactionBlock and IsTransactionState. @@ -2928,9 +2948,10 @@ StartSubTransaction(void) /* * Generate a new Xid and record it in pg_subtrans. NB: we must make - * the subtrans entry BEFORE the Xid appears anywhere in shared storage, - * such as in the lock table; because until it's made the Xid may not - * appear to be "running" to other backends. See GetNewTransactionId. + * the subtrans entry BEFORE the Xid appears anywhere in shared + * storage, such as in the lock table; because until it's made the Xid + * may not appear to be "running" to other backends. See + * GetNewTransactionId. */ s->transactionIdData = GetNewTransactionId(true); @@ -2943,7 +2964,7 @@ StartSubTransaction(void) */ s->currentUser = GetUserId(); s->prevXactReadOnly = XactReadOnly; - + /* * Initialize other subsystems for new subtransaction */ @@ -2954,7 +2975,7 @@ StartSubTransaction(void) s->state = TRANS_INPROGRESS; /* - * Call start-of-subxact callbacks + * Call start-of-subxact callbacks */ CallXactCallbacks(XACT_EVENT_START_SUB, s->parent->transactionIdData); @@ -3020,9 +3041,9 @@ CommitSubTransaction(void) s->parent->transactionIdData); /* - * We need to restore the upper transaction's read-only state, - * in case the upper is read-write while the child is read-only; - * GUC will incorrectly think it should leave the child state in place. + * We need to restore the upper transaction's read-only state, in case + * the upper is read-write while the child is read-only; GUC will + * incorrectly think it should leave the child state in place. */ XactReadOnly = s->prevXactReadOnly; @@ -3117,14 +3138,16 @@ AbortSubTransaction(void) /* * Reset user id which might have been changed transiently. Here we * want to restore to the userid that was current at subxact entry. - * (As in AbortTransaction, we need not worry about the session userid.) + * (As in AbortTransaction, we need not worry about the session + * userid.) 
* * Must do this after AtEOXact_GUC to handle the case where we entered * the subxact inside a SECURITY DEFINER function (hence current and * session userids were different) and then session auth was changed - * inside the subxact. GUC will reset both current and session userids - * to the entry-time session userid. This is right in every other - * scenario so it seems simplest to let GUC do that and fix it here. + * inside the subxact. GUC will reset both current and session + * userids to the entry-time session userid. This is right in every + * other scenario so it seems simplest to let GUC do that and fix it + * here. */ SetUserId(s->currentUser); @@ -3168,11 +3191,11 @@ CleanupSubTransaction(void) * StartAbortedSubTransaction * * This function is used to start a subtransaction and put it immediately - * into aborted state. The end result should be equivalent to + * into aborted state. The end result should be equivalent to * StartSubTransaction immediately followed by AbortSubTransaction. * The reason we don't implement it just that way is that many of the backend * modules aren't designed to handle starting a subtransaction when not - * inside a valid transaction. Rather than making them all capable of + * inside a valid transaction. Rather than making them all capable of * doing that, we just omit the paired start and abort calls in this path. */ static void @@ -3195,9 +3218,10 @@ StartAbortedSubTransaction(void) /* Make sure currentUser is reasonably valid */ Assert(s->parent != NULL); s->currentUser = s->parent->currentUser; - + /* - * Initialize only what has to be there for CleanupSubTransaction to work. + * Initialize only what has to be there for CleanupSubTransaction to + * work. */ AtSubStart_Memory(); AtSubStart_ResourceOwner(); @@ -3219,8 +3243,8 @@ StartAbortedSubTransaction(void) static void PushTransaction(void) { - TransactionState p = CurrentTransactionState; - TransactionState s; + TransactionState p = CurrentTransactionState; + TransactionState s; /* * We keep subtransaction state nodes in TopTransactionContext. @@ -3315,7 +3339,7 @@ ShowTransactionStateRec(TransactionState s) /* use ereport to suppress computation if msg will not be printed */ ereport(DEBUG2, (errmsg_internal("name: %s; blockState: %13s; state: %7s, xid/cid: %u/%02u, nestlvl: %d, children: %s", - PointerIsValid(s->name) ? s->name : "unnamed", + PointerIsValid(s->name) ? s->name : "unnamed", BlockStateAsString(s->blockState), TransStateAsString(s->state), (unsigned int) s->transactionIdData, @@ -3393,7 +3417,7 @@ TransStateAsString(TransState state) /* * xactGetCommittedChildren * - * Gets the list of committed children of the current transaction. The return + * Gets the list of committed children of the current transaction. The return * value is the number of child transactions. *children is set to point to a * palloc'd array of TransactionIds. If there are no subxacts, *children is * set to NULL. 
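
The contract spelled out above (the return value is the array length, the array is palloc'd in the current memory context, and *children is set to NULL when there are no subxacts) is easiest to see from the caller's side. A minimal sketch of a hypothetical consumer follows; log_committed_children is an illustrative name, not code from this commit, and it simply mirrors the way RecordSubTransactionAbort consumes nchildren/children earlier in this file:

	#include "postgres.h"
	#include "access/xact.h"		/* xactGetCommittedChildren() */

	/*
	 * Illustrative only: walk the committed-children array of the
	 * current transaction and release it afterwards.  children is
	 * NULL (and the loop runs zero times) when there are no subxacts.
	 */
	static void
	log_committed_children(void)
	{
		TransactionId *children;
		int			nchildren;
		int			i;

		nchildren = xactGetCommittedChildren(&children);
		for (i = 0; i < nchildren; i++)
			elog(DEBUG2, "committed subxact: %u",
				 (unsigned int) children[i]);

		if (children != NULL)
			pfree(children);		/* palloc'd by the callee */
	}
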
@@ -3401,10 +3425,10 @@ TransStateAsString(TransState state) int xactGetCommittedChildren(TransactionId **ptr) { - TransactionState s = CurrentTransactionState; - int nchildren; - TransactionId *children; - ListCell *p; + TransactionState s = CurrentTransactionState; + int nchildren; + TransactionId *children; + ListCell *p; nchildren = list_length(s->childXids); if (nchildren == 0) @@ -3438,12 +3462,12 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record) if (info == XLOG_XACT_COMMIT) { xl_xact_commit *xlrec = (xl_xact_commit *) XLogRecGetData(record); - int i; + int i; TransactionIdCommit(record->xl_xid); /* Mark committed subtransactions as committed */ TransactionIdCommitTree(xlrec->nsubxacts, - (TransactionId *) &(xlrec->xnodes[xlrec->nrels])); + (TransactionId *) &(xlrec->xnodes[xlrec->nrels])); /* Make sure files supposed to be dropped are dropped */ for (i = 0; i < xlrec->nrels; i++) { @@ -3454,12 +3478,12 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record) else if (info == XLOG_XACT_ABORT) { xl_xact_abort *xlrec = (xl_xact_abort *) XLogRecGetData(record); - int i; + int i; TransactionIdAbort(record->xl_xid); /* mark subtransactions as aborted */ TransactionIdAbortTree(xlrec->nsubxacts, - (TransactionId *) &(xlrec->xnodes[xlrec->nrels])); + (TransactionId *) &(xlrec->xnodes[xlrec->nrels])); /* Make sure files supposed to be dropped are dropped */ for (i = 0; i < xlrec->nrels; i++) { @@ -3486,7 +3510,7 @@ void xact_desc(char *buf, uint8 xl_info, char *rec) { uint8 info = xl_info & ~XLR_INFO_MASK; - int i; + int i; if (info == XLOG_XACT_COMMIT) { @@ -3502,6 +3526,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) for (i = 0; i < xlrec->nrels; i++) { RelFileNode rnode = xlrec->xnodes[i]; + sprintf(buf + strlen(buf), " %u/%u/%u", rnode.spcNode, rnode.dbNode, rnode.relNode); } @@ -3509,7 +3534,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) if (xlrec->nsubxacts > 0) { TransactionId *xacts = (TransactionId *) - &xlrec->xnodes[xlrec->nrels]; + &xlrec->xnodes[xlrec->nrels]; sprintf(buf + strlen(buf), "; subxacts:"); for (i = 0; i < xlrec->nsubxacts; i++) @@ -3530,6 +3555,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) for (i = 0; i < xlrec->nrels; i++) { RelFileNode rnode = xlrec->xnodes[i]; + sprintf(buf + strlen(buf), " %u/%u/%u", rnode.spcNode, rnode.dbNode, rnode.relNode); } @@ -3537,7 +3563,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) if (xlrec->nsubxacts > 0) { TransactionId *xacts = (TransactionId *) - &xlrec->xnodes[xlrec->nrels]; + &xlrec->xnodes[xlrec->nrels]; sprintf(buf + strlen(buf), "; subxacts:"); for (i = 0; i < xlrec->nsubxacts; i++) @@ -3549,7 +3575,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) } void -XactPushRollback(void (*func) (void *), void *data) + XactPushRollback(void (*func) (void *), void *data) { #ifdef XLOG_II if (_RollbackFunc != NULL) diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 309f17a83f..e65c109f66 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.165 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.166 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -128,26 +128,28 @@ TimeLineID ThisTimeLineID = 0; /* Are we doing recovery from 
XLOG? */ bool InRecovery = false; + /* Are we recovering using offline XLOG archives? */ -static bool InArchiveRecovery = false; +static bool InArchiveRecovery = false; + /* Was the last xlog file restored from archive, or local? */ -static bool restoredFromArchive = false; +static bool restoredFromArchive = false; /* options taken from recovery.conf */ static char *recoveryRestoreCommand = NULL; static bool recoveryTarget = false; static bool recoveryTargetExact = false; static bool recoveryTargetInclusive = true; -static TransactionId recoveryTargetXid; -static time_t recoveryTargetTime; +static TransactionId recoveryTargetXid; +static time_t recoveryTargetTime; /* if recoveryStopsHere returns true, it saves actual stop xid/time here */ -static TransactionId recoveryStopXid; -static time_t recoveryStopTime; -static bool recoveryStopAfter; +static TransactionId recoveryStopXid; +static time_t recoveryStopTime; +static bool recoveryStopAfter; /* constraint set by read_backup_label */ -static XLogRecPtr recoveryMinXlogOffset = { 0, 0 }; +static XLogRecPtr recoveryMinXlogOffset = {0, 0}; /* * During normal operation, the only timeline we care about is ThisTimeLineID. @@ -161,7 +163,7 @@ static XLogRecPtr recoveryMinXlogOffset = { 0, 0 }; * * expectedTLIs: an integer list of recoveryTargetTLI and the TLIs of * its known parents, newest first (so recoveryTargetTLI is always the - * first list member). Only these TLIs are expected to be seen in the WAL + * first list member). Only these TLIs are expected to be seen in the WAL * segments we read, and indeed only these TLIs will be considered as * candidate WAL files to open at all. * @@ -171,9 +173,9 @@ static XLogRecPtr recoveryMinXlogOffset = { 0, 0 }; * file was created.) During a sequential scan we do not allow this value * to decrease. 
*/ -static TimeLineID recoveryTargetTLI; -static List *expectedTLIs; -static TimeLineID curFileTLI; +static TimeLineID recoveryTargetTLI; +static List *expectedTLIs; +static TimeLineID curFileTLI; /* * MyLastRecPtr points to the start of the last XLOG record inserted by the @@ -373,7 +375,7 @@ static ControlFileData *ControlFile = NULL; /* File path names */ -char XLogDir[MAXPGPATH]; +char XLogDir[MAXPGPATH]; static char ControlFilePath[MAXPGPATH]; /* @@ -422,7 +424,7 @@ static bool XLogArchiveIsDone(const char *xlog); static void XLogArchiveCleanup(const char *xlog); static void readRecoveryCommandFile(void); static void exitArchiveRecovery(TimeLineID endTLI, - uint32 endLogId, uint32 endLogSeg); + uint32 endLogId, uint32 endLogSeg); static bool recoveryStopsHere(XLogRecord *record, bool *includeThis); static bool AdvanceXLInsertBuffer(void); @@ -435,7 +437,7 @@ static bool InstallXLogFileSegment(uint32 log, uint32 seg, char *tmppath, static int XLogFileOpen(uint32 log, uint32 seg); static int XLogFileRead(uint32 log, uint32 seg, int emode); static bool RestoreArchivedFile(char *path, const char *xlogfname, - const char *recovername, off_t expectedSize); + const char *recovername, off_t expectedSize); static void PreallocXlogFiles(XLogRecPtr endptr); static void MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr); static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer); @@ -447,12 +449,13 @@ static List *readTimeLineHistory(TimeLineID targetTLI); static bool existsTimeLineHistory(TimeLineID probeTLI); static TimeLineID findNewestTimeLine(TimeLineID startTLI); static void writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, - TimeLineID endTLI, - uint32 endLogId, uint32 endLogSeg); + TimeLineID endTLI, + uint32 endLogId, uint32 endLogSeg); static void WriteControlFile(void); static void ReadControlFile(void); static char *str_time(time_t tnow); static void issue_xlog_fsync(void); + #ifdef WAL_DEBUG static void xlog_outrec(char *buf, XLogRecord *record); #endif @@ -514,7 +517,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata) if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID) { RecPtr.xlogid = 0; - RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt record */ + RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt + * record */ return (RecPtr); } @@ -724,7 +728,8 @@ begin:; /* * If there isn't enough space on the current XLOG page for a record - * header, advance to the next page (leaving the unused space as zeroes). + * header, advance to the next page (leaving the unused space as + * zeroes). */ updrqst = false; freespace = INSERT_FREESPACE(Insert); @@ -895,19 +900,21 @@ static void XLogArchiveNotify(const char *xlog) { char archiveStatusPath[MAXPGPATH]; - FILE *fd; + FILE *fd; /* insert an otherwise empty file called <XLOG>.ready */ StatusFilePath(archiveStatusPath, xlog, ".ready"); fd = AllocateFile(archiveStatusPath, "w"); - if (fd == NULL) { + if (fd == NULL) + { ereport(LOG, (errcode_for_file_access(), errmsg("could not create archive status file \"%s\": %m", archiveStatusPath))); return; } - if (FreeFile(fd)) { + if (FreeFile(fd)) + { ereport(LOG, (errcode_for_file_access(), errmsg("could not write archive status file \"%s\": %m", @@ -935,7 +942,7 @@ XLogArchiveNotifySeg(uint32 log, uint32 seg) /* * XLogArchiveIsDone * - * Checks for a ".done" archive notification file. This is called when we + * Checks for a ".done" archive notification file. This is called when we * are ready to delete or recycle an old XLOG segment file. 
If it is okay * to delete it then return true. * @@ -958,7 +965,7 @@ XLogArchiveIsDone(const char *xlog) /* check for .ready --- this means archiver is still busy with it */ StatusFilePath(archiveStatusPath, xlog, ".ready"); if (stat(archiveStatusPath, &stat_buf) == 0) - return false; + return false; /* Race condition --- maybe archiver just finished, so recheck */ StatusFilePath(archiveStatusPath, xlog, ".done"); @@ -978,7 +985,7 @@ XLogArchiveIsDone(const char *xlog) static void XLogArchiveCleanup(const char *xlog) { - char archiveStatusPath[MAXPGPATH]; + char archiveStatusPath[MAXPGPATH]; /* Remove the .done file */ StatusFilePath(archiveStatusPath, xlog, ".done"); @@ -1267,8 +1274,8 @@ XLogWrite(XLogwrtRqst WriteRqst) issue_xlog_fsync(); LogwrtResult.Flush = LogwrtResult.Write; /* end of current page */ - if (XLogArchivingActive()) - XLogArchiveNotifySeg(openLogId, openLogSeg); + if (XLogArchivingActive()) + XLogArchiveNotifySeg(openLogId, openLogSeg); } if (ispartialpage) @@ -1552,7 +1559,7 @@ XLogFileInit(uint32 log, uint32 seg, ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write to file \"%s\": %m", tmppath))); + errmsg("could not write to file \"%s\": %m", tmppath))); } } @@ -1591,8 +1598,8 @@ XLogFileInit(uint32 log, uint32 seg, if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open file \"%s\" (log file %u, segment %u): %m", - path, log, seg))); + errmsg("could not open file \"%s\" (log file %u, segment %u): %m", + path, log, seg))); return (fd); } @@ -1606,7 +1613,7 @@ XLogFileInit(uint32 log, uint32 seg, * a different timeline) * * Currently this is only used during recovery, and so there are no locking - * considerations. But we should be just as tense as XLogFileInit to avoid + * considerations. But we should be just as tense as XLogFileInit to avoid * emplacing a bogus file. */ static void @@ -1660,7 +1667,7 @@ XLogFileCopy(uint32 log, uint32 seg, errmsg("could not read file \"%s\": %m", path))); else ereport(PANIC, - (errmsg("insufficient data in file \"%s\"", path))); + (errmsg("insufficient data in file \"%s\"", path))); } errno = 0; if ((int) write(fd, buffer, sizeof(buffer)) != (int) sizeof(buffer)) @@ -1677,7 +1684,7 @@ XLogFileCopy(uint32 log, uint32 seg, ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write to file \"%s\": %m", tmppath))); + errmsg("could not write to file \"%s\": %m", tmppath))); } } @@ -1805,8 +1812,8 @@ XLogFileOpen(uint32 log, uint32 seg) if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open file \"%s\" (log file %u, segment %u): %m", - path, log, seg))); + errmsg("could not open file \"%s\" (log file %u, segment %u): %m", + path, log, seg))); return fd; } @@ -1823,11 +1830,11 @@ XLogFileRead(uint32 log, uint32 seg, int emode) int fd; /* - * Loop looking for a suitable timeline ID: we might need to - * read any of the timelines listed in expectedTLIs. + * Loop looking for a suitable timeline ID: we might need to read any + * of the timelines listed in expectedTLIs. * - * We expect curFileTLI on entry to be the TLI of the preceding file - * in sequence, or 0 if there was no predecessor. We do not allow + * We expect curFileTLI on entry to be the TLI of the preceding file in + * sequence, or 0 if there was no predecessor. We do not allow * curFileTLI to go backwards; this prevents us from picking up the * wrong file when a parent timeline extends to higher segment numbers * than the child we want to read. 
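
The .ready/.done status files managed by XLogArchiveNotify() and XLogArchiveIsDone() in the hunks above are the whole backend-to-archiver handshake: the backend drops <segment>.ready, the archiver runs archive_command and flips the marker to <segment>.done, and only a .done segment may be recycled or removed. A minimal sketch of the archiver's half, assuming the rename convention described here (the real logic lives in src/backend/postmaster/pgarch.c; mark_segment_archived is an illustrative name, not code from this commit):

	#include "postgres.h"
	#include "access/xlog_internal.h"	/* StatusFilePath(); assumed header */

	/*
	 * Illustrative archiver-side step: once archive_command has
	 * succeeded for a segment, rename <seg>.ready to <seg>.done so
	 * that XLogArchiveIsDone() will let the segment be recycled.
	 */
	static void
	mark_segment_archived(const char *xlog)
	{
		char		rdypath[MAXPGPATH];
		char		donepath[MAXPGPATH];

		StatusFilePath(rdypath, xlog, ".ready");
		StatusFilePath(donepath, xlog, ".done");

		if (rename(rdypath, donepath) < 0)
			ereport(WARNING,
					(errcode_for_file_access(),
					 errmsg("could not rename \"%s\" to \"%s\": %m",
							rdypath, donepath)));
	}
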
@@ -1868,8 +1875,8 @@ XLogFileRead(uint32 log, uint32 seg, int emode)
 	errno = ENOENT;
 	ereport(emode,
 			(errcode_for_file_access(),
-	errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
-		   path, log, seg)));
+			 errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+					path, log, seg)));
 	return -1;
 }
 
@@ -1891,36 +1898,37 @@ static bool
 RestoreArchivedFile(char *path, const char *xlogfname,
 					const char *recovername, off_t expectedSize)
 {
-	char		xlogpath[MAXPGPATH];
-	char		xlogRestoreCmd[MAXPGPATH];
-	char	   *dp;
-	char	   *endp;
+	char		xlogpath[MAXPGPATH];
+	char		xlogRestoreCmd[MAXPGPATH];
+	char	   *dp;
+	char	   *endp;
 	const char *sp;
-	int			rc;
+	int			rc;
 	struct stat stat_buf;
 
 	/*
 	 * When doing archive recovery, we always prefer an archived log file
 	 * even if a file of the same name exists in XLogDir.  The reason is
-	 * that the file in XLogDir could be an old, un-filled or partly-filled
-	 * version that was copied and restored as part of backing up $PGDATA.
+	 * that the file in XLogDir could be an old, un-filled or
+	 * partly-filled version that was copied and restored as part of
+	 * backing up $PGDATA.
 	 *
-	 * We could try to optimize this slightly by checking the local
-	 * copy lastchange timestamp against the archived copy,
-	 * but we have no API to do this, nor can we guarantee that the
-	 * lastchange timestamp was preserved correctly when we copied
-	 * to archive. Our aim is robustness, so we elect not to do this.
+	 * We could try to optimize this slightly by checking the local copy
+	 * lastchange timestamp against the archived copy, but we have no API
+	 * to do this, nor can we guarantee that the lastchange timestamp was
+	 * preserved correctly when we copied to archive.  Our aim is
+	 * robustness, so we elect not to do this.
 	 *
-	 * If we cannot obtain the log file from the archive, however, we
-	 * will try to use the XLogDir file if it exists.  This is so that
-	 * we can make use of log segments that weren't yet transferred to
-	 * the archive.
+	 * If we cannot obtain the log file from the archive, however, we will
+	 * try to use the XLogDir file if it exists.  This is so that we can
+	 * make use of log segments that weren't yet transferred to the
+	 * archive.
 	 *
 	 * Notice that we don't actually overwrite any files when we copy back
 	 * from archive because the recoveryRestoreCommand may inadvertently
-	 * restore inappropriate xlogs, or they may be corrupt, so we may
-	 * wish to fall back to the segments remaining in current XLogDir later.
-	 * The copy-from-archive filename is always the same, ensuring that we
+	 * restore inappropriate xlogs, or they may be corrupt, so we may wish
+	 * to fall back to the segments remaining in current XLogDir later. The
+	 * copy-from-archive filename is always the same, ensuring that we
 	 * don't run out of disk space on long recoveries.
 	 */
	snprintf(xlogpath, MAXPGPATH, "%s/%s", XLogDir, recovername);
@@ -1961,14 +1969,14 @@ RestoreArchivedFile(char *path, const char *xlogfname,
 				case 'p':
 					/* %p: full path of target file */
 					sp++;
-					StrNCpy(dp, xlogpath, endp-dp);
+					StrNCpy(dp, xlogpath, endp - dp);
 					make_native_path(dp);
 					dp += strlen(dp);
 					break;
 				case 'f':
 					/* %f: filename of desired file */
 					sp++;
-					StrNCpy(dp, xlogfname, endp-dp);
+					StrNCpy(dp, xlogfname, endp - dp);
 					dp += strlen(dp);
 					break;
 				case '%':
@@ -1993,7 +2001,7 @@ RestoreArchivedFile(char *path, const char *xlogfname,
 	*dp = '\0';
 
 	ereport(DEBUG3,
-		(errmsg_internal("executing restore command \"%s\"",
+			(errmsg_internal("executing restore command \"%s\"",
 							 xlogRestoreCmd)));
 
 	/*
@@ -2006,9 +2014,9 @@ RestoreArchivedFile(char *path, const char *xlogfname,
 	 * command apparently succeeded, but let's make sure the file is
 	 * really there now and has the correct size.
 	 *
-	 * XXX I made wrong-size a fatal error to ensure the DBA would
-	 * notice it, but is that too strong?  We could try to plow ahead
-	 * with a local copy of the file ... but the problem is that there
+	 * XXX I made wrong-size a fatal error to ensure the DBA would notice
+	 * it, but is that too strong?  We could try to plow ahead with a
+	 * local copy of the file ... but the problem is that there
 	 * probably isn't one, and we'd incorrectly conclude we've reached
 	 * the end of WAL and we're done recovering ...
 	 */
@@ -2041,23 +2049,23 @@ RestoreArchivedFile(char *path, const char *xlogfname,
 	}
 
 	/*
-	 * remember, we roll forward UNTIL the restore fails
-	 * so failure here is just part of the process...
-	 * that makes it difficult to determine whether the restore
-	 * failed because there isn't an archive to restore, or
-	 * because the administrator has specified the restore
+	 * remember, we roll forward UNTIL the restore fails so failure here is
+	 * just part of the process...  that makes it difficult to determine
+	 * whether the restore failed because there isn't an archive to
+	 * restore, or because the administrator has specified the restore
 	 * program incorrectly.  We have to assume the former.
 	 */
 	ereport(DEBUG1,
-		(errmsg("could not restore \"%s\" from archive: return code %d",
-				xlogfname, rc)));
+			(errmsg("could not restore \"%s\" from archive: return code %d",
+					xlogfname, rc)));
 
 	/*
-	 * if an archived file is not available, there might still be a version
-	 * of this file in XLogDir, so return that as the filename to open.
+	 * if an archived file is not available, there might still be a
+	 * version of this file in XLogDir, so return that as the filename to
+	 * open.
 	 *
-	 * In many recovery scenarios we expect this to fail also, but
-	 * if so that just means we've reached the end of WAL.
+	 * In many recovery scenarios we expect this to fail also, but if so that
+	 * just means we've reached the end of WAL.
 	 */
 	snprintf(path, MAXPGPATH, "%s/%s", XLogDir, xlogfname);
 	return false;
@@ -2118,24 +2126,24 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
 		{
 			/*
 			 * We ignore the timeline part of the XLOG segment identifiers in
-			 * deciding whether a segment is still needed.  This ensures that
+			 * deciding whether a segment is still needed.	This ensures that
 			 * we won't prematurely remove a segment from a parent timeline.
 			 * We could probably be a little more proactive about removing
 			 * segments of non-parent timelines, but that would be a whole lot
 			 * more complicated.
 			 *
-			 * We use the alphanumeric sorting property of the filenames to decide
-			 * which ones are earlier than the lastoff segment.
+ * We use the alphanumeric sorting property of the filenames to + * decide which ones are earlier than the lastoff segment. */ if (strlen(xlde->d_name) == 24 && strspn(xlde->d_name, "0123456789ABCDEF") == 24 && strcmp(xlde->d_name + 8, lastoff + 8) <= 0) { - bool recycle; + bool recycle; if (XLogArchivingActive()) recycle = XLogArchiveIsDone(xlde->d_name); - else + else recycle = true; if (recycle) @@ -2160,8 +2168,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr) { /* No need for any more future segments... */ ereport(LOG, - (errmsg("removing transaction log file \"%s\"", - xlde->d_name))); + (errmsg("removing transaction log file \"%s\"", + xlde->d_name))); unlink(path); } @@ -2171,8 +2179,11 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr) errno = 0; } #ifdef WIN32 - /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but - not in released version */ + + /* + * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but + * not in released version + */ if (GetLastError() == ERROR_NO_MORE_FILES) errno = 0; #endif @@ -2263,8 +2274,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode) if (!EQ_CRC64(record->xl_crc, crc)) { ereport(emode, - (errmsg("incorrect resource manager data checksum in record at %X/%X", - recptr.xlogid, recptr.xrecoff))); + (errmsg("incorrect resource manager data checksum in record at %X/%X", + recptr.xlogid, recptr.xrecoff))); return (false); } @@ -2286,8 +2297,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode) if (!EQ_CRC64(cbuf, crc)) { ereport(emode, - (errmsg("incorrect checksum of backup block %d in record at %X/%X", - i + 1, recptr.xlogid, recptr.xrecoff))); + (errmsg("incorrect checksum of backup block %d in record at %X/%X", + i + 1, recptr.xlogid, recptr.xrecoff))); return (false); } blk += sizeof(BkpBlock) + BLCKSZ; @@ -2361,12 +2372,13 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer) ereport(PANIC, (errmsg("invalid record offset at %X/%X", RecPtr->xlogid, RecPtr->xrecoff))); + /* * Since we are going to a random position in WAL, forget any - * prior state about what timeline we were in, and allow it - * to be any timeline in expectedTLIs. We also set a flag to - * allow curFileTLI to go backwards (but we can't reset that - * variable right here, since we might not change files at all). + * prior state about what timeline we were in, and allow it to be + * any timeline in expectedTLIs. We also set a flag to allow + * curFileTLI to go backwards (but we can't reset that variable + * right here, since we might not change files at all). */ lastPageTLI = 0; /* see comment in ValidXLOGHeader */ randAccess = true; /* allow curFileTLI to go backwards too */ @@ -2418,9 +2430,9 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer) if (targetRecOff == 0) { /* - * Can only get here in the continuing-from-prev-page case, because - * XRecOffIsValid eliminated the zero-page-offset case otherwise. - * Need to skip over the new page's header. + * Can only get here in the continuing-from-prev-page case, + * because XRecOffIsValid eliminated the zero-page-offset case + * otherwise. Need to skip over the new page's header. 
*/ tmpRecPtr.xrecoff += pageHeaderSize; targetRecOff = pageHeaderSize; @@ -2631,15 +2643,15 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) ControlFile->system_identifier); ereport(emode, (errmsg("WAL file is from different system"), - errdetail("WAL file SYSID is %s, pg_control SYSID is %s", - fhdrident_str, sysident_str))); + errdetail("WAL file SYSID is %s, pg_control SYSID is %s", + fhdrident_str, sysident_str))); return false; } if (longhdr->xlp_seg_size != XLogSegSize) { ereport(emode, (errmsg("WAL file is from different system"), - errdetail("Incorrect XLOG_SEG_SIZE in page header."))); + errdetail("Incorrect XLOG_SEG_SIZE in page header."))); return false; } } @@ -2671,9 +2683,9 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) * immediate parent's TLI, we should never see TLI go backwards across * successive pages of a consistent WAL sequence. * - * Of course this check should only be applied when advancing sequentially - * across pages; therefore ReadRecord resets lastPageTLI to zero when - * going to a random page. + * Of course this check should only be applied when advancing + * sequentially across pages; therefore ReadRecord resets lastPageTLI + * to zero when going to a random page. */ if (hdr->xlp_tli < lastPageTLI) { @@ -2691,7 +2703,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) * Try to read a timeline's history file. * * If successful, return the list of component TLIs (the given TLI followed by - * its ancestor TLIs). If we can't find the history file, assume that the + * its ancestor TLIs). If we can't find the history file, assume that the * timeline has no parents, and return a list of just the specified timeline * ID. */ @@ -2702,7 +2714,7 @@ readTimeLineHistory(TimeLineID targetTLI) char path[MAXPGPATH]; char histfname[MAXFNAMELEN]; char fline[MAXPGPATH]; - FILE *fd; + FILE *fd; if (InArchiveRecovery) { @@ -2712,7 +2724,7 @@ readTimeLineHistory(TimeLineID targetTLI) else TLHistoryFilePath(path, targetTLI); - fd = AllocateFile(path, "r"); + fd = AllocateFile(path, "r"); if (fd == NULL) { if (errno != ENOENT) @@ -2725,15 +2737,15 @@ readTimeLineHistory(TimeLineID targetTLI) result = NIL; - /* - * Parse the file... - */ - while (fgets(fline, MAXPGPATH, fd) != NULL) + /* + * Parse the file... 
+ */ + while (fgets(fline, MAXPGPATH, fd) != NULL) { /* skip leading whitespace and check for # comment */ - char *ptr; - char *endptr; - TimeLineID tli; + char *ptr; + char *endptr; + TimeLineID tli; for (ptr = fline; *ptr; ptr++) { @@ -2754,7 +2766,7 @@ readTimeLineHistory(TimeLineID targetTLI) tli <= (TimeLineID) linitial_int(result)) ereport(FATAL, (errmsg("invalid data in history file: %s", fline), - errhint("Timeline IDs must be in increasing sequence."))); + errhint("Timeline IDs must be in increasing sequence."))); /* Build list with newest item first */ result = lcons_int((int) tli, result); @@ -2768,7 +2780,7 @@ readTimeLineHistory(TimeLineID targetTLI) targetTLI <= (TimeLineID) linitial_int(result)) ereport(FATAL, (errmsg("invalid data in history file \"%s\"", path), - errhint("Timeline IDs must be less than child timeline's ID."))); + errhint("Timeline IDs must be less than child timeline's ID."))); result = lcons_int((int) targetTLI, result); @@ -2787,7 +2799,7 @@ existsTimeLineHistory(TimeLineID probeTLI) { char path[MAXPGPATH]; char histfname[MAXFNAMELEN]; - FILE *fd; + FILE *fd; if (InArchiveRecovery) { @@ -2827,12 +2839,12 @@ findNewestTimeLine(TimeLineID startTLI) TimeLineID probeTLI; /* - * The algorithm is just to probe for the existence of timeline history - * files. XXX is it useful to allow gaps in the sequence? + * The algorithm is just to probe for the existence of timeline + * history files. XXX is it useful to allow gaps in the sequence? */ newestTLI = startTLI; - for (probeTLI = startTLI + 1; ; probeTLI++) + for (probeTLI = startTLI + 1;; probeTLI++) { if (existsTimeLineHistory(probeTLI)) { @@ -2856,7 +2868,7 @@ findNewestTimeLine(TimeLineID startTLI) * endTLI et al: ID of the last used WAL file, for annotation purposes * * Currently this is only used during recovery, and so there are no locking - * considerations. But we should be just as tense as XLogFileInit to avoid + * considerations. But we should be just as tense as XLogFileInit to avoid * emplacing a bogus file. */ static void @@ -2872,7 +2884,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, int fd; int nbytes; - Assert(newTLI > parentTLI); /* else bad selection of newTLI */ + Assert(newTLI > parentTLI); /* else bad selection of newTLI */ /* * Write into a temp file name. @@ -2932,12 +2944,16 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, * space */ unlink(tmppath); - /* if write didn't set errno, assume problem is no disk space */ + + /* + * if write didn't set errno, assume problem is no disk + * space + */ errno = save_errno ? save_errno : ENOSPC; ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write to file \"%s\": %m", tmppath))); + errmsg("could not write to file \"%s\": %m", tmppath))); } } close(srcfd); @@ -2946,8 +2962,8 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, /* * Append one line with the details of this timeline split. * - * If we did have a parent file, insert an extra newline just in case - * the parent file failed to end with one. + * If we did have a parent file, insert an extra newline just in case the + * parent file failed to end with one. 
*/ XLogFileName(xlogfname, endTLI, endLogId, endLogSeg); @@ -2967,8 +2983,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, int save_errno = errno; /* - * If we fail to make the file, delete it to release disk - * space + * If we fail to make the file, delete it to release disk space */ unlink(tmppath); /* if write didn't set errno, assume problem is no disk space */ @@ -3215,7 +3230,7 @@ ReadControlFile(void) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized with XLOG_SEG_SIZE %d," - " but the server was compiled with XLOG_SEG_SIZE %d.", + " but the server was compiled with XLOG_SEG_SIZE %d.", ControlFile->xlog_seg_size, XLOG_SEG_SIZE), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->nameDataLen != NAMEDATALEN) @@ -3336,7 +3351,8 @@ XLOGShmemSize(void) void XLOGShmemInit(void) { - bool foundXLog, foundCFile; + bool foundXLog, + foundCFile; /* this must agree with space requested by XLOGShmemSize() */ if (XLOGbuffers < MinXLOGbuffers) @@ -3414,16 +3430,17 @@ BootStrapXLOG(void) crc64 crc; /* - * Select a hopefully-unique system identifier code for this installation. - * We use the result of gettimeofday(), including the fractional seconds - * field, as being about as unique as we can easily get. (Think not to - * use random(), since it hasn't been seeded and there's no portable way - * to seed it other than the system clock value...) The upper half of the - * uint64 value is just the tv_sec part, while the lower half is the XOR - * of tv_sec and tv_usec. This is to ensure that we don't lose uniqueness - * unnecessarily if "uint64" is really only 32 bits wide. A person - * knowing this encoding can determine the initialization time of the - * installation, which could perhaps be useful sometimes. + * Select a hopefully-unique system identifier code for this + * installation. We use the result of gettimeofday(), including the + * fractional seconds field, as being about as unique as we can easily + * get. (Think not to use random(), since it hasn't been seeded and + * there's no portable way to seed it other than the system clock + * value...) The upper half of the uint64 value is just the tv_sec + * part, while the lower half is the XOR of tv_sec and tv_usec. This + * is to ensure that we don't lose uniqueness unnecessarily if + * "uint64" is really only 32 bits wide. A person knowing this + * encoding can determine the initialization time of the installation, + * which could perhaps be useful sometimes. 
*/ gettimeofday(&tv, NULL); sysidentifier = ((uint64) tv.tv_sec) << 32; @@ -3492,18 +3509,18 @@ BootStrapXLOG(void) errno = ENOSPC; ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write bootstrap transaction log file: %m"))); + errmsg("could not write bootstrap transaction log file: %m"))); } if (pg_fsync(openLogFile) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync bootstrap transaction log file: %m"))); + errmsg("could not fsync bootstrap transaction log file: %m"))); if (close(openLogFile)) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not close bootstrap transaction log file: %m"))); + errmsg("could not close bootstrap transaction log file: %m"))); openLogFile = -1; @@ -3550,37 +3567,37 @@ str_time(time_t tnow) static void readRecoveryCommandFile(void) { - char recoveryCommandFile[MAXPGPATH]; - FILE *fd; - char cmdline[MAXPGPATH]; - TimeLineID rtli = 0; - bool rtliGiven = false; - bool syntaxError = false; - - snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir); - fd = AllocateFile(recoveryCommandFile, "r"); + char recoveryCommandFile[MAXPGPATH]; + FILE *fd; + char cmdline[MAXPGPATH]; + TimeLineID rtli = 0; + bool rtliGiven = false; + bool syntaxError = false; + + snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir); + fd = AllocateFile(recoveryCommandFile, "r"); if (fd == NULL) { if (errno == ENOENT) return; /* not there, so no archive recovery */ ereport(FATAL, - (errcode_for_file_access(), + (errcode_for_file_access(), errmsg("could not open recovery command file \"%s\": %m", recoveryCommandFile))); } ereport(LOG, - (errmsg("starting archive recovery"))); + (errmsg("starting archive recovery"))); - /* - * Parse the file... - */ - while (fgets(cmdline, MAXPGPATH, fd) != NULL) + /* + * Parse the file... 
+ */ + while (fgets(cmdline, MAXPGPATH, fd) != NULL) { /* skip leading whitespace and check for # comment */ - char *ptr; - char *tok1; - char *tok2; + char *ptr; + char *tok1; + char *tok2; for (ptr = cmdline; *ptr; ptr++) { @@ -3591,13 +3608,13 @@ readRecoveryCommandFile(void) continue; /* identify the quoted parameter value */ - tok1 = strtok(ptr, "'"); + tok1 = strtok(ptr, "'"); if (!tok1) { syntaxError = true; break; } - tok2 = strtok(NULL, "'"); + tok2 = strtok(NULL, "'"); if (!tok2) { syntaxError = true; @@ -3611,13 +3628,15 @@ readRecoveryCommandFile(void) break; } - if (strcmp(tok1,"restore_command") == 0) { + if (strcmp(tok1, "restore_command") == 0) + { recoveryRestoreCommand = pstrdup(tok2); ereport(LOG, (errmsg("restore_command = \"%s\"", recoveryRestoreCommand))); } - else if (strcmp(tok1,"recovery_target_timeline") == 0) { + else if (strcmp(tok1, "recovery_target_timeline") == 0) + { rtliGiven = true; if (strcmp(tok2, "latest") == 0) rtli = 0; @@ -3637,7 +3656,8 @@ readRecoveryCommandFile(void) ereport(LOG, (errmsg("recovery_target_timeline = latest"))); } - else if (strcmp(tok1,"recovery_target_xid") == 0) { + else if (strcmp(tok1, "recovery_target_xid") == 0) + { errno = 0; recoveryTargetXid = (TransactionId) strtoul(tok2, NULL, 0); if (errno == EINVAL || errno == ERANGE) @@ -3650,7 +3670,8 @@ readRecoveryCommandFile(void) recoveryTarget = true; recoveryTargetExact = true; } - else if (strcmp(tok1,"recovery_target_time") == 0) { + else if (strcmp(tok1, "recovery_target_time") == 0) + { /* * if recovery_target_xid specified, then this overrides * recovery_target_time @@ -3659,20 +3680,22 @@ readRecoveryCommandFile(void) continue; recoveryTarget = true; recoveryTargetExact = false; + /* - * Convert the time string given by the user to the time_t format. - * We use type abstime's input converter because we know abstime - * has the same representation as time_t. + * Convert the time string given by the user to the time_t + * format. We use type abstime's input converter because we + * know abstime has the same representation as time_t. */ recoveryTargetTime = (time_t) DatumGetAbsoluteTime(DirectFunctionCall1(abstimein, - CStringGetDatum(tok2))); + CStringGetDatum(tok2))); ereport(LOG, (errmsg("recovery_target_time = %s", - DatumGetCString(DirectFunctionCall1(abstimeout, - AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime)))))); + DatumGetCString(DirectFunctionCall1(abstimeout, + AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime)))))); } - else if (strcmp(tok1,"recovery_target_inclusive") == 0) { + else if (strcmp(tok1, "recovery_target_inclusive") == 0) + { /* * does nothing if a recovery_target is not also set */ @@ -3694,11 +3717,11 @@ readRecoveryCommandFile(void) FreeFile(fd); - if (syntaxError) - ereport(FATAL, + if (syntaxError) + ereport(FATAL, (errmsg("syntax error in recovery command file: %s", cmdline), - errhint("Lines should have the format parameter = 'value'."))); + errhint("Lines should have the format parameter = 'value'."))); /* Check that required parameters were supplied */ if (recoveryRestoreCommand == NULL) @@ -3710,10 +3733,10 @@ readRecoveryCommandFile(void) InArchiveRecovery = true; /* - * If user specified recovery_target_timeline, validate it or compute the - * "latest" value. We can't do this until after we've gotten the restore - * command and set InArchiveRecovery, because we need to fetch timeline - * history files from the archive. + * If user specified recovery_target_timeline, validate it or compute + * the "latest" value. 
We can't do this until after we've gotten the + * restore command and set InArchiveRecovery, because we need to fetch + * timeline history files from the archive. */ if (rtliGiven) { @@ -3722,8 +3745,8 @@ readRecoveryCommandFile(void) /* Timeline 1 does not have a history file, all else should */ if (rtli != 1 && !existsTimeLineHistory(rtli)) ereport(FATAL, - (errmsg("recovery_target_timeline %u does not exist", - rtli))); + (errmsg("recovery_target_timeline %u does not exist", + rtli))); recoveryTargetTLI = rtli; } else @@ -3740,10 +3763,10 @@ readRecoveryCommandFile(void) static void exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) { - char recoveryPath[MAXPGPATH]; - char xlogpath[MAXPGPATH]; - char recoveryCommandFile[MAXPGPATH]; - char recoveryCommandDone[MAXPGPATH]; + char recoveryPath[MAXPGPATH]; + char xlogpath[MAXPGPATH]; + char recoveryCommandFile[MAXPGPATH]; + char recoveryCommandDone[MAXPGPATH]; /* * We are no longer in archive recovery state. @@ -3751,9 +3774,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) InArchiveRecovery = false; /* - * We should have the ending log segment currently open. Verify, - * and then close it (to avoid problems on Windows with trying to - * rename or delete an open file). + * We should have the ending log segment currently open. Verify, and + * then close it (to avoid problems on Windows with trying to rename + * or delete an open file). */ Assert(readFile >= 0); Assert(readId == endLogId); @@ -3763,17 +3786,17 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) readFile = -1; /* - * If the segment was fetched from archival storage, we want to replace - * the existing xlog segment (if any) with the archival version. This - * is because whatever is in XLogDir is very possibly older than what - * we have from the archives, since it could have come from restoring - * a PGDATA backup. In any case, the archival version certainly is - * more descriptive of what our current database state is, because that - * is what we replayed from. + * If the segment was fetched from archival storage, we want to + * replace the existing xlog segment (if any) with the archival + * version. This is because whatever is in XLogDir is very possibly + * older than what we have from the archives, since it could have come + * from restoring a PGDATA backup. In any case, the archival version + * certainly is more descriptive of what our current database state + * is, because that is what we replayed from. * * Note that if we are establishing a new timeline, ThisTimeLineID is - * already set to the new value, and so we will create a new file instead - * of overwriting any existing file. + * already set to the new value, and so we will create a new file + * instead of overwriting any existing file. */ snprintf(recoveryPath, MAXPGPATH, "%s/RECOVERYXLOG", XLogDir); XLogFilePath(xlogpath, ThisTimeLineID, endLogId, endLogSeg); @@ -3798,6 +3821,7 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) * RECOVERYXLOG laying about, get rid of it. */ unlink(recoveryPath); /* ignore any error */ + /* * If we are establishing a new timeline, we have to copy data * from the last WAL segment of the old timeline to create a @@ -3809,22 +3833,22 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) } /* - * Let's just make real sure there are not .ready or .done flags posted - * for the new segment. 
+ * Let's just make real sure there are not .ready or .done flags + * posted for the new segment. */ XLogFileName(xlogpath, ThisTimeLineID, endLogId, endLogSeg); XLogArchiveCleanup(xlogpath); /* Get rid of any remaining recovered timeline-history file, too */ snprintf(recoveryPath, MAXPGPATH, "%s/RECOVERYHISTORY", XLogDir); - unlink(recoveryPath); /* ignore any error */ + unlink(recoveryPath); /* ignore any error */ /* - * Rename the config file out of the way, so that we don't accidentally - * re-enter archive recovery mode in a subsequent crash. + * Rename the config file out of the way, so that we don't + * accidentally re-enter archive recovery mode in a subsequent crash. */ - snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir); - snprintf(recoveryCommandDone, MAXPGPATH, "%s/recovery.done", DataDir); + snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir); + snprintf(recoveryCommandDone, MAXPGPATH, "%s/recovery.done", DataDir); unlink(recoveryCommandDone); if (rename(recoveryCommandFile, recoveryCommandDone) != 0) ereport(FATAL, @@ -3849,8 +3873,8 @@ static bool recoveryStopsHere(XLogRecord *record, bool *includeThis) { bool stopsHere; - uint8 record_info; - time_t recordXtime; + uint8 record_info; + time_t recordXtime; /* Do we have a PITR target at all? */ if (!recoveryTarget) @@ -3862,14 +3886,14 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) record_info = record->xl_info & ~XLR_INFO_MASK; if (record_info == XLOG_XACT_COMMIT) { - xl_xact_commit *recordXactCommitData; + xl_xact_commit *recordXactCommitData; recordXactCommitData = (xl_xact_commit *) XLogRecGetData(record); recordXtime = recordXactCommitData->xtime; } else if (record_info == XLOG_XACT_ABORT) { - xl_xact_abort *recordXactAbortData; + xl_xact_abort *recordXactAbortData; recordXactAbortData = (xl_xact_abort *) XLogRecGetData(record); recordXtime = recordXactAbortData->xtime; @@ -3880,14 +3904,13 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) if (recoveryTargetExact) { /* - * there can be only one transaction end record - * with this exact transactionid + * there can be only one transaction end record with this exact + * transactionid * - * when testing for an xid, we MUST test for - * equality only, since transactions are numbered - * in the order they start, not the order they - * complete. A higher numbered xid will complete - * before you about 50% of the time... + * when testing for an xid, we MUST test for equality only, since + * transactions are numbered in the order they start, not the + * order they complete. A higher numbered xid will complete before + * you about 50% of the time... 
*/ stopsHere = (record->xl_xid == recoveryTargetXid); if (stopsHere) @@ -3896,11 +3919,9 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) else { /* - * there can be many transactions that - * share the same commit time, so - * we stop after the last one, if we are - * inclusive, or stop at the first one - * if we are exclusive + * there can be many transactions that share the same commit time, + * so we stop after the last one, if we are inclusive, or stop at + * the first one if we are exclusive */ if (recoveryTargetInclusive) stopsHere = (recordXtime > recoveryTargetTime); @@ -3921,22 +3942,22 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) if (recoveryStopAfter) ereport(LOG, (errmsg("recovery stopping after commit of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); else ereport(LOG, (errmsg("recovery stopping before commit of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); } else { if (recoveryStopAfter) ereport(LOG, (errmsg("recovery stopping after abort of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); else ereport(LOG, (errmsg("recovery stopping before abort of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); } } @@ -4009,14 +4030,14 @@ StartupXLOG(void) #endif /* - * Initialize on the assumption we want to recover to the same timeline - * that's active according to pg_control. + * Initialize on the assumption we want to recover to the same + * timeline that's active according to pg_control. */ recoveryTargetTLI = ControlFile->checkPointCopy.ThisTimeLineID; /* - * Check for recovery control file, and if so set up state for - * offline recovery + * Check for recovery control file, and if so set up state for offline + * recovery */ readRecoveryCommandFile(); @@ -4029,7 +4050,7 @@ StartupXLOG(void) * timeline. */ if (!list_member_int(expectedTLIs, - (int) ControlFile->checkPointCopy.ThisTimeLineID)) + (int) ControlFile->checkPointCopy.ThisTimeLineID)) ereport(FATAL, (errmsg("requested timeline %u is not a child of database system timeline %u", recoveryTargetTLI, @@ -4038,29 +4059,30 @@ StartupXLOG(void) if (read_backup_label(&checkPointLoc)) { /* - * When a backup_label file is present, we want to roll forward from - * the checkpoint it identifies, rather than using pg_control. + * When a backup_label file is present, we want to roll forward + * from the checkpoint it identifies, rather than using + * pg_control. */ record = ReadCheckpointRecord(checkPointLoc, 0, buffer); if (record != NULL) { ereport(LOG, (errmsg("checkpoint record is at %X/%X", - checkPointLoc.xlogid, checkPointLoc.xrecoff))); + checkPointLoc.xlogid, checkPointLoc.xrecoff))); InRecovery = true; /* force recovery even if SHUTDOWNED */ } else { ereport(PANIC, - (errmsg("could not locate required checkpoint record"), - errhint("If you are not restoring from a backup, try removing $PGDATA/backup_label."))); + (errmsg("could not locate required checkpoint record"), + errhint("If you are not restoring from a backup, try removing $PGDATA/backup_label."))); } } else { /* - * Get the last valid checkpoint record. If the latest one according - * to pg_control is broken, try the next-to-last one. + * Get the last valid checkpoint record. 
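The two stop tests reindented above encode different rules. An xid target may only be compared for equality, because xids are handed out in start order while transactions finish in any order; a time target compares with > or >= depending on recovery_target_inclusive, since many commit records can carry the same timestamp. A standalone restatement of both predicates, with simplified types:

/* Standalone restatement of the recoveryStopsHere() predicates above. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

typedef unsigned int TransactionId;

/* xids are assigned in start order but transactions end in any order, so
 * a ">=" test could fire on some unrelated commit record that merely
 * carries a larger xid; equality is the only safe comparison. */
static bool
stops_at_xid(TransactionId record_xid, TransactionId target_xid)
{
    return record_xid == target_xid;
}

/* Many commits can share one timestamp.  Inclusive recovery keeps
 * applying records until the time strictly exceeds the target, so every
 * record at the target instant is replayed; exclusive recovery stops at
 * the first record whose time reaches the target. */
static bool
stops_at_time(time_t record_time, time_t target_time, bool inclusive)
{
    return inclusive ? (record_time > target_time)
                     : (record_time >= target_time);
}

int main(void)
{
    printf("xid:  %d %d\n", stops_at_xid(100, 100), stops_at_xid(101, 100));
    printf("time: %d %d\n",
           stops_at_time(50, 50, true), stops_at_time(50, 50, false));
    return 0;
}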
If the latest one + * according to pg_control is broken, try the next-to-last one. */ checkPointLoc = ControlFile->checkPoint; record = ReadCheckpointRecord(checkPointLoc, 1, buffer); @@ -4068,7 +4090,7 @@ StartupXLOG(void) { ereport(LOG, (errmsg("checkpoint record is at %X/%X", - checkPointLoc.xlogid, checkPointLoc.xrecoff))); + checkPointLoc.xlogid, checkPointLoc.xrecoff))); } else { @@ -4077,13 +4099,14 @@ StartupXLOG(void) if (record != NULL) { ereport(LOG, - (errmsg("using previous checkpoint record at %X/%X", - checkPointLoc.xlogid, checkPointLoc.xrecoff))); - InRecovery = true; /* force recovery even if SHUTDOWNED */ + (errmsg("using previous checkpoint record at %X/%X", + checkPointLoc.xlogid, checkPointLoc.xrecoff))); + InRecovery = true; /* force recovery even if + * SHUTDOWNED */ } else ereport(PANIC, - (errmsg("could not locate a valid checkpoint record"))); + (errmsg("could not locate a valid checkpoint record"))); } } @@ -4108,9 +4131,9 @@ StartupXLOG(void) ShmemVariableCache->oidCount = 0; /* - * We must replay WAL entries using the same TimeLineID they were created - * under, so temporarily adopt the TLI indicated by the checkpoint (see - * also xlog_redo()). + * We must replay WAL entries using the same TimeLineID they were + * created under, so temporarily adopt the TLI indicated by the + * checkpoint (see also xlog_redo()). */ ThisTimeLineID = checkPoint.ThisTimeLineID; @@ -4123,8 +4146,8 @@ StartupXLOG(void) checkPoint.undo = RecPtr; /* - * Check whether we need to force recovery from WAL. If it appears - * to have been a clean shutdown and we did not have a recovery.conf + * Check whether we need to force recovery from WAL. If it appears to + * have been a clean shutdown and we did not have a recovery.conf * file, then assume no recovery needed. */ if (XLByteLT(checkPoint.undo, RecPtr) || @@ -4219,7 +4242,7 @@ StartupXLOG(void) */ if (recoveryStopsHere(record, &recoveryApply)) { - needNewTimeLine = true; /* see below */ + needNewTimeLine = true; /* see below */ recoveryContinue = false; if (!recoveryApply) break; @@ -4242,6 +4265,7 @@ StartupXLOG(void) record = ReadRecord(NULL, LOG, buffer); } while (record != NULL && recoveryContinue); + /* * end of main redo apply loop */ @@ -4276,7 +4300,8 @@ StartupXLOG(void) if (needNewTimeLine) /* stopped because of stop request */ ereport(FATAL, (errmsg("requested recovery stop point is before end time of backup dump"))); - else /* ran off end of WAL */ + else +/* ran off end of WAL */ ereport(FATAL, (errmsg("WAL ends before end time of backup dump"))); } @@ -4284,10 +4309,10 @@ StartupXLOG(void) /* * Consider whether we need to assign a new timeline ID. * - * If we stopped short of the end of WAL during recovery, then we - * are generating a new timeline and must assign it a unique new ID. - * Otherwise, we can just extend the timeline we were in when we - * ran out of WAL. + * If we stopped short of the end of WAL during recovery, then we are + * generating a new timeline and must assign it a unique new ID. + * Otherwise, we can just extend the timeline we were in when we ran + * out of WAL. */ if (needNewTimeLine) { @@ -4302,8 +4327,8 @@ StartupXLOG(void) XLogCtl->ThisTimeLineID = ThisTimeLineID; /* - * We are now done reading the old WAL. Turn off archive fetching - * if it was active, and make a writable copy of the last WAL segment. + * We are now done reading the old WAL. Turn off archive fetching if + * it was active, and make a writable copy of the last WAL segment. 
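The needNewTimeLine handling above boils down to one decision: stopping short of end-of-WAL branches history and needs a fresh, globally unused timeline ID, while running off the end just extends the current one. A sketch of that decision; newest_existing_tli is a stand-in for however the highest timeline ID already present in pg_xlog or the archive is discovered:

/* Sketch of the timeline decision described above: branching history
 * needs a fresh ID; simply running off the end of WAL does not. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int TimeLineID;

static TimeLineID
choose_timeline(bool stopped_short, TimeLineID current_tli,
                TimeLineID newest_existing_tli)
{
    /* A new timeline must be numbered past every timeline already
     * present, or its WAL file names could collide with existing ones. */
    return stopped_short ? newest_existing_tli + 1 : current_tli;
}

int main(void)
{
    printf("extend old timeline: %u\n", choose_timeline(false, 3, 5));  /* 3 */
    printf("branch new timeline: %u\n", choose_timeline(true, 3, 5));   /* 6 */
    return 0;
}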
* (Note that we also have a copy of the last block of the old WAL in * readBuf; we will use that below.) */ @@ -4361,7 +4386,7 @@ StartupXLOG(void) * XLogWrite()). * * Note: it might seem we should do AdvanceXLInsertBuffer() here, but - * this is sufficient. The first actual attempt to insert a log + * this is sufficient. The first actual attempt to insert a log * record will advance the insert state. */ XLogCtl->Write.curridx = NextBufIdx(0); @@ -4434,8 +4459,8 @@ StartupXLOG(void) XLogCloseRelationCache(); /* - * Now that we've checkpointed the recovery, it's safe to - * flush old backup_label, if present. + * Now that we've checkpointed the recovery, it's safe to flush + * old backup_label, if present. */ remove_backup_label(); } @@ -4504,7 +4529,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, break; default: ereport(LOG, - (errmsg("invalid checkpoint link in backup_label file"))); + (errmsg("invalid checkpoint link in backup_label file"))); break; } return NULL; @@ -4557,7 +4582,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, { case 1: ereport(LOG, - (errmsg("invalid xl_info in primary checkpoint record"))); + (errmsg("invalid xl_info in primary checkpoint record"))); break; case 2: ereport(LOG, @@ -4576,7 +4601,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, { case 1: ereport(LOG, - (errmsg("invalid length of primary checkpoint record"))); + (errmsg("invalid length of primary checkpoint record"))); break; case 2: ereport(LOG, @@ -4791,8 +4816,8 @@ CreateCheckPoint(bool shutdown, bool force) * so there's a risk of deadlock. Need to find a better solution. See * pgsql-hackers discussion of 17-Dec-01. * - * XXX actually, the whole UNDO code is dead code and unlikely to ever - * be revived, so the lack of a good solution here is not troubling. + * XXX actually, the whole UNDO code is dead code and unlikely to ever be + * revived, so the lack of a good solution here is not troubling. */ #ifdef NOT_USED checkPoint.undo = GetUndoRecPtr(); @@ -4919,11 +4944,11 @@ CreateCheckPoint(bool shutdown, bool force) PreallocXlogFiles(recptr); /* - * Truncate pg_subtrans if possible. We can throw away all data before - * the oldest XMIN of any running transaction. No future transaction will - * attempt to reference any pg_subtrans entry older than that (see Asserts - * in subtrans.c). During recovery, though, we mustn't do this because - * StartupSUBTRANS hasn't been called yet. + * Truncate pg_subtrans if possible. We can throw away all data + * before the oldest XMIN of any running transaction. No future + * transaction will attempt to reference any pg_subtrans entry older + * than that (see Asserts in subtrans.c). During recovery, though, we + * mustn't do this because StartupSUBTRANS hasn't been called yet. 
*/ if (!InRecovery) TruncateSUBTRANS(GetOldestXmin(true)); @@ -4974,8 +4999,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) ShmemVariableCache->nextXid = checkPoint.nextXid; ShmemVariableCache->nextOid = checkPoint.nextOid; ShmemVariableCache->oidCount = 0; + /* - * TLI may change in a shutdown checkpoint, but it shouldn't decrease + * TLI may change in a shutdown checkpoint, but it shouldn't + * decrease */ if (checkPoint.ThisTimeLineID != ThisTimeLineID) { @@ -4984,7 +5011,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) (int) checkPoint.ThisTimeLineID)) ereport(PANIC, (errmsg("unexpected timeline ID %u (after %u) in checkpoint record", - checkPoint.ThisTimeLineID, ThisTimeLineID))); + checkPoint.ThisTimeLineID, ThisTimeLineID))); /* Following WAL records should be run with new TLI */ ThisTimeLineID = checkPoint.ThisTimeLineID; } @@ -5071,8 +5098,7 @@ xlog_outrec(char *buf, XLogRecord *record) sprintf(buf + strlen(buf), ": %s", RmgrTable[record->xl_rmid].rm_name); } - -#endif /* WAL_DEBUG */ +#endif /* WAL_DEBUG */ /* @@ -5200,7 +5226,7 @@ pg_start_backup(PG_FUNCTION_ARGS) char *backupidstr; XLogRecPtr checkpointloc; XLogRecPtr startpoint; - time_t stamp_time; + time_t stamp_time; char strfbuf[128]; char labelfilepath[MAXPGPATH]; char xlogfilename[MAXFNAMELEN]; @@ -5209,24 +5235,26 @@ pg_start_backup(PG_FUNCTION_ARGS) struct stat stat_buf; FILE *fp; - if (!superuser()) + if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be superuser to run a backup")))); backupidstr = DatumGetCString(DirectFunctionCall1(textout, - PointerGetDatum(backupid))); + PointerGetDatum(backupid))); + /* - * Force a CHECKPOINT. This is not strictly necessary, but it seems - * like a good idea to minimize the amount of past WAL needed to use the - * backup. Also, this guarantees that two successive backup runs - * will have different checkpoint positions and hence different history - * file names, even if nothing happened in between. + * Force a CHECKPOINT. This is not strictly necessary, but it seems + * like a good idea to minimize the amount of past WAL needed to use + * the backup. Also, this guarantees that two successive backup runs + * will have different checkpoint positions and hence different + * history file names, even if nothing happened in between. */ RequestCheckpoint(true); + /* * Now we need to fetch the checkpoint record location, and also its - * REDO pointer. The oldest point in WAL that would be needed to restore - * starting from the checkpoint is precisely the REDO pointer. + * REDO pointer. The oldest point in WAL that would be needed to + * restore starting from the checkpoint is precisely the REDO pointer. */ LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); checkpointloc = ControlFile->checkPoint; @@ -5235,18 +5263,21 @@ pg_start_backup(PG_FUNCTION_ARGS) XLByteToSeg(startpoint, _logId, _logSeg); XLogFileName(xlogfilename, ThisTimeLineID, _logId, _logSeg); + /* - * We deliberately use strftime/localtime not the src/timezone functions, - * so that backup labels will consistently be recorded in the same - * timezone regardless of TimeZone setting. This matches elog.c's - * practice. + * We deliberately use strftime/localtime not the src/timezone + * functions, so that backup labels will consistently be recorded in + * the same timezone regardless of TimeZone setting. This matches + * elog.c's practice. 
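The comment reindented above explains why pg_start_backup() timestamps the label with plain C strftime()/localtime() rather than PostgreSQL's own timezone code: the label text then depends only on the server environment, never on SET TimeZone. The effect is easy to reproduce standalone:

/* Standalone illustration of the backup-label timestamp style above:
 * the C library's notion of local time, which ignores any SET TimeZone. */
#include <stdio.h>
#include <time.h>

int main(void)
{
    char   strfbuf[128];
    time_t stamp_time = time(NULL);

    strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z",
             localtime(&stamp_time));
    printf("START TIME: %s\n", strfbuf);    /* label-file style line */
    return 0;
}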
*/ stamp_time = time(NULL); strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&stamp_time)); + /* - * Check for existing backup label --- implies a backup is already running + * Check for existing backup label --- implies a backup is already + * running */ snprintf(labelfilepath, MAXPGPATH, "%s/backup_label", DataDir); if (stat(labelfilepath, &stat_buf) != 0) @@ -5263,6 +5294,7 @@ pg_start_backup(PG_FUNCTION_ARGS) errmsg("a backup is already in progress"), errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.", labelfilepath))); + /* * Okay, write the file */ @@ -5283,13 +5315,14 @@ pg_start_backup(PG_FUNCTION_ARGS) (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", labelfilepath))); + /* * We're done. As a convenience, return the starting WAL offset. */ snprintf(xlogfilename, sizeof(xlogfilename), "%X/%X", startpoint.xlogid, startpoint.xrecoff); result = DatumGetTextP(DirectFunctionCall1(textin, - CStringGetDatum(xlogfilename))); + CStringGetDatum(xlogfilename))); PG_RETURN_TEXT_P(result); } @@ -5308,7 +5341,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) XLogCtlInsert *Insert = &XLogCtl->Insert; XLogRecPtr startpoint; XLogRecPtr stoppoint; - time_t stamp_time; + time_t stamp_time; char strfbuf[128]; char labelfilepath[MAXPGPATH]; char histfilepath[MAXPGPATH]; @@ -5321,10 +5354,11 @@ pg_stop_backup(PG_FUNCTION_ARGS) char ch; int ich; - if (!superuser()) + if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be superuser to run a backup")))); + /* * Get the current end-of-WAL position; it will be unsafe to use this * dump to restore to a point in advance of this time. @@ -5335,16 +5369,18 @@ pg_stop_backup(PG_FUNCTION_ARGS) XLByteToSeg(stoppoint, _logId, _logSeg); XLogFileName(stopxlogfilename, ThisTimeLineID, _logId, _logSeg); + /* - * We deliberately use strftime/localtime not the src/timezone functions, - * so that backup labels will consistently be recorded in the same - * timezone regardless of TimeZone setting. This matches elog.c's - * practice. + * We deliberately use strftime/localtime not the src/timezone + * functions, so that backup labels will consistently be recorded in + * the same timezone regardless of TimeZone setting. This matches + * elog.c's practice. */ stamp_time = time(NULL); strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&stamp_time)); + /* * Open the existing label file */ @@ -5361,9 +5397,11 @@ pg_stop_backup(PG_FUNCTION_ARGS) (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("a backup is not in progress"))); } + /* * Read and parse the START WAL LOCATION line (this code is pretty - * crude, but we are not expecting any variability in the file format). + * crude, but we are not expecting any variability in the file + * format). 
*/ if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %24s)%c", &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename, @@ -5371,6 +5409,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("invalid data in file \"%s\"", labelfilepath))); + /* * Write the backup history file */ @@ -5396,6 +5435,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", histfilepath))); + /* * Close and remove the backup label file */ @@ -5409,6 +5449,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", labelfilepath))); + /* * Notify archiver that history file may be archived immediately */ @@ -5418,13 +5459,14 @@ pg_stop_backup(PG_FUNCTION_ARGS) startpoint.xrecoff % XLogSegSize); XLogArchiveNotify(histfilepath); } + /* * We're done. As a convenience, return the ending WAL offset. */ snprintf(stopxlogfilename, sizeof(stopxlogfilename), "%X/%X", stoppoint.xlogid, stoppoint.xrecoff); result = DatumGetTextP(DirectFunctionCall1(textin, - CStringGetDatum(stopxlogfilename))); + CStringGetDatum(stopxlogfilename))); PG_RETURN_TEXT_P(result); } @@ -5433,7 +5475,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) * * If we see a backup_label during recovery, we assume that we are recovering * from a backup dump file, and we therefore roll forward from the checkpoint - * identified by the label file, NOT what pg_control says. This avoids the + * identified by the label file, NOT what pg_control says. This avoids the * problem that pg_control might have been archived one or more checkpoints * later than the start of the dump, and so if we rely on it as the start * point, we will fail to restore a consistent database state. @@ -5476,10 +5518,11 @@ read_backup_label(XLogRecPtr *checkPointLoc) labelfilepath))); return false; /* it's not there, all is fine */ } + /* - * Read and parse the START WAL LOCATION and CHECKPOINT lines (this code - * is pretty crude, but we are not expecting any variability in the file - * format). + * Read and parse the START WAL LOCATION and CHECKPOINT lines (this + * code is pretty crude, but we are not expecting any variability in + * the file format). */ if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %08X%16s)%c", &startpoint.xlogid, &startpoint.xrecoff, &tli, @@ -5498,6 +5541,7 @@ read_backup_label(XLogRecPtr *checkPointLoc) (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", labelfilepath))); + /* * Try to retrieve the backup history file (no error if we can't) */ @@ -5511,24 +5555,24 @@ read_backup_label(XLogRecPtr *checkPointLoc) BackupHistoryFilePath(histfilepath, tli, _logId, _logSeg, startpoint.xrecoff % XLogSegSize); - fp = AllocateFile(histfilepath, "r"); + fp = AllocateFile(histfilepath, "r"); if (fp) { /* * Parse history file to identify stop point. 
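Both the backup label and the history file are read back with fixed fscanf() formats like the two above, in which a WAL location is written as two hex words separated by a slash. A standalone analogue using a made-up location string:

/* Standalone analogue of the "%X/%X" WAL-location parsing above. */
#include <stdio.h>

int main(void)
{
    const char  *loc = "0/A4C72E10";    /* made-up location */
    unsigned int xlogid;
    unsigned int xrecoff;

    if (sscanf(loc, "%X/%X", &xlogid, &xrecoff) != 2)
    {
        fprintf(stderr, "invalid data\n");  /* the real code ereport()s */
        return 1;
    }
    printf("log file %u, byte offset %u\n", xlogid, xrecoff);
    return 0;
}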
*/ if (fscanf(fp, "START WAL LOCATION: %X/%X (file %24s)%c", - &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename, + &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename, &ch) != 4 || ch != '\n') ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("invalid data in file \"%s\"", histfilename))); + errmsg("invalid data in file \"%s\"", histfilename))); if (fscanf(fp, "STOP WAL LOCATION: %X/%X (file %24s)%c", - &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename, + &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename, &ch) != 4 || ch != '\n') ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("invalid data in file \"%s\"", histfilename))); + errmsg("invalid data in file \"%s\"", histfilename))); recoveryMinXlogOffset = stoppoint; if (ferror(fp) || FreeFile(fp)) ereport(FATAL, diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index 1791068d7a..4f1ac8dde5 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -11,7 +11,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.33 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.34 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -212,11 +212,11 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode) res->reldata.rd_node = rnode; /* - * We set up the lockRelId in case anything tries to lock the dummy - * relation. Note that this is fairly bogus since relNode may be - * different from the relation's OID. It shouldn't really matter - * though, since we are presumably running by ourselves and can't - * have any lock conflicts ... + * We set up the lockRelId in case anything tries to lock the + * dummy relation. Note that this is fairly bogus since relNode + * may be different from the relation's OID. It shouldn't really + * matter though, since we are presumably running by ourselves and + * can't have any lock conflicts ... */ res->reldata.rd_lockInfo.lockRelId.dbId = rnode.dbNode; res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode; @@ -234,14 +234,15 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode) res->reldata.rd_targblock = InvalidBlockNumber; res->reldata.rd_smgr = smgropen(res->reldata.rd_node); + /* * Create the target file if it doesn't already exist. This lets * us cope if the replay sequence contains writes to a relation * that is later deleted. (The original coding of this routine * would instead return NULL, causing the writes to be suppressed. - * But that seems like it risks losing valuable data if the filesystem - * loses an inode during a crash. Better to write the data until we - * are actually told to delete the file.) + * But that seems like it risks losing valuable data if the + * filesystem loses an inode during a crash. Better to write the + * data until we are actually told to delete the file.) 
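The XLogOpenRelation() comment above argues for creating a missing target file during replay rather than suppressing the writes, so that data survives a lost inode until a deletion record says otherwise. The same policy in plain POSIX terms (the backend goes through smgrcreate(), not open(); the path here is hypothetical):

/* The create-if-missing policy described above, in plain POSIX terms. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* During replay, a write may target a file that a later WAL record will
 * delete.  Creating it on demand and writing anyway is safer than
 * suppressing the write: if the filesystem lost an inode in the crash,
 * the data still survives until we are explicitly told to remove it. */
static int
open_replay_target(const char *path)
{
    return open(path, O_RDWR | O_CREAT, 0600);
}

int main(void)
{
    int fd = open_replay_target("/tmp/replay_target_demo");  /* hypothetical */

    if (fd < 0)
        return 1;
    printf("ready to replay writes\n");
    close(fd);
    return 0;
}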
*/ smgrcreate(res->reldata.rd_smgr, res->reldata.rd_istemp, true); } diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c index 9e401e7764..0b63df1801 100644 --- a/src/backend/bootstrap/bootstrap.c +++ b/src/backend/bootstrap/bootstrap.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.192 2004/08/29 04:12:25 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.193 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -111,46 +111,46 @@ struct typinfo static const struct typinfo TypInfo[] = { {"bool", BOOLOID, 0, 1, true, 'c', 'p', - F_BOOLIN, F_BOOLOUT}, + F_BOOLIN, F_BOOLOUT}, {"bytea", BYTEAOID, 0, -1, false, 'i', 'x', - F_BYTEAIN, F_BYTEAOUT}, + F_BYTEAIN, F_BYTEAOUT}, {"char", CHAROID, 0, 1, true, 'c', 'p', - F_CHARIN, F_CHAROUT}, + F_CHARIN, F_CHAROUT}, {"name", NAMEOID, CHAROID, NAMEDATALEN, false, 'i', 'p', - F_NAMEIN, F_NAMEOUT}, + F_NAMEIN, F_NAMEOUT}, {"int2", INT2OID, 0, 2, true, 's', 'p', - F_INT2IN, F_INT2OUT}, + F_INT2IN, F_INT2OUT}, {"int4", INT4OID, 0, 4, true, 'i', 'p', - F_INT4IN, F_INT4OUT}, + F_INT4IN, F_INT4OUT}, {"regproc", REGPROCOID, 0, 4, true, 'i', 'p', - F_REGPROCIN, F_REGPROCOUT}, + F_REGPROCIN, F_REGPROCOUT}, {"regclass", REGCLASSOID, 0, 4, true, 'i', 'p', - F_REGCLASSIN, F_REGCLASSOUT}, + F_REGCLASSIN, F_REGCLASSOUT}, {"regtype", REGTYPEOID, 0, 4, true, 'i', 'p', - F_REGTYPEIN, F_REGTYPEOUT}, + F_REGTYPEIN, F_REGTYPEOUT}, {"text", TEXTOID, 0, -1, false, 'i', 'x', - F_TEXTIN, F_TEXTOUT}, + F_TEXTIN, F_TEXTOUT}, {"oid", OIDOID, 0, 4, true, 'i', 'p', - F_OIDIN, F_OIDOUT}, + F_OIDIN, F_OIDOUT}, {"tid", TIDOID, 0, 6, false, 's', 'p', - F_TIDIN, F_TIDOUT}, + F_TIDIN, F_TIDOUT}, {"xid", XIDOID, 0, 4, true, 'i', 'p', - F_XIDIN, F_XIDOUT}, + F_XIDIN, F_XIDOUT}, {"cid", CIDOID, 0, 4, true, 'i', 'p', - F_CIDIN, F_CIDOUT}, + F_CIDIN, F_CIDOUT}, {"int2vector", INT2VECTOROID, INT2OID, INDEX_MAX_KEYS * 2, false, 's', 'p', - F_INT2VECTORIN, F_INT2VECTOROUT}, + F_INT2VECTORIN, F_INT2VECTOROUT}, {"oidvector", OIDVECTOROID, OIDOID, INDEX_MAX_KEYS * 4, false, 'i', 'p', - F_OIDVECTORIN, F_OIDVECTOROUT}, + F_OIDVECTORIN, F_OIDVECTOROUT}, {"_int4", INT4ARRAYOID, INT4OID, -1, false, 'i', 'x', - F_ARRAY_IN, F_ARRAY_OUT}, + F_ARRAY_IN, F_ARRAY_OUT}, {"_text", 1009, TEXTOID, -1, false, 'i', 'x', - F_ARRAY_IN, F_ARRAY_OUT}, + F_ARRAY_IN, F_ARRAY_OUT}, {"_aclitem", 1034, ACLITEMOID, -1, false, 'i', 'x', - F_ARRAY_IN, F_ARRAY_OUT} + F_ARRAY_IN, F_ARRAY_OUT} }; -static const int n_types = sizeof(TypInfo) / sizeof(struct typinfo); +static const int n_types = sizeof(TypInfo) / sizeof(struct typinfo); struct typmap { /* a hack */ @@ -498,13 +498,13 @@ static void usage(void) { write_stderr("Usage:\n" - " postgres -boot [OPTION]... DBNAME\n" - " -c NAME=VALUE set run-time parameter\n" - " -d 1-5 debug level\n" - " -D datadir data directory\n" - " -F turn off fsync\n" - " -o file send debug output to file\n" - " -x num internal use\n"); + " postgres -boot [OPTION]... 
DBNAME\n" + " -c NAME=VALUE set run-time parameter\n" + " -d 1-5 debug level\n" + " -D datadir data directory\n" + " -F turn off fsync\n" + " -o file send debug output to file\n" + " -x num internal use\n"); proc_exit(1); } diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 38f8ccfff6..a6ec207a32 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.106 2004/08/29 04:12:26 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.107 2004/08/29 05:06:41 momjian Exp $ * * NOTES * See acl.h. @@ -73,7 +73,7 @@ dumpacl(Acl *acl) * Determine the effective grantor ID for a GRANT or REVOKE operation. * * Ordinarily this is just the current user, but when a superuser does - * GRANT or REVOKE, we pretend he is the object owner. This ensures that + * GRANT or REVOKE, we pretend he is the object owner. This ensures that * all granted privileges appear to flow from the object owner, and there * are never multiple "original sources" of a privilege. */ @@ -122,25 +122,25 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant, foreach(j, grantees) { PrivGrantee *grantee = (PrivGrantee *) lfirst(j); - AclItem aclitem; + AclItem aclitem; uint32 idtype; Acl *newer_acl; if (grantee->username) { - aclitem.ai_grantee = get_usesysid(grantee->username); + aclitem. ai_grantee = get_usesysid(grantee->username); idtype = ACL_IDTYPE_UID; } else if (grantee->groupname) { - aclitem.ai_grantee = get_grosysid(grantee->groupname); + aclitem. ai_grantee = get_grosysid(grantee->groupname); idtype = ACL_IDTYPE_GID; } else { - aclitem.ai_grantee = ACL_ID_WORLD; + aclitem. ai_grantee = ACL_ID_WORLD; idtype = ACL_IDTYPE_WORLD; } @@ -157,18 +157,19 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("grant options can only be granted to individual users"))); - aclitem.ai_grantor = grantor_uid; + aclitem. ai_grantor = grantor_uid; /* * The asymmetry in the conditions here comes from the spec. In - * GRANT, the grant_option flag signals WITH GRANT OPTION, which means - * to grant both the basic privilege and its grant option. But in - * REVOKE, plain revoke revokes both the basic privilege and its - * grant option, while REVOKE GRANT OPTION revokes only the option. + * GRANT, the grant_option flag signals WITH GRANT OPTION, which + * means to grant both the basic privilege and its grant option. + * But in REVOKE, plain revoke revokes both the basic privilege + * and its grant option, while REVOKE GRANT OPTION revokes only + * the option. */ ACLITEM_SET_PRIVS_IDTYPE(aclitem, - (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS, - (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS, + (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS, + (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS, idtype); newer_acl = aclupdate(new_acl, &aclitem, modechg, owner_uid, behavior); @@ -318,11 +319,11 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. 
(For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -476,11 +477,11 @@ ExecuteGrantStmt_Database(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. (For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -630,11 +631,11 @@ ExecuteGrantStmt_Function(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. (For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -761,7 +762,7 @@ ExecuteGrantStmt_Language(GrantStmt *stmt) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("language \"%s\" is not trusted", langname), - errhint("Only superusers may use untrusted languages."))); + errhint("Only superusers may use untrusted languages."))); /* * Note: for now, languages are treated as owned by the bootstrap @@ -793,11 +794,11 @@ ExecuteGrantStmt_Language(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. (For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -946,11 +947,11 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. 
(For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -1039,8 +1040,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) if (priv & ~((AclMode) ACL_ALL_RIGHTS_TABLESPACE)) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), - errmsg("invalid privilege type %s for tablespace", - privilege_to_string(priv)))); + errmsg("invalid privilege type %s for tablespace", + privilege_to_string(priv)))); privileges |= priv; } } @@ -1076,7 +1077,7 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace \"%s\" does not exist", spcname))); + errmsg("tablespace \"%s\" does not exist", spcname))); pg_tablespace_tuple = (Form_pg_tablespace) GETSTRUCT(tuple); ownerId = pg_tablespace_tuple->spcowner; @@ -1105,11 +1106,11 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. (For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -1389,11 +1390,12 @@ pg_class_aclmask(Oid table_oid, AclId userid, /* * Deny anyone permission to update a system catalog unless * pg_shadow.usecatupd is set. (This is to let superusers protect - * themselves from themselves.) Also allow it if allowSystemTableMods. + * themselves from themselves.) Also allow it if + * allowSystemTableMods. * - * As of 7.4 we have some updatable system views; those shouldn't - * be protected in this way. Assume the view rules can take care - * of themselves. + * As of 7.4 we have some updatable system views; those shouldn't be + * protected in this way. Assume the view rules can take care of + * themselves. */ if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) && IsSystemClass(classForm) && @@ -1648,23 +1650,23 @@ pg_namespace_aclmask(Oid nsp_oid, AclId userid, return mask; /* - * If we have been assigned this namespace as a temp namespace, - * check to make sure we have CREATE TEMP permission on the database, - * and if so act as though we have all standard (but not GRANT OPTION) + * If we have been assigned this namespace as a temp namespace, check + * to make sure we have CREATE TEMP permission on the database, and if + * so act as though we have all standard (but not GRANT OPTION) * permissions on the namespace. If we don't have CREATE TEMP, act as * though we have only USAGE (and not CREATE) rights. * - * This may seem redundant given the check in InitTempTableNamespace, - * but it really isn't since current user ID may have changed since then. + * This may seem redundant given the check in InitTempTableNamespace, but + * it really isn't since current user ID may have changed since then. 
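The pg_namespace_aclmask() comment above derives temp-namespace rights from the database-level CREATE TEMP privilege, rechecked on every call because the effective user ID can change mid-session (for example inside a SECURITY DEFINER function). A sketch of the mapping; the bit values are illustrative, not the real ACL_* constants:

/* Sketch of the temp-namespace rule above; bit values are illustrative. */
#include <stdio.h>

typedef unsigned int AclMode;

#define ACL_USAGE_BIT   ((AclMode) 1 << 0)
#define ACL_CREATE_BIT  ((AclMode) 1 << 1)

/* With CREATE TEMP on the database, act as if holding all standard (but
 * not grant-option) rights on the temp namespace; without it, USAGE only,
 * so existing temp tables stay reachable but new ones cannot be made. */
static AclMode
temp_namespace_mask(int has_db_create_temp)
{
    return has_db_create_temp ? (ACL_USAGE_BIT | ACL_CREATE_BIT)
                              : ACL_USAGE_BIT;
}

int main(void)
{
    printf("with CREATE TEMP:    0x%x\n", temp_namespace_mask(1));
    printf("without CREATE TEMP: 0x%x\n", temp_namespace_mask(0));
    return 0;
}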
* The upshot of this behavior is that a SECURITY DEFINER function can - * create temp tables that can then be accessed (if permission is granted) - * by code in the same session that doesn't have permissions to create - * temp tables. + * create temp tables that can then be accessed (if permission is + * granted) by code in the same session that doesn't have permissions + * to create temp tables. * * XXX Would it be safe to ereport a special error message as * InitTempTableNamespace does? Returning zero here means we'll get a - * generic "permission denied for schema pg_temp_N" message, which is not - * remarkably user-friendly. + * generic "permission denied for schema pg_temp_N" message, which is + * not remarkably user-friendly. */ if (isTempNamespace(nsp_oid)) { @@ -1731,8 +1733,8 @@ pg_tablespace_aclmask(Oid spc_oid, AclId userid, AclId ownerId; /* - * Only shared relations can be stored in global space; don't let - * even superusers override this + * Only shared relations can be stored in global space; don't let even + * superusers override this */ if (spc_oid == GLOBALTABLESPACE_OID && !IsBootstrapProcessingMode()) return 0; @@ -1756,7 +1758,7 @@ pg_tablespace_aclmask(Oid spc_oid, AclId userid, if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace with OID %u does not exist", spc_oid))); + errmsg("tablespace with OID %u does not exist", spc_oid))); ownerId = ((Form_pg_tablespace) GETSTRUCT(tuple))->spcowner; @@ -2034,7 +2036,7 @@ pg_tablespace_ownercheck(Oid spc_oid, AclId userid) if (!HeapTupleIsValid(spctuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace with OID %u does not exist", spc_oid))); + errmsg("tablespace with OID %u does not exist", spc_oid))); spcowner = ((Form_pg_tablespace) GETSTRUCT(spctuple))->spcowner; @@ -2131,7 +2133,7 @@ pg_conversion_ownercheck(Oid conv_oid, AclId userid) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("conversion with OID %u does not exist", conv_oid))); + errmsg("conversion with OID %u does not exist", conv_oid))); owner_id = ((Form_pg_conversion) GETSTRUCT(tuple))->conowner; diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index 72002d8461..586be553a8 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.38 2004/08/29 04:12:27 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.39 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -970,6 +970,7 @@ find_expr_references_walker(Node *node, if (var->varno <= 0 || var->varno > list_length(rtable)) elog(ERROR, "invalid varno %d", var->varno); rte = rt_fetch(var->varno, rtable); + /* * A whole-row Var references no specific columns, so adds no new * dependency. 
@@ -995,7 +996,7 @@ find_expr_references_walker(Node *node, var->varattno > list_length(rte->joinaliasvars)) elog(ERROR, "invalid varattno %d", var->varattno); find_expr_references_walker((Node *) list_nth(rte->joinaliasvars, - var->varattno - 1), + var->varattno - 1), context); list_free(context->rtables); context->rtables = save_rtables; @@ -1424,8 +1425,8 @@ getObjectDescription(const ObjectAddress *object) getRelationDescription(&buffer, object->objectId); if (object->objectSubId != 0) appendStringInfo(&buffer, gettext(" column %s"), - get_relid_attribute_name(object->objectId, - object->objectSubId)); + get_relid_attribute_name(object->objectId, + object->objectSubId)); break; case OCLASS_PROC: @@ -1624,7 +1625,7 @@ getObjectDescription(const ObjectAddress *object) appendStringInfo(&buffer, gettext("operator class %s for %s"), quote_qualified_identifier(nspname, - NameStr(opcForm->opcname)), + NameStr(opcForm->opcname)), NameStr(amForm->amname)); ReleaseSysCache(amTup); diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 057bd7fb80..5cf6c5fa6d 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.274 2004/08/29 04:12:27 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.275 2004/08/29 05:06:41 momjian Exp $ * * * INTERFACE ROUTINES @@ -265,10 +265,10 @@ heap_create(const char *relname, /* * Never allow a pg_class entry to explicitly specify the database's - * default tablespace in reltablespace; force it to zero instead. - * This ensures that if the database is cloned with a different - * default tablespace, the pg_class entry will still match where - * CREATE DATABASE will put the physically copied relation. + * default tablespace in reltablespace; force it to zero instead. This + * ensures that if the database is cloned with a different default + * tablespace, the pg_class entry will still match where CREATE + * DATABASE will put the physically copied relation. * * Yes, this is a bit of a hack. */ @@ -294,7 +294,8 @@ heap_create(const char *relname, nailme); /* - * have the storage manager create the relation's disk file, if needed. + * have the storage manager create the relation's disk file, if + * needed. */ if (create_storage) { @@ -980,12 +981,12 @@ RemoveAttributeById(Oid relid, AttrNumber attnum) /* * Set the type OID to invalid. A dropped attribute's type link - * cannot be relied on (once the attribute is dropped, the type might - * be too). Fortunately we do not need the type row --- the only - * really essential information is the type's typlen and typalign, - * which are preserved in the attribute's attlen and attalign. We set - * atttypid to zero here as a means of catching code that incorrectly - * expects it to be valid. + * cannot be relied on (once the attribute is dropped, the type + * might be too). Fortunately we do not need the type row --- the + * only really essential information is the type's typlen and + * typalign, which are preserved in the attribute's attlen and + * attalign. We set atttypid to zero here as a means of catching + * code that incorrectly expects it to be valid. 
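The RemoveAttributeById() comment in the hunk above notes that a dropped column's pg_type row may disappear, so the attribute keeps only attlen and attalign, which is all that is needed to step over its bytes. A simplified fixed-width sketch of that arithmetic (varlena columns omitted):

/* Sketch of why attlen/attalign suffice for a dropped column. */
#include <stdio.h>

typedef struct
{
    int  attlen;    /* fixed width in bytes (simplified: no varlena) */
    char attalign;  /* 'c'=1, 's'=2, 'i'=4, 'd'=8 */
} DroppedAttr;

static int
align_to(int offset, char attalign)
{
    int a = (attalign == 'c') ? 1 : (attalign == 's') ? 2 :
            (attalign == 'i') ? 4 : 8;

    return (offset + a - 1) & ~(a - 1);     /* valid for power-of-two a */
}

int main(void)
{
    DroppedAttr dropped = { 4, 'i' };   /* e.g. a dropped int4 column */
    int         offset = 5;             /* byte position before the column */

    offset = align_to(offset, dropped.attalign) + dropped.attlen;
    printf("next column starts at byte %d\n", offset);     /* prints 12 */
    return 0;
}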
*/ attStruct->atttypid = InvalidOid; @@ -995,7 +996,10 @@ RemoveAttributeById(Oid relid, AttrNumber attnum) /* We don't want to keep stats for it anymore */ attStruct->attstattarget = 0; - /* Change the column name to something that isn't likely to conflict */ + /* + * Change the column name to something that isn't likely to + * conflict + */ snprintf(newattname, sizeof(newattname), "........pg.dropped.%d........", attnum); namestrcpy(&(attStruct->attname), newattname); @@ -1199,7 +1203,7 @@ heap_drop_with_catalog(Oid relid) /* * Flush the relation from the relcache. We want to do this before * starting to remove catalog entries, just to be certain that no - * relcache entry rebuild will happen partway through. (That should + * relcache entry rebuild will happen partway through. (That should * not really matter, since we don't do CommandCounterIncrement here, * but let's be safe.) */ @@ -1584,11 +1588,11 @@ AddRelationRawConstraints(Relation rel, if (pstate->p_hasSubLinks) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use subquery in check constraint"))); + errmsg("cannot use subquery in check constraint"))); if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in check constraint"))); + errmsg("cannot use aggregate function in check constraint"))); /* * Check name uniqueness, or generate a name if none was given. @@ -1614,8 +1618,8 @@ AddRelationRawConstraints(Relation rel, if (strcmp((char *) lfirst(cell2), ccname) == 0) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("check constraint \"%s\" already exists", - ccname))); + errmsg("check constraint \"%s\" already exists", + ccname))); } } else @@ -1623,18 +1627,18 @@ AddRelationRawConstraints(Relation rel, /* * When generating a name, we want to create "tab_col_check" * for a column constraint and "tab_check" for a table - * constraint. We no longer have any info about the - * syntactic positioning of the constraint phrase, so we - * approximate this by seeing whether the expression references - * more than one column. (If the user played by the rules, - * the result is the same...) + * constraint. We no longer have any info about the syntactic + * positioning of the constraint phrase, so we approximate + * this by seeing whether the expression references more than + * one column. (If the user played by the rules, the result + * is the same...) * - * Note: pull_var_clause() doesn't descend into sublinks, - * but we eliminated those above; and anyway this only needs - * to be an approximate answer. + * Note: pull_var_clause() doesn't descend into sublinks, but we + * eliminated those above; and anyway this only needs to be an + * approximate answer. */ - List *vars; - char *colname; + List *vars; + char *colname; vars = pull_var_clause(expr, false); @@ -1763,7 +1767,7 @@ cookDefault(ParseState *pstate, if (contain_var_clause(expr)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("cannot use column references in default expression"))); + errmsg("cannot use column references in default expression"))); /* * It can't return a set either. @@ -1783,7 +1787,7 @@ cookDefault(ParseState *pstate, if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in default expression"))); + errmsg("cannot use aggregate function in default expression"))); /* * Coerce the expression to the correct type and typmod, if given. 
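The AddRelationRawConstraints() comment above describes the constraint-naming heuristic: a check expression referencing exactly one column is named tab_col_check, anything else tab_check. A standalone sketch with a hypothetical helper:

/* Sketch of the constraint-naming heuristic above. */
#include <stdio.h>

/* onlycol is the single referenced column, or NULL when the expression
 * references zero or several columns (hypothetical helper, not the
 * backend's actual interface). */
static void
make_check_name(char *buf, size_t buflen,
                const char *tab, const char *onlycol)
{
    if (onlycol != NULL)
        snprintf(buf, buflen, "%s_%s_check", tab, onlycol);
    else
        snprintf(buf, buflen, "%s_check", tab);
}

int main(void)
{
    char name[128];

    make_check_name(name, sizeof(name), "orders", "qty");
    printf("%s\n", name);       /* orders_qty_check */
    make_check_name(name, sizeof(name), "orders", NULL);
    printf("%s\n", name);       /* orders_check */
    return 0;
}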
@@ -2047,7 +2051,7 @@ heap_truncate_check_FKs(Relation rel) return; /* - * Otherwise, must scan pg_constraint. Right now, this is a seqscan + * Otherwise, must scan pg_constraint. Right now, this is a seqscan * because there is no available index on confrelid. */ fkeyRel = heap_openr(ConstraintRelationName, AccessShareLock); diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 78bbe3ecf5..bed06fc538 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.237 2004/08/29 04:12:27 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.238 2004/08/29 05:06:41 momjian Exp $ * * * INTERFACE ROUTINES @@ -511,9 +511,10 @@ index_create(Oid heapRelationId, * We cannot allow indexing a shared relation after initdb (because * there's no way to make the entry in other databases' pg_class). * Unfortunately we can't distinguish initdb from a manually started - * standalone backend (toasting of shared rels happens after the bootstrap - * phase, so checking IsBootstrapProcessingMode() won't work). However, - * we can at least prevent this mistake under normal multi-user operation. + * standalone backend (toasting of shared rels happens after the + * bootstrap phase, so checking IsBootstrapProcessingMode() won't + * work). However, we can at least prevent this mistake under normal + * multi-user operation. */ if (shared_relation && IsUnderPostmaster) ereport(ERROR, @@ -800,8 +801,8 @@ index_drop(Oid indexId) /* * Close and flush the index's relcache entry, to ensure relcache - * doesn't try to rebuild it while we're deleting catalog entries. - * We keep the lock though. + * doesn't try to rebuild it while we're deleting catalog entries. We + * keep the lock though. */ index_close(userIndexRelation); @@ -826,8 +827,8 @@ index_drop(Oid indexId) heap_close(indexRelation, RowExclusiveLock); /* - * if it has any expression columns, we might have stored - * statistics about them. + * if it has any expression columns, we might have stored statistics + * about them. */ if (hasexprs) RemoveStatistics(indexId, 0); @@ -1008,7 +1009,7 @@ setRelhasindex(Oid relid, bool hasindex, bool isprimary, Oid reltoastidxid) /* * Find the tuple to update in pg_class. In bootstrap mode we can't - * use heap_update, so cheat and overwrite the tuple in-place. In + * use heap_update, so cheat and overwrite the tuple in-place. In * normal processing, make a copy to scribble on. */ pg_class = heap_openr(RelationRelationName, RowExclusiveLock); @@ -1122,13 +1123,13 @@ setNewRelfilenode(Relation relation) newrelfilenode = newoid(); /* - * Find the pg_class tuple for the given relation. This is not used + * Find the pg_class tuple for the given relation. This is not used * during bootstrap, so okay to use heap_update always. */ pg_class = heap_openr(RelationRelationName, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, - ObjectIdGetDatum(RelationGetRelid(relation)), + ObjectIdGetDatum(RelationGetRelid(relation)), 0, 0, 0); if (!HeapTupleIsValid(tuple)) elog(ERROR, "could not find tuple for relation %u", @@ -1206,15 +1207,15 @@ UpdateStats(Oid relid, double reltuples) /* * Find the tuple to update in pg_class. Normally we make a copy of - * the tuple using the syscache, modify it, and apply heap_update. - * But in bootstrap mode we can't use heap_update, so we cheat and + * the tuple using the syscache, modify it, and apply heap_update. 
But + * in bootstrap mode we can't use heap_update, so we cheat and * overwrite the tuple in-place. * - * We also must cheat if reindexing pg_class itself, because the - * target index may presently not be part of the set of indexes that + * We also must cheat if reindexing pg_class itself, because the target + * index may presently not be part of the set of indexes that * CatalogUpdateIndexes would update (see reindex_relation). In this * case the stats updates will not be WAL-logged and so could be lost - * in a crash. This seems OK considering VACUUM does the same thing. + * in a crash. This seems OK considering VACUUM does the same thing. */ pg_class = heap_openr(RelationRelationName, RowExclusiveLock); @@ -1454,7 +1455,7 @@ IndexBuildHeapScan(Relation heapRelation, scan = heap_beginscan(heapRelation, /* relation */ snapshot, /* seeself */ 0, /* number of keys */ - NULL); /* scan key */ + NULL); /* scan key */ reltuples = 0; @@ -1513,7 +1514,7 @@ IndexBuildHeapScan(Relation heapRelation, * system catalogs before committing. */ if (!TransactionIdIsCurrentTransactionId( - HeapTupleHeaderGetXmin(heapTuple->t_data)) + HeapTupleHeaderGetXmin(heapTuple->t_data)) && !IsSystemRelation(heapRelation)) elog(ERROR, "concurrent insert in progress"); indexIt = true; @@ -1531,7 +1532,7 @@ IndexBuildHeapScan(Relation heapRelation, * system catalogs before committing. */ if (!TransactionIdIsCurrentTransactionId( - HeapTupleHeaderGetXmax(heapTuple->t_data)) + HeapTupleHeaderGetXmax(heapTuple->t_data)) && !IsSystemRelation(heapRelation)) elog(ERROR, "concurrent delete in progress"); indexIt = true; @@ -1659,11 +1660,11 @@ reindex_index(Oid indexId) * Note: for REINDEX INDEX, doing this before opening the parent heap * relation means there's a possibility for deadlock failure against * another xact that is doing normal accesses to the heap and index. - * However, it's not real clear why you'd be wanting to do REINDEX INDEX - * on a table that's in active use, so I'd rather have the protection of - * making sure the index is locked down. In the REINDEX TABLE and - * REINDEX DATABASE cases, there is no problem because caller already - * holds exclusive lock on the parent table. + * However, it's not real clear why you'd be wanting to do REINDEX + * INDEX on a table that's in active use, so I'd rather have the + * protection of making sure the index is locked down. In the REINDEX + * TABLE and REINDEX DATABASE cases, there is no problem because + * caller already holds exclusive lock on the parent table. */ iRel = index_open(indexId); LockRelation(iRel, AccessExclusiveLock); @@ -1680,8 +1681,8 @@ reindex_index(Oid indexId) * we can do it the normal transaction-safe way. * * Since inplace processing isn't crash-safe, we only allow it in a - * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE cases, - * the caller should have detected this.) + * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE + * cases, the caller should have detected this.) */ inplace = iRel->rd_rel->relisshared; @@ -1705,7 +1706,8 @@ reindex_index(Oid indexId) { /* * Release any buffers associated with this index. If they're - * dirty, they're just dropped without bothering to flush to disk. + * dirty, they're just dropped without bothering to flush to + * disk. */ DropRelationBuffers(iRel); @@ -1724,8 +1726,8 @@ reindex_index(Oid indexId) index_build(heapRelation, iRel, indexInfo); /* - * index_build will close both the heap and index relations (but not - * give up the locks we hold on them). So we're done. 
+ * index_build will close both the heap and index relations (but + * not give up the locks we hold on them). So we're done. */ } PG_CATCH(); @@ -1774,13 +1776,13 @@ reindex_relation(Oid relid, bool toast_too) /* * reindex_index will attempt to update the pg_class rows for the - * relation and index. If we are processing pg_class itself, we - * want to make sure that the updates do not try to insert index - * entries into indexes we have not processed yet. (When we are - * trying to recover from corrupted indexes, that could easily - * cause a crash.) We can accomplish this because CatalogUpdateIndexes - * will use the relcache's index list to know which indexes to update. - * We just force the index list to be only the stuff we've processed. + * relation and index. If we are processing pg_class itself, we want + * to make sure that the updates do not try to insert index entries + * into indexes we have not processed yet. (When we are trying to + * recover from corrupted indexes, that could easily cause a crash.) + * We can accomplish this because CatalogUpdateIndexes will use the + * relcache's index list to know which indexes to update. We just + * force the index list to be only the stuff we've processed. * * It is okay to not insert entries into the indexes we have not * processed yet because all of this is transaction-safe. If we fail @@ -1795,7 +1797,7 @@ reindex_relation(Oid relid, bool toast_too) /* Reindex all the indexes. */ foreach(indexId, indexIds) { - Oid indexOid = lfirst_oid(indexId); + Oid indexOid = lfirst_oid(indexId); if (is_pg_class) RelationSetIndexList(rel, doneIndexes); @@ -1819,8 +1821,8 @@ reindex_relation(Oid relid, bool toast_too) result = (indexIds != NIL); /* - * If the relation has a secondary toast rel, reindex that too while we - * still hold the lock on the master table. + * If the relation has a secondary toast rel, reindex that too while + * we still hold the lock on the master table. 
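The reindex_relation() comment above describes forcing pg_class's relcache index list to contain only the indexes rebuilt so far, so that catalog updates never try to insert into a not-yet-rebuilt (possibly corrupt) index. The shape of that trick, reduced to a standalone sketch; the real code uses RelationSetIndexList() and CatalogUpdateIndexes():

/* Sketch of the "only indexes processed so far" trick above. */
#include <stdio.h>

#define NINDEXES 3

/* Stand-in for a catalog update: it may only insert into indexes that
 * the current (forced) index list says are live. */
static void
update_live_indexes(const int *live, int nlive, int newrow)
{
    for (int i = 0; i < nlive; i++)
        printf("  insert row %d into index %d\n", newrow, live[i]);
}

int main(void)
{
    int done[NINDEXES];
    int ndone = 0;

    for (int idx = 0; idx < NINDEXES; idx++)
    {
        printf("rebuilding index %d\n", idx);
        done[ndone++] = idx;        /* extend the forced list as we go */

        /* any catalog update from here on sees only rebuilt indexes */
        update_live_indexes(done, ndone, 100 + idx);
    }
    return 0;
}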
*/ if (toast_too && OidIsValid(toast_relid)) result |= reindex_relation(toast_relid, false); diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 2ff0070536..001e02ba7f 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -13,7 +13,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.69 2004/08/29 04:12:28 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.70 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -170,9 +170,9 @@ RangeVarGetRelid(const RangeVar *relation, bool failOK) if (strcmp(relation->catalogname, get_database_name(MyDatabaseId)) != 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cross-database references are not implemented: \"%s.%s.%s\"", - relation->catalogname, relation->schemaname, - relation->relname))); + errmsg("cross-database references are not implemented: \"%s.%s.%s\"", + relation->catalogname, relation->schemaname, + relation->relname))); } if (relation->schemaname) @@ -225,9 +225,9 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation) if (strcmp(newRelation->catalogname, get_database_name(MyDatabaseId)) != 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cross-database references are not implemented: \"%s.%s.%s\"", - newRelation->catalogname, newRelation->schemaname, - newRelation->relname))); + errmsg("cross-database references are not implemented: \"%s.%s.%s\"", + newRelation->catalogname, newRelation->schemaname, + newRelation->relname))); } if (newRelation->istemp) @@ -236,7 +236,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation) if (newRelation->schemaname) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("temporary tables may not specify a schema name"))); + errmsg("temporary tables may not specify a schema name"))); /* Initialize temp namespace if first time through */ if (!OidIsValid(myTempNamespace)) InitTempTableNamespace(); @@ -699,12 +699,13 @@ OpernameGetCandidates(List *names, char oprkind) /* * In typical scenarios, most if not all of the operators found by the - * catcache search will end up getting returned; and there can be quite - * a few, for common operator names such as '=' or '+'. To reduce the - * time spent in palloc, we allocate the result space as an array large - * enough to hold all the operators. The original coding of this routine - * did a separate palloc for each operator, but profiling revealed that - * the pallocs used an unreasonably large fraction of parsing time. + * catcache search will end up getting returned; and there can be + * quite a few, for common operator names such as '=' or '+'. To + * reduce the time spent in palloc, we allocate the result space as an + * array large enough to hold all the operators. The original coding + * of this routine did a separate palloc for each operator, but + * profiling revealed that the pallocs used an unreasonably large + * fraction of parsing time. 
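A note on the batching pattern the OpernameGetCandidates comment above describes: one allocation sized for the worst case, with list nodes carved out of it, rather than a palloc per candidate. Below is a minimal standalone sketch of the same idea; the Candidate type, its oid payload, and the use of plain malloc are this sketch's own simplifications, not the backend's _FuncCandidateList machinery.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for _FuncCandidateList: a singly linked node. */
typedef struct Candidate
{
	struct Candidate *next;
	int			oid;			/* stand-in payload */
} Candidate;

int
main(void)
{
	int			ncandidates = 5;
	Candidate  *space;
	Candidate  *head = NULL;
	Candidate  *c;
	int			i;

	/* One worst-case allocation instead of one allocation per node. */
	space = (Candidate *) malloc(ncandidates * sizeof(Candidate));
	if (space == NULL)
		return 1;

	for (i = 0; i < ncandidates; i++)
	{
		c = &space[i];			/* carve the next node out of the block */
		c->oid = 1000 + i;
		c->next = head;			/* push onto the result list */
		head = c;
	}

	for (c = head; c != NULL; c = c->next)
		printf("candidate oid %d\n", c->oid);

	free(space);				/* one free releases the whole list */
	return 0;
}

The payoff is the one the comment's profiling note points at: N list nodes cost a single allocator round trip, and releasing them is a single call.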
*/ #define SPACE_PER_OP MAXALIGN(sizeof(struct _FuncCandidateList) + sizeof(Oid)) @@ -1191,8 +1192,8 @@ DeconstructQualifiedName(List *names, if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cross-database references are not implemented: %s", - NameListToString(names)))); + errmsg("cross-database references are not implemented: %s", + NameListToString(names)))); break; default: ereport(ERROR, @@ -1645,10 +1646,11 @@ InitTempTableNamespace(void) * tables. We use a nonstandard error message here since * "databasename: permission denied" might be a tad cryptic. * - * Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask; - * that's necessary since current user ID could change during the session. - * But there's no need to make the namespace in the first place until a - * temp table creation request is made by someone with appropriate rights. + * Note that ACL_CREATE_TEMP rights are rechecked in + * pg_namespace_aclmask; that's necessary since current user ID could + * change during the session. But there's no need to make the + * namespace in the first place until a temp table creation request is + * made by someone with appropriate rights. */ if (pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE_TEMP) != ACLCHECK_OK) @@ -1847,7 +1849,8 @@ assign_search_path(const char *newval, bool doit, GucSource source) * ALTER DATABASE SET or ALTER USER SET command. It could be that * the intended use of the search path is for some other database, * so we should not error out if it mentions schemas not present - * in the current database. We reduce the message to NOTICE instead. + * in the current database. We reduce the message to NOTICE + * instead. */ foreach(l, namelist) { diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index 928d27ae16..2882eeff06 100644 --- a/src/backend/catalog/pg_aggregate.c +++ b/src/backend/catalog/pg_aggregate.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.67 2004/08/29 04:12:28 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.68 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -78,8 +78,8 @@ AggregateCreate(const char *aggName, ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot determine transition data type"), - errdetail("An aggregate using \"anyarray\" or \"anyelement\" as " - "transition type must have one of them as its base type."))); + errdetail("An aggregate using \"anyarray\" or \"anyelement\" as " + "transition type must have one of them as its base type."))); /* handle transfn */ MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid)); @@ -163,8 +163,8 @@ AggregateCreate(const char *aggName, ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot determine result data type"), - errdetail("An aggregate returning \"anyarray\" or \"anyelement\" " - "must have one of them as its base type."))); + errdetail("An aggregate returning \"anyarray\" or \"anyelement\" " + "must have one of them as its base type."))); /* * Everything looks okay. 
Try to create the pg_proc entry for the @@ -190,8 +190,8 @@ AggregateCreate(const char *aggName, PROVOLATILE_IMMUTABLE, /* volatility (not * needed for agg) */ 1, /* parameterCount */ - fnArgs, /* parameterTypes */ - NULL); /* parameterNames */ + fnArgs, /* parameterTypes */ + NULL); /* parameterNames */ /* * Okay to create the pg_aggregate entry. diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index 0f74814f17..1b658c9ad2 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.118 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.119 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -44,12 +44,12 @@ Datum fmgr_c_validator(PG_FUNCTION_ARGS); Datum fmgr_sql_validator(PG_FUNCTION_ARGS); static Datum create_parameternames_array(int parameterCount, - const char *parameterNames[]); + const char *parameterNames[]); static void sql_function_parse_error_callback(void *arg); -static int match_prosrc_to_query(const char *prosrc, const char *queryText, - int cursorpos); +static int match_prosrc_to_query(const char *prosrc, const char *queryText, + int cursorpos); static bool match_prosrc_to_literal(const char *prosrc, const char *literal, - int cursorpos, int *newcursorpos); + int cursorpos, int *newcursorpos); /* ---------------------------------------------------------------- @@ -173,7 +173,7 @@ ProcedureCreate(const char *procedureName, values[i++] = UInt16GetDatum(parameterCount); /* pronargs */ values[i++] = ObjectIdGetDatum(returnType); /* prorettype */ values[i++] = PointerGetDatum(typev); /* proargtypes */ - values[i++] = namesarray; /* proargnames */ + values[i++] = namesarray; /* proargnames */ if (namesarray == PointerGetDatum(NULL)) nulls[Anum_pg_proc_proargnames - 1] = 'n'; values[i++] = DirectFunctionCall1(textin, /* prosrc */ @@ -329,7 +329,7 @@ create_parameternames_array(int parameterCount, const char *parameterNames[]) if (!parameterNames) return PointerGetDatum(NULL); - for (i=0; i<parameterCount; i++) + for (i = 0; i < parameterCount; i++) { const char *s = parameterNames[i]; @@ -562,8 +562,9 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList) } /* - * Otherwise assume we are returning the whole tuple. Crosschecking - * against what the caller expects will happen at runtime. + * Otherwise assume we are returning the whole tuple. + * Crosschecking against what the caller expects will happen at + * runtime. */ return true; } @@ -652,9 +653,10 @@ fmgr_c_validator(PG_FUNCTION_ARGS) char *probin; /* - * It'd be most consistent to skip the check if !check_function_bodies, - * but the purpose of that switch is to be helpful for pg_dump loading, - * and for pg_dump loading it's much better if we *do* check. + * It'd be most consistent to skip the check if + * !check_function_bodies, but the purpose of that switch is to be + * helpful for pg_dump loading, and for pg_dump loading it's much + * better if we *do* check. */ tuple = SearchSysCache(PROCOID, @@ -760,10 +762,10 @@ fmgr_sql_validator(PG_FUNCTION_ARGS) error_context_stack = &sqlerrcontext; /* - * We can't do full prechecking of the function definition if there - * are any polymorphic input types, because actual datatypes of - * expression results will be unresolvable. The check will be done - * at runtime instead. 
+ * We can't do full prechecking of the function definition if + * there are any polymorphic input types, because actual datatypes + * of expression results will be unresolvable. The check will be + * done at runtime instead. * * We can run the text through the raw parser though; this will at * least catch silly syntactic errors. @@ -817,7 +819,7 @@ sql_function_parse_error_callback(void *arg) /* * Adjust a syntax error occurring inside the function body of a CREATE * FUNCTION command. This can be used by any function validator, not only - * for SQL-language functions. It is assumed that the syntax error position + * for SQL-language functions. It is assumed that the syntax error position * is initially relative to the function body string (as passed in). If * possible, we adjust the position to reference the original CREATE command; * if we can't manage that, we set up an "internal query" syntax error instead. @@ -832,11 +834,11 @@ function_parse_error_transpose(const char *prosrc) const char *queryText; /* - * Nothing to do unless we are dealing with a syntax error that has - * a cursor position. + * Nothing to do unless we are dealing with a syntax error that has a + * cursor position. * - * Some PLs may prefer to report the error position as an internal - * error to begin with, so check that too. + * Some PLs may prefer to report the error position as an internal error + * to begin with, so check that too. */ origerrposition = geterrposition(); if (origerrposition <= 0) @@ -891,17 +893,17 @@ match_prosrc_to_query(const char *prosrc, const char *queryText, * (though not in any very probable scenarios), so fail if we find * more than one match. */ - int prosrclen = strlen(prosrc); - int querylen = strlen(queryText); - int matchpos = 0; - int curpos; - int newcursorpos; + int prosrclen = strlen(prosrc); + int querylen = strlen(queryText); + int matchpos = 0; + int curpos; + int newcursorpos; - for (curpos = 0; curpos < querylen-prosrclen; curpos++) + for (curpos = 0; curpos < querylen - prosrclen; curpos++) { if (queryText[curpos] == '$' && - strncmp(prosrc, &queryText[curpos+1], prosrclen) == 0 && - queryText[curpos+1+prosrclen] == '$') + strncmp(prosrc, &queryText[curpos + 1], prosrclen) == 0 && + queryText[curpos + 1 + prosrclen] == '$') { /* * Found a $foo$ match. Since there are no embedded quoting @@ -910,20 +912,21 @@ match_prosrc_to_query(const char *prosrc, const char *queryText, */ if (matchpos) return 0; /* multiple matches, fail */ - matchpos = pg_mbstrlen_with_len(queryText, curpos+1) + matchpos = pg_mbstrlen_with_len(queryText, curpos + 1) + cursorpos; } else if (queryText[curpos] == '\'' && - match_prosrc_to_literal(prosrc, &queryText[curpos+1], + match_prosrc_to_literal(prosrc, &queryText[curpos + 1], cursorpos, &newcursorpos)) { /* - * Found a 'foo' match. match_prosrc_to_literal() has adjusted - * for any quotes or backslashes embedded in the literal. + * Found a 'foo' match. match_prosrc_to_literal() has + * adjusted for any quotes or backslashes embedded in the + * literal. */ if (matchpos) return 0; /* multiple matches, fail */ - matchpos = pg_mbstrlen_with_len(queryText, curpos+1) + matchpos = pg_mbstrlen_with_len(queryText, curpos + 1) + newcursorpos; } } @@ -948,15 +951,16 @@ match_prosrc_to_literal(const char *prosrc, const char *literal, /* * This implementation handles backslashes and doubled quotes in the - * string literal. It does not handle the SQL syntax for literals + * string literal. 
It does not handle the SQL syntax for literals * continued across line boundaries. * - * We do the comparison a character at a time, not a byte at a time, - * so that we can do the correct cursorpos math. + * We do the comparison a character at a time, not a byte at a time, so + * that we can do the correct cursorpos math. */ while (*prosrc) { cursorpos--; /* characters left before cursor */ + /* * Check for backslashes and doubled quotes in the literal; adjust * newcp when one is found before the cursor. diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c index 2326f25330..2ba6ea0ff9 100644 --- a/src/backend/catalog/pg_type.c +++ b/src/backend/catalog/pg_type.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.95 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.96 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -201,8 +201,8 @@ TypeCreate(const char *typeName, (internalSize <= 0 || internalSize > (int16) sizeof(Datum))) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("internal size %d is invalid for passed-by-value type", - internalSize))); + errmsg("internal size %d is invalid for passed-by-value type", + internalSize))); /* Only varlena types can be toasted */ if (storage != 'p' && internalSize != -1) diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c index bc3affcf4b..fcbd1df98d 100644 --- a/src/backend/commands/aggregatecmds.c +++ b/src/backend/commands/aggregatecmds.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.20 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.21 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -223,9 +223,9 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname) /* * if a basetype is passed in, then attempt to find an aggregate for - * that specific type; else attempt to find an aggregate with a basetype - * of ANYOID. This means that the aggregate applies to all basetypes - * (eg, COUNT). + * that specific type; else attempt to find an aggregate with a + * basetype of ANYOID. This means that the aggregate applies to all + * basetypes (eg, COUNT). */ if (basetype) basetypeOid = typenameTypeId(basetype); @@ -302,9 +302,9 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId) /* * if a basetype is passed in, then attempt to find an aggregate for - * that specific type; else attempt to find an aggregate with a basetype - * of ANYOID. This means that the aggregate applies to all basetypes - * (eg, COUNT). + * that specific type; else attempt to find an aggregate with a + * basetype of ANYOID. This means that the aggregate applies to all + * basetypes (eg, COUNT). */ if (basetype) basetypeOid = typenameTypeId(basetype); @@ -322,7 +322,7 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId) elog(ERROR, "cache lookup failed for function %u", procOid); procForm = (Form_pg_proc) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. 
*/ @@ -334,7 +334,10 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on tup because it's a copy */ + /* + * Modify the owner --- okay to scribble on tup because it's a + * copy + */ procForm->proowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 11d9c828f6..3e08a551f6 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.10 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.11 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -38,7 +38,7 @@ /* - * Executes an ALTER OBJECT / RENAME TO statement. Based on the object + * Executes an ALTER OBJECT / RENAME TO statement. Based on the object * type, the function appropriate to that type is executed. */ void @@ -153,7 +153,7 @@ ExecRenameStmt(RenameStmt *stmt) void ExecAlterOwnerStmt(AlterOwnerStmt *stmt) { - AclId newowner = get_usesysid(stmt->newowner); + AclId newowner = get_usesysid(stmt->newowner); switch (stmt->objectType) { diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 45aff53796..ce7db27211 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.75 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.76 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -42,9 +42,9 @@ /* Data structure for Algorithm S from Knuth 3.4.2 */ typedef struct { - BlockNumber N; /* number of blocks, known in advance */ + BlockNumber N; /* number of blocks, known in advance */ int n; /* desired sample size */ - BlockNumber t; /* current block number */ + BlockNumber t; /* current block number */ int m; /* blocks selected so far */ } BlockSamplerData; typedef BlockSamplerData *BlockSampler; @@ -68,13 +68,13 @@ static MemoryContext anl_context = NULL; static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, - int samplesize); + int samplesize); static bool BlockSampler_HasMore(BlockSampler bs); static BlockNumber BlockSampler_Next(BlockSampler bs); static void compute_index_stats(Relation onerel, double totalrows, - AnlIndexData *indexdata, int nindexes, - HeapTuple *rows, int numrows, - MemoryContext col_context); + AnlIndexData *indexdata, int nindexes, + HeapTuple *rows, int numrows, + MemoryContext col_context); static VacAttrStats *examine_attribute(Relation onerel, int attnum); static int acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, double *totalrows); @@ -157,9 +157,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) } /* - * Check that it's a plain table; we used to do this in - * get_rel_oids() but seems safer to check after we've locked the - * relation. + * Check that it's a plain table; we used to do this in get_rel_oids() + * but seems safer to check after we've locked the relation. 
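The analyze_rel hunk above ends on a pattern worth making explicit: take the lock first, then check the relation's kind, so the property being tested cannot change between the check and its use. A minimal sketch of that check-after-lock ordering, assuming POSIX threads; FakeRel, its relkind field, and the RELKIND_RELATION stand-in are illustrative only, not the backend's definitions.

#include <pthread.h>
#include <stdio.h>

#define RELKIND_RELATION 'r'	/* stand-in for the catalog constant */

typedef struct
{
	pthread_mutex_t lock;
	char		relkind;
} FakeRel;

/* Lock first, then test: relkind cannot change while the lock is held. */
static int
analyze_if_plain_table(FakeRel *rel)
{
	int			ok;

	pthread_mutex_lock(&rel->lock);
	ok = (rel->relkind == RELKIND_RELATION);
	if (ok)
		printf("analyzing plain table\n");
	else
		printf("skipping: not a plain table\n");
	pthread_mutex_unlock(&rel->lock);
	return ok;
}

int
main(void)
{
	static FakeRel rel = {PTHREAD_MUTEX_INITIALIZER, RELKIND_RELATION};

	return analyze_if_plain_table(&rel) ? 0 : 1;
}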
*/ if (onerel->rd_rel->relkind != RELKIND_RELATION) { @@ -239,9 +238,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) } /* - * Open all indexes of the relation, and see if there are any analyzable - * columns in the indexes. We do not analyze index columns if there was - * an explicit column list in the ANALYZE command, however. + * Open all indexes of the relation, and see if there are any + * analyzable columns in the indexes. We do not analyze index columns + * if there was an explicit column list in the ANALYZE command, + * however. */ vac_open_indexes(onerel, &nindexes, &Irel); hasindex = (nindexes > 0); @@ -253,10 +253,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) for (ind = 0; ind < nindexes; ind++) { AnlIndexData *thisdata = &indexdata[ind]; - IndexInfo *indexInfo; + IndexInfo *indexInfo; thisdata->indexInfo = indexInfo = BuildIndexInfo(Irel[ind]); - thisdata->tupleFract = 1.0; /* fix later if partial */ + thisdata->tupleFract = 1.0; /* fix later if partial */ if (indexInfo->ii_Expressions != NIL && vacstmt->va_cols == NIL) { ListCell *indexpr_item = list_head(indexInfo->ii_Expressions); @@ -273,25 +273,26 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) /* Found an index expression */ Node *indexkey; - if (indexpr_item == NULL) /* shouldn't happen */ + if (indexpr_item == NULL) /* shouldn't happen */ elog(ERROR, "too few entries in indexprs list"); indexkey = (Node *) lfirst(indexpr_item); indexpr_item = lnext(indexpr_item); /* - * Can't analyze if the opclass uses a storage type - * different from the expression result type. We'd - * get confused because the type shown in pg_attribute - * for the index column doesn't match what we are - * getting from the expression. Perhaps this can be - * fixed someday, but for now, punt. + * Can't analyze if the opclass uses a storage + * type different from the expression result type. + * We'd get confused because the type shown in + * pg_attribute for the index column doesn't match + * what we are getting from the expression. + * Perhaps this can be fixed someday, but for now, + * punt. */ if (exprType(indexkey) != Irel[ind]->rd_att->attrs[i]->atttypid) continue; thisdata->vacattrstats[tcnt] = - examine_attribute(Irel[ind], i+1); + examine_attribute(Irel[ind], i + 1); if (thisdata->vacattrstats[tcnt] != NULL) { tcnt++; @@ -401,10 +402,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) /* * If we are running a standalone ANALYZE, update pages/tuples stats - * in pg_class. We know the accurate page count from the smgr, - * but only an approximate number of tuples; therefore, if we are part - * of VACUUM ANALYZE do *not* overwrite the accurate count already - * inserted by VACUUM. The same consideration applies to indexes. + * in pg_class. We know the accurate page count from the smgr, but + * only an approximate number of tuples; therefore, if we are part of + * VACUUM ANALYZE do *not* overwrite the accurate count already + * inserted by VACUUM. The same consideration applies to indexes. 
*/ if (!vacstmt->vacuum) { @@ -446,7 +447,7 @@ compute_index_stats(Relation onerel, double totalrows, MemoryContext col_context) { MemoryContext ind_context, - old_context; + old_context; TupleDesc heapDescriptor; Datum attdata[INDEX_MAX_KEYS]; char nulls[INDEX_MAX_KEYS]; @@ -465,7 +466,7 @@ compute_index_stats(Relation onerel, double totalrows, for (ind = 0; ind < nindexes; ind++) { AnlIndexData *thisdata = &indexdata[ind]; - IndexInfo *indexInfo = thisdata->indexInfo; + IndexInfo *indexInfo = thisdata->indexInfo; int attr_cnt = thisdata->attr_cnt; TupleTable tupleTable; TupleTableSlot *slot; @@ -526,8 +527,9 @@ compute_index_stats(Relation onerel, double totalrows, if (attr_cnt > 0) { /* - * Evaluate the index row to compute expression values. - * We could do this by hand, but FormIndexDatum is convenient. + * Evaluate the index row to compute expression values. We + * could do this by hand, but FormIndexDatum is + * convenient. */ FormIndexDatum(indexInfo, heapTuple, @@ -535,16 +537,17 @@ compute_index_stats(Relation onerel, double totalrows, estate, attdata, nulls); + /* * Save just the columns we care about. */ for (i = 0; i < attr_cnt; i++) { VacAttrStats *stats = thisdata->vacattrstats[i]; - int attnum = stats->attr->attnum; + int attnum = stats->attr->attnum; - exprvals[tcnt] = attdata[attnum-1]; - exprnulls[tcnt] = (nulls[attnum-1] == 'n'); + exprvals[tcnt] = attdata[attnum - 1]; + exprnulls[tcnt] = (nulls[attnum - 1] == 'n'); tcnt++; } } @@ -552,7 +555,8 @@ compute_index_stats(Relation onerel, double totalrows, /* * Having counted the number of rows that pass the predicate in - * the sample, we can estimate the total number of rows in the index. + * the sample, we can estimate the total number of rows in the + * index. */ thisdata->tupleFract = (double) numindexrows / (double) numrows; totalindexrows = ceil(thisdata->tupleFract * totalrows); @@ -630,7 +634,7 @@ examine_attribute(Relation onerel, int attnum) stats->tupattnum = attnum; /* - * Call the type-specific typanalyze function. If none is specified, + * Call the type-specific typanalyze function. If none is specified, * use std_typanalyze(). */ if (OidIsValid(stats->attrtype->typanalyze)) @@ -667,10 +671,10 @@ static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize) { bs->N = nblocks; /* measured table size */ + /* - * If we decide to reduce samplesize for tables that have less or - * not much more than samplesize blocks, here is the place to do - * it. + * If we decide to reduce samplesize for tables that have less or not + * much more than samplesize blocks, here is the place to do it. */ bs->n = samplesize; bs->t = 0; /* blocks scanned so far */ @@ -686,10 +690,10 @@ BlockSampler_HasMore(BlockSampler bs) static BlockNumber BlockSampler_Next(BlockSampler bs) { - BlockNumber K = bs->N - bs->t; /* remaining blocks */ + BlockNumber K = bs->N - bs->t; /* remaining blocks */ int k = bs->n - bs->m; /* blocks still to sample */ - double p; /* probability to skip block */ - double V; /* random */ + double p; /* probability to skip block */ + double V; /* random */ Assert(BlockSampler_HasMore(bs)); /* hence K > 0 and k > 0 */ @@ -706,7 +710,7 @@ BlockSampler_Next(BlockSampler bs) * If we are to skip, we should advance t (hence decrease K), and * repeat the same probabilistic test for the next block. The naive * implementation thus requires a random_fract() call for each block - * number. But we can reduce this to one random_fract() call per + * number. 
But we can reduce this to one random_fract() call per * selected block, by noting that each time the while-test succeeds, * we can reinterpret V as a uniform random number in the range 0 to p. * Therefore, instead of choosing a new V, we just adjust p to be @@ -770,11 +774,11 @@ static int acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, double *totalrows) { - int numrows = 0; /* # rows collected */ - double liverows = 0; /* # rows seen */ + int numrows = 0; /* # rows collected */ + double liverows = 0; /* # rows seen */ double deadrows = 0; - double rowstoskip = -1; /* -1 means not set yet */ - BlockNumber totalblocks; + double rowstoskip = -1; /* -1 means not set yet */ + BlockNumber totalblocks; BlockSamplerData bs; double rstate; @@ -826,14 +830,13 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, { /* * The first targrows live rows are simply copied into the - * reservoir. - * Then we start replacing tuples in the sample until - * we reach the end of the relation. This algorithm is - * from Jeff Vitter's paper (see full citation below). + * reservoir. Then we start replacing tuples in the sample + * until we reach the end of the relation. This algorithm + * is from Jeff Vitter's paper (see full citation below). * It works by repeatedly computing the number of tuples * to skip before selecting a tuple, which replaces a - * randomly chosen element of the reservoir (current - * set of tuples). At all times the reservoir is a true + * randomly chosen element of the reservoir (current set + * of tuples). At all times the reservoir is a true * random sample of the tuples we've passed over so far, * so when we fall off the end of the relation we're done. */ @@ -842,10 +845,10 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, else { /* - * t in Vitter's paper is the number of records already - * processed. If we need to compute a new S value, we - * must use the not-yet-incremented value of liverows - * as t. + * t in Vitter's paper is the number of records + * already processed. If we need to compute a new S + * value, we must use the not-yet-incremented value of + * liverows as t. */ if (rowstoskip < 0) rowstoskip = get_next_S(liverows, targrows, &rstate); @@ -853,10 +856,10 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, if (rowstoskip <= 0) { /* - * Found a suitable tuple, so save it, - * replacing one old tuple at random + * Found a suitable tuple, so save it, replacing + * one old tuple at random */ - int k = (int) (targrows * random_fract()); + int k = (int) (targrows * random_fract()); Assert(k >= 0 && k < targrows); heap_freetuple(rows[k]); @@ -874,9 +877,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, else { /* - * Count dead rows, but not empty slots. This information is - * currently not used, but it seems likely we'll want it - * someday. + * Count dead rows, but not empty slots. This information + * is currently not used, but it seems likely we'll want + * it someday. */ if (targtuple.t_data != NULL) deadrows += 1; @@ -888,12 +891,12 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, } /* - * If we didn't find as many tuples as we wanted then we're done. - * No sort is needed, since they're already in order. + * If we didn't find as many tuples as we wanted then we're done. No + * sort is needed, since they're already in order. * * Otherwise we need to sort the collected tuples by position - * (itempointer). 
It's not worth worrying about corner cases - * where the tuples are already sorted. + * (itempointer). It's not worth worrying about corner cases where + * the tuples are already sorted. */ if (numrows == targrows) qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows); @@ -907,7 +910,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, *totalrows = 0.0; /* - * Emit some interesting relation info + * Emit some interesting relation info */ ereport(elevel, (errmsg("\"%s\": scanned %d of %u pages, " @@ -1128,10 +1131,10 @@ update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats) i = 0; values[i++] = ObjectIdGetDatum(relid); /* starelid */ - values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */ - values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */ + values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */ + values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */ values[i++] = Int32GetDatum(stats->stawidth); /* stawidth */ - values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */ + values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */ for (k = 0; k < STATISTIC_NUM_SLOTS; k++) { values[i++] = Int16GetDatum(stats->stakind[k]); /* stakindN */ @@ -1305,13 +1308,13 @@ static int *datumCmpTupnoLink; static void compute_minimal_stats(VacAttrStatsP stats, - AnalyzeAttrFetchFunc fetchfunc, - int samplerows, - double totalrows); + AnalyzeAttrFetchFunc fetchfunc, + int samplerows, + double totalrows); static void compute_scalar_stats(VacAttrStatsP stats, - AnalyzeAttrFetchFunc fetchfunc, - int samplerows, - double totalrows); + AnalyzeAttrFetchFunc fetchfunc, + int samplerows, + double totalrows); static int compare_scalars(const void *a, const void *b); static int compare_mcvs(const void *a, const void *b); diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c index a0cb1cc393..f9d257d1a1 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.114 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.115 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -106,7 +106,8 @@ */ static List *pendingNotifies = NIL; -static List *upperPendingNotifies = NIL; /* list of upper-xact lists */ +static List *upperPendingNotifies = NIL; /* list of upper-xact + * lists */ /* * State for inbound notifies consists of two flags: one saying whether @@ -524,25 +525,27 @@ AtCommit_Notify(void) rTuple = heap_modifytuple(lTuple, lRel, value, nulls, repl); + /* * We cannot use simple_heap_update here because the tuple * could have been modified by an uncommitted transaction; * specifically, since UNLISTEN releases exclusive lock on - * the table before commit, the other guy could already have - * tried to unlisten. There are no other cases where we - * should be able to see an uncommitted update or delete. - * Therefore, our response to a HeapTupleBeingUpdated result - * is just to ignore it. We do *not* wait for the other - * guy to commit --- that would risk deadlock, and we don't - * want to block while holding the table lock anyway for - * performance reasons. We also ignore HeapTupleUpdated, - * which could occur if the other guy commits between our - * heap_getnext and heap_update calls. 
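The acquire_sample_rows comments rewrapped in the analyze.c hunks above describe two-stage reservoir sampling: the first targrows rows fill the reservoir, after which each later row replaces a randomly chosen slot with decreasing probability. The backend follows Vitter's skip-based method, computing how many rows to pass over between replacements; the standalone sketch below uses the simpler per-row test (Algorithm R), which yields the same distribution without the skip optimization, and reduces rows to plain ints.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TARGROWS 5				/* reservoir size */

int
main(void)
{
	int			reservoir[TARGROWS];
	int			nrows = 100;	/* length of the incoming "stream" */
	int			liverows = 0;	/* rows seen so far */
	int			row;
	int			i;

	srand((unsigned) time(NULL));

	for (row = 0; row < nrows; row++)
	{
		if (liverows < TARGROWS)
		{
			/* The first TARGROWS rows are simply copied in. */
			reservoir[liverows] = row;
		}
		else
		{
			/*
			 * Replace a random slot with probability TARGROWS/(liverows+1):
			 * pick k uniformly in [0, liverows] and replace only if it
			 * falls inside the reservoir.  At all times the reservoir is a
			 * true random sample of the rows passed over so far, so when
			 * we fall off the end of the stream we're done.
			 */
			int			k = rand() % (liverows + 1);

			if (k < TARGROWS)
				reservoir[k] = row;
		}
		liverows++;
	}

	for (i = 0; i < TARGROWS; i++)
		printf("sampled row %d\n", reservoir[i]);
	return 0;
}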
+ * the table before commit, the other guy could already + * have tried to unlisten. There are no other cases where + * we should be able to see an uncommitted update or + * delete. Therefore, our response to a + * HeapTupleBeingUpdated result is just to ignore it. We + * do *not* wait for the other guy to commit --- that + * would risk deadlock, and we don't want to block while + * holding the table lock anyway for performance reasons. + * We also ignore HeapTupleUpdated, which could occur if + * the other guy commits between our heap_getnext and + * heap_update calls. */ result = heap_update(lRel, &lTuple->t_self, rTuple, &ctid, GetCurrentCommandId(), SnapshotAny, - false /* no wait for commit */); + false /* no wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: @@ -620,7 +623,7 @@ AtAbort_Notify(void) void AtSubStart_Notify(void) { - MemoryContext old_cxt; + MemoryContext old_cxt; /* Keep the list-of-lists in TopTransactionContext for simplicity */ old_cxt = MemoryContextSwitchTo(TopTransactionContext); @@ -640,13 +643,14 @@ AtSubStart_Notify(void) void AtSubCommit_Notify(void) { - List *parentPendingNotifies; + List *parentPendingNotifies; parentPendingNotifies = (List *) linitial(upperPendingNotifies); upperPendingNotifies = list_delete_first(upperPendingNotifies); /* - * We could try to eliminate duplicates here, but it seems not worthwhile. + * We could try to eliminate duplicates here, but it seems not + * worthwhile. */ pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies); } @@ -836,7 +840,7 @@ EnableNotifyInterrupt(void) bool DisableNotifyInterrupt(void) { - bool result = (notifyInterruptEnabled != 0); + bool result = (notifyInterruptEnabled != 0); notifyInterruptEnabled = 0; @@ -914,11 +918,12 @@ ProcessIncomingNotify(void) relname, (int) sourcePID); NotifyMyFrontEnd(relname, sourcePID); + /* * Rewrite the tuple with 0 in notification column. * - * simple_heap_update is safe here because no one else would - * have tried to UNLISTEN us, so there can be no uncommitted + * simple_heap_update is safe here because no one else would have + * tried to UNLISTEN us, so there can be no uncommitted * changes. */ rTuple = heap_modifytuple(lTuple, lRel, value, nulls, repl); diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 7b618db072..0bce21ffb9 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -11,7 +11,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.128 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.129 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -286,8 +286,8 @@ cluster_rel(RelToCluster *rvtc, bool recheck) /* * We grab exclusive access to the target rel and index for the * duration of the transaction. (This is redundant for the single- - * transaction case, since cluster() already did it.) The index - * lock is taken inside check_index_is_clusterable. + * transaction case, since cluster() already did it.) The index lock + * is taken inside check_index_is_clusterable. 
*/ OldHeap = heap_open(rvtc->tableOid, AccessExclusiveLock); @@ -391,7 +391,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid) if (isOtherTempNamespace(RelationGetNamespace(OldHeap))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot cluster temporary tables of other sessions"))); + errmsg("cannot cluster temporary tables of other sessions"))); /* Drop relcache refcnt on OldIndex, but keep lock */ index_close(OldIndex); @@ -438,7 +438,7 @@ mark_index_clustered(Relation rel, Oid indexOid) foreach(index, RelationGetIndexList(rel)) { - Oid thisIndexOid = lfirst_oid(index); + Oid thisIndexOid = lfirst_oid(index); indexTuple = SearchSysCacheCopy(INDEXRELID, ObjectIdGetDatum(thisIndexOid), @@ -540,8 +540,8 @@ rebuild_relation(Relation OldHeap, Oid indexOid) /* performDeletion does CommandCounterIncrement at end */ /* - * Rebuild each index on the relation (but not the toast table, - * which is all-new at this point). We do not need + * Rebuild each index on the relation (but not the toast table, which + * is all-new at this point). We do not need * CommandCounterIncrement() because reindex_relation does it. */ reindex_relation(tableOid, false); @@ -569,7 +569,7 @@ make_new_heap(Oid OIDOldHeap, const char *NewName, Oid NewTableSpace) OIDNewHeap = heap_create_with_catalog(NewName, RelationGetNamespace(OldHeap), - NewTableSpace, + NewTableSpace, tupdesc, OldHeap->rd_rel->relkind, OldHeap->rd_rel->relisshared, @@ -745,8 +745,8 @@ swap_relation_files(Oid r1, Oid r2) * their new owning relations. Otherwise the wrong one will get * dropped ... * - * NOTE: it is possible that only one table has a toast table; this - * can happen in CLUSTER if there were dropped columns in the old table, + * NOTE: it is possible that only one table has a toast table; this can + * happen in CLUSTER if there were dropped columns in the old table, * and in ALTER TABLE when adding or changing type of columns. * * NOTE: at present, a TOAST table's only dependency is the one on its @@ -802,15 +802,15 @@ swap_relation_files(Oid r1, Oid r2) /* * Blow away the old relcache entries now. We need this kluge because * relcache.c keeps a link to the smgr relation for the physical file, - * and that will be out of date as soon as we do CommandCounterIncrement. - * Whichever of the rels is the second to be cleared during cache - * invalidation will have a dangling reference to an already-deleted smgr - * relation. Rather than trying to avoid this by ordering operations - * just so, it's easiest to not have the relcache entries there at all. - * (Fortunately, since one of the entries is local in our transaction, - * it's sufficient to clear out our own relcache this way; the problem - * cannot arise for other backends when they see our update on the - * non-local relation.) + * and that will be out of date as soon as we do + * CommandCounterIncrement. Whichever of the rels is the second to be + * cleared during cache invalidation will have a dangling reference to + * an already-deleted smgr relation. Rather than trying to avoid this + * by ordering operations just so, it's easiest to not have the + * relcache entries there at all. (Fortunately, since one of the + * entries is local in our transaction, it's sufficient to clear out + * our own relcache this way; the problem cannot arise for other + * backends when they see our update on the non-local relation.) 
*/ RelationForgetRelation(r1); RelationForgetRelation(r2); diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c index 8e3e3a8caf..8a1b2e0c4c 100644 --- a/src/backend/commands/comment.c +++ b/src/backend/commands/comment.c @@ -7,7 +7,7 @@ * Copyright (c) 1996-2004, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.78 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.79 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -123,10 +123,10 @@ CommentObject(CommentStmt *stmt) CommentOpClass(stmt->objname, stmt->objargs, stmt->comment); break; case OBJECT_LARGEOBJECT: - CommentLargeObject(stmt->objname, stmt->comment); + CommentLargeObject(stmt->objname, stmt->comment); break; case OBJECT_CAST: - CommentCast(stmt->objname, stmt->objargs, stmt->comment); + CommentCast(stmt->objname, stmt->objargs, stmt->comment); break; default: elog(ERROR, "unrecognized object type: %d", @@ -401,8 +401,8 @@ CommentAttribute(List *qualname, char *comment) if (attnum == InvalidAttrNumber) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - attrname, RelationGetRelationName(relation)))); + errmsg("column \"%s\" of relation \"%s\" does not exist", + attrname, RelationGetRelationName(relation)))); /* Create the comment using the relation's oid */ @@ -462,7 +462,8 @@ CommentDatabase(List *qualname, char *comment) /* Only allow comments on the current database */ if (oid != MyDatabaseId) { - ereport(WARNING, /* throw just a warning so pg_restore doesn't fail */ + ereport(WARNING, /* throw just a warning so pg_restore + * doesn't fail */ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("database comments may only be applied to the current database"))); return; @@ -586,7 +587,7 @@ CommentRule(List *qualname, char *comment) ForwardScanDirection))) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("there are multiple rules named \"%s\"", rulename), + errmsg("there are multiple rules named \"%s\"", rulename), errhint("Specify a relation name as well as a rule name."))); heap_endscan(scanDesc); @@ -615,8 +616,8 @@ CommentRule(List *qualname, char *comment) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("rule \"%s\" for relation \"%s\" does not exist", - rulename, RelationGetRelationName(relation)))); + errmsg("rule \"%s\" for relation \"%s\" does not exist", + rulename, RelationGetRelationName(relation)))); Assert(reloid == ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class); ruleoid = HeapTupleGetOid(tuple); ReleaseSysCache(tuple); @@ -832,8 +833,8 @@ CommentTrigger(List *qualname, char *comment) if (!HeapTupleIsValid(triggertuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("trigger \"%s\" for table \"%s\" does not exist", - trigname, RelationGetRelationName(relation)))); + errmsg("trigger \"%s\" for table \"%s\" does not exist", + trigname, RelationGetRelationName(relation)))); oid = HeapTupleGetOid(triggertuple); @@ -924,8 +925,8 @@ CommentConstraint(List *qualname, char *comment) if (!OidIsValid(conOid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("constraint \"%s\" for table \"%s\" does not exist", - conName, RelationGetRelationName(relation)))); + errmsg("constraint \"%s\" for table \"%s\" does not exist", + conName, RelationGetRelationName(relation)))); /* Create the comment with the 
pg_constraint oid */ CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment); @@ -1003,7 +1004,7 @@ CommentLanguage(List *qualname, char *comment) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to comment on procedural language"))); + errmsg("must be superuser to comment on procedural language"))); /* pg_language doesn't have a hard-coded OID, so must look it up */ classoid = get_system_catalog_relid(LanguageRelationName); @@ -1084,7 +1085,7 @@ CommentOpClass(List *qualname, List *arguments, char *comment) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("operator class \"%s\" does not exist for access method \"%s\"", - NameListToString(qualname), amname))); + NameListToString(qualname), amname))); opcID = HeapTupleGetOid(tuple); @@ -1116,7 +1117,7 @@ CommentLargeObject(List *qualname, char *comment) { Oid loid; Oid classoid; - Node *node; + Node *node; Assert(list_length(qualname) == 1); node = (Node *) linitial(qualname); @@ -1127,19 +1128,20 @@ CommentLargeObject(List *qualname, char *comment) loid = intVal(node); break; case T_Float: + /* * Values too large for int4 will be represented as Float - * constants by the lexer. Accept these if they are valid - * OID strings. + * constants by the lexer. Accept these if they are valid OID + * strings. */ loid = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(strVal(node)))); + CStringGetDatum(strVal(node)))); break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); /* keep compiler quiet */ - loid = InvalidOid; + loid = InvalidOid; } /* check that the large object exists */ @@ -1152,7 +1154,7 @@ CommentLargeObject(List *qualname, char *comment) classoid = get_system_catalog_relid(LargeObjectRelationName); /* Call CreateComments() to create/drop the comments */ - CreateComments(loid, classoid, 0, comment); + CreateComments(loid, classoid, 0, comment); } /* @@ -1182,7 +1184,7 @@ CommentCast(List *qualname, List *arguments, char *comment) Assert(list_length(arguments) == 1); targettype = (TypeName *) linitial(arguments); Assert(IsA(targettype, TypeName)); - + sourcetypeid = typenameTypeId(sourcetype); if (!OidIsValid(sourcetypeid)) ereport(ERROR, @@ -1210,7 +1212,7 @@ CommentCast(List *qualname, List *arguments, char *comment) /* Get the OID of the cast */ castOid = HeapTupleGetOid(tuple); - + /* Permission check */ if (!pg_type_ownercheck(sourcetypeid, GetUserId()) && !pg_type_ownercheck(targettypeid, GetUserId())) @@ -1226,5 +1228,5 @@ CommentCast(List *qualname, List *arguments, char *comment) classoid = get_system_catalog_relid(CastRelationName); /* Call CreateComments() to create/drop the comments */ - CreateComments(castOid, classoid, 0, comment); + CreateComments(castOid, classoid, 0, comment); } diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c index 44b2ef266f..751e0b9152 100644 --- a/src/backend/commands/conversioncmds.c +++ b/src/backend/commands/conversioncmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.14 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.15 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -181,7 +181,7 @@ AlterConversionOwner(List *name, AclId newOwnerSysId) Oid conversionOid; HeapTuple tup; Relation rel; - Form_pg_conversion convForm; + Form_pg_conversion convForm; rel = 
heap_openr(ConversionRelationName, RowExclusiveLock); @@ -200,7 +200,7 @@ AlterConversionOwner(List *name, AclId newOwnerSysId) convForm = (Form_pg_conversion) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -212,7 +212,10 @@ AlterConversionOwner(List *name, AclId newOwnerSysId) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on tup because it's a copy */ + /* + * Modify the owner --- okay to scribble on tup because it's a + * copy + */ convForm->conowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index d060785d8d..5793c0b2bb 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.229 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.230 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -133,22 +133,22 @@ static void DoCopyTo(Relation rel, List *attnumlist, bool binary, bool oids, char *delim, char *null_print, bool csv_mode, char *quote, char *escape, List *force_quote_atts, bool fe_copy); static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, - char *delim, char *null_print, bool csv_mode, char *quote, char *escape, + char *delim, char *null_print, bool csv_mode, char *quote, char *escape, List *force_quote_atts); static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, - char *delim, char *null_print, bool csv_mode, char *quote, char *escape, + char *delim, char *null_print, bool csv_mode, char *quote, char *escape, List *force_notnull_atts); static bool CopyReadLine(void); static char *CopyReadAttribute(const char *delim, const char *null_print, - CopyReadResult *result, bool *isnull); + CopyReadResult *result, bool *isnull); static char *CopyReadAttributeCSV(const char *delim, const char *null_print, - char *quote, char *escape, - CopyReadResult *result, bool *isnull); + char *quote, char *escape, + CopyReadResult *result, bool *isnull); static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo, Oid typioparam, bool *isnull); static void CopyAttributeOut(char *string, char *delim); static void CopyAttributeOutCSV(char *string, char *delim, char *quote, - char *escape, bool force_quote); + char *escape, bool force_quote); static List *CopyGetAttnums(Relation rel, List *attnamelist); static void limit_printout_length(StringInfo buf); @@ -413,7 +413,7 @@ CopyGetData(void *databuf, int datasize) /* Try to receive another message */ int mtype; - readmessage: + readmessage: mtype = pq_getbyte(); if (mtype == EOF) ereport(ERROR, @@ -439,11 +439,12 @@ CopyGetData(void *databuf, int datasize) break; case 'H': /* Flush */ case 'S': /* Sync */ + /* * Ignore Flush/Sync for the convenience of * client libraries (such as libpq) that may - * send those without noticing that the command - * they just sent was COPY. + * send those without noticing that the + * command they just sent was COPY. 
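The Flush/Sync handling in the CopyGetData hunk above is easiest to see as a complete message pump. Here is a toy standalone version, reading single characters from stdin as stand-ins for protocol message types; the message letters match the hunk ('d' data, 'c' done, 'H' Flush, 'S' Sync), but everything else about it is illustrative.

#include <stdio.h>

int
main(void)
{
	int			mtype;

readmessage:
	mtype = getchar();
	switch (mtype)
	{
		case EOF:
		case 'c':				/* CopyDone */
			printf("copy finished\n");
			return 0;
		case 'd':				/* CopyData */
			printf("got data message\n");
			goto readmessage;
		case 'H':				/* Flush: ignore for client convenience */
		case 'S':				/* Sync: likewise */
		case '\n':				/* not a message type: skip line ends */
			goto readmessage;
		default:
			fprintf(stderr, "unexpected message type %c\n", mtype);
			return 1;
	}
}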
*/ goto readmessage; default: @@ -693,7 +694,7 @@ DoCopy(const CopyStmt *stmt) bool fe_copy = false; bool binary = false; bool oids = false; - bool csv_mode = false; + bool csv_mode = false; char *delim = NULL; char *quote = NULL; char *escape = NULL; @@ -773,7 +774,7 @@ DoCopy(const CopyStmt *stmt) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"))); - force_quote = (List *)defel->arg; + force_quote = (List *) defel->arg; } else if (strcmp(defel->defname, "force_notnull") == 0) { @@ -781,7 +782,7 @@ DoCopy(const CopyStmt *stmt) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"))); - force_notnull = (List *)defel->arg; + force_notnull = (List *) defel->arg; } else elog(ERROR, "option \"%s\" not recognized", @@ -806,7 +807,7 @@ DoCopy(const CopyStmt *stmt) /* Set defaults */ if (!delim) delim = csv_mode ? "," : "\t"; - + if (!null_print) null_print = csv_mode ? "" : "\\N"; @@ -817,7 +818,7 @@ DoCopy(const CopyStmt *stmt) if (!escape) escape = quote; } - + /* * Only single-character delimiter strings are supported. */ @@ -862,7 +863,7 @@ DoCopy(const CopyStmt *stmt) if (force_quote != NIL && is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force quote only available using COPY TO"))); + errmsg("COPY force quote only available using COPY TO"))); /* * Check force_notnull @@ -870,11 +871,11 @@ DoCopy(const CopyStmt *stmt) if (!csv_mode && force_notnull != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force not null available only in CSV mode"))); + errmsg("COPY force not null available only in CSV mode"))); if (force_notnull != NIL && !is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force not null only available using COPY FROM"))); + errmsg("COPY force not null only available using COPY FROM"))); /* * Don't allow the delimiter to appear in the null string. @@ -948,11 +949,11 @@ DoCopy(const CopyStmt *stmt) if (!list_member_int(attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("FORCE QUOTE column \"%s\" not referenced by COPY", - NameStr(attr[attnum - 1]->attname)))); + errmsg("FORCE QUOTE column \"%s\" not referenced by COPY", + NameStr(attr[attnum - 1]->attname)))); } } - + /* * Check that FORCE NOT NULL references valid COPY columns */ @@ -975,7 +976,7 @@ DoCopy(const CopyStmt *stmt) NameStr(attr[attnum - 1]->attname)))); } } - + /* * Set up variables to avoid per-attribute overhead. */ @@ -1152,9 +1153,9 @@ DoCopyTo(Relation rel, List *attnumlist, bool binary, bool oids, PG_CATCH(); { /* - * Make sure we turn off old-style COPY OUT mode upon error. - * It is okay to do this in all cases, since it does nothing - * if the mode is not on. + * Make sure we turn off old-style COPY OUT mode upon error. It is + * okay to do this in all cases, since it does nothing if the mode + * is not on. 
*/ pq_endcopyout(true); PG_RE_THROW(); @@ -1202,10 +1203,10 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, { int attnum = lfirst_int(cur); Oid out_func_oid; - + if (binary) getTypeBinaryOutputInfo(attr[attnum - 1]->atttypid, - &out_func_oid, &typioparams[attnum - 1], + &out_func_oid, &typioparams[attnum - 1], &isvarlena[attnum - 1]); else getTypeOutputInfo(attr[attnum - 1]->atttypid, @@ -1266,6 +1267,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, while ((tuple = heap_getnext(scandesc, ForwardScanDirection)) != NULL) { bool need_delim = false; + CHECK_FOR_INTERRUPTS(); MemoryContextReset(mycontext); @@ -1325,13 +1327,13 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, { string = DatumGetCString(FunctionCall3(&out_functions[attnum - 1], value, - ObjectIdGetDatum(typioparams[attnum - 1]), + ObjectIdGetDatum(typioparams[attnum - 1]), Int32GetDatum(attr[attnum - 1]->atttypmod))); if (csv_mode) { CopyAttributeOutCSV(string, delim, quote, escape, - (strcmp(string, null_print) == 0 || - force_quote[attnum - 1])); + (strcmp(string, null_print) == 0 || + force_quote[attnum - 1])); } else CopyAttributeOut(string, delim); @@ -1343,7 +1345,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1], value, - ObjectIdGetDatum(typioparams[attnum - 1]))); + ObjectIdGetDatum(typioparams[attnum - 1]))); /* We assume the result will not have been toasted */ CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ); CopySendData(VARDATA(outputbytes), @@ -1444,7 +1446,7 @@ limit_printout_length(StringInfo buf) { #define MAX_COPY_DATA_DISPLAY 100 - int len; + int len; /* Fast path if definitely okay */ if (buf->len <= MAX_COPY_DATA_DISPLAY) @@ -1551,7 +1553,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, /* Fetch the input function and typioparam info */ if (binary) getTypeBinaryInputInfo(attr[attnum - 1]->atttypid, - &in_func_oid, &typioparams[attnum - 1]); + &in_func_oid, &typioparams[attnum - 1]); else getTypeInputInfo(attr[attnum - 1]->atttypid, &in_func_oid, &typioparams[attnum - 1]); @@ -1561,7 +1563,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, force_notnull[attnum - 1] = true; else force_notnull[attnum - 1] = false; - + /* Get default info if needed */ if (!list_member_int(attnumlist, attnum)) { @@ -1603,7 +1605,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, COERCE_IMPLICIT_CAST, false); constraintexprs[attnum - 1] = ExecPrepareExpr((Expr *) node, - estate); + estate); hasConstraints = true; } } @@ -1718,10 +1720,10 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, done = CopyReadLine(); /* - * EOF at start of line means we're done. If we see EOF - * after some characters, we act as though it was newline - * followed by EOF, ie, process the line and then exit loop - * on next iteration. + * EOF at start of line means we're done. If we see EOF after + * some characters, we act as though it was newline followed + * by EOF, ie, process the line and then exit loop on next + * iteration. 
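Per the CopyFrom comment above, EOF at the start of a line ends the load, while EOF after some characters acts as an implicit final newline: the partial line is processed and the loop exits on the next iteration. A standalone reader showing that convention; read_line is this sketch's helper, not the backend's CopyReadLine.

#include <stdio.h>

/*
 * Read one line from fp into buf.  Returns 1 once EOF has been reached;
 * the caller stops immediately only if the buffer is also empty, and
 * otherwise processes the partial line first, so EOF after data behaves
 * like newline followed by EOF.
 */
static int
read_line(FILE *fp, char *buf, size_t bufsize)
{
	size_t		len = 0;
	int			c;

	while ((c = fgetc(fp)) != EOF && c != '\n')
	{
		if (len + 1 < bufsize)
			buf[len++] = (char) c;
	}
	buf[len] = '\0';
	return (c == EOF);
}

int
main(void)
{
	char		buf[256];

	for (;;)
	{
		int			done = read_line(stdin, buf, sizeof(buf));

		if (done && buf[0] == '\0')
			break;				/* EOF at start of line: we're done */
		printf("line: [%s]\n", buf);
		if (done)
			break;				/* EOF after data: process, then exit */
	}
	return 0;
}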
*/ if (done && line_buf.len == 0) break; @@ -1770,29 +1772,29 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, if (csv_mode) { string = CopyReadAttributeCSV(delim, null_print, quote, - escape, &result, &isnull); + escape, &result, &isnull); if (result == UNTERMINATED_FIELD) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), - errmsg("unterminated CSV quoted field"))); + errmsg("unterminated CSV quoted field"))); } else - string = CopyReadAttribute(delim, null_print, + string = CopyReadAttribute(delim, null_print, &result, &isnull); if (csv_mode && isnull && force_notnull[m]) { - string = null_print; /* set to NULL string */ + string = null_print; /* set to NULL string */ isnull = false; } - /* we read an SQL NULL, no need to do anything */ + /* we read an SQL NULL, no need to do anything */ if (!isnull) { copy_attname = NameStr(attr[m]->attname); values[m] = FunctionCall3(&in_functions[m], CStringGetDatum(string), - ObjectIdGetDatum(typioparams[m]), + ObjectIdGetDatum(typioparams[m]), Int32GetDatum(attr[m]->atttypmod)); nulls[m] = ' '; copy_attname = NULL; @@ -1809,7 +1811,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, if (result == NORMAL_ATTR && line_buf.len != 0) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), - errmsg("extra data after last expected column"))); + errmsg("extra data after last expected column"))); } else { @@ -1835,8 +1837,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, copy_attname = "oid"; loaded_oid = DatumGetObjectId(CopyReadBinaryAttribute(0, - &oid_in_function, - oid_typioparam, + &oid_in_function, + oid_typioparam, &isnull)); if (isnull || loaded_oid == InvalidOid) ereport(ERROR, @@ -2022,14 +2024,14 @@ CopyReadLine(void) result = false; /* - * In this loop we only care for detecting newlines (\r and/or \n) - * and the end-of-copy marker (\.). For backwards compatibility - * we allow backslashes to escape newline characters. Backslashes - * other than the end marker get put into the line_buf, since - * CopyReadAttribute does its own escape processing. These four - * characters, and only these four, are assumed the same in frontend - * and backend encodings. We do not assume that second and later bytes - * of a frontend multibyte character couldn't look like ASCII characters. + * In this loop we only care for detecting newlines (\r and/or \n) and + * the end-of-copy marker (\.). For backwards compatibility we allow + * backslashes to escape newline characters. Backslashes other than + * the end marker get put into the line_buf, since CopyReadAttribute + * does its own escape processing. These four characters, and only + * these four, are assumed the same in frontend and backend encodings. + * We do not assume that second and later bytes of a frontend + * multibyte character couldn't look like ASCII characters. */ for (;;) { @@ -2120,9 +2122,9 @@ CopyReadLine(void) errmsg("end-of-copy marker does not match previous newline style"))); /* - * In protocol version 3, we should ignore anything - * after \. up to the protocol end of copy data. (XXX - * maybe better not to treat \. as special?) + * In protocol version 3, we should ignore anything after + * \. up to the protocol end of copy data. (XXX maybe + * better not to treat \. as special?) 
*/ if (copy_dest == COPY_NEW_FE) { @@ -2140,10 +2142,10 @@ CopyReadLine(void) /* * When client encoding != server, must be careful to read the - * extra bytes of a multibyte character exactly, since the encoding - * might not ensure they don't look like ASCII. When the encodings - * are the same, we need not do this, since no server encoding we - * use has ASCII-like following bytes. + * extra bytes of a multibyte character exactly, since the + * encoding might not ensure they don't look like ASCII. When the + * encodings are the same, we need not do this, since no server + * encoding we use has ASCII-like following bytes. */ if (change_encoding) { @@ -2162,7 +2164,7 @@ CopyReadLine(void) if (result) break; /* out of outer loop */ } - } /* end of outer loop */ + } /* end of outer loop */ /* * Done reading the line. Convert it to server encoding. @@ -2170,8 +2172,9 @@ CopyReadLine(void) * Note: set line_buf_converted to true *before* attempting conversion; * this prevents infinite recursion during error reporting should * pg_client_to_server() issue an error, due to copy_in_error_callback - * again attempting the same conversion. We'll end up issuing the message - * without conversion, which is bad but better than nothing ... + * again attempting the same conversion. We'll end up issuing the + * message without conversion, which is bad but better than nothing + * ... */ line_buf_converted = true; @@ -2295,9 +2298,11 @@ CopyReadAttribute(const char *delim, const char *null_print, case 'v': c = '\v'; break; - /* - * in all other cases, take the char after '\' literally - */ + + /* + * in all other cases, take the char after '\' + * literally + */ } } appendStringInfoCharMacro(&attribute_buf, c); @@ -2316,7 +2321,7 @@ CopyReadAttribute(const char *delim, const char *null_print, /* - * Read the value of a single attribute in CSV mode, + * Read the value of a single attribute in CSV mode, * performing de-escaping as needed. Escaping does not follow the normal * PostgreSQL text mode, but instead "standard" (i.e. common) CSV usage. * @@ -2329,7 +2334,7 @@ CopyReadAttribute(const char *delim, const char *null_print, * *result is set to indicate what terminated the read: * NORMAL_ATTR: column delimiter * END_OF_LINE: end of line - * UNTERMINATED_FIELD no quote detected at end of a quoted field + * UNTERMINATED_FIELD no quote detected at end of a quoted field * * In any case, the string read up to the terminator (or end of file) * is returned. 
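(Editor's sketch, not part of the commit.) The doc comment reflowed above spells out the result-code contract for CSV reads: NORMAL_ATTR when an unquoted delimiter ends the field, END_OF_LINE at end of line, UNTERMINATED_FIELD when a quoted section never closes. Below is a minimal standalone C illustration of those rules under stated assumptions: the names read_csv_field and CsvReadResult are illustrative rather than the backend's, line_buf/StringInfo are replaced by a plain NUL-terminated string, and the multiline-quoted-field continuation and NULL detection that the real CopyReadAttributeCSV performs are omitted.

    #include <stdio.h>

    typedef enum
    {
        NORMAL_ATTR,            /* stopped at an unquoted delimiter */
        END_OF_LINE,            /* consumed the rest of the line */
        UNTERMINATED_FIELD      /* quoted section never closed */
    } CsvReadResult;

    /* Read one CSV field from *src into buf; advance *src past the field. */
    static CsvReadResult
    read_csv_field(const char **src, char delimc, char quotec, char escapec,
                   char *buf, int buflen)
    {
        const char *s = *src;
        int         n = 0;
        int         in_quote = 0;
        CsvReadResult result = END_OF_LINE;

        for (;;)
        {
            char        c;

            if (*s == '\0')
            {
                if (in_quote)
                    result = UNTERMINATED_FIELD;
                break;
            }
            c = *s++;

            if (!in_quote && c == delimc)
            {
                result = NORMAL_ATTR;   /* unquoted field delimiter */
                break;
            }
            if (!in_quote && c == quotec)
            {
                in_quote = 1;           /* start of quoted section */
                continue;
            }
            if (in_quote && c == escapec)
            {
                char        nextc = *s; /* peek at the next character */

                if (nextc == escapec || nextc == quotec)
                {
                    if (n < buflen - 1)
                        buf[n++] = nextc;
                    s++;
                    continue;
                }
            }

            /*
             * The end-of-quote test comes after the escape test, so a
             * doubled quote still works when quotec == escapec, the
             * common CSV case (as the hunk below notes).
             */
            if (in_quote && c == quotec)
            {
                in_quote = 0;
                continue;
            }
            if (n < buflen - 1)
                buf[n++] = c;
        }
        buf[n] = '\0';
        *src = s;
        return result;
    }

    int
    main(void)
    {
        const char *line = "\"a\"\"b\",plain";
        char        field[64];

        while (read_csv_field(&line, ',', '"', '"', field, sizeof(field))
               == NORMAL_ATTR)
            printf("field: %s\n", field);    /* prints: field: a"b */
        printf("last:  %s\n", field);        /* prints: last:  plain */
        return 0;
    }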
@@ -2345,15 +2350,15 @@ static char * CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, char *escape, CopyReadResult *result, bool *isnull) { - char delimc = delim[0]; - char quotec = quote[0]; - char escapec = escape[0]; + char delimc = delim[0]; + char quotec = quote[0]; + char escapec = escape[0]; char c; int start_cursor = line_buf.cursor; int end_cursor = start_cursor; int input_len; - bool in_quote = false; - bool saw_quote = false; + bool in_quote = false; + bool saw_quote = false; /* reset attribute_buf to empty */ attribute_buf.len = 0; @@ -2367,18 +2372,18 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, /* handle multiline quoted fields */ if (in_quote && line_buf.cursor >= line_buf.len) { - bool done; + bool done; - switch(eol_type) + switch (eol_type) { case EOL_NL: - appendStringInfoString(&attribute_buf,"\n"); + appendStringInfoString(&attribute_buf, "\n"); break; case EOL_CR: - appendStringInfoString(&attribute_buf,"\r"); + appendStringInfoString(&attribute_buf, "\r"); break; case EOL_CRNL: - appendStringInfoString(&attribute_buf,"\r\n"); + appendStringInfoString(&attribute_buf, "\r\n"); break; case EOL_UNKNOWN: /* shouldn't happen - just keep going */ @@ -2396,16 +2401,18 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, if (line_buf.cursor >= line_buf.len) break; c = line_buf.data[line_buf.cursor++]; - /* - * unquoted field delimiter + + /* + * unquoted field delimiter */ if (!in_quote && c == delimc) { *result = NORMAL_ATTR; break; } - /* - * start of quoted field (or part of field) + + /* + * start of quoted field (or part of field) */ if (!in_quote && c == quotec) { @@ -2413,18 +2420,20 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, in_quote = true; continue; } - /* + + /* * escape within a quoted field */ if (in_quote && c == escapec) { - /* - * peek at the next char if available, and escape it if it - * is an escape char or a quote char + /* + * peek at the next char if available, and escape it if it is + * an escape char or a quote char */ if (line_buf.cursor <= line_buf.len) { - char nextc = line_buf.data[line_buf.cursor]; + char nextc = line_buf.data[line_buf.cursor]; + if (nextc == escapec || nextc == quotec) { appendStringInfoCharMacro(&attribute_buf, nextc); @@ -2433,10 +2442,11 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, } } } + /* - * end of quoted field. - * Must do this test after testing for escape in case quote char - * and escape char are the same (which is the common case). + * end of quoted field. Must do this test after testing for escape + * in case quote char and escape char are the same (which is the + * common case). 
*/ if (in_quote && c == quotec) { @@ -2586,7 +2596,7 @@ CopyAttributeOut(char *server_string, char *delim) } /* - * Send CSV representation of one attribute, with conversion and + * Send CSV representation of one attribute, with conversion and * CSV type escaping */ static void @@ -2596,9 +2606,9 @@ CopyAttributeOutCSV(char *server_string, char *delim, char *quote, char *string; char c; char delimc = delim[0]; - char quotec = quote[0]; - char escapec = escape[0]; - char *test_string; + char quotec = quote[0]; + char escapec = escape[0]; + char *test_string; bool same_encoding; int mblen; int i; @@ -2610,13 +2620,14 @@ CopyAttributeOutCSV(char *server_string, char *delim, char *quote, else string = server_string; - /* have to run through the string twice, - * first time to see if it needs quoting, second to actually send it + /* + * have to run through the string twice, first time to see if it needs + * quoting, second to actually send it */ - for(test_string = string; - !use_quote && (c = *test_string) != '\0'; - test_string += mblen) + for (test_string = string; + !use_quote && (c = *test_string) != '\0'; + test_string += mblen) { if (c == delimc || c == quotec || c == '\n' || c == '\r') use_quote = true; @@ -2695,8 +2706,8 @@ CopyGetAttnums(Relation rel, List *attnamelist) if (list_member_int(attnums, attnum)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" specified more than once", - name))); + errmsg("column \"%s\" specified more than once", + name))); attnums = lappend_int(attnums, attnum); } } diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 055b7be7eb..f7ef440b02 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.140 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.141 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -78,7 +78,7 @@ createdb(const CreatedbStmt *stmt) Oid dboid; AclId datdba; ListCell *option; - DefElem *dtablespacename = NULL; + DefElem *dtablespacename = NULL; DefElem *downer = NULL; DefElem *dtemplate = NULL; DefElem *dencoding = NULL; @@ -86,6 +86,7 @@ createdb(const CreatedbStmt *stmt) char *dbowner = NULL; char *dbtemplate = NULL; int encoding = -1; + #ifndef WIN32 char buf[2 * MAXPGPATH + 100]; #endif @@ -224,7 +225,7 @@ createdb(const CreatedbStmt *stmt) &src_vacuumxid, &src_frozenxid, &src_deftablespace)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_DATABASE), - errmsg("template database \"%s\" does not exist", dbtemplate))); + errmsg("template database \"%s\" does not exist", dbtemplate))); /* * Permission check: to copy a DB that's not marked datistemplate, you @@ -265,7 +266,7 @@ createdb(const CreatedbStmt *stmt) if (dtablespacename && dtablespacename->arg) { char *tablespacename; - AclResult aclresult; + AclResult aclresult; tablespacename = strVal(dtablespacename->arg); dst_deftablespace = get_tablespace_oid(tablespacename); @@ -275,11 +276,11 @@ createdb(const CreatedbStmt *stmt) errmsg("tablespace \"%s\" does not exist", tablespacename))); /* check permissions */ - aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(), + aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, - tablespacename); + if (aclresult != ACLCHECK_OK) + 
aclcheck_error(aclresult, ACL_KIND_TABLESPACE, + tablespacename); } else { @@ -308,22 +309,22 @@ createdb(const CreatedbStmt *stmt) closeAllVfds(); /* - * Iterate through all tablespaces of the template database, and - * copy each one to the new database. + * Iterate through all tablespaces of the template database, and copy + * each one to the new database. * - * If we are trying to change the default tablespace of the template, - * we require that the template not have any files in the new default - * tablespace. This avoids the need to merge two subdirectories. - * This could probably be improved later. + * If we are trying to change the default tablespace of the template, we + * require that the template not have any files in the new default + * tablespace. This avoids the need to merge two subdirectories. This + * could probably be improved later. */ rel = heap_openr(TableSpaceRelationName, AccessShareLock); scan = heap_beginscan(rel, SnapshotNow, 0, NULL); while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { - Oid srctablespace = HeapTupleGetOid(tuple); - Oid dsttablespace; - char *srcpath; - char *dstpath; + Oid srctablespace = HeapTupleGetOid(tuple); + Oid dsttablespace; + char *srcpath; + char *dstpath; struct stat st; /* No need to copy global tablespace */ @@ -351,10 +352,11 @@ createdb(const CreatedbStmt *stmt) remove_dbtablespaces(dboid); ereport(ERROR, (errmsg("could not initialize database directory"), - errdetail("Directory \"%s\" already exists.", dstpath))); + errdetail("Directory \"%s\" already exists.", dstpath))); } #ifndef WIN32 + /* * Copy this subdirectory to the new location * @@ -374,7 +376,7 @@ createdb(const CreatedbStmt *stmt) errdetail("Failing system command was: %s", buf), errhint("Look in the postmaster's stderr log for more information."))); } -#else /* WIN32 */ +#else /* WIN32 */ if (copydir(srcpath, dstpath) != 0) { /* copydir should already have given details of its troubles */ @@ -382,7 +384,7 @@ createdb(const CreatedbStmt *stmt) ereport(ERROR, (errmsg("could not initialize database directory"))); } -#endif /* WIN32 */ +#endif /* WIN32 */ } heap_endscan(scan); heap_close(rel, AccessShareLock); @@ -772,7 +774,7 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId) Relation rel; ScanKeyData scankey; SysScanDesc scan; - Form_pg_database datForm; + Form_pg_database datForm; rel = heap_openr(DatabaseRelationName, RowExclusiveLock); ScanKeyInit(&scankey, @@ -789,16 +791,17 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId) datForm = (Form_pg_database) GETSTRUCT(tuple); - /* + /* * If the new owner is the same as the existing owner, consider the - * command to have succeeded. This is to be consistent with other objects. + * command to have succeeded. This is to be consistent with other + * objects. */ if (datForm->datdba != newOwnerSysId) { Datum repl_val[Natts_pg_database]; char repl_null[Natts_pg_database]; char repl_repl[Natts_pg_database]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -821,9 +824,9 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId) * necessary when the ACL is non-null. 
*/ aclDatum = heap_getattr(tuple, - Anum_pg_database_datacl, - RelationGetDescr(rel), - &isNull); + Anum_pg_database_datacl, + RelationGetDescr(rel), + &isNull); if (!isNull) { newAcl = aclnewowner(DatumGetAclP(aclDatum), @@ -941,16 +944,16 @@ have_createdb_privilege(void) static void remove_dbtablespaces(Oid db_id) { - Relation rel; + Relation rel; HeapScanDesc scan; - HeapTuple tuple; + HeapTuple tuple; rel = heap_openr(TableSpaceRelationName, AccessShareLock); scan = heap_beginscan(rel, SnapshotNow, 0, NULL); while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { - Oid dsttablespace = HeapTupleGetOid(tuple); - char *dstpath; + Oid dsttablespace = HeapTupleGetOid(tuple); + char *dstpath; struct stat st; /* Don't mess with the global tablespace */ @@ -969,9 +972,9 @@ remove_dbtablespaces(Oid db_id) if (!rmtree(dstpath, true)) { ereport(WARNING, - (errmsg("could not remove database directory \"%s\"", - dstpath), - errhint("Look in the postmaster's stderr log for more information."))); + (errmsg("could not remove database directory \"%s\"", + dstpath), + errhint("Look in the postmaster's stderr log for more information."))); } pfree(dstpath); diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c index cc2643d637..dc2ea2974a 100644 --- a/src/backend/commands/define.c +++ b/src/backend/commands/define.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.90 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.91 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -126,8 +126,8 @@ bool defGetBoolean(DefElem *def) { /* - * Presently, boolean flags must simply be present or absent. - * Later we could allow 'flag = t', 'flag = f', etc. + * Presently, boolean flags must simply be present or absent. Later we + * could allow 'flag = t', 'flag = f', etc. */ if (def->arg == NULL) return true; @@ -265,7 +265,7 @@ defGetTypeLength(DefElem *def) case T_TypeName: /* cope if grammar chooses to believe "variable" is a typename */ if (pg_strcasecmp(TypeNameToString((TypeName *) def->arg), - "variable") == 0) + "variable") == 0) return -1; /* variable length */ break; case T_List: diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 29b4f30fce..7ad3596fac 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994-5, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.123 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.124 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -317,7 +317,7 @@ explain_outNode(StringInfo str, Plan *outer_plan, int indent, ExplainState *es) { - ListCell *l; + ListCell *l; char *pname; int i; diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 8a139e5601..7bce0b9b9d 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -3,14 +3,14 @@ * functioncmds.c * * Routines for CREATE and DROP FUNCTION commands and CREATE and DROP - * CAST commands. + * CAST commands. 
* * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.51 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.52 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * These routines take the parse tree and pick out the @@ -449,14 +449,14 @@ CreateFunction(CreateFunctionStmt *stmt) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("language \"%s\" does not exist", languageName), - (strcmp(languageName, "plperl") == 0 || - strcmp(languageName, "plperlu") == 0 || - strcmp(languageName, "plpgsql") == 0 || - strcmp(languageName, "plpythonu") == 0 || - strcmp(languageName, "pltcl") == 0 || - strcmp(languageName, "pltclu") == 0) ? + (strcmp(languageName, "plperl") == 0 || + strcmp(languageName, "plperlu") == 0 || + strcmp(languageName, "plpgsql") == 0 || + strcmp(languageName, "plpythonu") == 0 || + strcmp(languageName, "pltcl") == 0 || + strcmp(languageName, "pltclu") == 0) ? errhint("You need to use \"createlang\" to load the language into the database.") : 0)); - + languageOid = HeapTupleGetOid(languageTuple); languageStruct = (Form_pg_language) GETSTRUCT(languageTuple); @@ -490,7 +490,7 @@ CreateFunction(CreateFunctionStmt *stmt) &prorettype, &returnsSet); parameterCount = examine_parameter_list(stmt->parameters, languageOid, - parameterTypes, parameterNames); + parameterTypes, parameterNames); compute_attributes_with_style(stmt->withClause, &isStrict, &volatility); @@ -739,8 +739,8 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId) procOid = LookupFuncNameTypeNames(name, argtypes, false); tup = SearchSysCache(PROCOID, - ObjectIdGetDatum(procOid), - 0, 0, 0); + ObjectIdGetDatum(procOid), + 0, 0, 0); if (!HeapTupleIsValid(tup)) /* should not happen */ elog(ERROR, "cache lookup failed for function %u", procOid); procForm = (Form_pg_proc) GETSTRUCT(tup); @@ -750,9 +750,9 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId) (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an aggregate function", NameListToString(name)), - errhint("Use ALTER AGGREGATE to change owner of aggregate functions."))); + errhint("Use ALTER AGGREGATE to change owner of aggregate functions."))); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. 
*/ @@ -761,7 +761,7 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId) Datum repl_val[Natts_pg_proc]; char repl_null[Natts_pg_proc]; char repl_repl[Natts_pg_proc]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -968,7 +968,7 @@ CreateCast(CreateCastStmt *stmt) if (nargs < 1 || nargs > 3) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("cast function must take one to three arguments"))); + errmsg("cast function must take one to three arguments"))); if (procstruct->proargtypes[0] != sourcetypeid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 04761fac68..6e550e67c6 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.124 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.125 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -47,10 +47,10 @@ /* non-export function prototypes */ static void CheckPredicate(Expr *predicate); static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP, - List *attList, - Oid relId, - char *accessMethodName, Oid accessMethodId, - bool isconstraint); + List *attList, + Oid relId, + char *accessMethodName, Oid accessMethodId, + bool isconstraint); static Oid GetIndexOpClass(List *opclass, Oid attrType, char *accessMethodName, Oid accessMethodId); static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId); @@ -143,7 +143,8 @@ DefineIndex(RangeVar *heapRelation, * Verify we (still) have CREATE rights in the rel's namespace. * (Presumably we did when the rel was created, but maybe not * anymore.) Skip check if caller doesn't want it. Also skip check - * if bootstrapping, since permissions machinery may not be working yet. + * if bootstrapping, since permissions machinery may not be working + * yet. */ if (check_rights && !IsBootstrapProcessingMode()) { @@ -159,7 +160,7 @@ DefineIndex(RangeVar *heapRelation, /* Determine tablespace to use */ if (tableSpaceName) { - AclResult aclresult; + AclResult aclresult; tablespaceId = get_tablespace_oid(tableSpaceName); if (!OidIsValid(tablespaceId)) @@ -173,7 +174,9 @@ DefineIndex(RangeVar *heapRelation, if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_TABLESPACE, tableSpaceName); - } else { + } + else + { /* Use the parent rel's tablespace */ tablespaceId = get_rel_tablespace(relationId); /* Note there is no additional permission check in this path */ @@ -256,9 +259,9 @@ DefineIndex(RangeVar *heapRelation, /* * If ALTER TABLE, check that there isn't already a PRIMARY KEY. - * In CREATE TABLE, we have faith that the parser rejected multiple - * pkey clauses; and CREATE INDEX doesn't have a way to say - * PRIMARY KEY, so it's no problem either. + * In CREATE TABLE, we have faith that the parser rejected + * multiple pkey clauses; and CREATE INDEX doesn't have a way to + * say PRIMARY KEY, so it's no problem either. */ if (is_alter_table && relationHasPrimaryKey(rel)) @@ -270,8 +273,8 @@ DefineIndex(RangeVar *heapRelation, } /* - * Check that all of the attributes in a primary key are marked as not - * null, otherwise attempt to ALTER TABLE .. SET NOT NULL + * Check that all of the attributes in a primary key are marked as + * not null, otherwise attempt to ALTER TABLE .. 
SET NOT NULL */ cmds = NIL; foreach(keys, attributeList) @@ -294,7 +297,7 @@ DefineIndex(RangeVar *heapRelation, if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull) { /* Add a subcommand to make this one NOT NULL */ - AlterTableCmd *cmd = makeNode(AlterTableCmd); + AlterTableCmd *cmd = makeNode(AlterTableCmd); cmd->subtype = AT_SetNotNull; cmd->name = key->name; @@ -318,15 +321,15 @@ DefineIndex(RangeVar *heapRelation, } /* - * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade - * to child tables? Currently, since the PRIMARY KEY - * itself doesn't cascade, we don't cascade the - * notnull constraint(s) either; but this is pretty debatable. + * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child + * tables? Currently, since the PRIMARY KEY itself doesn't + * cascade, we don't cascade the notnull constraint(s) either; but + * this is pretty debatable. * - * XXX: possible future improvement: when being called from - * ALTER TABLE, it would be more efficient to merge this with - * the outer ALTER TABLE, so as to avoid two scans. But that - * seems to complicate DefineIndex's API unduly. + * XXX: possible future improvement: when being called from ALTER + * TABLE, it would be more efficient to merge this with the outer + * ALTER TABLE, so as to avoid two scans. But that seems to + * complicate DefineIndex's API unduly. */ if (cmds) AlterTableInternal(relationId, cmds, false); @@ -352,15 +355,15 @@ DefineIndex(RangeVar *heapRelation, heap_close(rel, NoLock); /* - * Report index creation if appropriate (delay this till after most - * of the error checks) + * Report index creation if appropriate (delay this till after most of + * the error checks) */ if (isconstraint && !quiet) ereport(NOTICE, (errmsg("%s %s will create implicit index \"%s\" for table \"%s\"", - is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /", + is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /", primary ? "PRIMARY KEY" : "UNIQUE", - indexRelationName, RelationGetRelationName(rel)))); + indexRelationName, RelationGetRelationName(rel)))); index_create(relationId, indexRelationName, indexInfo, accessMethodId, tablespaceId, classObjectId, @@ -450,8 +453,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo, if (isconstraint) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" named in key does not exist", - attribute->name))); + errmsg("column \"%s\" named in key does not exist", + attribute->name))); else ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), @@ -488,11 +491,11 @@ ComputeIndexAttrs(IndexInfo *indexInfo, if (contain_subplans(attribute->expr)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use subquery in index expression"))); + errmsg("cannot use subquery in index expression"))); if (contain_agg_clause(attribute->expr)) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in index expression"))); + errmsg("cannot use aggregate function in index expression"))); /* * A expression using mutable functions is probably wrong, @@ -647,7 +650,7 @@ GetDefaultOpClass(Oid attrType, Oid accessMethodId) * than one exact match, then someone put bogus entries in pg_opclass. * * The initial search is done by namespace.c so that we only consider - * opclasses visible in the current namespace search path. (See also + * opclasses visible in the current namespace search path. (See also * typcache.c, which applies the same logic, but over all opclasses.) 
*/ for (opclass = OpclassGetCandidates(accessMethodId); @@ -962,16 +965,16 @@ ReindexTable(RangeVar *relation, bool force /* currently unused */ ) * separate transaction, so we can release the lock on it right away. */ void -ReindexDatabase(const char *dbname, bool force /* currently unused */, +ReindexDatabase(const char *dbname, bool force /* currently unused */ , bool all) { - Relation relationRelation; + Relation relationRelation; HeapScanDesc scan; - HeapTuple tuple; + HeapTuple tuple; MemoryContext private_context; MemoryContext old; - List *relids = NIL; - ListCell *l; + List *relids = NIL; + ListCell *l; AssertArg(dbname); @@ -1006,7 +1009,7 @@ ReindexDatabase(const char *dbname, bool force /* currently unused */, /* * We always want to reindex pg_class first. This ensures that if * there is any corruption in pg_class' indexes, they will be fixed - * before we process any other tables. This is critical because + * before we process any other tables. This is critical because * reindexing itself will try to update pg_class. */ old = MemoryContextSwitchTo(private_context); @@ -1054,7 +1057,7 @@ ReindexDatabase(const char *dbname, bool force /* currently unused */, CommitTransactionCommand(); foreach(l, relids) { - Oid relid = lfirst_oid(l); + Oid relid = lfirst_oid(l); StartTransactionCommand(); SetQuerySnapshot(); /* might be needed for functions in diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c index 0b2bc391f1..e0f58d9ab2 100644 --- a/src/backend/commands/opclasscmds.c +++ b/src/backend/commands/opclasscmds.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.27 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.28 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -300,8 +300,8 @@ DefineOpClass(CreateOpClassStmt *stmt) errmsg("could not make operator class \"%s\" be default for type %s", opcname, TypeNameToString(stmt->datatype)), - errdetail("Operator class \"%s\" already is the default.", - NameStr(opclass->opcname)))); + errdetail("Operator class \"%s\" already is the default.", + NameStr(opclass->opcname)))); } systable_endscan(scan); @@ -419,6 +419,7 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid) if (optup == NULL) elog(ERROR, "cache lookup failed for operator %u", operOid); opform = (Form_pg_operator) GETSTRUCT(optup); + /* * btree operators must be binary ops returning boolean, and the * left-side input type must match the operator class' input type. @@ -434,10 +435,11 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid) if (opform->oprleft != typeoid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree operators must have index type as left input"))); + errmsg("btree operators must have index type as left input"))); + /* - * The subtype is "default" (0) if oprright matches the operator class, - * otherwise it is oprright. + * The subtype is "default" (0) if oprright matches the operator + * class, otherwise it is oprright. */ if (opform->oprright == typeoid) subtype = InvalidOid; @@ -471,6 +473,7 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid) if (proctup == NULL) elog(ERROR, "cache lookup failed for function %u", procOid); procform = (Form_pg_proc) GETSTRUCT(proctup); + /* * btree support procs must be 2-arg procs returning int4, and the * first input type must match the operator class' input type. 
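(Editor's sketch, not part of the commit.) The reflowed comments in the assignOperSubtype hunk above and the matching assignProcSubtype hunk just below state the same rule: the stored subtype is "default" (InvalidOid, i.e. 0) when the operator's right input or the procedure's second argument matches the operator class's input type, and is that type's OID otherwise. A tiny self-contained illustration of the rule, with Oid reduced to an unsigned int and the function name hypothetical:

    #include <stdio.h>

    typedef unsigned int Oid;       /* stand-in for the backend typedef */
    #define InvalidOid ((Oid) 0)

    /* Subtype is "default" (0) when the second input matches the class. */
    static Oid
    assign_subtype(Oid opclass_input_type, Oid second_input_type)
    {
        return (second_input_type == opclass_input_type)
            ? InvalidOid
            : second_input_type;
    }

    int
    main(void)
    {
        printf("%u\n", assign_subtype(23, 23)); /* 0: default subtype */
        printf("%u\n", assign_subtype(23, 20)); /* 20: cross-type member */
        return 0;
    }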
@@ -486,10 +489,11 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid) if (procform->proargtypes[0] != typeoid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree procedures must have index type as first input"))); + errmsg("btree procedures must have index type as first input"))); + /* - * The subtype is "default" (0) if second input type matches the operator - * class, otherwise it is the second input type. + * The subtype is "default" (0) if second input type matches the + * operator class, otherwise it is the second input type. */ if (procform->proargtypes[1] == typeoid) subtype = InvalidOid; @@ -518,13 +522,13 @@ addClassMember(List **list, OpClassMember *member, bool isProc) if (isProc) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("procedure number %d appears more than once", - member->number))); + errmsg("procedure number %d appears more than once", + member->number))); else ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("operator number %d appears more than once", - member->number))); + errmsg("operator number %d appears more than once", + member->number))); } } *list = lappend(*list, member); @@ -885,7 +889,7 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId) char *opcname; HeapTuple tup; Relation rel; - Form_pg_opclass opcForm; + Form_pg_opclass opcForm; amOid = GetSysCacheOid(AMNAME, CStringGetDatum(access_method), @@ -937,7 +941,7 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId) } opcForm = (Form_pg_opclass) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -949,7 +953,10 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on tup because it's a copy */ + /* + * Modify the owner --- okay to scribble on tup because it's a + * copy + */ opcForm->opcowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c index 0605b75b3e..280404ceb7 100644 --- a/src/backend/commands/operatorcmds.c +++ b/src/backend/commands/operatorcmds.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.18 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.19 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -275,7 +275,7 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, Oid operOid; HeapTuple tup; Relation rel; - Form_pg_operator oprForm; + Form_pg_operator oprForm; rel = heap_openr(OperatorRelationName, RowExclusiveLock); @@ -283,14 +283,14 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, false); tup = SearchSysCacheCopy(OPEROID, - ObjectIdGetDatum(operOid), - 0, 0, 0); + ObjectIdGetDatum(operOid), + 0, 0, 0); if (!HeapTupleIsValid(tup)) /* should not happen */ elog(ERROR, "cache lookup failed for operator %u", operOid); oprForm = (Form_pg_operator) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. 
*/ @@ -302,7 +302,10 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on tup because it's a copy */ + /* + * Modify the owner --- okay to scribble on tup because it's a + * copy + */ oprForm->oprowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); @@ -314,5 +317,3 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, heap_freetuple(tup); } - - diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c index a8356e5dcf..08b1401354 100644 --- a/src/backend/commands/portalcmds.c +++ b/src/backend/commands/portalcmds.c @@ -14,7 +14,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.32 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.33 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -106,10 +106,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params) /* * Also copy the outer portal's parameter list into the inner portal's - * memory context. We want to pass down the parameter values in case - * we had a command like - * DECLARE c CURSOR FOR SELECT ... WHERE foo = $1 - * This will have been parsed using the outer parameter set and the + * memory context. We want to pass down the parameter values in case + * we had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo = + * $1 This will have been parsed using the outer parameter set and the * parameter value needs to be preserved for use when the cursor is * executed. */ @@ -180,8 +179,8 @@ PerformPortalFetch(FetchStmt *stmt, { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_CURSOR), - errmsg("cursor \"%s\" does not exist", stmt->portalname))); - return; /* keep compiler happy */ + errmsg("cursor \"%s\" does not exist", stmt->portalname))); + return; /* keep compiler happy */ } /* Adjust dest if needed. MOVE wants destination None */ @@ -228,7 +227,7 @@ PerformPortalClose(const char *name) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_CURSOR), errmsg("cursor \"%s\" does not exist", name))); - return; /* keep compiler happy */ + return; /* keep compiler happy */ } /* @@ -354,8 +353,9 @@ PersistHoldablePortal(Portal portal) MemoryContextSwitchTo(PortalContext); /* - * Rewind the executor: we need to store the entire result set in the - * tuplestore, so that subsequent backward FETCHs can be processed. + * Rewind the executor: we need to store the entire result set in + * the tuplestore, so that subsequent backward FETCHs can be + * processed. */ ExecutorRewind(queryDesc); @@ -371,15 +371,15 @@ PersistHoldablePortal(Portal portal) /* * Now shut down the inner executor. */ - portal->queryDesc = NULL; /* prevent double shutdown */ + portal->queryDesc = NULL; /* prevent double shutdown */ ExecutorEnd(queryDesc); /* * Reset the position in the result set: ideally, this could be - * implemented by just skipping straight to the tuple # that we need - * to be at, but the tuplestore API doesn't support that. So we start - * at the beginning of the tuplestore and iterate through it until we - * reach where we need to be. FIXME someday? + * implemented by just skipping straight to the tuple # that we + * need to be at, but the tuplestore API doesn't support that. So + * we start at the beginning of the tuplestore and iterate through + * it until we reach where we need to be. 
FIXME someday? */ MemoryContextSwitchTo(portal->holdContext); @@ -389,8 +389,8 @@ PersistHoldablePortal(Portal portal) if (portal->posOverflow) /* oops, cannot trust portalPos */ ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not reposition held cursor"))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not reposition held cursor"))); tuplestore_rescan(portal->holdStore); diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 31de3e839f..032fe4acbc 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -10,7 +10,7 @@ * Copyright (c) 2002-2004, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.30 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.31 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -211,7 +211,8 @@ EvaluateParams(EState *estate, List *params, List *argtypes) int nargs = list_length(argtypes); ParamListInfo paramLI; List *exprstates; - ListCell *le, *la; + ListCell *le, + *la; int i = 0; /* Parser should have caught this error, but check for safety */ @@ -510,7 +511,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate) } /* Explain each query */ - forboth (q, query_list, p, plan_list) + forboth(q, query_list, p, plan_list) { Query *query = (Query *) lfirst(q); Plan *plan = (Plan *) lfirst(p); diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 94de3f1235..404436e8c0 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.54 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.55 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -101,8 +101,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) else ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function %s must return type \"language_handler\"", - NameListToString(stmt->plhandler)))); + errmsg("function %s must return type \"language_handler\"", + NameListToString(stmt->plhandler)))); } /* validate the validator function */ @@ -126,12 +126,12 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) i = 0; namestrcpy(&langname, languageName); - values[i++] = NameGetDatum(&langname); /* lanname */ - values[i++] = BoolGetDatum(true); /* lanispl */ - values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */ - values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */ - values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */ - nulls[i] = 'n'; /* lanacl */ + values[i++] = NameGetDatum(&langname); /* lanname */ + values[i++] = BoolGetDatum(true); /* lanispl */ + values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */ + values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */ + values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */ + nulls[i] = 'n'; /* lanacl */ rel = heap_openr(LanguageRelationName, RowExclusiveLock); diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index df565d46e8..8a3d02d100 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: 
pgsql/src/backend/commands/schemacmds.c,v 1.23 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.24 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -103,12 +103,12 @@ CreateSchemaCommand(CreateSchemaStmt *stmt) errdetail("The prefix \"pg_\" is reserved for system schemas."))); /* - * Select default tablespace for schema. If not given, use zero - * which implies the database's default tablespace. + * Select default tablespace for schema. If not given, use zero which + * implies the database's default tablespace. */ if (stmt->tablespacename) { - AclResult aclresult; + AclResult aclresult; tablespaceId = get_tablespace_oid(stmt->tablespacename); if (!OidIsValid(tablespaceId)) @@ -122,7 +122,9 @@ CreateSchemaCommand(CreateSchemaStmt *stmt) if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_TABLESPACE, stmt->tablespacename); - } else { + } + else + { tablespaceId = InvalidOid; /* note there is no permission check in this path */ } @@ -316,20 +318,20 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId) { HeapTuple tup; Relation rel; - Form_pg_namespace nspForm; + Form_pg_namespace nspForm; rel = heap_openr(NamespaceRelationName, RowExclusiveLock); tup = SearchSysCache(NAMESPACENAME, - CStringGetDatum(name), - 0, 0, 0); + CStringGetDatum(name), + 0, 0, 0); if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), errmsg("schema \"%s\" does not exist", name))); nspForm = (Form_pg_namespace) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -338,7 +340,7 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId) Datum repl_val[Natts_pg_namespace]; char repl_null[Natts_pg_namespace]; char repl_repl[Natts_pg_namespace]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -377,7 +379,7 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId) heap_freetuple(newtuple); } - + ReleaseSysCache(tup); heap_close(rel, NoLock); } diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index d9852ed9d9..53ec53e39f 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.115 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.116 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -326,7 +326,7 @@ AlterSequence(AlterSeqStmt *stmt) memcpy(seq, &new, sizeof(FormData_pg_sequence)); /* Clear local cache so that we don't think we have cached numbers */ - elm->last = new.last_value; /* last returned number */ + elm->last = new.last_value; /* last returned number */ elm->cached = new.last_value; /* last cached number (forget * cached values) */ @@ -950,26 +950,22 @@ init_params(List *options, Form_pg_sequence new, bool isInit) /* MAXVALUE (null arg means NO MAXVALUE) */ if (max_value != NULL && max_value->arg) - { new->max_value = defGetInt64(max_value); - } else if (isInit || max_value != NULL) { if (new->increment_by > 0) new->max_value = SEQ_MAXVALUE; /* ascending seq */ else - new->max_value = -1; /* descending seq */ + new->max_value = -1; /* descending seq */ } /* MINVALUE (null arg means NO MINVALUE) */ if (min_value != NULL && min_value->arg) - { new->min_value = 
defGetInt64(min_value); - } else if (isInit || min_value != NULL) { if (new->increment_by > 0) - new->min_value = 1; /* ascending seq */ + new->min_value = 1; /* ascending seq */ else new->min_value = SEQ_MINVALUE; /* descending seq */ } @@ -1073,7 +1069,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record) buffer = XLogReadBuffer(true, reln, 0); if (!BufferIsValid(buffer)) elog(PANIC, "seq_redo: can't read block 0 of rel %u/%u/%u", - xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); + xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); page = (Page) BufferGetPage(buffer); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 73a51c2da9..ab0d659dc5 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.128 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.129 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -77,8 +77,8 @@ typedef struct OnCommitItem * entries in the list until commit so that we can roll back if * needed. */ - TransactionId creating_xid; - TransactionId deleting_xid; + TransactionId creating_xid; + TransactionId deleting_xid; } OnCommitItem; static List *on_commits = NIL; @@ -117,7 +117,7 @@ typedef struct AlteredTableInfo char relkind; /* Its relkind */ TupleDesc oldDesc; /* Pre-modification tuple descriptor */ /* Information saved by Phase 1 for Phase 2: */ - List *subcmds[AT_NUM_PASSES]; /* Lists of AlterTableCmd */ + List *subcmds[AT_NUM_PASSES]; /* Lists of AlterTableCmd */ /* Information saved by Phases 1/2 for Phase 3: */ List *constraints; /* List of NewConstraint */ List *newvals; /* List of NewColumnValue */ @@ -125,8 +125,8 @@ typedef struct AlteredTableInfo /* Objects to rebuild after completing ALTER TYPE operations */ List *changedConstraintOids; /* OIDs of constraints to rebuild */ List *changedConstraintDefs; /* string definitions of same */ - List *changedIndexOids; /* OIDs of indexes to rebuild */ - List *changedIndexDefs; /* string definitions of same */ + List *changedIndexOids; /* OIDs of indexes to rebuild */ + List *changedIndexDefs; /* string definitions of same */ } AlteredTableInfo; /* Struct describing one new constraint to check in Phase 3 scan */ @@ -171,12 +171,12 @@ static bool needs_toast_table(Relation rel); static int transformColumnNameList(Oid relId, List *colList, int16 *attnums, Oid *atttypids); static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, - List **attnamelist, - int16 *attnums, Oid *atttypids, - Oid *opclasses); + List **attnamelist, + int16 *attnums, Oid *atttypids, + Oid *opclasses); static Oid transformFkeyCheckAttrs(Relation pkrel, - int numattrs, int16 *attnums, - Oid *opclasses); + int numattrs, int16 *attnums, + Oid *opclasses); static void validateForeignKeyConstraint(FkConstraint *fkconstraint, Relation rel, Relation pkrel); static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint, @@ -184,7 +184,7 @@ static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint, static char *fkMatchTypeToString(char match_type); static void ATController(Relation rel, List *cmds, bool recurse); static void ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, - bool recurse, bool recursing); + bool recurse, bool recursing); static void ATRewriteCatalogs(List **wqueue); static void 
ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd); static void ATRewriteTables(List **wqueue); @@ -192,55 +192,55 @@ static void ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap); static AlteredTableInfo *ATGetQueueEntry(List **wqueue, Relation rel); static void ATSimplePermissions(Relation rel, bool allowView); static void ATSimpleRecursion(List **wqueue, Relation rel, - AlterTableCmd *cmd, bool recurse); + AlterTableCmd *cmd, bool recurse); static void ATOneLevelRecursion(List **wqueue, Relation rel, - AlterTableCmd *cmd); + AlterTableCmd *cmd); static void find_composite_type_dependencies(Oid typeOid, - const char *origTblName); + const char *origTblName); static void ATPrepAddColumn(List **wqueue, Relation rel, bool recurse, - AlterTableCmd *cmd); + AlterTableCmd *cmd); static void ATExecAddColumn(AlteredTableInfo *tab, Relation rel, - ColumnDef *colDef); + ColumnDef *colDef); static void add_column_datatype_dependency(Oid relid, int32 attnum, Oid typid); static void add_column_support_dependency(Oid relid, int32 attnum, - RangeVar *support); + RangeVar *support); static void ATExecDropNotNull(Relation rel, const char *colName); static void ATExecSetNotNull(AlteredTableInfo *tab, Relation rel, - const char *colName); + const char *colName); static void ATExecColumnDefault(Relation rel, const char *colName, - Node *newDefault); + Node *newDefault); static void ATPrepSetStatistics(Relation rel, const char *colName, - Node *flagValue); + Node *flagValue); static void ATExecSetStatistics(Relation rel, const char *colName, - Node *newValue); + Node *newValue); static void ATExecSetStorage(Relation rel, const char *colName, - Node *newValue); + Node *newValue); static void ATExecDropColumn(Relation rel, const char *colName, - DropBehavior behavior, - bool recurse, bool recursing); + DropBehavior behavior, + bool recurse, bool recursing); static void ATExecAddIndex(AlteredTableInfo *tab, Relation rel, - IndexStmt *stmt, bool is_rebuild); + IndexStmt *stmt, bool is_rebuild); static void ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, - Node *newConstraint); + Node *newConstraint); static void ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, - FkConstraint *fkconstraint); + FkConstraint *fkconstraint); static void ATPrepDropConstraint(List **wqueue, Relation rel, - bool recurse, AlterTableCmd *cmd); + bool recurse, AlterTableCmd *cmd); static void ATExecDropConstraint(Relation rel, const char *constrName, - DropBehavior behavior, bool quiet); + DropBehavior behavior, bool quiet); static void ATPrepAlterColumnType(List **wqueue, - AlteredTableInfo *tab, Relation rel, - bool recurse, bool recursing, - AlterTableCmd *cmd); + AlteredTableInfo *tab, Relation rel, + bool recurse, bool recursing, + AlterTableCmd *cmd); static void ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, - const char *colName, TypeName *typename); + const char *colName, TypeName *typename); static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab); static void ATPostAlterTypeParse(char *cmd, List **wqueue); static void ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId); static void ATExecClusterOn(Relation rel, const char *indexName); static void ATExecDropCluster(Relation rel); static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, - char *tablespacename); + char *tablespacename); static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace); static void copy_relation_data(Relation rel, SMgrRelation dst); static int ri_trigger_type(Oid 
tgfoid); @@ -289,7 +289,7 @@ DefineRelation(CreateStmt *stmt, char relkind) if (stmt->oncommit != ONCOMMIT_NOOP && !stmt->relation->istemp) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("ON COMMIT can only be used on temporary tables"))); + errmsg("ON COMMIT can only be used on temporary tables"))); /* * Look up the namespace in which we are supposed to create the @@ -310,12 +310,13 @@ DefineRelation(CreateStmt *stmt, char relkind) } /* - * Select tablespace to use. If not specified, use containing schema's - * default tablespace (which may in turn default to database's default). + * Select tablespace to use. If not specified, use containing + * schema's default tablespace (which may in turn default to + * database's default). */ if (stmt->tablespacename) { - AclResult aclresult; + AclResult aclresult; tablespaceId = get_tablespace_oid(stmt->tablespacename); if (!OidIsValid(tablespaceId)) @@ -329,7 +330,9 @@ DefineRelation(CreateStmt *stmt, char relkind) if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_TABLESPACE, stmt->tablespacename); - } else { + } + else + { tablespaceId = get_namespace_tablespace(namespaceId); /* note no permission check on tablespace in this case */ } @@ -340,7 +343,7 @@ DefineRelation(CreateStmt *stmt, char relkind) */ schema = MergeAttributes(schema, stmt->inhRelations, stmt->relation->istemp, - &inheritOids, &old_constraints, &parentOidCount); + &inheritOids, &old_constraints, &parentOidCount); /* * Create a relation descriptor from the relation schema and create @@ -357,23 +360,25 @@ DefineRelation(CreateStmt *stmt, char relkind) if (old_constraints != NIL) { ConstrCheck *check = (ConstrCheck *) - palloc0(list_length(old_constraints) * sizeof(ConstrCheck)); + palloc0(list_length(old_constraints) * sizeof(ConstrCheck)); int ncheck = 0; foreach(listptr, old_constraints) { Constraint *cdef = (Constraint *) lfirst(listptr); - bool dup = false; + bool dup = false; if (cdef->contype != CONSTR_CHECK) continue; Assert(cdef->name != NULL); Assert(cdef->raw_expr == NULL && cdef->cooked_expr != NULL); + /* - * In multiple-inheritance situations, it's possible to inherit - * the same grandparent constraint through multiple parents. - * Hence, discard inherited constraints that match as to both - * name and expression. Otherwise, gripe if the names conflict. + * In multiple-inheritance situations, it's possible to + * inherit the same grandparent constraint through multiple + * parents. Hence, discard inherited constraints that match as + * to both name and expression. Otherwise, gripe if the names + * conflict. */ for (i = 0; i < ncheck; i++) { @@ -546,8 +551,9 @@ TruncateRelation(const RangeVar *relation) RelationGetRelationName(rel)))); /* - * We can never allow truncation of shared or nailed-in-cache relations, - * because we can't support changing their relfilenode values. + * We can never allow truncation of shared or nailed-in-cache + * relations, because we can't support changing their relfilenode + * values. 
*/ if (rel->rd_rel->relisshared || rel->rd_isnailed) ereport(ERROR, @@ -562,7 +568,7 @@ TruncateRelation(const RangeVar *relation) if (isOtherTempNamespace(RelationGetNamespace(rel))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot truncate temporary tables of other sessions"))); + errmsg("cannot truncate temporary tables of other sessions"))); /* * Don't allow truncate on tables which are referenced by foreign keys @@ -571,7 +577,7 @@ TruncateRelation(const RangeVar *relation) /* * Okay, here we go: create a new empty storage file for the relation, - * and assign it as the relfilenode value. The old storage file is + * and assign it as the relfilenode value. The old storage file is * scheduled for deletion at commit. */ setNewRelfilenode(rel); @@ -797,8 +803,8 @@ MergeAttributes(List *schema, List *supers, bool istemp, def->typename->typmod != attribute->atttypmod) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("inherited column \"%s\" has a type conflict", - attributeName), + errmsg("inherited column \"%s\" has a type conflict", + attributeName), errdetail("%s versus %s", TypeNameToString(def->typename), format_type_be(attribute->atttypid)))); @@ -935,15 +941,15 @@ MergeAttributes(List *schema, List *supers, bool istemp, * have the same type and typmod. */ ereport(NOTICE, - (errmsg("merging column \"%s\" with inherited definition", - attributeName))); + (errmsg("merging column \"%s\" with inherited definition", + attributeName))); def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1); if (typenameTypeId(def->typename) != typenameTypeId(newdef->typename) || def->typename->typmod != newdef->typename->typmod) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column \"%s\" has a type conflict", - attributeName), + errmsg("column \"%s\" has a type conflict", + attributeName), errdetail("%s versus %s", TypeNameToString(def->typename), TypeNameToString(newdef->typename)))); @@ -1061,12 +1067,12 @@ StoreCatalogInheritance(Oid relationId, List *supers) /* * Store INHERITS information in pg_inherits using direct ancestors - * only. Also enter dependencies on the direct ancestors, and make sure - * they are marked with relhassubclass = true. + * only. Also enter dependencies on the direct ancestors, and make + * sure they are marked with relhassubclass = true. * * (Once upon a time, both direct and indirect ancestors were found here - * and then entered into pg_ipl. Since that catalog doesn't exist anymore, - * there's no need to look for indirect ancestors.) + * and then entered into pg_ipl. Since that catalog doesn't exist + * anymore, there's no need to look for indirect ancestors.) */ relation = heap_openr(InheritsRelationName, RowExclusiveLock); desc = RelationGetDescr(relation); @@ -1081,7 +1087,7 @@ StoreCatalogInheritance(Oid relationId, List *supers) parentobject; datum[0] = ObjectIdGetDatum(relationId); /* inhrel */ - datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */ + datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */ datum[2] = Int16GetDatum(seqNumber); /* inhseqno */ nullarr[0] = ' '; @@ -1156,9 +1162,8 @@ setRelhassubclassInRelation(Oid relationId, bool relhassubclass) /* * Fetch a modifiable copy of the tuple, modify it, update pg_class. * - * If the tuple already has the right relhassubclass setting, we - * don't need to update it, but we still need to issue an SI inval - * message. 
+ * If the tuple already has the right relhassubclass setting, we don't + * need to update it, but we still need to issue an SI inval message. */ relationRelation = heap_openr(RelationRelationName, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, @@ -1318,7 +1323,7 @@ renameatt(Oid myrelid, 0, 0)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" of relation \"%s\" already exists", + errmsg("column \"%s\" of relation \"%s\" already exists", newattname, RelationGetRelationName(targetrelation)))); namestrcpy(&(attform->attname), newattname); @@ -1712,9 +1717,9 @@ update_ri_trigger_args(Oid relid, * rebuild relcache entries. (Ideally this should happen * automatically...) * - * We can skip this for triggers on relid itself, since that - * relcache flush will happen anyway due to the table or column - * rename. We just need to catch the far ends of RI relationships. + * We can skip this for triggers on relid itself, since that relcache + * flush will happen anyway due to the table or column rename. We + * just need to catch the far ends of RI relationships. */ pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple); if (pg_trigger->tgrelid != relid) @@ -1747,11 +1752,11 @@ update_ri_trigger_args(Oid relid, * 3. Scan table(s) to check new constraints, and optionally recopy * the data into new table(s). * Phase 3 is not performed unless one or more of the subcommands requires - * it. The intention of this design is to allow multiple independent + * it. The intention of this design is to allow multiple independent * updates of the table schema to be performed with only one pass over the * data. * - * ATPrepCmd performs phase 1. A "work queue" entry is created for + * ATPrepCmd performs phase 1. A "work queue" entry is created for * each table to be affected (there may be multiple affected tables if the * commands traverse a table inheritance hierarchy). Also we do preliminary * validation of the subcommands, including parse transformation of those @@ -1762,7 +1767,7 @@ update_ri_trigger_args(Oid relid, * phases 2 and 3 do no explicit recursion, since phase 1 already did it). * Certain subcommands need to be performed before others to avoid * unnecessary conflicts; for example, DROP COLUMN should come before - * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple + * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple * lists, one for each logical "pass" of phase 2. * * ATRewriteTables performs phase 3 for those tables that need it. @@ -1843,8 +1848,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, cmd = copyObject(cmd); /* - * Do permissions checking, recursion to child tables if needed, - * and any additional phase-1 processing needed. + * Do permissions checking, recursion to child tables if needed, and + * any additional phase-1 processing needed. */ switch (cmd->subtype) { @@ -1855,9 +1860,10 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, pass = AT_PASS_ADD_COL; break; case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */ + /* - * We allow defaults on views so that INSERT into a view can have - * default-ish behavior. This works because the rewriter + * We allow defaults on views so that INSERT into a view can + * have default-ish behavior. This works because the rewriter * substitutes default values into INSERTs before it expands * rules. 
*/ @@ -1906,6 +1912,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, break; case AT_AddConstraint: /* ADD CONSTRAINT */ ATSimplePermissions(rel, false); + /* * Currently we recurse only for CHECK constraints, never for * foreign-key constraints. UNIQUE/PKEY constraints won't be @@ -1928,13 +1935,13 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* No command-specific prep needed */ pass = AT_PASS_DROP; break; - case AT_AlterColumnType: /* ALTER COLUMN TYPE */ + case AT_AlterColumnType: /* ALTER COLUMN TYPE */ ATSimplePermissions(rel, false); /* Performs own recursion */ ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd); pass = AT_PASS_ALTER_TYPE; break; - case AT_ToastTable: /* CREATE TOAST TABLE */ + case AT_ToastTable: /* CREATE TOAST TABLE */ ATSimplePermissions(rel, false); /* This command never recurses */ /* No command-specific prep needed */ @@ -1945,14 +1952,14 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* No command-specific prep needed */ pass = AT_PASS_MISC; break; - case AT_ClusterOn: /* CLUSTER ON */ + case AT_ClusterOn: /* CLUSTER ON */ case AT_DropCluster: /* SET WITHOUT CLUSTER */ ATSimplePermissions(rel, false); /* These commands never recurse */ /* No command-specific prep needed */ pass = AT_PASS_MISC; break; - case AT_DropOids: /* SET WITHOUT OIDS */ + case AT_DropOids: /* SET WITHOUT OIDS */ ATSimplePermissions(rel, false); /* Performs own recursion */ if (rel->rd_rel->relhasoids) @@ -1969,9 +1976,9 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, case AT_SetTableSpace: /* SET TABLESPACE */ /* This command never recurses */ ATPrepSetTableSpace(tab, rel, cmd->name); - pass = AT_PASS_MISC; /* doesn't actually matter */ + pass = AT_PASS_MISC; /* doesn't actually matter */ break; - default: /* oops */ + default: /* oops */ elog(ERROR, "unrecognized alter table type: %d", (int) cmd->subtype); pass = 0; /* keep compiler quiet */ @@ -1985,7 +1992,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* * ATRewriteCatalogs * - * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are + * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are * dispatched in a "safe" execution order (designed to avoid unnecessary * conflicts). */ @@ -1997,10 +2004,10 @@ ATRewriteCatalogs(List **wqueue) /* * We process all the tables "in parallel", one pass at a time. This - * is needed because we may have to propagate work from one table - * to another (specifically, ALTER TYPE on a foreign key's PK has to + * is needed because we may have to propagate work from one table to + * another (specifically, ALTER TYPE on a foreign key's PK has to * dispatch the re-adding of the foreign key constraint to the other - * table). Work can only be propagated into later passes, however. + * table). Work can only be propagated into later passes, however. */ for (pass = 0; pass < AT_NUM_PASSES; pass++) { @@ -2015,18 +2022,19 @@ ATRewriteCatalogs(List **wqueue) if (subcmds == NIL) continue; - /* Exclusive lock was obtained by phase 1, needn't get it again */ + /* + * Exclusive lock was obtained by phase 1, needn't get it + * again + */ rel = relation_open(tab->relid, NoLock); foreach(lcmd, subcmds) - { ATExecCmd(tab, rel, (AlterTableCmd *) lfirst(lcmd)); - } /* - * After the ALTER TYPE pass, do cleanup work (this is not done in - * ATExecAlterColumnType since it should be done only once if - * multiple columns of a table are altered). 
+ * After the ALTER TYPE pass, do cleanup work (this is not + * done in ATExecAlterColumnType since it should be done only + * once if multiple columns of a table are altered). */ if (pass == AT_PASS_ALTER_TYPE) ATPostAlterTypeCleanup(wqueue, tab); @@ -2047,9 +2055,7 @@ ATRewriteCatalogs(List **wqueue) (tab->subcmds[AT_PASS_ADD_COL] || tab->subcmds[AT_PASS_ALTER_TYPE] || tab->subcmds[AT_PASS_COL_ATTRS])) - { AlterTableCreateToastTable(tab->relid, true); - } } } @@ -2082,7 +2088,7 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd) case AT_DropColumn: /* DROP COLUMN */ ATExecDropColumn(rel, cmd->name, cmd->behavior, false, false); break; - case AT_DropColumnRecurse: /* DROP COLUMN with recursion */ + case AT_DropColumnRecurse: /* DROP COLUMN with recursion */ ATExecDropColumn(rel, cmd->name, cmd->behavior, true, false); break; case AT_AddIndex: /* ADD INDEX */ @@ -2100,7 +2106,7 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd) case AT_DropConstraintQuietly: /* DROP CONSTRAINT for child */ ATExecDropConstraint(rel, cmd->name, cmd->behavior, true); break; - case AT_AlterColumnType: /* ALTER COLUMN TYPE */ + case AT_AlterColumnType: /* ALTER COLUMN TYPE */ ATExecAlterColumnType(tab, rel, cmd->name, (TypeName *) cmd->def); break; case AT_ToastTable: /* CREATE TOAST TABLE */ @@ -2113,29 +2119,31 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd) case AT_ClusterOn: /* CLUSTER ON */ ATExecClusterOn(rel, cmd->name); break; - case AT_DropCluster: /* SET WITHOUT CLUSTER */ + case AT_DropCluster: /* SET WITHOUT CLUSTER */ ATExecDropCluster(rel); break; case AT_DropOids: /* SET WITHOUT OIDS */ + /* - * Nothing to do here; we'll have generated a DropColumn subcommand - * to do the real work + * Nothing to do here; we'll have generated a DropColumn + * subcommand to do the real work */ break; - case AT_SetTableSpace: /* SET TABLESPACE */ + case AT_SetTableSpace: /* SET TABLESPACE */ + /* * Nothing to do here; Phase 3 does the work */ break; - default: /* oops */ + default: /* oops */ elog(ERROR, "unrecognized alter table type: %d", (int) cmd->subtype); break; } /* - * Bump the command counter to ensure the next subcommand in the sequence - * can see the changes so far + * Bump the command counter to ensure the next subcommand in the + * sequence can see the changes so far */ CommandCounterIncrement(); } @@ -2164,14 +2172,14 @@ ATRewriteTables(List **wqueue) char NewHeapName[NAMEDATALEN]; Oid NewTableSpace; Relation OldHeap; - ObjectAddress object; + ObjectAddress object; OldHeap = heap_open(tab->relid, NoLock); /* * We can never allow rewriting of shared or nailed-in-cache - * relations, because we can't support changing their relfilenode - * values. + * relations, because we can't support changing their + * relfilenode values. */ if (OldHeap->rd_rel->relisshared || OldHeap->rd_isnailed) ereport(ERROR, @@ -2180,8 +2188,8 @@ ATRewriteTables(List **wqueue) RelationGetRelationName(OldHeap)))); /* - * Don't allow rewrite on temp tables of other backends ... their - * local buffer manager is not going to cope. + * Don't allow rewrite on temp tables of other backends ... + * their local buffer manager is not going to cope. */ if (isOtherTempNamespace(RelationGetNamespace(OldHeap))) ereport(ERROR, @@ -2201,11 +2209,12 @@ ATRewriteTables(List **wqueue) /* * Create the new heap, using a temporary name in the same - * namespace as the existing table. NOTE: there is some risk of - * collision with user relnames. 
Working around this seems more - * trouble than it's worth; in particular, we can't create the new - * heap in a different namespace from the old, or we will have - * problems with the TEMP status of temp tables. + * namespace as the existing table. NOTE: there is some risk + * of collision with user relnames. Working around this seems + * more trouble than it's worth; in particular, we can't + * create the new heap in a different namespace from the old, + * or we will have problems with the TEMP status of temp + * tables. */ snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tab->relid); @@ -2230,15 +2239,15 @@ ATRewriteTables(List **wqueue) object.objectSubId = 0; /* - * The new relation is local to our transaction and we know nothing - * depends on it, so DROP_RESTRICT should be OK. + * The new relation is local to our transaction and we know + * nothing depends on it, so DROP_RESTRICT should be OK. */ performDeletion(&object, DROP_RESTRICT); /* performDeletion does CommandCounterIncrement at end */ /* - * Rebuild each index on the relation (but not the toast table, - * which is all-new anyway). We do not need + * Rebuild each index on the relation (but not the toast + * table, which is all-new anyway). We do not need * CommandCounterIncrement() because reindex_relation does it. */ reindex_relation(tab->relid, false); @@ -2246,14 +2255,16 @@ ATRewriteTables(List **wqueue) else { /* - * Test the current data within the table against new constraints - * generated by ALTER TABLE commands, but don't rebuild data. + * Test the current data within the table against new + * constraints generated by ALTER TABLE commands, but don't + * rebuild data. */ if (tab->constraints != NIL) ATRewriteTable(tab, InvalidOid); + /* - * If we had SET TABLESPACE but no reason to reconstruct tuples, - * just do a block-by-block copy. + * If we had SET TABLESPACE but no reason to reconstruct + * tuples, just do a block-by-block copy. */ if (tab->newTableSpace) ATExecSetTableSpace(tab->relid, tab->newTableSpace); @@ -2261,17 +2272,17 @@ ATRewriteTables(List **wqueue) } /* - * Foreign key constraints are checked in a final pass, since - * (a) it's generally best to examine each one separately, and - * (b) it's at least theoretically possible that we have changed - * both relations of the foreign key, and we'd better have finished - * both rewrites before we try to read the tables. + * Foreign key constraints are checked in a final pass, since (a) it's + * generally best to examine each one separately, and (b) it's at + * least theoretically possible that we have changed both relations of + * the foreign key, and we'd better have finished both rewrites before + * we try to read the tables. */ foreach(ltab, *wqueue) { - AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab); - Relation rel = NULL; - ListCell *lcon; + AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab); + Relation rel = NULL; + ListCell *lcon; foreach(lcon, tab->constraints) { @@ -2324,7 +2335,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) */ oldrel = heap_open(tab->relid, NoLock); oldTupDesc = tab->oldDesc; - newTupDesc = RelationGetDescr(oldrel); /* includes all mods */ + newTupDesc = RelationGetDescr(oldrel); /* includes all mods */ if (OidIsValid(OIDNewHeap)) newrel = heap_open(OIDNewHeap, AccessExclusiveLock); @@ -2335,9 +2346,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) * If we need to rewrite the table, the operation has to be propagated * to tables that use this table's rowtype as a column type. 
* - * (Eventually this will probably become true for scans as well, but - * at the moment a composite type does not enforce any constraints, - * so it's not necessary/appropriate to enforce them just during ALTER.) + * (Eventually this will probably become true for scans as well, but at + * the moment a composite type does not enforce any constraints, so + * it's not necessary/appropriate to enforce them just during ALTER.) */ if (newrel) find_composite_type_dependencies(oldrel->rd_rel->reltype, @@ -2375,7 +2386,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) foreach(l, tab->newvals) { - NewColumnValue *ex = lfirst(l); + NewColumnValue *ex = lfirst(l); needscan = true; @@ -2384,12 +2395,12 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) if (needscan) { - ExprContext *econtext; + ExprContext *econtext; Datum *values; char *nulls; TupleTableSlot *oldslot; TupleTableSlot *newslot; - HeapScanDesc scan; + HeapScanDesc scan; HeapTuple tuple; econtext = GetPerTupleExprContext(estate); @@ -2425,7 +2436,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) * Extract data from old tuple. We can force to null any * columns that are deleted according to the new tuple. */ - int natts = newTupDesc->natts; + int natts = newTupDesc->natts; heap_deformtuple(tuple, oldTupDesc, values, nulls); @@ -2436,16 +2447,16 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) } /* - * Process supplied expressions to replace selected columns. - * Expression inputs come from the old tuple. + * Process supplied expressions to replace selected + * columns. Expression inputs come from the old tuple. */ ExecStoreTuple(tuple, oldslot, InvalidBuffer, false); econtext->ecxt_scantuple = oldslot; foreach(l, tab->newvals) { - NewColumnValue *ex = lfirst(l); - bool isNull; + NewColumnValue *ex = lfirst(l); + bool isNull; values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, @@ -2478,20 +2489,20 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) con->name))); break; case CONSTR_NOTNULL: - { - Datum d; - bool isnull; + { + Datum d; + bool isnull; - d = heap_getattr(tuple, con->attnum, newTupDesc, - &isnull); - if (isnull) - ereport(ERROR, + d = heap_getattr(tuple, con->attnum, newTupDesc, + &isnull); + if (isnull) + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("column \"%s\" contains null values", get_attname(tab->relid, con->attnum)))); - } - break; + } + break; case CONSTR_FOREIGN: /* Nothing to do here */ break; @@ -2733,8 +2744,9 @@ find_composite_type_dependencies(Oid typeOid, const char *origTblName) else if (OidIsValid(rel->rd_rel->reltype)) { /* - * A view or composite type itself isn't a problem, but we must - * recursively check for indirect dependencies via its rowtype. + * A view or composite type itself isn't a problem, but we + * must recursively check for indirect dependencies via its + * rowtype. */ find_composite_type_dependencies(rel->rd_rel->reltype, origTblName); @@ -2790,7 +2802,7 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse, if (find_inheritance_children(RelationGetRelid(rel)) != NIL) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("column must be added to child tables too"))); + errmsg("column must be added to child tables too"))); } } @@ -2815,8 +2827,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, attrdesc = heap_openr(AttributeRelationName, RowExclusiveLock); /* - * Are we adding the column to a recursion child? If so, check whether - * to merge with an existing definition for the column. 
+ * Are we adding the column to a recursion child? If so, check + * whether to merge with an existing definition for the column. */ if (colDef->inhcount > 0) { @@ -2834,7 +2846,7 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("child table \"%s\" has different type for column \"%s\"", - RelationGetRelationName(rel), colDef->colname))); + RelationGetRelationName(rel), colDef->colname))); /* Bump the existing child att's inhcount */ childatt->attinhcount++; @@ -2846,7 +2858,7 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, /* Inform the user about the merge */ ereport(NOTICE, (errmsg("merging definition of column \"%s\" for child \"%s\"", - colDef->colname, RelationGetRelationName(rel)))); + colDef->colname, RelationGetRelationName(rel)))); heap_close(attrdesc, RowExclusiveLock); return; @@ -2872,8 +2884,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, 0, 0)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" of relation \"%s\" already exists", - colDef->colname, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" already exists", + colDef->colname, RelationGetRelationName(rel)))); minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts; maxatts = minattnum + 1; @@ -2965,21 +2977,20 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, /* * Tell Phase 3 to fill in the default expression, if there is one. * - * If there is no default, Phase 3 doesn't have to do anything, - * because that effectively means that the default is NULL. The - * heap tuple access routines always check for attnum > # of attributes - * in tuple, and return NULL if so, so without any modification of - * the tuple data we will get the effect of NULL values in the new - * column. + * If there is no default, Phase 3 doesn't have to do anything, because + * that effectively means that the default is NULL. The heap tuple + * access routines always check for attnum > # of attributes in tuple, + * and return NULL if so, so without any modification of the tuple + * data we will get the effect of NULL values in the new column. * * Note: we use build_column_default, and not just the cooked default - * returned by AddRelationRawConstraints, so that the right thing happens - * when a datatype's default applies. + * returned by AddRelationRawConstraints, so that the right thing + * happens when a datatype's default applies. */ defval = (Expr *) build_column_default(rel, attribute->attnum); if (defval) { - NewColumnValue *newval; + NewColumnValue *newval; newval = (NewColumnValue *) palloc0(sizeof(NewColumnValue)); newval->attnum = attribute->attnum; @@ -3099,8 +3110,8 @@ ATExecDropNotNull(Relation rel, const char *colName) if (indexStruct->indkey[i] == attnum) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("column \"%s\" is in a primary key", - colName))); + errmsg("column \"%s\" is in a primary key", + colName))); } } @@ -3162,7 +3173,7 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel, /* * Okay, actually perform the catalog change ... if needed */ - if (! 
((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull) + if (!((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull) { ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = TRUE; @@ -3199,8 +3210,8 @@ ATExecColumnDefault(Relation rel, const char *colName, if (attnum == InvalidAttrNumber) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - colName, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" does not exist", + colName, RelationGetRelationName(rel)))); /* Prevent them from altering a system attribute */ if (attnum <= 0) @@ -3240,10 +3251,10 @@ static void ATPrepSetStatistics(Relation rel, const char *colName, Node *flagValue) { /* - * We do our own permission checking because (a) we want to allow - * SET STATISTICS on indexes (for expressional index columns), and - * (b) we want to allow SET STATISTICS on system catalogs without - * requiring allowSystemTableMods to be turned on. + * We do our own permission checking because (a) we want to allow SET + * STATISTICS on indexes (for expressional index columns), and (b) we + * want to allow SET STATISTICS on system catalogs without requiring + * allowSystemTableMods to be turned on. */ if (rel->rd_rel->relkind != RELKIND_RELATION && rel->rd_rel->relkind != RELKIND_INDEX) @@ -3295,8 +3306,8 @@ ATExecSetStatistics(Relation rel, const char *colName, Node *newValue) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - colName, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" does not exist", + colName, RelationGetRelationName(rel)))); attrtuple = (Form_pg_attribute) GETSTRUCT(tuple); if (attrtuple->attnum <= 0) @@ -3356,8 +3367,8 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - colName, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" does not exist", + colName, RelationGetRelationName(rel)))); attrtuple = (Form_pg_attribute) GETSTRUCT(tuple); if (attrtuple->attnum <= 0) @@ -3394,9 +3405,9 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue) * * DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism, * because we have to decide at runtime whether to recurse or not depending - * on whether attinhcount goes to zero or not. (We can't check this in a + * on whether attinhcount goes to zero or not. (We can't check this in a * static pre-pass because it won't handle multiple inheritance situations - * correctly.) Since DROP COLUMN doesn't need to create any work queue + * correctly.) Since DROP COLUMN doesn't need to create any work queue * entries for Phase 3, it's okay to recurse internally in this routine * without considering the work queue. */ @@ -3479,8 +3490,8 @@ ATExecDropColumn(Relation rel, const char *colName, { /* * If the child column has other definition sources, just - * decrement its inheritance count; if not, recurse to delete - * it. + * decrement its inheritance count; if not, recurse to + * delete it. */ if (childatt->attinhcount == 1 && !childatt->attislocal) { @@ -3504,9 +3515,9 @@ ATExecDropColumn(Relation rel, const char *colName, else { /* - * If we were told to drop ONLY in this table (no recursion), - * we need to mark the inheritors' attribute as locally - * defined rather than inherited. 
+ * If we were told to drop ONLY in this table (no + * recursion), we need to mark the inheritors' attribute + * as locally defined rather than inherited. */ childatt->attinhcount--; childatt->attislocal = true; @@ -3547,7 +3558,7 @@ ATExecDropColumn(Relation rel, const char *colName, class_rel = heap_openr(RelationRelationName, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, - ObjectIdGetDatum(RelationGetRelid(rel)), + ObjectIdGetDatum(RelationGetRelid(rel)), 0, 0, 0); if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for relation %u", @@ -3575,9 +3586,9 @@ static void ATExecAddIndex(AlteredTableInfo *tab, Relation rel, IndexStmt *stmt, bool is_rebuild) { - bool check_rights; - bool skip_build; - bool quiet; + bool check_rights; + bool skip_build; + bool quiet; Assert(IsA(stmt, IndexStmt)); @@ -3588,17 +3599,17 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel, /* suppress notices when rebuilding existing index */ quiet = is_rebuild; - DefineIndex(stmt->relation, /* relation */ - stmt->idxname, /* index name */ - stmt->accessMethod, /* am name */ + DefineIndex(stmt->relation, /* relation */ + stmt->idxname, /* index name */ + stmt->accessMethod, /* am name */ stmt->tableSpace, - stmt->indexParams, /* parameters */ + stmt->indexParams, /* parameters */ (Expr *) stmt->whereClause, stmt->rangetable, stmt->unique, stmt->primary, stmt->isconstraint, - true, /* is_alter_table */ + true, /* is_alter_table */ check_rights, skip_build, quiet); @@ -3613,84 +3624,85 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint) switch (nodeTag(newConstraint)) { case T_Constraint: - { - Constraint *constr = (Constraint *) newConstraint; - - /* - * Currently, we only expect to see CONSTR_CHECK nodes - * arriving here (see the preprocessing done in - * parser/analyze.c). Use a switch anyway to make it - * easier to add more code later. - */ - switch (constr->contype) { - case CONSTR_CHECK: - { - List *newcons; - ListCell *lcon; + Constraint *constr = (Constraint *) newConstraint; - /* - * Call AddRelationRawConstraints to do the work. - * It returns a list of cooked constraints. - */ - newcons = AddRelationRawConstraints(rel, NIL, - list_make1(constr)); - /* Add each constraint to Phase 3's queue */ - foreach(lcon, newcons) - { - CookedConstraint *ccon = (CookedConstraint *) lfirst(lcon); - NewConstraint *newcon; - - newcon = (NewConstraint *) palloc0(sizeof(NewConstraint)); - newcon->name = ccon->name; - newcon->contype = ccon->contype; - newcon->attnum = ccon->attnum; - /* ExecQual wants implicit-AND format */ - newcon->qual = (Node *) - make_ands_implicit((Expr *) ccon->expr); - - tab->constraints = lappend(tab->constraints, - newcon); - } - break; + /* + * Currently, we only expect to see CONSTR_CHECK nodes + * arriving here (see the preprocessing done in + * parser/analyze.c). Use a switch anyway to make it + * easier to add more code later. + */ + switch (constr->contype) + { + case CONSTR_CHECK: + { + List *newcons; + ListCell *lcon; + + /* + * Call AddRelationRawConstraints to do the + * work. It returns a list of cooked + * constraints. 
+ */ + newcons = AddRelationRawConstraints(rel, NIL, + list_make1(constr)); + /* Add each constraint to Phase 3's queue */ + foreach(lcon, newcons) + { + CookedConstraint *ccon = (CookedConstraint *) lfirst(lcon); + NewConstraint *newcon; + + newcon = (NewConstraint *) palloc0(sizeof(NewConstraint)); + newcon->name = ccon->name; + newcon->contype = ccon->contype; + newcon->attnum = ccon->attnum; + /* ExecQual wants implicit-AND format */ + newcon->qual = (Node *) + make_ands_implicit((Expr *) ccon->expr); + + tab->constraints = lappend(tab->constraints, + newcon); + } + break; + } + default: + elog(ERROR, "unrecognized constraint type: %d", + (int) constr->contype); } - default: - elog(ERROR, "unrecognized constraint type: %d", - (int) constr->contype); + break; } - break; - } case T_FkConstraint: - { - FkConstraint *fkconstraint = (FkConstraint *) newConstraint; - - /* - * Assign or validate constraint name - */ - if (fkconstraint->constr_name) { - if (ConstraintNameIsUsed(CONSTRAINT_RELATION, - RelationGetRelid(rel), - RelationGetNamespace(rel), - fkconstraint->constr_name)) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("constraint \"%s\" for relation \"%s\" already exists", - fkconstraint->constr_name, - RelationGetRelationName(rel)))); - } - else - fkconstraint->constr_name = - ChooseConstraintName(RelationGetRelationName(rel), - strVal(linitial(fkconstraint->fk_attrs)), - "fkey", - RelationGetNamespace(rel), - NIL); + FkConstraint *fkconstraint = (FkConstraint *) newConstraint; - ATAddForeignKeyConstraint(tab, rel, fkconstraint); + /* + * Assign or validate constraint name + */ + if (fkconstraint->constr_name) + { + if (ConstraintNameIsUsed(CONSTRAINT_RELATION, + RelationGetRelid(rel), + RelationGetNamespace(rel), + fkconstraint->constr_name)) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("constraint \"%s\" for relation \"%s\" already exists", + fkconstraint->constr_name, + RelationGetRelationName(rel)))); + } + else + fkconstraint->constr_name = + ChooseConstraintName(RelationGetRelationName(rel), + strVal(linitial(fkconstraint->fk_attrs)), + "fkey", + RelationGetNamespace(rel), + NIL); - break; - } + ATAddForeignKeyConstraint(tab, rel, fkconstraint); + + break; + } default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(newConstraint)); @@ -3761,12 +3773,12 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, RelationGetRelationName(rel)); /* - * Disallow reference from permanent table to temp table or vice versa. - * (The ban on perm->temp is for fairly obvious reasons. The ban on - * temp->perm is because other backends might need to run the RI triggers - * on the perm table, but they can't reliably see tuples the owning - * backend has created in the temp table, because non-shared buffers - * are used for temp tables.) + * Disallow reference from permanent table to temp table or vice + * versa. (The ban on perm->temp is for fairly obvious reasons. The + * ban on temp->perm is because other backends might need to run the + * RI triggers on the perm table, but they can't reliably see tuples + * the owning backend has created in the temp table, because + * non-shared buffers are used for temp tables.) 
*/ if (isTempNamespace(RelationGetNamespace(pkrel))) { @@ -3834,11 +3846,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * fktypoid[i] is the foreign key table's i'th key's type * * Note that we look for an operator with the PK type on the left; - * when the types are different this is critical because the PK index - * will need operators with the indexkey on the left. (Ordinarily - * both commutator operators will exist if either does, but we won't - * get the right answer from the test below on opclass membership - * unless we select the proper operator.) + * when the types are different this is critical because the PK + * index will need operators with the indexkey on the left. + * (Ordinarily both commutator operators will exist if either + * does, but we won't get the right answer from the test below on + * opclass membership unless we select the proper operator.) */ Operator o = oper(list_make1(makeString("=")), pktypoid[i], fktypoid[i], true); @@ -3851,8 +3863,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, fkconstraint->constr_name), errdetail("Key columns \"%s\" and \"%s\" " "are of incompatible types: %s and %s.", - strVal(list_nth(fkconstraint->fk_attrs, i)), - strVal(list_nth(fkconstraint->pk_attrs, i)), + strVal(list_nth(fkconstraint->fk_attrs, i)), + strVal(list_nth(fkconstraint->pk_attrs, i)), format_type_be(fktypoid[i]), format_type_be(pktypoid[i])))); @@ -3868,8 +3880,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, fkconstraint->constr_name), errdetail("Key columns \"%s\" and \"%s\" " "are of different types: %s and %s.", - strVal(list_nth(fkconstraint->fk_attrs, i)), - strVal(list_nth(fkconstraint->pk_attrs, i)), + strVal(list_nth(fkconstraint->fk_attrs, i)), + strVal(list_nth(fkconstraint->pk_attrs, i)), format_type_be(fktypoid[i]), format_type_be(pktypoid[i])))); @@ -3877,8 +3889,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, } /* - * Tell Phase 3 to check that the constraint is satisfied by existing rows - * (we can skip this during table creation). + * Tell Phase 3 to check that the constraint is satisfied by existing + * rows (we can skip this during table creation). */ if (!fkconstraint->skip_validation) { @@ -3971,10 +3983,10 @@ transformColumnNameList(Oid relId, List *colList, * transformFkeyGetPrimaryKey - * * Look up the names, attnums, and types of the primary key attributes - * for the pkrel. Also return the index OID and index opclasses of the + * for the pkrel. Also return the index OID and index opclasses of the * index supporting the primary key. * - * All parameters except pkrel are output parameters. Also, the function + * All parameters except pkrel are output parameters. Also, the function * return value is the number of attributes in the primary key. * * Used when the column list in the REFERENCES specification is omitted. 
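The per-column type check in the ATAddForeignKeyConstraint hunk above is worth a standalone look. Below is a condensed sketch assembled from the calls visible in this hunk; i, pktypoid[], fktypoid[], and fkconstraint come from the surrounding loop, and the errmsg wording is reconstructed from the truncated fragment above, so treat it as an approximation rather than the committed text:

    /*
     * Look up "=" with the PK type on the LEFT: that is the orientation
     * the PK index must support, and silently taking the commutator
     * instead could pass the opclass-membership test with an operator
     * the index cannot actually use.
     */
    Operator o = oper(list_make1(makeString("=")),
                      pktypoid[i],      /* left operand: PK column type */
                      fktypoid[i],      /* right operand: FK column type */
                      true);            /* noError --- we report our own */

    if (o == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_DATATYPE_MISMATCH),
                 errmsg("foreign key constraint \"%s\" cannot be implemented",
                        fkconstraint->constr_name),
                 errdetail("Key columns \"%s\" and \"%s\" "
                           "are of incompatible types: %s and %s.",
                           strVal(list_nth(fkconstraint->fk_attrs, i)),
                           strVal(list_nth(fkconstraint->pk_attrs, i)),
                           format_type_be(fktypoid[i]),
                           format_type_be(pktypoid[i]))));

Only after this lookup succeeds does the code test the operator for membership in the PK index's opclass, which is why selecting the properly oriented operator (rather than its commutator) matters.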
@@ -4060,7 +4072,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, static Oid transformFkeyCheckAttrs(Relation pkrel, int numattrs, int16 *attnums, - Oid *opclasses) /* output parameter */ + Oid *opclasses) /* output parameter */ { Oid indexoid = InvalidOid; bool found = false; @@ -4190,8 +4202,8 @@ validateForeignKeyConstraint(FkConstraint *fkconstraint, trig.tginitdeferred = FALSE; trig.tgargs = (char **) palloc(sizeof(char *) * - (4 + list_length(fkconstraint->fk_attrs) - + list_length(fkconstraint->pk_attrs))); + (4 + list_length(fkconstraint->fk_attrs) + + list_length(fkconstraint->pk_attrs))); trig.tgargs[0] = trig.tgname; trig.tgargs[1] = RelationGetRelationName(rel); @@ -4518,8 +4530,8 @@ ATExecDropConstraint(Relation rel, const char *constrName, /* Otherwise if more than one constraint deleted, notify */ else if (deleted > 1) ereport(NOTICE, - (errmsg("multiple constraints named \"%s\" were dropped", - constrName))); + (errmsg("multiple constraints named \"%s\" were dropped", + constrName))); } } @@ -4578,12 +4590,12 @@ ATPrepAlterColumnType(List **wqueue, CheckAttributeType(colName, targettype); /* - * Set up an expression to transform the old data value to the new type. - * If a USING option was given, transform and use that expression, else - * just take the old value and try to coerce it. We do this first so - * that type incompatibility can be detected before we waste effort, - * and because we need the expression to be parsed against the original - * table rowtype. + * Set up an expression to transform the old data value to the new + * type. If a USING option was given, transform and use that + * expression, else just take the old value and try to coerce it. We + * do this first so that type incompatibility can be detected before + * we waste effort, and because we need the expression to be parsed + * against the original table rowtype. */ if (cmd->transform) { @@ -4592,7 +4604,7 @@ ATPrepAlterColumnType(List **wqueue, /* Expression must be able to access vars of old table */ rte = addRangeTableEntryForRelation(pstate, RelationGetRelid(rel), - makeAlias(RelationGetRelationName(rel), NIL), + makeAlias(RelationGetRelationName(rel), NIL), false, true); addRTEtoQuery(pstate, rte, false, true); @@ -4603,13 +4615,13 @@ ATPrepAlterColumnType(List **wqueue, if (expression_returns_set(transform)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("transform expression must not return a set"))); + errmsg("transform expression must not return a set"))); /* No subplans or aggregates, either... */ if (pstate->p_hasSubLinks) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use subquery in transform expression"))); + errmsg("cannot use subquery in transform expression"))); if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), @@ -4646,9 +4658,9 @@ ATPrepAlterColumnType(List **wqueue, ReleaseSysCache(tuple); /* - * The recursion case is handled by ATSimpleRecursion. However, - * if we are told not to recurse, there had better not be any - * child tables; else the alter would put them out of step. + * The recursion case is handled by ATSimpleRecursion. However, if we + * are told not to recurse, there had better not be any child tables; + * else the alter would put them out of step. 
*/ if (recurse) ATSimpleRecursion(wqueue, rel, cmd, recurse); @@ -4683,15 +4695,15 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, heapTup = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); if (!HeapTupleIsValid(heapTup)) /* shouldn't happen */ ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - colName, RelationGetRelationName(rel)))); + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" of relation \"%s\" does not exist", + colName, RelationGetRelationName(rel)))); attTup = (Form_pg_attribute) GETSTRUCT(heapTup); attnum = attTup->attnum; /* Check for multiple ALTER TYPE on same column --- can't cope */ - if (attTup->atttypid != tab->oldDesc->attrs[attnum-1]->atttypid || - attTup->atttypmod != tab->oldDesc->attrs[attnum-1]->atttypmod) + if (attTup->atttypid != tab->oldDesc->attrs[attnum - 1]->atttypid || + attTup->atttypmod != tab->oldDesc->attrs[attnum - 1]->atttypmod) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter type of column \"%s\" twice", @@ -4713,8 +4725,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, { defaultexpr = build_column_default(rel, attnum); Assert(defaultexpr); - defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */ - defaultexpr, exprType(defaultexpr), + defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */ + defaultexpr, exprType(defaultexpr), targettype, typename->typmod, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST); @@ -4728,18 +4740,18 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, defaultexpr = NULL; /* - * Find everything that depends on the column (constraints, indexes, etc), - * and record enough information to let us recreate the objects. + * Find everything that depends on the column (constraints, indexes, + * etc), and record enough information to let us recreate the objects. * * The actual recreation does not happen here, but only after we have - * performed all the individual ALTER TYPE operations. We have to save - * the info before executing ALTER TYPE, though, else the deparser will - * get confused. + * performed all the individual ALTER TYPE operations. We have to + * save the info before executing ALTER TYPE, though, else the + * deparser will get confused. * * There could be multiple entries for the same object, so we must check - * to ensure we process each one only once. Note: we assume that an index - * that implements a constraint will not show a direct dependency on the - * column. + * to ensure we process each one only once. Note: we assume that an + * index that implements a constraint will not show a direct + * dependency on the column. 
*/ depRel = heap_openr(DependRelationName, RowExclusiveLock); @@ -4761,8 +4773,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, while (HeapTupleIsValid(depTup = systable_getnext(scan))) { - Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup); - ObjectAddress foundObject; + Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup); + ObjectAddress foundObject; /* We don't expect any PIN dependencies on columns */ if (foundDep->deptype == DEPENDENCY_PIN) @@ -4775,45 +4787,45 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, switch (getObjectClass(&foundObject)) { case OCLASS_CLASS: - { - char relKind = get_rel_relkind(foundObject.objectId); - - if (relKind == RELKIND_INDEX) { - Assert(foundObject.objectSubId == 0); - if (!list_member_oid(tab->changedIndexOids, foundObject.objectId)) + char relKind = get_rel_relkind(foundObject.objectId); + + if (relKind == RELKIND_INDEX) { - tab->changedIndexOids = lappend_oid(tab->changedIndexOids, - foundObject.objectId); - tab->changedIndexDefs = lappend(tab->changedIndexDefs, - pg_get_indexdef_string(foundObject.objectId)); + Assert(foundObject.objectSubId == 0); + if (!list_member_oid(tab->changedIndexOids, foundObject.objectId)) + { + tab->changedIndexOids = lappend_oid(tab->changedIndexOids, + foundObject.objectId); + tab->changedIndexDefs = lappend(tab->changedIndexDefs, + pg_get_indexdef_string(foundObject.objectId)); + } } + else if (relKind == RELKIND_SEQUENCE) + { + /* + * This must be a SERIAL column's sequence. We + * need not do anything to it. + */ + Assert(foundObject.objectSubId == 0); + } + else + { + /* Not expecting any other direct dependencies... */ + elog(ERROR, "unexpected object depending on column: %s", + getObjectDescription(&foundObject)); + } + break; } - else if (relKind == RELKIND_SEQUENCE) - { - /* - * This must be a SERIAL column's sequence. We need not - * do anything to it. - */ - Assert(foundObject.objectSubId == 0); - } - else - { - /* Not expecting any other direct dependencies... */ - elog(ERROR, "unexpected object depending on column: %s", - getObjectDescription(&foundObject)); - } - break; - } case OCLASS_CONSTRAINT: Assert(foundObject.objectSubId == 0); if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId)) { tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, - foundObject.objectId); + foundObject.objectId); tab->changedConstraintDefs = lappend(tab->changedConstraintDefs, - pg_get_constraintdef_string(foundObject.objectId)); + pg_get_constraintdef_string(foundObject.objectId)); } break; @@ -4828,9 +4840,10 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, break; case OCLASS_DEFAULT: + /* - * Ignore the column's default expression, since we will fix - * it below. + * Ignore the column's default expression, since we will + * fix it below. */ Assert(defaultexpr); break; @@ -4844,6 +4857,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, case OCLASS_OPCLASS: case OCLASS_TRIGGER: case OCLASS_SCHEMA: + /* * We don't expect any of these sorts of objects to depend * on a column. 
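One idiom in the dependency walk above deserves emphasis: pg_depend can surface the same dependent object more than once, so each switch arm records an object only on first sight, and it deparses the object's definition immediately, while the catalogs still describe the old column type. A condensed sketch of the OCLASS_CONSTRAINT arm, using only calls visible in this hunk:

    /*
     * Remember each dependent constraint exactly once.  The OID list
     * guards against duplicate pg_depend entries; the SQL definition is
     * captured NOW, because deparsing after the column's type has been
     * changed (or after the constraint has been dropped) would confuse
     * the deparser, as noted above.
     */
    if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId))
    {
        tab->changedConstraintOids =
            lappend_oid(tab->changedConstraintOids, foundObject.objectId);
        tab->changedConstraintDefs =
            lappend(tab->changedConstraintDefs,
                    pg_get_constraintdef_string(foundObject.objectId));
    }

The saved definition strings are exactly what ATPostAlterTypeCleanup re-parses in the following hunks to rebuild the constraints and indexes once all the ALTER TYPE subcommands have run.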
@@ -4883,7 +4897,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, while (HeapTupleIsValid(depTup = systable_getnext(scan))) { - Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup); + Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup); if (foundDep->deptype != DEPENDENCY_NORMAL) elog(ERROR, "found unexpected dependency type '%c'", @@ -4900,8 +4914,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, heap_close(depRel, RowExclusiveLock); /* - * Here we go --- change the recorded column type. (Note heapTup is - * a copy of the syscache entry, so okay to scribble on.) + * Here we go --- change the recorded column type. (Note heapTup is a + * copy of the syscache entry, so okay to scribble on.) */ attTup->atttypid = targettype; attTup->atttypmod = typename->typmod; @@ -4923,15 +4937,18 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, /* Install dependency on new datatype */ add_column_datatype_dependency(RelationGetRelid(rel), attnum, targettype); - /* Drop any pg_statistic entry for the column, since it's now wrong type */ + /* + * Drop any pg_statistic entry for the column, since it's now wrong + * type + */ RemoveStatistics(RelationGetRelid(rel), attnum); /* - * Update the default, if present, by brute force --- remove and re-add - * the default. Probably unsafe to take shortcuts, since the new version - * may well have additional dependencies. (It's okay to do this now, - * rather than after other ALTER TYPE commands, since the default won't - * depend on other column types.) + * Update the default, if present, by brute force --- remove and + * re-add the default. Probably unsafe to take shortcuts, since the + * new version may well have additional dependencies. (It's okay to + * do this now, rather than after other ALTER TYPE commands, since the + * default won't depend on other column types.) */ if (defaultexpr) { @@ -4939,8 +4956,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, CommandCounterIncrement(); /* - * We use RESTRICT here for safety, but at present we do not expect - * anything to depend on the default. + * We use RESTRICT here for safety, but at present we do not + * expect anything to depend on the default. */ RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true); @@ -4960,31 +4977,26 @@ static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab) { ObjectAddress obj; - ListCell *l; + ListCell *l; /* * Re-parse the index and constraint definitions, and attach them to - * the appropriate work queue entries. We do this before dropping + * the appropriate work queue entries. We do this before dropping * because in the case of a FOREIGN KEY constraint, we might not yet - * have exclusive lock on the table the constraint is attached to, - * and we need to get that before dropping. It's safe because the - * parser won't actually look at the catalogs to detect the existing - * entry. + * have exclusive lock on the table the constraint is attached to, and + * we need to get that before dropping. It's safe because the parser + * won't actually look at the catalogs to detect the existing entry. */ foreach(l, tab->changedIndexDefs) - { ATPostAlterTypeParse((char *) lfirst(l), wqueue); - } foreach(l, tab->changedConstraintDefs) - { ATPostAlterTypeParse((char *) lfirst(l), wqueue); - } /* - * Now we can drop the existing constraints and indexes --- constraints - * first, since some of them might depend on the indexes. 
It should be - * okay to use DROP_RESTRICT here, since nothing else should be depending - * on these objects. + * Now we can drop the existing constraints and indexes --- + * constraints first, since some of them might depend on the indexes. + * It should be okay to use DROP_RESTRICT here, since nothing else + * should be depending on these objects. */ if (tab->changedConstraintOids) obj.classId = get_system_catalog_relid(ConstraintRelationName); @@ -5017,8 +5029,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue) ListCell *list_item; /* - * We expect that we only have to do raw parsing and parse analysis, not - * any rule rewriting, since these will all be utility statements. + * We expect that we only have to do raw parsing and parse analysis, + * not any rule rewriting, since these will all be utility statements. */ raw_parsetree_list = raw_parser(cmd); querytree_list = NIL; @@ -5027,12 +5039,13 @@ ATPostAlterTypeParse(char *cmd, List **wqueue) Node *parsetree = (Node *) lfirst(list_item); querytree_list = list_concat(querytree_list, - parse_analyze(parsetree, NULL, 0)); + parse_analyze(parsetree, NULL, 0)); } /* - * Attach each generated command to the proper place in the work queue. - * Note this could result in creation of entirely new work-queue entries. + * Attach each generated command to the proper place in the work + * queue. Note this could result in creation of entirely new + * work-queue entries. */ foreach(list_item, querytree_list) { @@ -5045,50 +5058,50 @@ ATPostAlterTypeParse(char *cmd, List **wqueue) switch (nodeTag(query->utilityStmt)) { case T_IndexStmt: - { - IndexStmt *stmt = (IndexStmt *) query->utilityStmt; - AlterTableCmd *newcmd; - - rel = relation_openrv(stmt->relation, AccessExclusiveLock); - tab = ATGetQueueEntry(wqueue, rel); - newcmd = makeNode(AlterTableCmd); - newcmd->subtype = AT_ReAddIndex; - newcmd->def = (Node *) stmt; - tab->subcmds[AT_PASS_OLD_INDEX] = - lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd); - relation_close(rel, NoLock); - break; - } + { + IndexStmt *stmt = (IndexStmt *) query->utilityStmt; + AlterTableCmd *newcmd; + + rel = relation_openrv(stmt->relation, AccessExclusiveLock); + tab = ATGetQueueEntry(wqueue, rel); + newcmd = makeNode(AlterTableCmd); + newcmd->subtype = AT_ReAddIndex; + newcmd->def = (Node *) stmt; + tab->subcmds[AT_PASS_OLD_INDEX] = + lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd); + relation_close(rel, NoLock); + break; + } case T_AlterTableStmt: - { - AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; - ListCell *lcmd; - - rel = relation_openrv(stmt->relation, AccessExclusiveLock); - tab = ATGetQueueEntry(wqueue, rel); - foreach(lcmd, stmt->cmds) { - AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; + ListCell *lcmd; - switch (cmd->subtype) + rel = relation_openrv(stmt->relation, AccessExclusiveLock); + tab = ATGetQueueEntry(wqueue, rel); + foreach(lcmd, stmt->cmds) { - case AT_AddIndex: - cmd->subtype = AT_ReAddIndex; - tab->subcmds[AT_PASS_OLD_INDEX] = - lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd); - break; - case AT_AddConstraint: - tab->subcmds[AT_PASS_OLD_CONSTR] = - lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); - break; - default: - elog(ERROR, "unexpected statement type: %d", - (int) cmd->subtype); + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + + switch (cmd->subtype) + { + case AT_AddIndex: + cmd->subtype = AT_ReAddIndex; + tab->subcmds[AT_PASS_OLD_INDEX] = + lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd); + break; + 
case AT_AddConstraint: + tab->subcmds[AT_PASS_OLD_CONSTR] = + lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); + break; + default: + elog(ERROR, "unexpected statement type: %d", + (int) cmd->subtype); + } } + relation_close(rel, NoLock); + break; } - relation_close(rel, NoLock); - break; - } default: elog(ERROR, "unexpected statement type: %d", (int) nodeTag(query->utilityStmt)); @@ -5116,8 +5129,8 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) class_rel = heap_openr(RelationRelationName, RowExclusiveLock); tuple = SearchSysCache(RELOID, - ObjectIdGetDatum(relationOid), - 0, 0, 0); + ObjectIdGetDatum(relationOid), + 0, 0, 0); if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for relation %u", relationOid); tuple_class = (Form_pg_class) GETSTRUCT(tuple); @@ -5139,7 +5152,7 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) NameStr(tuple_class->relname)))); } - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -5148,7 +5161,7 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) Datum repl_val[Natts_pg_class]; char repl_null[Natts_pg_class]; char repl_repl[Natts_pg_class]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -5156,8 +5169,8 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) /* Otherwise, check that we are the superuser */ if (!superuser()) ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to change owner"))); + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to change owner"))); memset(repl_null, ' ', sizeof(repl_null)); memset(repl_repl, ' ', sizeof(repl_repl)); @@ -5188,9 +5201,9 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) heap_freetuple(newtuple); /* - * If we are operating on a table, also change the ownership of any - * indexes that belong to the table, as well as the table's toast - * table (if it has one) + * If we are operating on a table, also change the ownership of + * any indexes that belong to the table, as well as the table's + * toast table (if it has one) */ if (tuple_class->relkind == RELKIND_RELATION || tuple_class->relkind == RELKIND_TOASTVALUE) @@ -5265,7 +5278,7 @@ static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename) { Oid tablespaceId; - AclResult aclresult; + AclResult aclresult; /* * We do our own permission checking because we want to allow this on @@ -5294,7 +5307,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename) if (!OidIsValid(tablespaceId)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace \"%s\" does not exist", tablespacename))); + errmsg("tablespace \"%s\" does not exist", tablespacename))); /* Check its permissions */ aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE); @@ -5305,7 +5318,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename) if (OidIsValid(tab->newTableSpace)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("multiple SET TABLESPACE subcommands are not valid"))); + errmsg("multiple SET TABLESPACE subcommands are not valid"))); tab->newTableSpace = tablespaceId; } @@ -5339,13 +5352,13 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace) RelationGetRelationName(rel)))); /* - * Don't allow moving temp tables of other backends ... their - * local buffer manager is not going to cope. 
+ * Don't allow moving temp tables of other backends ... their local + * buffer manager is not going to cope. */ if (isOtherTempNamespace(RelationGetNamespace(rel))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move temporary tables of other sessions"))); + errmsg("cannot move temporary tables of other sessions"))); /* * No work if no change in tablespace. @@ -5425,14 +5438,15 @@ copy_relation_data(Relation rel, SMgrRelation dst) bool use_wal; BlockNumber nblocks; BlockNumber blkno; - char buf[BLCKSZ]; + char buf[BLCKSZ]; Page page = (Page) buf; /* - * Since we copy the data directly without looking at the shared buffers, - * we'd better first flush out any pages of the source relation that are - * in shared buffers. We assume no new pages will get loaded into - * buffers while we are holding exclusive lock on the rel. + * Since we copy the data directly without looking at the shared + * buffers, we'd better first flush out any pages of the source + * relation that are in shared buffers. We assume no new pages will + * get loaded into buffers while we are holding exclusive lock on the + * rel. */ FlushRelationBuffers(rel, 0); @@ -5479,7 +5493,7 @@ copy_relation_data(Relation rel, SMgrRelation dst) } /* - * Now write the page. We say isTemp = true even if it's not a + * Now write the page. We say isTemp = true even if it's not a * temp rel, because there's no need for smgr to schedule an fsync * for this write; we'll do it ourselves below. */ @@ -5488,18 +5502,18 @@ copy_relation_data(Relation rel, SMgrRelation dst) /* * If the rel isn't temp, we must fsync it down to disk before it's - * safe to commit the transaction. (For a temp rel we don't care + * safe to commit the transaction. (For a temp rel we don't care * since the rel will be uninteresting after a crash anyway.) * - * It's obvious that we must do this when not WAL-logging the copy. - * It's less obvious that we have to do it even if we did WAL-log the + * It's obvious that we must do this when not WAL-logging the copy. It's + * less obvious that we have to do it even if we did WAL-log the * copied pages. The reason is that since we're copying outside * shared buffers, a CHECKPOINT occurring during the copy has no way * to flush the previously written data to disk (indeed it won't know - * the new rel even exists). A crash later on would replay WAL from the - * checkpoint, therefore it wouldn't replay our earlier WAL entries. - * If we do not fsync those pages here, they might still not be on disk - * when the crash occurs. + * the new rel even exists). A crash later on would replay WAL from + * the checkpoint, therefore it wouldn't replay our earlier WAL + * entries. If we do not fsync those pages here, they might still not + * be on disk when the crash occurs. */ if (!rel->rd_istemp) smgrimmedsync(dst); @@ -5510,7 +5524,7 @@ copy_relation_data(Relation rel, SMgrRelation dst) * * Note: this is also invoked from outside this module; in such cases we * expect the caller to have verified that the relation is a table and we - * have all the right permissions. Callers expect this function + * have all the right permissions. Callers expect this function * to end with CommandCounterIncrement if it makes any changes. */ void @@ -5532,8 +5546,8 @@ AlterTableCreateToastTable(Oid relOid, bool silent) /* * Grab an exclusive lock on the target table, which we will NOT - * release until end of transaction. (This is probably redundant - * in all present uses...) + * release until end of transaction. 
(This is probably redundant in + * all present uses...) */ rel = heap_open(relOid, AccessExclusiveLock); @@ -5543,15 +5557,15 @@ AlterTableCreateToastTable(Oid relOid, bool silent) * We cannot allow toasting a shared relation after initdb (because * there's no way to mark it toasted in other databases' pg_class). * Unfortunately we can't distinguish initdb from a manually started - * standalone backend (toasting happens after the bootstrap phase, - * so checking IsBootstrapProcessingMode() won't work). However, we can + * standalone backend (toasting happens after the bootstrap phase, so + * checking IsBootstrapProcessingMode() won't work). However, we can * at least prevent this mistake under normal multi-user operation. */ shared_relation = rel->rd_rel->relisshared; if (shared_relation && IsUnderPostmaster) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("shared tables cannot be toasted after initdb"))); + errmsg("shared tables cannot be toasted after initdb"))); /* * Is it already toasted? @@ -5894,8 +5908,8 @@ PreCommit_on_commit_actions(void) void AtEOXact_on_commit_actions(bool isCommit, TransactionId xid) { - ListCell *cur_item; - ListCell *prev_item; + ListCell *cur_item; + ListCell *prev_item; prev_item = NULL; cur_item = list_head(on_commits); @@ -5930,15 +5944,15 @@ AtEOXact_on_commit_actions(bool isCommit, TransactionId xid) * Post-subcommit or post-subabort cleanup for ON COMMIT management. * * During subabort, we can immediately remove entries created during this - * subtransaction. During subcommit, just relabel entries marked during + * subtransaction. During subcommit, just relabel entries marked during * this subtransaction as being the parent's responsibility. */ void AtEOSubXact_on_commit_actions(bool isCommit, TransactionId childXid, TransactionId parentXid) { - ListCell *cur_item; - ListCell *prev_item; + ListCell *cur_item; + ListCell *prev_item; prev_item = NULL; cur_item = list_head(on_commits); diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 05a13315a1..15fe839288 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -35,7 +35,7 @@ * To allow CREATE DATABASE to give a new database a default tablespace * that's different from the template database's default, we make the * provision that a zero in pg_class.reltablespace means the database's - * default tablespace. Without this, CREATE DATABASE would have to go in + * default tablespace. Without this, CREATE DATABASE would have to go in * and munge the system catalogs of the new database. This special meaning * of zero also applies in pg_namespace.nsptablespace. * @@ -45,7 +45,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.8 2004/08/08 01:31:11 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.9 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -95,11 +95,11 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) { #ifdef HAVE_SYMLINK struct stat st; - char *dir; + char *dir; /* - * The global tablespace doesn't have per-database subdirectories, - * so nothing to do for it. + * The global tablespace doesn't have per-database subdirectories, so + * nothing to do for it. */ if (spcNode == GLOBALTABLESPACE_OID) return; @@ -118,7 +118,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) * DROP TABLESPACE or TablespaceCreateDbspace is running * concurrently. 
Simple reads from pg_tablespace are OK. */ - Relation rel; + Relation rel; if (!isRedo) rel = heap_openr(TableSpaceRelationName, ExclusiveLock); @@ -126,8 +126,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) rel = NULL; /* - * Recheck to see if someone created the directory while - * we were waiting for lock. + * Recheck to see if someone created the directory while we + * were waiting for lock. */ if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode)) { @@ -139,8 +139,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) if (mkdir(dir, S_IRWXU) < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", - dir))); + errmsg("could not create directory \"%s\": %m", + dir))); } /* OK to drop the exclusive lock */ @@ -165,7 +165,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) } pfree(dir); -#endif /* HAVE_SYMLINK */ +#endif /* HAVE_SYMLINK */ } /* @@ -179,13 +179,13 @@ void CreateTableSpace(CreateTableSpaceStmt *stmt) { #ifdef HAVE_SYMLINK - Relation rel; - Datum values[Natts_pg_tablespace]; + Relation rel; + Datum values[Natts_pg_tablespace]; char nulls[Natts_pg_tablespace]; HeapTuple tuple; Oid tablespaceoid; - char *location; - char *linkloc; + char *location; + char *linkloc; AclId ownerid; /* validate */ @@ -196,10 +196,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) /* Must be super user */ if (!superuser()) ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied to create tablespace \"%s\"", - stmt->tablespacename), - errhint("Must be superuser to create a tablespace."))); + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied to create tablespace \"%s\"", + stmt->tablespacename), + errhint("Must be superuser to create a tablespace."))); /* However, the eventual owner of the tablespace need not be */ if (stmt->owner) @@ -218,7 +218,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) if (strchr(location, '\'')) ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), - errmsg("tablespace location may not contain single quotes"))); + errmsg("tablespace location may not contain single quotes"))); /* * Allowing relative paths seems risky @@ -231,9 +231,9 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) errmsg("tablespace location must be an absolute path"))); /* - * Check that location isn't too long. Remember that we're going to append - * '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole path - * explicitly? This may be overly conservative.) + * Check that location isn't too long. Remember that we're going to + * append '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole + * path explicitly? This may be overly conservative.) */ if (strlen(location) >= (MAXPGPATH - 1 - 10 - 1 - 10 - 1 - 10)) ereport(ERROR, @@ -250,12 +250,12 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) (errcode(ERRCODE_RESERVED_NAME), errmsg("unacceptable tablespace name \"%s\"", stmt->tablespacename), - errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); + errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); /* - * Check that there is no other tablespace by this name. (The - * unique index would catch this anyway, but might as well give - * a friendlier message.) + * Check that there is no other tablespace by this name. (The unique + * index would catch this anyway, but might as well give a friendlier + * message.) 
*/ if (OidIsValid(get_tablespace_oid(stmt->tablespacename))) ereport(ERROR, @@ -293,14 +293,14 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) heap_freetuple(tuple); /* - * Attempt to coerce target directory to safe permissions. If this + * Attempt to coerce target directory to safe permissions. If this * fails, it doesn't exist or has the wrong owner. */ if (chmod(location, 0700) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not set permissions on directory \"%s\": %m", - location))); + errmsg("could not set permissions on directory \"%s\": %m", + location))); /* * Check the target directory is empty. @@ -312,10 +312,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) location))); /* - * Create the PG_VERSION file in the target directory. This has several - * purposes: to make sure we can write in the directory, to prevent - * someone from creating another tablespace pointing at the same - * directory (the emptiness check above will fail), and to label + * Create the PG_VERSION file in the target directory. This has + * several purposes: to make sure we can write in the directory, to + * prevent someone from creating another tablespace pointing at the + * same directory (the emptiness check above will fail), and to label * tablespace directories by PG version. */ set_short_version(location); @@ -337,11 +337,11 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) heap_close(rel, RowExclusiveLock); -#else /* !HAVE_SYMLINK */ +#else /* !HAVE_SYMLINK */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("tablespaces are not supported on this platform"))); -#endif /* HAVE_SYMLINK */ +#endif /* HAVE_SYMLINK */ } /* @@ -353,23 +353,24 @@ void DropTableSpace(DropTableSpaceStmt *stmt) { #ifdef HAVE_SYMLINK - char *tablespacename = stmt->tablespacename; - HeapScanDesc scandesc; - Relation rel; - HeapTuple tuple; - ScanKeyData entry[1]; - char *location; - Oid tablespaceoid; - DIR *dirdesc; + char *tablespacename = stmt->tablespacename; + HeapScanDesc scandesc; + Relation rel; + HeapTuple tuple; + ScanKeyData entry[1]; + char *location; + Oid tablespaceoid; + DIR *dirdesc; struct dirent *de; - char *subfile; + char *subfile; /* don't call this in a transaction block */ PreventTransactionChain((void *) stmt, "DROP TABLESPACE"); /* * Acquire ExclusiveLock on pg_tablespace to ensure that no one else - * is trying to do DROP TABLESPACE or TablespaceCreateDbspace concurrently. + * is trying to do DROP TABLESPACE or TablespaceCreateDbspace + * concurrently. */ rel = heap_openr(TableSpaceRelationName, ExclusiveLock); @@ -409,15 +410,15 @@ DropTableSpace(DropTableSpaceStmt *stmt) /* * Check if the tablespace still contains any files. We try to rmdir * each per-database directory we find in it. rmdir failure implies - * there are still files in that subdirectory, so give up. (We do not - * have to worry about undoing any already completed rmdirs, since - * the next attempt to use the tablespace from that database will simply + * there are still files in that subdirectory, so give up. (We do not + * have to worry about undoing any already completed rmdirs, since the + * next attempt to use the tablespace from that database will simply * recreate the subdirectory via TablespaceCreateDbspace.) * - * Since we hold exclusive lock, no one else should be creating any - * fresh subdirectories in parallel. It is possible that new files - * are being created within subdirectories, though, so the rmdir - * call could fail. Worst consequence is a less friendly error message. 
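The length check in CreateTableSpace reserves room for the worst-case suffix '/<dboid>/<relid>.<nnn>': three separator characters plus three numbers of up to ten decimal digits each, 33 characters in all, plus the terminating NUL. The same arithmetic in isolation, with MAXPGPATH assumed at its customary value:

#include <string.h>

#define MAXPGPATH 1024          /* assumption: PostgreSQL's usual value */

/*
 * '/' + dboid (<= 10 digits) + '/' + relid (<= 10 digits) + '.' +
 * segment number (<= 10 digits) = 33 characters, so the location itself
 * may occupy at most MAXPGPATH - 34 bytes once the NUL is counted.
 */
static int
location_fits(const char *location)
{
    return strlen(location) < (MAXPGPATH - 1 - 10 - 1 - 10 - 1 - 10);
}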
+ * Since we hold exclusive lock, no one else should be creating any fresh + * subdirectories in parallel. It is possible that new files are + * being created within subdirectories, though, so the rmdir call + * could fail. Worst consequence is a less friendly error message. */ dirdesc = AllocateDir(location); if (dirdesc == NULL) @@ -458,8 +459,11 @@ DropTableSpace(DropTableSpaceStmt *stmt) pfree(subfile); } #ifdef WIN32 - /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but - not in released version */ + + /* + * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but + * not in released version + */ if (GetLastError() == ERROR_NO_MORE_FILES) errno = 0; #endif @@ -494,15 +498,15 @@ DropTableSpace(DropTableSpaceStmt *stmt) ereport(ERROR, (errcode_for_file_access(), errmsg("could not remove junction dir \"%s\": %m", - location))); + location))); #endif pfree(subfile); pfree(location); /* - * We have successfully destroyed the infrastructure ... there is - * now no way to roll back the DROP ... so proceed to remove the + * We have successfully destroyed the infrastructure ... there is now + * no way to roll back the DROP ... so proceed to remove the * pg_tablespace tuple. */ simple_heap_delete(rel, &tuple->t_self); @@ -511,11 +515,11 @@ DropTableSpace(DropTableSpaceStmt *stmt) heap_close(rel, ExclusiveLock); -#else /* !HAVE_SYMLINK */ +#else /* !HAVE_SYMLINK */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("tablespaces are not supported on this platform"))); -#endif /* HAVE_SYMLINK */ +#endif /* HAVE_SYMLINK */ } @@ -579,7 +583,7 @@ set_short_version(const char *path) static bool directory_is_empty(const char *path) { - DIR *dirdesc; + DIR *dirdesc; struct dirent *de; dirdesc = AllocateDir(path); @@ -602,8 +606,11 @@ directory_is_empty(const char *path) return false; } #ifdef WIN32 - /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but - not in released version */ + + /* + * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but + * not in released version + */ if (GetLastError() == ERROR_NO_MORE_FILES) errno = 0; #endif @@ -624,11 +631,11 @@ directory_is_empty(const char *path) Oid get_tablespace_oid(const char *tablespacename) { - Oid result; - Relation rel; + Oid result; + Relation rel; HeapScanDesc scandesc; HeapTuple tuple; - ScanKeyData entry[1]; + ScanKeyData entry[1]; /* Search pg_tablespace */ rel = heap_openr(TableSpaceRelationName, AccessShareLock); @@ -645,8 +652,8 @@ get_tablespace_oid(const char *tablespacename) else result = InvalidOid; - heap_endscan(scandesc); - heap_close(rel, AccessShareLock); + heap_endscan(scandesc); + heap_close(rel, AccessShareLock); return result; } @@ -659,11 +666,11 @@ get_tablespace_oid(const char *tablespacename) char * get_tablespace_name(Oid spc_oid) { - char *result; - Relation rel; + char *result; + Relation rel; HeapScanDesc scandesc; HeapTuple tuple; - ScanKeyData entry[1]; + ScanKeyData entry[1]; /* Search pg_tablespace */ rel = heap_openr(TableSpaceRelationName, AccessShareLock); @@ -681,8 +688,8 @@ get_tablespace_name(Oid spc_oid) else result = NULL; - heap_endscan(scandesc); - heap_close(rel, AccessShareLock); + heap_endscan(scandesc); + heap_close(rel, AccessShareLock); return result; } @@ -693,8 +700,8 @@ get_tablespace_name(Oid spc_oid) void RenameTableSpace(const char *oldname, const char *newname) { - Relation rel; - ScanKeyData entry[1]; + Relation rel; + ScanKeyData entry[1]; HeapScanDesc scan; HeapTuple tup; HeapTuple newtuple; @@ -729,7 +736,7 @@ 
RenameTableSpace(const char *oldname, const char *newname) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("unacceptable tablespace name \"%s\"", newname), - errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); + errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); /* Make sure the new name doesn't exist */ ScanKeyInit(&entry[0], @@ -743,7 +750,7 @@ RenameTableSpace(const char *oldname, const char *newname) (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("tablespace \"%s\" already exists", newname))); - + heap_endscan(scan); /* OK, update the entry */ @@ -761,8 +768,8 @@ RenameTableSpace(const char *oldname, const char *newname) void AlterTableSpaceOwner(const char *name, AclId newOwnerSysId) { - Relation rel; - ScanKeyData entry[1]; + Relation rel; + ScanKeyData entry[1]; HeapScanDesc scandesc; Form_pg_tablespace spcForm; HeapTuple tup; @@ -783,7 +790,7 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId) spcForm = (Form_pg_tablespace) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -792,7 +799,7 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId) Datum repl_val[Natts_pg_tablespace]; char repl_null[Natts_pg_tablespace]; char repl_repl[Natts_pg_tablespace]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -814,9 +821,9 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId) * necessary when the ACL is non-null. */ aclDatum = heap_getattr(tup, - Anum_pg_tablespace_spcacl, - RelationGetDescr(rel), - &isNull); + Anum_pg_tablespace_spcacl, + RelationGetDescr(rel), + &isNull); if (!isNull) { newAcl = aclnewowner(DatumGetAclP(aclDatum), diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index dfc8098782..7e73f6b000 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.167 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.168 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -480,8 +480,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior) if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("trigger \"%s\" for table \"%s\" does not exist", - trigname, get_rel_name(relid)))); + errmsg("trigger \"%s\" for table \"%s\" does not exist", + trigname, get_rel_name(relid)))); if (!pg_class_ownercheck(relid, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, @@ -694,8 +694,8 @@ renametrig(Oid relid, { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("trigger \"%s\" for table \"%s\" does not exist", - oldname, RelationGetRelationName(targetrel)))); + errmsg("trigger \"%s\" for table \"%s\" does not exist", + oldname, RelationGetRelationName(targetrel)))); } systable_endscan(tgscan); @@ -1638,7 +1638,7 @@ ltrmark:; * Deferred trigger stuff * * The DeferredTriggersData struct holds data about pending deferred - * trigger events during the current transaction tree. The struct and + * trigger events during the current transaction tree. 
The struct and * most of its subsidiary data are kept in TopTransactionContext; however * the individual event records are kept in CurTransactionContext, so that * they will easily go away during subtransaction abort. @@ -1670,7 +1670,7 @@ ltrmark:; * saves a copy, which we use to restore the state if we abort. * * numpushed and numalloc keep control of allocation and storage in the above - * stacks. numpushed is essentially the current subtransaction nesting depth. + * stacks. numpushed is essentially the current subtransaction nesting depth. * * XXX We need to be able to save the per-event data in a file if it grows too * large. @@ -1723,11 +1723,11 @@ typedef struct DeferredTriggerStatusData *DeferredTriggerStatus; */ typedef struct DeferredTriggerStateData { - bool all_isset; - bool all_isdeferred; - int numstates; /* number of trigstates[] entries in use */ - int numalloc; /* allocated size of trigstates[] */ - DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */ + bool all_isset; + bool all_isdeferred; + int numstates; /* number of trigstates[] entries in use */ + int numalloc; /* allocated size of trigstates[] */ + DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */ } DeferredTriggerStateData; typedef DeferredTriggerStateData *DeferredTriggerState; @@ -1735,15 +1735,15 @@ typedef DeferredTriggerStateData *DeferredTriggerState; /* Per-transaction data */ typedef struct DeferredTriggersData { - DeferredTriggerState state; - DeferredTriggerEvent events; - DeferredTriggerEvent tail_thisxact; - DeferredTriggerEvent events_imm; - DeferredTriggerEvent *tail_stack; - DeferredTriggerEvent *imm_stack; - DeferredTriggerState *state_stack; - int numpushed; - int numalloc; + DeferredTriggerState state; + DeferredTriggerEvent events; + DeferredTriggerEvent tail_thisxact; + DeferredTriggerEvent events_imm; + DeferredTriggerEvent *tail_stack; + DeferredTriggerEvent *imm_stack; + DeferredTriggerState *state_stack; + int numpushed; + int numalloc; } DeferredTriggersData; typedef DeferredTriggersData *DeferredTriggers; @@ -1757,7 +1757,7 @@ static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno, static DeferredTriggerState DeferredTriggerStateCreate(int numalloc); static DeferredTriggerState DeferredTriggerStateCopy(DeferredTriggerState state); static DeferredTriggerState DeferredTriggerStateAddItem(DeferredTriggerState state, - Oid tgoid, bool tgisdeferred); + Oid tgoid, bool tgisdeferred); /* ---------- @@ -1770,8 +1770,8 @@ static DeferredTriggerState DeferredTriggerStateAddItem(DeferredTriggerState sta static bool deferredTriggerCheckState(Oid tgoid, int32 itemstate) { - bool tgisdeferred; - int i; + bool tgisdeferred; + int i; /* * For not-deferrable triggers (i.e. normal AFTER ROW triggers and @@ -1798,7 +1798,8 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate) /* * No ALL state known either, remember the default state as the - * current and return that. (XXX why do we bother making a state entry?) + * current and return that. (XXX why do we bother making a state + * entry?) */ tgisdeferred = ((itemstate & TRIGGER_DEFERRED_INITDEFERRED) != 0); deferredTriggers->state = @@ -1982,8 +1983,8 @@ deferredTriggerInvokeEvents(bool immediate_only) /* * If immediate_only is true, then the only events that could need - * firing are those since events_imm. (But if - * events_imm is NULL, we must scan the entire list.) + * firing are those since events_imm. (But if events_imm is NULL, we + * must scan the entire list.) 
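DeferredTriggerStateData above ends in a one-element trigstates[1] array, the pre-C99 idiom for a struct with a variable-length tail, which is why every allocation of it asks for (n - 1) extra elements. A standalone sketch of the idiom; Status and State are illustrative names, and calloc stands in for allocation in TopTransactionContext:

#include <stdlib.h>

typedef struct Status
{
    unsigned oid;
    int      deferred;
} Status;

typedef struct State
{
    int    numstates;           /* entries in use */
    int    numalloc;            /* entries allocated */
    Status trigstates[1];       /* VARIABLE LENGTH ARRAY */
} State;

static State *
state_create(int numalloc)
{
    /* trigstates[1] already holds one element, hence (numalloc - 1) */
    State *s = calloc(1, sizeof(State) + (numalloc - 1) * sizeof(Status));

    if (s)
        s->numalloc = numalloc;
    return s;
}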
*/ if (immediate_only && deferredTriggers->events_imm != NULL) { @@ -2003,13 +2004,13 @@ deferredTriggerInvokeEvents(bool immediate_only) int i; /* - * Skip executing cancelled events, and events done by transactions - * that are not aborted. + * Skip executing cancelled events, and events done by + * transactions that are not aborted. */ if (!(event->dte_event & TRIGGER_DEFERRED_CANCELED) || - (event->dte_event & TRIGGER_DEFERRED_DONE && - TransactionIdIsValid(event->dte_done_xid) && - !TransactionIdDidAbort(event->dte_done_xid))) + (event->dte_event & TRIGGER_DEFERRED_DONE && + TransactionIdIsValid(event->dte_done_xid) && + !TransactionIdDidAbort(event->dte_done_xid))) { MemoryContextReset(per_tuple_context); @@ -2019,8 +2020,8 @@ deferredTriggerInvokeEvents(bool immediate_only) for (i = 0; i < event->dte_n_items; i++) { if (event->dte_item[i].dti_state & TRIGGER_DEFERRED_DONE && - TransactionIdIsValid(event->dte_item[i].dti_done_xid) && - !(TransactionIdDidAbort(event->dte_item[i].dti_done_xid))) + TransactionIdIsValid(event->dte_item[i].dti_done_xid) && + !(TransactionIdDidAbort(event->dte_item[i].dti_done_xid))) continue; /* @@ -2097,8 +2098,8 @@ deferredTriggerInvokeEvents(bool immediate_only) { /* * We can drop an item if it's done, but only if we're not - * inside a subtransaction because it could abort later on. - * We will want to check the item again if it does. + * inside a subtransaction because it could abort later on. We + * will want to check the item again if it does. */ if (immediate_only && !IsSubTransaction()) { @@ -2209,8 +2210,8 @@ DeferredTriggerEndXact(void) /* * Forget everything we know about deferred triggers. * - * Since all the info is in TopTransactionContext or children thereof, - * we need do nothing special to reclaim memory. + * Since all the info is in TopTransactionContext or children thereof, we + * need do nothing special to reclaim memory. */ deferredTriggers = NULL; } @@ -2236,8 +2237,8 @@ DeferredTriggerAbortXact(void) /* * Forget everything we know about deferred triggers. * - * Since all the info is in TopTransactionContext or children thereof, - * we need do nothing special to reclaim memory. + * Since all the info is in TopTransactionContext or children thereof, we + * need do nothing special to reclaim memory. */ deferredTriggers = NULL; } @@ -2285,13 +2286,13 @@ DeferredTriggerBeginSubXact(void) deferredTriggers->tail_stack = (DeferredTriggerEvent *) repalloc(deferredTriggers->tail_stack, - deferredTriggers->numalloc * sizeof(DeferredTriggerEvent)); + deferredTriggers->numalloc * sizeof(DeferredTriggerEvent)); deferredTriggers->imm_stack = (DeferredTriggerEvent *) repalloc(deferredTriggers->imm_stack, - deferredTriggers->numalloc * sizeof(DeferredTriggerEvent)); + deferredTriggers->numalloc * sizeof(DeferredTriggerEvent)); deferredTriggers->state_stack = (DeferredTriggerState *) repalloc(deferredTriggers->state_stack, - deferredTriggers->numalloc * sizeof(DeferredTriggerState)); + deferredTriggers->numalloc * sizeof(DeferredTriggerState)); } } @@ -2358,8 +2359,8 @@ DeferredTriggerEndSubXact(bool isCommit) deferredTriggers->tail_thisxact->dte_next = NULL; /* - * We don't need to free the items, since the CurTransactionContext - * will be reset shortly. + * We don't need to free the items, since the + * CurTransactionContext will be reset shortly. 
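DeferredTriggerBeginSubXact, diffed above, doubles three parallel per-subtransaction stacks in lockstep once the nesting depth reaches the allocated size. A sketch with two stacks; realloc stands in for repalloc, which never returns on failure, so the out-of-memory handling below is only needed in plain C:

#include <stdlib.h>

typedef struct Event Event;     /* opaque for this sketch */

typedef struct Stacks
{
    Event **tail_stack;
    Event **imm_stack;
    int     numpushed;          /* current subxact nesting depth */
    int     numalloc;
} Stacks;

static int
push_level(Stacks *st)
{
    if (st->numpushed >= st->numalloc)
    {
        int     newalloc = st->numalloc ? st->numalloc * 2 : 4;
        Event **p;

        p = realloc(st->tail_stack, newalloc * sizeof(Event *));
        if (!p)
            return -1;          /* repalloc would elog instead */
        st->tail_stack = p;

        p = realloc(st->imm_stack, newalloc * sizeof(Event *));
        if (!p)
            return -1;
        st->imm_stack = p;

        st->numalloc = newalloc;
    }
    st->numpushed++;
    return 0;
}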
*/ /* @@ -2393,7 +2394,7 @@ DeferredTriggerStateCreate(int numalloc) state = (DeferredTriggerState) MemoryContextAllocZero(TopTransactionContext, sizeof(DeferredTriggerStateData) + - (numalloc - 1) * sizeof(DeferredTriggerStatusData)); + (numalloc - 1) *sizeof(DeferredTriggerStatusData)); state->numalloc = numalloc; @@ -2429,13 +2430,13 @@ DeferredTriggerStateAddItem(DeferredTriggerState state, { if (state->numstates >= state->numalloc) { - int newalloc = state->numalloc * 2; + int newalloc = state->numalloc * 2; - newalloc = Max(newalloc, 8); /* in case original has size 0 */ + newalloc = Max(newalloc, 8); /* in case original has size 0 */ state = (DeferredTriggerState) repalloc(state, sizeof(DeferredTriggerStateData) + - (newalloc - 1) * sizeof(DeferredTriggerStatusData)); + (newalloc - 1) *sizeof(DeferredTriggerStatusData)); state->numalloc = newalloc; Assert(state->numstates < state->numalloc); } @@ -2463,8 +2464,9 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt) return; /* - * If in a subtransaction, and we didn't save the current state already, - * save it so it can be restored if the subtransaction aborts. + * If in a subtransaction, and we didn't save the current state + * already, save it so it can be restored if the subtransaction + * aborts. */ if (deferredTriggers->numpushed > 0 && deferredTriggers->state_stack[deferredTriggers->numpushed - 1] == NULL) @@ -2686,7 +2688,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger, return; /* - * Create a new event. We use the CurTransactionContext so the event + * Create a new event. We use the CurTransactionContext so the event * will automatically go away if the subtransaction aborts. */ oldcxt = MemoryContextSwitchTo(CurTransactionContext); diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 8fd16fdb58..6a43809329 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.62 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.63 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -302,8 +302,8 @@ DefineType(List *names, List *parameters) else ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type output function %s must return type \"cstring\"", - NameListToString(outputName)))); + errmsg("type output function %s must return type \"cstring\"", + NameListToString(outputName)))); } if (receiveOid) { @@ -311,8 +311,8 @@ DefineType(List *names, List *parameters) if (resulttype != typoid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type receive function %s must return type %s", - NameListToString(receiveName), typeName))); + errmsg("type receive function %s must return type %s", + NameListToString(receiveName), typeName))); } if (sendOid) { @@ -320,13 +320,14 @@ DefineType(List *names, List *parameters) if (resulttype != BYTEAOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type send function %s must return type \"bytea\"", - NameListToString(sendName)))); + errmsg("type send function %s must return type \"bytea\"", + NameListToString(sendName)))); } /* - * Convert analysis function proc name to an OID. If no analysis function - * is specified, we'll use zero to select the built-in default algorithm. + * Convert analysis function proc name to an OID. 
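DeferredTriggerStateAddItem grows that same variable-length struct by doubling, with a floor of eight entries to cope with an initial allocation of size zero. Continuing the State/Status sketch from above:

#include <stdlib.h>

static State *
state_add_item(State *s, unsigned oid, int deferred)
{
    if (s->numstates >= s->numalloc)
    {
        int newalloc = s->numalloc * 2;

        if (newalloc < 8)
            newalloc = 8;       /* in case the original had size 0 */
        s = realloc(s, sizeof(State) + (newalloc - 1) * sizeof(Status));
        if (s == NULL)
            return NULL;        /* repalloc would have elog'ed instead */
        s->numalloc = newalloc;
    }
    s->trigstates[s->numstates].oid = oid;
    s->trigstates[s->numstates].deferred = deferred;
    s->numstates++;
    return s;
}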
If no analysis + * function is specified, we'll use zero to select the built-in + * default algorithm. */ if (analyzeName) analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid); @@ -691,7 +692,7 @@ DefineDomain(CreateDomainStmt *stmt) case CONSTR_UNIQUE: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unique constraints not possible for domains"))); + errmsg("unique constraints not possible for domains"))); break; case CONSTR_PRIMARY: @@ -932,8 +933,8 @@ findTypeOutputFunction(List *procname, Oid typeOid) * arguments (data value, element OID). * * For backwards compatibility we allow OPAQUE in place of the actual - * type name; if we see this, we issue a warning and fix up the pg_proc - * entry. + * type name; if we see this, we issue a warning and fix up the + * pg_proc entry. */ MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid)); @@ -967,8 +968,8 @@ findTypeOutputFunction(List *procname, Oid typeOid) { /* Found, but must complain and fix the pg_proc entry */ ereport(WARNING, - (errmsg("changing argument type of function %s from \"opaque\" to %s", - NameListToString(procname), format_type_be(typeOid)))); + (errmsg("changing argument type of function %s from \"opaque\" to %s", + NameListToString(procname), format_type_be(typeOid)))); SetFunctionArgType(procOid, 0, typeOid); /* @@ -1062,7 +1063,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid) Oid procOid; /* - * Analyze functions always take one INTERNAL argument and return bool. + * Analyze functions always take one INTERNAL argument and return + * bool. */ MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid)); @@ -1078,8 +1080,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid) if (get_func_rettype(procOid) != BOOLOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type analyze function %s must return type \"boolean\"", - NameListToString(procname)))); + errmsg("type analyze function %s must return type \"boolean\"", + NameListToString(procname)))); return procOid; } @@ -1110,8 +1112,8 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist) errmsg("composite type must have at least one attribute"))); /* - * now set the parameters for keys/inheritance etc. All of these - * are uninteresting for composite types... + * now set the parameters for keys/inheritance etc. All of these are + * uninteresting for composite types... 
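findTypeOutputFunction, shown above, looks a function up under its modern signature first and falls back to the legacy OPAQUE signature only if that fails, warning and rewriting the pg_proc entry when the fallback succeeds. A schematic sketch; lookup() and fixup() are hypothetical stand-ins for the catalog machinery:

#include <stdio.h>

extern int  lookup(const char *name, unsigned argtype);    /* 0 if absent */
extern void fixup(int proc, unsigned newargtype);

static int
find_output_function(const char *name, unsigned type_oid,
                     unsigned opaque_oid)
{
    int proc = lookup(name, type_oid);      /* preferred: the real type */

    if (proc)
        return proc;

    proc = lookup(name, opaque_oid);        /* legacy signature */
    if (proc)
    {
        /* Found under the old signature: complain, then fix the catalog. */
        fprintf(stderr, "warning: changing argument type of %s\n", name);
        fixup(proc, type_oid);
    }
    return proc;                /* 0 means "function does not exist" */
}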
*/ createStmt->relation = (RangeVar *) typevar; createStmt->tableElts = coldeflist; @@ -1337,8 +1339,8 @@ AlterDomainNotNull(List *names, bool notNull) ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("column \"%s\" of table \"%s\" contains null values", - NameStr(tupdesc->attrs[attnum - 1]->attname), - RelationGetRelationName(testrel)))); + NameStr(tupdesc->attrs[attnum - 1]->attname), + RelationGetRelationName(testrel)))); } } heap_endscan(scan); @@ -1499,7 +1501,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) if (IsA(newConstraint, FkConstraint)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("foreign key constraints not possible for domains"))); + errmsg("foreign key constraints not possible for domains"))); /* otherwise it should be a plain Constraint */ if (!IsA(newConstraint, Constraint)) @@ -1517,13 +1519,13 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) case CONSTR_UNIQUE: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unique constraints not possible for domains"))); + errmsg("unique constraints not possible for domains"))); break; case CONSTR_PRIMARY: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("primary key constraints not possible for domains"))); + errmsg("primary key constraints not possible for domains"))); break; case CONSTR_ATTR_DEFERRABLE: @@ -1604,7 +1606,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) ereport(ERROR, (errcode(ERRCODE_CHECK_VIOLATION), errmsg("column \"%s\" of table \"%s\" contains values that violate the new constraint", - NameStr(tupdesc->attrs[attnum - 1]->attname), + NameStr(tupdesc->attrs[attnum - 1]->attname), RelationGetRelationName(testrel)))); } @@ -2078,9 +2080,9 @@ AlterTypeOwner(List *names, AclId newOwnerSysId) typTup = (Form_pg_type) GETSTRUCT(tup); /* - * If it's a composite type, we need to check that it really is a - * free-standing composite type, and not a table's underlying type. - * We want people to use ALTER TABLE not ALTER TYPE for that case. + * If it's a composite type, we need to check that it really is a + * free-standing composite type, and not a table's underlying type. We + * want people to use ALTER TABLE not ALTER TYPE for that case. */ if (typTup->typtype == 'c' && get_rel_relkind(typTup->typrelid) != 'c') ereport(ERROR, @@ -2088,7 +2090,7 @@ AlterTypeOwner(List *names, AclId newOwnerSysId) errmsg("\"%s\" is a table's row type", TypeNameToString(typename)))); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. 
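AlterDomainNotNull above, like the CHECK path in AlterDomainAddConstraint, validates a new domain constraint by scanning every existing row of every column that uses the domain. The scan reduced to a sketch, with hypothetical scan_begin/scan_next/value_is_null standing in for the heap-scan and heap_getattr calls:

extern void *scan_begin(void *rel);
extern void *scan_next(void *scan);         /* NULL at end of relation */
extern int   value_is_null(void *tuple, int attnum);

/* Returns 1 if the column can be marked NOT NULL, 0 on the first NULL. */
static int
check_not_null(void *rel, int attnum)
{
    void *scan = scan_begin(rel);
    void *tup;

    while ((tup = scan_next(scan)) != NULL)
    {
        if (value_is_null(tup, attnum))
            return 0;           /* PostgreSQL ereports the column and table */
    }
    return 1;
}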
*/ @@ -2100,7 +2102,10 @@ AlterTypeOwner(List *names, AclId newOwnerSysId) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on typTup because it's a copy */ + /* + * Modify the owner --- okay to scribble on typTup because it's a + * copy + */ typTup->typowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 8e637367b3..e365f946b1 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.143 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.144 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -46,10 +46,10 @@ extern bool Password_encryption; /* * The need-to-update-files flags are a pair of TransactionIds that show what - * level of the transaction tree requested the update. To register an update, + * level of the transaction tree requested the update. To register an update, * the transaction saves its own TransactionId in the flag, unless the value * was already set to a valid TransactionId. If it aborts and the value is its - * TransactionId, it resets the value to InvalidTransactionId. If it commits, + * TransactionId, it resets the value to InvalidTransactionId. If it commits, * it changes the value to its parent's TransactionId. This way the value is * propagated up to the topmost transaction, which will update the files if a * valid TransactionId is detected. @@ -169,7 +169,7 @@ write_group_file(Relation grel) if (fp == NULL) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to temporary file \"%s\": %m", tempname))); + errmsg("could not write to temporary file \"%s\": %m", tempname))); /* * Read pg_group and write the file. Note we use SnapshotSelf to @@ -316,7 +316,7 @@ write_user_file(Relation urel) if (fp == NULL) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to temporary file \"%s\": %m", tempname))); + errmsg("could not write to temporary file \"%s\": %m", tempname))); /* * Read pg_shadow and write the file. 
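The need-to-update-files comment above describes a small state machine: the flag stores the TransactionId that requested the rewrite, an abort clears only its own request, and a commit hands the request to the parent so it climbs to the top-level transaction. That propagation in a standalone sketch, with illustrative names throughout:

#include <stdint.h>

typedef uint32_t Xid;
#define InvalidXid 0

static Xid user_file_update_xid = InvalidXid;

/* Request the rewrite, unless some transaction already has. */
static void
mark_update(Xid my_xid)
{
    if (user_file_update_xid == InvalidXid)
        user_file_update_xid = my_xid;
}

/* Abort: undo the request only if we were the one who made it. */
static void
at_abort(Xid my_xid)
{
    if (user_file_update_xid == my_xid)
        user_file_update_xid = InvalidXid;
}

/* Commit: reassign the request to the parent so it keeps climbing. */
static void
at_commit(Xid my_xid, Xid parent_xid)
{
    if (user_file_update_xid == my_xid)
        user_file_update_xid = parent_xid;
}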
Note we use SnapshotSelf to @@ -1009,7 +1009,7 @@ AlterUserSet(AlterUserSetStmt *stmt) errmsg("user \"%s\" does not exist", stmt->user))); if (!(superuser() || - ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId())) + ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId())) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied"))); @@ -1216,14 +1216,14 @@ RenameUser(const char *oldname, const char *newname) char repl_null[Natts_pg_shadow]; char repl_repl[Natts_pg_shadow]; int i; - + /* ExclusiveLock because we need to update the password file */ rel = heap_openr(ShadowRelationName, ExclusiveLock); dsc = RelationGetDescr(rel); oldtuple = SearchSysCache(SHADOWNAME, - CStringGetDatum(oldname), - 0, 0, 0); + CStringGetDatum(oldname), + 0, 0, 0); if (!HeapTupleIsValid(oldtuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -1259,7 +1259,7 @@ RenameUser(const char *oldname, const char *newname) repl_repl[Anum_pg_shadow_usename - 1] = 'r'; repl_val[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein, - CStringGetDatum(newname)); + CStringGetDatum(newname)); repl_null[Anum_pg_shadow_usename - 1] = ' '; datum = heap_getattr(oldtuple, Anum_pg_shadow_passwd, dsc, &isnull); @@ -1269,14 +1269,14 @@ RenameUser(const char *oldname, const char *newname) /* MD5 uses the username as salt, so just clear it on a rename */ repl_repl[Anum_pg_shadow_passwd - 1] = 'r'; repl_null[Anum_pg_shadow_passwd - 1] = 'n'; - + ereport(NOTICE, - (errmsg("MD5 password cleared because of user rename"))); + (errmsg("MD5 password cleared because of user rename"))); } - + newtuple = heap_modifytuple(oldtuple, rel, repl_val, repl_null, repl_repl); simple_heap_update(rel, &oldtuple->t_self, newtuple); - + CatalogUpdateIndexes(rel, newtuple); ReleaseSysCache(oldtuple); diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 1a1cb2393f..67c1c02b6d 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -13,7 +13,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.288 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.289 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -106,7 +106,7 @@ typedef struct VRelStats * As these variables always appear together, we put them into one struct * and pull initialization and cleanup into separate routines. * ExecContext is used by repair_frag() and move_xxx_tuple(). More - * accurately: It is *used* only in move_xxx_tuple(), but because this + * accurately: It is *used* only in move_xxx_tuple(), but because this * routine is called many times, we initialize the struct just once in * repair_frag() and pass it on to move_xxx_tuple(). 
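The ExecContext comment above records a simple performance decision: the struct is consumed only inside move_chain_tuple and move_plain_tuple, but since those run once per moved tuple, repair_frag builds it a single time and passes it down. The shape of that design, reduced to a sketch:

typedef struct Ctx
{
    int ready;                  /* placeholder for expensive state */
} Ctx;

static void ctx_init(Ctx *c)   { c->ready = 1; }
static void ctx_finish(Ctx *c) { c->ready = 0; }

static void
move_one(Ctx *c)
{
    (void) c;                   /* uses the context, never rebuilds it */
}

static void
repair(int ntuples)
{
    Ctx ec;
    int i;

    ctx_init(&ec);              /* once, in the caller */
    for (i = 0; i < ntuples; i++)
        move_one(&ec);          /* many times, in the hot path */
    ctx_finish(&ec);
}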
*/ @@ -131,9 +131,9 @@ ExecContext_Init(ExecContext ec, Relation rel) ec->estate = CreateExecutorState(); ec->resultRelInfo = makeNode(ResultRelInfo); - ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */ + ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */ ec->resultRelInfo->ri_RelationDesc = rel; - ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */ + ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */ ExecOpenIndices(ec->resultRelInfo); @@ -154,6 +154,7 @@ ExecContext_Finish(ExecContext ec) ExecCloseIndices(ec->resultRelInfo); FreeExecutorState(ec->estate); } + /* * End of ExecContext Implementation *---------------------------------------------------------------------- @@ -182,16 +183,16 @@ static void repair_frag(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages, VacPageList fraged_pages, int nindexes, Relation *Irel); static void move_chain_tuple(Relation rel, - Buffer old_buf, Page old_page, HeapTuple old_tup, - Buffer dst_buf, Page dst_page, VacPage dst_vacpage, - ExecContext ec, ItemPointer ctid, bool cleanVpd); + Buffer old_buf, Page old_page, HeapTuple old_tup, + Buffer dst_buf, Page dst_page, VacPage dst_vacpage, + ExecContext ec, ItemPointer ctid, bool cleanVpd); static void move_plain_tuple(Relation rel, - Buffer old_buf, Page old_page, HeapTuple old_tup, - Buffer dst_buf, Page dst_page, VacPage dst_vacpage, - ExecContext ec); + Buffer old_buf, Page old_page, HeapTuple old_tup, + Buffer dst_buf, Page dst_page, VacPage dst_vacpage, + ExecContext ec); static void update_hint_bits(Relation rel, VacPageList fraged_pages, - int num_fraged_pages, BlockNumber last_move_dest_block, - int num_moved); + int num_fraged_pages, BlockNumber last_move_dest_block, + int num_moved); static void vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacpagelist); static void vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage); @@ -248,11 +249,11 @@ vacuum(VacuumStmt *vacstmt) * Furthermore, the forced commit that occurs before truncating the * relation's file would have the effect of committing the rest of the * user's transaction too, which would certainly not be the desired - * behavior. (This only applies to VACUUM FULL, though. We could - * in theory run lazy VACUUM inside a transaction block, but we choose - * to disallow that case because we'd rather commit as soon as possible - * after finishing the vacuum. This is mainly so that we can let go the - * AccessExclusiveLock that we may be holding.) + * behavior. (This only applies to VACUUM FULL, though. We could in + * theory run lazy VACUUM inside a transaction block, but we choose to + * disallow that case because we'd rather commit as soon as possible + * after finishing the vacuum. This is mainly so that we can let go + * the AccessExclusiveLock that we may be holding.) * * ANALYZE (without VACUUM) can run either way. */ @@ -262,9 +263,7 @@ vacuum(VacuumStmt *vacstmt) in_outer_xact = false; } else - { in_outer_xact = IsInTransactionChain((void *) vacstmt); - } /* * Send info about dead objects to the statistics collector @@ -296,22 +295,21 @@ vacuum(VacuumStmt *vacstmt) /* * It's a database-wide VACUUM. * - * Compute the initially applicable OldestXmin and FreezeLimit - * XIDs, so that we can record these values at the end of the - * VACUUM. Note that individual tables may well be processed - * with newer values, but we can guarantee that no - * (non-shared) relations are processed with older ones. 
+ * Compute the initially applicable OldestXmin and FreezeLimit XIDs, + * so that we can record these values at the end of the VACUUM. + * Note that individual tables may well be processed with newer + * values, but we can guarantee that no (non-shared) relations are + * processed with older ones. * - * It is okay to record non-shared values in pg_database, even - * though we may vacuum shared relations with older cutoffs, - * because only the minimum of the values present in - * pg_database matters. We can be sure that shared relations - * have at some time been vacuumed with cutoffs no worse than - * the global minimum; for, if there is a backend in some - * other DB with xmin = OLDXMIN that's determining the cutoff - * with which we vacuum shared relations, it is not possible - * for that database to have a cutoff newer than OLDXMIN - * recorded in pg_database. + * It is okay to record non-shared values in pg_database, even though + * we may vacuum shared relations with older cutoffs, because only + * the minimum of the values present in pg_database matters. We + * can be sure that shared relations have at some time been + * vacuumed with cutoffs no worse than the global minimum; for, if + * there is a backend in some other DB with xmin = OLDXMIN that's + * determining the cutoff with which we vacuum shared relations, + * it is not possible for that database to have a cutoff newer + * than OLDXMIN recorded in pg_database. */ vacuum_set_xid_limits(vacstmt, false, &initialOldestXmin, @@ -321,8 +319,8 @@ vacuum(VacuumStmt *vacstmt) /* * Decide whether we need to start/commit our own transactions. * - * For VACUUM (with or without ANALYZE): always do so, so that we - * can release locks as soon as possible. (We could possibly use the + * For VACUUM (with or without ANALYZE): always do so, so that we can + * release locks as soon as possible. (We could possibly use the * outer transaction for a one-table VACUUM, but handling TOAST tables * would be problematic.) * @@ -333,9 +331,7 @@ vacuum(VacuumStmt *vacstmt) * locks sooner. */ if (vacstmt->vacuum) - { use_own_xacts = true; - } else { Assert(vacstmt->analyze); @@ -359,10 +355,10 @@ vacuum(VacuumStmt *vacstmt) ALLOCSET_DEFAULT_MAXSIZE); /* - * vacuum_rel expects to be entered with no transaction active; it will - * start and commit its own transaction. But we are called by an SQL - * command, and so we are executing inside a transaction already. We - * commit the transaction started in PostgresMain() here, and start + * vacuum_rel expects to be entered with no transaction active; it + * will start and commit its own transaction. But we are called by an + * SQL command, and so we are executing inside a transaction already. + * We commit the transaction started in PostgresMain() here, and start * another one before exiting to match the commit waiting for us back * in PostgresMain(). */ @@ -390,24 +386,24 @@ vacuum(VacuumStmt *vacstmt) if (vacstmt->vacuum) { if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION)) - all_rels = false; /* forget about updating dbstats */ + all_rels = false; /* forget about updating dbstats */ } if (vacstmt->analyze) { MemoryContext old_context = NULL; /* - * If using separate xacts, start one for analyze. Otherwise, - * we can use the outer transaction, but we still need to call - * analyze_rel in a memory context that will be cleaned up on - * return (else we leak memory while processing multiple - * tables). + * If using separate xacts, start one for analyze. 
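The vacuum() hunks above spell out a transaction sandwich: commit the transaction PostgresMain() opened, run each relation in its own transaction so locks release early, then start a fresh transaction to pair with the commit waiting upstream. A sketch of that choreography; the two entry points are PostgreSQL's own, the loop body is schematic:

extern void StartTransactionCommand(void);
extern void CommitTransactionCommand(void);

static void
vacuum_all(int nrels)
{
    int i;

    /* Close the transaction PostgresMain() started for us. */
    CommitTransactionCommand();

    for (i = 0; i < nrels; i++)
    {
        StartTransactionCommand();
        /* ... vacuum one relation, releasing its locks at commit ... */
        CommitTransactionCommand();
    }

    /* Open a fresh one to match the commit waiting back in PostgresMain(). */
    StartTransactionCommand();
}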
+ * Otherwise, we can use the outer transaction, but we + * still need to call analyze_rel in a memory context that + * will be cleaned up on return (else we leak memory while + * processing multiple tables). */ if (use_own_xacts) { StartTransactionCommand(); - SetQuerySnapshot(); /* might be needed for functions - * in indexes */ + SetQuerySnapshot(); /* might be needed for functions + * in indexes */ } else old_context = MemoryContextSwitchTo(anl_context); @@ -873,8 +869,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) * indexes */ /* - * Tell the cache replacement strategy that vacuum is causing - * all following IO + * Tell the cache replacement strategy that vacuum is causing all + * following IO */ StrategyHintVacuum(true); @@ -932,9 +928,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) } /* - * Check that it's a plain table; we used to do this in - * get_rel_oids() but seems safer to check after we've locked the - * relation. + * Check that it's a plain table; we used to do this in get_rel_oids() + * but seems safer to check after we've locked the relation. */ if (onerel->rd_rel->relkind != expected_relkind) { @@ -1201,7 +1196,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, if (PageIsNew(page)) { - VacPage vacpagecopy; + VacPage vacpagecopy; ereport(WARNING, (errmsg("relation \"%s\" page %u is uninitialized --- fixing", @@ -1220,7 +1215,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, if (PageIsEmpty(page)) { - VacPage vacpagecopy; + VacPage vacpagecopy; vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower; free_space += vacpage->free; @@ -1424,7 +1419,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, if (do_reap || do_frag) { - VacPage vacpagecopy = copy_vac_page(vacpage); + VacPage vacpagecopy = copy_vac_page(vacpage); + if (do_reap) vpage_insert(vacuum_pages, vacpagecopy); if (do_frag) @@ -1504,9 +1500,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, RelationGetRelationName(onerel), tups_vacuumed, num_tuples, nblocks), errdetail("%.0f dead row versions cannot be removed yet.\n" - "Nonremovable row versions range from %lu to %lu bytes long.\n" + "Nonremovable row versions range from %lu to %lu bytes long.\n" "There were %.0f unused item pointers.\n" - "Total free space (including removable row versions) is %.0f bytes.\n" + "Total free space (including removable row versions) is %.0f bytes.\n" "%u pages are or will become empty, including %u at the end of the table.\n" "%u pages containing %.0f free bytes are potential move destinations.\n" "%s", @@ -1544,7 +1540,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, BlockNumber last_move_dest_block = 0, last_vacuum_block; Page dst_page = NULL; - ExecContextData ec; + ExecContextData ec; VacPageListData Nvacpagelist; VacPage dst_vacpage = NULL, last_vacuum_page, @@ -1595,13 +1591,13 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, blkno > last_move_dest_block; blkno--) { - Buffer buf; - Page page; - OffsetNumber offnum, - maxoff; - bool isempty, - dowrite, - chain_tuple_moved; + Buffer buf; + Page page; + OffsetNumber offnum, + maxoff; + bool isempty, + dowrite, + chain_tuple_moved; vacuum_delay_point(); @@ -1678,9 +1674,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - Size tuple_len; - HeapTupleData tuple; - ItemId itemid = PageGetItemId(page, offnum); + Size tuple_len; + HeapTupleData tuple; + ItemId itemid = PageGetItemId(page, offnum); if 
(!ItemIdIsUsed(itemid)) continue; @@ -1693,29 +1689,29 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, /* * VACUUM FULL has an exclusive lock on the relation. So * normally no other transaction can have pending INSERTs or - * DELETEs in this relation. A tuple is either - * (a) a tuple in a system catalog, inserted or deleted by - * a not yet committed transaction or - * (b) dead (XMIN_INVALID or XMAX_COMMITTED) or - * (c) inserted by a committed xact (XMIN_COMMITTED) or - * (d) moved by the currently running VACUUM. - * In case (a) we wouldn't be in repair_frag() at all. + * DELETEs in this relation. A tuple is either (a) a tuple in + * a system catalog, inserted or deleted by a not yet + * committed transaction or (b) dead (XMIN_INVALID or + * XMAX_COMMITTED) or (c) inserted by a committed xact + * (XMIN_COMMITTED) or (d) moved by the currently running + * VACUUM. In case (a) we wouldn't be in repair_frag() at all. * In case (b) we cannot be here, because scan_heap() has - * already marked the item as unused, see continue above. - * Case (c) is what normally is to be expected. - * Case (d) is only possible, if a whole tuple chain has been - * moved while processing this or a higher numbered block. + * already marked the item as unused, see continue above. Case + * (c) is what normally is to be expected. Case (d) is only + * possible, if a whole tuple chain has been moved while + * processing this or a higher numbered block. */ if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)) { /* - * There cannot be another concurrently running VACUUM. If - * the tuple had been moved in by a previous VACUUM, the - * visibility check would have set XMIN_COMMITTED. If the - * tuple had been moved in by the currently running VACUUM, - * the loop would have been terminated. We had + * There cannot be another concurrently running VACUUM. + * If the tuple had been moved in by a previous VACUUM, + * the visibility check would have set XMIN_COMMITTED. If + * the tuple had been moved in by the currently running + * VACUUM, the loop would have been terminated. We had * elog(ERROR, ...) here, but as we are testing for a - * can't-happen condition, Assert() seems more appropriate. + * can't-happen condition, Assert() seems more + * appropriate. */ Assert(!(tuple.t_data->t_infomask & HEAP_MOVED_IN)); @@ -1725,6 +1721,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, * moved while cleaning this page or some previous one. */ Assert(tuple.t_data->t_infomask & HEAP_MOVED_OFF); + /* * MOVED_OFF by another VACUUM would have caused the * visibility check to set XMIN_COMMITTED or XMIN_INVALID. @@ -1734,16 +1731,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, /* Can't we Assert(keep_tuples > 0) here? 
*/ if (keep_tuples == 0) continue; - if (chain_tuple_moved) /* some chains was moved - * while */ - { /* cleaning this page */ + if (chain_tuple_moved) /* some chains were moved while */ + { /* cleaning this page */ Assert(vacpage->offsets_free > 0); for (i = 0; i < vacpage->offsets_free; i++) { if (vacpage->offsets[i] == offnum) break; } - if (i >= vacpage->offsets_free) /* not found */ + if (i >= vacpage->offsets_free) /* not found */ { vacpage->offsets[vacpage->offsets_free++] = offnum; keep_tuples--; @@ -2128,18 +2124,19 @@ off <= maxoff; off = OffsetNumberNext(off)) { - ItemId itemid = PageGetItemId(page, off); - HeapTupleHeader htup; + ItemId itemid = PageGetItemId(page, off); + HeapTupleHeader htup; if (!ItemIdIsUsed(itemid)) continue; htup = (HeapTupleHeader) PageGetItem(page, itemid); if (htup->t_infomask & HEAP_XMIN_COMMITTED) continue; + /* - ** See comments in the walk-along-page loop above, why we - ** have Asserts here instead of if (...) elog(ERROR). - */ + * See comments in the walk-along-page loop above, why we + * have Asserts here instead of if (...) elog(ERROR). + */ Assert(!(htup->t_infomask & HEAP_MOVED_IN)); Assert(htup->t_infomask & HEAP_MOVED_OFF); Assert(HeapTupleHeaderGetXvac(htup) == myXID); @@ -2152,7 +2149,7 @@ if (vacpage->offsets[i] == off) break; } - if (i >= vacpage->offsets_free) /* not found */ + if (i >= vacpage->offsets_free) /* not found */ { vacpage->offsets[vacpage->offsets_free++] = off; Assert(keep_tuples > 0); @@ -2247,7 +2244,7 @@ */ update_hint_bits(onerel, fraged_pages, num_fraged_pages, last_move_dest_block, num_moved); - + /* * It'd be cleaner to make this report at the bottom of this routine, * but then the rusage would double-count the second pass of index * processing that occurs below. */ ereport(elevel, - (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages", - RelationGetRelationName(onerel), - num_moved, nblocks, blkno), - errdetail("%s", - vac_show_rusage(&ru0)))); + (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages", + RelationGetRelationName(onerel), + num_moved, nblocks, blkno), + errdetail("%s", + vac_show_rusage(&ru0)))); /* * Reflect the motion of system tuples to catalog cache here.
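Both "not found" hunks above use the same membership test: linear-search vacpage->offsets[] and append the offset only when the scan runs off the end. In isolation, with OffsetNumber reduced to its actual width:

typedef unsigned short OffsetNumber;    /* matches PostgreSQL's 16-bit type */

/* Caller guarantees room in offsets[], as repair_frag does. */
static void
note_offset(OffsetNumber *offsets, int *noffsets, OffsetNumber offnum)
{
    int i;

    for (i = 0; i < *noffsets; i++)
        if (offsets[i] == offnum)
            break;
    if (i >= *noffsets)                 /* not found: append */
        offsets[(*noffsets)++] = offnum;
}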
@@ -2284,6 +2281,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, *vpleft = *vpright; *vpright = vpsave; } + /* * keep_tuples is the number of tuples that have been moved * off a page during chain moves but not been scanned over * @@ -2301,13 +2299,13 @@ if (vacpage->blkno == (blkno - 1) && vacpage->offsets_free > 0) { - Buffer buf; - Page page; - OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)]; - OffsetNumber offnum, - maxoff; - int uncnt; - int num_tuples = 0; + Buffer buf; + Page page; + OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)]; + OffsetNumber offnum, + maxoff; + int uncnt; + int num_tuples = 0; buf = ReadBuffer(onerel, vacpage->blkno); LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); @@ -2317,7 +2315,7 @@ offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - ItemId itemid = PageGetItemId(page, offnum); + ItemId itemid = PageGetItemId(page, offnum); HeapTupleHeader htup; if (!ItemIdIsUsed(itemid)) continue; @@ -2327,9 +2325,9 @@ continue; /* - ** See comments in the walk-along-page loop above, why we - ** have Asserts here instead of if (...) elog(ERROR). - */ + * See comments in the walk-along-page loop above, why we + * have Asserts here instead of if (...) elog(ERROR). + */ Assert(!(htup->t_infomask & HEAP_MOVED_IN)); Assert(htup->t_infomask & HEAP_MOVED_OFF); Assert(HeapTupleHeaderGetXvac(htup) == myXID); @@ -2418,10 +2416,10 @@ move_chain_tuple(Relation rel, ExecContext ec, ItemPointer ctid, bool cleanVpd) { TransactionId myXID = GetCurrentTransactionId(); - HeapTupleData newtup; - OffsetNumber newoff; - ItemId newitemid; - Size tuple_len = old_tup->t_len; + HeapTupleData newtup; + OffsetNumber newoff; + ItemId newitemid; + Size tuple_len = old_tup->t_len; heap_copytuple_with_tuple(old_tup, &newtup); @@ -2434,36 +2432,32 @@ START_CRIT_SECTION(); old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | - HEAP_XMIN_INVALID | - HEAP_MOVED_IN); + HEAP_XMIN_INVALID | + HEAP_MOVED_IN); old_tup->t_data->t_infomask |= HEAP_MOVED_OFF; HeapTupleHeaderSetXvac(old_tup->t_data, myXID); /* * If this page was not used before - clean it. * - * NOTE: a nasty bug used to lurk here. It is possible - * for the source and destination pages to be the same - * (since this tuple-chain member can be on a page - * lower than the one we're currently processing in - * the outer loop). If that's true, then after - * vacuum_page() the source tuple will have been - * moved, and tuple.t_data will be pointing at - * garbage. Therefore we must do everything that uses + * NOTE: a nasty bug used to lurk here. It is possible for the source + * and destination pages to be the same (since this tuple-chain member + * can be on a page lower than the one we're currently processing in + * the outer loop). If that's true, then after vacuum_page() the + * source tuple will have been moved, and tuple.t_data will be + * pointing at garbage. Therefore we must do everything that uses + * old_tup->t_data BEFORE this step!! * - * This path is different from the other callers of - * vacuum_page, because we have already incremented - * the vacpage's offsets_used field to account for the - * tuple(s) we expect to move onto the page. Therefore - * vacuum_page's check for offsets_used == 0 is wrong. - * But since that's a good debugging check for all - * other callers, we work around it here rather than - * remove it.
+ * This path is different from the other callers of vacuum_page, because + * we have already incremented the vacpage's offsets_used field to + * account for the tuple(s) we expect to move onto the page. Therefore + * vacuum_page's check for offsets_used == 0 is wrong. But since + * that's a good debugging check for all other callers, we work around + * it here rather than remove it. */ if (!PageIsEmpty(dst_page) && cleanVpd) { - int sv_offsets_used = dst_vacpage->offsets_used; + int sv_offsets_used = dst_vacpage->offsets_used; dst_vacpage->offsets_used = 0; vacuum_page(rel, dst_buf, dst_vacpage); @@ -2471,8 +2465,8 @@ move_chain_tuple(Relation rel, } /* - * Update the state of the copied tuple, and store it - * on the destination page. + * Update the state of the copied tuple, and store it on the + * destination page. */ newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | @@ -2484,7 +2478,7 @@ move_chain_tuple(Relation rel, if (newoff == InvalidOffsetNumber) { elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain", - (unsigned long) tuple_len, dst_vacpage->blkno); + (unsigned long) tuple_len, dst_vacpage->blkno); } newitemid = PageGetItemId(dst_page, newoff); pfree(newtup.t_data); @@ -2509,8 +2503,7 @@ move_chain_tuple(Relation rel, else { /* - * No XLOG record, but still need to flag that XID - * exists on disk + * No XLOG record, but still need to flag that XID exists on disk */ MyXactMadeTempRelUpdate = true; } @@ -2518,9 +2511,8 @@ move_chain_tuple(Relation rel, END_CRIT_SECTION(); /* - * Set new tuple's t_ctid pointing to itself for last - * tuple in chain, and to next tuple in chain - * otherwise. + * Set new tuple's t_ctid pointing to itself for last tuple in chain, + * and to next tuple in chain otherwise. */ /* Is this ok after log_heap_move() and END_CRIT_SECTION()? */ if (!ItemPointerIsValid(ctid)) @@ -2559,10 +2551,10 @@ move_plain_tuple(Relation rel, ExecContext ec) { TransactionId myXID = GetCurrentTransactionId(); - HeapTupleData newtup; - OffsetNumber newoff; - ItemId newitemid; - Size tuple_len = old_tup->t_len; + HeapTupleData newtup; + OffsetNumber newoff; + ItemId newitemid; + Size tuple_len = old_tup->t_len; /* copy tuple */ heap_copytuple_with_tuple(old_tup, &newtup); @@ -2570,9 +2562,9 @@ move_plain_tuple(Relation rel, /* * register invalidation of source tuple in catcaches. * - * (Note: we do not need to register the copied tuple, because we - * are not changing the tuple contents and so there cannot be - * any need to flush negative catcache entries.) + * (Note: we do not need to register the copied tuple, because we are not + * changing the tuple contents and so there cannot be any need to + * flush negative catcache entries.) */ CacheInvalidateHeapTuple(rel, old_tup); @@ -2609,8 +2601,8 @@ move_plain_tuple(Relation rel, * Mark old tuple as MOVED_OFF by me. 
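The offsets_used workaround described above saves the counter, zeroes it so the callee's sanity check (which is valid for every other caller) passes, and restores it afterward. Reduced to a sketch, with a hypothetical clean_page in place of vacuum_page:

typedef struct VacPage
{
    int offsets_used;                   /* pared down to the one field used */
} VacPage;

extern void clean_page(VacPage *vp);    /* hypothetical; insists offsets_used == 0 */

static void
clean_destination(VacPage *vp)
{
    int save = vp->offsets_used;

    vp->offsets_used = 0;               /* appease the callee's debugging check */
    clean_page(vp);
    vp->offsets_used = save;            /* put the real count back */
}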
*/ old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | - HEAP_XMIN_INVALID | - HEAP_MOVED_IN); + HEAP_XMIN_INVALID | + HEAP_MOVED_IN); old_tup->t_data->t_infomask |= HEAP_MOVED_OFF; HeapTupleHeaderSetXvac(old_tup->t_data, myXID); @@ -2628,8 +2620,7 @@ move_plain_tuple(Relation rel, else { /* - * No XLOG record, but still need to flag that XID exists - * on disk + * No XLOG record, but still need to flag that XID exists on disk */ MyXactMadeTempRelUpdate = true; } @@ -2637,7 +2628,7 @@ move_plain_tuple(Relation rel, END_CRIT_SECTION(); dst_vacpage->free = ((PageHeader) dst_page)->pd_upper - - ((PageHeader) dst_page)->pd_lower; + ((PageHeader) dst_page)->pd_lower; LockBuffer(dst_buf, BUFFER_LOCK_UNLOCK); LockBuffer(old_buf, BUFFER_LOCK_UNLOCK); @@ -2670,17 +2661,17 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages, { int checked_moved = 0; int i; - VacPage *curpage; + VacPage *curpage; for (i = 0, curpage = fraged_pages->pagedesc; i < num_fraged_pages; i++, curpage++) { - Buffer buf; - Page page; - OffsetNumber max_offset; - OffsetNumber off; - int num_tuples = 0; + Buffer buf; + Page page; + OffsetNumber max_offset; + OffsetNumber off; + int num_tuples = 0; vacuum_delay_point(); @@ -2696,17 +2687,18 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages, off <= max_offset; off = OffsetNumberNext(off)) { - ItemId itemid = PageGetItemId(page, off); - HeapTupleHeader htup; + ItemId itemid = PageGetItemId(page, off); + HeapTupleHeader htup; if (!ItemIdIsUsed(itemid)) continue; htup = (HeapTupleHeader) PageGetItem(page, itemid); if (htup->t_infomask & HEAP_XMIN_COMMITTED) continue; + /* - * See comments in the walk-along-page loop above, why we - * have Asserts here instead of if (...) elog(ERROR). The + * See comments in the walk-along-page loop above, why we have + * Asserts here instead of if (...) elog(ERROR). The * difference here is that we may see MOVED_IN. */ Assert(htup->t_infomask & HEAP_MOVED); @@ -2865,14 +2857,14 @@ scan_index(Relation indrel, double num_tuples) false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%u index pages have been deleted, %u are currently reusable.\n" - "%s", - stats->pages_deleted, stats->pages_free, - vac_show_rusage(&ru0)))); + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%u index pages have been deleted, %u are currently reusable.\n" + "%s", + stats->pages_deleted, stats->pages_free, + vac_show_rusage(&ru0)))); /* * Check for tuple count mismatch. 
If the index is partial, then it's @@ -2932,16 +2924,16 @@ vacuum_index(VacPageList vacpagelist, Relation indrel, false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%.0f index row versions were removed.\n" + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%.0f index row versions were removed.\n" "%u index pages have been deleted, %u are currently reusable.\n" - "%s", - stats->tuples_removed, - stats->pages_deleted, stats->pages_free, - vac_show_rusage(&ru0)))); + "%s", + stats->tuples_removed, + stats->pages_deleted, stats->pages_free, + vac_show_rusage(&ru0)))); /* * Check for tuple count mismatch. If the index is partial, then it's @@ -3370,7 +3362,7 @@ vacuum_delay_point(void) if (VacuumCostActive && !InterruptPending && VacuumCostBalance >= VacuumCostLimit) { - int msec; + int msec; msec = VacuumCostDelay * VacuumCostBalance / VacuumCostLimit; if (msec > VacuumCostDelay * 4) diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index bfd41beec5..f19001d679 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -31,7 +31,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.44 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.45 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -594,14 +594,14 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats) false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%u index pages have been deleted, %u are currently reusable.\n" - "%s", - stats->pages_deleted, stats->pages_free, - vac_show_rusage(&ru0)))); + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%u index pages have been deleted, %u are currently reusable.\n" + "%s", + stats->pages_deleted, stats->pages_free, + vac_show_rusage(&ru0)))); pfree(stats); } @@ -654,16 +654,16 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats) false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%.0f index row versions were removed.\n" + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%.0f index row versions were removed.\n" "%u index pages have been deleted, %u are currently reusable.\n" - "%s", - stats->tuples_removed, - stats->pages_deleted, stats->pages_free, - vac_show_rusage(&ru0)))); + "%s", + stats->tuples_removed, + stats->pages_deleted, stats->pages_free, + vac_show_rusage(&ru0)))); pfree(stats); } diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index 8c962c5206..cb4a3cde71 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.100 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: 
pgsql/src/backend/commands/variable.c,v 1.101 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,7 +62,7 @@ assign_datestyle(const char *value, bool doit, GucSource source) if (source >= PGC_S_INTERACTIVE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid list syntax for parameter \"datestyle\""))); + errmsg("invalid list syntax for parameter \"datestyle\""))); return NULL; } @@ -148,8 +148,8 @@ assign_datestyle(const char *value, bool doit, GucSource source) if (source >= PGC_S_INTERACTIVE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("unrecognized \"datestyle\" key word: \"%s\"", - tok))); + errmsg("unrecognized \"datestyle\" key word: \"%s\"", + tok))); ok = false; break; } @@ -314,9 +314,10 @@ assign_timezone(const char *value, bool doit, GucSource source) * * During GUC initialization, since the timezone library isn't * set up yet, pg_get_current_timezone will return NULL and we - * will leave the setting as UNKNOWN. If this isn't overridden - * from the config file then pg_timezone_initialize() will - * eventually select a default value from the environment. + * will leave the setting as UNKNOWN. If this isn't + * overridden from the config file then + * pg_timezone_initialize() will eventually select a default + * value from the environment. */ const char *curzone = pg_get_current_timezone(); @@ -329,13 +330,14 @@ assign_timezone(const char *value, bool doit, GucSource source) * Otherwise assume it is a timezone name. * * We have to actually apply the change before we can have any - * hope of checking it. So, save the old value in case we have - * to back out. We have to copy since pg_get_current_timezone - * returns a pointer to its static state. + * hope of checking it. So, save the old value in case we + * have to back out. We have to copy since + * pg_get_current_timezone returns a pointer to its static + * state. * - * This would all get a lot simpler if the TZ library had a better - * API that would let us look up and test a timezone name without - * making it the default. + * This would all get a lot simpler if the TZ library had a + * better API that would let us look up and test a timezone + * name without making it the default. */ const char *cur_tz; char *save_tz; @@ -368,22 +370,23 @@ assign_timezone(const char *value, bool doit, GucSource source) else { /* - * TZ library wasn't initialized yet. Annoyingly, we will - * come here during startup because guc-file.l checks - * the value with doit = false before actually applying. - * The best approach seems to be as follows: + * TZ library wasn't initialized yet. Annoyingly, we + * will come here during startup because guc-file.l + * checks the value with doit = false before actually + * applying. The best approach seems to be as follows: * * 1. known && acceptable: leave the setting in place, * since we'll apply it soon anyway. This is mainly - * so that any log messages printed during this interval - * are timestamped with the user's requested timezone. + * so that any log messages printed during this + * interval are timestamped with the user's requested + * timezone. * - * 2. known && !acceptable: revert to GMT for lack of - * any better idea. (select_default_timezone() may get + * 2. known && !acceptable: revert to GMT for lack of any + * better idea. (select_default_timezone() may get * called later to undo this.) * - * 3. !known: no need to do anything since TZ library - * did not change its state. 
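As an aside, the save-and-restore dance described in this comment block reduces to the following shape; a minimal sketch of the pattern only, assuming a hypothetical validator apply_and_check_tz() in place of the real GUC machinery (pg_get_current_timezone(), pstrdup(), and pfree() are actual backend calls):

    const char *cur_tz = pg_get_current_timezone();
    char       *save_tz;

    /* must copy: pg_get_current_timezone() points at static state */
    save_tz = cur_tz ? pstrdup(cur_tz) : NULL;

    if (!apply_and_check_tz(value))     /* hypothetical: apply, then verify */
    {
        /* back out to the previous setting before rejecting the value */
        if (save_tz)
            apply_and_check_tz(save_tz);
        value = NULL;
    }
    if (save_tz)
        pfree(save_tz);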
+ * 3. !known: no need to do anything since TZ library did + * not change its state. * * Again, this should all go away sometime soon. */ @@ -441,7 +444,7 @@ assign_timezone(const char *value, bool doit, GucSource source) const char * show_timezone(void) { - const char *tzn; + const char *tzn; if (HasCTZSet) { @@ -472,14 +475,14 @@ assign_XactIsoLevel(const char *value, bool doit, GucSource source) { if (doit && source >= PGC_S_INTERACTIVE) { - if (SerializableSnapshot != NULL) - ereport(ERROR, - (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query"))); - if (IsSubTransaction()) - ereport(ERROR, - (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction"))); + if (SerializableSnapshot != NULL) + ereport(ERROR, + (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query"))); + if (IsSubTransaction()) + ereport(ERROR, + (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction"))); } if (strcmp(value, "serializable") == 0) @@ -596,7 +599,7 @@ assign_client_encoding(const char *value, bool doit, GucSource source) * limit on names, so we can tell whether we're being passed an initial * username or a saved/restored value. */ -extern char *session_authorization_string; /* in guc.c */ +extern char *session_authorization_string; /* in guc.c */ const char * assign_session_authorization(const char *value, bool doit, GucSource source) diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index aafc42b1d4..abc37fcc8f 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.84 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.85 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -191,8 +191,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc) newattr->atttypmod != oldattr->atttypmod) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("cannot change data type of view column \"%s\"", - NameStr(oldattr->attname)))); + errmsg("cannot change data type of view column \"%s\"", + NameStr(oldattr->attname)))); /* We can ignore the remaining attributes of an attribute... */ } diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index ad1b5817c4..6cbad491ed 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.80 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.81 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -51,7 +51,7 @@ * needs access to variables of the current outer tuple. (The handling of * this parameter is currently pretty inconsistent: some callers pass NULL * and some pass down their parent's value; so don't rely on it in other - * situations. It'd probably be better to remove the whole thing and use + * situations. It'd probably be better to remove the whole thing and use * the generalized parameter mechanism instead.) 
*/ void @@ -64,7 +64,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt) /* If we have changed parameters, propagate that info */ if (node->chgParam != NULL) { - ListCell *l; + ListCell *l; foreach(l, node->initPlan) { @@ -365,19 +365,19 @@ ExecMayReturnRawTuples(PlanState *node) { /* * At a table scan node, we check whether ExecAssignScanProjectionInfo - * decided to do projection or not. Most non-scan nodes always project - * and so we can return "false" immediately. For nodes that don't - * project but just pass up input tuples, we have to recursively + * decided to do projection or not. Most non-scan nodes always + * project and so we can return "false" immediately. For nodes that + * don't project but just pass up input tuples, we have to recursively * examine the input plan node. * - * Note: Hash and Material are listed here because they sometimes - * return an original input tuple, not a copy. But Sort and SetOp - * never return an original tuple, so they can be treated like - * projecting nodes. + * Note: Hash and Material are listed here because they sometimes return + * an original input tuple, not a copy. But Sort and SetOp never + * return an original tuple, so they can be treated like projecting + * nodes. */ switch (nodeTag(node)) { - /* Table scan nodes */ + /* Table scan nodes */ case T_SeqScanState: case T_IndexScanState: case T_TidScanState: @@ -387,7 +387,7 @@ ExecMayReturnRawTuples(PlanState *node) return true; break; - /* Non-projecting nodes */ + /* Non-projecting nodes */ case T_HashState: case T_MaterialState: case T_UniqueState: @@ -395,19 +395,19 @@ ExecMayReturnRawTuples(PlanState *node) return ExecMayReturnRawTuples(node->lefttree); case T_AppendState: - { - AppendState *appendstate = (AppendState *) node; - int j; - - for (j = 0; j < appendstate->as_nplans; j++) { - if (ExecMayReturnRawTuples(appendstate->appendplans[j])) - return true; + AppendState *appendstate = (AppendState *) node; + int j; + + for (j = 0; j < appendstate->as_nplans; j++) + { + if (ExecMayReturnRawTuples(appendstate->appendplans[j])) + return true; + } + break; } - break; - } - /* All projecting node types come here */ + /* All projecting node types come here */ default: break; } diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index 44157fc686..e31dc7dfcb 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.10 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.11 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -26,8 +26,8 @@ static TupleHashTable CurTupleHashTable = NULL; static uint32 TupleHashTableHash(const void *key, Size keysize); -static int TupleHashTableMatch(const void *key1, const void *key2, - Size keysize); +static int TupleHashTableMatch(const void *key1, const void *key2, + Size keysize); /***************************************************************************** @@ -303,7 +303,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, Assert(entrysize >= sizeof(TupleHashEntryData)); hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt, - sizeof(TupleHashTableData)); + sizeof(TupleHashTableData)); hashtable->numCols = numCols; hashtable->keyColIdx = keyColIdx; @@ -321,7 +321,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, hash_ctl.hcxt = tablecxt; 
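As an aside on the dynahash API being configured here: a minimal, self-contained sketch, assuming an illustrative Oid-keyed table (hash_create(), hash_search(), tag_hash, MemSet, and the HASH_* flags are the real dynahash interface; the struct and table name are made up):

    typedef struct MyCacheEntry
    {
        Oid         key;            /* hash key must be first field */
        int         hits;
    } MyCacheEntry;

    HASHCTL     ctl;
    HTAB       *cache;
    MyCacheEntry *entry;
    bool        found;

    MemSet(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(Oid);
    ctl.entrysize = sizeof(MyCacheEntry);
    ctl.hash = tag_hash;            /* stock hash for fixed-size binary keys */
    cache = hash_create("MyCache", 64, &ctl, HASH_ELEM | HASH_FUNCTION);

    entry = (MyCacheEntry *) hash_search(cache, &some_oid, HASH_ENTER, &found);
    if (!found)
        entry->hits = 0;            /* dynahash copied the key into the entry */
    entry->hits++;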
hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets, &hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); + HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); if (hashtable->hashtab == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), @@ -359,8 +359,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, /* * Set up data needed by hash and match functions * - * We save and restore CurTupleHashTable just in case someone manages - * to invoke this code re-entrantly. + * We save and restore CurTupleHashTable just in case someone manages to + * invoke this code re-entrantly. */ hashtable->tupdesc = tupdesc; saveCurHT = CurTupleHashTable; @@ -389,8 +389,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, /* * Zero any caller-requested space in the entry. (This zaps - * the "key data" dynahash.c copied into the new entry, but - * we don't care since we're about to overwrite it anyway.) + * the "key data" dynahash.c copied into the new entry, but we + * don't care since we're about to overwrite it anyway.) */ MemSet(entry, 0, hashtable->entrysize); @@ -414,13 +414,13 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, * * The passed-in key is a pointer to a HeapTuple pointer -- this is either * the firstTuple field of a TupleHashEntry struct, or the key value passed - * to hash_search. We ignore the keysize. + * to hash_search. We ignore the keysize. * * CurTupleHashTable must be set before calling this, since dynahash.c * doesn't provide any API that would let us get at the hashtable otherwise. * * Also, the caller must select an appropriate memory context for running - * the hash functions. (dynahash.c doesn't change CurrentMemoryContext.) + * the hash functions. (dynahash.c doesn't change CurrentMemoryContext.) */ static uint32 TupleHashTableHash(const void *key, Size keysize) diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c index 43f58e036e..c797c343d3 100644 --- a/src/backend/executor/execJunk.c +++ b/src/backend/executor/execJunk.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.42 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.43 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -274,9 +274,9 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot) * dealing with a small number of attributes. for large tuples we just * use palloc. * - * Note: we could use just one set of arrays if we were willing to - * assume that the resno mapping is monotonic... I think it is, but - * won't take the risk of breaking things right now. + * Note: we could use just one set of arrays if we were willing to assume + * that the resno mapping is monotonic... I think it is, but won't + * take the risk of breaking things right now. 
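The small-tuple optimization that comment belongs to looks roughly like the following; a condensed sketch of the pattern (the real function also sets up matching old_values/old_nulls arrays and frees everything at the end):

    Datum       values_array[64];
    char        nulls_array[64];
    Datum      *values;
    char       *nulls;

    if (cleanLength > 64)
    {
        /* large tuple: fall back to heap allocation */
        values = (Datum *) palloc(cleanLength * sizeof(Datum));
        nulls = (char *) palloc(cleanLength * sizeof(char));
    }
    else
    {
        /* common case: use on-stack arrays, avoiding palloc/pfree overhead */
        values = values_array;
        nulls = nulls_array;
    }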
*/ if (cleanLength > 64) { @@ -309,7 +309,7 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot) */ for (i = 0; i < cleanLength; i++) { - int j = cleanMap[i] - 1; + int j = cleanMap[i] - 1; values[i] = old_values[j]; nulls[i] = old_nulls[j]; diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 6e386d2529..d77bc7054a 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -26,7 +26,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.235 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.236 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -521,8 +521,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) * Multiple result relations (due to inheritance) * parseTree->resultRelations identifies them all */ - ResultRelInfo *resultRelInfo; - ListCell *l; + ResultRelInfo *resultRelInfo; + ListCell *l; numResultRelations = list_length(resultRelations); resultRelInfos = (ResultRelInfo *) @@ -644,10 +644,10 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) /* * Initialize the junk filter if needed. SELECT and INSERT queries * need a filter if there are any junk attrs in the tlist. INSERT and - * SELECT INTO also need a filter if the plan may return raw disk tuples - * (else heap_insert will be scribbling on the source relation!). - * UPDATE and DELETE always need a filter, since there's always a junk - * 'ctid' attribute present --- no need to look first. + * SELECT INTO also need a filter if the plan may return raw disk + * tuples (else heap_insert will be scribbling on the source + * relation!). UPDATE and DELETE always need a filter, since there's + * always a junk 'ctid' attribute present --- no need to look first. 
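The rule spelled out in that comment amounts to a per-operation decision; a minimal sketch, assuming a hypothetical helper tlist_has_junk() in place of the actual targetlist walk (CMD_* and ExecMayReturnRawTuples() are real):

    bool junk_filter_needed = false;

    switch (operation)
    {
        case CMD_SELECT:
        case CMD_INSERT:
            /*
             * Filter only if junk attrs are present, or (for SELECT INTO)
             * if the plan might hand back raw disk tuples.
             */
            if (tlist_has_junk(plan->targetlist) ||     /* hypothetical */
                (estate->es_select_into &&
                 ExecMayReturnRawTuples(planstate)))
                junk_filter_needed = true;
            break;
        case CMD_UPDATE:
        case CMD_DELETE:
            /* always: the junk 'ctid' attribute is always present */
            junk_filter_needed = true;
            break;
        default:
            break;
    }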
*/ { bool junk_filter_needed = false; @@ -1460,7 +1460,7 @@ ldelete:; &ctid, estate->es_snapshot->curcid, estate->es_crosscheck_snapshot, - true /* wait for commit */); + true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: @@ -1596,7 +1596,7 @@ lreplace:; &ctid, estate->es_snapshot->curcid, estate->es_crosscheck_snapshot, - true /* wait for commit */); + true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index 65721e0863..2ad0423810 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.167 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.168 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -59,51 +59,51 @@ static Datum ExecEvalArrayRef(ArrayRefExprState *astate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalAggref(AggrefExprState *aggref, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo, List *argList, ExprContext *econtext); static Datum ExecMakeFunctionResultNoSets(FuncExprState *fcache, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalOper(FuncExprState *fcache, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalDistinct(FuncExprState *fcache, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalNot(BoolExprState *notclause, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCaseTestExpr(ExprState *exprstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalArray(ArrayExprState *astate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalRow(RowExprState *rstate, 
- ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalNullIf(FuncExprState *nullIfExpr, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalNullTest(GenericExprState *nstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); @@ -114,14 +114,14 @@ static Datum ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCoerceToDomainValue(ExprState *exprstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalFieldSelect(FieldSelectState *fstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalFieldStore(FieldStoreState *fstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalRelabelType(GenericExprState *exprstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); @@ -145,7 +145,7 @@ static Datum ExecEvalRelabelType(GenericExprState *exprstate, * * Note: for notational simplicity we declare these functions as taking the * specific type of ExprState that they work on. This requires casting when - * assigning the function pointer in ExecInitExpr. Be careful that the + * assigning the function pointer in ExecInitExpr. Be careful that the * function signature is declared correctly, because the cast suppresses * automatic checking! * @@ -236,13 +236,13 @@ ExecEvalArrayRef(ArrayRefExprState *astate, isDone)); /* - * If refexpr yields NULL, and it's a fetch, then result is NULL. - * In the assignment case, we'll cons up something below. + * If refexpr yields NULL, and it's a fetch, then result is NULL. In + * the assignment case, we'll cons up something below. */ if (*isNull) { if (isDone && *isDone == ExprEndResult) - return (Datum) NULL; /* end of set result */ + return (Datum) NULL; /* end of set result */ if (!isAssignment) return (Datum) NULL; } @@ -321,10 +321,11 @@ ExecEvalArrayRef(ArrayRefExprState *astate, * * XXX At some point we'll need to look into making the old value of * the array element available via CaseTestExpr, as is done by - * ExecEvalFieldStore. This is not needed now but will be needed - * to support arrays of composite types; in an assignment to a field - * of an array member, the parser would generate a FieldStore that - * expects to fetch its input tuple via CaseTestExpr. + * ExecEvalFieldStore. This is not needed now but will be needed + * to support arrays of composite types; in an assignment to a + * field of an array member, the parser would generate a + * FieldStore that expects to fetch its input tuple via + * CaseTestExpr. */ sourceData = ExecEvalExpr(astate->refassgnexpr, econtext, @@ -339,15 +340,16 @@ ExecEvalArrayRef(ArrayRefExprState *astate, return PointerGetDatum(array_source); /* - * For an assignment, if all the subscripts and the input expression - * are non-null but the original array is null, then substitute an - * empty (zero-dimensional) array and proceed with the assignment. 
- * This only works for varlena arrays, though; for fixed-length - * array types we punt and return the null input array. + * For an assignment, if all the subscripts and the input + * expression are non-null but the original array is null, then + * substitute an empty (zero-dimensional) array and proceed with + * the assignment. This only works for varlena arrays, though; for + * fixed-length array types we punt and return the null input + * array. */ if (*isNull) { - if (astate->refattrlength > 0) /* fixed-length array? */ + if (astate->refattrlength > 0) /* fixed-length array? */ return PointerGetDatum(array_source); array_source = construct_md_array(NULL, 0, NULL, NULL, @@ -444,10 +446,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, /* * Get the slot and attribute number we want * - * The asserts check that references to system attributes only appear - * at the level of a relation scan; at higher levels, system attributes - * must be treated as ordinary variables (since we no longer have access - * to the original tuple). + * The asserts check that references to system attributes only appear at + * the level of a relation scan; at higher levels, system attributes + * must be treated as ordinary variables (since we no longer have + * access to the original tuple). */ attnum = variable->varattno; @@ -476,8 +478,8 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, tuple_type = slot->ttc_tupleDescriptor; /* - * Some checks that are only applied for user attribute numbers - * (bogus system attnums will be caught inside heap_getattr). + * Some checks that are only applied for user attribute numbers (bogus + * system attnums will be caught inside heap_getattr). */ if (attnum > 0) { @@ -488,9 +490,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, tuple_type->attrs[attnum - 1] != NULL); /* - * If the attribute's column has been dropped, we force a NULL result. - * This case should not happen in normal use, but it could happen if - * we are executing a plan cached before the column was dropped. + * If the attribute's column has been dropped, we force a NULL + * result. This case should not happen in normal use, but it could + * happen if we are executing a plan cached before the column was + * dropped. */ if (tuple_type->attrs[attnum - 1]->attisdropped) { @@ -499,13 +502,14 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, } /* - * This assert checks that the datatype the plan expects to get (as - * told by our "variable" argument) is in fact the datatype of the - * attribute being fetched (as seen in the current context, identified - * by our "econtext" argument). Otherwise crashes are likely. + * This assert checks that the datatype the plan expects to get + * (as told by our "variable" argument) is in fact the datatype of + * the attribute being fetched (as seen in the current context, + * identified by our "econtext" argument). Otherwise crashes are + * likely. * - * Note that we can't check dropped columns, since their atttypid - * has been zeroed. + * Note that we can't check dropped columns, since their atttypid has + * been zeroed. */ Assert(variable->vartype == tuple_type->attrs[attnum - 1]->atttypid); } @@ -590,7 +594,8 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext, else { /* - * All other parameter types must be sought in ecxt_param_list_info. + * All other parameter types must be sought in + * ecxt_param_list_info. 
*/ ParamListInfo paramInfo; @@ -964,7 +969,7 @@ ExecMakeFunctionResult(FuncExprState *fcache, { RegisterExprContextCallback(econtext, ShutdownFuncExpr, - PointerGetDatum(fcache)); + PointerGetDatum(fcache)); fcache->shutdown_reg = true; } } @@ -1006,8 +1011,8 @@ ExecMakeFunctionResult(FuncExprState *fcache, * * We change the ExprState function pointer to use the simpler * ExecMakeFunctionResultNoSets on subsequent calls. This amounts - * to assuming that no argument can return a set if it didn't do so - * the first time. + * to assuming that no argument can return a set if it didn't do + * so the first time. */ fcache->xprstate.evalfunc = (ExprStateEvalFunc) ExecMakeFunctionResultNoSets; @@ -1098,7 +1103,7 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache, } } } - /* fcinfo.isnull = false; */ /* handled by MemSet */ + /* fcinfo.isnull = false; */ /* handled by MemSet */ result = FunctionCallInvoke(&fcinfo); *isNull = fcinfo.isnull; @@ -1273,9 +1278,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, break; /* - * Can't do anything useful with NULL rowtype values. Currently - * we raise an error, but another alternative is to just ignore - * the result and "continue" to get another row. + * Can't do anything useful with NULL rowtype values. + * Currently we raise an error, but another alternative is to + * just ignore the result and "continue" to get another row. */ if (returnsTuple && fcinfo.isnull) ereport(ERROR, @@ -1293,13 +1298,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, { /* * Use the type info embedded in the rowtype Datum to - * look up the needed tupdesc. Make a copy for the query. + * look up the needed tupdesc. Make a copy for the + * query. */ - HeapTupleHeader td; + HeapTupleHeader td; td = DatumGetHeapTupleHeader(result); tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td), - HeapTupleHeaderGetTypMod(td)); + HeapTupleHeaderGetTypMod(td)); tupdesc = CreateTupleDescCopy(tupdesc); } else @@ -1326,7 +1332,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, */ if (returnsTuple) { - HeapTupleHeader td; + HeapTupleHeader td; td = DatumGetHeapTupleHeader(result); @@ -1826,10 +1832,10 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, *isDone = ExprSingleResult; /* - * If there's a test expression, we have to evaluate it and save - * the value where the CaseTestExpr placeholders can find it. - * We must save and restore prior setting of econtext's caseValue fields, - * in case this node is itself within a larger CASE. + * If there's a test expression, we have to evaluate it and save the + * value where the CaseTestExpr placeholders can find it. We must save + * and restore prior setting of econtext's caseValue fields, in case + * this node is itself within a larger CASE. 
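Condensed, the save/evaluate/restore sequence that rewrapped comment describes is as follows (a sketch of the surrounding function, with the WHEN-clause loop elided):

    save_datum = econtext->caseValue_datum;
    save_isNull = econtext->caseValue_isNull;

    if (caseExpr->arg)
        econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg, econtext,
                                                 &econtext->caseValue_isNull,
                                                 NULL);

    /* ... evaluate WHEN clauses; CaseTestExpr nodes read caseValue ... */

    econtext->caseValue_datum = save_datum;     /* restore for any outer CASE */
    econtext->caseValue_isNull = save_isNull;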
*/ save_datum = econtext->caseValue_datum; save_isNull = econtext->caseValue_isNull; @@ -1838,7 +1844,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, { econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg, econtext, - &econtext->caseValue_isNull, + &econtext->caseValue_isNull, NULL); } @@ -2009,7 +2015,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot merge incompatible arrays"), errdetail("Array with element type %s cannot be " - "included in ARRAY construct with element type %s.", + "included in ARRAY construct with element type %s.", format_type_be(ARR_ELEMTYPE(array)), format_type_be(element_type)))); @@ -2021,8 +2027,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext, if (ndims <= 0 || ndims > MAXDIM) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("number of array dimensions (%d) exceeds " \ - "the maximum allowed (%d)", ndims, MAXDIM))); + errmsg("number of array dimensions (%d) exceeds " \ + "the maximum allowed (%d)", ndims, MAXDIM))); elem_dims = (int *) palloc(elem_ndims * sizeof(int)); memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int)); @@ -2600,18 +2606,18 @@ ExecEvalFieldStore(FieldStoreState *fstate, forboth(l1, fstate->newvals, l2, fstore->fieldnums) { - ExprState *newval = (ExprState *) lfirst(l1); - AttrNumber fieldnum = lfirst_int(l2); + ExprState *newval = (ExprState *) lfirst(l1); + AttrNumber fieldnum = lfirst_int(l2); bool eisnull; Assert(fieldnum > 0 && fieldnum <= tupDesc->natts); /* - * Use the CaseTestExpr mechanism to pass down the old value of the - * field being replaced; this is useful in case we have a nested field - * update situation. It's safe to reuse the CASE mechanism because - * there cannot be a CASE between here and where the value would be - * needed. + * Use the CaseTestExpr mechanism to pass down the old value of + * the field being replaced; this is useful in case we have a + * nested field update situation. It's safe to reuse the CASE + * mechanism because there cannot be a CASE between here and where + * the value would be needed. */ econtext->caseValue_datum = values[fieldnum - 1]; econtext->caseValue_isNull = (nulls[fieldnum - 1] == 'n'); @@ -2981,7 +2987,7 @@ ExecInitExpr(Expr *node, PlanState *parent) break; case T_RowExpr: { - RowExpr *rowexpr = (RowExpr *) node; + RowExpr *rowexpr = (RowExpr *) node; RowExprState *rstate = makeNode(RowExprState); Form_pg_attribute *attrs; List *outlist = NIL; @@ -3016,15 +3022,15 @@ ExecInitExpr(Expr *node, PlanState *parent) /* * Guard against ALTER COLUMN TYPE on rowtype * since the RowExpr was created. XXX should we - * check typmod too? Not sure we can be sure it'll - * be the same. + * check typmod too? Not sure we can be sure + * it'll be the same. 
*/ if (exprType((Node *) e) != attrs[i]->atttypid) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("ROW() column has type %s instead of type %s", - format_type_be(exprType((Node *) e)), - format_type_be(attrs[i]->atttypid)))); + format_type_be(exprType((Node *) e)), + format_type_be(attrs[i]->atttypid)))); } else { @@ -3111,7 +3117,7 @@ ExecInitExpr(Expr *node, PlanState *parent) TargetEntry *tle = (TargetEntry *) node; GenericExprState *gstate = makeNode(GenericExprState); - gstate->xprstate.evalfunc = NULL; /* not used */ + gstate->xprstate.evalfunc = NULL; /* not used */ gstate->arg = ExecInitExpr(tle->expr, parent); state = (ExprState *) gstate; } @@ -3546,8 +3552,8 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone) /* * store the tuple in the projection slot and return the slot. */ - return ExecStoreTuple(newTuple, /* tuple to store */ - slot, /* slot to store in */ - InvalidBuffer, /* tuple has no buffer */ + return ExecStoreTuple(newTuple, /* tuple to store */ + slot, /* slot to store in */ + InvalidBuffer, /* tuple has no buffer */ true); } diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c index fd123bbd55..6adefdc266 100644 --- a/src/backend/executor/execScan.c +++ b/src/backend/executor/execScan.c @@ -12,7 +12,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.32 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.33 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -224,8 +224,8 @@ tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc return false; /* tlist too long */ /* - * If the plan context requires a particular hasoid setting, then - * that has to match, too. + * If the plan context requires a particular hasoid setting, then that + * has to match, too. */ if (ExecContextForcesOids(ps, &hasoid) && hasoid != tupdesc->tdhasoid) diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index 98f4d503e8..92d6cd4374 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -15,7 +15,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.81 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.82 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -117,7 +117,7 @@ static TupleDesc ExecTypeFromTLInternal(List *targetList, - bool hasoid, bool skipjunk); + bool hasoid, bool skipjunk); /* ---------------------------------------------------------------- @@ -149,7 +149,7 @@ ExecCreateTupleTable(int initialSize) /* initial number of slots in /* * Now allocate our new table along with space for the pointers to the - * tuples. Zero out the slots. + * tuples. Zero out the slots. 
*/ newtable = (TupleTable) palloc(sizeof(TupleTableData)); @@ -568,10 +568,10 @@ ExecCleanTypeFromTL(List *targetList, bool hasoid) static TupleDesc ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk) { - TupleDesc typeInfo; - ListCell *l; - int len; - int cur_resno = 1; + TupleDesc typeInfo; + ListCell *l; + int len; + int cur_resno = 1; if (skipjunk) len = ExecCleanTargetListLength(targetList); @@ -581,8 +581,8 @@ ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk) foreach(l, targetList) { - TargetEntry *tle = lfirst(l); - Resdom *resdom = tle->resdom; + TargetEntry *tle = lfirst(l); + Resdom *resdom = tle->resdom; if (skipjunk && resdom->resjunk) continue; @@ -605,16 +605,16 @@ ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk) TupleDesc ExecTypeFromExprList(List *exprList) { - TupleDesc typeInfo; - ListCell *l; - int cur_resno = 1; + TupleDesc typeInfo; + ListCell *l; + int cur_resno = 1; char fldname[NAMEDATALEN]; typeInfo = CreateTemplateTupleDesc(list_length(exprList), false); foreach(l, exprList) { - Node *e = lfirst(l); + Node *e = lfirst(l); sprintf(fldname, "f%d", cur_resno); diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index 1e5694a926..79ab787b07 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.113 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.114 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -179,7 +179,7 @@ CreateExecutorState(void) */ estate->es_direction = ForwardScanDirection; estate->es_snapshot = SnapshotNow; - estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */ + estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */ estate->es_range_table = NIL; estate->es_result_relations = NULL; @@ -248,7 +248,8 @@ FreeExecutorState(EState *estate) */ while (estate->es_exprcontexts) { - /* XXX: seems there ought to be a faster way to implement this + /* + * XXX: seems there ought to be a faster way to implement this * than repeated list_delete(), no? */ FreeExprContext((ExprContext *) linitial(estate->es_exprcontexts)); @@ -364,7 +365,7 @@ FreeExprContext(ExprContext *econtext) * ReScanExprContext * * Reset an expression context in preparation for a rescan of its - * plan node. This requires calling any registered shutdown callbacks, + * plan node. This requires calling any registered shutdown callbacks, * since any partially complete set-returning-functions must be canceled. * * Note we make no assumption about the caller's memory context. 
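The shutdown-callback mechanism that ReScanExprContext relies on is worth a concrete illustration; a minimal sketch, assuming a hypothetical per-scan state struct and callback (RegisterExprContextCallback() and tuplestore_end() are the real APIs):

    typedef struct MySRFState       /* hypothetical SRF state */
    {
        Tuplestorestate *store;
    } MySRFState;

    static void
    my_srf_shutdown(Datum arg)
    {
        MySRFState *state = (MySRFState *) DatumGetPointer(arg);

        /* cancel the partially-read result set */
        if (state->store)
            tuplestore_end(state->store);
        state->store = NULL;
    }

    /* ... at setup time, so a rescan or context teardown cleans us up: */
    RegisterExprContextCallback(econtext, my_srf_shutdown,
                                PointerGetDatum(state));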
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index f06fabb5fc..ea3b12be5f 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.85 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.86 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -58,7 +58,7 @@ typedef struct local_es */ typedef struct { - Oid *argtypes; /* resolved types of arguments */ + Oid *argtypes; /* resolved types of arguments */ Oid rettype; /* actual return type */ int typlen; /* length of the return type */ bool typbyval; /* true if return type is pass by value */ @@ -94,7 +94,7 @@ init_execution_state(List *queryTree_list) { execution_state *firstes = NULL; execution_state *preves = NULL; - ListCell *qtl_item; + ListCell *qtl_item; foreach(qtl_item, queryTree_list) { @@ -180,8 +180,8 @@ init_sql_fcache(FmgrInfo *finfo) typeStruct = (Form_pg_type) GETSTRUCT(typeTuple); /* - * get the type length and by-value flag from the type tuple; also - * do a preliminary check for returnsTuple (this may prove inaccurate, + * get the type length and by-value flag from the type tuple; also do + * a preliminary check for returnsTuple (this may prove inaccurate, * see below). */ fcache->typlen = typeStruct->typlen; @@ -190,8 +190,8 @@ init_sql_fcache(FmgrInfo *finfo) rettype == RECORDOID); /* - * Parse and rewrite the queries. We need the argument type info to pass - * to the parser. + * Parse and rewrite the queries. We need the argument type info to + * pass to the parser. */ nargs = procedureStruct->pronargs; haspolyarg = false; @@ -240,11 +240,11 @@ init_sql_fcache(FmgrInfo *finfo) * If the function has any arguments declared as polymorphic types, * then it wasn't type-checked at definition time; must do so now. * - * Also, force a type-check if the declared return type is a rowtype; - * we need to find out whether we are actually returning the whole - * tuple result, or just regurgitating a rowtype expression result. - * In the latter case we clear returnsTuple because we need not act - * different from the scalar result case. + * Also, force a type-check if the declared return type is a rowtype; we + * need to find out whether we are actually returning the whole tuple + * result, or just regurgitating a rowtype expression result. In the + * latter case we clear returnsTuple because we need not act different + * from the scalar result case. */ if (haspolyarg || fcache->returnsTuple) fcache->returnsTuple = check_sql_fn_retval(rettype, @@ -395,9 +395,9 @@ postquel_execute(execution_state *es, * XXX do we need to remove junk attrs from the result tuple? * Probably OK to leave them, as long as they are at the end. */ - HeapTupleHeader dtup; - Oid dtuptype; - int32 dtuptypmod; + HeapTupleHeader dtup; + Oid dtuptype; + int32 dtuptypmod; dtup = (HeapTupleHeader) palloc(tup->t_len); memcpy((char *) dtup, (char *) tup->t_data, tup->t_len); @@ -433,8 +433,8 @@ postquel_execute(execution_state *es, else { /* - * Returning a scalar, which we have to extract from the - * first column of the SELECT result, and then copy into current + * Returning a scalar, which we have to extract from the first + * column of the SELECT result, and then copy into current * execution context if needed. 
*/ value = heap_getattr(tup, 1, tupDesc, &(fcinfo->isnull)); @@ -635,7 +635,8 @@ sql_exec_error_callback(void *arg) fn_name = NameStr(functup->proname); /* - * If there is a syntax error position, convert to internal syntax error + * If there is a syntax error position, convert to internal syntax + * error */ syntaxerrposition = geterrposition(); if (syntaxerrposition > 0) diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 99173a17a0..b31cd8b0e9 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -45,7 +45,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.124 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.125 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -252,11 +252,11 @@ initialize_aggregates(AggState *aggstate, } /* - * If we are reinitializing after a group boundary, we have to free - * any prior transValue to avoid memory leakage. We must check not - * only the isnull flag but whether the pointer is NULL; since - * pergroupstate is initialized with palloc0, the initial condition - * has isnull = 0 and null pointer. + * If we are reinitializing after a group boundary, we have to + * free any prior transValue to avoid memory leakage. We must + * check not only the isnull flag but whether the pointer is NULL; + * since pergroupstate is initialized with palloc0, the initial + * condition has isnull = 0 and null pointer. */ if (!peraggstate->transtypeByVal && !pergroupstate->transValueIsNull && @@ -811,14 +811,14 @@ agg_retrieve_direct(AggState *aggstate) /* * If we have no first tuple (ie, the outerPlan didn't return * anything), create a dummy all-nulls input tuple for use by - * ExecQual/ExecProject. 99.44% of the time this is a waste of cycles, - * because ordinarily the projected output tuple's targetlist - * cannot contain any direct (non-aggregated) references to input - * columns, so the dummy tuple will not be referenced. However - * there are special cases where this isn't so --- in particular - * an UPDATE involving an aggregate will have a targetlist - * reference to ctid. We need to return a null for ctid in that - * situation, not coredump. + * ExecQual/ExecProject. 99.44% of the time this is a waste of + * cycles, because ordinarily the projected output tuple's + * targetlist cannot contain any direct (non-aggregated) + * references to input columns, so the dummy tuple will not be + * referenced. However there are special cases where this isn't so + * --- in particular an UPDATE involving an aggregate will have a + * targetlist reference to ctid. We need to return a null for + * ctid in that situation, not coredump. * * The values returned for the aggregates will be the initial values * of the transition functions. @@ -865,9 +865,9 @@ agg_retrieve_direct(AggState *aggstate) if (ExecQual(aggstate->ss.ps.qual, econtext, false)) { /* - * Form and return a projection tuple using the aggregate results - * and the representative input tuple. Note we do not support - * aggregates returning sets ... + * Form and return a projection tuple using the aggregate + * results and the representative input tuple. Note we do not + * support aggregates returning sets ... 
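The scalar path that comment introduces reduces to fetch-then-copy; a minimal sketch (heap_getattr() and datumCopy() are the real calls; the copy only matters for pass-by-reference types):

    value = heap_getattr(tup, 1, tupDesc, &(fcinfo->isnull));

    /*
     * Copy a pass-by-reference result out of the about-to-be-freed
     * query context; for by-value types datumCopy() just returns it.
     */
    if (!fcinfo->isnull)
        value = datumCopy(value, fcache->typbyval, fcache->typlen);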
*/ return ExecProject(projInfo, NULL); } @@ -1009,9 +1009,9 @@ agg_retrieve_hash_table(AggState *aggstate) if (ExecQual(aggstate->ss.ps.qual, econtext, false)) { /* - * Form and return a projection tuple using the aggregate results - * and the representative input tuple. Note we do not support - * aggregates returning sets ... + * Form and return a projection tuple using the aggregate + * results and the representative input tuple. Note we do not + * support aggregates returning sets ... */ return ExecProject(projInfo, NULL); } @@ -1478,7 +1478,10 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt) } else { - /* Reset the per-group state (in particular, mark transvalues null) */ + /* + * Reset the per-group state (in particular, mark transvalues + * null) + */ MemSet(node->pergroup, 0, sizeof(AggStatePerGroupData) * node->numaggs); } diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index f941ec3289..fbc5565571 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.63 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.64 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -559,7 +559,7 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate, if (nread != sizeof(HeapTupleData)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from hash-join temporary file: %m"))); + errmsg("could not read from hash-join temporary file: %m"))); heapTuple = palloc(HEAPTUPLESIZE + htup.t_len); memcpy((char *) heapTuple, (char *) &htup, sizeof(HeapTupleData)); heapTuple->t_datamcxt = CurrentMemoryContext; @@ -569,7 +569,7 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate, if (nread != (size_t) htup.t_len) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from hash-join temporary file: %m"))); + errmsg("could not read from hash-join temporary file: %m"))); return ExecStoreTuple(heapTuple, tupleSlot, InvalidBuffer, true); } @@ -627,14 +627,14 @@ ExecHashJoinNewBatch(HashJoinState *hjstate) if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0, 0L, SEEK_SET)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not rewind hash-join temporary file: %m"))); + errmsg("could not rewind hash-join temporary file: %m"))); innerFile = hashtable->innerBatchFile[newbatch - 1]; if (BufFileSeek(innerFile, 0, 0L, SEEK_SET)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not rewind hash-join temporary file: %m"))); + errmsg("could not rewind hash-join temporary file: %m"))); /* * Reload the hash table with the new inner batch @@ -685,12 +685,12 @@ ExecHashJoinSaveTuple(HeapTuple heapTuple, if (written != sizeof(HeapTupleData)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to hash-join temporary file: %m"))); + errmsg("could not write to hash-join temporary file: %m"))); written = BufFileWrite(file, (void *) heapTuple->t_data, heapTuple->t_len); if (written != (size_t) heapTuple->t_len) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to hash-join temporary file: %m"))); + errmsg("could not write to hash-join temporary file: %m"))); } void diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index eb7b572035..2ff0121baf 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c 
@@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.96 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.97 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -38,7 +38,7 @@ * In a multiple-index plan, we must take care to return any given tuple * only once, even if it matches conditions of several index scans. Our * preferred way to do this is to record already-returned tuples in a hash - * table (using the TID as unique identifier). However, in a very large + * table (using the TID as unique identifier). However, in a very large * scan this could conceivably run out of memory. We limit the hash table * to no more than work_mem KB; if it grows past that, we fall back to the * pre-7.4 technique: evaluate the prior-scan index quals again for each @@ -129,11 +129,11 @@ IndexNext(IndexScanState *node) scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid; /* - * Clear any reference to the previously returned tuple. The idea here - * is to not have the tuple slot be the last holder of a pin on that - * tuple's buffer; if it is, we'll need a separate visit to the bufmgr - * to release the buffer. By clearing here, we get to have the release - * done by ReleaseAndReadBuffer inside index_getnext. + * Clear any reference to the previously returned tuple. The idea + * here is to not have the tuple slot be the last holder of a pin on + * that tuple's buffer; if it is, we'll need a separate visit to the + * bufmgr to release the buffer. By clearing here, we get to have the + * release done by ReleaseAndReadBuffer inside index_getnext. */ ExecClearTuple(slot); @@ -215,8 +215,9 @@ IndexNext(IndexScanState *node) false); /* don't pfree */ /* - * If any of the index operators involved in this scan are lossy, - * recheck them by evaluating the original operator clauses. + * If any of the index operators involved in this scan are + * lossy, recheck them by evaluating the original operator + * clauses. */ if (lossyQual) { @@ -224,15 +225,19 @@ IndexNext(IndexScanState *node) ResetExprContext(econtext); if (!ExecQual(lossyQual, econtext, false)) { - /* Fails lossy op, so drop it and loop back for another */ + /* + * Fails lossy op, so drop it and loop back for + * another + */ ExecClearTuple(slot); continue; } } /* - * If it's a multiple-index scan, make sure not to double-report - * a tuple matched by more than one index. (See notes above.) + * If it's a multiple-index scan, make sure not to + * double-report a tuple matched by more than one index. (See + * notes above.) */ if (numIndices > 1) { @@ -240,7 +245,7 @@ IndexNext(IndexScanState *node) if (node->iss_DupHash) { DupHashTabEntry *entry; - bool found; + bool found; entry = (DupHashTabEntry *) hash_search(node->iss_DupHash, @@ -248,7 +253,7 @@ IndexNext(IndexScanState *node) HASH_ENTER, &found); if (entry == NULL || - node->iss_DupHash->hctl->nentries > node->iss_MaxHash) + node->iss_DupHash->hctl->nentries > node->iss_MaxHash) { /* out of memory (either hard or soft limit) */ /* release hash table and fall thru to old code */ @@ -679,10 +684,11 @@ ExecInitIndexScan(IndexScan *node, EState *estate) * initialize child expressions * * Note: we don't initialize all of the indxqual expression, only the - * sub-parts corresponding to runtime keys (see below). 
The indxqualorig - * expression is always initialized even though it will only be used in - * some uncommon cases --- would be nice to improve that. (Problem is - * that any SubPlans present in the expression must be found now...) + * sub-parts corresponding to runtime keys (see below). The + * indxqualorig expression is always initialized even though it will + * only be used in some uncommon cases --- would be nice to improve + * that. (Problem is that any SubPlans present in the expression must + * be found now...) */ indexstate->ss.ps.targetlist = (List *) ExecInitExpr((Expr *) node->scan.plan.targetlist, @@ -788,14 +794,14 @@ ExecInitIndexScan(IndexScan *node, EState *estate) lossyflag_cell = list_head(lossyflags); for (j = 0; j < n_keys; j++) { - OpExpr *clause; /* one clause of index qual */ - Expr *leftop; /* expr on lhs of operator */ - Expr *rightop; /* expr on rhs ... */ + OpExpr *clause; /* one clause of index qual */ + Expr *leftop; /* expr on lhs of operator */ + Expr *rightop; /* expr on rhs ... */ int flags = 0; AttrNumber varattno; /* att number used in scan */ StrategyNumber strategy; /* op's strategy number */ - Oid subtype; /* op's strategy subtype */ - int lossy; /* op's recheck flag */ + Oid subtype; /* op's strategy subtype */ + int lossy; /* op's recheck flag */ RegProcedure opfuncid; /* operator proc id used in scan */ Datum scanvalue; /* value used in scan (if const) */ @@ -819,15 +825,16 @@ ExecInitIndexScan(IndexScan *node, EState *estate) /* * Here we figure out the contents of the index qual. The * usual case is (var op const) which means we form a scan key - * for the attribute listed in the var node and use the value of - * the const as comparison data. + * for the attribute listed in the var node and use the value + * of the const as comparison data. * * If we don't have a const node, it means our scan key is a - * function of information obtained during the execution of the - * plan, in which case we need to recalculate the index scan key - * at run time. Hence, we set have_runtime_keys to true and place - * the appropriate subexpression in run_keys. The corresponding - * scan key values are recomputed at run time. + * function of information obtained during the execution of + * the plan, in which case we need to recalculate the index + * scan key at run time. Hence, we set have_runtime_keys to + * true and place the appropriate subexpression in run_keys. + * The corresponding scan key values are recomputed at run + * time. */ run_keys[j] = NULL; @@ -892,18 +899,18 @@ ExecInitIndexScan(IndexScan *node, EState *estate) scanvalue); /* constant */ /* - * If this operator is lossy, add its indxqualorig - * expression to the list of quals to recheck. The - * list_nth() calls here could be avoided by chasing the - * lists in parallel to all the other lists, but since - * lossy operators are very uncommon, it's probably a - * waste of time to do so. + * If this operator is lossy, add its indxqualorig expression + * to the list of quals to recheck. The list_nth() calls here + * could be avoided by chasing the lists in parallel to all + * the other lists, but since lossy operators are very + * uncommon, it's probably a waste of time to do so. 
*/ if (lossy) { - List *qualOrig = indexstate->indxqualorig; + List *qualOrig = indexstate->indxqualorig; + lossyQuals[i] = lappend(lossyQuals[i], - list_nth((List *) list_nth(qualOrig, i), j)); + list_nth((List *) list_nth(qualOrig, i), j)); } } @@ -1037,7 +1044,7 @@ create_duphash(IndexScanState *node) node->iss_DupHash = hash_create("DupHashTable", nbuckets, &hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); if (node->iss_DupHash == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 585eee19fe..e913757d2e 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.67 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.68 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -104,10 +104,10 @@ static void MJFormSkipQuals(List *qualList, List **ltQuals, List **gtQuals, PlanState *parent) { - List *ltexprs, - *gtexprs; - ListCell *ltcdr, - *gtcdr; + List *ltexprs, + *gtexprs; + ListCell *ltcdr, + *gtcdr; /* * Make modifiable copies of the qualList. diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index 7a4c0cc80b..f3976c872a 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.49 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.50 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,11 +62,11 @@ SeqNext(SeqScanState *node) slot = node->ss_ScanTupleSlot; /* - * Clear any reference to the previously returned tuple. The idea here - * is to not have the tuple slot be the last holder of a pin on that - * tuple's buffer; if it is, we'll need a separate visit to the bufmgr - * to release the buffer. By clearing here, we get to have the release - * done by ReleaseAndReadBuffer inside heap_getnext. + * Clear any reference to the previously returned tuple. The idea + * here is to not have the tuple slot be the last holder of a pin on + * that tuple's buffer; if it is, we'll need a separate visit to the + * bufmgr to release the buffer. By clearing here, we get to have the + * release done by ReleaseAndReadBuffer inside heap_getnext. 
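The buffer-pin point in that rewrapped comment is easiest to see in the fetch loop itself; a condensed sketch including the store step that follows (ExecClearTuple(), heap_getnext(), and ExecStoreTuple() are the real calls):

    /* drop our reference first, so the slot isn't the last pin holder */
    ExecClearTuple(slot);

    tuple = heap_getnext(scandesc, direction);

    if (tuple)
        ExecStoreTuple(tuple,               /* tuple to store */
                       slot,                /* slot to store in */
                       scandesc->rs_cbuf,   /* buffer holding the tuple */
                       false);              /* don't pfree */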
*/ ExecClearTuple(slot); diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index 028640c4b9..0a35b11110 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.64 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.65 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -912,7 +912,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext) SubLinkType subLinkType = subplan->subLinkType; MemoryContext oldcontext; TupleTableSlot *slot; - ListCell *l; + ListCell *l; bool found = false; ArrayBuildState *astate = NULL; diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index 183068a319..3b71629ad5 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.43 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.44 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -109,8 +109,9 @@ ExecUnique(UniqueState *node) * he next calls us. * * tgl 3/2004: the above concern is no longer valid; junkfilters used to - modify their input's return slot but don't anymore, and I don't think - anyplace else does either. Not worth changing this code though. + modify their input's return slot but don't anymore, and I don't + think anyplace else does either. Not worth changing this code + though. */ if (node->priorTuple != NULL) heap_freetuple(node->priorTuple);
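For context, the prior-tuple bookkeeping this last hunk's comment discusses drives a grouping comparison; a minimal sketch of the surrounding loop, assuming the 8.0-era execTuplesMatch() signature and slot layout:

    for (;;)
    {
        slot = ExecProcNode(outerPlan);
        if (TupIsNull(slot))
            return NULL;

        /* emit the tuple unless it matches the previously emitted one */
        if (node->priorTuple == NULL ||
            !execTuplesMatch(slot->val, node->priorTuple,
                             tupDesc,
                             plannode->numCols, plannode->uniqColIdx,
                             node->eqfunctions,
                             node->tempContext))
            break;
    }

    /* remember this tuple for the next comparison */
    if (node->priorTuple != NULL)
        heap_freetuple(node->priorTuple);
    node->priorTuple = heap_copytuple(slot->val);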