/* -------------------------------------------------------------------------
* pg_dumplo
*
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.6 2001/03/22 03:59:10 momjian Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_export.c,v 1.7 2001/03/22 06:16:06 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
int i;
int n;
- /* ----------
+ /*
* Now find any candidate tables that have columns of type oid.
*
* NOTE: System tables including pg_largeobject will be ignored.
*
* NOTE: the system oid column is ignored, as it has attnum < 1.
* This shouldn't matter for correctness, but it saves time.
- * ----------
*/
pgLO->res = PQexec(pgLO->conn,
"SELECT c.relname, a.attname "
for (ll = pgLO->lolist; ll->lo_table != NULL; ll++)
{
- /* ----------
+ /*
* Query: find the LOs referenced by this column
- * ----------
*/
sprintf(Qbuff, "SELECT DISTINCT l.loid FROM \"%s\" x, pg_largeobject l WHERE x.\"%s\" = l.loid",
ll->lo_table, ll->lo_attr);
int t;
char *val;
- /* ----------
+ /*
* Create DIR/FILE
- * ----------
*/
if (pgLO->action != ACTION_SHOW)
{
/* -------------------------------------------------------------------------
* pg_dumplo
*
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.4 2001/03/22 03:59:10 momjian Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/lo_import.c,v 1.5 2001/03/22 06:16:06 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
sprintf(lo_path, "%s/%s", pgLO->space, path);
- /* ----------
+ /*
* Import LO
- * ----------
*/
if ((new_oid = lo_import(pgLO->conn, lo_path)) == 0)
{
pgLO->counter++;
- /* ----------
+ /*
* UPDATE oid in tab
- * ----------
*/
sprintf(Qbuff, "UPDATE \"%s\" SET \"%s\"=%u WHERE \"%s\"=%u",
loa.lo_table, loa.lo_attr, new_oid, loa.lo_attr, loa.lo_oid);
/* -------------------------------------------------------------------------
* pg_dumplo
*
- * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.7 2001/03/22 03:59:10 momjian Exp $
+ * $Header: /cvsroot/pgsql/contrib/pg_dumplo/Attic/main.c,v 1.8 2001/03/22 06:16:06 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
progname = argv[0];
- /* ----------
+ /*
* Parse ARGV
- * ----------
*/
if (argc > 1)
{
exit(RE_ERROR);
}
- /* ----------
+ /*
* Check space
- * ----------
*/
if (!pgLO->space && pgLO->action != ACTION_SHOW)
{
exit(RE_ERROR);
}
- /* ----------
+ /*
* Make connection
- * ----------
*/
pgLO->conn = PQsetdbLogin(pgLO->host, NULL, NULL, NULL, pgLO->db,
pgLO->user, pwd);
pgLO->user = PQuser(pgLO->conn);
- /* ----------
+ /*
* Init index file
- * ----------
*/
if (pgLO->action != ACTION_SHOW)
index_file(pgLO);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.70 2001/03/22 03:59:11 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.71 2001/03/22 06:16:06 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
* there's a null somewhere in the tuple
*/
- /* ----------------
- * check to see if desired att is null
- * ----------------
+ /*
+ * check to see if desired att is null
*/
#ifdef IN_MACRO
}
#endif
- /* ----------------
- * Now check to see if any preceding bits are null...
- * ----------------
+ /*
+ * Now check to see if any preceding bits are null...
*/
{
int byte = attnum >> 3;
HeapTuple newTuple;
uint8 infomask;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(HeapTupleIsValid(tuple));
Assert(RelationIsValid(relation));
numberOfAttributes = RelationGetForm(relation)->relnatts;
- /* ----------------
- * allocate and fill *value and *nulls arrays from either
- * the tuple or the repl information, as appropriate.
- * ----------------
+ /*
+ * allocate and fill *value and *nulls arrays from either the tuple or
+ * the repl information, as appropriate.
*/
value = (Datum *) palloc(numberOfAttributes * sizeof *value);
nulls = (char *) palloc(numberOfAttributes * sizeof *nulls);
}
}
- /* ----------------
- * create a new tuple from the *values and *nulls arrays
- * ----------------
+ /*
+ * create a new tuple from the *values and *nulls arrays
*/
newTuple = heap_formtuple(RelationGetDescr(relation),
value,
nulls);
- /* ----------------
- * copy the header except for t_len, t_natts, t_hoff, t_bits, t_infomask
- * ----------------
+ /*
+ * copy the header except for t_len, t_natts, t_hoff, t_bits,
+ * t_infomask
*/
infomask = newTuple->t_data->t_infomask;
memmove((char *) &newTuple->t_data->t_oid, /* XXX */
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.53 2001/03/22 03:59:11 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.54 2001/03/22 06:16:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
infomask |= size;
- /* ----------------
+ /*
* initialize metadata
- * ----------------
*/
tuple->t_info = infomask;
return tuple;
int data_off; /* tuple data offset */
(void) isnull; /* not used */
- /* ----------------
- * sanity checks
- * ----------------
+
+ /*
+ * sanity checks
*/
/* ----------------
}
else
{ /* there's a null somewhere in the tuple */
- /* ----------------
- * check to see if desired att is null
- * ----------------
+
+ /*
+ * check to see if desired att is null
*/
/* XXX "knows" t_bits are just after fixed tuple header! */
}
#endif
- /* ----------------
- * Now check to see if any preceding bits are null...
- * ----------------
+ /*
+ * Now check to see if any preceding bits are null...
*/
{
int byte = attnum >> 3;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.58 2001/03/22 03:59:11 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.59 2001/03/22 06:16:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (myState->attrinfo != typeinfo || myState->nattrs != natts)
printtup_prepare_info(myState, typeinfo, natts);
- /* ----------------
- * tell the frontend to expect new tuple data (in ASCII style)
- * ----------------
+ /*
+ * tell the frontend to expect new tuple data (in ASCII style)
*/
pq_beginmessage(&buf);
pq_sendbyte(&buf, 'D');
- /* ----------------
- * send a bitmap of which attributes are not null
- * ----------------
+ /*
+ * send a bitmap of which attributes are not null
*/
j = 0;
k = 1 << 7;
if (k != (1 << 7)) /* flush last partial byte */
pq_sendint(&buf, j, 1);
- /* ----------------
- * send the attributes of this tuple
- * ----------------
+ /*
+ * send the attributes of this tuple
*/
for (i = 0; i < natts; ++i)
{
if (myState->attrinfo != typeinfo || myState->nattrs != natts)
printtup_prepare_info(myState, typeinfo, natts);
- /* ----------------
- * tell the frontend to expect new tuple data (in binary style)
- * ----------------
+ /*
+ * tell the frontend to expect new tuple data (in binary style)
*/
pq_beginmessage(&buf);
pq_sendbyte(&buf, 'B');
- /* ----------------
- * send a bitmap of which attributes are not null
- * ----------------
+ /*
+ * send a bitmap of which attributes are not null
*/
j = 0;
k = 1 << 7;
if (k != (1 << 7)) /* flush last partial byte */
pq_sendint(&buf, j, 1);
- /* ----------------
- * send the attributes of this tuple
- * ----------------
+ /*
+ * send the attributes of this tuple
*/
#ifdef IPORTAL_DEBUG
fprintf(stderr, "sending tuple with %d atts\n", natts);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.72 2001/03/22 03:59:11 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.73 2001/03/22 06:16:06 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
uint32 size;
TupleDesc desc;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(natts >= 1);
- /* ----------------
- * allocate enough memory for the tuple descriptor and
- * zero it as TupleDescInitEntry assumes that the descriptor
- * is filled with NULL pointers.
- * ----------------
+ /*
+ * allocate enough memory for the tuple descriptor and zero it as
+ * TupleDescInitEntry assumes that the descriptor is filled with NULL
+ * pointers.
*/
size = natts * sizeof(Form_pg_attribute);
desc = (TupleDesc) palloc(sizeof(struct tupleDesc));
{
TupleDesc desc;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(natts >= 1);
Form_pg_type typeForm;
Form_pg_attribute att;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(PointerIsValid(desc));
AssertArg(attributeNumber >= 1);
AssertArg(!PointerIsValid(desc->attrs[attributeNumber - 1]));
- /* ----------------
- * allocate storage for this attribute
- * ----------------
+ /*
+ * allocate storage for this attribute
*/
att = (Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
desc->attrs[attributeNumber - 1] = att;
- /* ----------------
- * initialize the attribute fields
- * ----------------
+ /*
+ * initialize the attribute fields
*/
att->attrelid = 0; /* dummy value */
0, 0, 0);
if (!HeapTupleIsValid(tuple))
{
- /* ----------------
- * here type info does not exist yet so we just fill
- * the attribute with dummy information and return false.
- * ----------------
+
+ /*
+ * here type info does not exist yet so we just fill the attribute
+ * with dummy information and return false.
*/
att->atttypid = InvalidOid;
att->attlen = (int16) 0;
return false;
}
- /* ----------------
- * type info exists so we initialize our attribute
- * information from the type tuple we found..
- * ----------------
+ /*
+ * type info exists so we initialize our attribute information from
+ * the type tuple we found..
*/
typeForm = (Form_pg_type) GETSTRUCT(tuple);
att->atttypid = tuple->t_data->t_oid;
- /*------------------------
+ /*
* There are a couple of cases where we must override the information
* stored in pg_type.
*
* First: if this attribute is a set, what is really stored in the
- * attribute is the OID of a tuple in the pg_proc catalog.
- * The pg_proc tuple contains the query string which defines
- * this set - i.e., the query to run to get the set.
- * So the atttypid (just assigned above) refers to the type returned
- * by this query, but the actual length of this attribute is the
- * length (size) of an OID.
+ * attribute is the OID of a tuple in the pg_proc catalog. The pg_proc
+ * tuple contains the query string which defines this set - i.e., the
+ * query to run to get the set. So the atttypid (just assigned above)
+ * refers to the type returned by this query, but the actual length of
+ * this attribute is the length (size) of an OID.
*
- * (Why not just make the atttypid point to the OID type, instead
- * of the type the query returns? Because the executor uses the atttypid
- * to tell the front end what type will be returned (in BeginCommand),
- * and in the end the type returned will be the result of the query, not
- * an OID.)
+ * (Why not just make the atttypid point to the OID type, instead of the
+ * type the query returns? Because the executor uses the atttypid to
+ * tell the front end what type will be returned (in BeginCommand),
+ * and in the end the type returned will be the result of the query,
+ * not an OID.)
*
* (Why not wait until the return type of the set is known (i.e., the
* recursive call to the executor to execute the set has returned)
*
* A set of complex type is first and foremost a set, so its
* representation is Oid not pointer. So, test that case first.
- *-----------------------------------------
*/
if (attisset)
{
int ndef = 0;
bool attisset;
- /* ----------------
- * allocate a new tuple descriptor
- * ----------------
+ /*
+ * allocate a new tuple descriptor
*/
natts = length(schema);
desc = CreateTemplateTupleDesc(natts);
ColumnDef *entry = lfirst(p);
List *arry;
- /* ----------------
- * for each entry in the list, get the name and type
- * information from the list and have TupleDescInitEntry
- * fill in the attribute information we need.
- * ----------------
+ /*
+ * for each entry in the list, get the name and type information
+ * from the list and have TupleDescInitEntry fill in the attribute
+ * information we need.
*/
attnum++;
typenameTypeId(typename),
atttypmod, attdim, attisset))
{
- /* ----------------
- * if TupleDescInitEntry() fails, it means there is
- * no type in the system catalogs. So now we check if
- * the type name equals the relation name. If so we
- * have a self reference, otherwise it's an error.
- * ----------------
+
+ /*
+ * if TupleDescInitEntry() fails, it means there is no type in
+ * the system catalogs. So now we check if the type name
+ * equals the relation name. If so we have a self reference,
+ * otherwise it's an error.
*/
if (strcmp(typename, relname) == 0)
TupleDescMakeSelfReference(desc, attnum, relname);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.111 2001/03/22 03:59:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.112 2001/03/22 06:16:07 momjian Exp $
*
*
* INTERFACE ROUTINES
unsigned nkeys,
ScanKey key)
{
- /* ----------------
- * Make sure we have up-to-date idea of number of blocks in relation.
- * It is sufficient to do this once at scan start, since any tuples
- * added while the scan is in progress will be invisible to my
- * transaction anyway...
- * ----------------
+
+ /*
+ * Make sure we have up-to-date idea of number of blocks in relation.
+ * It is sufficient to do this once at scan start, since any tuples
+ * added while the scan is in progress will be invisible to my
+ * transaction anyway...
*/
relation->rd_nblocks = RelationGetNumberOfBlocks(relation);
if (relation->rd_nblocks == 0)
{
- /* ----------------
- * relation is empty
- * ----------------
+
+ /*
+ * relation is empty
*/
scan->rs_ntup.t_datamcxt = scan->rs_ctup.t_datamcxt =
scan->rs_ptup.t_datamcxt = NULL;
}
else if (atend)
{
- /* ----------------
- * reverse scan
- * ----------------
+
+ /*
+ * reverse scan
*/
scan->rs_ntup.t_datamcxt = scan->rs_ctup.t_datamcxt = NULL;
scan->rs_ntup.t_data = scan->rs_ctup.t_data = NULL;
}
else
{
- /* ----------------
- * forward scan
- * ----------------
+
+ /*
+ * forward scan
*/
scan->rs_ctup.t_datamcxt = scan->rs_ptup.t_datamcxt = NULL;
scan->rs_ctup.t_data = scan->rs_ptup.t_data = NULL;
ItemPointerSetInvalid(&(scan->rs_mntid));
ItemPointerSetInvalid(&(scan->rs_mcd));
- /* ----------------
- * copy the scan key, if appropriate
- * ----------------
+ /*
+ * copy the scan key, if appropriate
*/
if (key != NULL)
memmove(scan->rs_key, key, nkeys * sizeof(ScanKeyData));
if (BufferIsValid(scan->rs_pbuf))
ReleaseBuffer(scan->rs_pbuf);
- /* ------------------------------------
- * Scan will pin buffer once for each non-NULL tuple pointer
- * (ptup, ctup, ntup), so they have to be unpinned multiple
- * times.
- * ------------------------------------
+ /*
+ * Scan will pin buffer once for each non-NULL tuple pointer (ptup,
+ * ctup, ntup), so they have to be unpinned multiple times.
*/
if (BufferIsValid(scan->rs_cbuf))
ReleaseBuffer(scan->rs_cbuf);
ItemPointer tid = (tuple->t_data == NULL) ?
(ItemPointer) NULL : &(tuple->t_self);
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_heapgettup);
IncrHeapAccessStat(global_heapgettup);
- /* ----------------
- * debugging stuff
+ /*
+ * debugging stuff
*
- * check validity of arguments, here and for other functions too
- * Note: no locking manipulations needed--this is a local function
- * ----------------
+ * check validity of arguments, here and for other functions too.
+ * Note: no locking manipulations needed--this is a local function
*/
#ifdef HEAPDEBUGALL
if (ItemPointerIsValid(tid))
tuple->t_tableOid = relation->rd_id;
- /* ----------------
- * return null immediately if relation is empty
- * ----------------
+ /*
+ * return null immediately if relation is empty
*/
if (!(pages = relation->rd_nblocks))
{
return;
}
- /* ----------------
- * calculate next starting lineoff, given scan direction
- * ----------------
+ /*
+ * calculate next starting lineoff, given scan direction
*/
if (!dir)
{
- /* ----------------
+
+ /*
* ``no movement'' scan direction
- * ----------------
*/
/* assume it is a valid TID XXX */
if (ItemPointerIsValid(tid) == false)
}
else if (dir < 0)
{
- /* ----------------
- * reverse scan direction
- * ----------------
+
+ /*
+ * reverse scan direction
*/
if (ItemPointerIsValid(tid) == false)
tid = NULL;
}
else
{
- /* ----------------
- * forward scan direction
- * ----------------
+
+ /*
+ * forward scan direction
*/
if (ItemPointerIsValid(tid) == false)
{
/* 'dir' is now non-zero */
- /* ----------------
- * calculate line pointer and number of remaining items
- * to check on this page.
- * ----------------
+ /*
+ * calculate line pointer and number of remaining items to check on
+ * this page.
*/
lpp = PageGetItemId(dp, lineoff);
if (dir < 0)
else
linesleft = lines - lineoff;
- /* ----------------
- * advance the scan until we find a qualifying tuple or
- * run out of stuff to scan
- * ----------------
+ /*
+ * advance the scan until we find a qualifying tuple or run out of
+ * stuff to scan
*/
for (;;)
{
tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
tuple->t_len = ItemIdGetLength(lpp);
ItemPointerSet(&(tuple->t_self), page, lineoff);
- /* ----------------
- * if current tuple qualifies, return it.
- * ----------------
+
+ /*
+ * if current tuple qualifies, return it.
*/
HeapTupleSatisfies(tuple, relation, *buffer, (PageHeader) dp,
snapshot, nkeys, key);
}
}
- /* ----------------
- * otherwise move to the next item on the page
- * ----------------
+ /*
+ * otherwise move to the next item on the page
*/
--linesleft;
if (dir < 0)
}
}
- /* ----------------
- * if we get here, it means we've exhausted the items on
- * this page and it's time to move to the next..
- * ----------------
+ /*
+ * if we get here, it means we've exhausted the items on this page
+ * and it's time to move to the next..
*/
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
page = nextpage(page, dir);
- /* ----------------
- * return NULL if we've exhausted all the pages..
- * ----------------
+ /*
+ * return NULL if we've exhausted all the pages..
*/
if (page < 0 || page >= pages)
{
Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_open);
IncrHeapAccessStat(global_open);
Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_openr);
IncrHeapAccessStat(global_openr);
{
Relation r;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_open);
IncrHeapAccessStat(global_open);
{
Relation r;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_openr);
IncrHeapAccessStat(global_openr);
{
Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_close);
IncrHeapAccessStat(global_close);
{
HeapScanDesc scan;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_beginscan);
IncrHeapAccessStat(global_beginscan);
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
if (!RelationIsValid(relation))
elog(ERROR, "heap_beginscan: !RelationIsValid(relation)");
- /* ----------------
- * increment relation ref count while scanning relation
+ /*
+ * increment relation ref count while scanning relation
*
- * This is just to make really sure the relcache entry won't go away
- * while the scan has a pointer to it. Caller should be holding the
- * rel open anyway, so this is redundant in all normal scenarios...
- * ----------------
+ * This is just to make really sure the relcache entry won't go away
+ * while the scan has a pointer to it. Caller should be holding the
+ * rel open anyway, so this is redundant in all normal scenarios...
*/
RelationIncrementReferenceCount(relation);
if (relation->rd_rel->relkind == RELKIND_UNCATALOGED)
snapshot = SnapshotSelf;
- /* ----------------
- * allocate and initialize scan descriptor
- * ----------------
+ /*
+ * allocate and initialize scan descriptor
*/
scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
bool scanFromEnd,
ScanKey key)
{
- /* ----------------
- * increment access statistics
- * ----------------
+
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_rescan);
IncrHeapAccessStat(global_rescan);
- /* ----------------
- * unpin scan buffers
- * ----------------
+ /*
+ * unpin scan buffers
*/
unpinscan(scan);
- /* ----------------
- * reinitialize scan descriptor
- * ----------------
+ /*
+ * reinitialize scan descriptor
*/
scan->rs_atend = scanFromEnd;
initscan(scan, scan->rs_rd, scanFromEnd, scan->rs_nkeys, key);
void
heap_endscan(HeapScanDesc scan)
{
- /* ----------------
- * increment access statistics
- * ----------------
+
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_endscan);
IncrHeapAccessStat(global_endscan);
/* Note: no locking manipulations needed */
- /* ----------------
- * unpin scan buffers
- * ----------------
+ /*
+ * unpin scan buffers
*/
unpinscan(scan);
- /* ----------------
- * decrement relation reference count and free scan descriptor storage
- * ----------------
+ /*
+ * decrement relation reference count and free scan descriptor storage
*/
RelationDecrementReferenceCount(scan->rs_rd);
{
HeapScanDesc scan = scandesc;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_getnext);
IncrHeapAccessStat(global_getnext);
/* Note: no locking manipulations needed */
- /* ----------------
- * argument checks
- * ----------------
+ /*
+ * argument checks
*/
if (scan == NULL)
elog(ERROR, "heap_getnext: NULL relscan");
- /* ----------------
- * initialize return buffer to InvalidBuffer
- * ----------------
+ /*
+ * initialize return buffer to InvalidBuffer
*/
HEAPDEBUG_1; /* heap_getnext( info ) */
if (backw)
{
- /* ----------------
- * handle reverse scan
- * ----------------
+
+ /*
+ * handle reverse scan
*/
HEAPDEBUG_2; /* heap_getnext called with backw */
}
else
{
- /* ----------------
- * handle forward scan
- * ----------------
+
+ /*
+ * handle forward scan
*/
if (scan->rs_ctup.t_data == scan->rs_ntup.t_data &&
BufferIsInvalid(scan->rs_nbuf))
scan->rs_nbuf = UnknownBuffer;
}
- /* ----------------
- * if we get here it means we have a new current scan tuple, so
- * point to the proper return buffer and return the tuple.
- * ----------------
+ /*
+ * if we get here it means we have a new current scan tuple, so point
+ * to the proper return buffer and return the tuple.
*/
HEAPDEBUG_7; /* heap_getnext returning tuple */
ItemPointer tid = &(tuple->t_self);
OffsetNumber offnum;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_fetch);
IncrHeapAccessStat(global_fetch);
- /* ----------------
- * get the buffer from the relation descriptor
- * Note that this does a buffer pin.
- * ----------------
+ /*
+ * get the buffer from the relation descriptor.
+ * Note that this does a buffer pin.
*/
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
- /* ----------------
- * get the item line pointer corresponding to the requested tid
- * ----------------
+ /*
+ * get the item line pointer corresponding to the requested tid
*/
dp = (PageHeader) BufferGetPage(buffer);
offnum = ItemPointerGetOffsetNumber(tid);
lp = PageGetItemId(dp, offnum);
- /* ----------------
- * more sanity checks
- * ----------------
+ /*
+ * more sanity checks
*/
if (!ItemIdIsUsed(lp))
tuple->t_len = ItemIdGetLength(lp);
tuple->t_tableOid = relation->rd_id;
- /* ----------------
- * check time qualification of tid
- * ----------------
+ /*
+ * check time qualification of tid
*/
HeapTupleSatisfies(tuple, relation, buffer, dp,
bool invalidBlock,
linkend;
- /* ----------------
- * get the buffer from the relation descriptor
- * Note that this does a buffer pin.
- * ----------------
+ /*
+ * get the buffer from the relation descriptor.
+ * Note that this does a buffer pin.
*/
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
- /* ----------------
- * get the item line pointer corresponding to the requested tid
- * ----------------
+ /*
+ * get the item line pointer corresponding to the requested tid
*/
dp = (PageHeader) BufferGetPage(buffer);
offnum = ItemPointerGetOffsetNumber(tid);
return NULL;
}
- /* ----------------
- * more sanity checks
- * ----------------
+ /*
+ * more sanity checks
*/
tp.t_datamcxt = NULL;
tp.t_self = *tid;
ctid = tp.t_data->t_ctid;
- /* ----------------
- * check time qualification of tid
- * ----------------
+ /*
+ * check time qualification of tid
*/
HeapTupleSatisfies(&tp, relation, buffer, dp,
IncrHeapAccessStat(local_insert);
IncrHeapAccessStat(global_insert);
- /* ----------------
- * If the object id of this tuple has already been assigned, trust
- * the caller. There are a couple of ways this can happen. At initial
- * db creation, the backend program sets oids for tuples. When we
- * define an index, we set the oid. Finally, in the future, we may
- * allow users to set their own object ids in order to support a
- * persistent object store (objects need to contain pointers to one
- * another).
- * ----------------
+ /*
+ * If the object id of this tuple has already been assigned, trust the
+ * caller. There are a couple of ways this can happen. At initial db
+ * creation, the backend program sets oids for tuples. When we define
+ * an index, we set the oid. Finally, in the future, we may allow
+ * users to set their own object ids in order to support a persistent
+ * object store (objects need to contain pointers to one another).
*/
if (!OidIsValid(tup->t_data->t_oid))
tup->t_data->t_oid = newoid();
tup->t_tableOid = relation->rd_id;
#ifdef TUPLE_TOASTER_ACTIVE
- /* ----------
- * If the new tuple is too big for storage or contains already
- * toasted attributes from some other relation, invoke the toaster.
- * ----------
+
+ /*
+ * If the new tuple is too big for storage or contains already toasted
+ * attributes from some other relation, invoke the toaster.
*/
if (HeapTupleHasExtended(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
#ifdef TUPLE_TOASTER_ACTIVE
- /* ----------
- * If the relation has toastable attributes, we need to delete
- * no longer needed items there too. We have to do this before
+
+ /*
+ * If the relation has toastable attributes, we need to delete no
+ * longer needed items there too. We have to do this before
* WriteBuffer because we need to look at the contents of the tuple,
* but it's OK to release the context lock on the buffer first.
- * ----------
*/
if (HeapTupleHasExtended(&tp))
heap_tuple_toast_attrs(relation, NULL, &(tp));
heap_markpos(HeapScanDesc scan)
{
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_markpos);
IncrHeapAccessStat(global_markpos);
scan->rs_key);
}
- /* ----------------
+ /*
* Should not unpin the buffer pages. They may still be in use.
- * ----------------
*/
if (scan->rs_ptup.t_data != NULL)
scan->rs_mptid = scan->rs_ptup.t_self;
void
heap_restrpos(HeapScanDesc scan)
{
- /* ----------------
- * increment access statistics
- * ----------------
+
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_restrpos);
IncrHeapAccessStat(global_restrpos);
*
*
* IDENTIFICATION
- * $Id: hio.c,v 1.36 2001/03/22 03:59:13 momjian Exp $
+ * $Id: hio.c,v 1.37 2001/03/22 06:16:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ItemId itemId;
Item item;
- /* ----------------
- * increment access statistics
- * ----------------
+ /*
+ * increment access statistics
*/
IncrHeapAccessStat(local_RelationPutHeapTuple);
IncrHeapAccessStat(global_RelationPutHeapTuple);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/stats.c,v 1.23 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/stats.c,v 1.24 2001/03/22 06:16:07 momjian Exp $
*
* NOTES
* initam should be moved someplace else.
MemoryContext oldContext;
HeapAccessStatistics stats;
- /* ----------------
- * make sure we don't initialize things twice
- * ----------------
+ /*
+ * make sure we don't initialize things twice
*/
if (heap_access_stats != NULL)
return;
- /* ----------------
- * allocate statistics structure from the top memory context
- * ----------------
+ /*
+ * allocate statistics structure from the top memory context
*/
oldContext = MemoryContextSwitchTo(TopMemoryContext);
stats = (HeapAccessStatistics)
palloc(sizeof(HeapAccessStatisticsData));
- /* ----------------
- * initialize fields to default values
- * ----------------
+ /*
+ * initialize fields to default values
*/
stats->global_open = 0;
stats->global_openr = 0;
stats->local_RelationNameGetRelation = 0;
stats->global_RelationNameGetRelation = 0;
- /* ----------------
- * record init times
- * ----------------
+ /*
+ * record init times
*/
time(&stats->init_global_timestamp);
time(&stats->local_reset_timestamp);
time(&stats->last_request_timestamp);
- /* ----------------
- * return to old memory context
- * ----------------
+ /*
+ * return to old memory context
*/
MemoryContextSwitchTo(oldContext);
{
HeapAccessStatistics stats;
- /* ----------------
- * do nothing if stats aren't initialized
- * ----------------
+ /*
+ * do nothing if stats aren't initialized
*/
if (heap_access_stats == NULL)
return;
stats = heap_access_stats;
- /* ----------------
- * reset local counts
- * ----------------
+ /*
+ * reset local counts
*/
stats->local_open = 0;
stats->local_openr = 0;
stats->local_RelationPutHeapTuple = 0;
stats->local_RelationPutLongHeapTuple = 0;
- /* ----------------
- * reset local timestamps
- * ----------------
+ /*
+ * reset local timestamps
*/
time(&stats->local_reset_timestamp);
time(&stats->last_request_timestamp);
{
HeapAccessStatistics stats;
- /* ----------------
- * return nothing if stats aren't initialized
- * ----------------
+ /*
+ * return nothing if stats aren't initialized
*/
if (heap_access_stats == NULL)
return NULL;
- /* ----------------
- * record the current request time
- * ----------------
+ /*
+ * record the current request time
*/
time(&heap_access_stats->last_request_timestamp);
- /* ----------------
- * allocate a copy of the stats and return it to the caller.
- * ----------------
+ /*
+ * allocate a copy of the stats and return it to the caller.
*/
stats = (HeapAccessStatistics)
palloc(sizeof(HeapAccessStatisticsData));
void
PrintHeapAccessStatistics(HeapAccessStatistics stats)
{
- /* ----------------
- * return nothing if stats aren't valid
- * ----------------
+
+ /*
+ * return nothing if stats aren't valid
*/
if (stats == NULL)
return;
void
initam(void)
{
- /* ----------------
- * initialize heap statistics.
- * ----------------
+
+ /*
+ * initialize heap statistics.
*/
InitHeapAccessStatistics();
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.18 2001/03/22 03:59:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.19 2001/03/22 06:16:07 momjian Exp $
*
*
* INTERFACE ROUTINES
if (VARATT_IS_EXTERNAL(attr))
{
- /* ----------
+
+ /*
* This is an external stored plain value
- * ----------
*/
result = toast_fetch_datum(attr);
}
else
{
- /* ----------
- * This is a plain value inside of the main tuple - why am I called?
- * ----------
+
+ /*
+ * This is a plain value inside of the main tuple - why am I
+ * called?
*/
result = attr;
}
}
else
{
- /* ----------
+
+ /*
* This is an external stored plain value
- * ----------
*/
result = toast_fetch_datum(attr);
}
}
else if (VARATT_IS_COMPRESSED(attr))
{
- /* ----------
+
+ /*
* This is a compressed value inside of the main tuple
- * ----------
*/
result = (varattrib *) palloc(attr->va_content.va_compressed.va_rawsize
+ VARHDRSZ);
pglz_decompress((PGLZ_Header *) attr, VARATT_DATA(result));
}
else
- /* ----------
- * This is a plain value inside of the main tuple - why am I called?
- * ----------
+
+ /*
+ * This is a plain value inside of the main tuple - why am I
+ * called?
*/
return attr;
Datum value;
bool isnull;
- /* ----------
- * Get the tuple descriptor, the number of and attribute
- * descriptors.
- * ----------
+ /*
+ * Get the tuple descriptor, the number of attributes, and the
+ * attribute descriptors.
*/
tupleDesc = rel->rd_att;
numAttrs = tupleDesc->natts;
att = tupleDesc->attrs;
- /* ----------
- * Check for external stored attributes and delete them
- * from the secondary relation.
- * ----------
+ /*
+ * Check for external stored attributes and delete them from the
+ * secondary relation.
*/
for (i = 0; i < numAttrs; i++)
{
bool toast_free[MaxHeapAttributeNumber];
bool toast_delold[MaxHeapAttributeNumber];
- /* ----------
- * Get the tuple descriptor, the number of and attribute
- * descriptors and the location of the tuple values.
- * ----------
+ /*
+ * Get the tuple descriptor, the number of attributes, the attribute
+ * descriptors, and the location of the tuple values.
*/
tupleDesc = rel->rd_att;
numAttrs = tupleDesc->natts;
if (oldtup != NULL)
{
- /* ----------
+
+ /*
* For UPDATE get the old and new values of this attribute
- * ----------
*/
old_value = (varattrib *) DatumGetPointer(
heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
new_value = (varattrib *) DatumGetPointer(toast_values[i]);
- /* ----------
- * If the old value is an external stored one, check if it
- * has changed so we have to delete it later.
- * ----------
+ /*
+ * If the old value is an external stored one, check if it has
+ * changed so we have to delete it later.
*/
if (!old_isnull && att[i]->attlen == -1 &&
VARATT_IS_EXTERNAL(old_value))
old_value->va_content.va_external.va_attno !=
new_value->va_content.va_external.va_attno)
{
- /* ----------
- * The old external store value isn't needed any
- * more after the update
- * ----------
+
+ /*
+ * The old external store value isn't needed any more
+ * after the update
*/
toast_delold[i] = true;
need_delold = true;
}
else
{
- /* ----------
- * This attribute isn't changed by this update
- * so we reuse the original reference to the old
- * value in the new tuple.
- * ----------
+
+ /*
+ * This attribute isn't changed by this update so we
+ * reuse the original reference to the old value in
+ * the new tuple.
*/
toast_action[i] = 'p';
toast_sizes[i] = VARATT_SIZE(toast_values[i]);
}
else
{
- /* ----------
+
+ /*
* For INSERT simply get the new value
- * ----------
*/
toast_values[i] =
heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
}
- /* ----------
+ /*
* Handle NULL attributes
- * ----------
*/
if (new_isnull)
{
continue;
}
- /* ----------
+ /*
* Now look at varsize attributes
- * ----------
*/
if (att[i]->attlen == -1)
{
- /* ----------
+
+ /*
* If the table's attribute says PLAIN always, force it so.
- * ----------
*/
if (att[i]->attstorage == 'p')
toast_action[i] = 'p';
- /* ----------
+ /*
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Expand it to plain (and, probably, toast it again below).
- * ----------
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i])))
{
need_free = true;
}
- /* ----------
+ /*
* Remember the size of this attribute
- * ----------
*/
toast_sizes[i] = VARATT_SIZE(DatumGetPointer(toast_values[i]));
}
else
{
- /* ----------
+
+ /*
* Not a variable size attribute, plain storage always
- * ----------
*/
toast_action[i] = 'p';
toast_sizes[i] = att[i]->attlen;
maxDataLen += BITMAPLEN(numAttrs);
maxDataLen = TOAST_TUPLE_TARGET - MAXALIGN(maxDataLen);
- /* ----------
+ /*
* Look for attributes with attstorage 'x' to compress
- * ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
maxDataLen)
Datum old_value;
Datum new_value;
- /* ----------
+ /*
* Search for the biggest yet uncompressed internal attribute
- * ----------
*/
for (i = 0; i < numAttrs; i++)
{
if (biggest_attno < 0)
break;
- /* ----------
+ /*
* Attempt to compress it inline
- * ----------
*/
i = biggest_attno;
old_value = toast_values[i];
}
}
- /* ----------
- * Second we look for attributes of attstorage 'x' or 'e' that
- * are still inline.
- * ----------
+ /*
+ * Second we look for attributes of attstorage 'x' or 'e' that are
+ * still inline.
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
int32 biggest_size = MAXALIGN(sizeof(varattrib));
Datum old_value;
- /* ----------
- * Search for the biggest yet inlined attribute with
- * attstorage = 'x' or 'e'
- * ----------
+ /*
+ * Search for the biggest yet inlined attribute with attstorage =
+ * 'x' or 'e'
*/
for (i = 0; i < numAttrs; i++)
{
if (biggest_attno < 0)
break;
- /* ----------
+ /*
* Store this external
- * ----------
*/
i = biggest_attno;
old_value = toast_values[i];
need_free = true;
}
- /* ----------
- * Round 3 - this time we take attributes with storage
- * 'm' into compression
- * ----------
+ /*
+ * Round 3 - this time we take attributes with storage 'm' into
+ * compression
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
maxDataLen)
Datum old_value;
Datum new_value;
- /* ----------
+ /*
* Search for the biggest yet uncompressed internal attribute
- * ----------
*/
for (i = 0; i < numAttrs; i++)
{
if (biggest_attno < 0)
break;
- /* ----------
+ /*
* Attempt to compress it inline
- * ----------
*/
i = biggest_attno;
old_value = toast_values[i];
}
}
- /* ----------
+ /*
* Finally we store attributes of type 'm' external
- * ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
int32 biggest_size = MAXALIGN(sizeof(varattrib));
Datum old_value;
- /* ----------
- * Search for the biggest yet inlined attribute with
- * attstorage = 'm'
- * ----------
+ /*
+ * Search for the biggest yet inlined attribute with attstorage =
+ * 'm'
*/
for (i = 0; i < numAttrs; i++)
{
if (biggest_attno < 0)
break;
- /* ----------
+ /*
* Store this external
- * ----------
*/
i = biggest_attno;
old_value = toast_values[i];
need_free = true;
}
- /* ----------
- * In the case we toasted any values, we need to build
- * a new heap tuple with the changed values.
- * ----------
+ /*
+ * In the case we toasted any values, we need to build a new heap
+ * tuple with the changed values.
*/
if (need_change)
{
MemoryContext oldcxt;
HeapTupleHeader olddata;
- /* ----------
+ /*
* Calculate the new size of the tuple
- * ----------
*/
new_len = offsetof(HeapTupleHeaderData, t_bits);
if (has_nulls)
new_len = MAXALIGN(new_len);
new_len += ComputeDataSize(tupleDesc, toast_values, toast_nulls);
- /* ----------
+ /*
* Remember the old memory location of the tuple (for below),
- * switch to the memory context of the HeapTuple structure
- * and allocate the new tuple.
- * ----------
+ * switch to the memory context of the HeapTuple structure and
+ * allocate the new tuple.
*/
olddata = newtup->t_data;
oldcxt = MemoryContextSwitchTo(newtup->t_datamcxt);
new_data = palloc(new_len);
- /* ----------
+ /*
* Put the tuple header and the changed values into place
- * ----------
*/
memcpy(new_data, newtup->t_data, newtup->t_data->t_hoff);
newtup->t_data = (HeapTupleHeader) new_data;
&(newtup->t_data->t_infomask),
has_nulls ? newtup->t_data->t_bits : NULL);
- /* ----------
- * In the case we modified a previously modified tuple again,
- * free the memory from the previous run
- * ----------
+ /*
+ * In the case we modified a previously modified tuple again, free
+ * the memory from the previous run
*/
if ((char *) olddata != ((char *) newtup + HEAPTUPLESIZE))
pfree(olddata);
- /* ----------
+ /*
* Switch back to the old memory context
- * ----------
*/
MemoryContextSwitchTo(oldcxt);
}
- /* ----------
+ /*
* Free allocated temp values
- * ----------
*/
if (need_free)
for (i = 0; i < numAttrs; i++)
if (toast_free[i])
pfree(DatumGetPointer(toast_values[i]));
- /* ----------
+ /*
* Delete external values from the old tuple
- * ----------
*/
if (need_delold)
for (i = 0; i < numAttrs; i++)
char *data_p;
int32 data_todo;
- /* ----------
+ /*
* Create the varattrib reference
- * ----------
*/
result = (varattrib *) palloc(sizeof(varattrib));
result->va_content.va_external.va_rowid = mainoid;
result->va_content.va_external.va_attno = attno;
- /* ----------
+ /*
* Initialize constant parts of the tuple data
- * ----------
*/
t_values[0] = ObjectIdGetDatum(result->va_content.va_external.va_valueid);
t_values[2] = PointerGetDatum(chunk_data);
t_nulls[1] = ' ';
t_nulls[2] = ' ';
- /* ----------
+ /*
* Get the data to process
- * ----------
*/
data_p = VARATT_DATA(value);
data_todo = VARATT_SIZE(value) - VARHDRSZ;
- /* ----------
+ /*
* Open the toast relation
- * ----------
*/
toastrel = heap_open(rel->rd_rel->reltoastrelid, RowExclusiveLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(rel->rd_rel->reltoastidxid);
- /* ----------
+ /*
* Split up the item into chunks
- * ----------
*/
while (data_todo > 0)
{
- /* ----------
+
+ /*
* Calculate the size of this chunk
- * ----------
*/
chunk_size = Min(TOAST_MAX_CHUNK_SIZE, data_todo);
- /* ----------
+ /*
* Build a tuple
- * ----------
*/
t_values[1] = Int32GetDatum(chunk_seq++);
VARATT_SIZEP(chunk_data) = chunk_size + VARHDRSZ;
if (!HeapTupleIsValid(toasttup))
elog(ERROR, "Failed to build TOAST tuple");
- /* ----------
+ /*
* Store it and create the index entry
- * ----------
*/
heap_insert(toastrel, toasttup);
idxres = index_insert(toastidx, t_values, t_nulls,
if (idxres == NULL)
elog(ERROR, "Failed to insert index entry for TOAST tuple");
- /* ----------
+ /*
* Free memory
- * ----------
*/
heap_freetuple(toasttup);
pfree(idxres);
- /* ----------
+ /*
* Move on to next chunk
- * ----------
*/
data_todo -= chunk_size;
data_p += chunk_size;
}
- /* ----------
+ /*
* Done - close toast relation and return the reference
- * ----------
*/
index_close(toastidx);
heap_close(toastrel, RowExclusiveLock);
if (!VARATT_IS_EXTERNAL(attr))
return;
- /* ----------
+ /*
* Open the toast relation and its index
- * ----------
*/
toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
RowExclusiveLock);
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
- /* ----------
+ /*
* Setup a scan key to fetch from the index by va_valueid
- * ----------
*/
ScanKeyEntryInitialize(&toastkey,
(bits16) 0,
(RegProcedure) F_OIDEQ,
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
- /* ----------
+ /*
* Read the chunks by index
- * ----------
*/
toastscan = index_beginscan(toastidx, false, 1, &toastkey);
while ((indexRes = index_getnext(toastscan, ForwardScanDirection)) != NULL)
if (!toasttup.t_data)
continue;
- /* ----------
+ /*
* Have a chunk, delete it
- * ----------
*/
simple_heap_delete(toastrel, &toasttup.t_self);
ReleaseBuffer(buffer);
}
- /* ----------
+ /*
* End scan and close relations
- * ----------
*/
index_endscan(toastscan);
index_close(toastidx);
if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
- /* ----------
+ /*
* Open the toast relation and its index
- * ----------
*/
toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
AccessShareLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
- /* ----------
+ /*
* Setup a scan key to fetch from the index by va_valueid
- * ----------
*/
ScanKeyEntryInitialize(&toastkey,
(bits16) 0,
(RegProcedure) F_OIDEQ,
ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
- /* ----------
+ /*
* Read the chunks by index
*
* Note we will not necessarily see the chunks in sequence-number order.
- * ----------
*/
toastscan = index_beginscan(toastidx, false, 1, &toastkey);
while ((indexRes = index_getnext(toastscan, ForwardScanDirection)) != NULL)
continue;
ttup = &toasttup;
- /* ----------
+ /*
* Have a chunk, extract the sequence number and the data
- * ----------
*/
residx = DatumGetInt32(heap_getattr(ttup, 2, toasttupDesc, &isnull));
Assert(!isnull);
Assert(!isnull);
chunksize = VARATT_SIZE(chunk) - VARHDRSZ;
- /* ----------
+ /*
* Some checks on the data we've found
- * ----------
*/
if (residx < 0 || residx >= numchunks)
elog(ERROR, "unexpected chunk number %d for toast value %d",
residx,
attr->va_content.va_external.va_valueid);
- /* ----------
+ /*
* Copy the data into proper place in our result
- * ----------
*/
memcpy(((char *) VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
VARATT_DATA(chunk),
ReleaseBuffer(buffer);
}
- /* ----------
+ /*
* Final checks that we successfully fetched the datum
- * ----------
*/
if (memcmp(chunks_found, chunks_expected, numchunks) != 0)
elog(ERROR, "not all toast chunks found for value %d",
pfree(chunks_expected);
pfree(chunks_found);
- /* ----------
+ /*
* End scan and close relations
- * ----------
*/
index_endscan(toastscan);
index_close(toastidx);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.47 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.48 2001/03/22 06:16:07 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relationId
RELATION_CHECKS;
GET_REL_PROCEDURE(insert, aminsert);
- /* ----------------
- * have the am's insert proc do all the work.
- * ----------------
+ /*
+ * have the am's insert proc do all the work.
*/
specificResult = (InsertIndexResult)
DatumGetPointer(OidFunctionCall5(procedure,
RelationIncrementReferenceCount(relation);
- /* ----------------
- * Acquire AccessShareLock for the duration of the scan
+ /*
+ * Acquire AccessShareLock for the duration of the scan
*
- * Note: we could get an SI inval message here and consequently have
- * to rebuild the relcache entry. The refcount increment above
- * ensures that we will rebuild it and not just flush it...
- * ----------------
+ * Note: we could get an SI inval message here and consequently have to
+ * rebuild the relcache entry. The refcount increment above ensures
+ * that we will rebuild it and not just flush it...
*/
LockRelation(relation, AccessShareLock);
SCAN_CHECKS;
- /* ----------------
- * Look up the access procedure only once per scan.
- * ----------------
+ /*
+ * Look up the access procedure only once per scan.
*/
if (scan->fn_getnext.fn_oid == InvalidOid)
{
fmgr_info(procedure, &scan->fn_getnext);
}
- /* ----------------
- * have the am's gettuple proc do all the work.
- * ----------------
+ /*
+ * have the am's gettuple proc do all the work.
*/
result = (RetrieveIndexResult)
DatumGetPointer(FunctionCall2(&scan->fn_getnext,
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.64 2001/03/22 03:59:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.65 2001/03/22 06:16:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
high = mid;
}
- /*--------------------
+ /*
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
- * On a leaf page, we always return the first key >= scan key
- * (which could be the last slot + 1).
- *--------------------
+ * On a leaf page, we always return the first key >= scan key (which
+ * could be the last slot + 1).
*/
if (P_ISLEAF(opaque))
return low;
- /*--------------------
- * On a non-leaf page, return the last key < scan key.
- * There must be one if _bt_compare() is playing by the rules.
- *--------------------
+ /*
+ * On a non-leaf page, return the last key < scan key. There must be
+ * one if _bt_compare() is playing by the rules.
*/
Assert(low > P_FIRSTDATAKEY(opaque));
ItemPointerSet(current, blkno, offnum);
- /*----------
- * At this point we are positioned at the first item >= scan key,
- * or possibly at the end of a page on which all the existing items
- * are < scan key and we know that everything on later pages is
- * >= scan key. We could step forward in the latter case, but that'd
- * be a waste of time if we want to scan backwards. So, it's now time to
- * examine the scan strategy to find the exact place to start the scan.
+ /*
+ * At this point we are positioned at the first item >= scan key, or
+ * possibly at the end of a page on which all the existing items are <
+ * scan key and we know that everything on later pages is >= scan key.
+ * We could step forward in the latter case, but that'd be a waste of
+ * time if we want to scan backwards. So, it's now time to examine
+ * the scan strategy to find the exact place to start the scan.
*
- * Note: if _bt_step fails (meaning we fell off the end of the index
- * in one direction or the other), we either return NULL (no matches) or
- * call _bt_endpoint() to set up a scan starting at that index endpoint,
- * as appropriate for the desired scan type.
+ * Note: if _bt_step fails (meaning we fell off the end of the index in
+ * one direction or the other), we either return NULL (no matches) or
+ * call _bt_endpoint() to set up a scan starting at that index
+ * endpoint, as appropriate for the desired scan type.
*
* it's yet another place to add some code later for is(not)null ...
- *----------
*/
switch (strat_total)
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.42 2001/03/22 03:59:17 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.43 2001/03/22 06:16:10 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
XidStatus xidstatus; /* recorded status of xid */
bool fail = false; /* success/failure */
- /* ----------------
- * during initialization consider all transactions
- * as having been committed
- * ----------------
+ /*
+ * during initialization consider all transactions as having been
+ * committed
*/
if (!RelationIsValid(LogRelation))
return (bool) (status == XID_COMMIT);
- /* ----------------
- * before going to the buffer manager, check our single
- * item cache to see if we didn't just check the transaction
- * status a moment ago.
- * ----------------
+ /*
+ * before going to the buffer manager, check our single item cache to
+ * see if we didn't just check the transaction status a moment ago.
*/
if (TransactionIdEquals(transactionId, cachedTestXid))
return (bool)
(status == cachedTestXidStatus);
- /* ----------------
- * compute the item pointer corresponding to the
- * page containing our transaction id. We save the item in
- * our cache to speed up things if we happen to ask for the
- * same xid's status more than once.
- * ----------------
+ /*
+ * compute the item pointer corresponding to the page containing our
+ * transaction id. We save the item in our cache to speed up things
+ * if we happen to ask for the same xid's status more than once.
*/
TransComputeBlockNumber(LogRelation, transactionId, &blockNumber);
xidstatus = TransBlockNumberGetXidStatus(LogRelation,
return (bool) (status == xidstatus);
}
- /* ----------------
- * here the block didn't contain the information we wanted
- * ----------------
+ /*
+ * here the block didn't contain the information we wanted
*/
elog(ERROR, "TransactionLogTest: failed to get xidstatus");
BlockNumber blockNumber;
bool fail = false; /* success/failure */
- /* ----------------
- * during initialization we don't record any updates.
- * ----------------
+ /*
+ * during initialization we don't record any updates.
*/
if (!RelationIsValid(LogRelation))
return;
- /* ----------------
- * update the log relation
- * ----------------
+ /*
+ * update the log relation
*/
TransComputeBlockNumber(LogRelation, transactionId, &blockNumber);
TransBlockNumberSetXidStatus(LogRelation,
TransRecover(Relation logRelation)
{
#ifdef NOT_USED
- /* ----------------
- * first get the last recorded transaction in the log.
- * ----------------
+
+ /*
+ * first get the last recorded transaction in the log.
*/
TransGetLastRecordedTransaction(logRelation, logLastXid, &fail);
if (fail == true)
elog(ERROR, "TransRecover: failed TransGetLastRecordedTransaction");
- /* ----------------
- * next get the "last" and "next" variables
- * ----------------
+ /*
+ * next get the "last" and "next" variables
*/
VariableRelationGetLastXid(&varLastXid);
VariableRelationGetNextXid(&varNextXid);
- /* ----------------
- * intregity test (1)
- * ----------------
+ /*
+ * integrity test (1)
*/
if (TransactionIdIsLessThan(varNextXid, logLastXid))
elog(ERROR, "TransRecover: varNextXid < logLastXid");
- /* ----------------
- * intregity test (2)
- * ----------------
+ /*
+ * integrity test (2)
*/
- /* ----------------
- * intregity test (3)
- * ----------------
+ /*
+ * integrity test (3)
*/
- /* ----------------
- * here we have a valid "
+ /*
+ * here we have a valid "
*
- * **** RESUME HERE ****
- * ----------------
+ **** RESUME HERE ****
*/
varNextXid = TransactionIdDup(varLastXid);
TransactionIdIncrement(&varNextXid);
Relation logRelation;
MemoryContext oldContext;
- /* ----------------
- * don't do anything during bootstrapping
- * ----------------
+ /*
+ * don't do anything during bootstrapping
*/
if (AMI_OVERRIDE)
return;
- /* ----------------
- * disable the transaction system so the access methods
- * don't interfere during initialization.
- * ----------------
+ /*
+ * disable the transaction system so the access methods don't
+ * interfere during initialization.
*/
OverrideTransactionSystem(true);
- /* ----------------
- * make sure allocations occur within the top memory context
- * so that our log management structures are protected from
- * garbage collection at the end of every transaction.
- * ----------------
+ /*
+ * make sure allocations occur within the top memory context so that
+ * our log management structures are protected from garbage collection
+ * at the end of every transaction.
*/
oldContext = MemoryContextSwitchTo(TopMemoryContext);
- /* ----------------
- * first open the log and time relations
- * (these are created by amiint so they are guaranteed to exist)
- * ----------------
+ /*
+ * first open the log and time relations (these are created by amiint
+ * so they are guaranteed to exist)
*/
logRelation = heap_openr(LogRelationName, NoLock);
VariableRelation = heap_openr(VariableRelationName, NoLock);
- /* ----------------
- * XXX TransactionLogUpdate requires that LogRelation
- * is valid so we temporarily set it so we can initialize
- * things properly. This could be done cleaner.
- * ----------------
+ /*
+ * XXX TransactionLogUpdate requires that LogRelation is valid so we
+ * temporarily set it so we can initialize things properly. This could
+ * be done cleaner.
*/
LogRelation = logRelation;
- /* ----------------
- * if we have a virgin database, we initialize the log
- * relation by committing the AmiTransactionId (id 512) and we
- * initialize the variable relation by setting the next available
- * transaction id to FirstTransactionId (id 514). OID initialization
- * happens as a side effect of bootstrapping in varsup.c.
- * ----------------
+ /*
+ * if we have a virgin database, we initialize the log relation by
+ * committing the AmiTransactionId (id 512) and we initialize the
+ * variable relation by setting the next available transaction id to
+ * FirstTransactionId (id 514). OID initialization happens as a side
+ * effect of bootstrapping in varsup.c.
*/
SpinAcquire(OidGenLockId);
if (!TransactionIdDidCommit(AmiTransactionId))
}
else if (RecoveryCheckingEnabled())
{
- /* ----------------
- * if we have a pre-initialized database and if the
- * perform recovery checking flag was passed then we
- * do our database integrity checking.
- * ----------------
+
+ /*
+ * if we have a pre-initialized database and if the perform
+ * recovery checking flag was passed then we do our database
+ * integrity checking.
*/
TransRecover(logRelation);
}
LogRelation = (Relation) NULL;
SpinRelease(OidGenLockId);
- /* ----------------
- * now re-enable the transaction system
- * ----------------
+ /*
+ * now re-enable the transaction system
*/
OverrideTransactionSystem(false);
- /* ----------------
- * instantiate the global variables
- * ----------------
+ /*
+ * instantiate the global variables
*/
LogRelation = logRelation;
- /* ----------------
- * restore the memory context to the previous context
- * before we return from initialization.
- * ----------------
+ /*
+ * restore the memory context to the previous context before we return
+ * from initialization.
*/
MemoryContextSwitchTo(oldContext);
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.29 2001/03/22 03:59:17 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.30 2001/03/22 06:16:10 momjian Exp $
*
* NOTES
* This file contains support functions for the high
{
long itemsPerBlock = 0;
- /* ----------------
- * we calculate the block number of our transaction
- * by dividing the transaction id by the number of
- * transaction things per block.
- * ----------------
+ /*
+ * we calculate the block number of our transaction by dividing the
+ * transaction id by the number of transaction things per block.
*/
if (relation == LogRelation)
itemsPerBlock = TP_NumXidStatusPerBlock;
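/*
 * A tiny sketch of the block-number arithmetic described above: integer
 * division of the transaction id by the per-block entry count.  The
 * constant is a stand-in, not the real TP_NumXidStatusPerBlock value.
 */
#define SKETCH_XIDS_PER_BLOCK 8192

static unsigned long
sketch_xid_to_block(unsigned long xid)
{
    /* block that holds this transaction's status entry */
    return xid / SKETCH_XIDS_PER_BLOCK;
}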
BitIndex offset;
XidStatus xstatus;
- /* ----------------
- * sanity check
- * ----------------
+ /*
+ * sanity check
*/
Assert((tblock != NULL));
- /* ----------------
- * search downward from the top of the block data, looking
- * for the first Non-in progress transaction status. Since we
- * are scanning backward, this will be last recorded transaction
- * status on the block.
- * ----------------
+ /*
+ * search downward from the top of the block data, looking for the
+ * first Non-in progress transaction status. Since we are scanning
+ * backward, this will be the last recorded transaction status on the
+ * block.
*/
maxIndex = TP_NumXidStatusPerBlock;
for (index = maxIndex; index > 0; index--)
xstatus = (bit1 | bit2);
- /* ----------------
- * here we have the status of some transaction, so test
- * if the status is recorded as "in progress". If so, then
- * we save the transaction id in the place specified by the caller.
- * ----------------
+ /*
+ * here we have the status of some transaction, so test if the
+ * status is recorded as "in progress". If so, then we save the
+ * transaction id in the place specified by the caller.
*/
if (xstatus != XID_INPROGRESS)
{
}
}
- /* ----------------
- * if we get here and index is 0 it means we couldn't find
- * a non-inprogress transaction on the block. For now we just
- * return this info to the user. They can check if the return
- * status is "in progress" to know this condition has arisen.
- * ----------------
+ /*
+ * if we get here and index is 0 it means we couldn't find a
+ * non-inprogress transaction on the block. For now we just return
+ * this info to the user. They can check if the return status is "in
+ * progress" to know this condition has arisen.
*/
if (index == 0)
{
TransactionIdStore(baseXid, returnXidP);
}
- /* ----------------
- * return the status to the user
- * ----------------
+ /*
+ * return the status to the user
*/
return xstatus;
}
*/
index = transactionId % TP_NumXidStatusPerBlock;
- /* ----------------
- * get the data at the specified index
- * ----------------
+ /*
+ * get the data at the specified index
*/
offset = BitIndexOf(index);
bit1 = ((bits8) BitArrayBitIsSet((BitArray) tblock, offset++)) << 1;
bit2 = (bits8) BitArrayBitIsSet((BitArray) tblock, offset);
- /* ----------------
- * return the transaction status to the caller
- * ----------------
+ /*
+ * return the transaction status to the caller
*/
return (XidStatus) (bit1 | bit2);
}
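/*
 * A sketch of the two-bits-per-transaction status lookup used above,
 * written against a plain byte array instead of the backend's BitArray
 * macros; the bit ordering here (first bit of the pair is the high bit)
 * is an assumption of the sketch.
 */
static unsigned int
sketch_get_xid_status(const unsigned char *block, unsigned long index)
{
    unsigned long bitoff = index * 2;           /* two status bits per xid */
    unsigned int shift = 6 - (unsigned int) (bitoff % 8);

    return (block[bitoff / 8] >> shift) & 0x3;  /* bit1 << 1 | bit2 */
}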
offset = BitIndexOf(index);
- /* ----------------
- * store the transaction value at the specified offset
- * ----------------
+ /*
+ * store the transaction value at the specified offset
*/
switch (xstatus)
{
XidStatus xstatus; /* recorded status of xid */
bool localfail; /* bool used if failP = NULL */
- /* ----------------
- * get the page containing the transaction information
- * ----------------
+ /*
+ * get the page containing the transaction information
*/
buffer = ReadBuffer(relation, blockNumber);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
block = BufferGetBlock(buffer);
- /* ----------------
- * get the status from the block. note, for now we always
- * return false in failP.
- * ----------------
+ /*
+ * get the status from the block. note, for now we always return
+ * false in failP.
*/
if (failP == NULL)
failP = &localfail;
xstatus = TransBlockGetXidStatus(block, xid);
- /* ----------------
- * release the buffer and return the status
- * ----------------
+ /*
+ * release the buffer and return the status
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
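/*
 * The call sequence above is pin, share-lock, read, unlock, unpin.  A
 * self-contained reader using a POSIX rwlock shows the same discipline;
 * the buffer-manager calls themselves are not reproduced here.
 */
#include <pthread.h>

typedef struct
{
    pthread_rwlock_t lock;      /* stand-in for the buffer content lock */
    unsigned int xid_status;    /* stand-in for the on-block status bits */
} SketchBuffer;

static unsigned int
sketch_read_status_shared(SketchBuffer *buf)
{
    unsigned int status;

    pthread_rwlock_rdlock(&buf->lock);  /* share lock: readers may overlap */
    status = buf->xid_status;
    pthread_rwlock_unlock(&buf->lock);  /* drop the lock before returning */
    return status;
}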
Block block; /* block containing xstatus */
bool localfail; /* bool used if failP = NULL */
- /* ----------------
- * get the block containing the transaction status
- * ----------------
+ /*
+ * get the block containing the transaction status
*/
buffer = ReadBuffer(relation, blockNumber);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
block = BufferGetBlock(buffer);
- /* ----------------
- * attempt to update the status of the transaction on the block.
- * if we are successful, write the block. otherwise release the buffer.
- * note, for now we always return false in failP.
- * ----------------
+ /*
+ * attempt to update the status of the transaction on the block. if we
+ * are successful, write the block. otherwise release the buffer.
+ * note, for now we always return false in failP.
*/
if (failP == NULL)
failP = &localfail;
(*failP) = false;
- /* ----------------
- * SOMEDAY gain exclusive access to the log relation
+ /*
+ * SOMEDAY gain exclusive access to the log relation
*
- * That someday is today 5 Aug. 1991 -mer
- * It looks to me like we only need to set a read lock here, despite
- * the above comment about exclusive access. The block is never
- * actually written into, we only check status bits.
- * ----------------
+ * That someday is today 5 Aug. 1991 -mer. It looks to me like we only
+ * need to set a read lock here, despite the above comment about
+ * exclusive access. The block is never actually written into, we
+ * only check status bits.
*/
RelationSetLockForRead(relation);
- /* ----------------
- * we assume the last block of the log contains the last
- * recorded transaction. If the relation is empty we return
- * failure to the user.
- * ----------------
+ /*
+ * we assume the last block of the log contains the last recorded
+ * transaction. If the relation is empty we return failure to the
+ * user.
*/
n = RelationGetNumberOfBlocks(relation);
if (n == 0)
return;
}
- /* ----------------
- * get the block containing the transaction information
- * ----------------
+ /*
+ * get the block containing the transaction information
*/
blockNumber = n - 1;
buffer = ReadBuffer(relation, blockNumber);
block = BufferGetBlock(buffer);
- /* ----------------
- * get the last xid on the block
- * ----------------
+ /*
+ * get the last xid on the block
*/
baseXid = blockNumber * TP_NumXidStatusPerBlock;
ReleaseBuffer(buffer);
- /* ----------------
- * SOMEDAY release our lock on the log relation
- * ----------------
+ /*
+ * SOMEDAY release our lock on the log relation
*/
RelationUnsetLockForRead(relation);
}
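/*
 * A sketch of the "last recorded transaction" scan described above: take
 * the last block, compute its base xid, and walk backward until a
 * non-in-progress entry turns up.  Constants and the status encoding are
 * stand-ins for this sketch only.
 */
#define SKETCH_PER_BLOCK  1024
#define SKETCH_INPROGRESS 0

/* returns the last recorded xid in the block, or 0 if every entry is in progress */
static unsigned long
sketch_last_recorded_xid(const unsigned int *status, unsigned long blockNumber)
{
    unsigned long baseXid = blockNumber * SKETCH_PER_BLOCK;
    long index;

    for (index = SKETCH_PER_BLOCK - 1; index >= 0; index--)
    {
        if (status[index] != SKETCH_INPROGRESS)
            return baseXid + (unsigned long) index;
    }
    return 0;
}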
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.100 2001/03/22 03:59:18 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.101 2001/03/22 06:16:10 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * if the transaction system is disabled, we return
- * the special "disabled" transaction id.
- * ----------------
+ /*
+ * if the transaction system is disabled, we return the special
+ * "disabled" transaction id.
*/
if (s->state == TRANS_DISABLED)
return (TransactionId) DisabledTransactionId;
- /* ----------------
- * otherwise return the current transaction id.
- * ----------------
+ /*
+ * otherwise return the current transaction id.
*/
return (TransactionId) s->transactionIdData;
}
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * if the transaction system is disabled, we return
- * the special "disabled" command id.
- * ----------------
+ /*
+ * if the transaction system is disabled, we return the special
+ * "disabled" command id.
*/
if (s->state == TRANS_DISABLED)
return (CommandId) DisabledCommandId;
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * if the transaction system is disabled, we return
- * the special "disabled" command id.
- * ----------------
+ /*
+ * if the transaction system is disabled, we return the special
+ * "disabled" command id.
*/
if (s->state == TRANS_DISABLED)
return (CommandId) DisabledCommandId;
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * if the transaction system is disabled, we return
- * the special "disabled" starting time.
- * ----------------
+ /*
+ * if the transaction system is disabled, we return the special
+ * "disabled" starting time.
*/
if (s->state == TRANS_DISABLED)
return (AbsoluteTime) DisabledStartTime;
static void
AtStart_Memory(void)
{
- /* ----------------
- * We shouldn't have any transaction contexts already.
- * ----------------
+
+ /*
+ * We shouldn't have any transaction contexts already.
*/
Assert(TopTransactionContext == NULL);
Assert(TransactionCommandContext == NULL);
- /* ----------------
- * Create a toplevel context for the transaction.
- * ----------------
+ /*
+ * Create a toplevel context for the transaction.
*/
TopTransactionContext =
AllocSetContextCreate(TopMemoryContext,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
- /* ----------------
- * Create a statement-level context and make it active.
- * ----------------
+ /*
+ * Create a statement-level context and make it active.
*/
TransactionCommandContext =
AllocSetContextCreate(TopTransactionContext,
static void
AtCommit_Cache(void)
{
- /* ----------------
+
+ /*
* Make catalog changes visible to all backend.
- * ----------------
*/
RegisterInvalid(true);
}
static void
AtCommit_LocalCache(void)
{
- /* ----------------
+
+ /*
* Make catalog changes visible to me for the next command.
- * ----------------
*/
ImmediateLocalInvalidation(true);
}
static void
AtCommit_Locks(void)
{
- /* ----------------
- * XXX What if ProcReleaseLocks fails? (race condition?)
+
+ /*
+ * XXX What if ProcReleaseLocks fails? (race condition?)
*
- * Then you're up a creek! -mer 5/24/92
- * ----------------
+ * Then you're up a creek! -mer 5/24/92
*/
ProcReleaseLocks(true);
}
static void
AtCommit_Memory(void)
{
- /* ----------------
- * Now that we're "out" of a transaction, have the
- * system allocate things in the top memory context instead
- * of per-transaction contexts.
- * ----------------
+
+ /*
+ * Now that we're "out" of a transaction, have the system allocate
+ * things in the top memory context instead of per-transaction
+ * contexts.
*/
MemoryContextSwitchTo(TopMemoryContext);
- /* ----------------
- * Release all transaction-local memory.
- * ----------------
+ /*
+ * Release all transaction-local memory.
*/
Assert(TopTransactionContext != NULL);
MemoryContextDelete(TopTransactionContext);
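/*
 * The commit-time memory handling above boils down to "point new
 * allocations at the long-lived context, then free the per-transaction
 * context in one shot".  A stand-alone sketch with malloc'd pools; the
 * pool type and names are assumptions of the sketch.
 */
#include <stdlib.h>

typedef struct SketchPool
{
    void  **chunks;             /* allocations owned by this pool */
    int     nchunks;
} SketchPool;

static SketchPool *sketch_current_pool;

static void
sketch_pool_release_all(SketchPool *pool)
{
    int i;

    for (i = 0; i < pool->nchunks; i++)
        free(pool->chunks[i]);
    pool->nchunks = 0;
}

static void
sketch_at_commit_memory(SketchPool *top_pool, SketchPool *xact_pool)
{
    sketch_current_pool = top_pool;     /* future allocations outlive the xact */
    sketch_pool_release_all(xact_pool); /* drop all transaction-local memory */
}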
static void
AtAbort_Locks(void)
{
- /* ----------------
- * XXX What if ProcReleaseLocks() fails? (race condition?)
+
+ /*
+ * XXX What if ProcReleaseLocks() fails? (race condition?)
*
- * Then you're up a creek without a paddle! -mer
- * ----------------
+ * Then you're up a creek without a paddle! -mer
*/
ProcReleaseLocks(false);
}
static void
AtAbort_Memory(void)
{
- /* ----------------
- * Make sure we are in a valid context (not a child of
- * TransactionCommandContext...). Note that it is possible
- * for this code to be called when we aren't in a transaction
- * at all; go directly to TopMemoryContext in that case.
- * ----------------
+
+ /*
+ * Make sure we are in a valid context (not a child of
+ * TransactionCommandContext...). Note that it is possible for this
+ * code to be called when we aren't in a transaction at all; go
+ * directly to TopMemoryContext in that case.
*/
if (TransactionCommandContext != NULL)
{
MemoryContextSwitchTo(TransactionCommandContext);
- /* ----------------
- * We do not want to destroy transaction contexts yet,
- * but it should be OK to delete any command-local memory.
- * ----------------
+ /*
+ * We do not want to destroy transaction contexts yet, but it
+ * should be OK to delete any command-local memory.
*/
MemoryContextResetAndDeleteChildren(TransactionCommandContext);
}
static void
AtCleanup_Memory(void)
{
- /* ----------------
- * Now that we're "out" of a transaction, have the
- * system allocate things in the top memory context instead
- * of per-transaction contexts.
- * ----------------
+
+ /*
+ * Now that we're "out" of a transaction, have the system allocate
+ * things in the top memory context instead of per-transaction
+ * contexts.
*/
MemoryContextSwitchTo(TopMemoryContext);
- /* ----------------
- * Release all transaction-local memory.
- * ----------------
+ /*
+ * Release all transaction-local memory.
*/
if (TopTransactionContext != NULL)
MemoryContextDelete(TopTransactionContext);
FreeXactSnapshot();
XactIsoLevel = DefaultXactIsoLevel;
- /* ----------------
- * Check the current transaction state. If the transaction system
- * is switched off, or if we're already in a transaction, do nothing.
- * We're already in a transaction when the monitor sends a null
- * command to the backend to flush the comm channel. This is a
- * hacky fix to a communications problem, and we keep having to
- * deal with it here. We should fix the comm channel code. mao 080891
- * ----------------
+ /*
+ * Check the current transaction state. If the transaction system is
+ * switched off, or if we're already in a transaction, do nothing.
+ * We're already in a transaction when the monitor sends a null
+ * command to the backend to flush the comm channel. This is a hacky
+ * fix to a communications problem, and we keep having to deal with it
+ * here. We should fix the comm channel code. mao 080891
*/
if (s->state == TRANS_DISABLED || s->state == TRANS_INPROGRESS)
return;
- /* ----------------
- * set the current transaction state information
- * appropriately during start processing
- * ----------------
+ /*
+ * set the current transaction state information appropriately during
+ * start processing
*/
s->state = TRANS_START;
SetReindexProcessing(false);
- /* ----------------
- * generate a new transaction id
- * ----------------
+ /*
+ * generate a new transaction id
*/
GetNewTransactionId(&(s->transactionIdData));
XactLockTableInsert(s->transactionIdData);
- /* ----------------
- * initialize current transaction state fields
- * ----------------
+ /*
+ * initialize current transaction state fields
*/
s->commandId = FirstCommandId;
s->scanCommandId = FirstCommandId;
s->startTime = GetCurrentAbsoluteTime();
- /* ----------------
- * initialize the various transaction subsystems
- * ----------------
+ /*
+ * initialize the various transaction subsystems
*/
AtStart_Memory();
AtStart_Cache();
AtStart_Locks();
- /* ----------------
- * Tell the trigger manager to we're starting a transaction
- * ----------------
+ /*
+ * Tell the trigger manager that we're starting a transaction
*/
DeferredTriggerBeginXact();
- /* ----------------
- * done with start processing, set current transaction
- * state to "in progress"
- * ----------------
+ /*
+ * done with start processing, set current transaction state to "in
+ * progress"
*/
s->state = TRANS_INPROGRESS;
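/*
 * A reduced sketch of the start sequence above: the low-level state steps
 * through DEFAULT -> START -> INPROGRESS while the per-transaction fields
 * are filled in.  The enum and struct are simplified stand-ins for the
 * backend's TransactionState.
 */
typedef enum
{
    SK_TRANS_DEFAULT,
    SK_TRANS_START,
    SK_TRANS_INPROGRESS,
    SK_TRANS_COMMIT,
    SK_TRANS_ABORT,
    SK_TRANS_DISABLED
} SketchTransState;

typedef struct
{
    SketchTransState state;
    unsigned long xid;
    unsigned int commandId;
    long startTime;
} SketchTransaction;

static void
sketch_start_transaction(SketchTransaction *s, unsigned long newXid, long now)
{
    /* disabled system or already in progress: nothing to do */
    if (s->state == SK_TRANS_DISABLED || s->state == SK_TRANS_INPROGRESS)
        return;

    s->state = SK_TRANS_START;          /* start processing under way */
    s->xid = newXid;                    /* a freshly generated transaction id */
    s->commandId = 1;                   /* stand-in for FirstCommandId */
    s->startTime = now;
    s->state = SK_TRANS_INPROGRESS;     /* start processing done */
}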
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
/* Prevent cancel/die interrupt while cleaning up */
HOLD_INTERRUPTS();
- /* ----------------
- * Tell the trigger manager that this transaction is about to be
- * committed. He'll invoke all trigger deferred until XACT before
- * we really start on committing the transaction.
- * ----------------
+ /*
+ * Tell the trigger manager that this transaction is about to be
+ * committed. He'll invoke all triggers deferred until XACT before we
+ * really start on committing the transaction.
*/
DeferredTriggerEndXact();
- /* ----------------
- * set the current transaction state information
- * appropriately during the abort processing
- * ----------------
+ /*
+ * set the current transaction state information appropriately during
+ * commit processing
*/
s->state = TRANS_COMMIT;
- /* ----------------
- * do commit processing
- * ----------------
+ /*
+ * do commit processing
*/
/* handle commit for large objects [ PA, 7/17/98 ] */
SharedBufferChanged = false;/* safest place to do it */
- /* ----------------
- * done with commit processing, set current transaction
- * state back to default
- * ----------------
+ /*
+ * done with commit processing, set current transaction state back to
+ * default
*/
s->state = TRANS_DEFAULT;
*/
LockWaitCancel();
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
{
if (s->state != TRANS_INPROGRESS)
elog(NOTICE, "AbortTransaction and not in in-progress state");
- /* ----------------
- * set the current transaction state information
- * appropriately during the abort processing
- * ----------------
+ /*
+ * set the current transaction state information appropriately during
+ * the abort processing
*/
s->state = TRANS_ABORT;
*/
SetUserId(GetSessionUserId());
- /* ----------------
- * do abort processing
- * ----------------
+ /*
+ * do abort processing
*/
DeferredTriggerAbortXact();
lo_commit(false); /* 'false' means it's abort */
SharedBufferChanged = false;/* safest place to do it */
- /* ----------------
- * State remains TRANS_ABORT until CleanupTransaction().
- * ----------------
+ /*
+ * State remains TRANS_ABORT until CleanupTransaction().
*/
RESUME_INTERRUPTS();
}
if (s->state == TRANS_DISABLED)
return;
- /* ----------------
- * State should still be TRANS_ABORT from AbortTransaction().
- * ----------------
+ /*
+ * State should still be TRANS_ABORT from AbortTransaction().
*/
if (s->state != TRANS_ABORT)
elog(FATAL, "CleanupTransaction and not in abort state");
- /* ----------------
- * do abort cleanup processing
- * ----------------
+ /*
+ * do abort cleanup processing
*/
AtCleanup_Memory();
- /* ----------------
- * done with abort processing, set current transaction
- * state back to default
- * ----------------
+ /*
+ * done with abort processing, set current transaction state back to
+ * default
*/
s->state = TRANS_DEFAULT;
}
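/*
 * Abort is two-phased in the code above: AbortTransaction() parks the state
 * at ABORT, and only CleanupTransaction() returns it to DEFAULT.  A
 * self-contained sketch with its own stand-in state values:
 */
typedef enum
{
    SK_AB_DEFAULT,
    SK_AB_INPROGRESS,
    SK_AB_ABORT,
    SK_AB_DISABLED
} SketchAbortState;

static void
sketch_abort_transaction(SketchAbortState *state)
{
    if (*state == SK_AB_DISABLED)
        return;
    *state = SK_AB_ABORT;       /* stays ABORT until cleanup runs */
}

static void
sketch_cleanup_transaction(SketchAbortState *state)
{
    if (*state == SK_AB_DISABLED)
        return;
    if (*state != SK_AB_ABORT)
        return;                 /* the backend raises FATAL here instead */
    *state = SK_AB_DEFAULT;     /* abort fully cleaned up */
}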
switch (s->blockState)
{
- /* ----------------
- * if we aren't in a transaction block, we
- * just do our usual start transaction.
- * ----------------
+
+ /*
+ * if we aren't in a transaction block, we just do our usual
+ * start transaction.
*/
case TBLOCK_DEFAULT:
StartTransaction();
break;
- /* ----------------
- * We should never experience this -- if we do it
- * means the BEGIN state was not changed in the previous
- * CommitTransactionCommand(). If we get it, we print
- * a warning and change to the in-progress state.
- * ----------------
+ /*
+ * We should never experience this -- if we do it means the
+ * BEGIN state was not changed in the previous
+ * CommitTransactionCommand(). If we get it, we print a
+ * warning and change to the in-progress state.
*/
case TBLOCK_BEGIN:
elog(NOTICE, "StartTransactionCommand: unexpected TBLOCK_BEGIN");
s->blockState = TBLOCK_INPROGRESS;
break;
- /* ----------------
- * This is the case when are somewhere in a transaction
- * block and about to start a new command. For now we
- * do nothing but someday we may do command-local resource
- * initialization.
- * ----------------
+ /*
+ * This is the case when we are somewhere in a transaction block
+ * and about to start a new command. For now we do nothing
+ * but someday we may do command-local resource
+ * initialization.
*/
case TBLOCK_INPROGRESS:
break;
- /* ----------------
- * As with BEGIN, we should never experience this
- * if we do it means the END state was not changed in the
- * previous CommitTransactionCommand(). If we get it, we
- * print a warning, commit the transaction, start a new
- * transaction and change to the default state.
- * ----------------
+ /*
+ * As with BEGIN, we should never experience this -- if we do it
+ * means the END state was not changed in the previous
+ * CommitTransactionCommand(). If we get it, we print a
+ * warning, commit the transaction, start a new transaction
+ * and change to the default state.
*/
case TBLOCK_END:
elog(NOTICE, "StartTransactionCommand: unexpected TBLOCK_END");
StartTransaction();
break;
- /* ----------------
- * Here we are in the middle of a transaction block but
- * one of the commands caused an abort so we do nothing
- * but remain in the abort state. Eventually we will get
- * to the "END TRANSACTION" which will set things straight.
- * ----------------
+ /*
+ * Here we are in the middle of a transaction block but one of
+ * the commands caused an abort so we do nothing but remain in
+ * the abort state. Eventually we will get to the "END
+ * TRANSACTION" which will set things straight.
*/
case TBLOCK_ABORT:
break;
- /* ----------------
- * This means we somehow aborted and the last call to
- * CommitTransactionCommand() didn't clear the state so
- * we remain in the ENDABORT state and maybe next time
- * we get to CommitTransactionCommand() the state will
- * get reset to default.
- * ----------------
+ /*
+ * This means we somehow aborted and the last call to
+ * CommitTransactionCommand() didn't clear the state so we
+ * remain in the ENDABORT state and maybe next time we get to
+ * CommitTransactionCommand() the state will get reset to
+ * default.
*/
case TBLOCK_ENDABORT:
elog(NOTICE, "StartTransactionCommand: unexpected TBLOCK_ENDABORT");
switch (s->blockState)
{
- /* ----------------
- * if we aren't in a transaction block, we
- * just do our usual transaction commit
- * ----------------
+
+ /*
+ * if we aren't in a transaction block, we just do our usual
+ * transaction commit
*/
case TBLOCK_DEFAULT:
CommitTransaction();
break;
- /* ----------------
- * This is the case right after we get a "BEGIN TRANSACTION"
- * command, but the user hasn't done anything else yet, so
- * we change to the "transaction block in progress" state
- * and return.
- * ----------------
+ /*
+ * This is the case right after we get a "BEGIN TRANSACTION"
+ * command, but the user hasn't done anything else yet, so we
+ * change to the "transaction block in progress" state and
+ * return.
*/
case TBLOCK_BEGIN:
s->blockState = TBLOCK_INPROGRESS;
break;
- /* ----------------
- * This is the case when we have finished executing a command
- * someplace within a transaction block. We increment the
- * command counter and return. Someday we may free resources
- * local to the command.
+ /*
+ * This is the case when we have finished executing a command
+ * someplace within a transaction block. We increment the
+ * command counter and return. Someday we may free resources
+ * local to the command.
*
- * That someday is today, at least for memory allocated in
- * TransactionCommandContext.
- * - vadim 03/25/97
- * ----------------
+ * That someday is today, at least for memory allocated in
+ * TransactionCommandContext. - vadim 03/25/97
*/
case TBLOCK_INPROGRESS:
CommandCounterIncrement();
MemoryContextResetAndDeleteChildren(TransactionCommandContext);
break;
- /* ----------------
- * This is the case when we just got the "END TRANSACTION"
- * statement, so we commit the transaction and go back to
- * the default state.
- * ----------------
+ /*
+ * This is the case when we just got the "END TRANSACTION"
+ * statement, so we commit the transaction and go back to the
+ * default state.
*/
case TBLOCK_END:
CommitTransaction();
s->blockState = TBLOCK_DEFAULT;
break;
- /* ----------------
- * Here we are in the middle of a transaction block but
- * one of the commands caused an abort so we do nothing
- * but remain in the abort state. Eventually we will get
- * to the "END TRANSACTION" which will set things straight.
- * ----------------
+ /*
+ * Here we are in the middle of a transaction block but one of
+ * the commands caused an abort so we do nothing but remain in
+ * the abort state. Eventually we will get to the "END
+ * TRANSACTION" which will set things straight.
*/
case TBLOCK_ABORT:
break;
- /* ----------------
- * Here we were in an aborted transaction block which
- * just processed the "END TRANSACTION" command from the
- * user, so clean up and return to the default state.
- * ----------------
+ /*
+ * Here we were in an aborted transaction block which just
+ * processed the "END TRANSACTION" command from the user, so
+ * clean up and return to the default state.
*/
case TBLOCK_ENDABORT:
CleanupTransaction();
switch (s->blockState)
{
- /* ----------------
- * if we aren't in a transaction block, we
- * just do the basic abort & cleanup transaction.
- * ----------------
+
+ /*
+ * if we aren't in a transaction block, we just do the basic
+ * abort & cleanup transaction.
*/
case TBLOCK_DEFAULT:
AbortTransaction();
CleanupTransaction();
break;
- /* ----------------
- * If we are in the TBLOCK_BEGIN it means something
- * screwed up right after reading "BEGIN TRANSACTION"
- * so we enter the abort state. Eventually an "END
- * TRANSACTION" will fix things.
- * ----------------
+ /*
+ * If we are in the TBLOCK_BEGIN it means something screwed up
+ * right after reading "BEGIN TRANSACTION" so we enter the
+ * abort state. Eventually an "END TRANSACTION" will fix
+ * things.
*/
case TBLOCK_BEGIN:
s->blockState = TBLOCK_ABORT;
/* CleanupTransaction happens when we exit TBLOCK_ABORT */
break;
- /* ----------------
- * This is the case when are somewhere in a transaction
- * block which aborted so we abort the transaction and
- * set the ABORT state. Eventually an "END TRANSACTION"
- * will fix things and restore us to a normal state.
- * ----------------
+ /*
+ * This is the case when we are somewhere in a transaction block
+ * which aborted so we abort the transaction and set the ABORT
+ * state. Eventually an "END TRANSACTION" will fix things and
+ * restore us to a normal state.
*/
case TBLOCK_INPROGRESS:
s->blockState = TBLOCK_ABORT;
/* CleanupTransaction happens when we exit TBLOCK_ABORT */
break;
- /* ----------------
- * Here, the system was fouled up just after the
- * user wanted to end the transaction block so we
- * abort the transaction and put us back into the
- * default state.
- * ----------------
+ /*
+ * Here, the system was fouled up just after the user wanted
+ * to end the transaction block so we abort the transaction
+ * and put us back into the default state.
*/
case TBLOCK_END:
s->blockState = TBLOCK_DEFAULT;
CleanupTransaction();
break;
- /* ----------------
- * Here, we are already in an aborted transaction
- * state and are waiting for an "END TRANSACTION" to
- * come along and lo and behold, we abort again!
- * So we just remain in the abort state.
- * ----------------
+ /*
+ * Here, we are already in an aborted transaction state and
+ * are waiting for an "END TRANSACTION" to come along and lo
+ * and behold, we abort again! So we just remain in the abort
+ * state.
*/
case TBLOCK_ABORT:
break;
- /* ----------------
- * Here we were in an aborted transaction block which
- * just processed the "END TRANSACTION" command but somehow
- * aborted again.. since we must have done the abort
- * processing, we clean up and return to the default state.
- * ----------------
+ /*
+ * Here we were in an aborted transaction block which just
+ * processed the "END TRANSACTION" command but somehow aborted
+ * again. Since we must have done the abort processing, we
+ * clean up and return to the default state.
*/
case TBLOCK_ENDABORT:
CleanupTransaction();
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
if (s->blockState != TBLOCK_DEFAULT)
elog(NOTICE, "BEGIN: already a transaction in progress");
- /* ----------------
- * set the current transaction block state information
- * appropriately during begin processing
- * ----------------
+ /*
+ * set the current transaction block state information appropriately
+ * during begin processing
*/
s->blockState = TBLOCK_BEGIN;
- /* ----------------
- * do begin processing
- * ----------------
+ /*
+ * do begin processing
*/
- /* ----------------
- * done with begin processing, set block state to inprogress
- * ----------------
+ /*
+ * done with begin processing, set block state to inprogress
*/
s->blockState = TBLOCK_INPROGRESS;
}
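/*
 * The BEGIN/COMMIT handling above is a small state machine over the block
 * state.  This sketch shows only the transitions; the stand-in enum and the
 * simplified fallbacks (the real code also raises NOTICEs and runs the
 * commit/abort work) are assumptions of the sketch.
 */
typedef enum
{
    SK_TBLOCK_DEFAULT,
    SK_TBLOCK_BEGIN,
    SK_TBLOCK_INPROGRESS,
    SK_TBLOCK_END,
    SK_TBLOCK_ABORT,
    SK_TBLOCK_ENDABORT
} SketchBlockState;

/* BEGIN: only meaningful outside a block; the block becomes "in progress" */
static SketchBlockState
sketch_begin_block(SketchBlockState bs)
{
    if (bs != SK_TBLOCK_DEFAULT)
        return bs;              /* "already a transaction in progress" */
    return SK_TBLOCK_INPROGRESS;
}

/* COMMIT: a live block asks for END, an aborted one for ENDABORT */
static SketchBlockState
sketch_end_block(SketchBlockState bs)
{
    if (bs == SK_TBLOCK_INPROGRESS)
        return SK_TBLOCK_END;
    if (bs == SK_TBLOCK_ABORT)
        return SK_TBLOCK_ENDABORT;
    return bs;                  /* COMMIT outside a block: the original notices and aborts */
}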
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
if (s->blockState == TBLOCK_INPROGRESS)
{
- /* ----------------
- * here we are in a transaction block which should commit
- * when we get to the upcoming CommitTransactionCommand()
- * so we set the state to "END". CommitTransactionCommand()
- * will recognize this and commit the transaction and return
- * us to the default state
- * ----------------
+
+ /*
+ * here we are in a transaction block which should commit when we
+ * get to the upcoming CommitTransactionCommand() so we set the
+ * state to "END". CommitTransactionCommand() will recognize this
+ * and commit the transaction and return us to the default state
*/
s->blockState = TBLOCK_END;
return;
if (s->blockState == TBLOCK_ABORT)
{
- /* ----------------
- * here, we are in a transaction block which aborted
- * and since the AbortTransaction() was already done,
- * we do whatever is needed and change to the special
- * "END ABORT" state. The upcoming CommitTransactionCommand()
- * will recognise this and then put us back in the default
- * state.
- * ----------------
+
+ /*
+ * here, we are in a transaction block which aborted and since the
+ * AbortTransaction() was already done, we do whatever is needed
+ * and change to the special "END ABORT" state. The upcoming
+ * CommitTransactionCommand() will recognize this and then put us
+ * back in the default state.
*/
s->blockState = TBLOCK_ENDABORT;
return;
}
- /* ----------------
- * here, the user issued COMMIT when not inside a transaction.
- * Issue a notice and go to abort state. The upcoming call to
- * CommitTransactionCommand() will then put us back into the
- * default state.
- * ----------------
+ /*
+ * here, the user issued COMMIT when not inside a transaction. Issue a
+ * notice and go to abort state. The upcoming call to
+ * CommitTransactionCommand() will then put us back into the default
+ * state.
*/
elog(NOTICE, "COMMIT: no transaction in progress");
AbortTransaction();
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
if (s->blockState == TBLOCK_INPROGRESS)
{
- /* ----------------
- * here we were inside a transaction block something
- * screwed up inside the system so we enter the abort state,
- * do the abort processing and then return.
- * We remain in the abort state until we see an
- * END TRANSACTION command.
- * ----------------
+
+ /*
+ * here we were inside a transaction block and something screwed up
+ * inside the system so we enter the abort state, do the abort
+ * processing and then return. We remain in the abort state until
+ * we see an END TRANSACTION command.
*/
s->blockState = TBLOCK_ABORT;
AbortTransaction();
return;
}
- /* ----------------
- * here, the user issued ABORT when not inside a transaction.
- * Issue a notice and go to abort state. The upcoming call to
- * CommitTransactionCommand() will then put us back into the
- * default state.
- * ----------------
+ /*
+ * here, the user issued ABORT when not inside a transaction. Issue a
+ * notice and go to abort state. The upcoming call to
+ * CommitTransactionCommand() will then put us back into the default
+ * state.
*/
elog(NOTICE, "ROLLBACK: no transaction in progress");
AbortTransaction();
{
TransactionState s = CurrentTransactionState;
- /* ----------------
- * check the current transaction state
- * ----------------
+ /*
+ * check the current transaction state
*/
if (s->state == TRANS_DISABLED)
return;
if (s->blockState == TBLOCK_INPROGRESS)
{
- /* ----------------
- * here we were inside a transaction block and we
- * got an abort command from the user, so we move to
- * the abort state, do the abort processing and
- * then change to the ENDABORT state so we will end up
- * in the default state after the upcoming
- * CommitTransactionCommand().
- * ----------------
+
+ /*
+ * here we were inside a transaction block and we got an abort
+ * command from the user, so we move to the abort state, do the
+ * abort processing and then change to the ENDABORT state so we
+ * will end up in the default state after the upcoming
+ * CommitTransactionCommand().
*/
s->blockState = TBLOCK_ABORT;
AbortTransaction();
return;
}
- /* ----------------
- * here, the user issued ABORT when not inside a transaction.
- * Issue a notice and go to abort state. The upcoming call to
- * CommitTransactionCommand() will then put us back into the
- * default state.
- * ----------------
+ /*
+ * here, the user issued ABORT when not inside a transaction. Issue a
+ * notice and go to abort state. The upcoming call to
+ * CommitTransactionCommand() will then put us back into the default
+ * state.
*/
elog(NOTICE, "ROLLBACK: no transaction in progress");
AbortTransaction();
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.105 2001/03/13 01:17:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.106 2001/03/22 06:16:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int DebugMode;
-static MemoryContext nogc = NULL; /* special no-gc mem context */
+static MemoryContext nogc = NULL; /* special no-gc mem context */
extern int optind;
extern char *optarg;
char *dbName;
int flag;
int xlogop = BS_XLOG_NOP;
- char *potential_DataDir = NULL;
+ char *potential_DataDir = NULL;
- /* --------------------
- * initialize globals
- * -------------------
+ /*
+ * initialize globals
*/
MyProcPid = getpid();
MemoryContextInit();
}
- /* ----------------
- * process command arguments
- * ----------------
+ /*
+ * process command arguments
*/
/* Set defaults, to be overriden by explicit options below */
if (!IsUnderPostmaster)
{
ResetAllOptions();
- potential_DataDir = getenv("PGDATA"); /* Null if no PGDATA variable */
+ potential_DataDir = getenv("PGDATA"); /* Null if no PGDATA
+ * variable */
}
while ((flag = getopt(argc, argv, "D:dCQx:pB:F")) != EOF)
if (!potential_DataDir)
{
fprintf(stderr, "%s does not know where to find the database system "
- "data. You must specify the directory that contains the "
- "database system either by specifying the -D invocation "
- "option or by setting the PGDATA environment variable.\n\n",
+ "data. You must specify the directory that contains the "
+ "database system either by specifying the -D invocation "
+ "option or by setting the PGDATA environment variable.\n\n",
argv[0]);
proc_exit(1);
}
if (IsUnderPostmaster)
{
+
/*
* Properly accept or ignore signals the postmaster might send us
*/
pqsignal(SIGHUP, SIG_IGN);
- pqsignal(SIGINT, SIG_IGN); /* ignore query-cancel */
+ pqsignal(SIGINT, SIG_IGN); /* ignore query-cancel */
pqsignal(SIGTERM, die);
pqsignal(SIGQUIT, quickdie);
pqsignal(SIGUSR1, SIG_IGN);
pqsignal(SIGUSR2, SIG_IGN);
+
/*
* Reset some signals that are accepted by postmaster but not here
*/
pqsignal(SIGTTOU, SIG_DFL);
pqsignal(SIGCONT, SIG_DFL);
pqsignal(SIGWINCH, SIG_DFL);
+
/*
- * Unblock signals (they were blocked when the postmaster forked us)
+ * Unblock signals (they were blocked when the postmaster forked
+ * us)
*/
PG_SETMASK(&UnBlockSig);
}
/*
* Create lockfile for data directory.
*/
- if (! CreateDataDirLockFile(DataDir, false))
+ if (!CreateDataDirLockFile(DataDir, false))
proc_exit(1);
}
for (i = 0; i < HASHTABLESIZE; ++i)
hashtable[i] = NULL;
- /* ----------------
- * abort processing resumes here
- * ----------------
+ /*
+ * abort processing resumes here
*/
if (sigsetjmp(Warn_restart, 1) != 0)
{
AbortCurrentTransaction();
}
- /* ----------------
- * process input.
- * ----------------
+ /*
+ * process input.
*/
/*
ObjectIdGetDatum(ap->am_typ.typelem),
Int32GetDatum(-1));
prt = DatumGetCString(OidFunctionCall3(ap->am_typ.typoutput,
- values[i],
- ObjectIdGetDatum(ap->am_typ.typelem),
- Int32GetDatum(-1)));
+ values[i],
+ ObjectIdGetDatum(ap->am_typ.typelem),
+ Int32GetDatum(-1)));
if (!Quiet)
printf("%s ", prt);
pfree(prt);
printf("Typ == NULL, typeindex = %u idx = %d\n", typeindex, i);
values[i] = OidFunctionCall3(Procid[typeindex].inproc,
CStringGetDatum(value),
- ObjectIdGetDatum(Procid[typeindex].elem),
+ ObjectIdGetDatum(Procid[typeindex].elem),
Int32GetDatum(-1));
prt = DatumGetCString(OidFunctionCall3(Procid[typeindex].outproc,
- values[i],
- ObjectIdGetDatum(Procid[typeindex].elem),
- Int32GetDatum(-1)));
+ values[i],
+ ObjectIdGetDatum(Procid[typeindex].elem),
+ Int32GetDatum(-1)));
if (!Quiet)
printf("%s ", prt);
pfree(prt);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.161 2001/03/22 03:59:19 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.162 2001/03/22 06:16:10 momjian Exp $
*
*
* INTERFACE ROUTINES
MemoryContext oldcxt;
Oid tblNode = MyDatabaseId;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(natts > 0);
relname);
}
- /* ----------------
- * real ugly stuff to assign the proper relid in the relation
- * descriptor follows.
- * ----------------
+ /*
+ * real ugly stuff to assign the proper relid in the relation
+ * descriptor follows.
*/
if (relname && IsSystemRelationName(relname))
{
(int) MyProcPid, uniqueId++);
}
- /* ----------------
- * switch to the cache context to create the relcache entry.
- * ----------------
+ /*
+ * switch to the cache context to create the relcache entry.
*/
if (!CacheMemoryContext)
CreateCacheMemoryContext();
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
- /* ----------------
- * allocate a new relation descriptor.
- * ----------------
+ /*
+ * allocate a new relation descriptor.
*/
rel = (Relation) palloc(sizeof(RelationData));
MemSet((char *) rel, 0, sizeof(RelationData));
*/
rel->rd_att = CreateTupleDescCopyConstr(tupDesc);
- /* ----------------
- * nail the reldesc if this is a bootstrap create reln and
- * we may need it in the cache later on in the bootstrap
- * process so we don't ever want it kicked out. e.g. pg_attribute!!!
- * ----------------
+ /*
+ * nail the reldesc if this is a bootstrap create reln and we may need
+ * it in the cache later on in the bootstrap process so we don't ever
+ * want it kicked out. e.g. pg_attribute!!!
*/
if (nailme)
rel->rd_isnailed = true;
- /* ----------------
- * initialize the fields of our new relation descriptor
- * ----------------
+ /*
+ * initialize the fields of our new relation descriptor
*/
rel->rd_rel = (Form_pg_class) palloc(sizeof *rel->rd_rel);
MemSet((char *) rel->rd_rel, 0, sizeof *rel->rd_rel);
rel->rd_node.relNode = relid;
rel->rd_rel->relfilenode = relid;
- /* ----------------
- * done building relcache entry.
- * ----------------
+ /*
+ * done building relcache entry.
*/
MemoryContextSwitchTo(oldcxt);
- /* ----------------
- * have the storage manager create the relation.
- * ----------------
+ /*
+ * have the storage manager create the relation.
*/
if (storage_create)
heap_storage_create(rel);
int j;
int natts = tupdesc->natts;
- /* ----------------
- * first check for collision with system attribute names
- * ----------------
+ /*
+ * first check for collision with system attribute names
*
- * also, warn user if attribute to be created has
- * an unknown typid (usually as a result of a 'retrieve into'
- * - jolly
+ * also, warn user if attribute to be created has an unknown typid
+ * (usually as a result of a 'retrieve into') - jolly
*/
for (i = 0; i < natts; i++)
{
}
}
- /* ----------------
- * next check for repeated attribute names
- * ----------------
+ /*
+ * next check for repeated attribute names
*/
for (i = 1; i < natts; i++)
{
pg_class_desc = heap_openr(RelationRelationName, AccessShareLock);
- /* ----------------
- * At bootstrap time, we have to do this the hard way. Form the
- * scan key.
- * ----------------
+ /*
+ * At bootstrap time, we have to do this the hard way. Form the
+ * scan key.
*/
ScanKeyEntryInitialize(&key,
0,
(RegProcedure) F_NAMEEQ,
(Datum) relname);
- /* ----------------
- * begin the scan
- * ----------------
+ /*
+ * begin the scan
*/
pg_class_scan = heap_beginscan(pg_class_desc,
0,
1,
&key);
- /* ----------------
- * get a tuple. if the tuple is NULL then it means we
- * didn't find an existing relation.
- * ----------------
+ /*
+ * get a tuple. if the tuple is NULL then it means we didn't find
+ * an existing relation.
*/
tuple = heap_getnext(pg_class_scan, 0);
Relation idescs[Num_pg_attr_indices];
int natts = tupdesc->natts;
- /* ----------------
- * open pg_attribute
- * ----------------
+ /*
+ * open pg_attribute
*/
rel = heap_openr(AttributeRelationName, RowExclusiveLock);
- /* -----------------
+ /*
* Check if we have any indices defined on pg_attribute.
- * -----------------
*/
hasindex = RelationGetForm(rel)->relhasindex;
if (hasindex)
CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
- /* ----------------
- * first we add the user attributes..
- * ----------------
+ /*
+ * first we add the user attributes..
*/
dpp = tupdesc->attrs;
for (i = 0; i < natts; i++)
dpp++;
}
- /* ----------------
- * next we add the system attributes..
- * ----------------
+ /*
+ * next we add the system attributes..
*/
dpp = HeapAtt;
for (i = 0; i < -1 - FirstLowInvalidHeapAttributeNumber; i++)
HeapTuple tup;
Relation idescs[Num_pg_class_indices];
- /* ----------------
- * first we update some of the information in our
- * uncataloged relation's relation descriptor.
- * ----------------
+ /*
+ * first we update some of the information in our uncataloged
+ * relation's relation descriptor.
*/
new_rel_reltup = new_rel_desc->rd_rel;
- /* ----------------
- * Here we insert bogus estimates of the size of the new relation.
- * In reality, of course, the new relation has 0 tuples and pages,
- * and if we were tracking these statistics accurately then we'd
- * set the fields that way. But at present the stats will be updated
- * only by VACUUM or CREATE INDEX, and the user might insert a lot of
- * tuples before he gets around to doing either of those. So, instead
- * of saying the relation is empty, we insert guesstimates. The point
- * is to keep the optimizer from making really stupid choices on
+ /*
+ * Here we insert bogus estimates of the size of the new relation. In
+ * reality, of course, the new relation has 0 tuples and pages, and if
+ * we were tracking these statistics accurately then we'd set the
+ * fields that way. But at present the stats will be updated only by
+ * VACUUM or CREATE INDEX, and the user might insert a lot of tuples
+ * before he gets around to doing either of those. So, instead of
+ * saying the relation is empty, we insert guesstimates. The point is
+ * to keep the optimizer from making really stupid choices on
* never-yet-vacuumed tables; so the estimates need only be large
* enough to discourage the optimizer from using nested-loop plans.
- * With this hack, nested-loop plans will be preferred only after
- * the table has been proven to be small by VACUUM or CREATE INDEX.
- * Maintaining the stats on-the-fly would solve the problem more cleanly,
- * but the overhead of that would likely cost more than it'd save.
- * (NOTE: CREATE INDEX inserts the same bogus estimates if it finds the
- * relation has 0 rows and pages. See index.c.)
- * ----------------
+ * With this hack, nested-loop plans will be preferred only after the
+ * table has been proven to be small by VACUUM or CREATE INDEX.
+ * Maintaining the stats on-the-fly would solve the problem more
+ * cleanly, but the overhead of that would likely cost more than it'd
+ * save. (NOTE: CREATE INDEX inserts the same bogus estimates if it
+ * finds the relation has 0 rows and pages. See index.c.)
*/
new_rel_reltup->relpages = 10; /* bogus estimates */
new_rel_reltup->reltuples = 1000;
int natts = tupdesc->natts;
char *temp_relname = NULL;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(IsNormalProcessingMode() || IsBootstrapProcessingMode());
if (natts <= 0 || natts > MaxHeapAttributeNumber)
strcpy(relname, temp_relname); /* heap_create will change this */
}
- /* ----------------
- * Tell heap_create not to create a physical file; we'll do that
- * below after all our catalog updates are done. (This isn't really
- * necessary anymore, but we may as well avoid the cycles of creating
- * and deleting the file in case we fail.)
+ /*
+ * Tell heap_create not to create a physical file; we'll do that below
+ * after all our catalog updates are done. (This isn't really
+ * necessary anymore, but we may as well avoid the cycles of creating
+ * and deleting the file in case we fail.)
*
- * Note: The call to heap_create() changes relname for
- * temp tables; it becomes the true physical relname.
- * The call to heap_storage_create() does all the "real"
- * work of creating the disk file for the relation.
- * ----------------
+ * Note: The call to heap_create() changes relname for temp tables; it
+ * becomes the true physical relname. The call to
+ * heap_storage_create() does all the "real" work of creating the disk
+ * file for the relation.
*/
new_rel_desc = heap_create(relname, tupdesc, istemp, false,
allow_system_table_mods);
/* Assign an OID for the relation's tuple type */
new_type_oid = newoid();
- /* ----------------
- * now create an entry in pg_class for the relation.
+ /*
+ * now create an entry in pg_class for the relation.
*
- * NOTE: we could get a unique-index failure here, in case someone else
- * is creating the same relation name in parallel but hadn't committed
- * yet when we checked for a duplicate name above.
- * ----------------
+ * NOTE: we could get a unique-index failure here, in case someone else
+ * is creating the same relation name in parallel but hadn't committed
+ * yet when we checked for a duplicate name above.
*/
pg_class_desc = heap_openr(RelationRelationName, RowExclusiveLock);
relkind,
temp_relname);
- /* ----------------
- * since defining a relation also defines a complex type,
- * we add a new system type corresponding to the new relation.
+ /*
+ * since defining a relation also defines a complex type, we add a new
+ * system type corresponding to the new relation.
*
- * NOTE: we could get a unique-index failure here, in case the same name
- * has already been used for a type.
- * ----------------
+ * NOTE: we could get a unique-index failure here, in case the same name
+ * has already been used for a type.
*/
AddNewRelationType(relname, new_rel_oid, new_type_oid);
- /* ----------------
- * now add tuples to pg_attribute for the attributes in
- * our new relation.
- * ----------------
+ /*
+ * now add tuples to pg_attribute for the attributes in our new
+ * relation.
*/
AddNewAttributeTuples(new_rel_oid, tupdesc);
if (relkind != RELKIND_VIEW)
heap_storage_create(new_rel_desc);
- /* ----------------
- * ok, the relation has been cataloged, so close our relations
- * and return the oid of the newly created relation.
+ /*
+ * ok, the relation has been cataloged, so close our relations and
+ * return the oid of the newly created relation.
*
- * SOMEDAY: fill the STATISTIC relation properly.
- * ----------------
+ * SOMEDAY: fill the STATISTIC relation properly.
*/
heap_close(new_rel_desc, NoLock); /* do not unlock till end of xact */
heap_close(pg_class_desc, RowExclusiveLock);
ScanKeyData entry;
bool found = false;
- /* ----------------
- * open pg_inherits
- * ----------------
+ /*
+ * open pg_inherits
*/
catalogRelation = heap_openr(InheritsRelationName, RowExclusiveLock);
- /* ----------------
- * form a scan key for the subclasses of this class
- * and begin scanning
- * ----------------
+ /*
+ * form a scan key for the subclasses of this class and begin scanning
*/
ScanKeyEntryInitialize(&entry, 0x0, Anum_pg_inherits_inhparent,
F_OIDEQ,
1,
&entry);
- /* ----------------
- * if any subclasses exist, then we disallow the deletion.
- * ----------------
+ /*
+ * if any subclasses exist, then we disallow the deletion.
*/
tuple = heap_getnext(scan, 0);
if (HeapTupleIsValid(tuple))
}
heap_endscan(scan);
- /* ----------------
- * If we get here, it means the relation has no subclasses
- * so we can trash it. First we remove dead INHERITS tuples.
- * ----------------
+ /*
+ * If we get here, it means the relation has no subclasses so we can
+ * trash it. First we remove dead INHERITS tuples.
*/
entry.sk_attno = Anum_pg_inherits_inhrelid;
heap_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
- /* ----------------
- * now remove dead IPL tuples
- * ----------------
+ /*
+ * now remove dead IPL tuples
*/
catalogRelation = heap_openr(InheritancePrecidenceListRelationName,
RowExclusiveLock);
Relation pg_class_desc;
HeapTuple tup;
- /* ----------------
- * open pg_class
- * ----------------
+ /*
+ * open pg_class
*/
pg_class_desc = heap_openr(RelationRelationName, RowExclusiveLock);
elog(ERROR, "Relation \"%s\" does not exist",
RelationGetRelationName(rel));
- /* ----------------
- * delete the relation tuple from pg_class, and finish up.
- * ----------------
+ /*
+ * delete the relation tuple from pg_class, and finish up.
*/
simple_heap_delete(pg_class_desc, &tup->t_self);
heap_freetuple(tup);
rel = heap_openr(relname, AccessExclusiveLock);
rid = RelationGetRelid(rel);
- /* ----------------
- * TRUNCATE TABLE within a transaction block is dangerous, because
- * if the transaction is later rolled back we have no way to
- * undo truncation of the relation's physical file. Disallow it
- * except for a rel created in the current xact (which would be deleted
- * on abort, anyway).
- * ----------------
+ /*
+ * TRUNCATE TABLE within a transaction block is dangerous, because if
+ * the transaction is later rolled back we have no way to undo
+ * truncation of the relation's physical file. Disallow it except for
+ * a rel created in the current xact (which would be deleted on abort,
+ * anyway).
*/
if (IsTransactionBlock() && !rel->rd_myxactonly)
elog(ERROR, "TRUNCATE TABLE cannot run inside a BEGIN/END block");
HeapTuple tup;
int2 attnum;
- /* ----------------
- * open pg_attribute
- * ----------------
+ /*
+ * open pg_attribute
*/
pg_attribute_desc = heap_openr(AttributeRelationName, RowExclusiveLock);
HeapTuple atttup;
Oid typoid;
- /* ----------------
- * open pg_type
- * ----------------
+ /*
+ * open pg_type
*/
pg_type_desc = heap_openr(TypeRelationName, RowExclusiveLock);
- /* ----------------
- * create a scan key to locate the type tuple corresponding
- * to this relation.
- * ----------------
+ /*
+ * create a scan key to locate the type tuple corresponding to this
+ * relation.
*/
ScanKeyEntryInitialize(&key, 0,
Anum_pg_type_typrelid,
1,
&key);
- /* ----------------
- * use heap_getnext() to fetch the pg_type tuple. If this
- * tuple is not valid then something's wrong.
- * ----------------
+ /*
+ * use heap_getnext() to fetch the pg_type tuple. If this tuple is
+ * not valid then something's wrong.
*/
tup = heap_getnext(pg_type_scan, 0);
RelationGetRelationName(rel));
}
- /* ----------------
- * now scan pg_attribute. if any other relations have
- * attributes of the type of the relation we are deleteing
- * then we have to disallow the deletion. should talk to
- * stonebraker about this. -cim 6/19/90
- * ----------------
+ /*
+ * now scan pg_attribute. if any other relations have attributes of
+ * the type of the relation we are deleting then we have to disallow
+ * the deletion. should talk to stonebraker about this. -cim 6/19/90
*/
typoid = tup->t_data->t_oid;
1,
&attkey);
- /* ----------------
- * try and get a pg_attribute tuple. if we succeed it means
- * we can't delete the relation because something depends on
- * the schema.
- * ----------------
+ /*
+ * try and get a pg_attribute tuple. if we succeed it means we can't
+ * delete the relation because something depends on the schema.
*/
atttup = heap_getnext(pg_attribute_scan, 0);
heap_endscan(pg_attribute_scan);
heap_close(pg_attribute_desc, RowExclusiveLock);
- /* ----------------
- * Ok, it's safe so we delete the relation tuple
- * from pg_type and finish up.
- * ----------------
+ /*
+ * Ok, it's safe so we delete the relation tuple from pg_type and
+ * finish up.
*/
simple_heap_delete(pg_type_desc, &tup->t_self);
bool istemp = is_temp_rel_name(relname);
int i;
- /* ----------------
- * Open and lock the relation.
- * ----------------
+ /*
+ * Open and lock the relation.
*/
rel = heap_openr(relname, AccessExclusiveLock);
rid = RelationGetRelid(rel);
has_toasttable = rel->rd_rel->reltoastrelid != InvalidOid;
- /* ----------------
- * prevent deletion of system relations
- * ----------------
+ /*
+ * prevent deletion of system relations
*/
/* allow temp of pg_class? Guess so. */
if (!istemp && !allow_system_table_mods &&
elog(ERROR, "System relation \"%s\" may not be dropped",
RelationGetRelationName(rel));
- /* ----------------
- * Release all buffers that belong to this relation, after writing
- * any that are dirty
- * ----------------
+ /*
+ * Release all buffers that belong to this relation, after writing any
+ * that are dirty
*/
i = FlushRelationBuffers(rel, (BlockNumber) 0);
if (i < 0)
elog(ERROR, "heap_drop_with_catalog: FlushRelationBuffers returned %d",
i);
- /* ----------------
- * remove rules if necessary
- * ----------------
+ /*
+ * remove rules if necessary
*/
if (rel->rd_rules != NULL)
RelationRemoveRules(rid);
/* triggers */
RelationRemoveTriggers(rel);
- /* ----------------
- * remove inheritance information
- * ----------------
+ /*
+ * remove inheritance information
*/
RelationRemoveInheritance(rel);
- /* ----------------
- * remove indexes if necessary
- * ----------------
+ /*
+ * remove indexes if necessary
*/
RelationRemoveIndexes(rel);
- /* ----------------
- * delete attribute tuples
- * ----------------
+ /*
+ * delete attribute tuples
*/
DeleteAttributeTuples(rel);
- /* ----------------
- * delete comments, statistics, and constraints
- * ----------------
+ /*
+ * delete comments, statistics, and constraints
*/
DeleteComments(RelationGetRelid(rel));
RemoveConstraints(rel);
- /* ----------------
- * delete type tuple
- * ----------------
+ /*
+ * delete type tuple
*/
DeleteTypeTuple(rel);
- /* ----------------
- * delete relation tuple
- * ----------------
+ /*
+ * delete relation tuple
*/
DeleteRelationTuple(rel);
- /* ----------------
- * unlink the relation's physical file and finish up.
- * ----------------
+ /*
+ * unlink the relation's physical file and finish up.
*/
if (rel->rd_rel->relkind != RELKIND_VIEW)
smgrunlink(DEFAULT_SMGR, rel);
*/
heap_close(rel, NoLock);
- /* ----------------
- * flush the relation from the relcache
- * ----------------
+ /*
+ * flush the relation from the relcache
*/
RelationForgetRelation(rid);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.143 2001/03/22 03:59:19 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.144 2001/03/22 06:16:10 momjian Exp $
*
*
* INTERFACE ROUTINES
heapTupDesc = RelationGetDescr(heapRelation);
natts = RelationGetForm(heapRelation)->relnatts;
- /* ----------------
- * allocate the new tuple descriptor
- * ----------------
+ /*
+ * allocate the new tuple descriptor
*/
indexTupDesc = CreateTemplateTupleDesc(numatts);
Form_pg_attribute from;
Form_pg_attribute to;
- /* ----------------
- * get the attribute number and make sure it's valid;
- * determine which attribute descriptor to copy
- * ----------------
+ /*
+ * get the attribute number and make sure it's valid; determine
+ * which attribute descriptor to copy
*/
atnum = attNums[i];
if (!AttrNumberIsForUserDefinedAttr(atnum))
{
- /* ----------------
- * here we are indexing on a system attribute (-1...-n)
- * so we convert atnum into a usable index 0...n-1 so we can
- * use it to dereference the array sysatts[] which stores
- * tuple descriptor information for system attributes.
- * ----------------
+
+ /*
+ * here we are indexing on a system attribute (-1...-n) so we
+ * convert atnum into a usable index 0...n-1 so we can use it
+ * to dereference the array sysatts[] which stores tuple
+ * descriptor information for system attributes.
*/
if (atnum <= FirstLowInvalidHeapAttributeNumber || atnum >= 0)
elog(ERROR, "Cannot create index on system attribute: attribute number out of range (%d)", atnum);
}
else
{
- /* ----------------
- * here we are indexing on a normal attribute (1...n)
- * ----------------
+
+ /*
+ * here we are indexing on a normal attribute (1...n)
*/
if (atnum > natts)
elog(ERROR, "Cannot create index: attribute %d does not exist",
from = heapTupDesc->attrs[atind];
}
- /* ----------------
- * now that we've determined the "from", let's copy
- * the tuple desc data...
- * ----------------
+ /*
+ * now that we've determined the "from", let's copy the tuple desc
+ * data...
*/
indexTupDesc->attrs[i] = to =
(Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
ScanKeyData key;
Form_pg_am aform;
- /* ----------------
- * form a scan key for the pg_am relation
- * ----------------
+ /*
+ * form a scan key for the pg_am relation
*/
ScanKeyEntryInitialize(&key, 0, ObjectIdAttributeNumber,
F_OIDEQ,
ObjectIdGetDatum(accessMethodObjectId));
- /* ----------------
- * fetch the desired access method tuple
- * ----------------
+ /*
+ * fetch the desired access method tuple
*/
pg_am_desc = heap_openr(AccessMethodRelationName, AccessShareLock);
pg_am_scan = heap_beginscan(pg_am_desc, 0, SnapshotNow, 1, &key);
pg_am_tuple = heap_getnext(pg_am_scan, 0);
- /* ----------------
- * return NULL if not found
- * ----------------
+ /*
+ * return NULL if not found
*/
if (!HeapTupleIsValid(pg_am_tuple))
{
return NULL;
}
- /* ----------------
- * if found AM tuple, then copy it into resultCxt and return the copy
- * ----------------
+ /*
+ * if found AM tuple, then copy it into resultCxt and return the copy
*/
aform = (Form_pg_am) MemoryContextAlloc(resultCxt, sizeof *aform);
memcpy(aform, GETSTRUCT(pg_am_tuple), sizeof *aform);
indexRelation->rd_am = AccessMethodObjectIdGetForm(amoid,
CacheMemoryContext);
- /* ----------------
- * XXX missing the initialization of some other fields
- * ----------------
+ /*
+ * XXX missing the initialization of some other fields
*/
indexRelation->rd_rel->relowner = GetUserId();
CLASS_TUPLE_SIZE,
(char *) indexRelation->rd_rel);
- /* ----------------
- * the new tuple must have the same oid as the relcache entry for the
- * index. sure would be embarrassing to do this sort of thing in
- * polite company.
- * ----------------
+ /*
+ * the new tuple must have the same oid as the relcache entry for the
+ * index. sure would be embarrassing to do this sort of thing in
+ * polite company.
*/
tuple->t_data->t_oid = RelationGetRelid(indexRelation);
heap_insert(pg_class, tuple);
TupleDesc indexTupDesc;
int i;
- /* ----------------
- * open the attribute relation
- * ----------------
+ /*
+ * open the attribute relation
*/
pg_attribute = heap_openr(AttributeRelationName, RowExclusiveLock);
- /* ----------------
- * initialize *null, *replace and *value
- * ----------------
+ /*
+ * initialize *null, *replace and *value
*/
MemSet(nullv, ' ', Natts_pg_attribute);
MemSet(replace, ' ', Natts_pg_attribute);
- /* ----------------
+ /* ----------
* create the first attribute tuple.
* XXX For now, only change the ATTNUM attribute value
- * ----------------
+ * ----------
*/
replace[Anum_pg_attribute_attnum - 1] = 'r';
replace[Anum_pg_attribute_attcacheoff - 1] = 'r';
CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs);
}
- /* ----------------
- * insert the first attribute tuple.
- * ----------------
+ /*
+ * insert the first attribute tuple.
*/
cur_tuple = heap_modifytuple(init_tuple,
pg_attribute,
if (hasind)
CatalogIndexInsert(idescs, Num_pg_attr_indices, pg_attribute, cur_tuple);
- /* ----------------
- * now we use the information in the index cur_tuple
- * descriptor to form the remaining attribute tuples.
- * ----------------
+ /*
+ * now we use the information in the index cur_tuple descriptor to
+ * form the remaining attribute tuples.
*/
indexTupDesc = RelationGetDescr(indexRelation);
for (i = 1; i < numatts; i += 1)
{
- /* ----------------
- * process the remaining attributes...
- * ----------------
+
+ /*
+ * process the remaining attributes...
*/
memmove(GETSTRUCT(cur_tuple),
(char *) indexTupDesc->attrs[i],
if (hasind)
CatalogIndexInsert(idescs, Num_pg_attr_indices, pg_attribute, new_tuple);
- /* ----------------
- * ModifyHeapTuple returns a new copy of a cur_tuple
- * so we free the original and use the copy..
- * ----------------
+ /*
+ * heap_modifytuple returns a new copy of cur_tuple, so we free
+ * the original and use the copy.
*/
cur_tuple = new_tuple;
}
int i;
Relation idescs[Num_pg_index_indices];
- /* ----------------
- * allocate a Form_pg_index big enough to hold the
- * index-predicate (if any) in string form
- * ----------------
+ /*
+ * allocate a Form_pg_index big enough to hold the index-predicate (if
+ * any) in string form
*/
if (indexInfo->ii_Predicate != NULL)
{
indexForm = (Form_pg_index) palloc(itupLen);
MemSet(indexForm, 0, sizeof(FormData_pg_index));
- /* ----------------
- * store information into the index tuple form
- * ----------------
+ /*
+ * store information into the index tuple form
*/
indexForm->indexrelid = indexoid;
indexForm->indrelid = heapoid;
indexForm->indisprimary = primary;
memcpy((char *) &indexForm->indpred, (char *) predText, predLen);
- /* ----------------
- * copy index key and op class information
+ /*
+ * copy index key and op class information
*
- * We zeroed the extra slots (if any) above --- that's essential.
- * ----------------
+ * We zeroed the extra slots (if any) above --- that's essential.
*/
for (i = 0; i < indexInfo->ii_NumKeyAttrs; i++)
indexForm->indkey[i] = indexInfo->ii_KeyAttrNumbers[i];
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
indexForm->indclass[i] = classOids[i];
- /* ----------------
- * open the system catalog index relation
- * ----------------
+ /*
+ * open the system catalog index relation
*/
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
- /* ----------------
- * form a tuple to insert into pg_index
- * ----------------
+ /*
+ * form a tuple to insert into pg_index
*/
tuple = heap_addheader(Natts_pg_index,
itupLen,
(char *) indexForm);
- /* ----------------
- * insert the tuple into the pg_index
- * ----------------
+ /*
+ * insert the tuple into the pg_index
*/
heap_insert(pg_index, tuple);
- /* ----------------
- * add index tuples for it
- * ----------------
+ /*
+ * add index tuples for it
*/
if (!IsIgnoringSystemIndexes())
{
CatalogCloseIndices(Num_pg_index_indices, idescs);
}
- /* ----------------
- * close the relation and free the tuple
- * ----------------
+ /*
+ * close the relation and free the tuple
*/
heap_close(pg_index, RowExclusiveLock);
pfree(predText);
Oid attrelid;
Size strsize;
- /* ----------------
- * get information from the index relation descriptor
- * ----------------
+ /*
+ * get information from the index relation descriptor
*/
attrelid = indexRelation->rd_att->attrs[0]->attrelid;
amstrategies = indexRelation->rd_am->amstrategies;
amsupport = indexRelation->rd_am->amsupport;
- /* ----------------
- * get the size of the strategy
- * ----------------
+ /*
+ * get the size of the strategy
*/
strsize = AttributeNumberGetIndexStrategySize(numatts, amstrategies);
- /* ----------------
- * allocate the new index strategy structure
+ /*
+ * allocate the new index strategy structure
*
- * the index strategy has to be allocated in the same
- * context as the relation descriptor cache or else
- * it will be lost at the end of the transaction.
- * ----------------
+ * the index strategy has to be allocated in the same context as the
+ * relation descriptor cache or else it will be lost at the end of the
+ * transaction.
*/
if (!CacheMemoryContext)
CreateCacheMemoryContext();
else
support = (RegProcedure *) NULL;
- /* ----------------
- * fill in the index strategy structure with information
- * from the catalogs. First we must advance the command counter
- * so that we will see the newly-entered index catalog tuples.
- * ----------------
+ /*
+ * fill in the index strategy structure with information from the
+ * catalogs. First we must advance the command counter so that we
+ * will see the newly-entered index catalog tuples.
*/
CommandCounterIncrement();
attrelid, accessMethodObjectId,
amstrategies, amsupport, numatts);
- /* ----------------
- * store the strategy information in the index reldesc
- * ----------------
+ /*
+ * store the strategy information in the index reldesc
*/
RelationSetIndexSupport(indexRelation, strategy, support);
}
SetReindexProcessing(false);
- /* ----------------
- * check parameters
- * ----------------
+ /*
+ * check parameters
*/
if (indexInfo->ii_NumIndexAttrs < 1 ||
indexInfo->ii_NumKeyAttrs < 1)
elog(ERROR, "must index at least one attribute");
- /* ----------------
- * get heap relation oid and open the heap relation
- * ----------------
+ /*
+ * get heap relation oid and open the heap relation
*/
heapoid = GetHeapRelationOid(heapRelationName, indexRelationName, istemp);
*/
heapRelation = heap_open(heapoid, ShareLock);
- /* ----------------
- * construct new tuple descriptor
- * ----------------
+ /*
+ * construct new tuple descriptor
*/
if (OidIsValid(indexInfo->ii_FuncOid))
indexTupDesc = BuildFuncTupleDesc(indexInfo->ii_FuncOid);
* change this */
}
- /* ----------------
- * create the index relation
- * ----------------
+ /*
+ * create the index relation
*/
indexRelation = heap_create(indexRelationName, indexTupDesc,
istemp, false, allow_system_table_mods);
*/
LockRelation(indexRelation, AccessExclusiveLock);
- /* ----------------
- * construct the index relation descriptor
+ /*
+ * construct the index relation descriptor
*
- * XXX should have a proper way to create cataloged relations
- * ----------------
+ * XXX should have a proper way to create cataloged relations
*/
ConstructIndexReldesc(indexRelation, accessMethodObjectId);
*/
heap_storage_create(indexRelation);
- /* ----------------
- * now update the object id's of all the attribute
- * tuple forms in the index relation's tuple descriptor
- * ----------------
+ /*
+ * now update the object id's of all the attribute tuple forms in the
+ * index relation's tuple descriptor
*/
InitializeAttributeOids(indexRelation,
indexInfo->ii_NumIndexAttrs,
indexoid);
- /* ----------------
- * append ATTRIBUTE tuples for the index
- * ----------------
+ /*
+ * append ATTRIBUTE tuples for the index
*/
AppendAttributeTuples(indexRelation, indexInfo->ii_NumIndexAttrs);
UpdateIndexRelation(indexoid, heapoid, indexInfo,
classObjectId, islossy, primary);
- /* ----------------
- * initialize the index strategy
- * ----------------
+ /*
+ * initialize the index strategy
*/
InitIndexStrategy(indexInfo->ii_NumIndexAttrs,
indexRelation,
Assert(OidIsValid(indexId));
- /* ----------------
- * To drop an index safely, we must grab exclusive lock on its parent
- * table; otherwise there could be other backends using the index!
- * Exclusive lock on the index alone is insufficient because the index
- * access routines are a little slipshod about obtaining adequate locking
- * (see ExecOpenIndices()). We do grab exclusive lock on the index too,
- * just to be safe. Both locks must be held till end of transaction,
- * else other backends will still see this index in pg_index.
- * ----------------
+ /*
+ * To drop an index safely, we must grab exclusive lock on its parent
+ * table; otherwise there could be other backends using the index!
+ * Exclusive lock on the index alone is insufficient because the index
+ * access routines are a little slipshod about obtaining adequate
+ * locking (see ExecOpenIndices()). We do grab exclusive lock on the
+ * index too, just to be safe. Both locks must be held till end of
+ * transaction, else other backends will still see this index in
+ * pg_index.
*/
heapId = IndexGetRelation(indexId);
userHeapRelation = heap_open(heapId, AccessExclusiveLock);
userIndexRelation = index_open(indexId);
LockRelation(userIndexRelation, AccessExclusiveLock);
- /* ----------------
- * Note: unlike heap_drop_with_catalog, we do not need to prevent
- * deletion of system indexes here; that's checked for upstream.
- * If we did check it here, deletion of TOAST tables would fail...
- * ----------------
+ /*
+ * Note: unlike heap_drop_with_catalog, we do not need to prevent
+ * deletion of system indexes here; that's checked for upstream. If we
+ * did check it here, deletion of TOAST tables would fail...
*/
- /* ----------------
+ /*
* fix DESCRIPTION relation
- * ----------------
*/
DeleteComments(indexId);
- /* ----------------
+ /*
* fix RELATION relation
- * ----------------
*/
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
heap_close(relationRelation, RowExclusiveLock);
- /* ----------------
+ /*
* fix ATTRIBUTE relation
- * ----------------
*/
attributeRelation = heap_openr(AttributeRelationName, RowExclusiveLock);
}
heap_close(attributeRelation, RowExclusiveLock);
- /* ----------------
+ /*
* fix INDEX relation
- * ----------------
*/
indexRelation = heap_openr(IndexRelationName, RowExclusiveLock);
int i;
int numKeys;
- /* ----------------
- * count the number of keys, and copy them into the IndexInfo
- * ----------------
+ /*
+ * count the number of keys, and copy them into the IndexInfo
*/
numKeys = 0;
for (i = 0; i < INDEX_MAX_KEYS &&
}
ii->ii_NumKeyAttrs = numKeys;
- /* ----------------
- * Handle functional index.
+ /*
+ * Handle functional index.
*
- * If we have a functional index then the number of
- * attributes defined in the index must be 1 (the function's
- * single return value). Otherwise it's same as number of keys.
- * ----------------
+ * If we have a functional index then the number of attributes defined in
+ * the index must be 1 (the function's single return value).
+ * Otherwise it's the same as the number of keys.
*/
ii->ii_FuncOid = indexStruct->indproc;
else
ii->ii_NumIndexAttrs = numKeys;
- /* ----------------
- * If partial index, convert predicate into expression nodetree
- * ----------------
+ /*
+ * If partial index, convert predicate into expression nodetree
*/
if (VARSIZE(&indexStruct->indpred) != 0)
{
if (OidIsValid(indexInfo->ii_FuncOid))
{
- /* ----------------
- * Functional index --- compute the single index attribute
- * ----------------
+
+ /*
+ * Functional index --- compute the single index attribute
*/
FunctionCallInfoData fcinfo;
bool anynull = false;
}
else
{
- /* ----------------
- * Plain index --- for each attribute we need from the heap tuple,
- * get the attribute and stick it into the datum and nullv arrays.
- * ----------------
+
+ /*
+ * Plain index --- for each attribute we need from the heap tuple,
+ * get the attribute and stick it into the datum and nullv arrays.
*/
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
{
relid);
}
- /* ----------------
- * Update hasindex in pg_class.
- * ----------------
+ /*
+ * Update hasindex in pg_class.
*/
if (pg_class_scan)
LockBuffer(pg_class_scan->rs_cbuf, BUFFER_LOCK_EXCLUSIVE);
HeapScanDesc pg_class_scan = NULL;
bool in_place_upd;
- /* ----------------
+ /*
* This routine handles updates for both the heap and index relation
- * statistics. In order to guarantee that we're able to *see* the index
- * relation tuple, we bump the command counter id here. The index
- * relation tuple was created in the current transaction.
- * ----------------
+ * statistics. In order to guarantee that we're able to *see* the
+ * index relation tuple, we bump the command counter id here. The
+ * index relation tuple was created in the current transaction.
*/
CommandCounterIncrement();
- /* ----------------
+ /*
* CommandCounterIncrement() flushes invalid cache entries, including
* those for the heap and index relations for which we're updating
* statistics. Now that the cache is flushed, it's safe to open the
* relation again. We need the relation open in order to figure out
* how many blocks it contains.
- * ----------------
*/
/*
/* Grab lock to be held till end of xact (probably redundant...) */
LockRelation(whichRel, ShareLock);
- /* ----------------
+ /*
* Find the RELATION relation tuple for the given relation.
- * ----------------
*/
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
relid);
}
- /* ----------------
+ /*
* Figure values to insert.
*
- * If we found zero tuples in the scan, do NOT believe it; instead put
- * a bogus estimate into the statistics fields. Otherwise, the common
+ * If we found zero tuples in the scan, do NOT believe it; instead put a
+ * bogus estimate into the statistics fields. Otherwise, the common
* pattern "CREATE TABLE; CREATE INDEX; insert data" leaves the table
- * with zero size statistics until a VACUUM is done. The optimizer will
- * generate very bad plans if the stats claim the table is empty when
- * it is actually sizable. See also CREATE TABLE in heap.c.
- * ----------------
+ * with zero size statistics until a VACUUM is done. The optimizer
+ * will generate very bad plans if the stats claim the table is empty
+ * when it is actually sizable. See also CREATE TABLE in heap.c.
*/
relpages = RelationGetNumberOfBlocks(whichRel);
whichRel->rd_rel->relpages = relpages;
whichRel->rd_rel->reltuples = reltuples;
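/*
 * A minimal sketch of the fallback policy described in the comment above,
 * pulled out as an illustrative helper (my_fixup_empty_stats is not a
 * routine in this patch, and the substitute numbers are placeholders, not
 * the values the backend actually uses).
 */
static void
my_fixup_empty_stats(long *relpages, double *reltuples)
{
	if (*reltuples == 0)
	{
		/* don't believe an apparently empty scan; plug in a rough guess */
		if (*relpages == 0)
			*relpages = 10;		/* placeholder */
		*reltuples = 1000;		/* placeholder */
	}
}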
- /* ----------------
- * Update statistics in pg_class.
- * ----------------
+ /*
+ * Update statistics in pg_class.
*/
if (in_place_upd)
{
ExprContext *econtext;
InsertIndexResult insertResult;
- /* ----------------
- * more & better checking is needed
- * ----------------
+ /*
+ * more & better checking is needed
*/
Assert(OidIsValid(indexRelation->rd_rel->relam)); /* XXX */
econtext = MakeExprContext(NULL, TransactionCommandContext);
#endif /* OMIT_PARTIAL_INDEX */
- /* ----------------
- * Ok, begin our scan of the base relation.
- * ----------------
+ /*
+ * Ok, begin our scan of the base relation.
*/
scan = heap_beginscan(heapRelation, /* relation */
0, /* start at end */
reltuples = indtuples = 0;
- /* ----------------
- * for each tuple in the base relation, we create an index
- * tuple and add it to the index relation. We keep a running
- * count of the number of tuples so that we can update pg_class
- * with correct statistics when we're done building the index.
- * ----------------
+ /*
+ * for each tuple in the base relation, we create an index tuple and
+ * add it to the index relation. We keep a running count of the
+ * number of tuples so that we can update pg_class with correct
+ * statistics when we're done building the index.
*/
while (HeapTupleIsValid(heapTuple = heap_getnext(scan, 0)))
{
indtuples++;
- /* ----------------
- * FormIndexDatum fills in its datum and null parameters
- * with attribute information taken from the given heap tuple.
- * ----------------
+ /*
+ * FormIndexDatum fills in its datum and null parameters with
+ * attribute information taken from the given heap tuple.
*/
FormIndexDatum(indexInfo,
heapTuple,
{
RegProcedure procedure;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(RelationIsValid(indexRelation));
Assert(PointerIsValid(indexRelation->rd_am));
procedure = indexRelation->rd_am->ambuild;
- /* ----------------
- * use the access method build procedure if supplied, else default.
- * ----------------
+ /*
+ * use the access method build procedure if supplied, else default.
*/
if (RegProcedureIsValid(procedure))
OidFunctionCall5(procedure,
accessMethodId;
bool old;
- /* ----------------
- * REINDEX within a transaction block is dangerous, because
- * if the transaction is later rolled back we have no way to
- * undo truncation of the index's physical file. Disallow it.
- * ----------------
+ /*
+ * REINDEX within a transaction block is dangerous, because if the
+ * transaction is later rolled back we have no way to undo truncation
+ * of the index's physical file. Disallow it.
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX cannot run inside a BEGIN/END block");
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.56 2001/03/22 03:59:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.57 2001/03/22 06:16:10 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
opKey[1].sk_nargs = opKey[1].sk_func.fn_nargs;
opKey[2].sk_nargs = opKey[2].sk_func.fn_nargs;
- /* ----------------
- * form scan key
- * ----------------
+ /*
+ * form scan key
*/
opKey[0].sk_argument = PointerGetDatum(operatorName);
opKey[1].sk_argument = ObjectIdGetDatum(leftObjectId);
opKey[2].sk_argument = ObjectIdGetDatum(rightObjectId);
- /* ----------------
- * begin the scan
- * ----------------
+ /*
+ * begin the scan
*/
pg_operator_scan = heap_beginscan(pg_operator_desc,
0,
3,
opKey);
- /* ----------------
- * fetch the operator tuple, if it exists, and determine
- * the proper return oid value.
- * ----------------
+ /*
+ * fetch the operator tuple, if it exists, and determine the proper
+ * return oid value.
*/
tup = heap_getnext(pg_operator_scan, 0);
*defined = false;
}
- /* ----------------
- * close the scan and return the oid.
- * ----------------
+ /*
+ * close the scan and return the oid.
*/
heap_endscan(pg_operator_scan);
bool leftDefined = false;
bool rightDefined = false;
- /* ----------------
- * look up the operator data types.
+ /*
+ * look up the operator data types.
*
- * Note: types must be defined before operators
- * ----------------
+ * Note: types must be defined before operators
*/
if (leftTypeName)
{
(OidIsValid(rightObjectId) && rightDefined)))
elog(ERROR, "OperatorGet: must have at least one argument type");
- /* ----------------
- * open the pg_operator relation
- * ----------------
+ /*
+ * open the pg_operator relation
*/
pg_operator_desc = heap_openr(OperatorRelationName, AccessShareLock);
- /* ----------------
- * get the oid for the operator with the appropriate name
- * and left/right types.
- * ----------------
+ /*
+ * get the oid for the operator with the appropriate name and
+ * left/right types.
*/
operatorObjectId = OperatorGetWithOpenRelation(pg_operator_desc,
operatorName,
rightObjectId,
defined);
- /* ----------------
- * close the relation and return the operator oid.
- * ----------------
+ /*
+ * close the relation and return the operator oid.
*/
heap_close(pg_operator_desc, AccessShareLock);
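/*
 * A minimal sketch of the open/lookup/close pattern the comments above
 * describe, assuming the calls shown in this hunk; my_operator_exists is
 * an illustrative wrapper, not a routine in this patch.
 */
static bool
my_operator_exists(char *operatorName, Oid leftObjectId, Oid rightObjectId)
{
	Relation	pg_operator_desc;
	Oid			operatorObjectId;
	bool		defined;

	pg_operator_desc = heap_openr(OperatorRelationName, AccessShareLock);
	operatorObjectId = OperatorGetWithOpenRelation(pg_operator_desc,
												   operatorName,
												   leftObjectId,
												   rightObjectId,
												   &defined);
	heap_close(pg_operator_desc, AccessShareLock);

	return OidIsValid(operatorObjectId) && defined;
}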
NameData oname;
TupleDesc tupDesc;
- /* ----------------
- * initialize our *nulls and *values arrays
- * ----------------
+ /*
+ * initialize our *nulls and *values arrays
*/
for (i = 0; i < Natts_pg_operator; ++i)
{
values[i] = (Datum) NULL; /* redundant, but safe */
}
- /* ----------------
- * initialize *values with the operator name and input data types.
- * Note that oprcode is set to InvalidOid, indicating it's a shell.
- * ----------------
+ /*
+ * initialize *values with the operator name and input data types.
+ * Note that oprcode is set to InvalidOid, indicating it's a shell.
*/
i = 0;
namestrcpy(&oname, operatorName);
values[i++] = ObjectIdGetDatum(InvalidOid);
values[i++] = ObjectIdGetDatum(InvalidOid);
- /* ----------------
- * create a new operator tuple
- * ----------------
+ /*
+ * create a new operator tuple
*/
tupDesc = pg_operator_desc->rd_att;
values,
nulls);
- /* ----------------
- * insert our "shell" operator tuple and
- * close the relation
- * ----------------
+ /*
+ * insert our "shell" operator tuple and close the relation
*/
heap_insert(pg_operator_desc, tup);
operatorObjectId = tup->t_data->t_oid;
CatalogCloseIndices(Num_pg_operator_indices, idescs);
}
- /* ----------------
- * free the tuple and return the operator oid
- * ----------------
+ /*
+ * free the tuple and return the operator oid
*/
heap_freetuple(tup);
bool leftDefined = false;
bool rightDefined = false;
- /* ----------------
- * get the left and right type oid's for this operator
- * ----------------
+ /*
+ * get the left and right type oid's for this operator
*/
if (leftTypeName)
leftObjectId = TypeGet(leftTypeName, &leftDefined);
(OidIsValid(rightObjectId) && rightDefined)))
elog(ERROR, "OperatorShellMake: no valid argument types??");
- /* ----------------
- * open pg_operator
- * ----------------
+ /*
+ * open pg_operator
*/
pg_operator_desc = heap_openr(OperatorRelationName, RowExclusiveLock);
- /* ----------------
- * add a "shell" operator tuple to the operator relation
- * and recover the shell tuple's oid.
- * ----------------
+ /*
+ * add a "shell" operator tuple to the operator relation and recover
+ * the shell tuple's oid.
*/
operatorObjectId = OperatorShellMakeWithOpenRelation(pg_operator_desc,
operatorName,
leftObjectId,
rightObjectId);
- /* ----------------
- * close the operator relation and return the oid.
- * ----------------
+
+ /*
+ * close the operator relation and return the oid.
*/
heap_close(pg_operator_desc, RowExclusiveLock);
* filling in a previously-created shell.
*/
- /* ----------------
- * look up the operator data types.
+ /*
+ * look up the operator data types.
*
- * Note: types must be defined before operators
- * ----------------
+ * Note: types must be defined before operators
*/
if (leftTypeName)
{
nulls[i] = ' ';
}
- /* ----------------
- * Look up registered procedures -- find the return type
- * of procedureName to place in "result" field.
- * Do this before shells are created so we don't
- * have to worry about deleting them later.
- * ----------------
+ /*
+ * Look up registered procedures -- find the return type of
+ * procedureName to place in "result" field. Do this before shells are
+ * created so we don't have to worry about deleting them later.
*/
MemSet(typeId, 0, FUNC_MAX_ARGS * sizeof(Oid));
if (!leftTypeName)
ReleaseSysCache(tup);
- /* ----------------
- * find restriction
- * ----------------
+ /*
+ * find restriction
*/
if (restrictionName)
{ /* optional */
else
values[Anum_pg_operator_oprrest - 1] = ObjectIdGetDatum(InvalidOid);
- /* ----------------
- * find join - only valid for binary operators
- * ----------------
+ /*
+ * find join - only valid for binary operators
*/
if (joinName)
{ /* optional */
else
values[Anum_pg_operator_oprjoin - 1] = ObjectIdGetDatum(InvalidOid);
- /* ----------------
+ /*
* set up values in the operator tuple
- * ----------------
*/
i = 0;
namestrcpy(&oname, operatorName);
elog(ERROR, "OperatorCreate: only binary operators can have sort links");
}
- /* ----------------
- * Use OperatorDef() to define the specified operator and
- * also create shells for the operator's associated operators
- * if they don't already exist.
- * ----------------
+ /*
+ * Use OperatorDef() to define the specified operator and also create
+ * shells for the operator's associated operators if they don't
+ * already exist.
*/
OperatorDef(operatorName,
leftTypeName,
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.54 2001/03/22 03:59:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.55 2001/03/22 06:16:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
TupleDesc tupDesc;
Oid retval;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(PointerIsValid(prosrc));
Assert(PointerIsValid(probin));
if (strcmp(procedureName, GENERICSETNAME) == 0)
{
#ifdef SETS_FIXED
- /* ----------
- * The code below doesn't work any more because the
- * PROSRC system cache and the pg_proc_prosrc_index
- * have been removed. Instead a sequential heap scan
- * or something better must get implemented. The reason
- * for removing is that nbtree index crashes if sources
- * exceed 2K --- what's likely for procedural languages.
+
+ /*
+ * The code below doesn't work any more because the PROSRC
+ * system cache and the pg_proc_prosrc_index have been
+ * removed. Instead a sequential heap scan or something better
+ * must get implemented. The reason for the removal is that the
+ * nbtree index crashes if sources exceed 2K --- which is likely
+ * for procedural languages.
*
* 1999/09/30 Jan
- * ----------
*/
text *prosrctext;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.60 2001/03/22 03:59:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.61 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Oid typoid;
ScanKeyData typeKey[1];
- /* ----------------
- * initialize the scan key and begin a scan of pg_type
- * ----------------
+ /*
+ * initialize the scan key and begin a scan of pg_type
*/
ScanKeyEntryInitialize(typeKey,
0,
1,
typeKey);
- /* ----------------
- * get the type tuple, if it exists.
- * ----------------
+ /*
+ * get the type tuple, if it exists.
*/
tup = heap_getnext(scan, 0);
- /* ----------------
- * if no type tuple exists for the given type name, then
- * end the scan and return appropriate information.
- * ----------------
+ /*
+ * if no type tuple exists for the given type name, then end the scan
+ * and return appropriate information.
*/
if (!HeapTupleIsValid(tup))
{
return InvalidOid;
}
- /* ----------------
- * here, the type tuple does exist so we pull information from
- * the typisdefined field of the tuple and return the tuple's
- * oid, which is the oid of the type.
- * ----------------
+ /*
+ * here, the type tuple does exist so we pull information from the
+ * typisdefined field of the tuple and return the tuple's oid, which
+ * is the oid of the type.
*/
*defined = (bool) ((Form_pg_type) GETSTRUCT(tup))->typisdefined;
typoid = tup->t_data->t_oid;
Relation pg_type_desc;
Oid typeoid;
- /* ----------------
- * open the pg_type relation
- * ----------------
+ /*
+ * open the pg_type relation
*/
pg_type_desc = heap_openr(TypeRelationName, AccessShareLock);
- /* ----------------
- * scan the type relation for the information we want
- * ----------------
+ /*
+ * scan the type relation for the information we want
*/
typeoid = TypeGetWithOpenRelation(pg_type_desc,
typeName,
defined);
- /* ----------------
- * close the type relation and return the type oid.
- * ----------------
+ /*
+ * close the type relation and return the type oid.
*/
heap_close(pg_type_desc, AccessShareLock);
NameData name;
TupleDesc tupDesc;
- /* ----------------
- * initialize our *nulls and *values arrays
- * ----------------
+ /*
+ * initialize our *nulls and *values arrays
*/
for (i = 0; i < Natts_pg_type; ++i)
{
values[i] = (Datum) NULL; /* redundant, but safe */
}
- /* ----------------
- * initialize *values with the type name and dummy values
- * ----------------
+ /*
+ * initialize *values with the type name and dummy values
*/
i = 0;
namestrcpy(&name, typeName);
values[i++] = DirectFunctionCall1(textin,
CStringGetDatum(typeName)); /* 17 */
- /* ----------------
- * create a new type tuple with FormHeapTuple
- * ----------------
+ /*
+ * create a new type tuple with FormHeapTuple
*/
tupDesc = pg_type_desc->rd_att;
tup = heap_formtuple(tupDesc, values, nulls);
- /* ----------------
- * insert the tuple in the relation and get the tuple's oid.
- * ----------------
+ /*
+ * insert the tuple in the relation and get the tuple's oid.
*/
heap_insert(pg_type_desc, tup);
typoid = tup->t_data->t_oid;
CatalogIndexInsert(idescs, Num_pg_type_indices, pg_type_desc, tup);
CatalogCloseIndices(Num_pg_type_indices, idescs);
}
- /* ----------------
- * free the tuple and return the type-oid
- * ----------------
+
+ /*
+ * free the tuple and return the type-oid
*/
heap_freetuple(tup);
Assert(PointerIsValid(typeName));
- /* ----------------
- * open pg_type
- * ----------------
+ /*
+ * open pg_type
*/
pg_type_desc = heap_openr(TypeRelationName, RowExclusiveLock);
- /* ----------------
- * insert the shell tuple
- * ----------------
+ /*
+ * insert the shell tuple
*/
typoid = TypeShellMakeWithOpenRelation(pg_type_desc, typeName);
- /* ----------------
- * close pg_type and return the tuple's oid.
- * ----------------
+ /*
+ * close pg_type and return the tuple's oid.
*/
heap_close(pg_type_desc, RowExclusiveLock);
Oid argList[FUNC_MAX_ARGS];
ScanKeyData typeKey[1];
- /* ----------------
- * check that the type is not already defined. It might exist as
- * a shell type, however (but only if assignedTypeOid is not given).
- * ----------------
+ /*
+ * check that the type is not already defined. It might exist as a
+ * shell type, however (but only if assignedTypeOid is not given).
*/
typeObjectId = TypeGet(typeName, &defined);
if (OidIsValid(typeObjectId) &&
(defined || assignedTypeOid != InvalidOid))
elog(ERROR, "TypeCreate: type %s already defined", typeName);
- /* ----------------
- * if this type has an associated elementType, then we check that
- * it is defined.
- * ----------------
+ /*
+ * if this type has an associated elementType, then we check that it
+ * is defined.
*/
if (elementTypeName)
{
elog(ERROR, "TypeCreate: type %s is not defined", elementTypeName);
}
- /* ----------------
- * XXX comment me
- * ----------------
+ /*
+ * XXX comment me
*/
if (externalSize == 0)
externalSize = -1; /* variable length */
- /* ----------------
- * initialize arrays needed by FormHeapTuple
- * ----------------
+ /*
+ * initialize arrays needed by FormHeapTuple
*/
for (i = 0; i < Natts_pg_type; ++i)
{
if (internalSize == 0)
internalSize = -1;
- /* ----------------
- * initialize the *values information
- * ----------------
+ /*
+ * initialize the *values information
*/
i = 0;
namestrcpy(&name, typeName);
values[i++] = ObjectIdGetDatum(procOid); /* 11 - 14 */
}
- /* ----------------
+ /*
* set default alignment
- * ----------------
*/
values[i++] = CharGetDatum(alignment); /* 15 */
- /* ----------------
- * set default storage for TOAST
- * ----------------
+ /*
+ * set default storage for TOAST
*/
values[i++] = CharGetDatum(storage); /* 16 */
- /* ----------------
- * initialize the default value for this type.
- * ----------------
+ /*
+ * initialize the default value for this type.
*/
values[i] = DirectFunctionCall1(textin, /* 17 */
CStringGetDatum(defaultTypeValue ? defaultTypeValue : "-"));
- /* ----------------
- * open pg_type and begin a scan for the type name.
- * ----------------
+ /*
+ * open pg_type and begin a scan for the type name.
*/
pg_type_desc = heap_openr(TypeRelationName, RowExclusiveLock);
1,
typeKey);
- /* ----------------
- * define the type either by adding a tuple to the type
- * relation, or by updating the fields of the "shell" tuple
- * already there.
- * ----------------
+ /*
+ * define the type either by adding a tuple to the type relation, or
+ * by updating the fields of the "shell" tuple already there.
*/
tup = heap_getnext(pg_type_scan, 0);
if (HeapTupleIsValid(tup))
typeObjectId = tup->t_data->t_oid;
}
- /* ----------------
- * finish up
- * ----------------
+ /*
+ * finish up
*/
heap_endscan(pg_type_scan);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.12 2001/01/24 19:42:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.13 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* skip the rule rewrite and time qual stuff
*/
- /* ----------------------------------------------------------
- * 1) plan the main query, everything from an eye node back to
- a Tee
- * ---------------------------------------------------------- */
+ /*
+ * 1) plan the main query, everything from an eye node back to a
+ * Tee
+ */
parsetree = qList->qtrees[0];
/*
plan = planner(parsetree);
- /* ----------------------------------------------------------
- * 2) plan the tee queries, (subgraphs rooted from a Tee)
- by the time the eye is processed, all tees that contribute
- to that eye will have been included in the teeInfo list
- * ---------------------------------------------------------- */
+ /*
+ * 2) plan the tee queries (subgraphs rooted from a Tee); by the
+ * time the eye is processed, all tees that contribute to that eye
+ * will have been included in the teeInfo list
+ */
if (teeInfo)
{
int t;
}
}
- /* ----------------------------------------------------------
- * 3) replace the tee table scans in the main plan with
- actual tee plannodes
- * ---------------------------------------------------------- */
+ /*
+ * 3) replace the tee table scans in the main plan with actual
+ * tee plannodes
+ */
plan = replaceTeeScans(plan, parsetree, teeInfo);
queryDesc = CreateQueryDesc(parsetree,
plan,
whereToSendOutput);
- /* ----------------
- * call ExecStart to prepare the plan for execution
- * ----------------
+
+ /*
+ * call ExecStart to prepare the plan for execution
*/
attinfo = ExecutorStart(queryDesc, NULL);
orig = q->qtrees[0];
- /*-------------------------------------------------------------------
- step 1:
-
- form a combined range table from all the range tables in the original
- query as well as the input nodes
-
- form a combined qualification from the qual in the original plus
- the quals of the input nodes
- -------------------------------------------------------------------
- */
+ /*
+ * step 1:
+ *
+ * form a combined range table from all the range tables in the original
+ * query as well as the input nodes
+ *
+ * form a combined qualification from the qual in the original plus the
+ * quals of the input nodes
+ */
/* start with the original range table */
rtable = orig->rtable;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.15 2001/03/22 03:59:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.16 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
for (i = 0; i < Natts_pg_statistic; ++i)
nulls[i] = ' ';
- /* ----------------
- * initialize values[]
- * ----------------
+ /*
+ * initialize values[]
*/
i = 0;
values[i++] = ObjectIdGetDatum(relid); /* starelid */
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.123 2001/03/22 03:59:21 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.124 2001/03/22 06:16:11 momjian Exp $
*
* NOTES
* The PerformAddAttribute() code, like most of the relation
{
MemoryContext oldcontext;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(PortalIsValid(portal));
AssertArg(portal->cleanup == PortalCleanup);
- /* ----------------
- * set proper portal-executor context before calling ExecMain.
- * ----------------
+ /*
+ * set proper portal-executor context before calling ExecMain.
*/
oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
- /* ----------------
- * tell the executor to shutdown the query
- * ----------------
+ /*
+ * tell the executor to shutdown the query
*/
ExecutorEnd(PortalGetQueryDesc(portal), PortalGetState(portal));
- /* ----------------
- * switch back to previous context
- * ----------------
+ /*
+ * switch back to previous context
*/
MemoryContextSwitchTo(oldcontext);
}
EState *estate;
MemoryContext oldcontext;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
if (name == NULL)
{
return;
}
- /* ----------------
- * get the portal from the portal name
- * ----------------
+ /*
+ * get the portal from the portal name
*/
portal = GetPortalByName(name);
if (!PortalIsValid(portal))
return;
}
- /* ----------------
- * switch into the portal context
- * ----------------
+ /*
+ * switch into the portal context
*/
oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
- /* ----------------
- * tell the destination to prepare to receive some tuples.
+ /*
+ * tell the destination to prepare to receive some tuples.
*
- * If we've been asked for a MOVE, make a temporary QueryDesc
- * with the appropriate dummy destination.
- * ----------------
+ * If we've been asked for a MOVE, make a temporary QueryDesc with the
+ * appropriate dummy destination.
*/
queryDesc = PortalGetQueryDesc(portal);
estate = PortalGetState(portal);
tag,
dest);
- /* ----------------
- * Determine which direction to go in, and check to see if we're already
- * at the end of the available tuples in that direction. If so, do
- * nothing. (This check exists because not all plan node types are
- * robust about being called again if they've already returned NULL
- * once.) If it's OK to do the fetch, call the executor. Then,
- * update the atStart/atEnd state depending on the number of tuples
- * that were retrieved.
- * ----------------
+ /*
+ * Determine which direction to go in, and check to see if we're
+ * already at the end of the available tuples in that direction. If
+ * so, do nothing. (This check exists because not all plan node types
+ * are robust about being called again if they've already returned
+ * NULL once.) If it's OK to do the fetch, call the executor. Then,
+ * update the atStart/atEnd state depending on the number of tuples
+ * that were retrieved.
*/
if (forward)
{
}
}
- /* ----------------
- * Clean up and switch back to old context.
- * ----------------
+ /*
+ * Clean up and switch back to old context.
*/
if (dest == None) /* MOVE */
pfree(queryDesc);
MemoryContextSwitchTo(oldcontext);
- /* ----------------
- * Note: the "end-of-command" tag is returned by higher-level
- * utility code
- * ----------------
+ /*
+ * Note: the "end-of-command" tag is returned by higher-level utility
+ * code
*/
}
{
Portal portal;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
if (name == NULL)
{
return;
}
- /* ----------------
- * get the portal from the portal name
- * ----------------
+ /*
+ * get the portal from the portal name
*/
portal = GetPortalByName(name);
if (!PortalIsValid(portal))
return;
}
- /* ----------------
- * Note: PortalCleanup is called as a side-effect
- * ----------------
+ /*
+ * Note: PortalCleanup is called as a side-effect
*/
PortalDrop(&portal);
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.135 2001/03/22 03:59:21 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.136 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
ExecStoreTuple(tuple, slot, InvalidBuffer, false);
- /* ----------------
+ /*
* Check the constraints of the tuple
- * ----------------
*/
if (rel->rd_att->constr)
ExecConstraints("CopyFrom", resultRelInfo, slot, estate);
- /* ----------------
+ /*
* OK, store the tuple and create index entries for it
- * ----------------
*/
heap_insert(rel, tuple);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.73 2001/03/22 03:59:22 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.74 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
stmt->relname, NAMEDATALEN);
StrNCpy(relname, stmt->relname, NAMEDATALEN);
- /* ----------------
- * Look up inheritance ancestors and generate relation schema,
- * including inherited attributes.
- * ----------------
+ /*
+ * Look up inheritance ancestors and generate relation schema,
+ * including inherited attributes.
*/
schema = MergeAttributes(schema, stmt->inhRelnames, stmt->istemp,
&inheritOids, &old_constraints);
if (numberOfAttributes <= 0)
elog(ERROR, "DefineRelation: please inherit from a relation or define an attribute");
- /* ----------------
- * create a relation descriptor from the relation schema
- * and create the relation. Note that in this stage only
- * inherited (pre-cooked) defaults and constraints will be
- * included into the new relation. (BuildDescForRelation
- * takes care of the inherited defaults, but we have to copy
- * inherited constraints here.)
- * ----------------
+ /*
+ * create a relation descriptor from the relation schema and create
+ * the relation. Note that in this stage only inherited (pre-cooked)
+ * defaults and constraints will be included into the new relation.
+ * (BuildDescForRelation takes care of the inherited defaults, but we
+ * have to copy inherited constraints here.)
*/
descriptor = BuildDescForRelation(schema, relname);
List *entry;
HeapTuple tuple;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
AssertArg(OidIsValid(relationId));
if (supers == NIL)
return;
- /* ----------------
+ /*
* Catalog INHERITS information using direct ancestors only.
- * ----------------
*/
relation = heap_openr(InheritsRelationName, RowExclusiveLock);
desc = RelationGetDescr(relation);
* ----------------
*/
- /* ----------------
- * 1. append after each relationId, its superclasses, recursively.
- * ----------------
+ /*
+ * 1. append after each relationId, its superclasses, recursively.
*/
foreach(entry, supers)
{
lnext(current) = next;
}
- /* ----------------
- * 2. remove all but last of duplicates.
- * ----------------
+ /*
+ * 2. remove all but last of duplicates.
*/
foreach(entry, supers)
{
}
}
- /* ----------------
+ /*
* Catalog IPL information using expanded list.
- * ----------------
*/
relation = heap_openr(InheritancePrecidenceListRelationName, RowExclusiveLock);
desc = RelationGetDescr(relation);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.53 2001/03/22 03:59:22 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.54 2001/03/22 06:16:11 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
if (functionName == NULL)
elog(ERROR, "Define: \"procedure\" unspecified");
- /* ----------------
- * now have OperatorCreate do all the work..
- * ----------------
+ /*
+ * now have OperatorCreate do all the work..
*/
OperatorCreate(oprName, /* operator name */
typeName1, /* first type name */
if (internalLength != -1 && storage != 'p')
elog(ERROR, "Define: fixed size types must have storage PLAIN");
- /* ----------------
- * now have TypeCreate do all the real work.
- * ----------------
+ /*
+ * now have TypeCreate do all the real work.
*/
TypeCreate(typeName, /* type name */
InvalidOid, /* preassigned type oid (not done here) */
alignment, /* required alignment */
storage); /* TOAST strategy */
- /* ----------------
- * When we create a base type (as opposed to a complex type)
- * we need to have an array entry for it in pg_type as well.
- * ----------------
+ /*
+ * When we create a base type (as opposed to a complex type) we need
+ * to have an array entry for it in pg_type as well.
*/
shadow_type = makeArrayTypeName(typeName);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.46 2001/03/22 03:59:23 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.47 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
nargs++;
}
- /* ----------------
+ /*
* Lookup the function procedure to get its OID and result type.
*
- * We rely on parse_func.c to find the correct function in the
- * possible presence of binary-compatible types. However, parse_func
- * may do too much: it will accept a function that requires run-time
- * coercion of input types, and the executor is not currently set up
- * to support that. So, check to make sure that the selected function
- * has exact-match or binary-compatible input types.
- * ----------------
+ * We rely on parse_func.c to find the correct function in the possible
+ * presence of binary-compatible types. However, parse_func may do
+ * too much: it will accept a function that requires run-time coercion
+ * of input types, and the executor is not currently set up to support
+ * that. So, check to make sure that the selected function has
+ * exact-match or binary-compatible input types.
*/
if (!func_get_detail(funcIndex->name, nargs, argTypes,
&funcid, &rettype, &retset, &true_typeids))
HeapTuple tuple;
bool overwrite = false;
- /* ----------------
- * REINDEX within a transaction block is dangerous, because
- * if the transaction is later rolled back we have no way to
- * undo truncation of the index's physical file. Disallow it.
- * ----------------
+ /*
+ * REINDEX within a transaction block is dangerous, because if the
+ * transaction is later rolled back we have no way to undo truncation
+ * of the index's physical file. Disallow it.
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX cannot run inside a BEGIN/END block");
{
HeapTuple tuple;
- /* ----------------
- * REINDEX within a transaction block is dangerous, because
- * if the transaction is later rolled back we have no way to
- * undo truncation of the index's physical file. Disallow it.
- * ----------------
+ /*
+ * REINDEX within a transaction block is dangerous, because if the
+ * transaction is later rolled back we have no way to undo truncation
+ * of the index's physical file. Disallow it.
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX cannot run inside a BEGIN/END block");
int i;
- /* ----------------
+ /*
* Check permission
- * ----------------
*/
if (!superuser())
{
"permitted to create procedural languages");
}
- /* ----------------
- * Translate the language name and check that
- * this language doesn't already exist
- * ----------------
+ /*
+ * Translate the language name and check that this language doesn't
+ * already exist
*/
case_translate_language_name(stmt->plname, languageName);
0, 0, 0))
elog(ERROR, "Language %s already exists", languageName);
- /* ----------------
- * Lookup the PL handler function and check that it is
- * of return type Opaque
- * ----------------
+ /*
+ * Lookup the PL handler function and check that it is of return type
+ * Opaque
*/
memset(typev, 0, sizeof(typev));
procTup = SearchSysCache(PROCNAME,
elog(ERROR, "PL handler function %s() isn't of return type Opaque",
stmt->plhandler);
- /* ----------------
+ /*
* Insert the new language into pg_language
- * ----------------
*/
for (i = 0; i < Natts_pg_language; i++)
{
HeapTuple langTup;
Relation rel;
- /* ----------------
+ /*
* Check permission
- * ----------------
*/
if (!superuser())
{
"permitted to drop procedural languages");
}
- /* ----------------
- * Translate the language name, check that
- * this language exist and is a PL
- * ----------------
+ /*
+ * Translate the language name, check that this language exists and is
+ * a PL
*/
case_translate_language_name(stmt->plname, languageName);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.89 2001/03/22 03:59:23 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.90 2001/03/22 06:16:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!pg_ownercheck(GetUserId(), stmt->relname, RELNAME))
elog(ERROR, "%s: %s", stmt->relname, aclcheck_error_strings[ACLCHECK_NOT_OWNER]);
- /* ----------
- * If trigger is a constraint, user trigger name as constraint
- * name and build a unique trigger name instead.
- * ----------
+ /*
+ * If the trigger is a constraint, use the trigger name as the
+ * constraint name and build a unique trigger name instead.
*/
if (stmt->isconstraint)
{
heap_endscan(tgscan);
- /* ----------
- * If we deleted any triggers, must update pg_class entry and
- * advance command counter to make the updated entry visible.
- * This is fairly annoying, since we'e just going to drop the
- * durn thing later, but it's necessary to have a consistent
- * state in case we do CommandCounterIncrement() below ---
- * if RelationBuildTriggers() runs, it will complain otherwise.
- * Perhaps RelationBuildTriggers() shouldn't be so picky...
- * ----------
+ /*
+ * If we deleted any triggers, must update pg_class entry and advance
+ * command counter to make the updated entry visible. This is fairly
+ * annoying, since we're just going to drop the durn thing later, but
+ * it's necessary to have a consistent state in case we do
+ * CommandCounterIncrement() below --- if RelationBuildTriggers()
+ * runs, it will complain otherwise. Perhaps RelationBuildTriggers()
+ * shouldn't be so picky...
*/
if (found)
{
CommandCounterIncrement();
}
- /* ----------
+ /*
* Also drop all constraint triggers referencing this relation
- * ----------
*/
ScanKeyEntryInitialize(&key, 0, Anum_pg_trigger_tgconstrrelid,
F_OIDEQ, RelationGetRelid(rel));
DropTrigger(&stmt);
- /* ----------
- * Need to do a command counter increment here to show up
- * new pg_class.reltriggers in the next loop iteration
- * (in case there are multiple referential integrity action
- * triggers for the same FK table defined on the PK table).
- * ----------
+ /*
+ * Need to do a command counter increment here so the new
+ * pg_class.reltriggers value shows up in the next loop iteration
+ * (in case there are multiple referential integrity action
+ * triggers for the same FK table defined on the PK table).
*/
CommandCounterIncrement();
List *sl;
DeferredTriggerStatus trigstate;
- /* ----------
- * Not deferrable triggers (i.e. normal AFTER ROW triggers
- * and constraints declared NOT DEFERRABLE, the state is
- * allways false.
- * ----------
+ /*
+ * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
+ * constraints declared NOT DEFERRABLE), the state is always false.
*/
if ((itemstate & TRIGGER_DEFERRED_DEFERRABLE) == 0)
return false;
- /* ----------
+ /*
* Lookup if we know an individual state for this trigger
- * ----------
*/
foreach(sl, deftrig_trigstates)
{
return trigstate->dts_tgisdeferred;
}
- /* ----------
- * No individual state known - so if the user issued a
- * SET CONSTRAINT ALL ..., we return that instead of the
- * triggers default state.
- * ----------
+ /*
+ * No individual state known - so if the user issued a SET CONSTRAINT
+ * ALL ..., we return that instead of the trigger's default state.
*/
if (deftrig_all_isset)
return deftrig_all_isdeferred;
- /* ----------
- * No ALL state known either, remember the default state
- * as the current and return that.
- * ----------
+ /*
+ * No ALL state known either, remember the default state as the
+ * current and return that.
*/
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
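/*
 * A minimal sketch of the decision chain described in the comments above.
 * The function and parameter names (my_trigger_is_deferred, tg_default)
 * are illustrative, and the field name dts_tgoid is assumed; only the
 * deftrig_* variables and the list walk mirror the surrounding code.
 */
static bool
my_trigger_is_deferred(Oid tgoid, bool deferrable, bool tg_default)
{
	List	   *sl;

	/* NOT DEFERRABLE constraints and plain AFTER ROW triggers: never deferred */
	if (!deferrable)
		return false;

	/* 1. an individual SET CONSTRAINTS state recorded for this trigger? */
	foreach(sl, deftrig_trigstates)
	{
		DeferredTriggerStatus trigstate = (DeferredTriggerStatus) lfirst(sl);

		if (trigstate->dts_tgoid == tgoid)		/* field name assumed */
			return trigstate->dts_tgisdeferred;
	}

	/* 2. a SET CONSTRAINTS ALL ... state for this transaction? */
	if (deftrig_all_isset)
		return deftrig_all_isdeferred;

	/* 3. otherwise fall back to the trigger's own default state */
	return tg_default;
}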
Buffer oldbuffer;
Buffer newbuffer;
- /* ----------
+ /*
* Open the heap and fetch the required OLD and NEW tuples.
- * ----------
*/
rel = heap_open(event->dte_relid, NoLock);
elog(ERROR, "deferredTriggerExecute: failed to fetch new tuple");
}
- /* ----------
+ /*
* Setup the trigger information
- * ----------
*/
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = (event->dte_event & TRIGGER_EVENT_OPMASK) |
break;
}
- /* ----------
- * Call the trigger and throw away an eventually returned
- * updated tuple.
- * ----------
+ /*
+ * Call the trigger and throw away any updated tuple it may
+ * return.
*/
rettuple = ExecCallTriggerFunc(LocTriggerData.tg_trigger,
&LocTriggerData,
if (rettuple != NULL && rettuple != &oldtuple && rettuple != &newtuple)
heap_freetuple(rettuple);
- /* ----------
- * Might have been a referential integrity constraint trigger.
- * Reset the snapshot overriding flag.
- * ----------
+ /*
+ * Might have been a referential integrity constraint trigger. Reset
+ * the snapshot overriding flag.
*/
ReferentialIntegritySnapshotOverride = false;
- /* ----------
+ /*
* Release buffers and close the relation
- * ----------
*/
if (ItemPointerIsValid(&(event->dte_oldctid)))
ReleaseBuffer(oldbuffer);
int i;
MemoryContext per_tuple_context;
- /* ----------
- * For now we process all events - to speedup transaction blocks
- * we need to remember the actual end of the queue at EndQuery
- * and process only events that are newer. On state changes we
- * simply reset the position to the beginning of the queue and
- * process all events once with the new states when the
- * SET CONSTRAINTS ... command finishes and calls EndQuery.
- * ----------
+ /*
+ * For now we process all events - to speed up transaction blocks we
+ * need to remember the actual end of the queue at EndQuery and
+ * process only events that are newer. On state changes we simply
+ * reset the position to the beginning of the queue and process all
+ * events once with the new states when the SET CONSTRAINTS ...
+ * command finishes and calls EndQuery.
*/
/* Make a per-tuple memory context for trigger function calls */
for (event = deftrig_events; event != NULL; event = event->dte_next)
{
- /* ----------
+
+ /*
* Check if event is completely done.
- * ----------
*/
if (event->dte_event & (TRIGGER_DEFERRED_DONE |
TRIGGER_DEFERRED_CANCELED))
MemoryContextReset(per_tuple_context);
- /* ----------
+ /*
* Check each trigger item in the event.
- * ----------
*/
still_deferred_ones = false;
for (i = 0; i < event->dte_n_items; i++)
if (event->dte_item[i].dti_state & TRIGGER_DEFERRED_DONE)
continue;
- /* ----------
- * This trigger item hasn't been called yet. Check if
- * we should call it now.
- * ----------
+ /*
+ * This trigger item hasn't been called yet. Check if we
+ * should call it now.
*/
if (immediate_only && deferredTriggerCheckState(
event->dte_item[i].dti_tgoid,
continue;
}
- /* ----------
+ /*
* So let's fire it...
- * ----------
*/
deferredTriggerExecute(event, i, per_tuple_context);
event->dte_item[i].dti_state |= TRIGGER_DEFERRED_DONE;
}
- /* ----------
- * Remember in the event itself if all trigger items are
- * done.
- * ----------
+ /*
+ * Remember in the event itself if all trigger items are done.
*/
if (!still_deferred_ones)
event->dte_event |= TRIGGER_DEFERRED_DONE;
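A compressed model of the queue walk above, with made-up Event and Item structs standing in for the real deferred-trigger bookkeeping: already-done items are skipped, deferred items are left alone while only immediate triggers may run, and an event is flagged done once nothing deferred remains.

#include <stdbool.h>
#include <stdio.h>

typedef struct Item
{
    bool        done;           /* this trigger item already fired? */
    bool        deferred;       /* currently deferred to end of xact? */
} Item;

typedef struct Event
{
    struct Event *next;
    bool        done;           /* all items fired or cancelled */
    int         nitems;
    Item        item[4];
} Event;

/*
 * Walk the whole queue.  If immediate_only, skip items whose constraint is
 * deferred; otherwise fire everything still pending.  An event is marked
 * done once no deferred items remain.
 */
static void
process_events(Event *queue, bool immediate_only)
{
    Event      *ev;
    int         i;

    for (ev = queue; ev != NULL; ev = ev->next)
    {
        bool        still_deferred = false;

        if (ev->done)
            continue;
        for (i = 0; i < ev->nitems; i++)
        {
            if (ev->item[i].done)
                continue;
            if (immediate_only && ev->item[i].deferred)
            {
                still_deferred = true;
                continue;
            }
            printf("firing item %d\n", i);      /* stands in for the call */
            ev->item[i].done = true;
        }
        if (!still_deferred)
            ev->done = true;
    }
}

int
main(void)
{
    Event       e = {NULL, false, 2, {{false, false}, {false, true}}};

    process_events(&e, true);   /* fires item 0 only */
    process_events(&e, false);  /* end of xact: fires item 1 too */
    printf("event done: %d\n", e.done);
    return 0;
}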
elog(ERROR,
"DeferredTriggerBeginXact() called while inside transaction");
- /* ----------
- * Create the per transaction memory context and copy all states
- * from the per session context to here.
- * ----------
+ /*
+ * Create the per transaction memory context and copy all states from
+ * the per session context to here.
*/
deftrig_cxt = AllocSetContextCreate(TopTransactionContext,
"DeferredTriggerXact",
void
DeferredTriggerEndQuery(void)
{
- /* ----------
+
+ /*
* Ignore call if we aren't in a transaction.
- * ----------
*/
if (deftrig_cxt == NULL)
return;
void
DeferredTriggerEndXact(void)
{
- /* ----------
+
+ /*
* Ignore call if we aren't in a transaction.
- * ----------
*/
if (deftrig_cxt == NULL)
return;
void
DeferredTriggerAbortXact(void)
{
- /* ----------
+
+ /*
* Ignore call if we aren't in a transaction.
- * ----------
*/
if (deftrig_cxt == NULL)
return;
DeferredTriggerStatus state;
bool hasindex;
- /* ----------
+ /*
* Handle SET CONSTRAINTS ALL ...
- * ----------
*/
if (stmt->constraints == NIL)
{
if (!IsTransactionBlock())
{
- /* ----------
+
+ /*
* ... outside of a transaction block
*
* Drop all information about individual trigger states per
* session.
- * ----------
*/
l = deftrig_dfl_trigstates;
while (l != NIL)
}
deftrig_dfl_trigstates = NIL;
- /* ----------
+ /*
* Set the session ALL state to known.
- * ----------
*/
deftrig_dfl_all_isset = true;
deftrig_dfl_all_isdeferred = stmt->deferred;
}
else
{
- /* ----------
+
+ /*
* ... inside of a transaction block
*
* Drop all information about individual trigger states per
* transaction.
- * ----------
*/
l = deftrig_trigstates;
while (l != NIL)
}
deftrig_trigstates = NIL;
- /* ----------
+ /*
* Set the per transaction ALL state to known.
- * ----------
*/
deftrig_all_isset = true;
deftrig_all_isdeferred = stmt->deferred;
Form_pg_trigger pg_trigger;
Oid constr_oid;
- /* ----------
+ /*
* Check that only named constraints are set explicitly
- * ----------
*/
if (strcmp((char *) lfirst(l), "") == 0)
elog(ERROR, "unnamed constraints cannot be set explicitly");
- /* ----------
+ /*
* Setup to scan pg_trigger by tgconstrname ...
- * ----------
*/
ScanKeyEntryInitialize(&skey,
(bits16) 0x0,
else
tgscan = heap_beginscan(tgrel, 0, SnapshotNow, 1, &skey);
- /* ----------
+ /*
* ... and search for the constraint trigger row
- * ----------
*/
found = false;
for (;;)
break;
}
- /* ----------
- * If we found some, check that they fit the deferrability
- * but skip ON <event> RESTRICT ones, since they are silently
+ /*
+ * If we found some, check that they fit the deferrability but
+ * skip ON <event> RESTRICT ones, since they are silently
* never deferrable.
- * ----------
*/
pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
if (stmt->deferred && !pg_trigger->tgdeferrable &&
ReleaseBuffer(buffer);
}
- /* ----------
+ /*
* Not found ?
- * ----------
*/
if (!found)
elog(ERROR, "Constraint '%s' does not exist", (char *) lfirst(l));
if (!IsTransactionBlock())
{
- /* ----------
- * Outside of a transaction block set the trigger
- * states of individual triggers on session level.
- * ----------
+
+ /*
+ * Outside of a transaction block set the trigger states of
+ * individual triggers on session level.
*/
oldcxt = MemoryContextSwitchTo(deftrig_gcxt);
}
else
{
- /* ----------
- * Inside of a transaction block set the trigger
- * states of individual triggers on transaction level.
- * ----------
+
+ /*
+ * Inside of a transaction block set the trigger states of
+ * individual triggers on transaction level.
*/
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
elog(ERROR,
"DeferredTriggerSaveEvent() called outside of transaction");
- /* ----------
+ /*
* Get the CTID's of OLD and NEW
- * ----------
*/
if (oldtup != NULL)
ItemPointerCopy(&(oldtup->t_self), &(oldctid));
else
ItemPointerSetInvalid(&(newctid));
- /* ----------
+ /*
* Create a new event
- * ----------
*/
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
break;
case TRIGGER_EVENT_UPDATE:
- /* ----------
- * On UPDATE check if the tuple updated has been inserted
- * or a foreign referenced key value that's changing now
- * has been updated once before in this transaction.
- * ----------
+
+ /*
+			 * On UPDATE, check whether the tuple being updated was inserted in
+			 * this transaction, or whether a referenced foreign key value that
+			 * is changing now has already been updated once before in this
+			 * transaction.
*/
if (oldtup->t_data->t_xmin != GetCurrentTransactionId())
prev_event = NULL;
prev_event =
deferredTriggerGetPreviousEvent(rel->rd_id, &oldctid);
- /* ----------
+ /*
* Now check if one of the referenced keys is changed.
- * ----------
*/
for (i = 0; i < ntriggers; i++)
{
bool is_ri_trigger;
bool key_unchanged;
- /* ----------
+ /*
* We are interested in RI_FKEY triggers only.
- * ----------
*/
switch (triggers[i]->tgfoid)
{
if (key_unchanged)
{
- /* ----------
+
+ /*
* The key hasn't changed, so no need later to invoke
* the trigger at all. But remember other states from
* the possible earlier event.
- * ----------
*/
new_event->dte_item[i].dti_state |= TRIGGER_DEFERRED_DONE;
if (prev_event->dte_event &
TRIGGER_DEFERRED_ROW_INSERTED)
{
- /* ----------
- * This is a row inserted during our transaction.
- * So any key value is considered changed.
- * ----------
+
+ /*
+ * This is a row inserted during our
+ * transaction. So any key value is considered
+ * changed.
*/
new_event->dte_event |=
TRIGGER_DEFERRED_ROW_INSERTED;
}
else
{
- /* ----------
- * This is a row, previously updated. So
- * if this key has been changed before, we
- * still remember that it happened.
- * ----------
+
+ /*
+					 * This is a row that was previously updated. So if
+					 * this key has been changed before, we still
+					 * remember that it happened.
*/
if (prev_event->dte_item[i].dti_state &
TRIGGER_DEFERRED_KEY_CHANGED)
}
else
{
- /* ----------
+
+ /*
* Bomb out if this key has been changed before.
* Otherwise remember that we do so.
- * ----------
*/
if (prev_event)
{
NameGetDatum(&(rel->rd_rel->relname)))));
}
- /* ----------
- * This is the first change to this key, so let
- * it happen.
- * ----------
+ /*
+ * This is the first change to this key, so let it
+ * happen.
*/
new_event->dte_item[i].dti_state |=
TRIGGER_DEFERRED_KEY_CHANGED;
break;
case TRIGGER_EVENT_DELETE:
- /* ----------
- * On DELETE check if the tuple deleted has been inserted
- * or a possibly referenced key value has changed in this
+
+ /*
+			 * On DELETE, check whether the tuple being deleted was inserted in
+			 * this transaction, or whether a possibly referenced key value has
+			 * changed in this transaction.
- * ----------
*/
if (oldtup->t_data->t_xmin != GetCurrentTransactionId())
break;
- /* ----------
+ /*
* Look at the previous event to the same tuple.
- * ----------
*/
prev_event = deferredTriggerGetPreviousEvent(rel->rd_id, &oldctid);
if (prev_event->dte_event & TRIGGER_DEFERRED_KEY_CHANGED)
break;
}
- /* ----------
+ /*
* Anything's fine up to here. Add the new event to the queue.
- * ----------
*/
oldcxt = MemoryContextSwitchTo(deftrig_cxt);
deferredTriggerAddEvent(new_event);
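The UPDATE branch above boils down to a small decision table. The following sketch uses invented names (classify_update, KeyVerdict) purely to illustrate that order of checks; the real code records the outcome as flags on the queued event rather than returning a verdict.

#include <stdbool.h>
#include <stdio.h>

/* Possible outcomes for an UPDATE touching an RI-watched key column. */
typedef enum
{
    KEY_OK_NO_TRIGGER,          /* key unchanged: trigger can be skipped   */
    KEY_OK_FIRST_CHANGE,        /* first change in this xact: remember it  */
    KEY_OK_ROW_INSERTED,        /* row born in this xact: anything goes    */
    KEY_VIOLATION               /* second change of the same key: error    */
} KeyVerdict;

static KeyVerdict
classify_update(bool key_changed_now,
                bool row_inserted_this_xact,
                bool key_changed_before)
{
    if (!key_changed_now)
        return KEY_OK_NO_TRIGGER;
    if (row_inserted_this_xact)
        return KEY_OK_ROW_INSERTED;
    if (key_changed_before)
        return KEY_VIOLATION;   /* "triggered data change violation" */
    return KEY_OK_FIRST_CHANGE;
}

int
main(void)
{
    printf("%d\n", classify_update(true, false, false));   /* first change */
    printf("%d\n", classify_update(true, false, true));    /* violation */
    return 0;
}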
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.74 2001/03/22 03:59:24 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.75 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
usesysid = DatumGetInt32(heap_getattr(tuple, Anum_pg_shadow_usesysid, pg_shadow_dsc, &null));
- /*-------------------
+ /*
* Check if user still owns a database. If so, error out.
*
- * (It used to be that this function would drop the database automatically.
- * This is not only very dangerous for people that don't read the manual,
- * it doesn't seem to be the behaviour one would expect either.)
- * -- petere 2000/01/14)
- *-------------------*/
+ * (It used to be that this function would drop the database
+	 * automatically. This is not only very dangerous for people who
+	 * don't read the manual, it doesn't seem to be the behaviour one
+	 * would expect either.) -- petere 2000/01/14
+ */
pg_rel = heap_openr(DatabaseRelationName, AccessExclusiveLock);
pg_dsc = RelationGetDescr(pg_rel);
* ExecInitTee
* ExecEndTee
*
- * $Id: nodeTee.c,v 1.9 2001/01/24 19:42:55 momjian Exp $
+ * $Id: nodeTee.c,v 1.10 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (node->plan.state)
return TRUE;
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
/*
node->plan.state = estate;
- /* ----------------
+ /*
* create teeState structure
- * ----------------
*/
teeState = makeNode(TeeState);
teeState->tee_leftPlace = 0;
ExecAssignExprContext(estate, &(teeState->cstate));
#define TEE_NSLOTS 2
- /* ----------------
- * initialize tuple slots
- * ----------------
+
+ /*
+ * initialize tuple slots
*/
ExecInitResultTupleSlot(estate, &(teeState->cstate));
outerPlan = outerPlan((Plan *) node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * the tuple type info is from the outer plan of this node
- * the result type is also the same as the outerplan
+ /*
+	 * the tuple type info is from the outer plan of this node; the result
+	 * type is also the same as the outer plan
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &(teeState->cstate));
ExecAssignProjectionInfo((Plan *) node, &teeState->cstate);
- /* ---------------------------------------
- initialize temporary relation to buffer tuples
- */
+ /*
+ * initialize temporary relation to buffer tuples
+ */
tupType = ExecGetResultType(&(teeState->cstate));
len = ExecTargetListLength(((Plan *) node)->targetlist);
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: execAmi.c,v 1.57 2001/03/22 03:59:25 momjian Exp $
+ * $Id: execAmi.c,v 1.58 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Relation relation;
Pointer scanDesc;
- /* ----------------
- * note: scanDesc returned by ExecBeginScan can be either
- * a HeapScanDesc or an IndexScanDesc so for now we
- * make it a Pointer. There should be a better scan
- * abstraction someday -cim 9/9/89
- * ----------------
+ /*
+ * note: scanDesc returned by ExecBeginScan can be either a
+ * HeapScanDesc or an IndexScanDesc so for now we make it a Pointer.
+ * There should be a better scan abstraction someday -cim 9/9/89
*/
- /* ----------------
- * Open the relation with the correct call depending
- * on whether this is a heap relation or an index relation.
+ /*
+ * Open the relation with the correct call depending on whether this
+ * is a heap relation or an index relation.
*
- * For a table, acquire AccessShareLock for the duration of the query
- * execution. For indexes, acquire no lock here; the index machinery
- * does its own locks and unlocks. (We rely on having some kind of
- * lock on the parent table to ensure the index won't go away!)
- * ----------------
+ * For a table, acquire AccessShareLock for the duration of the query
+ * execution. For indexes, acquire no lock here; the index machinery
+ * does its own locks and unlocks. (We rely on having some kind of
+ * lock on the parent table to ensure the index won't go away!)
*/
if (isindex)
relation = index_open(relOid);
{
Pointer scanDesc;
- /* ----------------
- * open the appropriate type of scan.
+ /*
+ * open the appropriate type of scan.
*
- * Note: ambeginscan()'s second arg is a boolean indicating
- * that the scan should be done in reverse.. That is,
- * if you pass it true, then the scan is backward.
- * ----------------
+ * Note: ambeginscan()'s second arg is a boolean indicating that the scan
+	 * should be done in reverse. That is, if you pass it true, then the
+ * scan is backward.
*/
if (isindex)
{
Relation relation;
HeapScanDesc scanDesc;
- /* ----------------
- * get state for node and shut down the heap scan, if any
- * ----------------
+ /*
+ * get state for node and shut down the heap scan, if any
*/
switch (nodeTag(node))
{
if (scanDesc != NULL)
heap_endscan(scanDesc);
- /* ----------------
- * if this is an index scan then we have to take care
- * of the index relations as well.
- * ----------------
+ /*
+ * if this is an index scan then we have to take care of the index
+ * relations as well.
*/
if (IsA(node, IndexScan))
{
for (i = 0; i < numIndices; i++)
{
- /* ----------------
- * shut down each of the index scans and
- * close each of the index relations
- * ----------------
+
+ /*
+ * shut down each of the index scans and close each of the
+ * index relations
*/
if (indexScanDescs[i] != NULL)
index_endscan(indexScanDescs[i]);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.26 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.27 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ALLOCSET_DEFAULT_MAXSIZE);
oldContext = MemoryContextSwitchTo(junkContext);
- /* ---------------------
- * First find the "clean" target list, i.e. all the entries
- * in the original target list which have a false 'resjunk'
- * NOTE: make copy of the Resdom nodes, because we have
- * to change the 'resno's...
- * ---------------------
+ /*
+ * First find the "clean" target list, i.e. all the entries in the
+	 * original target list which have a false 'resjunk'. NOTE: make a
+	 * copy of the Resdom nodes, because we have to change the 'resno's...
*/
cleanTargetList = NIL;
cleanResno = 1;
}
}
- /* ---------------------
+ /*
* Now calculate the tuple type for the cleaned tuple (we were already
* given the type for the original targetlist).
- * ---------------------
*/
cleanTupType = ExecTypeFromTL(cleanTargetList);
len = ExecTargetListLength(targetList);
cleanLength = ExecTargetListLength(cleanTargetList);
- /* ---------------------
- * Now calculate the "map" between the original tuple's attributes
- * and the "clean" tuple's attributes.
+ /*
+ * Now calculate the "map" between the original tuple's attributes and
+ * the "clean" tuple's attributes.
*
- * The "map" is an array of "cleanLength" attribute numbers, i.e.
- * one entry for every attribute of the "clean" tuple.
- * The value of this entry is the attribute number of the corresponding
- * attribute of the "original" tuple.
- * ---------------------
+ * The "map" is an array of "cleanLength" attribute numbers, i.e. one
+ * entry for every attribute of the "clean" tuple. The value of this
+ * entry is the attribute number of the corresponding attribute of the
+ * "original" tuple.
*/
if (cleanLength > 0)
{
else
cleanMap = NULL;
- /* ---------------------
+ /*
* Finally create and initialize the JunkFilter struct.
- * ---------------------
*/
junkfilter = makeNode(JunkFilter);
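A toy illustration of the cleanMap idea described above, with plain ints standing in for tuples and attribute values: junk entries are skipped while building the map, and the map is then used to pull each surviving attribute out of the original tuple. Nothing here is executor code; it is only the arithmetic of the map.

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    /* original targetlist: a value and a resjunk flag per entry */
    int         orig_vals[] = {10, 20, 30, 40};
    bool        resjunk[] = {false, true, false, true};     /* ctid etc. */
    int         natts = 4;

    int         cleanMap[4];
    int         cleanLength = 0;
    int         i;

    /* build the map: one entry per non-junk attribute, 1-based like attnums */
    for (i = 0; i < natts; i++)
        if (!resjunk[i])
            cleanMap[cleanLength++] = i + 1;

    /* "remove junk": fetch original attribute cleanMap[i] for clean slot i */
    for (i = 0; i < cleanLength; i++)
        printf("clean att %d = orig att %d = %d\n",
               i + 1, cleanMap[i], orig_vals[cleanMap[i] - 1]);
    return 0;
}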
TupleDesc tupType;
HeapTuple tuple;
- /* ---------------------
- * first look in the junkfilter's target list for
- * an attribute with the given name
- * ---------------------
+ /*
+ * first look in the junkfilter's target list for an attribute with
+ * the given name
*/
resno = InvalidAttrNumber;
targetList = junkfilter->jf_targetList;
return false;
}
- /* ---------------------
+ /*
* Now extract the attribute value from the tuple.
- * ---------------------
*/
tuple = slot->val;
tupType = junkfilter->jf_tupType;
Datum values_array[64];
char nulls_array[64];
- /* ----------------
- * get info from the slot and the junk filter
- * ----------------
+ /*
+ * get info from the slot and the junk filter
*/
tuple = slot->val;
cleanLength = junkfilter->jf_cleanLength;
cleanMap = junkfilter->jf_cleanMap;
- /* ---------------------
- * Handle the trivial case first.
- * ---------------------
+ /*
+ * Handle the trivial case first.
*/
if (cleanLength == 0)
return (HeapTuple) NULL;
- /* ---------------------
- * Create the arrays that will hold the attribute values
- * and the null information for the new "clean" tuple.
+ /*
+ * Create the arrays that will hold the attribute values and the null
+ * information for the new "clean" tuple.
*
- * Note: we use memory on the stack to optimize things when
- * we are dealing with a small number of tuples.
- * for large tuples we just use palloc.
- * ---------------------
+ * Note: we use memory on the stack to optimize things when we are
+	 * dealing with a small number of tuples. For large tuples we just use
+ * palloc.
*/
if (cleanLength > 64)
{
nulls = nulls_array;
}
- /* ---------------------
+ /*
	 * Extract one by one all the values of the "clean" tuple.
- * ---------------------
*/
for (i = 0; i < cleanLength; i++)
{
nulls[i] = ' ';
}
- /* ---------------------
+ /*
* Now form the new tuple.
- * ---------------------
*/
cleanTuple = heap_formtuple(cleanTupType,
values,
nulls);
- /* ---------------------
- * We are done. Free any space allocated for 'values' and 'nulls'
- * and return the new tuple.
- * ---------------------
+ /*
+ * We are done. Free any space allocated for 'values' and 'nulls' and
+ * return the new tuple.
*/
if (cleanLength > 64)
{
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.25 2001/01/29 00:39:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.26 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool result;
List *subp;
- /* ----------------
- * do nothing when we get to the end
- * of a leaf on tree.
- * ----------------
+ /*
+	 * do nothing when we get to the end of a leaf on the tree.
*/
if (node == NULL)
return FALSE;
switch (nodeTag(node))
{
- /* ----------------
- * control nodes
- * ----------------
+
+ /*
+ * control nodes
*/
case T_Result:
result = ExecInitResult((Result *) node, estate, parent);
result = ExecInitAppend((Append *) node, estate, parent);
break;
- /* ----------------
- * scan nodes
- * ----------------
+ /*
+ * scan nodes
*/
case T_SeqScan:
result = ExecInitSeqScan((SeqScan *) node, estate, parent);
parent);
break;
- /* ----------------
- * join nodes
- * ----------------
+ /*
+ * join nodes
*/
case T_NestLoop:
result = ExecInitNestLoop((NestLoop *) node, estate, parent);
result = ExecInitHashJoin((HashJoin *) node, estate, parent);
break;
- /* ----------------
- * materialization nodes
- * ----------------
+ /*
+ * materialization nodes
*/
case T_Material:
result = ExecInitMaterial((Material *) node, estate, parent);
CHECK_FOR_INTERRUPTS();
- /* ----------------
- * deal with NULL nodes..
- * ----------------
+ /*
+	 * deal with NULL nodes.
*/
if (node == NULL)
return NULL;
switch (nodeTag(node))
{
- /* ----------------
- * control nodes
- * ----------------
+
+ /*
+ * control nodes
*/
case T_Result:
result = ExecResult((Result *) node);
result = ExecProcAppend((Append *) node);
break;
- /* ----------------
- * scan nodes
- * ----------------
+ /*
+ * scan nodes
*/
case T_SeqScan:
result = ExecSeqScan((SeqScan *) node);
result = ExecSubqueryScan((SubqueryScan *) node);
break;
- /* ----------------
- * join nodes
- * ----------------
+ /*
+ * join nodes
*/
case T_NestLoop:
result = ExecNestLoop((NestLoop *) node);
result = ExecHashJoin((HashJoin *) node);
break;
- /* ----------------
- * materialization nodes
- * ----------------
+ /*
+ * materialization nodes
*/
case T_Material:
result = ExecMaterial((Material *) node);
switch (nodeTag(node))
{
- /* ----------------
- * control nodes
- * ----------------
+
+ /*
+ * control nodes
*/
case T_Result:
return ExecCountSlotsResult((Result *) node);
case T_Append:
return ExecCountSlotsAppend((Append *) node);
- /* ----------------
- * scan nodes
- * ----------------
+ /*
+ * scan nodes
*/
case T_SeqScan:
return ExecCountSlotsSeqScan((SeqScan *) node);
case T_SubqueryScan:
return ExecCountSlotsSubqueryScan((SubqueryScan *) node);
- /* ----------------
- * join nodes
- * ----------------
+ /*
+ * join nodes
*/
case T_NestLoop:
return ExecCountSlotsNestLoop((NestLoop *) node);
case T_HashJoin:
return ExecCountSlotsHashJoin((HashJoin *) node);
- /* ----------------
- * materialization nodes
- * ----------------
+ /*
+ * materialization nodes
*/
case T_Material:
return ExecCountSlotsMaterial((Material *) node);
{
List *subp;
- /* ----------------
- * do nothing when we get to the end
- * of a leaf on tree.
- * ----------------
+ /*
+	 * do nothing when we get to the end of a leaf on the tree.
*/
if (node == NULL)
return;
switch (nodeTag(node))
{
- /* ----------------
- * control nodes
- * ----------------
+
+ /*
+ * control nodes
*/
case T_Result:
ExecEndResult((Result *) node);
ExecEndAppend((Append *) node);
break;
- /* ----------------
- * scan nodes
- * ----------------
+ /*
+ * scan nodes
*/
case T_SeqScan:
ExecEndSeqScan((SeqScan *) node);
ExecEndSubqueryScan((SubqueryScan *) node);
break;
- /* ----------------
- * join nodes
- * ----------------
+ /*
+ * join nodes
*/
case T_NestLoop:
ExecEndNestLoop((NestLoop *) node);
ExecEndHashJoin((HashJoin *) node);
break;
- /* ----------------
- * materialization nodes
- * ----------------
+ /*
+ * materialization nodes
*/
case T_Material:
ExecEndMaterial((Material *) node);
break;
default:
- /* ----------------
- * should never get here
- * ----------------
+
+ /*
+ * should never get here
*/
elog(ERROR, "ExecGetTupType: node type %d unsupported",
(int) nodeTag(node));
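The Init/Proc/CountSlots/End routines above all follow the same dispatch-by-node-tag pattern. A stripped-down, self-contained caricature of that pattern (toy tags, nothing from the executor) might look like this:

#include <stdio.h>

typedef enum NodeTag
{
    T_SeqScanToy,
    T_SortToy,
    T_NestLoopToy
} NodeTag;

typedef struct Node
{
    NodeTag     tag;            /* every plan node starts with its tag */
} Node;

static const char *
exec_proc(Node *node)
{
    if (node == NULL)
        return "empty";         /* deal with NULL nodes */

    switch (node->tag)
    {
        case T_SeqScanToy:
            return "scan node";
        case T_SortToy:
            return "materialization node";
        case T_NestLoopToy:
            return "join node";
        default:
            return "unsupported";       /* should never get here */
    }
}

int
main(void)
{
    Node        n = {T_SortToy};

    printf("%s\n", exec_proc(&n));
    return 0;
}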
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.16 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.17 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ExprDoneCond isDone;
TupleTableSlot *resultSlot;
- /* ----------------
- * Fetch data from node
- * ----------------
+ /*
+ * Fetch data from node
*/
estate = node->plan.state;
scanstate = node->scanstate;
econtext = scanstate->cstate.cs_ExprContext;
qual = node->plan.qual;
- /* ----------------
- * Check to see if we're still projecting out tuples from a previous
- * scan tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
- * ----------------
+ /*
+ * Check to see if we're still projecting out tuples from a previous
+ * scan tuple (because there is a function-returning-set in the
+ * projection expressions). If so, try to project another one.
*/
if (scanstate->cstate.cs_TupFromTlist)
{
scanstate->cstate.cs_TupFromTlist = false;
}
- /* ----------------
- * Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a scan tuple.
- * ----------------
+ /*
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous tuple cycle. Note this can't
+ * happen until we're done projecting out tuples from a scan tuple.
*/
ResetExprContext(econtext);
slot = (*accessMtd) (node);
- /* ----------------
- * if the slot returned by the accessMtd contains
- * NULL, then it means there is nothing more to scan
- * so we just return an empty slot, being careful to use
- * the projection result slot so it has correct tupleDesc.
- * ----------------
+ /*
+ * if the slot returned by the accessMtd contains NULL, then it
+ * means there is nothing more to scan so we just return an empty
+ * slot, being careful to use the projection result slot so it has
+ * correct tupleDesc.
*/
if (TupIsNull(slot))
{
true);
}
- /* ----------------
- * place the current tuple into the expr context
- * ----------------
+ /*
+ * place the current tuple into the expr context
*/
econtext->ecxt_scantuple = slot;
- /* ----------------
- * check that the current tuple satisfies the qual-clause
+ /*
+ * check that the current tuple satisfies the qual-clause
*
- * check for non-nil qual here to avoid a function call to
- * ExecQual() when the qual is nil ... saves only a few cycles,
- * but they add up ...
- * ----------------
+ * check for non-nil qual here to avoid a function call to ExecQual()
+ * when the qual is nil ... saves only a few cycles, but they add
+ * up ...
*/
if (!qual || ExecQual(qual, econtext, false))
{
- /* ----------------
- * Found a satisfactory scan tuple.
+
+ /*
+ * Found a satisfactory scan tuple.
*
- * Form a projection tuple, store it in the result tuple
- * slot and return it --- unless we find we can project no
- * tuples from this scan tuple, in which case continue scan.
- * ----------------
+ * Form a projection tuple, store it in the result tuple slot and
+ * return it --- unless we find we can project no tuples from
+			 * this scan tuple, in which case we continue the scan.
*/
resultSlot = ExecProject(scanstate->cstate.cs_ProjInfo, &isDone);
if (isDone != ExprEndResult)
}
}
- /* ----------------
- * Tuple fails qual, so free per-tuple memory and try again.
- * ----------------
+ /*
+ * Tuple fails qual, so free per-tuple memory and try again.
*/
ResetExprContext(econtext);
}
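The control flow of ExecScan reduces to a fetch/qual/project loop. Below is a standalone caricature of it, with a static array playing the access method and a trivial predicate playing ExecQual; none of these names exist in the backend.

#include <stdbool.h>
#include <stdio.h>

static int  data[] = {3, 8, 1, 9, 4};
static int  pos = 0;

static bool
access_mtd(int *out)            /* plays the role of SeqNext etc. */
{
    if (pos >= (int) (sizeof(data) / sizeof(data[0])))
        return false;           /* nothing more to scan */
    *out = data[pos++];
    return true;
}

static bool
qual(int val)                   /* plays the role of ExecQual */
{
    return val > 3;
}

int
main(void)
{
    int         val;

    for (;;)
    {
        /* (per-tuple memory would be reset here each iteration) */
        if (!access_mtd(&val))
            break;              /* return an empty slot: scan is over */
        if (!qual(val))
            continue;           /* tuple fails qual, try again */
        printf("project %d\n", val);    /* plays the role of ExecProject */
    }
    return 0;
}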
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.47 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.48 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
TupleTable newtable; /* newly allocated table */
TupleTableSlot *array; /* newly allocated slot array */
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(initialSize >= 1);
- /* ----------------
- * Now allocate our new table along with space for the pointers
- * to the tuples.
+ /*
+ * Now allocate our new table along with space for the pointers to the
+ * tuples.
*/
newtable = (TupleTable) palloc(sizeof(TupleTableData));
array = (TupleTableSlot *) palloc(initialSize * sizeof(TupleTableSlot));
- /* ----------------
- * clean out the slots we just allocated
- * ----------------
+ /*
+ * clean out the slots we just allocated
*/
MemSet(array, 0, initialSize * sizeof(TupleTableSlot));
- /* ----------------
- * initialize the new table and return it to the caller.
- * ----------------
+ /*
+ * initialize the new table and return it to the caller.
*/
newtable->size = initialSize;
newtable->next = 0;
TupleTableSlot *array; /* start of table array */
int i; /* counter */
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(table != NULL);
- /* ----------------
- * get information from the table
- * ----------------
+ /*
+ * get information from the table
*/
array = table->array;
next = table->next;
- /* ----------------
- * first free all the valid pointers in the tuple array
- * and drop refcounts of any referenced buffers,
- * if that's what the caller wants. (There is probably
- * no good reason for the caller ever not to want it!)
- * ----------------
+ /*
+ * first free all the valid pointers in the tuple array and drop
+ * refcounts of any referenced buffers, if that's what the caller
+ * wants. (There is probably no good reason for the caller ever not
+ * to want it!)
*/
if (shouldFree)
{
}
}
- /* ----------------
- * finally free the tuple array and the table itself.
- * ----------------
+ /*
+ * finally free the tuple array and the table itself.
*/
pfree(array);
pfree(table);
int slotnum; /* new slot number */
TupleTableSlot *slot;
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(table != NULL);
- /* ----------------
- * if our table is full we have to allocate a larger
- * size table. Since ExecAllocTableSlot() is only called
- * before the table is ever used to store tuples, we don't
- * have to worry about the contents of the old table.
- * If this changes, then we will have to preserve the contents.
- * -cim 6/23/90
+ /*
+ * if our table is full we have to allocate a larger size table.
+ * Since ExecAllocTableSlot() is only called before the table is ever
+ * used to store tuples, we don't have to worry about the contents of
+ * the old table. If this changes, then we will have to preserve the
+ * contents. -cim 6/23/90
*
- * Unfortunately, we *cannot* do this. All of the nodes in
- * the plan that have already initialized their slots will have
- * pointers into _freed_ memory. This leads to bad ends. We
- * now count the number of slots we will need and create all the
- * slots we will need ahead of time. The if below should never
- * happen now. Fail if it does. -mer 4 Aug 1992
- * ----------------
+ * Unfortunately, we *cannot* do this. All of the nodes in the plan that
+ * have already initialized their slots will have pointers into
+ * _freed_ memory. This leads to bad ends. We now count the number
+ * of slots we will need and create all the slots we will need ahead
+ * of time. The if below should never happen now. Fail if it does.
+ * -mer 4 Aug 1992
*/
if (table->next >= table->size)
elog(ERROR, "Plan requires more slots than are available"
"\n\tsend mail to your local executor guru to fix this");
- /* ----------------
- * at this point, space in the table is guaranteed so we
- * reserve the next slot, initialize and return it.
- * ----------------
+ /*
+ * at this point, space in the table is guaranteed so we reserve the
+ * next slot, initialize and return it.
*/
slotnum = table->next;
table->next++;
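The comment above explains why the slot array is sized up front rather than grown later: nodes keep pointers to individual slots. This small sketch, unrelated to the backend, only shows that growing an array with realloc can move it, which is exactly what would leave such pointers dangling.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct ToySlot
{
    int         val;
} ToySlot;

int
main(void)
{
    int         size = 4;
    ToySlot    *array = calloc(size, sizeof(ToySlot));
    uintptr_t   before = (uintptr_t) array;     /* remember original address */

    /* a plan node would now have stashed a pointer such as &array[2] */

    array = realloc(array, 4096 * sizeof(ToySlot));     /* "grow the table" */
    if (array == NULL)
        return 1;

    printf("array moved: %s\n",
           (uintptr_t) array == before ?
           "no" : "yes (old slot pointers are stale)");
    free(array);
    return 0;
}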
Buffer buffer,
bool shouldFree)
{
- /* ----------------
- * sanity checks
- * ----------------
+
+ /*
+ * sanity checks
*/
Assert(slot != NULL);
/* passing shouldFree=true for a tuple on a disk page is not sane */
/* clear out any old contents of the slot */
ExecClearTuple(slot);
- /* ----------------
- * store the new tuple into the specified slot and
- * return the slot into which we stored the tuple.
- * ----------------
+ /*
+ * store the new tuple into the specified slot and return the slot
+ * into which we stored the tuple.
*/
slot->val = tuple;
slot->ttc_buffer = buffer;
{
HeapTuple oldtuple; /* prior contents of slot */
- /* ----------------
- * sanity checks
- * ----------------
+ /*
+ * sanity checks
*/
Assert(slot != NULL);
- /* ----------------
- * get information from the tuple table
- * ----------------
+ /*
+ * get information from the tuple table
*/
oldtuple = slot->val;
- /* ----------------
- * free the old contents of the specified slot if necessary.
- * ----------------
+ /*
+ * free the old contents of the specified slot if necessary.
*/
if (slot->ttc_shouldFree && oldtuple != NULL)
heap_freetuple(oldtuple);
slot->ttc_shouldFree = true;/* probably useless code... */
- /* ----------------
- * Drop the pin on the referenced buffer, if there is one.
- * ----------------
+ /*
+ * Drop the pin on the referenced buffer, if there is one.
*/
if (BufferIsValid(slot->ttc_buffer))
ReleaseBuffer(slot->ttc_buffer);
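A minimal model of the slot ownership rules (shouldFree plus a buffer pin) that ExecStoreTuple and ExecClearTuple implement above; the ToySlot type and the functions below are invented for illustration only.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct ToySlot
{
    char       *val;            /* the stored "tuple" */
    bool        shouldFree;     /* does the slot own that memory? */
    int         buffer;         /* 0 means no buffer pin held */
} ToySlot;

static void
clear_slot(ToySlot *slot)
{
    if (slot->shouldFree && slot->val != NULL)
        free(slot->val);        /* free old contents if we own them */
    if (slot->buffer != 0)
    {
        printf("releasing pin on buffer %d\n", slot->buffer);
        slot->buffer = 0;
    }
    slot->val = NULL;
    slot->shouldFree = false;
}

static void
store_tuple(ToySlot *slot, char *tuple, int buffer, bool shouldFree)
{
    clear_slot(slot);           /* clear out any old contents first */
    slot->val = tuple;
    slot->buffer = buffer;
    slot->shouldFree = shouldFree;
}

int
main(void)
{
    ToySlot     slot = {NULL, false, 0};
    char       *materialized = malloc(32);      /* slot will own this one */

    store_tuple(&slot, materialized, 0, true);
    store_tuple(&slot, "tuple on a disk page", 17, false);  /* pinned, not owned */
    clear_slot(&slot);
    return 0;
}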
Oid restype;
int len;
- /* ----------------
- * examine targetlist - if empty then return NULL
- * ----------------
+ /*
+ * examine targetlist - if empty then return NULL
*/
len = ExecTargetListLength(targetList);
if (len == 0)
return NULL;
- /* ----------------
- * allocate a new typeInfo
- * ----------------
+ /*
+ * allocate a new typeInfo
*/
typeInfo = CreateTemplateTupleDesc(len);
- /* ----------------
+ /*
* scan list, generate type info for each entry
- * ----------------
*/
foreach(tlitem, targetList)
{
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.74 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.75 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
ProjectionInfo *projInfo;
- /* ----------------
- * get projection info. if NULL then this node has
- * none so we just return.
- * ----------------
+ /*
+ * get projection info. if NULL then this node has none so we just
+ * return.
*/
projInfo = commonstate->cs_ProjInfo;
if (projInfo == NULL)
return;
- /* ----------------
- * clean up memory used.
- * ----------------
+ /*
+ * clean up memory used.
*/
if (projInfo->pi_tupValue != NULL)
pfree(projInfo->pi_tupValue);
{
ExprContext *econtext;
- /* ----------------
- * get expression context. if NULL then this node has
- * none so we just return.
- * ----------------
+ /*
+ * get expression context. if NULL then this node has none so we just
+ * return.
*/
econtext = commonstate->cs_ExprContext;
if (econtext == NULL)
return;
- /* ----------------
- * clean up memory used.
- * ----------------
+ /*
+ * clean up memory used.
*/
MemoryContextDelete(econtext->ecxt_per_tuple_memory);
pfree(econtext);
IsSystemRelationName(RelationGetRelationName(resultRelation)))
return;
- /* ----------------
- * Get cached list of index OIDs
- * ----------------
+ /*
+ * Get cached list of index OIDs
*/
indexoidlist = RelationGetIndexList(resultRelation);
len = length(indexoidlist);
if (len == 0)
return;
- /* ----------------
- * allocate space for result arrays
- * ----------------
+ /*
+ * allocate space for result arrays
*/
relationDescs = (RelationPtr) palloc(len * sizeof(Relation));
indexInfoArray = (IndexInfo **) palloc(len * sizeof(IndexInfo *));
resultRelInfo->ri_IndexRelationDescs = relationDescs;
resultRelInfo->ri_IndexRelationInfo = indexInfoArray;
- /* ----------------
- * For each index, open the index relation and save pg_index info.
- * ----------------
+ /*
+ * For each index, open the index relation and save pg_index info.
*/
i = 0;
foreach(indexoidscan, indexoidlist)
HeapTuple indexTuple;
IndexInfo *ii;
- /* ----------------
+ /*
* Open (and lock, if necessary) the index relation
*
* Hack for not btree and hash indices: they use relation level
- * exclusive locking on update (i.e. - they are not ready for MVCC)
- * and so we have to exclusively lock indices here to prevent
- * deadlocks if we will scan them - index_beginscan places
+ * exclusive locking on update (i.e. - they are not ready for
+ * MVCC) and so we have to exclusively lock indices here to
+ * prevent deadlocks if we will scan them - index_beginscan places
* AccessShareLock, indices update methods don't use locks at all.
	 * We release this lock in ExecCloseIndices. Note that hashes use
	 * page level locking - i.e. are not deadlock-free - let them be
* on their way -:)) vadim 03-12-1998
*
* If there are multiple not-btree-or-hash indices, all backends must
- * lock the indices in the same order or we will get deadlocks here
- * during concurrent updates. This is now guaranteed by
+ * lock the indices in the same order or we will get deadlocks
+ * here during concurrent updates. This is now guaranteed by
* RelationGetIndexList(), which promises to return the index list
* in OID order. tgl 06-19-2000
- * ----------------
*/
indexDesc = index_open(indexOid);
indexDesc->rd_rel->relam != HASH_AM_OID)
LockRelation(indexDesc, AccessExclusiveLock);
- /* ----------------
- * Get the pg_index tuple for the index
- * ----------------
+ /*
+ * Get the pg_index tuple for the index
*/
indexTuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexOid),
if (!HeapTupleIsValid(indexTuple))
elog(ERROR, "ExecOpenIndices: index %u not found", indexOid);
- /* ----------------
- * extract the index key information from the tuple
- * ----------------
+ /*
+ * extract the index key information from the tuple
*/
ii = BuildIndexInfo(indexTuple);
/* Arrange for econtext's scan tuple to be the tuple under test */
econtext->ecxt_scantuple = slot;
- /* ----------------
- * for each index, form and insert the index tuple
- * ----------------
+ /*
+ * for each index, form and insert the index tuple
*/
for (i = 0; i < numIndices; i++)
{
continue;
}
- /* ----------------
- * FormIndexDatum fills in its datum and null parameters
- * with attribute information taken from the given heap tuple.
- * ----------------
+ /*
+ * FormIndexDatum fills in its datum and null parameters with
+ * attribute information taken from the given heap tuple.
*/
FormIndexDatum(indexInfo,
heapTuple,
&(heapTuple->t_self), /* tid of heap tuple */
heapRelation);
- /* ----------------
- * keep track of index inserts for debugging
- * ----------------
+ /*
+ * keep track of index inserts for debugging
*/
IncrIndexInserted();
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.44 2001/03/22 03:59:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.45 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Datum tmp;
bool isNull;
- /* ----------------
- * get the procedure tuple corresponding to the given function Oid
- * ----------------
+ /*
+ * get the procedure tuple corresponding to the given function Oid
*/
procedureTuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(foid),
procedureStruct = (Form_pg_proc) GETSTRUCT(procedureTuple);
- /* ----------------
- * get the return type from the procedure tuple
- * ----------------
+ /*
+ * get the return type from the procedure tuple
*/
typeTuple = SearchSysCache(TYPEOID,
ObjectIdGetDatum(procedureStruct->prorettype),
fcache = (SQLFunctionCachePtr) palloc(sizeof(SQLFunctionCache));
MemSet(fcache, 0, sizeof(SQLFunctionCache));
- /* ----------------
- * get the type length and by-value flag from the type tuple
- * ----------------
+ /*
+ * get the type length and by-value flag from the type tuple
*/
fcache->typlen = typeStruct->typlen;
if (typeStruct->typrelid == InvalidOid)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.76 2001/03/22 03:59:27 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.77 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
peraggstate->transValue = peraggstate->initValue;
peraggstate->transValueIsNull = peraggstate->initValueIsNull;
- /* ------------------------------------------
+ /*
* If the initial value for the transition state doesn't exist in the
- * pg_aggregate table then we will let the first non-NULL value returned
- * from the outer procNode become the initial value. (This is useful for
- * aggregates like max() and min().) The noTransValue flag signals that
- * we still need to do this.
- * ------------------------------------------
+ * pg_aggregate table then we will let the first non-NULL value
+ * returned from the outer procNode become the initial value. (This is
+ * useful for aggregates like max() and min().) The noTransValue flag
+ * signals that we still need to do this.
*/
peraggstate->noTransValue = peraggstate->initValueIsNull;
}
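The noTransValue behaviour described above, shown as a standalone max()-style fold: when there is no initial value in pg_aggregate, the first non-NULL input simply becomes the transition value. All names below are invented stand-ins.

#include <stdbool.h>
#include <stdio.h>

typedef struct ToyAggState
{
    int         transValue;
    bool        transValueIsNull;
    bool        noTransValue;   /* still waiting for a first input? */
} ToyAggState;

static void
advance(ToyAggState *st, int input, bool inputIsNull)
{
    if (inputIsNull)
        return;                 /* strict transition fn: ignore NULLs */
    if (st->noTransValue)
    {
        st->transValue = input; /* first non-NULL input seeds the state */
        st->transValueIsNull = false;
        st->noTransValue = false;
        return;
    }
    if (input > st->transValue) /* int4larger, more or less */
        st->transValue = input;
}

int
main(void)
{
    ToyAggState st = {0, true, true};
    int         vals[] = {5, 12, 7};
    int         i;

    for (i = 0; i < 3; i++)
        advance(&st, vals[i], false);
    printf("max = %d\n", st.transValue);    /* 12 */
    return 0;
}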
int aggno;
bool isNull;
- /* ---------------------
- * get state info from node
- * ---------------------
+ /*
+ * get state info from node
*/
aggstate = node->aggstate;
estate = node->plan.state;
inputTuple = NULL; /* no saved input tuple yet */
- /* ----------------
- * for each tuple from the outer plan, update all the aggregates
- * ----------------
+ /*
+ * for each tuple from the outer plan, update all the aggregates
*/
for (;;)
{
outerPlan = outerPlan(node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * initialize source tuple type.
- * ----------------
+ /*
+ * initialize source tuple type.
*/
ExecAssignScanTypeFromOuterPlan((Plan *) node, &aggstate->csstate);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.39 2001/01/24 19:42:54 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.40 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int whichplan;
int nplans;
- /* ----------------
- * get information from the append node
- * ----------------
+ /*
+ * get information from the append node
*/
estate = node->plan.state;
appendstate = node->appendstate;
if (whichplan < 0)
{
- /* ----------------
- * if scanning in reverse, we start at
- * the last scan in the list and then
- * proceed back to the first.. in any case
- * we inform ExecProcAppend that we are
- * at the end of the line by returning FALSE
- * ----------------
+
+ /*
+ * if scanning in reverse, we start at the last scan in the list
+	 * and then proceed back to the first. In any case we inform
+ * ExecProcAppend that we are at the end of the line by returning
+ * FALSE
*/
appendstate->as_whichplan = 0;
return FALSE;
}
else if (whichplan >= nplans)
{
- /* ----------------
- * as above, end the scan if we go beyond
- * the last scan in our list..
- * ----------------
+
+ /*
+ * as above, end the scan if we go beyond the last scan in our
+	 * list.
*/
appendstate->as_whichplan = nplans - 1;
return FALSE;
}
else
{
- /* ----------------
- * initialize the scan
+
+ /*
+ * initialize the scan
*
* If we are controlling the target relation, select the proper
* active ResultRelInfo and junk filter for this target.
- * ----------------
*/
if (node->isTarget)
{
CXT1_printf("ExecInitAppend: context is %d\n", CurrentMemoryContext);
- /* ----------------
- * assign execution state to node and get information
- * for append state
- * ----------------
+ /*
+ * assign execution state to node and get information for append state
*/
node->plan.state = estate;
initialized = (bool *) palloc(nplans * sizeof(bool));
MemSet(initialized, 0, nplans * sizeof(bool));
- /* ----------------
- * create new AppendState for our append node
- * ----------------
+ /*
+ * create new AppendState for our append node
*/
appendstate = makeNode(AppendState);
appendstate->as_whichplan = 0;
node->appendstate = appendstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * Append plans don't have expression contexts because they
- * never call ExecQual or ExecProject.
- * ----------------
+ * Append plans don't have expression contexts because they never call
+ * ExecQual or ExecProject.
*/
#define APPEND_NSLOTS 1
- /* ----------------
- * append nodes still have Result slots, which hold pointers
- * to tuples, so we have to initialize them.
- * ----------------
+
+ /*
+ * append nodes still have Result slots, which hold pointers to
+ * tuples, so we have to initialize them.
*/
ExecInitResultTupleSlot(estate, &appendstate->cstate);
- /* ----------------
- * call ExecInitNode on each of the plans in our list
- * and save the results into the array "initialized"
- * ----------------
+ /*
+ * call ExecInitNode on each of the plans in our list and save the
+ * results into the array "initialized"
*/
for (i = 0; i < nplans; i++)
{
initialized[i] = ExecInitNode(initNode, estate, (Plan *) node);
}
- /* ----------------
- * initialize tuple type
- * ----------------
+ /*
+ * initialize tuple type
*/
ExecAssignResultTypeFromTL((Plan *) node, &appendstate->cstate);
appendstate->cstate.cs_ProjInfo = NULL;
- /* ----------------
- * return the result from the first subplan's initialization
- * ----------------
+ /*
+ * return the result from the first subplan's initialization
*/
appendstate->as_whichplan = 0;
exec_append_initialize_next(node);
TupleTableSlot *result_slot;
ScanDirection direction;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
appendstate = node->appendstate;
estate = node->plan.state;
whichplan = appendstate->as_whichplan;
result_slot = appendstate->cstate.cs_ResultTupleSlot;
- /* ----------------
- * figure out which subplan we are currently processing
- * ----------------
+ /*
+ * figure out which subplan we are currently processing
*/
subnode = (Plan *) nth(whichplan, appendplans);
if (subnode == NULL)
elog(DEBUG, "ExecProcAppend: subnode is NULL");
- /* ----------------
- * get a tuple from the subplan
- * ----------------
+ /*
+ * get a tuple from the subplan
*/
result = ExecProcNode(subnode, (Plan *) node);
if (!TupIsNull(result))
{
- /* ----------------
- * if the subplan gave us something then place a copy of
- * whatever we get into our result slot and return it.
+
+ /*
+ * if the subplan gave us something then place a copy of whatever
+ * we get into our result slot and return it.
*
- * Note we rely on the subplan to retain ownership of the
- * tuple for as long as we need it --- we don't copy it.
- * ----------------
+ * Note we rely on the subplan to retain ownership of the tuple for
+ * as long as we need it --- we don't copy it.
*/
return ExecStoreTuple(result->val, result_slot, InvalidBuffer, false);
}
else
{
- /* ----------------
- * .. go on to the "next" subplan in the appropriate
- * direction and try processing again (recursively)
- * ----------------
+
+ /*
+		 * Go on to the "next" subplan in the appropriate direction and
+ * try processing again (recursively)
*/
if (ScanDirectionIsForward(direction))
appendstate->as_whichplan++;
else
appendstate->as_whichplan--;
- /* ----------------
- * return something from next node or an empty slot
- * if all of our subplans have been exhausted.
- * ----------------
+ /*
+ * return something from next node or an empty slot if all of our
+ * subplans have been exhausted.
*/
if (exec_append_initialize_next(node))
{
bool *initialized;
int i;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
appendstate = node->appendstate;
estate = node->plan.state;
nplans = appendstate->as_nplans;
initialized = appendstate->as_initialized;
- /* ----------------
- * shut down each of the subscans
- * ----------------
+ /*
+ * shut down each of the subscans
*/
for (i = 0; i < nplans; i++)
{
* locate group boundaries.
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.42 2001/03/22 03:59:27 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.43 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ProjectionInfo *projInfo;
TupleTableSlot *resultSlot;
- /* ---------------------
- * get state info from node
- * ---------------------
+ /*
+ * get state info from node
*/
grpstate = node->grpstate;
if (grpstate->grp_done)
InvalidBuffer, false);
}
- /* ----------------
- * form a projection tuple, store it in the result tuple
- * slot and return it.
- * ----------------
+ /*
+ * form a projection tuple, store it in the result tuple slot and
+ * return it.
*/
projInfo = grpstate->csstate.cstate.cs_ProjInfo;
ProjectionInfo *projInfo;
TupleTableSlot *resultSlot;
- /* ---------------------
- * get state info from node
- * ---------------------
+ /*
+ * get state info from node
*/
grpstate = node->grpstate;
if (grpstate->grp_done)
break;
}
- /* ----------------
- * form a projection tuple, store it in the result tuple
- * slot and return it.
- * ----------------
+ /*
+ * form a projection tuple, store it in the result tuple slot and
+ * return it.
*/
projInfo = grpstate->csstate.cstate.cs_ProjInfo;
outerPlan = outerPlan(node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * initialize tuple type.
- * ----------------
+ /*
+ * initialize tuple type.
*/
ExecAssignScanTypeFromOuterPlan((Plan *) node, &grpstate->csstate);
* Portions Copyright (c) 1994, Regents of the University of California
*
*
- * $Id: nodeHash.c,v 1.55 2001/03/22 03:59:27 momjian Exp $
+ * $Id: nodeHash.c,v 1.56 2001/03/22 06:16:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int nbatch;
int i;
- /* ----------------
- * get state info from node
- * ----------------
+ /*
+ * get state info from node
*/
hashstate = node->hashstate;
if (nbatch > 0)
{
- /* ----------------
- * Open temp files for inner batches, if needed.
- * Note that file buffers are palloc'd in regular executor context.
- * ----------------
+
+ /*
+ * Open temp files for inner batches, if needed. Note that file
+ * buffers are palloc'd in regular executor context.
*/
for (i = 0; i < nbatch; i++)
hashtable->innerBatchFile[i] = BufFileCreateTemp();
}
- /* ----------------
- * set expression context
- * ----------------
+ /*
+ * set expression context
*/
hashkey = node->hashkey;
econtext = hashstate->cstate.cs_ExprContext;
- /* ----------------
- * get all inner tuples and insert into the hash table (or temp files)
- * ----------------
+ /*
+ * get all inner tuples and insert into the hash table (or temp files)
*/
for (;;)
{
ExecClearTuple(slot);
}
- /* ---------------------
- * Return the slot so that we have the tuple descriptor
- * when we need to save/restore them. -Jeff 11 July 1991
- * ---------------------
+ /*
+ * Return the slot so that we have the tuple descriptor when we need
+ * to save/restore them. -Jeff 11 July 1991
*/
return slot;
}
SO1_printf("ExecInitHash: %s\n",
"initializing hash node");
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
node->plan.state = estate;
- /* ----------------
+ /*
* create state structure
- * ----------------
*/
hashstate = makeNode(HashState);
node->hashstate = hashstate;
hashstate->hashtable = NULL;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &hashstate->cstate);
- /* ----------------
+ /*
* initialize our result slot
- * ----------------
*/
ExecInitResultTupleSlot(estate, &hashstate->cstate);
- /* ----------------
+ /*
* initializes child nodes
- * ----------------
*/
outerPlan = outerPlan(node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * initialize tuple type. no need to initialize projection
- * info because this node doesn't do projections
- * ----------------
+ /*
+ * initialize tuple type. no need to initialize projection info
+ * because this node doesn't do projections
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &hashstate->cstate);
hashstate->cstate.cs_ProjInfo = NULL;
HashState *hashstate;
Plan *outerPlan;
- /* ----------------
- * get info from the hash state
- * ----------------
+ /*
+ * get info from the hash state
*/
hashstate = node->hashstate;
- /* ----------------
- * free projection info. no need to free result type info
- * because that came from the outer plan...
- * ----------------
+ /*
+ * free projection info. no need to free result type info because
+ * that came from the outer plan...
*/
ExecFreeProjectionInfo(&hashstate->cstate);
ExecFreeExprContext(&hashstate->cstate);
- /* ----------------
- * shut down the subplan
- * ----------------
+ /*
+ * shut down the subplan
*/
outerPlan = outerPlan(node);
ExecEndNode(outerPlan, (Plan *) node);
int i;
MemoryContext oldcxt;
- /* ----------------
- * Get information about the size of the relation to be hashed
- * (it's the "outer" subtree of this node, but the inner relation of
- * the hashjoin).
- * Caution: this is only the planner's estimates, and so
- * can't be trusted too far. Apply a healthy fudge factor.
- * ----------------
+ /*
+ * Get information about the size of the relation to be hashed (it's
+ * the "outer" subtree of this node, but the inner relation of the
+ * hashjoin).
+ *
+ * Caution: this is only the planner's estimates, and so can't be trusted
+ * too far. Apply a healthy fudge factor.
*/
outerNode = outerPlan(node);
ntuples = outerNode->plan_rows;
nbatch, totalbuckets, nbuckets);
#endif
- /* ----------------
- * Initialize the hash table control block.
- * The hashtable control block is just palloc'd from the executor's
- * per-query memory context.
- * ----------------
+ /*
+ * Initialize the hash table control block.
+ *
+ * The hashtable control block is just palloc'd from the executor's
+ * per-query memory context.
*/
hashtable = (HashJoinTable) palloc(sizeof(HashTableData));
hashtable->nbuckets = nbuckets;
hashtable->innerBatchSize = NULL;
hashtable->outerBatchSize = NULL;
- /* ----------------
- * Get info about the datatype of the hash key.
- * ----------------
+ /*
+ * Get info about the datatype of the hash key.
*/
get_typlenbyval(exprType(node->hashkey),
&hashtable->typLen,
&hashtable->typByVal);
- /* ----------------
- * Create temporary memory contexts in which to keep the hashtable
- * working storage. See notes in executor/hashjoin.h.
- * ----------------
+ /*
+ * Create temporary memory contexts in which to keep the hashtable
+ * working storage. See notes in executor/hashjoin.h.
*/
hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
"HashTableContext",
if (nbatch > 0)
{
- /* ---------------
- * allocate and initialize the file arrays in hashCxt
- * ---------------
+
+ /*
+ * allocate and initialize the file arrays in hashCxt
*/
hashtable->innerBatchFile = (BufFile **)
palloc(nbatch * sizeof(BufFile *));
TupleTableSlot *slot = econtext->ecxt_innertuple;
HeapTuple heapTuple = slot->val;
- /* ----------------
- * decide whether to put the tuple in the hash table or a tmp file
- * ----------------
+ /*
+ * decide whether to put the tuple in the hash table or a tmp file
*/
if (bucketno < hashtable->nbuckets)
{
- /* ---------------
- * put the tuple in hash table
- * ---------------
+
+ /*
+ * put the tuple in hash table
*/
HashJoinTuple hashTuple;
int hashTupleSize;
}
else
{
- /* -----------------
+
+ /*
* put the tuple into a tmp file for other batches
- * -----------------
*/
int batchno = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
(hashtable->totalbuckets - hashtable->nbuckets);
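The in-memory-bucket versus batch-file decision above is plain integer arithmetic. The numbers in this standalone sketch are arbitrary; only the formula mirrors the statement in the code.

#include <stdio.h>

int
main(void)
{
    int         totalbuckets = 1000;    /* virtual buckets over whole inner rel */
    int         nbuckets = 250;         /* buckets that fit in memory */
    int         nbatch = 3;             /* overflow batches on temp files */
    unsigned    hashvals[] = {17, 260, 731, 999};
    int         i;

    for (i = 0; i < 4; i++)
    {
        int         bucketno = (int) (hashvals[i] % (unsigned) totalbuckets);

        if (bucketno < nbuckets)
            printf("hash %u -> bucket %d: keep in memory\n",
                   hashvals[i], bucketno);
        else
        {
            /* spread the remaining buckets evenly over the batch files */
            int         batchno = (nbatch * (bucketno - nbuckets)) /
                                  (totalbuckets - nbuckets);

            printf("hash %u -> bucket %d: write to inner batch file %d\n",
                   hashvals[i], bucketno, batchno);
        }
    }
    return 0;
}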
Datum keyval;
bool isNull;
- /* ----------------
- * Get the join attribute value of the tuple
+ /*
+ * Get the join attribute value of the tuple
*
- * We reset the eval context each time to avoid any possibility
- * of memory leaks in the hash function.
- * ----------------
+ * We reset the eval context each time to avoid any possibility of memory
+ * leaks in the hash function.
*/
ResetExprContext(econtext);
keyval = ExecEvalExprSwitchContext(hashkey, econtext, &isNull, NULL);
- /* ------------------
- * compute the hash function
- * ------------------
+ /*
+ * compute the hash function
*/
if (isNull)
bucketno = 0;
hashTuple = hashTuple->next;
}
- /* ----------------
- * no match
- * ----------------
+ /*
+ * no match
*/
return NULL;
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.37 2001/03/22 03:59:27 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.38 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int i;
bool hashPhaseDone;
- /* ----------------
- * get information from HashJoin node
- * ----------------
+ /*
+ * get information from HashJoin node
*/
hjstate = node->hashjoinstate;
hjclauses = node->hashclauses;
hashPhaseDone = hjstate->hj_hashdone;
dir = estate->es_direction;
- /* -----------------
+ /*
* get information from HashJoin state
- * -----------------
*/
hashtable = hjstate->hj_HashTable;
econtext = hjstate->jstate.cs_ExprContext;
- /* ----------------
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
- * ----------------
+ /*
+ * Check to see if we're still projecting out tuples from a previous
+ * join tuple (because there is a function-returning-set in the
+ * projection expressions). If so, try to project another one.
*/
if (hjstate->jstate.cs_TupFromTlist)
{
hjstate->jstate.cs_TupFromTlist = false;
}
- /* ----------------
- * Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
- * ----------------
+ /*
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous tuple cycle. Note this can't
+ * happen until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
- /* ----------------
- * if this is the first call, build the hash table for inner relation
- * ----------------
+ /*
+ * if this is the first call, build the hash table for inner relation
*/
if (!hashPhaseDone)
{ /* if the hash phase not completed */
if (hashtable == NULL)
{ /* if the hash table has not been created */
- /* ----------------
+
+ /*
* create the hash table
- * ----------------
*/
hashtable = ExecHashTableCreate(hashNode);
hjstate->hj_HashTable = hashtable;
hjstate->hj_InnerHashKey = hashNode->hashkey;
- /* ----------------
+ /*
* execute the Hash node, to build the hash table
- * ----------------
*/
hashNode->hashstate->hashtable = hashtable;
innerTupleSlot = ExecProcNode((Plan *) hashNode, (Plan *) node);
}
hjstate->hj_hashdone = true;
- /* ----------------
- * Open temp files for outer batches, if needed.
- * Note that file buffers are palloc'd in regular executor context.
- * ----------------
+
+ /*
+ * Open temp files for outer batches, if needed. Note that file
+ * buffers are palloc'd in regular executor context.
*/
for (i = 0; i < hashtable->nbatch; i++)
hashtable->outerBatchFile[i] = BufFileCreateTemp();
else if (hashtable == NULL)
return NULL;
- /* ----------------
- * Now get an outer tuple and probe into the hash table for matches
- * ----------------
+ /*
+ * Now get an outer tuple and probe into the hash table for matches
*/
outerTupleSlot = hjstate->jstate.cs_OuterTupleSlot;
outerVar = (Node *) get_leftop(clause);
outerVar);
hjstate->hj_CurTuple = NULL;
- /* ----------------
- * Now we've got an outer tuple and the corresponding hash bucket,
- * but this tuple may not belong to the current batch.
- * This need only be checked in the first pass.
- * ----------------
+ /*
+ * Now we've got an outer tuple and the corresponding hash
+ * bucket, but this tuple may not belong to the current batch.
+ * This need only be checked in the first pass.
*/
if (hashtable->curbatch == 0)
{
/* reset temp memory each time to avoid leaks from qual expr */
ResetExprContext(econtext);
- /* ----------------
- * if we pass the qual, then save state for next call and
- * have ExecProject form the projection, store it
- * in the tuple table, and return the slot.
+ /*
+ * if we pass the qual, then save state for next call and have
+ * ExecProject form the projection, store it in the tuple
+ * table, and return the slot.
*
- * Only the joinquals determine MatchedOuter status,
- * but all quals must pass to actually return the tuple.
- * ----------------
+ * Only the joinquals determine MatchedOuter status, but all
+ * quals must pass to actually return the tuple.
*/
if (ExecQual(joinqual, econtext, false))
{
}
}
- /* ----------------
- * Now the current outer tuple has run out of matches,
- * so check whether to emit a dummy outer-join tuple.
- * If not, loop around to get a new outer tuple.
- * ----------------
+ /*
+ * Now the current outer tuple has run out of matches, so check
+ * whether to emit a dummy outer-join tuple. If not, loop around
+ * to get a new outer tuple.
*/
hjstate->hj_NeedNewOuter = true;
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification was satisfied so we project and
- * return the slot containing the result tuple
- * using ExecProject().
- * ----------------
+
+ /*
+ * qualification was satisfied so we project and return
+ * the slot containing the result tuple using
+ * ExecProject().
*/
TupleTableSlot *result;
Plan *outerNode;
Hash *hashNode;
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
node->join.plan.state = estate;
- /* ----------------
+ /*
* create state structure
- * ----------------
*/
hjstate = makeNode(HashJoinState);
node->hashjoinstate = hjstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &hjstate->jstate);
- /* ----------------
+ /*
* initializes child nodes
- * ----------------
*/
outerNode = outerPlan((Plan *) node);
hashNode = (Hash *) innerPlan((Plan *) node);
ExecInitNode((Plan *) hashNode, estate, (Plan *) node);
#define HASHJOIN_NSLOTS 3
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &hjstate->jstate);
hjstate->hj_OuterTupleSlot = ExecInitExtraTupleSlot(estate);
(int) node->join.jointype);
}
- /* ----------------
- * now for some voodoo. our temporary tuple slot
- * is actually the result tuple slot of the Hash node
- * (which is our inner plan). we do this because Hash
- * nodes don't return tuples via ExecProcNode() -- instead
- * the hash join node uses ExecScanHashBucket() to get
- * at the contents of the hash table. -cim 6/9/91
- * ----------------
+ /*
+ * now for some voodoo. our temporary tuple slot is actually the
+ * result tuple slot of the Hash node (which is our inner plan). we
+ * do this because Hash nodes don't return tuples via ExecProcNode()
+ * -- instead the hash join node uses ExecScanHashBucket() to get at
+ * the contents of the hash table. -cim 6/9/91
*/
{
HashState *hashstate = hashNode->hashstate;
hjstate->hj_HashTupleSlot = slot;
}
- /* ----------------
- * initialize tuple type and projection info
- * ----------------
+ /*
+ * initialize tuple type and projection info
*/
ExecAssignResultTypeFromTL((Plan *) node, &hjstate->jstate);
ExecAssignProjectionInfo((Plan *) node, &hjstate->jstate);
ExecGetTupType(outerNode),
false);
- /* ----------------
- * initialize hash-specific info
- * ----------------
+ /*
+ * initialize hash-specific info
*/
hjstate->hj_hashdone = false;
{
HashJoinState *hjstate;
- /* ----------------
- * get info from the HashJoin state
- * ----------------
+ /*
+ * get info from the HashJoin state
*/
hjstate = node->hashjoinstate;
- /* ----------------
+ /*
* free hash table in case we end plan before all tuples are retrieved
- * ---------------
*/
if (hjstate->hj_HashTable)
{
hjstate->hj_HashTable = NULL;
}
- /* ----------------
- * Free the projection info and the scan attribute info
+ /*
+ * Free the projection info and the scan attribute info
*
- * Note: we don't ExecFreeResultType(hjstate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(hjstate) because the rule manager
+ * depends on the tupType returned by ExecMain(). So for now, this is
+ * freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&hjstate->jstate);
ExecFreeExprContext(&hjstate->jstate);
- /* ----------------
+ /*
* clean up subtrees
- * ----------------
*/
ExecEndNode(outerPlan((Plan *) node), (Plan *) node);
ExecEndNode(innerPlan((Plan *) node), (Plan *) node);
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(hjstate->jstate.cs_ResultTupleSlot);
ExecClearTuple(hjstate->hj_OuterTupleSlot);
hashtable->outerBatchFile[newbatch - 2] = NULL;
}
- /* --------------
- * We can skip over any batches that are empty on either side.
- * Release associated temp files right away.
- * --------------
+ /*
+ * We can skip over any batches that are empty on either side. Release
+ * associated temp files right away.
*/
while (newbatch <= nbatch &&
(innerBatchSize[newbatch - 1] == 0L ||
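Taken together, the nodeHashjoin.c comments above describe two phases: build the hash table from the inner relation on the first call, then probe it with successive outer tuples, null-filling any outer tuple that finds no match when the join type requires it. The toy program below shows only that build/probe/fill shape with invented names; batches, quals, and the executor's slot machinery are deliberately left out, so treat it as a sketch rather than a rendering of this file.

/*
 * Sketch: build a chained hash table from the inner side, probe it with
 * each outer row, and emit a null-filled row for outer rows with no match.
 * Error handling and cleanup are omitted.
 */
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 8

typedef struct HashCell
{
	int			key;
	struct HashCell *next;
} HashCell;

int
main(void)
{
	int			inner[] = {1, 2, 3, 5};
	int			outer[] = {2, 3, 4};
	HashCell   *buckets[NBUCKETS] = {NULL};
	int			i;

	/* build phase: load every inner row into its bucket */
	for (i = 0; i < 4; i++)
	{
		HashCell   *cell = malloc(sizeof(HashCell));

		cell->key = inner[i];
		cell->next = buckets[inner[i] % NBUCKETS];
		buckets[inner[i] % NBUCKETS] = cell;
	}

	/* probe phase: scan the matching bucket for each outer row */
	for (i = 0; i < 3; i++)
	{
		HashCell   *cell;
		int			matched = 0;

		for (cell = buckets[outer[i] % NBUCKETS]; cell != NULL; cell = cell->next)
		{
			if (cell->key == outer[i])
			{
				printf("outer %d matches inner %d\n", outer[i], cell->key);
				matched = 1;
			}
		}
		if (!matched)			/* outer-join fill for an unmatched outer row */
			printf("outer %d has no match, null-filled\n", outer[i]);
	}
	return 0;
}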
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.58 2001/03/22 03:59:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.59 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool bBackward;
int indexNumber;
- /* ----------------
- * extract necessary information from index scan node
- * ----------------
+ /*
+ * extract necessary information from index scan node
*/
estate = node->scan.plan.state;
direction = estate->es_direction;
tuple = &(indexstate->iss_htup);
- /* ----------------
- * ok, now that we have what we need, fetch an index tuple.
- * if scanning this index succeeded then return the
- * appropriate heap tuple.. else return NULL.
- * ----------------
+ /*
+ * ok, now that we have what we need, fetch an index tuple. if
+ * scanning this index succeeded then return the appropriate heap
+ * tuple; else return NULL.
*/
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
indexstate->iss_IndexPtr++;
}
}
- /* ----------------
- * if we get here it means the index scan failed so we
- * are at the end of the scan..
- * ----------------
+
+ /*
+ * if we get here it means the index scan failed so we are at the end
+ * of the scan..
*/
return ExecClearTuple(slot);
}
{
IndexScanState *indexstate = node->indxstate;
- /* ----------------
- * If we have runtime keys and they've not already been set up,
- * do it now.
- * ----------------
+ /*
+ * If we have runtime keys and they've not already been set up, do it
+ * now.
*/
if (indexstate->iss_RuntimeKeyInfo && !indexstate->iss_RuntimeKeysReady)
ExecReScan((Plan *) node, NULL, NULL);
- /* ----------------
- * use IndexNext as access method
- * ----------------
+ /*
+ * use IndexNext as access method
*/
return ExecScan(&node->scan, (ExecScanAccessMtd) IndexNext);
}
indxqual = node->indxqual;
runtimeKeyInfo = indexstate->iss_RuntimeKeyInfo;
- /* ----------------
- * extract information from the node
- * ----------------
+ /*
+ * extract information from the node
*/
numIndices = indexstate->iss_NumIndices;
scanKeys = indexstate->iss_ScanKeys;
numScanKeys = indexstate->iss_NumScanKeys;
- /* ----------------
- * Free the projection info and the scan attribute info
+ /*
+ * Free the projection info and the scan attribute info
*
- * Note: we don't ExecFreeResultType(scanstate)
- * because the rule manager depends on the tupType
- * returned by ExecMain(). So for now, this
- * is freed at end-transaction time. -cim 6/2/91
- * ----------------
+ * Note: we don't ExecFreeResultType(scanstate) because the rule manager
+ * depends on the tupType returned by ExecMain(). So for now, this is
+ * freed at end-transaction time. -cim 6/2/91
*/
ExecFreeProjectionInfo(&scanstate->cstate);
ExecFreeExprContext(&scanstate->cstate);
if (indexstate->iss_RuntimeContext)
FreeExprContext(indexstate->iss_RuntimeContext);
- /* ----------------
- * close the heap and index relations
- * ----------------
+ /*
+ * close the heap and index relations
*/
ExecCloseR((Plan *) node);
- /* ----------------
- * free the scan keys used in scanning the indices
- * ----------------
+ /*
+ * free the scan keys used in scanning the indices
*/
for (i = 0; i < numIndices; i++)
{
pfree(runtimeKeyInfo);
}
- /* ----------------
- * clear out tuple table slots
- * ----------------
+ /*
+ * clear out tuple table slots
*/
ExecClearTuple(scanstate->cstate.cs_ResultTupleSlot);
ExecClearTuple(scanstate->css_ScanTupleSlot);
HeapScanDesc currentScanDesc;
ScanDirection direction;
- /* ----------------
- * assign execution state to node
- * ----------------
+ /*
+ * assign execution state to node
*/
node->scan.plan.state = estate;
- /* --------------------------------
- * Part 1) initialize scan state
+ /*
+ * Part 1) initialize scan state
*
- * create new CommonScanState for node
- * --------------------------------
+ * create new CommonScanState for node
*/
scanstate = makeNode(CommonScanState);
node->scan.scanstate = scanstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * + create expression context for node
- * ----------------
+ * create expression context for node
*/
ExecAssignExprContext(estate, &scanstate->cstate);
#define INDEXSCAN_NSLOTS 3
- /* ----------------
- * tuple table initialization
- * ----------------
+
+ /*
+ * tuple table initialization
*/
ExecInitResultTupleSlot(estate, &scanstate->cstate);
ExecInitScanTupleSlot(estate, scanstate);
- /* ----------------
- * initialize projection info. result type comes from scan desc
- * below..
- * ----------------
+ /*
+ * initialize projection info. result type comes from scan desc
+ * below..
*/
ExecAssignProjectionInfo((Plan *) node, &scanstate->cstate);
- /* --------------------------------
- * Part 2) initialize index scan state
- *
- * create new IndexScanState for node
- * --------------------------------
- */
+ /*
+ * Part 2) initialize index scan state
+ *
+ * create new IndexScanState for node
+ */
indexstate = makeNode(IndexScanState);
indexstate->iss_NumIndices = 0;
indexstate->iss_IndexPtr = -1;
node->indxstate = indexstate;
- /* ----------------
- * get the index node information
- * ----------------
+ /*
+ * get the index node information
*/
indxid = node->indxid;
numIndices = length(indxid);
CXT1_printf("ExecInitIndexScan: context is %d\n", CurrentMemoryContext);
- /* ----------------
- * scanKeys is used to keep track of the ScanKey's. This is needed
- * because a single scan may use several indices and each index has
- * its own ScanKey.
- * ----------------
+ /*
+ * scanKeys is used to keep track of the ScanKey's. This is needed
+ * because a single scan may use several indices and each index has
+ * its own ScanKey.
*/
numScanKeys = (int *) palloc(numIndices * sizeof(int));
scanKeys = (ScanKey *) palloc(numIndices * sizeof(ScanKey));
relationDescs = (RelationPtr) palloc(numIndices * sizeof(Relation));
scanDescs = (IndexScanDescPtr) palloc(numIndices * sizeof(IndexScanDesc));
- /* ----------------
- * initialize space for runtime key info (may not be needed)
- * ----------------
+ /*
+ * initialize space for runtime key info (may not be needed)
*/
have_runtime_keys = false;
runtimeKeyInfo = (int **) palloc(numIndices * sizeof(int *));
- /* ----------------
- * build the index scan keys from the index qualification
- * ----------------
+ /*
+ * build the index scan keys from the index qualification
*/
indxqual = node->indxqual;
for (i = 0; i < numIndices; i++)
CXT1_printf("ExecInitIndexScan: context is %d\n", CurrentMemoryContext);
- /* ----------------
- * for each opclause in the given qual,
- * convert each qual's opclause into a single scan key
- * ----------------
+ /*
+ * for each opclause in the given qual, convert each qual's
+ * opclause into a single scan key
*/
for (j = 0; j < n_keys; j++)
{
Oid opid; /* operator id used in scan */
Datum scanvalue = 0; /* value used in scan (if const) */
- /* ----------------
- * extract clause information from the qualification
- * ----------------
+ /*
+ * extract clause information from the qualification
*/
clause = nth(j, qual);
opid = op->opid;
- /* ----------------
- * Here we figure out the contents of the index qual.
- * The usual case is (var op const) or (const op var)
- * which means we form a scan key for the attribute
- * listed in the var node and use the value of the const.
+ /*
+ * Here we figure out the contents of the index qual. The
+ * usual case is (var op const) or (const op var) which means
+ * we form a scan key for the attribute listed in the var node
+ * and use the value of the const.
*
- * If we don't have a const node, then it means that
- * one of the var nodes refers to the "scan" tuple and
- * is used to determine which attribute to scan, and the
- * other expression is used to calculate the value used in
- * scanning the index.
+ * If we don't have a const node, then it means that one of the
+ * var nodes refers to the "scan" tuple and is used to
+ * determine which attribute to scan, and the other expression
+ * is used to calculate the value used in scanning the index.
*
- * This means our index scan's scan key is a function of
- * information obtained during the execution of the plan
- * in which case we need to recalculate the index scan key
- * at run time.
+ * This means our index scan's scan key is a function of
+ * information obtained during the execution of the plan in
+ * which case we need to recalculate the index scan key at run
+ * time.
*
- * Hence, we set have_runtime_keys to true and then set
- * the appropriate flag in run_keys to LEFT_OP or RIGHT_OP.
- * The corresponding scan keys are recomputed at run time.
+ * Hence, we set have_runtime_keys to true and then set the
+ * appropriate flag in run_keys to LEFT_OP or RIGHT_OP. The
+ * corresponding scan keys are recomputed at run time.
*
- * XXX Although this code *thinks* it can handle an indexqual
- * with the indexkey on either side, in fact it cannot.
- * Indexscans only work with quals that have the indexkey on
- * the left (the planner/optimizer makes sure it never passes
- * anything else). The reason: the scankey machinery has no
- * provision for distinguishing which side of the operator is
- * the indexed attribute and which is the compared-to constant.
- * It just assumes that the attribute is on the left :-(
+ * XXX Although this code *thinks* it can handle an indexqual
+ * with the indexkey on either side, in fact it cannot.
+ * Indexscans only work with quals that have the indexkey on
+ * the left (the planner/optimizer makes sure it never passes
+ * anything else). The reason: the scankey machinery has no
+ * provision for distinguishing which side of the operator is
+ * the indexed attribute and which is the compared-to
+ * constant. It just assumes that the attribute is on the left
+ * :-(
*
- * I am leaving this code able to support both ways, even though
- * half of it is dead code, on the off chance that someone will
- * fix the scankey machinery someday --- tgl 8/11/99.
- * ----------------
+ * I am leaving this code able to support both ways, even though
+ * half of it is dead code, on the off chance that someone
+ * will fix the scankey machinery someday --- tgl 8/11/99.
*/
scanvar = NO_OP;
run_keys[j] = NO_OP;
- /* ----------------
- * determine information in leftop
- * ----------------
+ /*
+ * determine information in leftop
*/
leftop = (Node *) get_leftop(clause);
if (IsA(leftop, Var) &&var_is_rel((Var *) leftop))
{
- /* ----------------
- * if the leftop is a "rel-var", then it means
- * that it is a var node which tells us which
- * attribute to use for our scan key.
- * ----------------
+
+ /*
+ * if the leftop is a "rel-var", then it means that it is
+ * a var node which tells us which attribute to use for
+ * our scan key.
*/
varattno = ((Var *) leftop)->varattno;
scanvar = LEFT_OP;
}
else if (IsA(leftop, Const))
{
- /* ----------------
- * if the leftop is a const node then it means
- * it identifies the value to place in our scan key.
- * ----------------
+
+ /*
+ * if the leftop is a const node then it means it
+ * identifies the value to place in our scan key.
*/
scanvalue = ((Const *) leftop)->constvalue;
if (((Const *) leftop)->constisnull)
{
bool isnull;
- /* ----------------
- * if the leftop is a Param node then it means
- * it identifies the value to place in our scan key.
- * ----------------
+ /*
+ * if the leftop is a Param node then it means it
+ * identifies the value to place in our scan key.
*/
/* Life was so easy before ... subselects */
}
else
{
- /* ----------------
- * otherwise, the leftop contains an expression evaluable
- * at runtime to figure out the value to place in our
- * scan key.
- * ----------------
+
+ /*
+ * otherwise, the leftop contains an expression evaluable
+ * at runtime to figure out the value to place in our scan
+ * key.
*/
have_runtime_keys = true;
run_keys[j] = LEFT_OP;
}
- /* ----------------
- * now determine information in rightop
- * ----------------
+ /*
+ * now determine information in rightop
*/
rightop = (Node *) get_rightop(clause);
if (IsA(rightop, Var) &&var_is_rel((Var *) rightop))
{
- /* ----------------
- * here we make sure only one op identifies the
- * scan-attribute...
- * ----------------
+
+ /*
+ * here we make sure only one op identifies the
+ * scan-attribute...
*/
if (scanvar == LEFT_OP)
elog(ERROR, "ExecInitIndexScan: %s",
"both left and right op's are rel-vars");
- /* ----------------
- * if the rightop is a "rel-var", then it means
- * that it is a var node which tells us which
- * attribute to use for our scan key.
- * ----------------
+ /*
+ * if the rightop is a "rel-var", then it means that it is
+ * a var node which tells us which attribute to use for
+ * our scan key.
*/
varattno = ((Var *) rightop)->varattno;
scanvar = RIGHT_OP;
}
else if (IsA(rightop, Const))
{
- /* ----------------
- * if the rightop is a const node then it means
- * it identifies the value to place in our scan key.
- * ----------------
+
+ /*
+ * if the rightop is a const node then it means it
+ * identifies the value to place in our scan key.
*/
scanvalue = ((Const *) rightop)->constvalue;
if (((Const *) rightop)->constisnull)
{
bool isnull;
- /* ----------------
- * if the rightop is a Param node then it means
- * it identifies the value to place in our scan key.
- * ----------------
+ /*
+ * if the rightop is a Param node then it means it
+ * identifies the value to place in our scan key.
*/
/* Life was so easy before ... subselects */
}
else
{
- /* ----------------
- * otherwise, the rightop contains an expression evaluable
- * at runtime to figure out the value to place in our
- * scan key.
- * ----------------
+
+ /*
+ * otherwise, the rightop contains an expression evaluable
+ * at runtime to figure out the value to place in our scan
+ * key.
*/
have_runtime_keys = true;
run_keys[j] = RIGHT_OP;
}
- /* ----------------
- * now check that at least one op tells us the scan
- * attribute...
- * ----------------
+ /*
+ * now check that at least one op tells us the scan
+ * attribute...
*/
if (scanvar == NO_OP)
elog(ERROR, "ExecInitIndexScan: %s",
"neither leftop nor rightop refer to scan relation");
- /* ----------------
- * initialize the scan key's fields appropriately
- * ----------------
+ /*
+ * initialize the scan key's fields appropriately
*/
ScanKeyEntryInitialize(&scan_keys[j],
flags,
scanvalue); /* constant */
}
- /* ----------------
- * store the key information into our arrays.
- * ----------------
+ /*
+ * store the key information into our arrays.
*/
numScanKeys[i] = n_keys;
scanKeys[i] = scan_keys;
indexstate->iss_ScanKeys = scanKeys;
indexstate->iss_NumScanKeys = numScanKeys;
- /* ----------------
- * If all of our keys have the form (op var const) , then we have no
- * runtime keys so we store NULL in the runtime key info.
- * Otherwise runtime key info contains an array of pointers
- * (one for each index) to arrays of flags (one for each key)
- * which indicate that the qual needs to be evaluated at runtime.
- * -cim 10/24/89
+ /*
+ * If all of our keys have the form (op var const), then we have no
+ * runtime keys so we store NULL in the runtime key info. Otherwise
+ * runtime key info contains an array of pointers (one for each index)
+ * to arrays of flags (one for each key) which indicate that the qual
+ * needs to be evaluated at runtime. -cim 10/24/89
*
- * If we do have runtime keys, we need an ExprContext to evaluate them;
- * the node's standard context won't do because we want to reset that
- * context for every tuple. So, build another context just like the
- * other one...
- * -tgl 7/11/00
- * ----------------
+ * If we do have runtime keys, we need an ExprContext to evaluate them;
+ * the node's standard context won't do because we want to reset that
+ * context for every tuple. So, build another context just like the
+ * other one... -tgl 7/11/00
*/
if (have_runtime_keys)
{
pfree(runtimeKeyInfo);
}
- /* ----------------
- * get the range table and direction information
- * from the execution state (these are needed to
- * open the relations).
- * ----------------
+ /*
+ * get the range table and direction information from the execution
+ * state (these are needed to open the relations).
*/
rangeTable = estate->es_range_table;
direction = estate->es_direction;
- /* ----------------
- * open the base relation
- * ----------------
+ /*
+ * open the base relation
*/
relid = node->scan.scanrelid;
rtentry = rt_fetch(relid, rangeTable);
scanstate->css_currentRelation = currentRelation;
scanstate->css_currentScanDesc = currentScanDesc;
- /* ----------------
- * get the scan type from the relation descriptor.
- * ----------------
+ /*
+ * get the scan type from the relation descriptor.
*/
ExecAssignScanType(scanstate, RelationGetDescr(currentRelation), false);
ExecAssignResultTypeFromTL((Plan *) node, &scanstate->cstate);
- /* ----------------
- * open the index relations and initialize
- * relation and scan descriptors.
- * ----------------
+ /*
+ * open the index relations and initialize relation and scan
+ * descriptors.
*/
for (i = 0; i < numIndices; i++)
{
indexstate->iss_RelationDescs = relationDescs;
indexstate->iss_ScanDescs = scanDescs;
- /* ----------------
- * all done.
- * ----------------
+ /*
+ * all done.
*/
return TRUE;
}
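The long comment in ExecInitIndexScan above amounts to a classification of each side of an index qual: a plain variable of the scanned relation names the indexed attribute, a constant supplies the scan value immediately, and anything else becomes a runtime key that must be re-evaluated when the scan restarts. The classifier below is only an illustration of that decision, under invented type names; the error case of rel-vars on both sides and all Param handling are omitted.

/*
 * Sketch: classify one (leftop op rightop) index qual.  Invented names;
 * the NO_OP/LEFT_OP/RIGHT_OP labels only echo the flags used above.
 */
#include <stdio.h>

typedef enum { OPERAND_REL_VAR, OPERAND_CONST, OPERAND_EXPR } OperandKind;
typedef enum { NO_OP, LEFT_OP, RIGHT_OP } OperandSide;

typedef struct
{
	OperandSide scanvar;		/* which side names the indexed attribute */
	OperandSide run_key;		/* which side must be evaluated at run time */
} ScanKeyPlan;

static ScanKeyPlan
classify_qual(OperandKind leftop, OperandKind rightop)
{
	ScanKeyPlan plan = {NO_OP, NO_OP};

	if (leftop == OPERAND_REL_VAR)
		plan.scanvar = LEFT_OP;	/* attribute comes from the left side */
	else if (leftop != OPERAND_CONST)
		plan.run_key = LEFT_OP;	/* left side must be computed per rescan */
	/* a constant needs no bookkeeping: its value is available right away */

	if (rightop == OPERAND_REL_VAR)
		plan.scanvar = RIGHT_OP;	/* (check for two rel-vars omitted) */
	else if (rightop != OPERAND_CONST)
		plan.run_key = RIGHT_OP;	/* right side must be computed per rescan */

	return plan;
}

int
main(void)
{
	/* usual case: indexed column on the left, constant on the right */
	ScanKeyPlan p1 = classify_qual(OPERAND_REL_VAR, OPERAND_CONST);

	/* runtime-key case: indexed column compared to an outer expression */
	ScanKeyPlan p2 = classify_qual(OPERAND_REL_VAR, OPERAND_EXPR);

	printf("p1: scanvar=%d run_key=%d\n", p1.scanvar, p1.run_key);
	printf("p2: scanvar=%d run_key=%d\n", p2.scanvar, p2.run_key);
	return 0;
}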
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.4 2001/03/22 03:59:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.5 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Plan *outerPlan;
long netlimit;
- /* ----------------
- * get information from the node
- * ----------------
+ /*
+ * get information from the node
*/
limitstate = node->limitstate;
direction = node->plan.state->es_direction;
outerPlan = outerPlan((Plan *) node);
resultTupleSlot = limitstate->cstate.cs_ResultTupleSlot;
- /* ----------------
- * If first call for this scan, compute limit/offset.
- * (We can't do this any earlier, because parameters from upper nodes
- * may not be set until now.)
- * ----------------
+ /*
+ * If first call for this scan, compute limit/offset. (We can't do
+ * this any earlier, because parameters from upper nodes may not be
+ * set until now.)
*/
if (!limitstate->parmsSet)
recompute_limits(node);
netlimit = limitstate->offset + limitstate->count;
- /* ----------------
- * now loop, returning only desired tuples.
- * ----------------
+ /*
+ * now loop, returning only desired tuples.
*/
for (;;)
{
- /*----------------
- * If we have reached the subplan EOF or the limit, just quit.
+
+ /*
+ * If we have reached the subplan EOF or the limit, just quit.
*
* NOTE: when scanning forwards, we must fetch one tuple beyond the
- * COUNT limit before we can return NULL, else the subplan won't be
- * properly positioned to start going backwards. Hence test here
- * is for position > netlimit not position >= netlimit.
+ * COUNT limit before we can return NULL, else the subplan won't
+ * be properly positioned to start going backwards. Hence test
+ * here is for position > netlimit not position >= netlimit.
*
* Similarly, when scanning backwards, we must re-fetch the last
- * tuple in the offset region before we can return NULL. Otherwise
- * we won't be correctly aligned to start going forward again. So,
- * although you might think we can quit when position = offset + 1,
- * we have to fetch a subplan tuple first, and then exit when
- * position = offset.
- *----------------
+ * tuple in the offset region before we can return NULL.
+ * Otherwise we won't be correctly aligned to start going forward
+ * again. So, although you might think we can quit when position
+ * = offset + 1, we have to fetch a subplan tuple first, and then
+ * exit when position = offset.
*/
if (ScanDirectionIsForward(direction))
{
if (limitstate->position <= limitstate->offset)
return NULL;
}
- /* ----------------
- * fetch a tuple from the outer subplan
- * ----------------
+
+ /*
+ * fetch a tuple from the outer subplan
*/
slot = ExecProcNode(outerPlan, (Plan *) node);
if (TupIsNull(slot))
}
limitstate->atEnd = false;
- /* ----------------
- * Now, is this a tuple we want? If not, loop around to fetch
- * another tuple from the subplan.
- * ----------------
+ /*
+ * Now, is this a tuple we want? If not, loop around to fetch
+ * another tuple from the subplan.
*/
if (limitstate->position > limitstate->offset &&
(limitstate->noCount || limitstate->position <= netlimit))
LimitState *limitstate;
Plan *outerPlan;
- /* ----------------
- * assign execution state to node
- * ----------------
+ /*
+ * assign execution state to node
*/
node->plan.state = estate;
- /* ----------------
- * create new LimitState for node
- * ----------------
+ /*
+ * create new LimitState for node
*/
limitstate = makeNode(LimitState);
node->limitstate = limitstate;
limitstate->parmsSet = false;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * Limit nodes never call ExecQual or ExecProject, but they need
- * an exprcontext anyway to evaluate the limit/offset parameters in.
- * ----------------
+ * Limit nodes never call ExecQual or ExecProject, but they need an
+ * exprcontext anyway to evaluate the limit/offset parameters in.
*/
ExecAssignExprContext(estate, &limitstate->cstate);
#define LIMIT_NSLOTS 1
- /* ------------
+
+ /*
* Tuple table initialization
- * ------------
*/
ExecInitResultTupleSlot(estate, &limitstate->cstate);
- /* ----------------
- * then initialize outer plan
- * ----------------
+ /*
+ * then initialize outer plan
*/
outerPlan = outerPlan((Plan *) node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * limit nodes do no projections, so initialize
- * projection info for this node appropriately
- * ----------------
+ /*
+ * limit nodes do no projections, so initialize projection info for
+ * this node appropriately
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &limitstate->cstate);
limitstate->cstate.cs_ProjInfo = NULL;
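The nodeLimit.c comments above reduce to a position test: rows are skipped while the position is still inside the OFFSET region, and the scan stops once the position passes offset + count (the netlimit). The loop below shows just that forward-scan arithmetic on made-up data; the backward-scan repositioning rules discussed in the hunks (fetching one tuple beyond the limit, re-fetching the last offset tuple) are not modelled.

/*
 * Sketch: forward-only LIMIT/OFFSET filtering.  position is 1-based;
 * rows are returned while offset < position <= offset + count.
 */
#include <stdio.h>

int
main(void)
{
	int			rows[] = {10, 20, 30, 40, 50, 60};
	int			nrows = 6;
	long		offset = 2;		/* skip the first two rows */
	long		count = 3;		/* then return at most three */
	long		netlimit = offset + count;
	long		position = 0;
	int			i;

	for (i = 0; i < nrows; i++)
	{
		position++;				/* one more row fetched from the subplan */
		if (position > netlimit)
			break;				/* past the LIMIT: stop fetching */
		if (position <= offset)
			continue;			/* still inside the OFFSET region: skip */
		printf("returning row %d (position %ld)\n", rows[i], position);
	}
	return 0;
}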
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.34 2001/03/22 03:59:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.35 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
TupleTableSlot *slot;
bool should_free;
- /* ----------------
- * get state info from node
- * ----------------
+ /*
+ * get state info from node
*/
matstate = node->matstate;
estate = node->plan.state;
dir = estate->es_direction;
tuplestorestate = (Tuplestorestate *) matstate->tuplestorestate;
- /* ----------------
- * If first time through, read all tuples from outer plan and
- * pass them to tuplestore.c.
- * Subsequent calls just fetch tuples from tuplestore.
- * ----------------
+ /*
+ * If first time through, read all tuples from outer plan and pass
+ * them to tuplestore.c. Subsequent calls just fetch tuples from
+ * tuplestore.
*/
if (tuplestorestate == NULL)
{
Plan *outerNode;
- /* ----------------
- * Want to scan subplan in the forward direction while creating
- * the stored data. (Does setting my direction actually affect
- * the subplan? I bet this is useless code...)
- * ----------------
+ /*
+ * Want to scan subplan in the forward direction while creating
+ * the stored data. (Does setting my direction actually affect
+ * the subplan? I bet this is useless code...)
*/
estate->es_direction = ForwardScanDirection;
- /* ----------------
- * Initialize tuplestore module.
- * ----------------
+ /*
+ * Initialize tuplestore module.
*/
tuplestorestate = tuplestore_begin_heap(true, /* randomAccess */
SortMem);
matstate->tuplestorestate = (void *) tuplestorestate;
- /* ----------------
- * Scan the subplan and feed all the tuples to tuplestore.
- * ----------------
+ /*
+ * Scan the subplan and feed all the tuples to tuplestore.
*/
outerNode = outerPlan((Plan *) node);
ExecClearTuple(slot);
}
- /* ----------------
- * Complete the store.
- * ----------------
+ /*
+ * Complete the store.
*/
tuplestore_donestoring(tuplestorestate);
- /* ----------------
- * restore to user specified direction
- * ----------------
+ /*
+ * restore to user specified direction
*/
estate->es_direction = dir;
}
- /* ----------------
- * Get the first or next tuple from tuplestore.
- * Returns NULL if no more tuples.
- * ----------------
+ /*
+ * Get the first or next tuple from tuplestore. Returns NULL if no
+ * more tuples.
*/
slot = (TupleTableSlot *) matstate->csstate.cstate.cs_ResultTupleSlot;
heapTuple = tuplestore_getheaptuple(tuplestorestate,
MaterialState *matstate;
Plan *outerPlan;
- /* ----------------
- * assign the node's execution state
- * ----------------
+ /*
+ * assign the node's execution state
*/
node->plan.state = estate;
- /* ----------------
+ /*
* create state structure
- * ----------------
*/
matstate = makeNode(MaterialState);
matstate->tuplestorestate = NULL;
node->matstate = matstate;
- /* ----------------
- * Miscellaneous initialization
+ /*
+ * Miscellaneous initialization
*
- * Materialization nodes don't need ExprContexts because
- * they never call ExecQual or ExecProject.
- * ----------------
+ * Materialization nodes don't need ExprContexts because they never call
+ * ExecQual or ExecProject.
*/
#define MATERIAL_NSLOTS 1
- /* ----------------
+
+ /*
* tuple table initialization
*
- * material nodes only return tuples from their materialized
- * relation.
- * ----------------
+ * material nodes only return tuples from their materialized relation.
*/
ExecInitResultTupleSlot(estate, &matstate->csstate.cstate);
ExecInitScanTupleSlot(estate, &matstate->csstate);
- /* ----------------
+ /*
* initializes child nodes
- * ----------------
*/
outerPlan = outerPlan((Plan *) node);
ExecInitNode(outerPlan, estate, (Plan *) node);
- /* ----------------
- * initialize tuple type. no need to initialize projection
- * info because this node doesn't do projections.
- * ----------------
+ /*
+ * initialize tuple type. no need to initialize projection info
+ * because this node doesn't do projections.
*/
ExecAssignResultTypeFromOuterPlan((Plan *) node, &matstate->csstate.cstate);
ExecAssignScanTypeFromOuterPlan((Plan *) node, &matstate->csstate);
MaterialState *matstate;
Plan *outerPlan;
- /* ----------------
- * get info from the material state
- * ----------------
+ /*
+ * get info from the material state
*/
matstate = node->matstate;
- /* ----------------
- * shut down the subplan
- * ----------------
+ /*
+ * shut down the subplan
*/
outerPlan = outerPlan((Plan *) node);
ExecEndNode(outerPlan, (Plan *) node);
- /* ----------------
- * clean out the tuple table
- * ----------------
+ /*
+ * clean out the tuple table
*/
ExecClearTuple(matstate->csstate.css_ScanTupleSlot);
- /* ----------------
- * Release tuplestore resources
- * ----------------
+ /*
+ * Release tuplestore resources
*/
if (matstate->tuplestorestate != NULL)
tuplestore_end((Tuplestorestate *) matstate->tuplestorestate);
{
MaterialState *matstate = node->matstate;
- /* ----------------
- * if we haven't materialized yet, just return.
- * ----------------
+ /*
+ * if we haven't materialized yet, just return.
*/
if (!matstate->tuplestorestate)
return;
{
MaterialState *matstate = node->matstate;
- /* ----------------
- * if we haven't materialized yet, just return.
- * ----------------
+ /*
+ * if we haven't materialized yet, just return.
*/
if (!matstate->tuplestorestate)
return;
- /* ----------------
- * restore the scan to the previously marked position
- * ----------------
+ /*
+ * restore the scan to the previously marked position
*/
tuplestore_restorepos((Tuplestorestate *) matstate->tuplestorestate);
}
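The nodeMaterial.c hunks describe a node that drains its subplan into a tuplestore on the first call and afterwards serves every fetch, mark, and restore from that store alone. The sketch below imitates that shape with a plain array standing in for tuplestore.c; the pretend child plan and all names are invented for the example.

/*
 * Sketch: materialize a child's output into an array on first use, then
 * read, mark, and restore positions against the array only.
 */
#include <stdio.h>

#define MAXROWS 100

static int	next_child_row = 0;

static int
child_next(void)				/* pretend subplan: five rows, then -1 */
{
	static const int rows[] = {7, 8, 9, 10, 11};

	return (next_child_row < 5) ? rows[next_child_row++] : -1;
}

int
main(void)
{
	int			store[MAXROWS];
	int			nstored = 0;
	int			readpos;
	int			markpos;
	int			row;

	/* first call: scan the subplan and feed all its rows to the store */
	while ((row = child_next()) != -1)
		store[nstored++] = row;

	/* later calls just fetch from the store */
	for (readpos = 0; readpos < 3; readpos++)
		printf("read %d\n", store[readpos]);

	markpos = readpos;			/* mark the current read position */

	for (; readpos < nstored; readpos++)
		printf("read %d\n", store[readpos]);

	readpos = markpos;			/* restore to the previously marked position */
	printf("after restore, next row is %d\n", store[readpos]);
	return 0;
}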
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.43 2001/03/22 03:59:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.44 2001/03/22 06:16:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Oid oprleft,
oprright;
- /* ----------------
- * qualList is a list: ((op .. ..) ...)
- * first we make a copy of it. copyObject() makes a deep copy
- * so let's use it instead of the old fashoned lispCopy()...
- * ----------------
+ /*
+ * qualList is a list: ((op .. ..) ...)
+ *
+ * first we make a copy of it. copyObject() makes a deep copy so let's
+ * use it instead of the old-fashioned lispCopy()...
*/
qualCopy = (List *) copyObject((Node *) qualList);
foreach(qualcdr, qualCopy)
{
- /* ----------------
- * first get the current (op .. ..) list
- * ----------------
+
+ /*
+ * first get the current (op .. ..) list
*/
qual = lfirst(qualcdr);
- /* ----------------
- * now get at the op
- * ----------------
+ /*
+ * now get at the op
*/
op = (Oper *) qual->oper;
if (!IsA(op, Oper))
elog(ERROR, "MJFormSkipQual: op not an Oper!");
- /* ----------------
- * Get the declared left and right operand types of the operator.
- * Note we do *not* use the actual operand types, since those might
- * be different in scenarios with binary-compatible data types.
- * There should be "<" and ">" operators matching a mergejoinable
- * "=" operator's declared operand types, but we might not find them
- * if we search with the actual operand types.
- * ----------------
+ /*
+ * Get the declared left and right operand types of the operator.
+ * Note we do *not* use the actual operand types, since those
+ * might be different in scenarios with binary-compatible data
+ * types. There should be "<" and ">" operators matching a
+ * mergejoinable "=" operator's declared operand types, but we
+ * might not find them if we search with the actual operand types.
*/
optup = SearchSysCache(OPEROID,
ObjectIdGetDatum(op->opno),
oprright = opform->oprright;
ReleaseSysCache(optup);
- /* ----------------
- * Now look up the matching "<" or ">" operator. If there isn't one,
- * whoever marked the "=" operator mergejoinable was a loser.
- * ----------------
+ /*
+ * Now look up the matching "<" or ">" operator. If there isn't
+ * one, whoever marked the "=" operator mergejoinable was a loser.
*/
optup = SearchSysCache(OPERNAME,
PointerGetDatum(replaceopname),
op->opno, replaceopname);
opform = (Form_pg_operator) GETSTRUCT(optup);
- /* ----------------
- * And replace the data in the copied operator node.
- * ----------------
+ /*
+ * And replace the data in the copied operator node.
*/
op->opno = optup->t_data->t_oid;
op->opid = opform->oprcode;
*/
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
- /* ----------------
- * for each pair of clauses, test them until
- * our compare conditions are satisfied.
- * if we reach the end of the list, none of our key greater-than
- * conditions were satisfied so we return false.
- * ----------------
+ /*
+ * for each pair of clauses, test them until our compare conditions
+ * are satisfied. if we reach the end of the list, none of our key
+ * greater-than conditions were satisfied so we return false.
*/
result = false; /* assume 'false' result */
Datum const_value;
bool isNull;
- /* ----------------
- * first test if our compare clause is satisfied.
- * if so then return true.
+ /*
+ * first test if our compare clause is satisfied. if so then
+ * return true.
*
- * A NULL result is considered false.
- * ----------------
+ * A NULL result is considered false.
*/
const_value = ExecEvalExpr((Node *) lfirst(clause), econtext,
&isNull, NULL);
break;
}
- /* ----------------
- * ok, the compare clause failed so we test if the keys
- * are equal... if key1 != key2, we return false.
- * otherwise key1 = key2 so we move on to the next pair of keys.
- * ----------------
+ /*
+ * ok, the compare clause failed so we test if the keys are
+ * equal... if key1 != key2, we return false. otherwise key1 =
+ * key2 so we move on to the next pair of keys.
*/
const_value = ExecEvalExpr((Node *) lfirst(eqclause),
econtext,
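The comments above describe MergeCompare as a column-by-column walk over the merge keys: test the compare ("skip") condition first and return true as soon as it holds, otherwise require the pair to be equal before moving on, and return false if the list is exhausted. The function below captures only that control flow over integer keys; it is an illustration with invented names, and the NULL-is-false rule and expression evaluation are not modelled.

/*
 * Sketch: pairwise key comparison in the spirit of the comments above.
 * Returns true as soon as a key column satisfies the "greater than"
 * condition, false on the first unequal-but-not-greater column or when
 * the key list runs out.
 */
#include <stdio.h>
#include <stdbool.h>

static bool
example_merge_compare(const int *akeys, const int *bkeys, int nkeys)
{
	int			i;

	for (i = 0; i < nkeys; i++)
	{
		if (akeys[i] > bkeys[i])
			return true;		/* compare clause satisfied */
		if (akeys[i] != bkeys[i])
			return false;		/* not equal either: give up */
		/* equal on this column: move on to the next pair of keys */
	}
	return false;				/* no greater-than condition was satisfied */
}

int
main(void)
{
	int			a[] = {1, 5, 2};
	int			b[] = {1, 4, 9};
	int			c[] = {1, 5, 2};

	printf("a vs b: %d\n", example_merge_compare(a, b, 3));	/* 1: 5 > 4 */
	printf("a vs c: %d\n", example_merge_compare(a, c, 3));	/* 0: all equal */
	return 0;
}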
bool doFillOuter;
bool doFillInner;
- /* ----------------
- * get information from node
- * ----------------
+ /*
+ * get information from node
*/
mergestate = node->mergestate;
estate = node->join.plan.state;
innerSkipQual = mergestate->mj_OuterSkipQual;
}
- /* ----------------
- * Check to see if we're still projecting out tuples from a previous
- * join tuple (because there is a function-returning-set in the
- * projection expressions). If so, try to project another one.
- * ----------------
+ /*
+ * Check to see if we're still projecting out tuples from a previous
+ * join tuple (because there is a function-returning-set in the
+ * projection expressions). If so, try to project another one.
*/
if (mergestate->jstate.cs_TupFromTlist)
{
mergestate->jstate.cs_TupFromTlist = false;
}
- /* ----------------
- * Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
- * happen until we're done projecting out tuples from a join tuple.
- * ----------------
+ /*
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous tuple cycle. Note this can't
+ * happen until we're done projecting out tuples from a join tuple.
*/
ResetExprContext(econtext);
- /* ----------------
- * ok, everything is setup.. let's go to work
- * ----------------
+ /*
+ * ok, everything is set up.. let's go to work
*/
for (;;)
{
- /* ----------------
- * get the current state of the join and do things accordingly.
- * Note: The join states are highlighted with 32-* comments for
- * improved readability.
- * ----------------
+
+ /*
+ * get the current state of the join and do things accordingly.
+ * Note: The join states are highlighted with 32-* comments for
+ * improved readability.
*/
MJ_dump(mergestate);
return NULL;
}
- /* ----------------
- * OK, we have the initial tuples. Begin by skipping
- * unmatched inner tuples.
- * ----------------
+ /*
+ * OK, we have the initial tuples. Begin by skipping
+ * unmatched inner tuples.
*/
mergestate->mj_JoinState = EXEC_MJ_SKIPINNER_BEGIN;
break;
if (qualResult)
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
}
}
- /* ----------------
- * now we get the next inner tuple, if any
- * ----------------
+ /*
+ * now we get the next inner tuple, if any
*/
innerTupleSlot = ExecProcNode(innerPlan, (Plan *) node);
mergestate->mj_InnerTupleSlot = innerTupleSlot;
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
}
}
- /* ----------------
- * now we get the next outer tuple, if any
- * ----------------
+ /*
+ * now we get the next outer tuple, if any
*/
outerTupleSlot = ExecProcNode(outerPlan, (Plan *) node);
mergestate->mj_OuterTupleSlot = outerTupleSlot;
MJ_DEBUG_PROC_NODE(outerTupleSlot);
mergestate->mj_MatchedOuter = false;
- /* ----------------
- * if the outer tuple is null then we are done with the
- * join, unless we have inner tuples we need to null-fill.
- * ----------------
+ /*
+ * if the outer tuple is null then we are done with the
+ * join, unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
case EXEC_MJ_TESTOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_TESTOUTER\n");
- /* ----------------
- * here we compare the outer tuple with the marked inner tuple
- * ----------------
+ /*
+ * here we compare the outer tuple with the marked inner
+ * tuple
*/
ResetExprContext(econtext);
case EXEC_MJ_SKIPOUTER_BEGIN:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPOUTER_BEGIN\n");
- /* ----------------
- * before we advance, make sure the current tuples
- * do not satisfy the mergeclauses. If they do, then
- * we update the marked tuple and go join them.
- * ----------------
+ /*
+ * before we advance, make sure the current tuples do not
+ * satisfy the mergeclauses. If they do, then we update
+ * the marked tuple and go join them.
*/
ResetExprContext(econtext);
case EXEC_MJ_SKIPOUTER_TEST:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPOUTER_TEST\n");
- /* ----------------
- * ok, now test the skip qualification
- * ----------------
+ /*
+ * ok, now test the skip qualification
*/
outerTupleSlot = mergestate->mj_OuterTupleSlot;
econtext->ecxt_outertuple = outerTupleSlot;
MJ_DEBUG_MERGE_COMPARE(outerSkipQual, compareResult);
- /* ----------------
- * compareResult is true as long as we should
- * continue skipping outer tuples.
- * ----------------
+ /*
+ * compareResult is true as long as we should continue
+ * skipping outer tuples.
*/
if (compareResult)
{
break;
}
- /* ----------------
- * now check the inner skip qual to see if we
- * should now skip inner tuples... if we fail the
- * inner skip qual, then we know we have a new pair
- * of matching tuples.
- * ----------------
+ /*
+ * now check the inner skip qual to see if we should now
+ * skip inner tuples... if we fail the inner skip qual,
+ * then we know we have a new pair of matching tuples.
*/
compareResult = MergeCompare(mergeclauses,
innerSkipQual,
mergestate->mj_JoinState = EXEC_MJ_JOINMARK;
break;
- /*------------------------------------------------
+ /*
* Before advancing, we check to see if we must emit an
* outer-join fill tuple for this outer tuple.
- *------------------------------------------------
*/
case EXEC_MJ_SKIPOUTER_ADVANCE:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPOUTER_ADVANCE\n");
if (ExecQual(otherqual, econtext, false))
{
- /* ----------------
- * qualification succeeded. now form the desired
- * projection tuple and return the slot containing it.
- * ----------------
+
+ /*
+ * qualification succeeded. now form the desired
+ * projection tuple and return the slot containing
+ * it.
*/
TupleTableSlot *result;
ExprDoneCond isDone;
}
}
- /* ----------------
- * now we get the next outer tuple, if any
- * ----------------
+ /*
+ * now we get the next outer tuple, if any
*/
outerTupleSlot = ExecProcNode(outerPlan, (Plan *) node);
mergestate->mj_OuterTupleSlot = outerTupleSlot;
MJ_DEBUG_PROC_NODE(outerTupleSlot);
mergestate->mj_MatchedOuter = false;
- /* ----------------
- * if the outer tuple is null then we are done with the
- * join, unless we have inner tuples we need to null-fill.
- * ----------------
+ /*
+ * if the outer tuple is null then we are done with the
+ * join, unless we have inner tuples we need to null-fill.
*/
if (TupIsNull(outerTupleSlot))
{
return NULL;
}
- /* ----------------
- * otherwise test the new tuple against the skip qual.
- * ----------------
+ /*
+ * otherwise test the new tuple against the skip qual.
*/
mergestate->mj_JoinState = EXEC_MJ_SKIPOUTER_TEST;
break;
case EXEC_MJ_SKIPINNER_BEGIN:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPINNER_BEGIN\n");
- /* ----------------
- * before we advance, make sure the current tuples
- * do not satisfy the mergeclauses. If they do, then
- * we update the marked tuple and go join them.
- * ----------------
+ /*
+ * before we advance, make sure the current tuples do not
+ * satisfy the mergeclauses. If they do, then we update
+ * the marked tuple and go join them.
*/
ResetExprContext(econtext);
case EXEC_MJ_SKIPINNER_TEST:
MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPINNER_TEST\n");
- /* ----------------
- * ok, now test the skip qualification
- * ----------------
+ /*
+ * ok, now test the skip qualification
*/
outerTupleSlot = mergestate->mj_OuterTupleSlot;
econtext->ecxt_outertuple = outerTupleSlot;
MJ_DEBUG_MERGE_COMPARE(innerSkipQual, compareResult);
- /* ----------------
- * compareResult is true as long as we should
- * continue skipping inner tuples.
- * ----------------
+ /*
+ * compareResult is true as long as we should continue
+ * skipping inner tuples.
*/
if (compareResult)
{
break;
}
- /* ----------------
- * now check the outer skip qual to see if we
- * should now skip outer tuples... if we fail the
- * outer skip qual, then we know we have a new pair
- * of matching tuples.
- * ----------------
+ /*
+ * now check the outer skip qual to see if we should now
+ * skip outer tuples... if we fail the outer skip qual,
+ * then we know we have a new pair of matching tuples.
*/
compareResult = MergeCompare(mergeclauses,
outerSkipQual,