* Copyright (c) 2008-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/auto_explain/auto_explain.c,v 1.13 2010/02/16 22:19:59 adunstan Exp $
+ * $PostgreSQL: pgsql/contrib/auto_explain/auto_explain.c,v 1.14 2010/02/26 02:00:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool auto_explain_log_nested_statements = false;
static const struct config_enum_entry format_options[] = {
- {"text", EXPLAIN_FORMAT_TEXT, false},
- {"xml", EXPLAIN_FORMAT_XML, false},
- {"json", EXPLAIN_FORMAT_JSON, false},
- {"yaml", EXPLAIN_FORMAT_YAML, false},
- {NULL, 0, false}
+ {"text", EXPLAIN_FORMAT_TEXT, false},
+ {"xml", EXPLAIN_FORMAT_XML, false},
+ {"json", EXPLAIN_FORMAT_JSON, false},
+ {"yaml", EXPLAIN_FORMAT_YAML, false},
+ {NULL, 0, false}
};
/* Current nesting depth of ExecutorRun calls */
msec = queryDesc->totaltime->total * 1000.0;
if (msec >= auto_explain_log_min_duration)
{
- ExplainState es;
+ ExplainState es;
ExplainInitState(&es);
es.analyze = (queryDesc->instrument_options && auto_explain_log_analyze);
ereport(LOG,
(errmsg("duration: %.3f ms plan:\n%s",
msec, es.str->data),
- errhidestmt(true)));
+ errhidestmt(true)));
pfree(es.str->data);
}
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_cash.c,v 1.10 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_cash.c,v 1.11 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_cashkey_cmp(const void *a, const void *b)
{
- cashKEY *ia = (cashKEY*)(((Nsrt *) a)->t);
- cashKEY *ib = (cashKEY*)(((Nsrt *) b)->t);
+ cashKEY *ia = (cashKEY *) (((Nsrt *) a)->t);
+ cashKEY *ib = (cashKEY *) (((Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_date.c,v 1.8 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_date.c,v 1.9 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_datekey_cmp(const void *a, const void *b)
{
- dateKEY *ia = (dateKEY*)(((Nsrt *) a)->t);
- dateKEY *ib = (dateKEY*)(((Nsrt *) b)->t);
- int res;
+ dateKEY *ia = (dateKEY *) (((Nsrt *) a)->t);
+ dateKEY *ib = (dateKEY *) (((Nsrt *) b)->t);
+ int res;
res = DatumGetInt32(DirectFunctionCall2(date_cmp, DateADTGetDatum(ia->lower), DateADTGetDatum(ib->lower)));
if (res == 0)
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_float4.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_float4.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_float4key_cmp(const void *a, const void *b)
{
- float4KEY *ia = (float4KEY*)(((Nsrt *) a)->t);
- float4KEY *ib = (float4KEY*)(((Nsrt *) b)->t);
+ float4KEY *ia = (float4KEY *) (((Nsrt *) a)->t);
+ float4KEY *ib = (float4KEY *) (((Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_float8.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_float8.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_float8key_cmp(const void *a, const void *b)
{
- float8KEY *ia = (float8KEY*)(((Nsrt *) a)->t);
- float8KEY *ib = (float8KEY*)(((Nsrt *) b)->t);
+ float8KEY *ia = (float8KEY *) (((Nsrt *) a)->t);
+ float8KEY *ib = (float8KEY *) (((Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_inet.c,v 1.11 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_inet.c,v 1.12 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_inetkey_cmp(const void *a, const void *b)
{
- inetKEY *ia = (inetKEY*)(((Nsrt *) a)->t);
- inetKEY *ib = (inetKEY*)(((Nsrt *) b)->t);
+ inetKEY *ia = (inetKEY *) (((Nsrt *) a)->t);
+ inetKEY *ib = (inetKEY *) (((Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_int2.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_int2.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_int2key_cmp(const void *a, const void *b)
{
- int16KEY *ia = (int16KEY*)(((Nsrt *) a)->t);
- int16KEY *ib = (int16KEY*)(((Nsrt *) b)->t);
+ int16KEY *ia = (int16KEY *) (((Nsrt *) a)->t);
+ int16KEY *ib = (int16KEY *) (((Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_int4.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_int4.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_int4key_cmp(const void *a, const void *b)
{
- int32KEY *ia = (int32KEY*)(((Nsrt *) a)->t);
- int32KEY *ib = (int32KEY*)(((Nsrt *) b)->t);
+ int32KEY *ia = (int32KEY *) (((Nsrt *) a)->t);
+ int32KEY *ib = (int32KEY *) (((Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_int8.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_int8.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_int8key_cmp(const void *a, const void *b)
{
- int64KEY *ia = (int64KEY*)(((Nsrt *) a)->t);
- int64KEY *ib = (int64KEY*)(((Nsrt *) b)->t);
+ int64KEY *ia = (int64KEY *) (((Nsrt *) a)->t);
+ int64KEY *ib = (int64KEY *) (((Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_interval.c,v 1.13 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_interval.c,v 1.14 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_intvkey_cmp(const void *a, const void *b)
{
- intvKEY *ia = (intvKEY*)(((Nsrt *) a)->t);
- intvKEY *ib = (intvKEY*)(((Nsrt *) b)->t);
- int res;
+ intvKEY *ia = (intvKEY *) (((Nsrt *) a)->t);
+ intvKEY *ib = (intvKEY *) (((Nsrt *) b)->t);
+ int res;
res = DatumGetInt32(DirectFunctionCall2(interval_cmp, IntervalPGetDatum(&ia->lower), IntervalPGetDatum(&ib->lower)));
if (res == 0)
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_macaddr.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_macaddr.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_macadkey_cmp(const void *a, const void *b)
{
- macKEY *ia = (macKEY*)(((Nsrt *) a)->t);
- macKEY *ib = (macKEY*)(((Nsrt *) b)->t);
- int res;
+ macKEY *ia = (macKEY *) (((Nsrt *) a)->t);
+ macKEY *ib = (macKEY *) (((Nsrt *) b)->t);
+ int res;
res = DatumGetInt32(DirectFunctionCall2(macaddr_cmp, MacaddrPGetDatum(&ia->lower), MacaddrPGetDatum(&ib->lower)));
if (res == 0)
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_oid.c,v 1.9 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_oid.c,v 1.10 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_oidkey_cmp(const void *a, const void *b)
{
- oidKEY *ia = (oidKEY*)(((Nsrt *) a)->t);
- oidKEY *ib = (oidKEY*)(((Nsrt *) b)->t);
+ oidKEY *ia = (oidKEY *) (((Nsrt *) a)->t);
+ oidKEY *ib = (oidKEY *) (((Nsrt *) b)->t);
if (ia->lower == ib->lower)
{
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_time.c,v 1.17 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_time.c,v 1.18 2010/02/26 02:00:31 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_timekey_cmp(const void *a, const void *b)
{
- timeKEY *ia = (timeKEY*)(((Nsrt *) a)->t);
- timeKEY *ib = (timeKEY*)(((Nsrt *) b)->t);
- int res;
+ timeKEY *ia = (timeKEY *) (((Nsrt *) a)->t);
+ timeKEY *ib = (timeKEY *) (((Nsrt *) b)->t);
+ int res;
res = DatumGetInt32(DirectFunctionCall2(time_cmp, TimeADTGetDatumFast(ia->lower), TimeADTGetDatumFast(ib->lower)));
if (res == 0)
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_ts.c,v 1.18 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_ts.c,v 1.19 2010/02/26 02:00:32 momjian Exp $
*/
#include "btree_gist.h"
#include "btree_utils_num.h"
static int
gbt_tskey_cmp(const void *a, const void *b)
{
- tsKEY *ia = (tsKEY*)(((Nsrt *) a)->t);
- tsKEY *ib = (tsKEY*)(((Nsrt *) b)->t);
- int res;
+ tsKEY *ia = (tsKEY *) (((Nsrt *) a)->t);
+ tsKEY *ib = (tsKEY *) (((Nsrt *) b)->t);
+ int res;
res = DatumGetInt32(DirectFunctionCall2(timestamp_cmp, TimestampGetDatumFast(ia->lower), TimestampGetDatumFast(ib->lower)));
if (res == 0)
/*
- * $PostgreSQL: pgsql/contrib/btree_gist/btree_utils_var.c,v 1.22 2009/12/02 13:13:24 teodor Exp $
+ * $PostgreSQL: pgsql/contrib/btree_gist/btree_utils_var.c,v 1.23 2010/02/26 02:00:32 momjian Exp $
*/
#include "btree_gist.h"
GBT_VARKEY_R ar = gbt_var_key_readable(((const Vsrt *) a)->t);
GBT_VARKEY_R br = gbt_var_key_readable(((const Vsrt *) b)->t);
const gbtree_vinfo *tinfo = (const gbtree_vinfo *) arg;
- int res;
+ int res;
res = (*tinfo->f_cmp) (ar.lower, br.lower);
if (res == 0)
* Darko Prenosil <Darko.Prenosil@finteh.hr>
* Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
*
- * $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.90 2010/02/24 05:20:49 itagaki Exp $
+ * $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.91 2010/02/26 02:00:32 momjian Exp $
* Copyright (c) 2001-2010, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
static void dblink_res_error(const char *conname, PGresult *res, const char *dblink_context_msg, bool fail);
static char *get_connect_string(const char *servername);
static char *escape_param_str(const char *from);
-static int get_nondropped_natts(Oid relid);
+static int get_nondropped_natts(Oid relid);
/* Global */
static remoteConn *pconn = NULL;
Datum
dblink_fetch(PG_FUNCTION_ARGS)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- PGresult *res = NULL;
- char *conname = NULL;
- remoteConn *rconn = NULL;
- PGconn *conn = NULL;
- StringInfoData buf;
- char *curname = NULL;
- int howmany = 0;
- bool fail = true; /* default to backward compatible */
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ PGresult *res = NULL;
+ char *conname = NULL;
+ remoteConn *rconn = NULL;
+ PGconn *conn = NULL;
+ StringInfoData buf;
+ char *curname = NULL;
+ int howmany = 0;
+ bool fail = true; /* default to backward compatible */
DBLINK_INIT;
/*
* Try to execute the query. Note that since libpq uses malloc, the
- * PGresult will be long-lived even though we are still in a
- * short-lived memory context.
+ * PGresult will be long-lived even though we are still in a short-lived
+ * memory context.
*/
res = PQexec(conn, buf.data);
if (!res ||
static Datum
dblink_record_internal(FunctionCallInfo fcinfo, bool is_async)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- char *msg;
- PGresult *res = NULL;
- PGconn *conn = NULL;
- char *connstr = NULL;
- char *sql = NULL;
- char *conname = NULL;
- remoteConn *rconn = NULL;
- bool fail = true; /* default to backward compatible */
- bool freeconn = false;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ char *msg;
+ PGresult *res = NULL;
+ PGconn *conn = NULL;
+ char *connstr = NULL;
+ char *sql = NULL;
+ char *conname = NULL;
+ remoteConn *rconn = NULL;
+ bool fail = true; /* default to backward compatible */
+ bool freeconn = false;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
static void
materializeResult(FunctionCallInfo fcinfo, PGresult *res)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
Assert(rsinfo->returnMode == SFRM_Materialize);
is_sql_cmd = true;
/*
- * need a tuple descriptor representing one TEXT column to
- * return the command status string as our result tuple
+ * need a tuple descriptor representing one TEXT column to return
+ * the command status string as our result tuple
*/
tupdesc = CreateTemplateTupleDesc(1, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "status",
if (ntuples > 0)
{
- AttInMetadata *attinmeta;
- Tuplestorestate *tupstore;
- MemoryContext oldcontext;
- int row;
- char **values;
+ AttInMetadata *attinmeta;
+ Tuplestorestate *tupstore;
+ MemoryContext oldcontext;
+ int row;
+ char **values;
attinmeta = TupleDescGetAttInMetadata(tupdesc);
oldcontext = MemoryContextSwitchTo(
- rsinfo->econtext->ecxt_per_query_memory);
+ rsinfo->econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(true, false, work_mem);
rsinfo->setResult = tupstore;
rsinfo->setDesc = tupdesc;
"attributes too large")));
/*
- * ensure we don't ask for more pk attributes than we have
- * non-dropped columns
+ * ensure we don't ask for more pk attributes than we have non-dropped
+ * columns
*/
nondropped_natts = get_nondropped_natts(relid);
if (pknumatts > nondropped_natts)
ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("number of primary key fields exceeds number of specified relation attributes")));
+ errmsg("number of primary key fields exceeds number of specified relation attributes")));
/*
* Source array is made up of key values that will be used to locate the
"attributes too large")));
/*
- * ensure we don't ask for more pk attributes than we have
- * non-dropped columns
+ * ensure we don't ask for more pk attributes than we have non-dropped
+ * columns
*/
nondropped_natts = get_nondropped_natts(relid);
if (pknumatts > nondropped_natts)
ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("number of primary key fields exceeds number of specified relation attributes")));
+ errmsg("number of primary key fields exceeds number of specified relation attributes")));
/*
* Target array is made up of key values that will be used to build the
"attributes too large")));
/*
- * ensure we don't ask for more pk attributes than we have
- * non-dropped columns
+ * ensure we don't ask for more pk attributes than we have non-dropped
+ * columns
*/
nondropped_natts = get_nondropped_natts(relid);
if (pknumatts > nondropped_natts)
ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("number of primary key fields exceeds number of specified relation attributes")));
+ errmsg("number of primary key fields exceeds number of specified relation attributes")));
/*
* Source array is made up of key values that will be used to locate the
}
/*
- * Retrieve async notifications for a connection.
+ * Retrieve async notifications for a connection.
*
* Returns an setof record of notifications, or an empty set if none recieved.
* Can optionally take a named connection as parameter, but uses the unnamed connection per default.
Datum
dblink_get_notify(PG_FUNCTION_ARGS)
{
- PGconn *conn = NULL;
- remoteConn *rconn = NULL;
- PGnotify *notify;
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
- Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
+ PGconn *conn = NULL;
+ remoteConn *rconn = NULL;
+ PGnotify *notify;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
DBLINK_INIT;
if (PG_NARGS() == 1)
relation_close(rel, AccessShareLock);
return nondropped_natts;
}
-
* Copyright (c) 2007-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/dict_xsyn/dict_xsyn.c,v 1.8 2010/01/02 16:57:32 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/dict_xsyn/dict_xsyn.c,v 1.9 2010/02/26 02:00:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *dictoptions = (List *) PG_GETARG_POINTER(0);
DictSyn *d;
ListCell *l;
- char *filename = NULL;
+ char *filename = NULL;
d = (DictSyn *) palloc0(sizeof(DictSyn));
d->len = 0;
/*
- * $PostgreSQL: pgsql/contrib/hstore/hstore.h,v 1.9 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore.h,v 1.10 2010/02/26 02:00:32 momjian Exp $
*/
#ifndef __HSTORE_H__
#define __HSTORE_H__
* HEntry: there is one of these for each key _and_ value in an hstore
*
* the position offset points to the _end_ so that we can get the length
- * by subtraction from the previous entry. the ISFIRST flag lets us tell
+ * by subtraction from the previous entry. the ISFIRST flag lets us tell
* whether there is a previous entry.
*/
typedef struct
/*
* it's not possible to get more than 2^28 items into an hstore,
* so we reserve the top few bits of the size field. See hstore_compat.c
- * for one reason why. Some bits are left for future use here.
+ * for one reason why. Some bits are left for future use here.
*/
#define HS_FLAG_NEWVERSION 0x80000000
* evaluation here.
*/
#define HS_COPYITEM(dent_,dbuf_,dptr_,sptr_,klen_,vlen_,vnull_) \
- do { \
+ do { \
memcpy((dptr_), (sptr_), (klen_)+(vlen_)); \
(dptr_) += (klen_)+(vlen_); \
(dent_)++->entry = ((dptr_) - (dbuf_) - (vlen_)) & HENTRY_POSMASK; \
/* finalize a newly-constructed hstore */
#define HS_FINALIZE(hsp_,count_,buf_,ptr_) \
do { \
- int buflen = (ptr_) - (buf_); \
+ int buflen = (ptr_) - (buf_); \
if ((count_)) \
ARRPTR(hsp_)[0].entry |= HENTRY_ISFIRST; \
if ((count_) != HS_COUNT((hsp_))) \
/* ensure the varlena size of an existing hstore is correct */
#define HS_FIXSIZE(hsp_,count_) \
do { \
- int bl = (count_) ? HSE_ENDPOS(ARRPTR(hsp_)[2*(count_)-1]) : 0; \
+ int bl = (count_) ? HSE_ENDPOS(ARRPTR(hsp_)[2*(count_)-1]) : 0; \
SET_VARSIZE((hsp_), CALCDATASIZE((count_),bl)); \
} while (0)
#define HStoreExistsStrategyNumber 9
#define HStoreExistsAnyStrategyNumber 10
#define HStoreExistsAllStrategyNumber 11
-#define HStoreOldContainsStrategyNumber 13 /* backwards compatibility */
+#define HStoreOldContainsStrategyNumber 13 /* backwards compatibility */
/*
* defining HSTORE_POLLUTE_NAMESPACE=0 will prevent use of old function names;
/*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_compat.c,v 1.1 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_compat.c,v 1.2 2010/02/26 02:00:32 momjian Exp $
*
* Notes on old/new hstore format disambiguation.
*
pos:31;
} HOldEntry;
-static int hstoreValidNewFormat(HStore *hs);
-static int hstoreValidOldFormat(HStore *hs);
+static int hstoreValidNewFormat(HStore *hs);
+static int hstoreValidOldFormat(HStore *hs);
/*
* Validity test for a new-format hstore.
- * 0 = not valid
- * 1 = valid but with "slop" in the length
- * 2 = exactly valid
+ * 0 = not valid
+ * 1 = valid but with "slop" in the length
+ * 2 = exactly valid
*/
static int
hstoreValidNewFormat(HStore *hs)
{
- int count = HS_COUNT(hs);
- HEntry *entries = ARRPTR(hs);
- int buflen = (count) ? HSE_ENDPOS(entries[2*(count)-1]) : 0;
- int vsize = CALCDATASIZE(count,buflen);
- int i;
+ int count = HS_COUNT(hs);
+ HEntry *entries = ARRPTR(hs);
+ int buflen = (count) ? HSE_ENDPOS(entries[2 * (count) - 1]) : 0;
+ int vsize = CALCDATASIZE(count, buflen);
+ int i;
if (hs->size_ & HS_FLAG_NEWVERSION)
return 2;
/* entry position must be nondecreasing */
- for (i = 1; i < 2*count; ++i)
+ for (i = 1; i < 2 * count; ++i)
{
if (HSE_ISFIRST(entries[i])
- || (HSE_ENDPOS(entries[i]) < HSE_ENDPOS(entries[i-1])))
+ || (HSE_ENDPOS(entries[i]) < HSE_ENDPOS(entries[i - 1])))
return 0;
}
for (i = 1; i < count; ++i)
{
- if (HS_KEYLEN(entries,i) < HS_KEYLEN(entries,i-1))
+ if (HS_KEYLEN(entries, i) < HS_KEYLEN(entries, i - 1))
return 0;
- if (HSE_ISNULL(entries[2*i]))
+ if (HSE_ISNULL(entries[2 * i]))
return 0;
}
/*
* Validity test for an old-format hstore.
- * 0 = not valid
- * 1 = valid but with "slop" in the length
- * 2 = exactly valid
+ * 0 = not valid
+ * 1 = valid but with "slop" in the length
+ * 2 = exactly valid
*/
static int
hstoreValidOldFormat(HStore *hs)
{
- int count = hs->size_;
- HOldEntry *entries = (HOldEntry *) ARRPTR(hs);
- int vsize;
- int lastpos = 0;
- int i;
+ int count = hs->size_;
+ HOldEntry *entries = (HOldEntry *) ARRPTR(hs);
+ int vsize;
+ int lastpos = 0;
+ int i;
if (hs->size_ & HS_FLAG_NEWVERSION)
return 0;
if (count > 0xFFFFFFF)
return 0;
- if (CALCDATASIZE(count,0) > VARSIZE(hs))
+ if (CALCDATASIZE(count, 0) > VARSIZE(hs))
return 0;
if (entries[0].pos != 0)
for (i = 1; i < count; ++i)
{
- if (entries[i].keylen < entries[i-1].keylen)
+ if (entries[i].keylen < entries[i - 1].keylen)
return 0;
}
/*
- * entry position must be strictly increasing, except for the
- * first entry (which can be ""=>"" and thus zero-length); and
- * all entries must be properly contiguous
+ * entry position must be strictly increasing, except for the first entry
+ * (which can be ""=>"" and thus zero-length); and all entries must be
+ * properly contiguous
*/
for (i = 0; i < count; ++i)
+ ((entries[i].valisnull) ? 0 : entries[i].vallen));
}
- vsize = CALCDATASIZE(count,lastpos);
+ vsize = CALCDATASIZE(count, lastpos);
if (vsize > VARSIZE(hs))
return 0;
if (valid_new)
{
/*
- * force the "new version" flag and the correct varlena
- * length, but only if we have a writable copy already
- * (which we almost always will, since short new-format
- * values won't come through here)
+ * force the "new version" flag and the correct varlena length,
+ * but only if we have a writable copy already (which we almost
+ * always will, since short new-format values won't come through
+ * here)
*/
if (writable)
{
- HS_SETCOUNT(hs,HS_COUNT(hs));
- HS_FIXSIZE(hs,HS_COUNT(hs));
+ HS_SETCOUNT(hs, HS_COUNT(hs));
+ HS_FIXSIZE(hs, HS_COUNT(hs));
}
return hs;
}
else
{
- elog(ERROR,"invalid hstore value found");
+ elog(ERROR, "invalid hstore value found");
}
}
/*
- * this is the tricky edge case. It is only possible in some
- * quite extreme cases (the hstore must have had a lot
- * of wasted padding space at the end).
- * But the only way a "new" hstore value could get here is if
- * we're upgrading in place from a pre-release version of
- * hstore-new (NOT contrib/hstore), so we work off the following
- * assumptions:
- * 1. If you're moving from old contrib/hstore to hstore-new,
- * you're required to fix up any potential conflicts first,
- * e.g. by running ALTER TABLE ... USING col::text::hstore;
- * on all hstore columns before upgrading.
- * 2. If you're moving from old contrib/hstore to new
- * contrib/hstore, then "new" values are impossible here
- * 3. If you're moving from pre-release hstore-new to hstore-new,
- * then "old" values are impossible here
- * 4. If you're moving from pre-release hstore-new to new
- * contrib/hstore, you're not doing so as an in-place upgrade,
- * so there is no issue
- * So the upshot of all this is that we can treat all the edge
- * cases as "new" if we're being built as hstore-new, and "old"
- * if we're being built as contrib/hstore.
+ * this is the tricky edge case. It is only possible in some quite extreme
+ * cases (the hstore must have had a lot of wasted padding space at the
+ * end). But the only way a "new" hstore value could get here is if we're
+ * upgrading in place from a pre-release version of hstore-new (NOT
+ * contrib/hstore), so we work off the following assumptions: 1. If you're
+ * moving from old contrib/hstore to hstore-new, you're required to fix up
+ * any potential conflicts first, e.g. by running ALTER TABLE ... USING
+ * col::text::hstore; on all hstore columns before upgrading. 2. If you're
+ * moving from old contrib/hstore to new contrib/hstore, then "new" values
+ * are impossible here 3. If you're moving from pre-release hstore-new to
+ * hstore-new, then "old" values are impossible here 4. If you're moving
+ * from pre-release hstore-new to new contrib/hstore, you're not doing so
+ * as an in-place upgrade, so there is no issue So the upshot of all this
+ * is that we can treat all the edge cases as "new" if we're being built
+ * as hstore-new, and "old" if we're being built as contrib/hstore.
*
- * XXX the WARNING can probably be downgraded to DEBUG1 once this
- * has been beta-tested. But for now, it would be very useful to
- * know if anyone can actually reach this case in a non-contrived
- * setting.
+ * XXX the WARNING can probably be downgraded to DEBUG1 once this has been
+ * beta-tested. But for now, it would be very useful to know if anyone can
+ * actually reach this case in a non-contrived setting.
*/
if (valid_new)
{
#if HSTORE_IS_HSTORE_NEW
- elog(WARNING,"ambiguous hstore value resolved as hstore-new");
+ elog(WARNING, "ambiguous hstore value resolved as hstore-new");
/*
- * force the "new version" flag and the correct varlena
- * length, but only if we have a writable copy already
- * (which we almost always will, since short new-format
- * values won't come through here)
+ * force the "new version" flag and the correct varlena length, but
+ * only if we have a writable copy already (which we almost always
+ * will, since short new-format values won't come through here)
*/
if (writable)
{
- HS_SETCOUNT(hs,HS_COUNT(hs));
- HS_FIXSIZE(hs,HS_COUNT(hs));
+ HS_SETCOUNT(hs, HS_COUNT(hs));
+ HS_FIXSIZE(hs, HS_COUNT(hs));
}
return hs;
#else
- elog(WARNING,"ambiguous hstore value resolved as hstore-old");
+ elog(WARNING, "ambiguous hstore value resolved as hstore-old");
#endif
}
/*
- * must have an old-style value. Overwrite it in place as a new-style
- * one, making sure we have a writable copy first.
+ * must have an old-style value. Overwrite it in place as a new-style one,
+ * making sure we have a writable copy first.
*/
if (!writable)
hs = (HStore *) PG_DETOAST_DATUM_COPY(orig);
{
- int count = hs->size_;
- HEntry *new_entries = ARRPTR(hs);
- HOldEntry *old_entries = (HOldEntry *) ARRPTR(hs);
- int i;
-
+ int count = hs->size_;
+ HEntry *new_entries = ARRPTR(hs);
+ HOldEntry *old_entries = (HOldEntry *) ARRPTR(hs);
+ int i;
+
for (i = 0; i < count; ++i)
{
- uint32 pos = old_entries[i].pos;
- uint32 keylen = old_entries[i].keylen;
- uint32 vallen = old_entries[i].vallen;
- bool isnull = old_entries[i].valisnull;
+ uint32 pos = old_entries[i].pos;
+ uint32 keylen = old_entries[i].keylen;
+ uint32 vallen = old_entries[i].vallen;
+ bool isnull = old_entries[i].valisnull;
if (isnull)
vallen = 0;
- new_entries[2*i].entry = (pos + keylen) & HENTRY_POSMASK;
- new_entries[2*i+1].entry = (((pos + keylen + vallen) & HENTRY_POSMASK)
- | ((isnull) ? HENTRY_ISNULL : 0));
+ new_entries[2 * i].entry = (pos + keylen) & HENTRY_POSMASK;
+ new_entries[2 * i + 1].entry = (((pos + keylen + vallen) & HENTRY_POSMASK)
+ | ((isnull) ? HENTRY_ISNULL : 0));
}
if (count)
new_entries[0].entry |= HENTRY_ISFIRST;
- HS_SETCOUNT(hs,count);
- HS_FIXSIZE(hs,count);
+ HS_SETCOUNT(hs, count);
+ HS_FIXSIZE(hs, count);
}
return hs;
Datum
hstore_version_diag(PG_FUNCTION_ARGS)
{
- HStore *hs = (HStore *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
- int valid_new = hstoreValidNewFormat(hs);
- int valid_old = hstoreValidOldFormat(hs);
+ HStore *hs = (HStore *) PG_DETOAST_DATUM(PG_GETARG_DATUM(0));
+ int valid_new = hstoreValidNewFormat(hs);
+ int valid_old = hstoreValidOldFormat(hs);
- PG_RETURN_INT32(valid_old*10 + valid_new);
+ PG_RETURN_INT32(valid_old * 10 + valid_new);
}
/*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_gin.c,v 1.7 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_gin.c,v 1.8 2010/02/26 02:00:32 momjian Exp $
*/
#include "postgres.h"
HStore *hs = PG_GETARG_HS(0);
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
Datum *entries = NULL;
- HEntry *hsent = ARRPTR(hs);
- char *ptr = STRPTR(hs);
- int count = HS_COUNT(hs);
- int i;
+ HEntry *hsent = ARRPTR(hs);
+ char *ptr = STRPTR(hs);
+ int count = HS_COUNT(hs);
+ int i;
*nentries = 2 * count;
if (count)
{
text *item;
- item = makeitem(HS_KEY(hsent,ptr,i), HS_KEYLEN(hsent,i));
+ item = makeitem(HS_KEY(hsent, ptr, i), HS_KEYLEN(hsent, i));
*VARDATA(item) = KEYFLAG;
- entries[2*i] = PointerGetDatum(item);
+ entries[2 * i] = PointerGetDatum(item);
- if (HS_VALISNULL(hsent,i))
+ if (HS_VALISNULL(hsent, i))
{
item = makeitem(NULL, 0);
*VARDATA(item) = NULLFLAG;
}
else
{
- item = makeitem(HS_VAL(hsent,ptr,i), HS_VALLEN(hsent,i));
+ item = makeitem(HS_VAL(hsent, ptr, i), HS_VALLEN(hsent, i));
*VARDATA(item) = VALFLAG;
}
- entries[2*i+1] = PointerGetDatum(item);
+ entries[2 * i + 1] = PointerGetDatum(item);
}
PG_RETURN_POINTER(entries);
else if (strategy == HStoreExistsAnyStrategyNumber ||
strategy == HStoreExistsAllStrategyNumber)
{
- ArrayType *query = PG_GETARG_ARRAYTYPE_P(0);
- Datum *key_datums;
- bool *key_nulls;
- int key_count;
- int i,j;
+ ArrayType *query = PG_GETARG_ARRAYTYPE_P(0);
+ Datum *key_datums;
+ bool *key_nulls;
+ int key_count;
+ int i,
+ j;
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
Datum *entries = NULL;
- text *item;
+ text *item;
deconstruct_array(query,
TEXTOID, -1, false, 'i',
{
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* HStore *query = PG_GETARG_HS(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res = true;
}
else if (strategy == HStoreExistsAllStrategyNumber)
{
- int i;
+ int i;
for (i = 0; res && i < nkeys; ++i)
if (!check[i])
/*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_gist.c,v 1.11 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_gist.c,v 1.12 2010/02/26 02:00:32 momjian Exp $
*/
#include "postgres.h"
HStore *val = DatumGetHStoreP(entry->key);
HEntry *hsent = ARRPTR(val);
char *ptr = STRPTR(val);
- int count = HS_COUNT(val);
- int i;
+ int count = HS_COUNT(val);
+ int i;
SET_VARSIZE(res, CALCGTSIZE(0));
for (i = 0; i < count; ++i)
{
- int h;
+ int h;
- h = crc32_sz((char *) HS_KEY(hsent,ptr,i), HS_KEYLEN(hsent,i));
+ h = crc32_sz((char *) HS_KEY(hsent, ptr, i), HS_KEYLEN(hsent, i));
HASH(GETSIGN(res), h);
- if (!HS_VALISNULL(hsent,i))
+ if (!HS_VALISNULL(hsent, i))
{
- h = crc32_sz((char *) HS_VAL(hsent,ptr,i), HS_VALLEN(hsent,i));
+ h = crc32_sz((char *) HS_VAL(hsent, ptr, i), HS_VALLEN(hsent, i));
HASH(GETSIGN(res), h);
}
}
{
GISTTYPE *entry = (GISTTYPE *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
bool *recheck = (bool *) PG_GETARG_POINTER(4);
bool res = true;
HStore *query = PG_GETARG_HS(1);
HEntry *qe = ARRPTR(query);
char *qv = STRPTR(query);
- int count = HS_COUNT(query);
- int i;
+ int count = HS_COUNT(query);
+ int i;
for (i = 0; res && i < count; ++i)
{
- int crc = crc32_sz((char *) HS_KEY(qe,qv,i), HS_KEYLEN(qe,i));
+ int crc = crc32_sz((char *) HS_KEY(qe, qv, i), HS_KEYLEN(qe, i));
if (GETBIT(sign, HASHVAL(crc)))
{
- if (!HS_VALISNULL(qe,i))
+ if (!HS_VALISNULL(qe, i))
{
- crc = crc32_sz((char *) HS_VAL(qe,qv,i), HS_VALLEN(qe,i));
+ crc = crc32_sz((char *) HS_VAL(qe, qv, i), HS_VALLEN(qe, i));
if (!GETBIT(sign, HASHVAL(crc)))
res = false;
}
}
else if (strategy == HStoreExistsAllStrategyNumber)
{
- ArrayType *query = PG_GETARG_ARRAYTYPE_P(1);
- Datum *key_datums;
- bool *key_nulls;
- int key_count;
- int i;
+ ArrayType *query = PG_GETARG_ARRAYTYPE_P(1);
+ Datum *key_datums;
+ bool *key_nulls;
+ int key_count;
+ int i;
deconstruct_array(query,
TEXTOID, -1, false, 'i',
for (i = 0; res && i < key_count; ++i)
{
- int crc;
+ int crc;
+
if (key_nulls[i])
continue;
crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ);
}
else if (strategy == HStoreExistsAnyStrategyNumber)
{
- ArrayType *query = PG_GETARG_ARRAYTYPE_P(1);
- Datum *key_datums;
- bool *key_nulls;
- int key_count;
- int i;
+ ArrayType *query = PG_GETARG_ARRAYTYPE_P(1);
+ Datum *key_datums;
+ bool *key_nulls;
+ int key_count;
+ int i;
deconstruct_array(query,
TEXTOID, -1, false, 'i',
for (i = 0; !res && i < key_count; ++i)
{
- int crc;
+ int crc;
+
if (key_nulls[i])
continue;
crc = crc32_sz(VARDATA(key_datums[i]), VARSIZE(key_datums[i]) - VARHDRSZ);
/*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_io.c,v 1.12 2009/09/30 19:50:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_io.c,v 1.13 2010/02/26 02:00:32 momjian Exp $
*/
#include "postgres.h"
PG_MODULE_MAGIC;
/* old names for C functions */
-HSTORE_POLLUTE(hstore_from_text,tconvert);
+HSTORE_POLLUTE(hstore_from_text, tconvert);
typedef struct
HStore *
hstorePairs(Pairs *pairs, int4 pcount, int4 buflen)
{
- HStore *out;
+ HStore *out;
HEntry *entry;
char *ptr;
char *buf;
- int4 len;
- int4 i;
+ int4 len;
+ int4 i;
len = CALCDATASIZE(pcount, buflen);
out = palloc(len);
buf = ptr = STRPTR(out);
for (i = 0; i < pcount; i++)
- HS_ADDITEM(entry,buf,ptr,pairs[i]);
+ HS_ADDITEM(entry, buf, ptr, pairs[i]);
- HS_FINALIZE(out,pcount,buf,ptr);
+ HS_FINALIZE(out, pcount, buf, ptr);
return out;
}
int4 buflen;
HStore *out;
Pairs *pairs;
- int4 i;
- int4 pcount;
- StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+ int4 i;
+ int4 pcount;
+ StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
pcount = pq_getmsgint(buf, 4);
for (i = 0; i < pcount; ++i)
{
- int rawlen = pq_getmsgint(buf, 4);
- int len;
+ int rawlen = pq_getmsgint(buf, 4);
+ int len;
if (rawlen < 0)
ereport(ERROR,
Datum
hstore_from_text(PG_FUNCTION_ARGS)
{
- text *key;
- text *val = NULL;
- Pairs p;
+ text *key;
+ text *val = NULL;
+ Pairs p;
HStore *out;
if (PG_ARGISNULL(0))
Pairs *pairs;
Datum *key_datums;
bool *key_nulls;
- int key_count;
+ int key_count;
Datum *value_datums;
bool *value_nulls;
- int value_count;
+ int value_count;
ArrayType *key_array;
ArrayType *value_array;
- int i;
+ int i;
if (PG_ARGISNULL(0))
PG_RETURN_NULL();
Assert(ARR_ELEMTYPE(key_array) == TEXTOID);
/*
- * must check >1 rather than != 1 because empty arrays have
- * 0 dimensions, not 1
+ * must check >1 rather than != 1 because empty arrays have 0 dimensions,
+ * not 1
*/
if (ARR_NDIM(key_array) > 1)
hstore_from_array(PG_FUNCTION_ARGS)
{
ArrayType *in_array = PG_GETARG_ARRAYTYPE_P(0);
- int ndims = ARR_NDIM(in_array);
- int count;
+ int ndims = ARR_NDIM(in_array);
+ int count;
int4 buflen;
HStore *out;
Pairs *pairs;
Datum *in_datums;
bool *in_nulls;
- int in_count;
- int i;
+ int in_count;
+ int i;
Assert(ARR_ELEMTYPE(in_array) == TEXTOID);
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("wrong number of array subscripts")));
- }
+ }
deconstruct_array(in_array,
TEXTOID, -1, false, 'i',
for (i = 0; i < count; ++i)
{
- if (in_nulls[i*2])
+ if (in_nulls[i * 2])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("null value not allowed for hstore key")));
- if (in_nulls[i*2+1])
+ if (in_nulls[i * 2 + 1])
{
- pairs[i].key = VARDATA_ANY(in_datums[i*2]);
+ pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
pairs[i].val = NULL;
- pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i*2]));
+ pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
pairs[i].vallen = 4;
pairs[i].isnull = true;
pairs[i].needfree = false;
}
else
{
- pairs[i].key = VARDATA_ANY(in_datums[i*2]);
- pairs[i].val = VARDATA_ANY(in_datums[i*2+1]);
- pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i*2]));
- pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(in_datums[i*2+1]));
+ pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
+ pairs[i].val = VARDATA_ANY(in_datums[i * 2 + 1]);
+ pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
+ pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(in_datums[i * 2 + 1]));
pairs[i].isnull = false;
pairs[i].needfree = false;
}
HeapTupleHeader rec;
int4 buflen;
HStore *out;
- Pairs *pairs;
+ Pairs *pairs;
Oid tupType;
int32 tupTypmod;
TupleDesc tupdesc;
HeapTupleData tuple;
RecordIOData *my_extra;
int ncolumns;
- int i,j;
+ int i,
+ j;
Datum *values;
bool *nulls;
if (PG_ARGISNULL(0))
{
- Oid argtype = get_fn_expr_argtype(fcinfo->flinfo,0);
+ Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
/*
- * have no tuple to look at, so the only source of type info
- * is the argtype. The lookup_rowtype_tupdesc call below will
- * error out if we don't have a known composite type oid here.
+ * have no tuple to look at, so the only source of type info is the
+ * argtype. The lookup_rowtype_tupdesc call below will error out if we
+ * don't have a known composite type oid here.
*/
tupType = argtype;
tupTypmod = -1;
*/
if (column_info->column_type != column_type)
{
- bool typIsVarlena;
+ bool typIsVarlena;
getTypeOutputInfo(column_type,
&column_info->typiofunc,
Datum
hstore_populate_record(PG_FUNCTION_ARGS)
{
- Oid argtype = get_fn_expr_argtype(fcinfo->flinfo,0);
- HStore *hs;
- HEntry *entries;
- char *ptr;
+ Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ HStore *hs;
+ HEntry *entries;
+ char *ptr;
HeapTupleHeader rec;
Oid tupType;
int32 tupTypmod;
TupleDesc tupdesc;
HeapTupleData tuple;
- HeapTuple rettuple;
+ HeapTuple rettuple;
RecordIOData *my_extra;
- int ncolumns;
+ int ncolumns;
int i;
Datum *values;
bool *nulls;
rec = NULL;
/*
- * have no tuple to look at, so the only source of type info
- * is the argtype. The lookup_rowtype_tupdesc call below will
- * error out if we don't have a known composite type oid here.
+ * have no tuple to look at, so the only source of type info is the
+ * argtype. The lookup_rowtype_tupdesc call below will error out if we
+ * don't have a known composite type oid here.
*/
tupType = argtype;
tupTypmod = -1;
ptr = STRPTR(hs);
/*
- * if the input hstore is empty, we can only skip the rest if
- * we were passed in a non-null record, since otherwise there
- * may be issues with domain nulls.
+ * if the input hstore is empty, we can only skip the rest if we were
+ * passed in a non-null record, since otherwise there may be issues with
+ * domain nulls.
*/
if (HS_COUNT(hs) == 0 && rec)
ColumnIOData *column_info = &my_extra->columns[i];
Oid column_type = tupdesc->attrs[i]->atttypid;
char *value;
- int idx;
- int vallen;
+ int idx;
+ int vallen;
/* Ignore dropped columns in datatype */
if (tupdesc->attrs[i]->attisdropped)
idx = hstoreFindKey(hs, 0,
NameStr(tupdesc->attrs[i]->attname),
strlen(NameStr(tupdesc->attrs[i]->attname)));
+
/*
- * we can't just skip here if the key wasn't found since we
- * might have a domain to deal with. If we were passed in a
- * non-null record datum, we assume that the existing values
- * are valid (if they're not, then it's not our fault), but if
- * we were passed in a null, then every field which we don't
- * populate needs to be run through the input function just in
- * case it's a domain type.
+ * we can't just skip here if the key wasn't found since we might have
+ * a domain to deal with. If we were passed in a non-null record
+ * datum, we assume that the existing values are valid (if they're
+ * not, then it's not our fault), but if we were passed in a null,
+ * then every field which we don't populate needs to be run through
+ * the input function just in case it's a domain type.
*/
if (idx < 0 && rec)
continue;
column_info->column_type = column_type;
}
- if (idx < 0 || HS_VALISNULL(entries,idx))
+ if (idx < 0 || HS_VALISNULL(entries, idx))
{
/*
- * need InputFunctionCall to happen even for nulls, so
- * that domain checks are done
+ * need InputFunctionCall to happen even for nulls, so that domain
+ * checks are done
*/
values[i] = InputFunctionCall(&column_info->proc, NULL,
column_info->typioparam,
}
else
{
- vallen = HS_VALLEN(entries,idx);
+ vallen = HS_VALLEN(entries, idx);
value = palloc(1 + vallen);
- memcpy(value, HS_VAL(entries,ptr,idx), vallen);
+ memcpy(value, HS_VAL(entries, ptr, idx), vallen);
value[vallen] = 0;
values[i] = InputFunctionCall(&column_info->proc, value,
HStore *in = PG_GETARG_HS(0);
int buflen,
i;
- int count = HS_COUNT(in);
+ int count = HS_COUNT(in);
char *out,
*ptr;
char *base = STRPTR(in);
buflen = 0;
/*
- * this loop overestimates due to pessimistic assumptions about
- * escaping, so very large hstore values can't be output. this
- * could be fixed, but many other data types probably have the
- * same issue. This replaced code that used the original varlena
- * size for calculations, which was wrong in some subtle ways.
+ * this loop overestimates due to pessimistic assumptions about escaping,
+ * so very large hstore values can't be output. this could be fixed, but
+ * many other data types probably have the same issue. This replaced code
+ * that used the original varlena size for calculations, which was wrong
+ * in some subtle ways.
*/
for (i = 0; i < count; i++)
{
/* include "" and => and comma-space */
- buflen += 6 + 2 * HS_KEYLEN(entries,i);
+ buflen += 6 + 2 * HS_KEYLEN(entries, i);
/* include "" only if nonnull */
- buflen += 2 + (HS_VALISNULL(entries,i)
+ buflen += 2 + (HS_VALISNULL(entries, i)
? 2
- : 2 * HS_VALLEN(entries,i));
+ : 2 * HS_VALLEN(entries, i));
}
out = ptr = palloc(buflen);
for (i = 0; i < count; i++)
{
*ptr++ = '"';
- ptr = cpw(ptr, HS_KEY(entries,base,i), HS_KEYLEN(entries,i));
+ ptr = cpw(ptr, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
*ptr++ = '"';
*ptr++ = '=';
*ptr++ = '>';
- if (HS_VALISNULL(entries,i))
+ if (HS_VALISNULL(entries, i))
{
*ptr++ = 'N';
*ptr++ = 'U';
else
{
*ptr++ = '"';
- ptr = cpw(ptr, HS_VAL(entries,base,i), HS_VALLEN(entries,i));
+ ptr = cpw(ptr, HS_VAL(entries, base, i), HS_VALLEN(entries, i));
*ptr++ = '"';
}
hstore_send(PG_FUNCTION_ARGS)
{
HStore *in = PG_GETARG_HS(0);
- int i;
- int count = HS_COUNT(in);
+ int i;
+ int count = HS_COUNT(in);
char *base = STRPTR(in);
HEntry *entries = ARRPTR(in);
StringInfoData buf;
for (i = 0; i < count; i++)
{
- int32 keylen = HS_KEYLEN(entries,i);
+ int32 keylen = HS_KEYLEN(entries, i);
+
pq_sendint(&buf, keylen, 4);
- pq_sendtext(&buf, HS_KEY(entries,base,i), keylen);
- if (HS_VALISNULL(entries,i))
+ pq_sendtext(&buf, HS_KEY(entries, base, i), keylen);
+ if (HS_VALISNULL(entries, i))
{
pq_sendint(&buf, -1, 4);
}
else
{
- int32 vallen = HS_VALLEN(entries,i);
+ int32 vallen = HS_VALLEN(entries, i);
+
pq_sendint(&buf, vallen, 4);
- pq_sendtext(&buf, HS_VAL(entries,base,i), vallen);
+ pq_sendtext(&buf, HS_VAL(entries, base, i), vallen);
}
}
/*
- * $PostgreSQL: pgsql/contrib/hstore/hstore_op.c,v 1.15 2009/09/30 21:26:17 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/hstore/hstore_op.c,v 1.16 2010/02/26 02:00:32 momjian Exp $
*/
#include "postgres.h"
#include "hstore.h"
/* old names for C functions */
-HSTORE_POLLUTE(hstore_fetchval,fetchval);
-HSTORE_POLLUTE(hstore_exists,exists);
-HSTORE_POLLUTE(hstore_defined,defined);
-HSTORE_POLLUTE(hstore_delete,delete);
-HSTORE_POLLUTE(hstore_concat,hs_concat);
-HSTORE_POLLUTE(hstore_contains,hs_contains);
-HSTORE_POLLUTE(hstore_contained,hs_contained);
-HSTORE_POLLUTE(hstore_akeys,akeys);
-HSTORE_POLLUTE(hstore_avals,avals);
-HSTORE_POLLUTE(hstore_skeys,skeys);
-HSTORE_POLLUTE(hstore_svals,svals);
-HSTORE_POLLUTE(hstore_each,each);
+HSTORE_POLLUTE(hstore_fetchval, fetchval);
+HSTORE_POLLUTE(hstore_exists, exists);
+HSTORE_POLLUTE(hstore_defined, defined);
+HSTORE_POLLUTE(hstore_delete, delete);
+HSTORE_POLLUTE(hstore_concat, hs_concat);
+HSTORE_POLLUTE(hstore_contains, hs_contains);
+HSTORE_POLLUTE(hstore_contained, hs_contained);
+HSTORE_POLLUTE(hstore_akeys, akeys);
+HSTORE_POLLUTE(hstore_avals, avals);
+HSTORE_POLLUTE(hstore_skeys, skeys);
+HSTORE_POLLUTE(hstore_svals, svals);
+HSTORE_POLLUTE(hstore_each, each);
/*
* one-off or unordered searches.
*/
int
-hstoreFindKey(HStore * hs, int *lowbound, char *key, int keylen)
+hstoreFindKey(HStore *hs, int *lowbound, char *key, int keylen)
{
HEntry *entries = ARRPTR(hs);
- int stopLow = lowbound ? *lowbound : 0;
- int stopHigh = HS_COUNT(hs);
- int stopMiddle;
+ int stopLow = lowbound ? *lowbound : 0;
+ int stopHigh = HS_COUNT(hs);
+ int stopMiddle;
char *base = STRPTR(hs);
while (stopLow < stopHigh)
{
- int difference;
+ int difference;
stopMiddle = stopLow + (stopHigh - stopLow) / 2;
- if (HS_KEYLEN(entries,stopMiddle) == keylen)
- difference = strncmp(HS_KEY(entries,base,stopMiddle), key, keylen);
+ if (HS_KEYLEN(entries, stopMiddle) == keylen)
+ difference = strncmp(HS_KEY(entries, base, stopMiddle), key, keylen);
else
- difference = (HS_KEYLEN(entries,stopMiddle) > keylen) ? 1 : -1;
+ difference = (HS_KEYLEN(entries, stopMiddle) > keylen) ? 1 : -1;
if (difference == 0)
{
Pairs *
hstoreArrayToPairs(ArrayType *a, int *npairs)
{
- Datum *key_datums;
- bool *key_nulls;
- int key_count;
- Pairs *key_pairs;
- int bufsiz;
- int i,j;
+ Datum *key_datums;
+ bool *key_nulls;
+ int key_count;
+ Pairs *key_pairs;
+ int bufsiz;
+ int i,
+ j;
deconstruct_array(a,
TEXTOID, -1, false, 'i',
text *key = PG_GETARG_TEXT_PP(1);
HEntry *entries = ARRPTR(hs);
text *out;
- int idx = hstoreFindKey(hs, NULL,
+ int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
- if (idx < 0 || HS_VALISNULL(entries,idx))
+ if (idx < 0 || HS_VALISNULL(entries, idx))
PG_RETURN_NULL();
- out = cstring_to_text_with_len(HS_VAL(entries,STRPTR(hs),idx),
- HS_VALLEN(entries,idx));
+ out = cstring_to_text_with_len(HS_VAL(entries, STRPTR(hs), idx),
+ HS_VALLEN(entries, idx));
PG_RETURN_TEXT_P(out);
}
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
- int idx = hstoreFindKey(hs, NULL,
+ int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
PG_RETURN_BOOL(idx >= 0);
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *keys = PG_GETARG_ARRAYTYPE_P(1);
- int nkeys;
- Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys);
- int i;
- int lowbound = 0;
- bool res = false;
+ int nkeys;
+ Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys);
+ int i;
+ int lowbound = 0;
+ bool res = false;
/*
- * we exploit the fact that the pairs list is already sorted into
- * strictly increasing order to narrow the hstoreFindKey search;
- * each search can start one entry past the previous "found"
- * entry, or at the lower bound of the last search.
+ * we exploit the fact that the pairs list is already sorted into strictly
+ * increasing order to narrow the hstoreFindKey search; each search can
+ * start one entry past the previous "found" entry, or at the lower bound
+ * of the last search.
*/
for (i = 0; !res && i < nkeys; ++i)
{
- int idx = hstoreFindKey(hs, &lowbound,
- key_pairs[i].key, key_pairs[i].keylen);
+ int idx = hstoreFindKey(hs, &lowbound,
+ key_pairs[i].key, key_pairs[i].keylen);
if (idx >= 0)
res = true;
{
HStore *hs = PG_GETARG_HS(0);
ArrayType *keys = PG_GETARG_ARRAYTYPE_P(1);
- int nkeys;
- Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys);
- int i;
- int lowbound = 0;
- bool res = nkeys ? true : false;
+ int nkeys;
+ Pairs *key_pairs = hstoreArrayToPairs(keys, &nkeys);
+ int i;
+ int lowbound = 0;
+ bool res = nkeys ? true : false;
/*
- * we exploit the fact that the pairs list is already sorted into
- * strictly increasing order to narrow the hstoreFindKey search;
- * each search can start one entry past the previous "found"
- * entry, or at the lower bound of the last search.
+ * we exploit the fact that the pairs list is already sorted into strictly
+ * increasing order to narrow the hstoreFindKey search; each search can
+ * start one entry past the previous "found" entry, or at the lower bound
+ * of the last search.
*/
for (i = 0; res && i < nkeys; ++i)
{
- int idx = hstoreFindKey(hs, &lowbound,
- key_pairs[i].key, key_pairs[i].keylen);
+ int idx = hstoreFindKey(hs, &lowbound,
+ key_pairs[i].key, key_pairs[i].keylen);
if (idx < 0)
res = false;
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
HEntry *entries = ARRPTR(hs);
- int idx = hstoreFindKey(hs, NULL,
+ int idx = hstoreFindKey(hs, NULL,
VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
- bool res = (idx >= 0 && !HS_VALISNULL(entries,idx));
+ bool res = (idx >= 0 && !HS_VALISNULL(entries, idx));
PG_RETURN_BOOL(res);
}
{
HStore *hs = PG_GETARG_HS(0);
text *key = PG_GETARG_TEXT_PP(1);
- char *keyptr = VARDATA_ANY(key);
- int keylen = VARSIZE_ANY_EXHDR(key);
+ char *keyptr = VARDATA_ANY(key);
+ int keylen = VARSIZE_ANY_EXHDR(key);
HStore *out = palloc(VARSIZE(hs));
char *bufs,
- *bufd,
+ *bufd,
*ptrd;
HEntry *es,
*ed;
- int i;
- int count = HS_COUNT(hs);
- int outcount = 0;
+ int i;
+ int count = HS_COUNT(hs);
+ int outcount = 0;
SET_VARSIZE(out, VARSIZE(hs));
- HS_SETCOUNT(out, count); /* temporary! */
+ HS_SETCOUNT(out, count); /* temporary! */
bufs = STRPTR(hs);
es = ARRPTR(hs);
for (i = 0; i < count; ++i)
{
- int len = HS_KEYLEN(es,i);
- char *ptrs = HS_KEY(es,bufs,i);
+ int len = HS_KEYLEN(es, i);
+ char *ptrs = HS_KEY(es, bufs, i);
if (!(len == keylen && strncmp(ptrs, keyptr, keylen) == 0))
{
- int vallen = HS_VALLEN(es,i);
- HS_COPYITEM(ed, bufd, ptrd, ptrs, len, vallen, HS_VALISNULL(es,i));
+ int vallen = HS_VALLEN(es, i);
+
+ HS_COPYITEM(ed, bufd, ptrd, ptrs, len, vallen, HS_VALISNULL(es, i));
++outcount;
}
}
- HS_FINALIZE(out,outcount,bufd,ptrd);
+ HS_FINALIZE(out, outcount, bufd, ptrd);
PG_RETURN_POINTER(out);
}
{
HStore *hs = PG_GETARG_HS(0);
HStore *out = palloc(VARSIZE(hs));
- int hs_count = HS_COUNT(hs);
+ int hs_count = HS_COUNT(hs);
char *ps,
- *bufd,
+ *bufd,
*pd;
HEntry *es,
*ed;
- int i,j;
- int outcount = 0;
+ int i,
+ j;
+ int outcount = 0;
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
- int nkeys;
- Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
+ int nkeys;
+ Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
SET_VARSIZE(out, VARSIZE(hs));
- HS_SETCOUNT(out, hs_count); /* temporary! */
+ HS_SETCOUNT(out, hs_count); /* temporary! */
ps = STRPTR(hs);
es = ARRPTR(hs);
}
/*
- * this is in effect a merge between hs and key_pairs, both of
- * which are already sorted by (keylen,key); we take keys from
- * hs only
+ * this is in effect a merge between hs and key_pairs, both of which are
+ * already sorted by (keylen,key); we take keys from hs only
*/
- for (i = j = 0; i < hs_count; )
+ for (i = j = 0; i < hs_count;)
{
- int difference;
-
+ int difference;
+
if (j >= nkeys)
difference = -1;
else
{
- int skeylen = HS_KEYLEN(es,i);
+ int skeylen = HS_KEYLEN(es, i);
+
if (skeylen == key_pairs[j].keylen)
- difference = strncmp(HS_KEY(es,ps,i),
+ difference = strncmp(HS_KEY(es, ps, i),
key_pairs[j].key,
key_pairs[j].keylen);
else
else
{
HS_COPYITEM(ed, bufd, pd,
- HS_KEY(es,ps,i), HS_KEYLEN(es,i),
- HS_VALLEN(es,i), HS_VALISNULL(es,i));
+ HS_KEY(es, ps, i), HS_KEYLEN(es, i),
+ HS_VALLEN(es, i), HS_VALISNULL(es, i));
++outcount;
++i;
}
}
- HS_FINALIZE(out,outcount,bufd,pd);
+ HS_FINALIZE(out, outcount, bufd, pd);
PG_RETURN_POINTER(out);
}
HStore *hs = PG_GETARG_HS(0);
HStore *hs2 = PG_GETARG_HS(1);
HStore *out = palloc(VARSIZE(hs));
- int hs_count = HS_COUNT(hs);
- int hs2_count = HS_COUNT(hs2);
+ int hs_count = HS_COUNT(hs);
+ int hs2_count = HS_COUNT(hs2);
char *ps,
- *ps2,
- *bufd,
+ *ps2,
+ *bufd,
*pd;
HEntry *es,
- *es2,
+ *es2,
*ed;
- int i,j;
- int outcount = 0;
+ int i,
+ j;
+ int outcount = 0;
SET_VARSIZE(out, VARSIZE(hs));
- HS_SETCOUNT(out, hs_count); /* temporary! */
+ HS_SETCOUNT(out, hs_count); /* temporary! */
ps = STRPTR(hs);
es = ARRPTR(hs);
}
/*
- * this is in effect a merge between hs and hs2, both of
- * which are already sorted by (keylen,key); we take keys from
- * hs only; for equal keys, we take the value from hs unless the
- * values are equal
+ * this is in effect a merge between hs and hs2, both of which are already
+ * sorted by (keylen,key); we take keys from hs only; for equal keys, we
+ * take the value from hs unless the values are equal
*/
- for (i = j = 0; i < hs_count; )
+ for (i = j = 0; i < hs_count;)
{
- int difference;
-
+ int difference;
+
if (j >= hs2_count)
difference = -1;
else
{
- int skeylen = HS_KEYLEN(es,i);
- int s2keylen = HS_KEYLEN(es2,j);
+ int skeylen = HS_KEYLEN(es, i);
+ int s2keylen = HS_KEYLEN(es2, j);
+
if (skeylen == s2keylen)
- difference = strncmp(HS_KEY(es,ps,i),
- HS_KEY(es2,ps2,j),
+ difference = strncmp(HS_KEY(es, ps, i),
+ HS_KEY(es2, ps2, j),
skeylen);
else
difference = (skeylen > s2keylen) ? 1 : -1;
++j;
else if (difference == 0)
{
- int svallen = HS_VALLEN(es,i);
- int snullval = HS_VALISNULL(es,i);
- if (snullval != HS_VALISNULL(es2,j)
+ int svallen = HS_VALLEN(es, i);
+ int snullval = HS_VALISNULL(es, i);
+
+ if (snullval != HS_VALISNULL(es2, j)
|| (!snullval
- && (svallen != HS_VALLEN(es2,j)
- || strncmp(HS_VAL(es,ps,i), HS_VAL(es2,ps2,j), svallen) != 0)))
+ && (svallen != HS_VALLEN(es2, j)
+ || strncmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0)))
{
HS_COPYITEM(ed, bufd, pd,
- HS_KEY(es,ps,i), HS_KEYLEN(es,i),
+ HS_KEY(es, ps, i), HS_KEYLEN(es, i),
svallen, snullval);
++outcount;
}
else
{
HS_COPYITEM(ed, bufd, pd,
- HS_KEY(es,ps,i), HS_KEYLEN(es,i),
- HS_VALLEN(es,i), HS_VALISNULL(es,i));
+ HS_KEY(es, ps, i), HS_KEYLEN(es, i),
+ HS_VALLEN(es, i), HS_VALISNULL(es, i));
++outcount;
++i;
}
}
- HS_FINALIZE(out,outcount,bufd,pd);
+ HS_FINALIZE(out, outcount, bufd, pd);
PG_RETURN_POINTER(out);
}
HStore *out = palloc(VARSIZE(s1) + VARSIZE(s2));
char *ps1,
*ps2,
- *bufd,
+ *bufd,
*pd;
HEntry *es1,
*es2,
*ed;
- int s1idx;
- int s2idx;
- int s1count = HS_COUNT(s1);
- int s2count = HS_COUNT(s2);
- int outcount = 0;
+ int s1idx;
+ int s2idx;
+ int s1count = HS_COUNT(s1);
+ int s2count = HS_COUNT(s2);
+ int outcount = 0;
SET_VARSIZE(out, VARSIZE(s1) + VARSIZE(s2) - HSHRDSIZE);
HS_SETCOUNT(out, s1count + s2count);
ed = ARRPTR(out);
/*
- * this is in effect a merge between s1 and s2, both of which
- * are already sorted by (keylen,key); we take s2 for equal keys
+ * this is in effect a merge between s1 and s2, both of which are already
+ * sorted by (keylen,key); we take s2 for equal keys
*/
for (s1idx = s2idx = 0; s1idx < s1count || s2idx < s2count; ++outcount)
{
- int difference;
-
+ int difference;
+
if (s1idx >= s1count)
difference = 1;
else if (s2idx >= s2count)
difference = -1;
else
{
- int s1keylen = HS_KEYLEN(es1,s1idx);
- int s2keylen = HS_KEYLEN(es2,s2idx);
+ int s1keylen = HS_KEYLEN(es1, s1idx);
+ int s2keylen = HS_KEYLEN(es2, s2idx);
+
if (s1keylen == s2keylen)
- difference = strncmp(HS_KEY(es1,ps1,s1idx),
- HS_KEY(es2,ps2,s2idx),
+ difference = strncmp(HS_KEY(es1, ps1, s1idx),
+ HS_KEY(es2, ps2, s2idx),
s1keylen);
else
difference = (s1keylen > s2keylen) ? 1 : -1;
if (difference >= 0)
{
HS_COPYITEM(ed, bufd, pd,
- HS_KEY(es2,ps2,s2idx), HS_KEYLEN(es2,s2idx),
- HS_VALLEN(es2,s2idx), HS_VALISNULL(es2,s2idx));
+ HS_KEY(es2, ps2, s2idx), HS_KEYLEN(es2, s2idx),
+ HS_VALLEN(es2, s2idx), HS_VALISNULL(es2, s2idx));
++s2idx;
if (difference == 0)
++s1idx;
else
{
HS_COPYITEM(ed, bufd, pd,
- HS_KEY(es1,ps1,s1idx), HS_KEYLEN(es1,s1idx),
- HS_VALLEN(es1,s1idx), HS_VALISNULL(es1,s1idx));
+ HS_KEY(es1, ps1, s1idx), HS_KEYLEN(es1, s1idx),
+ HS_VALLEN(es1, s1idx), HS_VALISNULL(es1, s1idx));
++s1idx;
}
}
- HS_FINALIZE(out,outcount,bufd,pd);
+ HS_FINALIZE(out, outcount, bufd, pd);
PG_RETURN_POINTER(out);
}
{
HStore *hs = PG_GETARG_HS(0);
HEntry *entries = ARRPTR(hs);
- char *ptr = STRPTR(hs);
+ char *ptr = STRPTR(hs);
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
ArrayType *aout;
- Datum *key_datums;
- bool *key_nulls;
- Datum *out_datums;
- bool *out_nulls;
- int key_count;
- int i;
+ Datum *key_datums;
+ bool *key_nulls;
+ Datum *out_datums;
+ bool *out_nulls;
+ int key_count;
+ int i;
deconstruct_array(key_array,
TEXTOID, -1, false, 'i',
for (i = 0; i < key_count; ++i)
{
- text *key = (text*) DatumGetPointer(key_datums[i]);
- int idx;
+ text *key = (text *) DatumGetPointer(key_datums[i]);
+ int idx;
if (key_nulls[i])
idx = -1;
else
idx = hstoreFindKey(hs, NULL, VARDATA(key), VARSIZE(key) - VARHDRSZ);
- if (idx < 0 || HS_VALISNULL(entries,idx))
+ if (idx < 0 || HS_VALISNULL(entries, idx))
{
out_nulls[i] = true;
out_datums[i] = (Datum) 0;
else
{
out_datums[i] = PointerGetDatum(
- cstring_to_text_with_len(HS_VAL(entries,ptr,idx),
- HS_VALLEN(entries,idx)));
+ cstring_to_text_with_len(HS_VAL(entries, ptr, idx),
+ HS_VALLEN(entries, idx)));
out_nulls[i] = false;
}
}
ARR_NDIM(key_array),
ARR_DIMS(key_array),
ARR_LBOUND(key_array),
- TEXTOID, -1, false, 'i');
+ TEXTOID, -1, false, 'i');
PG_RETURN_POINTER(aout);
}
{
HStore *hs = PG_GETARG_HS(0);
HEntry *entries = ARRPTR(hs);
- char *ptr = STRPTR(hs);
+ char *ptr = STRPTR(hs);
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
- HStore *out;
- int nkeys;
- Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
- Pairs *out_pairs;
- int bufsiz;
- int lastidx = 0;
- int i;
- int out_count = 0;
+ HStore *out;
+ int nkeys;
+ Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
+ Pairs *out_pairs;
+ int bufsiz;
+ int lastidx = 0;
+ int i;
+ int out_count = 0;
if (nkeys == 0)
{
bufsiz = 0;
/*
- * we exploit the fact that the pairs list is already sorted into
- * strictly increasing order to narrow the hstoreFindKey search;
- * each search can start one entry past the previous "found"
- * entry, or at the lower bound of the last search.
+ * we exploit the fact that the pairs list is already sorted into strictly
+ * increasing order to narrow the hstoreFindKey search; each search can
+ * start one entry past the previous "found" entry, or at the lower bound
+ * of the last search.
*/
for (i = 0; i < nkeys; ++i)
{
- int idx = hstoreFindKey(hs, &lastidx,
- key_pairs[i].key, key_pairs[i].keylen);
+ int idx = hstoreFindKey(hs, &lastidx,
+ key_pairs[i].key, key_pairs[i].keylen);
if (idx >= 0)
{
out_pairs[out_count].key = key_pairs[i].key;
bufsiz += (out_pairs[out_count].keylen = key_pairs[i].keylen);
- out_pairs[out_count].val = HS_VAL(entries,ptr,idx);
- bufsiz += (out_pairs[out_count].vallen = HS_VALLEN(entries,idx));
- out_pairs[out_count].isnull = HS_VALISNULL(entries,idx);
+ out_pairs[out_count].val = HS_VAL(entries, ptr, idx);
+ bufsiz += (out_pairs[out_count].vallen = HS_VALLEN(entries, idx));
+ out_pairs[out_count].isnull = HS_VALISNULL(entries, idx);
out_pairs[out_count].needfree = false;
++out_count;
}
}
/*
- * we don't use uniquePairs here because we know that the
- * pairs list is already sorted and uniq'ed.
+ * we don't use uniquePairs here because we know that the pairs list is
+ * already sorted and uniq'ed.
*/
out = hstorePairs(out_pairs, out_count, bufsiz);
ArrayType *a;
HEntry *entries = ARRPTR(hs);
char *base = STRPTR(hs);
- int count = HS_COUNT(hs);
- int i;
+ int count = HS_COUNT(hs);
+ int i;
if (count == 0)
{
for (i = 0; i < count; ++i)
{
- text *item = cstring_to_text_with_len(HS_KEY(entries,base,i),
- HS_KEYLEN(entries,i));
+ text *item = cstring_to_text_with_len(HS_KEY(entries, base, i),
+ HS_KEYLEN(entries, i));
+
d[i] = PointerGetDatum(item);
}
a = construct_array(d, count,
- TEXTOID, -1, false, 'i');
+ TEXTOID, -1, false, 'i');
PG_RETURN_POINTER(a);
}
{
HStore *hs = PG_GETARG_HS(0);
Datum *d;
- bool *nulls;
+ bool *nulls;
ArrayType *a;
HEntry *entries = ARRPTR(hs);
char *base = STRPTR(hs);
- int count = HS_COUNT(hs);
- int lb = 1;
- int i;
+ int count = HS_COUNT(hs);
+ int lb = 1;
+ int i;
if (count == 0)
{
for (i = 0; i < count; ++i)
{
- if (HS_VALISNULL(entries,i))
+ if (HS_VALISNULL(entries, i))
{
d[i] = (Datum) 0;
nulls[i] = true;
}
else
{
- text *item = cstring_to_text_with_len(HS_VAL(entries,base,i),
- HS_VALLEN(entries,i));
+ text *item = cstring_to_text_with_len(HS_VAL(entries, base, i),
+ HS_VALLEN(entries, i));
+
d[i] = PointerGetDatum(item);
nulls[i] = false;
}
}
a = construct_md_array(d, nulls, 1, &count, &lb,
- TEXTOID, -1, false, 'i');
+ TEXTOID, -1, false, 'i');
PG_RETURN_POINTER(a);
}
{
HEntry *entries = ARRPTR(hs);
char *base = STRPTR(hs);
- int count = HS_COUNT(hs);
- int out_size[2] = { 0, 2 };
- int lb[2] = { 1, 1 };
+ int count = HS_COUNT(hs);
+ int out_size[2] = {0, 2};
+ int lb[2] = {1, 1};
Datum *out_datums;
bool *out_nulls;
- int i;
+ int i;
Assert(ndims < 3);
for (i = 0; i < count; ++i)
{
- text *key = cstring_to_text_with_len(HS_KEY(entries,base,i),
- HS_KEYLEN(entries,i));
- out_datums[i*2] = PointerGetDatum(key);
- out_nulls[i*2] = false;
+ text *key = cstring_to_text_with_len(HS_KEY(entries, base, i),
+ HS_KEYLEN(entries, i));
+
+ out_datums[i * 2] = PointerGetDatum(key);
+ out_nulls[i * 2] = false;
- if (HS_VALISNULL(entries,i))
+ if (HS_VALISNULL(entries, i))
{
- out_datums[i*2+1] = (Datum) 0;
- out_nulls[i*2+1] = true;
+ out_datums[i * 2 + 1] = (Datum) 0;
+ out_nulls[i * 2 + 1] = true;
}
else
{
- text *item = cstring_to_text_with_len(HS_VAL(entries,base,i),
- HS_VALLEN(entries,i));
- out_datums[i*2+1] = PointerGetDatum(item);
- out_nulls[i*2+1] = false;
+ text *item = cstring_to_text_with_len(HS_VAL(entries, base, i),
+ HS_VALLEN(entries, i));
+
+ out_datums[i * 2 + 1] = PointerGetDatum(item);
+ out_nulls[i * 2 + 1] = false;
}
}
Datum
hstore_to_array(PG_FUNCTION_ARGS)
{
- HStore *hs = PG_GETARG_HS(0);
+ HStore *hs = PG_GETARG_HS(0);
ArrayType *out = hstore_to_array_internal(hs, 1);
PG_RETURN_POINTER(out);
Datum
hstore_to_matrix(PG_FUNCTION_ARGS)
{
- HStore *hs = PG_GETARG_HS(0);
+ HStore *hs = PG_GETARG_HS(0);
ArrayType *out = hstore_to_array_internal(hs, 2);
PG_RETURN_POINTER(out);
*/
static void
-setup_firstcall(FuncCallContext *funcctx, HStore * hs,
+setup_firstcall(FuncCallContext *funcctx, HStore *hs,
FunctionCallInfoData *fcinfo)
{
MemoryContext oldcontext;
- HStore *st;
+ HStore *st;
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
-
+
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
}
hstore_skeys(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
- HStore *hs;
- int i;
+ HStore *hs;
+ int i;
if (SRF_IS_FIRSTCALL())
{
if (i < HS_COUNT(hs))
{
- HEntry *entries = ARRPTR(hs);
+ HEntry *entries = ARRPTR(hs);
text *item;
- item = cstring_to_text_with_len(HS_KEY(entries,STRPTR(hs),i),
- HS_KEYLEN(entries,i));
+ item = cstring_to_text_with_len(HS_KEY(entries, STRPTR(hs), i),
+ HS_KEYLEN(entries, i));
SRF_RETURN_NEXT(funcctx, PointerGetDatum(item));
}
hstore_svals(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
- HStore *hs;
- int i;
+ HStore *hs;
+ int i;
if (SRF_IS_FIRSTCALL())
{
if (i < HS_COUNT(hs))
{
- HEntry *entries = ARRPTR(hs);
+ HEntry *entries = ARRPTR(hs);
- if (HS_VALISNULL(entries,i))
+ if (HS_VALISNULL(entries, i))
{
ReturnSetInfo *rsi;
{
text *item;
- item = cstring_to_text_with_len(HS_VAL(entries,STRPTR(hs),i),
- HS_VALLEN(entries,i));
+ item = cstring_to_text_with_len(HS_VAL(entries, STRPTR(hs), i),
+ HS_VALLEN(entries, i));
SRF_RETURN_NEXT(funcctx, PointerGetDatum(item));
}
char *tstr = STRPTR(tmpl);
HEntry *ve = ARRPTR(val);
char *vstr = STRPTR(val);
- int tcount = HS_COUNT(tmpl);
- int lastidx = 0;
- int i;
+ int tcount = HS_COUNT(tmpl);
+ int lastidx = 0;
+ int i;
/*
- * we exploit the fact that keys in "tmpl" are in strictly
- * increasing order to narrow the hstoreFindKey search; each search
- * can start one entry past the previous "found" entry, or at the
- * lower bound of the search
+ * we exploit the fact that keys in "tmpl" are in strictly increasing
+ * order to narrow the hstoreFindKey search; each search can start one
+ * entry past the previous "found" entry, or at the lower bound of the
+ * search
*/
for (i = 0; res && i < tcount; ++i)
{
- int idx = hstoreFindKey(val, &lastidx,
- HS_KEY(te,tstr,i), HS_KEYLEN(te,i));
+ int idx = hstoreFindKey(val, &lastidx,
+ HS_KEY(te, tstr, i), HS_KEYLEN(te, i));
if (idx >= 0)
{
- bool nullval = HS_VALISNULL(te,i);
- int vallen = HS_VALLEN(te,i);
+ bool nullval = HS_VALISNULL(te, i);
+ int vallen = HS_VALLEN(te, i);
- if (nullval != HS_VALISNULL(ve,idx)
+ if (nullval != HS_VALISNULL(ve, idx)
|| (!nullval
- && (vallen != HS_VALLEN(ve,idx)
- || strncmp(HS_VAL(te,tstr,i), HS_VAL(ve,vstr,idx), vallen))))
+ && (vallen != HS_VALLEN(ve, idx)
+ || strncmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen))))
res = false;
}
else
hstore_each(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
- HStore *hs;
- int i;
+ HStore *hs;
+ int i;
if (SRF_IS_FIRSTCALL())
{
if (i < HS_COUNT(hs))
{
HEntry *entries = ARRPTR(hs);
- char *ptr = STRPTR(hs);
+ char *ptr = STRPTR(hs);
Datum res,
dvalues[2];
bool nulls[2] = {false, false};
text *item;
HeapTuple tuple;
- item = cstring_to_text_with_len(HS_KEY(entries,ptr,i),
- HS_KEYLEN(entries,i));
+ item = cstring_to_text_with_len(HS_KEY(entries, ptr, i),
+ HS_KEYLEN(entries, i));
dvalues[0] = PointerGetDatum(item);
- if (HS_VALISNULL(entries,i))
+ if (HS_VALISNULL(entries, i))
{
dvalues[1] = (Datum) 0;
nulls[1] = true;
}
else
{
- item = cstring_to_text_with_len(HS_VAL(entries,ptr,i),
- HS_VALLEN(entries,i));
+ item = cstring_to_text_with_len(HS_VAL(entries, ptr, i),
+ HS_VALLEN(entries, i));
dvalues[1] = PointerGetDatum(item);
}
{
HStore *hs1 = PG_GETARG_HS(0);
HStore *hs2 = PG_GETARG_HS(1);
- int hcount1 = HS_COUNT(hs1);
- int hcount2 = HS_COUNT(hs2);
- int res = 0;
+ int hcount1 = HS_COUNT(hs1);
+ int hcount2 = HS_COUNT(hs2);
+ int res = 0;
if (hcount1 == 0 || hcount2 == 0)
{
/*
- * if either operand is empty, and the other is nonempty, the
- * nonempty one is larger. If both are empty they are equal.
+ * if either operand is empty, and the other is nonempty, the nonempty
+ * one is larger. If both are empty they are equal.
*/
if (hcount1 > 0)
res = 1;
else
{
/* here we know both operands are nonempty */
- char *str1 = STRPTR(hs1);
- char *str2 = STRPTR(hs2);
- HEntry *ent1 = ARRPTR(hs1);
- HEntry *ent2 = ARRPTR(hs2);
- size_t len1 = HSE_ENDPOS(ent1[2*hcount1 - 1]);
- size_t len2 = HSE_ENDPOS(ent2[2*hcount2 - 1]);
+ char *str1 = STRPTR(hs1);
+ char *str2 = STRPTR(hs2);
+ HEntry *ent1 = ARRPTR(hs1);
+ HEntry *ent2 = ARRPTR(hs2);
+ size_t len1 = HSE_ENDPOS(ent1[2 * hcount1 - 1]);
+ size_t len2 = HSE_ENDPOS(ent2[2 * hcount2 - 1]);
- res = memcmp(str1, str2, Min(len1,len2));
+ res = memcmp(str1, str2, Min(len1, len2));
if (res == 0)
{
res = -1;
else
{
- int count = hcount1 * 2;
- int i;
+ int count = hcount1 * 2;
+ int i;
for (i = 0; i < count; ++i)
if (HSE_ENDPOS(ent1[i]) != HSE_ENDPOS(ent2[i]) ||
}
/*
- * this is a btree support function; this is one of the few
- * places where memory needs to be explicitly freed.
+ * this is a btree support function; this is one of the few places where
+ * memory needs to be explicitly freed.
*/
- PG_FREE_IF_COPY(hs1,0);
- PG_FREE_IF_COPY(hs2,1);
+ PG_FREE_IF_COPY(hs1, 0);
+ PG_FREE_IF_COPY(hs2, 1);
PG_RETURN_INT32(res);
}
Datum
hstore_eq(PG_FUNCTION_ARGS)
{
- int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1)));
+ int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1)));
+
PG_RETURN_BOOL(res == 0);
}
Datum
hstore_ne(PG_FUNCTION_ARGS)
{
- int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1)));
+ int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1)));
+
PG_RETURN_BOOL(res != 0);
}
Datum
hstore_gt(PG_FUNCTION_ARGS)
{
- int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1)));
+ int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1)));
+
PG_RETURN_BOOL(res > 0);
}
Datum
hstore_ge(PG_FUNCTION_ARGS)
{
- int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1)));
+ int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1)));
+
PG_RETURN_BOOL(res >= 0);
}
Datum
hstore_lt(PG_FUNCTION_ARGS)
{
- int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1)));
+ int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1)));
+
PG_RETURN_BOOL(res < 0);
}
Datum
hstore_le(PG_FUNCTION_ARGS)
{
- int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1)));
+ int res = DatumGetInt32(DirectFunctionCall2(hstore_cmp,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1)));
+
PG_RETURN_BOOL(res <= 0);
}
hstore_hash(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
- Datum hval = hash_any((unsigned char *)VARDATA(hs),
+ Datum hval = hash_any((unsigned char *) VARDATA(hs),
VARSIZE(hs) - VARHDRSZ);
/*
- * this is the only place in the code that cares whether the
- * overall varlena size exactly matches the true data size;
- * this assertion should be maintained by all the other code,
- * but we make it explicit here.
+ * this is the only place in the code that cares whether the overall
+ * varlena size exactly matches the true data size; this assertion should
+ * be maintained by all the other code, but we make it explicit here.
*/
Assert(VARSIZE(hs) ==
(HS_COUNT(hs) != 0 ?
CALCDATASIZE(HS_COUNT(hs),
- HSE_ENDPOS(ARRPTR(hs)[2*HS_COUNT(hs) - 1])) :
+ HSE_ENDPOS(ARRPTR(hs)[2 * HS_COUNT(hs) - 1])) :
HSHRDSIZE));
- PG_FREE_IF_COPY(hs,0);
+ PG_FREE_IF_COPY(hs, 0);
PG_RETURN_DATUM(hval);
}
* isn.c
* PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
*
- * Author: German Mendez Bravo (Kronuz)
+ * Author: German Mendez Bravo (Kronuz)
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/isn/isn.c,v 1.13 2010/02/05 04:34:51 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/isn/isn.c,v 1.14 2010/02/26 02:00:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* isn.h
* PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
*
- * Author: German Mendez Bravo (Kronuz)
+ * Author: German Mendez Bravo (Kronuz)
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/isn/isn.h,v 1.9 2010/02/05 04:34:51 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/isn/isn.h,v 1.10 2010/02/26 02:00:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Originally by
* B. Palmer, bpalmer@crimelabs.net 1-17-2001
*
- * $PostgreSQL: pgsql/contrib/oid2name/oid2name.c,v 1.37 2010/02/07 20:48:08 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/oid2name/oid2name.c,v 1.38 2010/02/26 02:00:32 momjian Exp $
*/
#include "postgres_fe.h"
/* get the oid and database name from the system pg_database table */
snprintf(todo, sizeof(todo),
"SELECT d.oid AS \"Oid\", datname AS \"Database Name\", "
- "spcname AS \"Tablespace\" FROM pg_catalog.pg_database d JOIN pg_catalog.pg_tablespace t ON "
+ "spcname AS \"Tablespace\" FROM pg_catalog.pg_database d JOIN pg_catalog.pg_tablespace t ON "
"(dattablespace = t.oid) ORDER BY 2");
sql_exec(conn, todo, opts->quiet);
char *addfields = ",c.oid AS \"Oid\", nspname AS \"Schema\", spcname as \"Tablespace\" ";
snprintf(todo, sizeof(todo),
- "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s "
+ "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s "
"FROM pg_class c "
" LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace "
- " LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),"
+ " LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),"
" pg_catalog.pg_tablespace t "
"WHERE relkind IN ('r'%s%s) AND "
" %s"
/* now build the query */
todo = (char *) myalloc(650 + strlen(qualifiers));
snprintf(todo, 650 + strlen(qualifiers),
- "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
+ "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
"FROM pg_catalog.pg_class c \n"
" LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
" LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
* Author: Laurenz Albe <laurenz.albe@wien.gv.at>
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/passwordcheck/passwordcheck.c,v 1.2 2010/01/02 16:57:32 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/passwordcheck/passwordcheck.c,v 1.3 2010/02/26 02:00:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
switch (password_type)
{
case PASSWORD_TYPE_MD5:
+
/*
- * Unfortunately we cannot perform exhaustive checks on
- * encrypted passwords - we are restricted to guessing.
- * (Alternatively, we could insist on the password being
- * presented non-encrypted, but that has its own security
- * disadvantages.)
+ * Unfortunately we cannot perform exhaustive checks on encrypted
+ * passwords - we are restricted to guessing. (Alternatively, we
+ * could insist on the password being presented non-encrypted, but
+ * that has its own security disadvantages.)
*
* We only check for username = password.
*/
break;
case PASSWORD_TYPE_PLAINTEXT:
+
/*
* For unencrypted passwords we can perform better checks
*/
for (i = 0; i < pwdlen; i++)
{
/*
- * isalpha() does not work for multibyte encodings
- * but let's consider non-ASCII characters non-letters
+ * isalpha() does not work for multibyte encodings but let's
+ * consider non-ASCII characters non-letters
*/
if (isalpha((unsigned char) password[i]))
pwd_has_letter = true;
if (!pwd_has_letter || !pwd_has_nonletter)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("password must contain both letters and nonletters")));
+ errmsg("password must contain both letters and nonletters")));
#ifdef USE_CRACKLIB
/* call cracklib to check password */
/*
- * $PostgreSQL: pgsql/contrib/pg_standby/pg_standby.c,v 1.27 2009/11/04 12:51:30 heikki Exp $
+ * $PostgreSQL: pgsql/contrib/pg_standby/pg_standby.c,v 1.28 2010/02/26 02:00:32 momjian Exp $
*
*
* pg_standby.c
}
#ifndef WIN32
+
/*
* You can send SIGUSR1 to trigger failover.
*
}
break;
case 'l': /* Use link */
+
/*
- * Link feature disabled, possibly permanently. Linking
- * causes a problem after recovery ends that is not currently
+ * Link feature disabled, possibly permanently. Linking causes
+ * a problem after recovery ends that is not currently
* resolved by PostgreSQL. 25 Jun 2009
*/
#ifdef NOT_USED
* Copyright (c) 2008-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/pg_stat_statements/pg_stat_statements.c,v 1.12 2010/01/08 00:38:19 itagaki Exp $
+ * $PostgreSQL: pgsql/contrib/pg_stat_statements/pg_stat_statements.c,v 1.13 2010/02/26 02:00:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef struct Counters
{
- int64 calls; /* # of times executed */
- double total_time; /* total execution time in seconds */
- int64 rows; /* total # of retrieved or affected rows */
+ int64 calls; /* # of times executed */
+ double total_time; /* total execution time in seconds */
+ int64 rows; /* total # of retrieved or affected rows */
int64 shared_blks_hit; /* # of shared buffer hits */
- int64 shared_blks_read; /* # of shared disk blocks read */
- int64 shared_blks_written;/* # of shared disk blocks written */
- int64 local_blks_hit; /* # of local buffer hits */
+ int64 shared_blks_read; /* # of shared disk blocks read */
+ int64 shared_blks_written; /* # of shared disk blocks written */
+ int64 local_blks_hit; /* # of local buffer hits */
int64 local_blks_read; /* # of local disk blocks read */
- int64 local_blks_written; /* # of local disk blocks written */
- int64 temp_blks_read; /* # of temp blocks read */
- int64 temp_blks_written; /* # of temp blocks written */
- double usage; /* usage factor */
+ int64 local_blks_written; /* # of local disk blocks written */
+ int64 temp_blks_read; /* # of temp blocks read */
+ int64 temp_blks_written; /* # of temp blocks written */
+ double usage; /* usage factor */
} Counters;
/*
static int pgss_max; /* max # statements to track */
static int pgss_track; /* tracking level */
-static bool pgss_track_utility; /* whether to track utility commands */
+static bool pgss_track_utility; /* whether to track utility commands */
static bool pgss_save; /* whether to save stats across shutdown */
long count);
static void pgss_ExecutorEnd(QueryDesc *queryDesc);
static void pgss_ProcessUtility(Node *parsetree,
- const char *queryString, ParamListInfo params, bool isTopLevel,
- DestReceiver *dest, char *completionTag);
+ const char *queryString, ParamListInfo params, bool isTopLevel,
+ DestReceiver *dest, char *completionTag);
static uint32 pgss_hash_fn(const void *key, Size keysize);
static int pgss_match_fn(const void *key1, const void *key2, Size keysize);
static void pgss_store(const char *query, double total_time, uint64 rows,
- const BufferUsage *bufusage);
+ const BufferUsage *bufusage);
static Size pgss_memsize(void);
static pgssEntry *entry_alloc(pgssHashKey *key);
static void entry_dealloc(void);
NULL);
DefineCustomBoolVariable("pg_stat_statements.track_utility",
- "Selects whether utility commands are tracked by pg_stat_statements.",
+ "Selects whether utility commands are tracked by pg_stat_statements.",
NULL,
&pgss_track_utility,
true,
on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);
/*
- * Attempt to load old statistics from the dump file, if this is the
- * first time through and we weren't told not to.
+ * Attempt to load old statistics from the dump file, if this is the first
+ * time through and we weren't told not to.
*/
if (found || !pgss_save)
return;
instr_time start;
instr_time duration;
uint64 rows = 0;
- BufferUsage bufusage;
+ BufferUsage bufusage;
bufusage = pgBufferUsage;
INSTR_TIME_SET_CURRENT(start);
* A simple benchmark program for PostgreSQL
* Originally written by Tatsuo Ishii and enhanced by many contributors.
*
- * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.96 2010/01/06 01:30:03 itagaki Exp $
+ * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.97 2010/02/26 02:00:32 momjian Exp $
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
*/
#ifdef WIN32
-#define FD_SETSIZE 1024 /* set before winsock2.h is included */
+#define FD_SETSIZE 1024 /* set before winsock2.h is included */
#endif /* ! WIN32 */
#include "postgres_fe.h"
#ifdef WIN32
/* Use native win32 threads on Windows */
-typedef struct win32_pthread *pthread_t;
-typedef int pthread_attr_t;
-
-static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void * (*start_routine)(void *), void *arg);
-static int pthread_join(pthread_t th, void **thread_return);
+typedef struct win32_pthread *pthread_t;
+typedef int pthread_attr_t;
+static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
+static int pthread_join(pthread_t th, void **thread_return);
#elif defined(ENABLE_THREAD_SAFETY)
/* Use platform-dependent pthread capability */
#include <pthread.h>
-
#else
/* Use emulation with fork. Rename pthread identifiers to avoid conflicts */
#define pthread_create pg_pthread_create
#define pthread_join pg_pthread_join
-typedef struct fork_pthread *pthread_t;
-typedef int pthread_attr_t;
-
-static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void * (*start_routine)(void *), void *arg);
-static int pthread_join(pthread_t th, void **thread_return);
+typedef struct fork_pthread *pthread_t;
+typedef int pthread_attr_t;
+static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
+static int pthread_join(pthread_t th, void **thread_return);
#endif
extern char *optarg;
* end of configurable parameters
*********************************************************************/
-#define nbranches 1 /* Makes little sense to change this. Change -s instead */
+#define nbranches 1 /* Makes little sense to change this. Change
+ * -s instead */
#define ntellers 10
#define naccounts 100000
} Variable;
#define MAX_FILES 128 /* max number of SQL script files allowed */
-#define SHELL_COMMAND_SIZE 256 /* maximum size allowed for shell command */
+#define SHELL_COMMAND_SIZE 256 /* maximum size allowed for shell command */
/*
* structures used in custom query mode
*/
typedef struct
{
- pthread_t thread; /* thread handle */
- CState *state; /* array of CState */
- int nstate; /* length of state[] */
- instr_time start_time; /* thread start time */
+ pthread_t thread; /* thread handle */
+ CState *state; /* array of CState */
+ int nstate; /* length of state[] */
+ instr_time start_time; /* thread start time */
} TState;
#define INVALID_THREAD ((pthread_t) 0)
typedef struct
{
- instr_time conn_time;
- int xacts;
+ instr_time conn_time;
+ int xacts;
} TResult;
/*
char *argv[MAX_ARGS]; /* command list */
} Command;
-static Command **sql_files[MAX_FILES]; /* SQL script files */
-static int num_files; /* number of script files */
-static int debug = 0; /* debug flag */
+static Command **sql_files[MAX_FILES]; /* SQL script files */
+static int num_files; /* number of script files */
+static int debug = 0; /* debug flag */
/* default scenario */
static char *tpc_b = {
/* Function prototypes */
static void setalarm(int seconds);
-static void* threadRun(void *arg);
+static void *threadRun(void *arg);
static void
usage(const char *progname)
static bool
isLegalVariableName(const char *name)
{
- int i;
+ int i;
for (i = 0; name[i] != '\0'; i++)
{
static bool
runShellCommand(CState *st, char *variable, char **argv, int argc)
{
- char command[SHELL_COMMAND_SIZE];
- int i,
- len = 0;
- FILE *fp;
- char res[64];
- char *endptr;
- int retval;
+ char command[SHELL_COMMAND_SIZE];
+ int i,
+ len = 0;
+ FILE *fp;
+ char res[64];
+ char *endptr;
+ int retval;
/*
* Join arguments with whilespace separaters. Arguments starting with
- * exactly one colon are treated as variables:
- * name - append a string "name"
- * :var - append a variable named 'var'.
- * ::name - append a string ":name"
+ * exactly one colon are treated as variables: name - append a string
+ * "name" :var - append a variable named 'var'. ::name - append a string
+ * ":name"
*/
for (i = 0; i < argc; i++)
{
- char *arg;
- int arglen;
+ char *arg;
+ int arglen;
if (argv[i][0] != ':')
{
- arg = argv[i]; /* a string literal */
+ arg = argv[i]; /* a string literal */
}
else if (argv[i][1] == ':')
{
static bool
clientDone(CState *st, bool ok)
{
- (void) ok; /* unused */
+ (void) ok; /* unused */
if (st->con != NULL)
{
PQfinish(st->con);
st->con = NULL;
}
- return false; /* always false */
+ return false; /* always false */
}
/* return false iff client should be disconnected */
{
case PGRES_COMMAND_OK:
case PGRES_TUPLES_OK:
- break; /* OK */
+ break; /* OK */
default:
fprintf(stderr, "Client %d aborted in state %d: %s",
- st->id, st->state, PQerrorMessage(st->con));
+ st->id, st->state, PQerrorMessage(st->con));
PQclear(res);
return clientDone(st, false);
}
if (st->con == NULL)
{
- instr_time start, end;
+ instr_time start,
+ end;
INSTR_TIME_SET_CURRENT(start);
if ((st->con = doConnect()) == NULL)
{
char *var;
int usec;
- instr_time now;
+ instr_time now;
if (*argv[1] == ':')
{
}
else if (pg_strcasecmp(argv[0], "setshell") == 0)
{
- bool ret = runShellCommand(st, argv[1], argv + 2, argc - 2);
+ bool ret = runShellCommand(st, argv[1], argv + 2, argc - 2);
- if (timer_exceeded) /* timeout */
+ if (timer_exceeded) /* timeout */
return clientDone(st, true);
else if (!ret) /* on error */
{
}
else if (pg_strcasecmp(argv[0], "shell") == 0)
{
- bool ret = runShellCommand(st, NULL, argv + 1, argc - 1);
+ bool ret = runShellCommand(st, NULL, argv + 1, argc - 1);
- if (timer_exceeded) /* timeout */
+ if (timer_exceeded) /* timeout */
return clientDone(st, true);
else if (!ret) /* on error */
{
*/
if (my_commands->argv[1][0] != ':')
{
- char *c = my_commands->argv[1];
+ char *c = my_commands->argv[1];
while (isdigit((unsigned char) *c))
c++;
time_include = INSTR_TIME_GET_DOUBLE(total_time);
tps_include = normal_xacts / time_include;
tps_exclude = normal_xacts / (time_include -
- (INSTR_TIME_GET_DOUBLE(conn_total_time) / nthreads));
+ (INSTR_TIME_GET_DOUBLE(conn_total_time) / nthreads));
if (ttype == 0)
s = "TPC-B (sort of)";
main(int argc, char **argv)
{
int c;
- int nclients = 1; /* default number of simulated clients */
- int nthreads = 1; /* default number of threads */
+ int nclients = 1; /* default number of simulated clients */
+ int nthreads = 1; /* default number of threads */
int is_init_mode = 0; /* initialize mode? */
int is_no_vacuum = 0; /* no vacuum at all before testing? */
int do_vacuum_accounts = 0; /* do vacuum accounts before testing? */
}
#endif /* HAVE_GETRLIMIT */
break;
- case 'j': /* jobs */
+ case 'j': /* jobs */
nthreads = atoi(optarg);
if (nthreads <= 0)
{
/* the first thread (i = 0) is executed by main thread */
if (i > 0)
{
- int err = pthread_create(&threads[i].thread, NULL, threadRun, &threads[i]);
+ int err = pthread_create(&threads[i].thread, NULL, threadRun, &threads[i]);
+
if (err != 0 || threads[i].thread == INVALID_THREAD)
{
fprintf(stderr, "cannot create thread: %s\n", strerror(err));
INSTR_TIME_SET_ZERO(conn_total_time);
for (i = 0; i < nthreads; i++)
{
- void *ret = NULL;
+ void *ret = NULL;
if (threads[i].thread == INVALID_THREAD)
ret = threadRun(&threads[i]);
if (ret != NULL)
{
- TResult *r = (TResult *) ret;
+ TResult *r = (TResult *) ret;
+
total_xacts += r->xacts;
INSTR_TIME_ADD(conn_total_time, r->conn_time);
free(ret);
{
TState *thread = (TState *) arg;
CState *state = thread->state;
- TResult *result;
- instr_time start, end;
+ TResult *result;
+ instr_time start,
+ end;
int nstate = thread->nstate;
- int remains = nstate; /* number of remaining clients */
+ int remains = nstate; /* number of remaining clients */
int i;
result = malloc(sizeof(TResult));
st->use_file = getrand(0, num_files - 1);
if (!doCustom(st, &result->conn_time))
- remains--; /* I've aborted */
+ remains--; /* I've aborted */
if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND)
{
while (remains > 0)
{
- fd_set input_mask;
- int maxsock; /* max socket number to be waited */
- int64 now_usec = 0;
- int64 min_usec;
+ fd_set input_mask;
+ int maxsock; /* max socket number to be waited */
+ int64 now_usec = 0;
+ int64 min_usec;
FD_ZERO(&input_mask);
if (min_usec == INT64_MAX)
{
instr_time now;
+
INSTR_TIME_SET_CURRENT(now);
now_usec = INSTR_TIME_GET_MICROSEC(now);
}
goto done;
}
- FD_SET(sock, &input_mask);
+ FD_SET (sock, &input_mask);
+
if (maxsock < sock)
maxsock = sock;
}
if (min_usec > 0 && maxsock != -1)
{
- int nsocks; /* return from select(2) */
+ int nsocks; /* return from select(2) */
if (min_usec != INT64_MAX)
{
- struct timeval timeout;
+ struct timeval timeout;
+
timeout.tv_sec = min_usec / 1000000;
timeout.tv_usec = min_usec % 1000000;
nsocks = select(maxsock + 1, &input_mask, NULL, NULL, &timeout);
int prev_ecnt = st->ecnt;
if (st->con && (FD_ISSET(PQsocket(st->con), &input_mask)
- || commands[st->state]->type == META_COMMAND))
+ || commands[st->state]->type == META_COMMAND))
{
if (!doCustom(st, &result->conn_time))
- remains--; /* I've aborted */
+ remains--; /* I've aborted */
}
if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND)
typedef struct fork_pthread
{
- pid_t pid;
- int pipes[2];
-} fork_pthread;
+ pid_t pid;
+ int pipes[2];
+} fork_pthread;
static int
pthread_create(pthread_t *thread,
pthread_attr_t *attr,
- void * (*start_routine)(void *),
+ void *(*start_routine) (void *),
void *arg)
{
- fork_pthread *th;
- void *ret;
- instr_time start_time;
+ fork_pthread *th;
+ void *ret;
+ instr_time start_time;
th = (fork_pthread *) malloc(sizeof(fork_pthread));
pipe(th->pipes);
th->pid = fork();
- if (th->pid == -1) /* error */
+ if (th->pid == -1) /* error */
{
free(th);
return errno;
}
- if (th->pid != 0) /* in parent process */
+ if (th->pid != 0) /* in parent process */
{
close(th->pipes[1]);
*thread = th;
setalarm(duration);
/*
- * Set a different random seed in each child process. Otherwise they
- * all inherit the parent's state and generate the same "random"
- * sequence. (In the threaded case, the different threads will obtain
- * subsets of the output of a single random() sequence, which should be
- * okay for our purposes.)
+ * Set a different random seed in each child process. Otherwise they all
+ * inherit the parent's state and generate the same "random" sequence.
+ * (In the threaded case, the different threads will obtain subsets of the
+ * output of a single random() sequence, which should be okay for our
+ * purposes.)
*/
INSTR_TIME_SET_CURRENT(start_time);
srandom(((unsigned int) INSTR_TIME_GET_MICROSEC(start_time)) +
static int
pthread_join(pthread_t th, void **thread_return)
{
- int status;
+ int status;
while (waitpid(th->pid, &status, 0) != th->pid)
{
free(th);
return 0;
}
-
#endif
-
#else /* WIN32 */
static VOID CALLBACK
typedef struct win32_pthread
{
HANDLE handle;
- void *(*routine)(void *);
+ void *(*routine) (void *);
void *arg;
void *result;
} win32_pthread;
static int
pthread_create(pthread_t *thread,
pthread_attr_t *attr,
- void * (*start_routine)(void *),
+ void *(*start_routine) (void *),
void *arg)
{
- int save_errno;
- win32_pthread *th;
+ int save_errno;
+ win32_pthread *th;
th = (win32_pthread *) malloc(sizeof(win32_pthread));
th->routine = start_routine;
/*-------------------------------------------------------------------------
*
* unaccent.c
- * Text search unaccent dictionary
+ * Text search unaccent dictionary
*
* Copyright (c) 2009-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/contrib/unaccent/unaccent.c,v 1.4 2010/01/02 16:57:33 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/unaccent/unaccent.c,v 1.5 2010/02/26 02:00:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
PG_MODULE_MAGIC;
/*
- * Unaccent dictionary uses uncompressed suffix tree to find a
- * character to replace. Each node of tree is an array of
+ * Unaccent dictionary uses uncompressed suffix tree to find a
+ * character to replace. Each node of tree is an array of
* SuffixChar struct with length = 256 (n-th element of array
* corresponds to byte)
*/
-typedef struct SuffixChar {
- struct SuffixChar *nextChar;
- char *replaceTo;
- int replacelen;
+typedef struct SuffixChar
+{
+ struct SuffixChar *nextChar;
+ char *replaceTo;
+ int replacelen;
} SuffixChar;
/*
* placeChar - put str into tree's structure, byte by byte.
*/
-static SuffixChar*
+static SuffixChar *
placeChar(SuffixChar *node, unsigned char *str, int lenstr, char *replaceTo, int replacelen)
{
- SuffixChar *curnode;
+ SuffixChar *curnode;
- if ( !node )
+ if (!node)
{
node = palloc(sizeof(SuffixChar) * 256);
memset(node, 0, sizeof(SuffixChar) * 256);
curnode = node + *str;
- if ( lenstr == 1 )
+ if (lenstr == 1)
{
- if ( curnode->replaceTo )
+ if (curnode->replaceTo)
elog(WARNING, "duplicate TO argument, use first one");
else
{
curnode->replacelen = replacelen;
- curnode->replaceTo = palloc( replacelen );
+ curnode->replaceTo = palloc(replacelen);
memcpy(curnode->replaceTo, replaceTo, replacelen);
}
}
else
{
- curnode->nextChar = placeChar( curnode->nextChar, str+1, lenstr-1, replaceTo, replacelen);
+ curnode->nextChar = placeChar(curnode->nextChar, str + 1, lenstr - 1, replaceTo, replacelen);
}
return node;
* initSuffixTree - create suffix tree from file. Function converts
* UTF8-encoded file into current encoding.
*/
-static SuffixChar*
-initSuffixTree(char *filename)
+static SuffixChar *
+initSuffixTree(char *filename)
{
- SuffixChar * volatile rootSuffixTree = NULL;
+ SuffixChar *volatile rootSuffixTree = NULL;
MemoryContext ccxt = CurrentMemoryContext;
- tsearch_readline_state trst;
- volatile bool skip;
+ tsearch_readline_state trst;
+ volatile bool skip;
filename = get_tsearch_config_filename(filename, "rules");
if (!tsearch_readline_begin(&trst, filename))
errmsg("could not open unaccent file \"%s\": %m",
filename)));
- do
+ do
{
- char src[4096];
- char trg[4096];
- int srclen;
- int trglen;
- char *line = NULL;
+ char src[4096];
+ char trg[4096];
+ int srclen;
+ int trglen;
+ char *line = NULL;
skip = true;
PG_TRY();
{
/*
- * pg_do_encoding_conversion() (called by tsearch_readline())
- * will emit exception if it finds untranslatable characters in current locale.
- * We just skip such characters.
+ * pg_do_encoding_conversion() (called by tsearch_readline()) will
+ * emit exception if it finds untranslatable characters in current
+ * locale. We just skip such characters.
*/
while ((line = tsearch_readline(&trst)) != NULL)
{
- if ( sscanf(line, "%s\t%s\n", src, trg)!=2 )
+ if (sscanf(line, "%s\t%s\n", src, trg) != 2)
continue;
srclen = strlen(src);
trglen = strlen(trg);
- rootSuffixTree = placeChar(rootSuffixTree,
- (unsigned char*)src, srclen,
- trg, trglen);
+ rootSuffixTree = placeChar(rootSuffixTree,
+ (unsigned char *) src, srclen,
+ trg, trglen);
skip = false;
pfree(line);
}
}
PG_END_TRY();
}
- while(skip);
+ while (skip);
tsearch_readline_end(&trst);
/*
* findReplaceTo - find multibyte character in tree
*/
-static SuffixChar *
-findReplaceTo( SuffixChar *node, unsigned char *src, int srclen )
+static SuffixChar *
+findReplaceTo(SuffixChar *node, unsigned char *src, int srclen)
{
- while( node )
+ while (node)
{
node = node + *src;
- if ( srclen == 1 )
+ if (srclen == 1)
return node;
src++;
}
PG_FUNCTION_INFO_V1(unaccent_init);
-Datum unaccent_init(PG_FUNCTION_ARGS);
+Datum unaccent_init(PG_FUNCTION_ARGS);
Datum
unaccent_init(PG_FUNCTION_ARGS)
{
- List *dictoptions = (List *) PG_GETARG_POINTER(0);
+ List *dictoptions = (List *) PG_GETARG_POINTER(0);
SuffixChar *rootSuffixTree = NULL;
- bool fileloaded = false;
+ bool fileloaded = false;
ListCell *l;
foreach(l, dictoptions)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("multiple Rules parameters")));
- rootSuffixTree = initSuffixTree(defGetString(defel));
- fileloaded = true;
+ rootSuffixTree = initSuffixTree(defGetString(defel));
+ fileloaded = true;
}
else
{
}
PG_FUNCTION_INFO_V1(unaccent_lexize);
-Datum unaccent_lexize(PG_FUNCTION_ARGS);
+Datum unaccent_lexize(PG_FUNCTION_ARGS);
Datum
unaccent_lexize(PG_FUNCTION_ARGS)
{
- SuffixChar *rootSuffixTree = (SuffixChar*)PG_GETARG_POINTER(0);
- char *srcchar = (char *) PG_GETARG_POINTER(1);
+ SuffixChar *rootSuffixTree = (SuffixChar *) PG_GETARG_POINTER(0);
+ char *srcchar = (char *) PG_GETARG_POINTER(1);
int32 len = PG_GETARG_INT32(2);
- char *srcstart, *trgchar = NULL;
+ char *srcstart,
+ *trgchar = NULL;
int charlen;
TSLexeme *res = NULL;
SuffixChar *node;
srcstart = srcchar;
- while( srcchar - srcstart < len )
+ while (srcchar - srcstart < len)
{
charlen = pg_mblen(srcchar);
- node = findReplaceTo( rootSuffixTree, (unsigned char *) srcchar, charlen );
- if ( node && node->replaceTo )
+ node = findReplaceTo(rootSuffixTree, (unsigned char *) srcchar, charlen);
+ if (node && node->replaceTo)
{
- if ( !res )
+ if (!res)
{
/* allocate res only it it's needed */
res = palloc0(sizeof(TSLexeme) * 2);
- res->lexeme = trgchar = palloc( len * pg_database_encoding_max_length() + 1 /* \0 */ );
+ res->lexeme = trgchar = palloc(len * pg_database_encoding_max_length() + 1 /* \0 */ );
res->flags = TSL_FILTER;
- if ( srcchar != srcstart )
+ if (srcchar != srcstart)
{
memcpy(trgchar, srcstart, srcchar - srcstart);
trgchar += (srcchar - srcstart);
}
}
- memcpy( trgchar, node->replaceTo, node->replacelen );
- trgchar += node->replacelen;
+ memcpy(trgchar, node->replaceTo, node->replacelen);
+ trgchar += node->replacelen;
}
- else if ( res )
+ else if (res)
{
- memcpy( trgchar, srcchar, charlen );
+ memcpy(trgchar, srcchar, charlen);
trgchar += charlen;
}
srcchar += charlen;
}
- if ( res )
+ if (res)
*trgchar = '\0';
PG_RETURN_POINTER(res);
* Function-like wrapper for dictionary
*/
PG_FUNCTION_INFO_V1(unaccent_dict);
-Datum unaccent_dict(PG_FUNCTION_ARGS);
+Datum unaccent_dict(PG_FUNCTION_ARGS);
Datum
unaccent_dict(PG_FUNCTION_ARGS)
{
- text *str;
- int strArg;
- Oid dictOid;
- TSDictionaryCacheEntry *dict;
- TSLexeme *res;
+ text *str;
+ int strArg;
+ Oid dictOid;
+ TSDictionaryCacheEntry *dict;
+ TSLexeme *res;
if (PG_NARGS() == 1)
{
dict = lookup_ts_dictionary_cache(dictOid);
res = (TSLexeme *) DatumGetPointer(FunctionCall4(&(dict->lexize),
- PointerGetDatum(dict->dictData),
- PointerGetDatum(VARDATA(str)),
- Int32GetDatum(VARSIZE(str) - VARHDRSZ),
+ PointerGetDatum(dict->dictData),
+ PointerGetDatum(VARDATA(str)),
+ Int32GetDatum(VARSIZE(str) - VARHDRSZ),
PointerGetDatum(NULL)));
PG_FREE_IF_COPY(str, strArg);
- if ( res == NULL )
+ if (res == NULL)
{
PG_RETURN_TEXT_P(PG_GETARG_TEXT_P_COPY(strArg));
}
- else if ( res->lexeme == NULL )
+ else if (res->lexeme == NULL)
{
pfree(res);
PG_RETURN_TEXT_P(PG_GETARG_TEXT_P_COPY(strArg));
}
else
{
- text *txt = cstring_to_text(res->lexeme);
+ text *txt = cstring_to_text(res->lexeme);
pfree(res->lexeme);
pfree(res);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.32 2010/01/22 16:40:18 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.33 2010/02/26 02:00:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
attribute_reloptions(Datum reloptions, bool validate)
{
relopt_value *options;
- AttributeOpts *aopts;
+ AttributeOpts *aopts;
int numoptions;
static const relopt_parse_elt tab[] = {
{"n_distinct", RELOPT_TYPE_REAL, offsetof(AttributeOpts, n_distinct)},
tablespace_reloptions(Datum reloptions, bool validate)
{
relopt_value *options;
- TableSpaceOpts *tsopts;
+ TableSpaceOpts *tsopts;
int numoptions;
static const relopt_parse_elt tab[] = {
{"random_page_cost", RELOPT_TYPE_REAL, offsetof(TableSpaceOpts, random_page_cost)},
*
* These functions provide conversion between rowtypes that are logically
* equivalent but might have columns in a different order or different sets
- * of dropped columns. There is some overlap of functionality with the
+ * of dropped columns. There is some overlap of functionality with the
* executor's "junkfilter" routines, but these functions work on bare
* HeapTuples rather than TupleTableSlots.
*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/tupconvert.c,v 1.3 2010/01/02 16:57:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/tupconvert.c,v 1.4 2010/02/26 02:00:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int32 atttypmod;
if (att->attisdropped)
- continue; /* attrMap[i] is already 0 */
+ continue; /* attrMap[i] is already 0 */
noutcols++;
atttypid = att->atttypid;
atttypmod = att->atttypmod;
nincols, noutcols)));
/*
- * Check to see if the map is one-to-one and the tuple types are the
- * same. (We check the latter because if they're not, we want to do
- * conversion to inject the right OID into the tuple datum.)
+ * Check to see if the map is one-to-one and the tuple types are the same.
+ * (We check the latter because if they're not, we want to do conversion
+ * to inject the right OID into the tuple datum.)
*/
if (indesc->natts == outdesc->natts &&
indesc->tdtypeid == outdesc->tdtypeid)
{
for (i = 0; i < n; i++)
{
- if (attrMap[i] == (i+1))
+ if (attrMap[i] == (i + 1))
continue;
/*
- * If it's a dropped column and the corresponding input
- * column is also dropped, we needn't convert. However,
- * attlen and attalign must agree.
+ * If it's a dropped column and the corresponding input column is
+ * also dropped, we needn't convert. However, attlen and attalign
+ * must agree.
*/
if (attrMap[i] == 0 &&
indesc->attrs[i]->attisdropped &&
/* preallocate workspace for Datum arrays */
map->outvalues = (Datum *) palloc(n * sizeof(Datum));
map->outisnull = (bool *) palloc(n * sizeof(bool));
- n = indesc->natts + 1; /* +1 for NULL */
+ n = indesc->natts + 1; /* +1 for NULL */
map->invalues = (Datum *) palloc(n * sizeof(Datum));
map->inisnull = (bool *) palloc(n * sizeof(bool));
- map->invalues[0] = (Datum) 0; /* set up the NULL entry */
+ map->invalues[0] = (Datum) 0; /* set up the NULL entry */
map->inisnull[0] = true;
return map;
/*
* Set up for tuple conversion, matching input and output columns by name.
- * (Dropped columns are ignored in both input and output.) This is intended
+ * (Dropped columns are ignored in both input and output.) This is intended
* for use when the rowtypes are related by inheritance, so we expect an exact
* match of both type and typmod. The error messages will be a bit unhelpful
* unless both rowtypes are named composite types.
int j;
if (att->attisdropped)
- continue; /* attrMap[i] is already 0 */
+ continue; /* attrMap[i] is already 0 */
attname = NameStr(att->attname);
atttypid = att->atttypid;
atttypmod = att->atttypmod;
}
/*
- * Check to see if the map is one-to-one and the tuple types are the
- * same. (We check the latter because if they're not, we want to do
- * conversion to inject the right OID into the tuple datum.)
+ * Check to see if the map is one-to-one and the tuple types are the same.
+ * (We check the latter because if they're not, we want to do conversion
+ * to inject the right OID into the tuple datum.)
*/
if (indesc->natts == outdesc->natts &&
indesc->tdtypeid == outdesc->tdtypeid)
same = true;
for (i = 0; i < n; i++)
{
- if (attrMap[i] == (i+1))
+ if (attrMap[i] == (i + 1))
continue;
/*
- * If it's a dropped column and the corresponding input
- * column is also dropped, we needn't convert. However,
- * attlen and attalign must agree.
+ * If it's a dropped column and the corresponding input column is
+ * also dropped, we needn't convert. However, attlen and attalign
+ * must agree.
*/
if (attrMap[i] == 0 &&
indesc->attrs[i]->attisdropped &&
/* preallocate workspace for Datum arrays */
map->outvalues = (Datum *) palloc(n * sizeof(Datum));
map->outisnull = (bool *) palloc(n * sizeof(bool));
- n = indesc->natts + 1; /* +1 for NULL */
+ n = indesc->natts + 1; /* +1 for NULL */
map->invalues = (Datum *) palloc(n * sizeof(Datum));
map->inisnull = (bool *) palloc(n * sizeof(bool));
- map->invalues[0] = (Datum) 0; /* set up the NULL entry */
+ map->invalues[0] = (Datum) 0; /* set up the NULL entry */
map->inisnull[0] = true;
return map;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginbulk.c,v 1.18 2010/02/11 14:29:50 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginbulk.c,v 1.19 2010/02/26 02:00:33 momjian Exp $
*-------------------------------------------------------------------------
*/
#define DEF_NENTRY 2048
#define DEF_NPTR 4
-static void*
+static void *
ginAppendData(void *old, void *new, void *arg)
{
- EntryAccumulator *eo = (EntryAccumulator*)old,
- *en = (EntryAccumulator*)new;
+ EntryAccumulator *eo = (EntryAccumulator *) old,
+ *en = (EntryAccumulator *) new;
- BuildAccumulator *accum = (BuildAccumulator*)arg;
+ BuildAccumulator *accum = (BuildAccumulator *) arg;
if (eo->number >= eo->length)
{
accum->allocatedMemory -= GetMemoryChunkSpace(eo->list);
eo->length *= 2;
eo->list = (ItemPointerData *) repalloc(eo->list,
- sizeof(ItemPointerData) * eo->length);
+ sizeof(ItemPointerData) * eo->length);
accum->allocatedMemory += GetMemoryChunkSpace(eo->list);
}
static int
cmpEntryAccumulator(const void *a, const void *b, void *arg)
{
- EntryAccumulator *ea = (EntryAccumulator*)a;
- EntryAccumulator *eb = (EntryAccumulator*)b;
- BuildAccumulator *accum = (BuildAccumulator*)arg;
+ EntryAccumulator *ea = (EntryAccumulator *) a;
+ EntryAccumulator *eb = (EntryAccumulator *) b;
+ BuildAccumulator *accum = (BuildAccumulator *) arg;
return compareAttEntries(accum->ginstate, ea->attnum, ea->value,
eb->attnum, eb->value);
static void
ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum, Datum entry)
{
- EntryAccumulator *key,
- *ea;
+ EntryAccumulator *key,
+ *ea;
- /*
- * Allocate memory by rather big chunk to decrease overhead, we don't
- * keep pointer to previously allocated chunks because they will free
- * by MemoryContextReset() call.
+ /*
+ * Allocate memory by rather big chunk to decrease overhead, we don't keep
+ * pointer to previously allocated chunks because they will free by
+ * MemoryContextReset() call.
*/
if (accum->entryallocator == NULL || accum->length >= DEF_NENTRY)
{
key->attnum = attnum;
key->value = entry;
- /* To prevent multiple palloc/pfree cycles, we reuse array */
+ /* To prevent multiple palloc/pfree cycles, we reuse array */
if (accum->tmpList == NULL)
accum->tmpList =
(ItemPointerData *) palloc(sizeof(ItemPointerData) * DEF_NPTR);
else
{
/*
- * The key has been appended, so "free" allocated
- * key by decrementing chunk's counter.
+ * The key has been appended, so "free" allocated key by decrementing
+ * chunk's counter.
*/
accum->length--;
}
* Since the entries are being inserted into a balanced binary tree, you
* might think that the order of insertion wouldn't be critical, but it turns
* out that inserting the entries in sorted order results in a lot of
- * rebalancing operations and is slow. To prevent this, we attempt to insert
+ * rebalancing operations and is slow. To prevent this, we attempt to insert
* the nodes in an order that will produce a nearly-balanced tree if the input
* is in fact sorted.
*
* tree; then, we insert the middles of each half of out virtual array, then
* middles of quarters, etc.
*/
- void
+void
ginInsertRecordBA(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum,
Datum *entries, int32 nentry)
{
- uint32 step = nentry;
+ uint32 step = nentry;
if (nentry <= 0)
return;
/*
* step will contain largest power of 2 and <= nentry
*/
- step |= (step >> 1);
- step |= (step >> 2);
- step |= (step >> 4);
- step |= (step >> 8);
+ step |= (step >> 1);
+ step |= (step >> 2);
+ step |= (step >> 4);
+ step |= (step >> 8);
step |= (step >> 16);
step >>= 1;
- step ++;
+ step++;
- while(step > 0) {
- int i;
+ while (step > 0)
+ {
+ int i;
- for (i = step - 1; i < nentry && i >= 0; i += step << 1 /* *2 */)
+ for (i = step - 1; i < nentry && i >= 0; i += step << 1 /* *2 */ )
ginInsertEntry(accum, heapptr, attnum, entries[i]);
- step >>= 1; /* /2 */
+ step >>= 1; /* /2 */
}
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.23 2010/01/02 16:57:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.24 2010/02/26 02:00:33 momjian Exp $
*-------------------------------------------------------------------------
*/
* Gin tuple without any ItemPointers should be large enough to keep
* one ItemPointer, to prevent inconsistency between
* ginHeapTupleFastCollect and ginEntryInsert called by
- * ginHeapTupleInsert. ginHeapTupleFastCollect forms tuple without
+ * ginHeapTupleInsert. ginHeapTupleFastCollect forms tuple without
* extra pointer to heap, but ginEntryInsert (called for pending list
* cleanup during vacuum) will form the same tuple with one
* ItemPointer.
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.29 2010/01/02 16:57:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.30 2010/02/26 02:00:33 momjian Exp $
*-------------------------------------------------------------------------
*/
typedef struct pendingPosition
{
- Buffer pendingBuffer;
- OffsetNumber firstOffset;
- OffsetNumber lastOffset;
- ItemPointerData item;
- bool *hasMatchKey;
+ Buffer pendingBuffer;
+ OffsetNumber firstOffset;
+ OffsetNumber lastOffset;
+ ItemPointerData item;
+ bool *hasMatchKey;
} pendingPosition;
static bool
hasAllMatchingKeys(GinScanOpaque so, pendingPosition *pos)
{
- int i;
+ int i;
for (i = 0; i < so->nkeys; i++)
if (pos->hasMatchKey[i] == false)
memset(key->entryRes, FALSE, key->nentries);
}
- memset(pos->hasMatchKey, FALSE, so->nkeys);
+ memset(pos->hasMatchKey, FALSE, so->nkeys);
for (;;)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.84 2010/01/02 16:57:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.85 2010/02/26 02:00:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* On non-leaf page we can't conclude that child hasn't NULL
* values because of assumption in GiST: union (VAL, NULL) is VAL.
- * But if on non-leaf page key IS NULL, then all children are NULL.
+ * But if on non-leaf page key IS NULL, then all children are
+ * NULL.
*/
if (key->sk_flags & SK_SEARCHNULL)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.20 2010/01/14 16:31:09 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.21 2010/02/26 02:00:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (entry->leafkey) /* Point, actually */
{
- BOX *box = palloc(sizeof(BOX));
- Point *point = DatumGetPointP(entry->key);
+ BOX *box = palloc(sizeof(BOX));
+ Point *point = DatumGetPointP(entry->key);
GISTENTRY *retval = palloc(sizeof(GISTENTRY));
box->high = box->low = *point;
static bool
gist_point_consistent_internal(StrategyNumber strategy,
- bool isLeaf, BOX *key, Point *query)
+ bool isLeaf, BOX *key, Point *query)
{
- bool result = false;
+ bool result = false;
switch (strategy)
{
gist_point_consistent(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
- StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+ StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
bool result;
bool *recheck = (bool *) PG_GETARG_POINTER(4);
- StrategyNumber strategyGroup = strategy / GeoStrategyNumberOffset;
+ StrategyNumber strategyGroup = strategy / GeoStrategyNumberOffset;
switch (strategyGroup)
{
break;
case BoxStrategyNumberGroup:
result = DatumGetBool(DirectFunctionCall5(
- gist_box_consistent,
- PointerGetDatum(entry),
- PG_GETARG_DATUM(1),
- Int16GetDatum(RTOverlapStrategyNumber),
- 0, PointerGetDatum(recheck)));
+ gist_box_consistent,
+ PointerGetDatum(entry),
+ PG_GETARG_DATUM(1),
+ Int16GetDatum(RTOverlapStrategyNumber),
+ 0, PointerGetDatum(recheck)));
break;
case PolygonStrategyNumberGroup:
{
POLYGON *query = PG_GETARG_POLYGON_P(1);
result = DatumGetBool(DirectFunctionCall5(
- gist_poly_consistent,
- PointerGetDatum(entry),
- PolygonPGetDatum(query),
- Int16GetDatum(RTOverlapStrategyNumber),
- 0, PointerGetDatum(recheck)));
+ gist_poly_consistent,
+ PointerGetDatum(entry),
+ PolygonPGetDatum(query),
+ Int16GetDatum(RTOverlapStrategyNumber),
+ 0, PointerGetDatum(recheck)));
if (GIST_LEAF(entry) && result)
{
* We are on leaf page and quick check shows overlapping
* of polygon's bounding box and point
*/
- BOX *box = DatumGetBoxP(entry->key);
+ BOX *box = DatumGetBoxP(entry->key);
Assert(box->high.x == box->low.x
- && box->high.y == box->low.y);
+ && box->high.y == box->low.y);
result = DatumGetBool(DirectFunctionCall2(
- poly_contain_pt,
- PolygonPGetDatum(query),
+ poly_contain_pt,
+ PolygonPGetDatum(query),
PointPGetDatum(&box->high)));
*recheck = false;
}
break;
case CircleStrategyNumberGroup:
{
- CIRCLE *query = PG_GETARG_CIRCLE_P(1);
+ CIRCLE *query = PG_GETARG_CIRCLE_P(1);
result = DatumGetBool(DirectFunctionCall5(
- gist_circle_consistent,
- PointerGetDatum(entry),
- CirclePGetDatum(query),
- Int16GetDatum(RTOverlapStrategyNumber),
- 0, PointerGetDatum(recheck)));
+ gist_circle_consistent,
+ PointerGetDatum(entry),
+ CirclePGetDatum(query),
+ Int16GetDatum(RTOverlapStrategyNumber),
+ 0, PointerGetDatum(recheck)));
if (GIST_LEAF(entry) && result)
{
* We are on leaf page and quick check shows overlapping
* of polygon's bounding box and point
*/
- BOX *box = DatumGetBoxP(entry->key);
+ BOX *box = DatumGetBoxP(entry->key);
Assert(box->high.x == box->low.x
- && box->high.y == box->low.y);
+ && box->high.y == box->low.y);
result = DatumGetBool(DirectFunctionCall2(
- circle_contain_pt,
- CirclePGetDatum(query),
+ circle_contain_pt,
+ CirclePGetDatum(query),
PointPGetDatum(&box->high)));
*recheck = false;
}
}
break;
default:
- result = false; /* silence compiler warning */
+ result = false; /* silence compiler warning */
elog(ERROR, "unknown strategy number: %d", strategy);
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.78 2010/01/02 16:57:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.79 2010/02/26 02:00:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* field.
*
* Next, if any of keys is a NULL and that key is not marked with
- * SK_SEARCHNULL/SK_SEARCHNOTNULL then nothing can be found (ie,
- * we assume all indexable operators are strict).
+ * SK_SEARCHNULL/SK_SEARCHNOTNULL then nothing can be found (ie, we
+ * assume all indexable operators are strict).
*/
for (i = 0; i < scan->numberOfKeys; i++)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.116 2010/01/02 16:57:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.117 2010/02/26 02:00:33 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
/*
* An insertion into the current index page could have happened while
* we didn't have read lock on it. Re-find our position by looking
- * for the TID we previously returned. (Because we hold share lock on
+ * for the TID we previously returned. (Because we hold share lock on
* the bucket, no deletions or splits could have occurred; therefore
* we can expect that the TID still exists in the current index page,
* at an offset >= where we were.)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.68 2010/01/02 16:57:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.69 2010/02/26 02:00:33 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
/*
* If we reach here, there are no live tuples on the "read" page ---
- * it was empty when we got to it, or we moved them all. So we
- * can just free the page without bothering with deleting tuples
+ * it was empty when we got to it, or we moved them all. So we can
+ * just free the page without bothering with deleting tuples
* individually. Then advance to the previous "read" page.
*
* Tricky point here: if our read and write pages are adjacent in the
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.82 2010/01/02 16:57:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.83 2010/02/26 02:00:33 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
/*
* Partition the tuples in the old bucket between the old bucket and the
* new bucket, advancing along the old bucket's overflow bucket chain and
- * adding overflow pages to the new bucket as needed. Outer loop
- * iterates once per page in old bucket.
+ * adding overflow pages to the new bucket as needed. Outer loop iterates
+ * once per page in old bucket.
*/
for (;;)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.287 2010/02/14 18:42:12 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.288 2010/02/26 02:00:33 momjian Exp $
*
*
* INTERFACE ROUTINES
bool allow_strat, bool allow_sync,
bool is_bitmapscan);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
- ItemPointerData from, Buffer newbuf, HeapTuple newtup,
- bool all_visible_cleared, bool new_all_visible_cleared);
+ ItemPointerData from, Buffer newbuf, HeapTuple newtup,
+ bool all_visible_cleared, bool new_all_visible_cleared);
static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
HeapTuple oldtup, HeapTuple newtup);
/*
* If the all-visible flag indicates that all tuples on the page are
- * visible to everyone, we can skip the per-tuple visibility tests.
- * But not in hot standby mode. A tuple that's already visible to all
+ * visible to everyone, we can skip the per-tuple visibility tests. But
+ * not in hot standby mode. A tuple that's already visible to all
* transactions in the master might still be invisible to a read-only
* transaction in the standby.
*/
* someone setting xmax. Hence recheck after changing lock, same as for
* xmax itself.
*
- * Old-style VACUUM FULL is gone, but we have to keep this code as long
- * as we support having MOVED_OFF/MOVED_IN tuples in the database.
+ * Old-style VACUUM FULL is gone, but we have to keep this code as long as
+ * we support having MOVED_OFF/MOVED_IN tuples in the database.
*/
recheck_xvac:
if (tuple->t_infomask & HEAP_MOVED)
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, xlrec->node);
/*
- * Actual operation is a no-op. Record type exists to provide a means
- * for conflict processing to occur before we begin index vacuum actions.
- * see vacuumlazy.c and also comments in btvacuumpage()
+ * Actual operation is a no-op. Record type exists to provide a means for
+ * conflict processing to occur before we begin index vacuum actions. see
+ * vacuumlazy.c and also comments in btvacuumpage()
*/
}
uint8 info = record->xl_info & ~XLR_INFO_MASK;
/*
- * These operations don't overwrite MVCC data so no conflict
- * processing is required. The ones in heap2 rmgr do.
+ * These operations don't overwrite MVCC data so no conflict processing is
+ * required. The ones in heap2 rmgr do.
*/
RestoreBkpBlocks(lsn, record, false);
uint8 info = record->xl_info & ~XLR_INFO_MASK;
/*
- * Note that RestoreBkpBlocks() is called after conflict processing
- * within each record type handling function.
+ * Note that RestoreBkpBlocks() is called after conflict processing within
+ * each record type handling function.
*/
switch (info & XLOG_HEAP_OPMASK)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.21 2010/02/08 04:33:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.22 2010/02/26 02:00:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct
{
TransactionId new_prune_xid; /* new prune hint value for page */
- TransactionId latestRemovedXid; /* latest xid to be removed by this prune */
- int nredirected; /* numbers of entries in arrays below */
+ TransactionId latestRemovedXid; /* latest xid to be removed by this
+ * prune */
+ int nredirected; /* numbers of entries in arrays below */
int ndead;
int nunused;
/* arrays that accumulate indexes of items to be changed */
/*
* We can't write WAL in recovery mode, so there's no point trying to
- * clean the page. The master will likely issue a cleaning WAL record
- * soon anyway, so this is no particular loss.
+ * clean the page. The master will likely issue a cleaning WAL record soon
+ * anyway, so this is no particular loss.
*/
if (RecoveryInProgress())
return;
*
* First, initialize the new pd_prune_xid value to zero (indicating no
* prunable tuples). If we find any tuples which may soon become
- * prunable, we will save the lowest relevant XID in new_prune_xid.
- * Also initialize the rest of our working state.
+ * prunable, we will save the lowest relevant XID in new_prune_xid. Also
+ * initialize the rest of our working state.
*/
prstate.new_prune_xid = InvalidTransactionId;
prstate.latestRemovedXid = InvalidTransactionId;
{
heap_prune_record_unused(prstate, rootoffnum);
HeapTupleHeaderAdvanceLatestRemovedXid(htup,
- &prstate->latestRemovedXid);
+ &prstate->latestRemovedXid);
ndeleted++;
}
{
latestdead = offnum;
HeapTupleHeaderAdvanceLatestRemovedXid(htup,
- &prstate->latestRemovedXid);
+ &prstate->latestRemovedXid);
}
else if (!recent_dead)
break;
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.20 2010/02/03 10:01:29 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.21 2010/02/26 02:00:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Write an XLOG UNLOGGED record if WAL-logging was skipped */
if (!state->rs_use_wal && !state->rs_new_rel->rd_istemp)
{
- char reason[NAMEDATALEN + 30];
+ char reason[NAMEDATALEN + 30];
+
snprintf(reason, sizeof(reason), "heap rewrite on \"%s\"",
RelationGetRelationName(state->rs_new_rel));
XLogReportUnloggedStatement(reason);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.97 2010/02/04 00:09:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.98 2010/02/26 02:00:33 momjian Exp $
*
*
* INTERFACE ROUTINES
}
/*
- * Finally we store attributes of type 'm' externally. At this point
- * we increase the target tuple size, so that 'm' attributes aren't
- * stored externally unless really necessary.
+ * Finally we store attributes of type 'm' externally. At this point we
+ * increase the target tuple size, so that 'm' attributes aren't stored
+ * externally unless really necessary.
*/
maxDataLen = TOAST_TUPLE_TARGET_MAIN - hoff;
*
* Normally this is the actual OID of the target toast table, but during
* table-rewriting operations such as CLUSTER, we have to insert the OID
- * of the table's real permanent toast table instead. rd_toastoid is
- * set if we have to substitute such an OID.
+ * of the table's real permanent toast table instead. rd_toastoid is set
+ * if we have to substitute such an OID.
*/
if (OidIsValid(rel->rd_toastoid))
toast_pointer.va_toastrelid = rel->rd_toastoid;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.8 2010/02/09 21:43:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.9 2010/02/26 02:00:33 momjian Exp $
*
* INTERFACE ROUTINES
* visibilitymap_clear - clear a bit in the visibility map
* NOTES
*
* The visibility map is a bitmap with one bit per heap page. A set bit means
- * that all tuples on the page are known visible to all transactions, and
+ * that all tuples on the page are known visible to all transactions, and
* therefore the page doesn't need to be vacuumed. The map is conservative in
* the sense that we make sure that whenever a bit is set, we know the
* condition is true, but if a bit is not set, it might or might not be true.
rel->rd_istemp);
/*
- * We might as well update the local smgr_vm_nblocks setting.
- * smgrtruncate sent an smgr cache inval message, which will cause
- * other backends to invalidate their copy of smgr_vm_nblocks, and
- * this one too at the next command boundary. But this ensures it
- * isn't outright wrong until then.
+ * We might as well update the local smgr_vm_nblocks setting. smgrtruncate
+ * sent an smgr cache inval message, which will cause other backends to
+ * invalidate their copy of smgr_vm_nblocks, and this one too at the next
+ * command boundary. But this ensures it isn't outright wrong until then.
*/
if (rel->rd_smgr)
rel->rd_smgr->smgr_vm_nblocks = newnblocks;
{
if (smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
rel->rd_smgr->smgr_vm_nblocks = smgrnblocks(rel->rd_smgr,
- VISIBILITYMAP_FORKNUM);
+ VISIBILITYMAP_FORKNUM);
else
rel->rd_smgr->smgr_vm_nblocks = 0;
}
RelationOpenSmgr(rel);
/*
- * Create the file first if it doesn't exist. If smgr_vm_nblocks
- * is positive then it must exist, no need for an smgrexists call.
+ * Create the file first if it doesn't exist. If smgr_vm_nblocks is
+ * positive then it must exist, no need for an smgrexists call.
*/
if ((rel->rd_smgr->smgr_vm_nblocks == 0 ||
rel->rd_smgr->smgr_vm_nblocks == InvalidBlockNumber) &&
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.80 2010/02/07 20:48:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.81 2010/02/26 02:00:33 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
/*
* During recovery we ignore killed tuples and don't bother to kill them
- * either. We do this because the xmin on the primary node could easily
- * be later than the xmin on the standby node, so that what the primary
+ * either. We do this because the xmin on the primary node could easily be
+ * later than the xmin on the standby node, so that what the primary
* thinks is killed is supposed to be visible on standby. So for correct
* MVCC for queries during recovery we must ignore these hints and check
- * all tuples. Do *not* set ignore_killed_tuples to true when running
- * in a transaction that was started during recovery.
- * xactStartedInRecovery should not be altered by index AMs.
+ * all tuples. Do *not* set ignore_killed_tuples to true when running in a
+ * transaction that was started during recovery. xactStartedInRecovery
+ * should not be altered by index AMs.
*/
scan->kill_prior_tuple = false;
scan->xactStartedInRecovery = TransactionStartedDuringRecovery();
for (i = 0; i < natts; i++)
{
- char *val;
+ char *val;
if (isnull[i])
val = "null";
else
{
- Oid foutoid;
- bool typisvarlena;
+ Oid foutoid;
+ bool typisvarlena;
/*
- * The provided data is not necessarily of the type stored in
- * the index; rather it is of the index opclass's input type.
- * So look at rd_opcintype not the index tupdesc.
+ * The provided data is not necessarily of the type stored in the
+ * index; rather it is of the index opclass's input type. So look
+ * at rd_opcintype not the index tupdesc.
*
* Note: this is a bit shaky for opclasses that have pseudotype
- * input types such as ANYARRAY or RECORD. Currently, the
- * typoutput functions associated with the pseudotypes will
- * work okay, but we might have to try harder in future.
+ * input types such as ANYARRAY or RECORD. Currently, the
+ * typoutput functions associated with the pseudotypes will work
+ * okay, but we might have to try harder in future.
*/
getTypeOutputInfo(indexRelation->rd_opcintype[i],
&foutoid, &typisvarlena);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.117 2010/01/02 16:57:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.118 2010/02/26 02:00:34 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
/*
* If we scanned a whole HOT chain and found only dead tuples,
- * tell index AM to kill its entry for that TID. We do not do
- * this when in recovery because it may violate MVCC to do so.
- * see comments in RelationGetIndexScan().
+ * tell index AM to kill its entry for that TID. We do not do this
+ * when in recovery because it may violate MVCC to do so. see
+ * comments in RelationGetIndexScan().
*/
if (!scan->xactStartedInRecovery)
scan->kill_prior_tuple = scan->xs_hot_dead;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.176 2010/01/02 16:57:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.177 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* and btinsert. By here, itup is filled in, including the TID.
*
* If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
- * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
+ * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
* UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
* For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
* don't actually insert.
* If we must wait for another xact, we release the lock while waiting,
* and then must start over completely.
*
- * For a partial uniqueness check, we don't wait for the other xact.
- * Just let the tuple in and return false for possibly non-unique,
- * or true for definitely unique.
+ * For a partial uniqueness check, we don't wait for the other xact. Just
+ * let the tuple in and return false for possibly non-unique, or true for
+ * definitely unique.
*/
if (checkUnique != UNIQUE_CHECK_NO)
{
/*
* If we are doing a recheck, we expect to find the tuple we
- * are rechecking. It's not a duplicate, but we have to keep
+ * are rechecking. It's not a duplicate, but we have to keep
* scanning.
*/
if (checkUnique == UNIQUE_CHECK_EXISTING &&
/*
* It is a duplicate. If we are only doing a partial
- * check, then don't bother checking if the tuple is
- * being updated in another transaction. Just return
- * the fact that it is a potential conflict and leave
- * the full check till later.
+ * check, then don't bother checking if the tuple is being
+ * updated in another transaction. Just return the fact
+ * that it is a potential conflict and leave the full
+ * check till later.
*/
if (checkUnique == UNIQUE_CHECK_PARTIAL)
{
}
/*
- * This is a definite conflict. Break the tuple down
- * into datums and report the error. But first, make
- * sure we release the buffer locks we're holding ---
+ * This is a definite conflict. Break the tuple down into
+ * datums and report the error. But first, make sure we
+ * release the buffer locks we're holding ---
* BuildIndexValueDescription could make catalog accesses,
- * which in the worst case might touch this same index
- * and cause deadlocks.
+ * which in the worst case might touch this same index and
+ * cause deadlocks.
*/
if (nbuf != InvalidBuffer)
_bt_relbuf(rel, nbuf);
_bt_relbuf(rel, buf);
{
- Datum values[INDEX_MAX_KEYS];
- bool isnull[INDEX_MAX_KEYS];
+ Datum values[INDEX_MAX_KEYS];
+ bool isnull[INDEX_MAX_KEYS];
index_deform_tuple(itup, RelationGetDescr(rel),
values, isnull);
RelationGetRelationName(rel)),
errdetail("Key %s already exists.",
BuildIndexValueDescription(rel,
- values, isnull))));
+ values, isnull))));
}
}
else if (all_dead)
}
/*
- * If we are doing a recheck then we should have found the tuple we
- * are checking. Otherwise there's something very wrong --- probably,
- * the index is on a non-immutable expression.
+ * If we are doing a recheck then we should have found the tuple we are
+ * checking. Otherwise there's something very wrong --- probably, the
+ * index is on a non-immutable expression.
*/
if (checkUnique == UNIQUE_CHECK_EXISTING && !found)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("failed to re-find tuple within index \"%s\"",
RelationGetRelationName(rel)),
- errhint("This may be because of a non-immutable index expression.")));
+ errhint("This may be because of a non-immutable index expression.")));
if (nbuf != InvalidBuffer)
_bt_relbuf(rel, nbuf);
if (itemsz > BTMaxItemSize(page))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) itemsz,
- (unsigned long) BTMaxItemSize(page),
- RelationGetRelationName(rel)),
+ errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+ (unsigned long) itemsz,
+ (unsigned long) BTMaxItemSize(page),
+ RelationGetRelationName(rel)),
errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
"Consider a function index of an MD5 hash of the value, "
"or use full text indexing.")));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.119 2010/02/13 00:59:58 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.120 2010/02/26 02:00:34 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
START_CRIT_SECTION();
/*
- * We don't do MarkBufferDirty here because we're about initialise
- * the page, and nobody else can see it yet.
+ * We don't do MarkBufferDirty here because we're about initialise the
+ * page, and nobody else can see it yet.
*/
/* XLOG stuff */
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, rdata);
/*
- * We don't do PageSetLSN or PageSetTLI here because
- * we're about initialise the page, so no need.
+ * We don't do PageSetLSN or PageSetTLI here because we're about
+ * initialise the page, so no need.
*/
}
{
page = BufferGetPage(buf);
if (_bt_page_recyclable(page))
- {
+ {
/*
- * If we are generating WAL for Hot Standby then create
- * a WAL record that will allow us to conflict with
- * queries running on standby.
+ * If we are generating WAL for Hot Standby then create a
+ * WAL record that will allow us to conflict with queries
+ * running on standby.
*/
if (XLogStandbyInfoActive())
{
if (isVacuum)
{
xl_btree_vacuum xlrec_vacuum;
+
xlrec_vacuum.node = rel->rd_node;
xlrec_vacuum.block = BufferGetBlockNumber(buf);
else
{
xl_btree_delete xlrec_delete;
+
xlrec_delete.node = rel->rd_node;
xlrec_delete.block = BufferGetBlockNumber(buf);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.175 2010/02/08 04:33:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.176 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
IndexBulkDeleteCallback callback;
void *callback_state;
BTCycleId cycleid;
- BlockNumber lastBlockVacuumed; /* last blkno reached by Vacuum scan */
- BlockNumber lastUsedPage; /* blkno of last non-recyclable page */
+ BlockNumber lastBlockVacuumed; /* last blkno reached by Vacuum scan */
+ BlockNumber lastUsedPage; /* blkno of last non-recyclable page */
BlockNumber totFreePages; /* true total # of free pages */
MemoryContext pagedelcontext;
} BTVacState;
vstate.callback = callback;
vstate.callback_state = callback_state;
vstate.cycleid = cycleid;
- vstate.lastBlockVacuumed = BTREE_METAPAGE; /* Initialise at first block */
+ vstate.lastBlockVacuumed = BTREE_METAPAGE; /* Initialise at first block */
vstate.lastUsedPage = BTREE_METAPAGE;
vstate.totFreePages = 0;
/*
* We can't use _bt_getbuf() here because it always applies
* _bt_checkpage(), which will barf on an all-zero page. We want to
- * recycle all-zero pages, not fail. Also, we want to use a nondefault
- * buffer access strategy.
+ * recycle all-zero pages, not fail. Also, we want to use a
+ * nondefault buffer access strategy.
*/
buf = ReadBufferExtended(rel, MAIN_FORKNUM, num_pages - 1, RBM_NORMAL,
info->strategy);
htup = &(itup->t_tid);
/*
- * During Hot Standby we currently assume that XLOG_BTREE_VACUUM
- * records do not produce conflicts. That is only true as long
- * as the callback function depends only upon whether the index
- * tuple refers to heap tuples removed in the initial heap scan.
- * When vacuum starts it derives a value of OldestXmin. Backends
- * taking later snapshots could have a RecentGlobalXmin with a
- * later xid than the vacuum's OldestXmin, so it is possible that
- * row versions deleted after OldestXmin could be marked as killed
- * by other backends. The callback function *could* look at the
- * index tuple state in isolation and decide to delete the index
- * tuple, though currently it does not. If it ever did, we would
- * need to reconsider whether XLOG_BTREE_VACUUM records should
- * cause conflicts. If they did cause conflicts they would be
- * fairly harsh conflicts, since we haven't yet worked out a way
- * to pass a useful value for latestRemovedXid on the
- * XLOG_BTREE_VACUUM records. This applies to *any* type of index
- * that marks index tuples as killed.
+ * During Hot Standby we currently assume that
+ * XLOG_BTREE_VACUUM records do not produce conflicts. That is
+ * only true as long as the callback function depends only
+ * upon whether the index tuple refers to heap tuples removed
+ * in the initial heap scan. When vacuum starts it derives a
+ * value of OldestXmin. Backends taking later snapshots could
+ * have a RecentGlobalXmin with a later xid than the vacuum's
+ * OldestXmin, so it is possible that row versions deleted
+ * after OldestXmin could be marked as killed by other
+ * backends. The callback function *could* look at the index
+ * tuple state in isolation and decide to delete the index
+ * tuple, though currently it does not. If it ever did, we
+ * would need to reconsider whether XLOG_BTREE_VACUUM records
+ * should cause conflicts. If they did cause conflicts they
+ * would be fairly harsh conflicts, since we haven't yet
+ * worked out a way to pass a useful value for
+ * latestRemovedXid on the XLOG_BTREE_VACUUM records. This
+ * applies to *any* type of index that marks index tuples as
+ * killed.
*/
if (callback(htup, callback_state))
deletable[ndeletable++] = offnum;
*/
if (ndeletable > 0)
{
- BlockNumber lastBlockVacuumed = BufferGetBlockNumber(buf);
+ BlockNumber lastBlockVacuumed = BufferGetBlockNumber(buf);
_bt_delitems(rel, buf, deletable, ndeletable, true, vstate->lastBlockVacuumed);
/*
- * Keep track of the block number of the lastBlockVacuumed, so
- * we can scan those blocks as well during WAL replay. This then
+ * Keep track of the block number of the lastBlockVacuumed, so we
+ * can scan those blocks as well during WAL replay. This then
* provides concurrency protection and allows btrees to be used
* while in recovery.
*/
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.123 2010/01/20 19:43:40 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.124 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
wstate.btws_use_wal = XLogIsNeeded() && !wstate.index->rd_istemp;
/*
- * Write an XLOG UNLOGGED record if WAL-logging was skipped because
- * WAL archiving is not enabled.
+ * Write an XLOG UNLOGGED record if WAL-logging was skipped because WAL
+ * archiving is not enabled.
*/
if (!wstate.btws_use_wal && !wstate.index->rd_istemp)
{
- char reason[NAMEDATALEN + 20];
+ char reason[NAMEDATALEN + 20];
+
snprintf(reason, sizeof(reason), "b-tree build on \"%s\"",
RelationGetRelationName(wstate.index));
XLogReportUnloggedStatement(reason);
if (itupsz > BTMaxItemSize(npage))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) itupsz,
- (unsigned long) BTMaxItemSize(npage),
- RelationGetRelationName(wstate->index)),
+ errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+ (unsigned long) itupsz,
+ (unsigned long) BTMaxItemSize(npage),
+ RelationGetRelationName(wstate->index)),
errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
"Consider a function index of an MD5 hash of the value, "
"or use full text indexing.")));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.97 2010/01/03 05:39:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.98 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
StrategyNumber strat;
/*
- * First, deal with cases where one or both args are NULL. This should
+ * First, deal with cases where one or both args are NULL. This should
* only happen when the scankeys represent IS NULL/NOT NULL conditions.
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
break;
default:
elog(ERROR, "unrecognized StrategyNumber: %d", (int) strat);
- *result = false; /* keep compiler quiet */
+ *result = false; /* keep compiler quiet */
break;
}
return true;
* indexscan initiated by syscache lookup will use cross-data-type
* operators.)
*
- * If the sk_strategy was flipped by _bt_fix_scankey_strategy, we
- * have to un-flip it to get the correct opfamily member.
+ * If the sk_strategy was flipped by _bt_fix_scankey_strategy, we have to
+ * un-flip it to get the correct opfamily member.
*/
strat = op->sk_strategy;
if (op->sk_flags & SK_BT_DESC)
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
- * a NULL means that the qual cannot be satisfied. We return TRUE if the
+ * a NULL means that the qual cannot be satisfied. We return TRUE if the
* comparison value isn't NULL, or FALSE if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
* --- we can treat IS NULL as an equality operator for purposes of search
* strategy.
*
- * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
+ * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
* than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
* FIRST index.
*/
if (key->sk_flags & SK_SEARCHNULL)
{
if (isNull)
- continue; /* tuple satisfies this qual */
+ continue; /* tuple satisfies this qual */
}
else
{
Assert(key->sk_flags & SK_SEARCHNOTNULL);
if (!isNull)
- continue; /* tuple satisfies this qual */
+ continue; /* tuple satisfies this qual */
}
/*
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.61 2010/02/13 00:59:58 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.62 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
/*
- * If queries might be active then we need to ensure every block is unpinned
- * between the lastBlockVacuumed and the current block, if there are any.
- * This ensures that every block in the index is touched during VACUUM as
- * required to ensure scans work correctly.
+ * If queries might be active then we need to ensure every block is
+ * unpinned between the lastBlockVacuumed and the current block, if there
+ * are any. This ensures that every block in the index is touched during
+ * VACUUM as required to ensure scans work correctly.
*/
if (standbyState == STANDBY_SNAPSHOT_READY &&
(xlrec->lastBlockVacuumed + 1) != xlrec->block)
for (; blkno < xlrec->block; blkno++)
{
/*
- * XXX we don't actually need to read the block, we
- * just need to confirm it is unpinned. If we had a special call
- * into the buffer manager we could optimise this so that
- * if the block is not in shared_buffers we confirm it as unpinned.
+ * XXX we don't actually need to read the block, we just need to
+ * confirm it is unpinned. If we had a special call into the
+ * buffer manager we could optimise this so that if the block is
+ * not in shared_buffers we confirm it as unpinned.
*
* Another simple optimization would be to check if there's any
* backends running; if not, we could just skip this.
/*
* If the block was restored from a full page image, nothing more to do.
- * The RestoreBkpBlocks() call already pinned and took cleanup lock on
- * it. XXX: Perhaps we should call RestoreBkpBlocks() *after* the loop
- * above, to make the disk access more sequential.
+ * The RestoreBkpBlocks() call already pinned and took cleanup lock on it.
+ * XXX: Perhaps we should call RestoreBkpBlocks() *after* the loop above,
+ * to make the disk access more sequential.
*/
if (record->xl_info & XLR_BKP_BLOCK_1)
return;
xlrec = (xl_btree_delete *) XLogRecGetData(record);
/*
- * We don't need to take a cleanup lock to apply these changes.
- * See nbtree/README for details.
+ * We don't need to take a cleanup lock to apply these changes. See
+ * nbtree/README for details.
*/
buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
if (!BufferIsValid(buffer))
switch (info)
{
case XLOG_BTREE_DELETE:
+
/*
- * Btree delete records can conflict with standby queries. You might
- * think that vacuum records would conflict as well, but we've handled
- * that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
- * cleaned by the vacuum of the heap and so we can resolve any conflicts
- * just once when that arrives. After that any we know that no conflicts
- * exist from individual btree vacuum records on that index.
+ * Btree delete records can conflict with standby queries. You
+ * might think that vacuum records would conflict as well, but
+ * we've handled that already. XLOG_HEAP2_CLEANUP_INFO records
+ * provide the highest xid cleaned by the vacuum of the heap
+ * and so we can resolve any conflicts just once when that
+ * arrives. After that we know that no conflicts exist from
+ * individual btree vacuum records on that index.
*/
{
xl_btree_delete *xlrec = (xl_btree_delete *) XLogRecGetData(record);
break;
case XLOG_BTREE_REUSE_PAGE:
+
/*
- * Btree reuse page records exist to provide a conflict point when we
- * reuse pages in the index via the FSM. That's all it does though.
+ * Btree reuse page records exist to provide a conflict point
+ * when we reuse pages in the index via the FSM. That's all it
+ * does though.
*/
{
xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) XLogRecGetData(record);
}
/*
- * Vacuum needs to pin and take cleanup lock on every leaf page,
- * a regular exclusive lock is enough for all other purposes.
+ * Vacuum needs to pin and take cleanup lock on every leaf page, a regular
+ * exclusive lock is enough for all other purposes.
*/
RestoreBkpBlocks(lsn, record, (info == XLOG_BTREE_VACUUM));
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.34 2010/01/02 16:57:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.35 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
myOldestMember = OldestMemberMXactId[MyBackendId];
if (MultiXactIdIsValid(myOldestMember))
{
- BackendId dummyBackendId = TwoPhaseGetDummyBackendId(xid);
+ BackendId dummyBackendId = TwoPhaseGetDummyBackendId(xid);
/*
- * Even though storing MultiXactId is atomic, acquire lock to make sure
- * others see both changes, not just the reset of the slot of the
+ * Even though storing MultiXactId is atomic, acquire lock to make
+ * sure others see both changes, not just the reset of the slot of the
* current backend. Using a volatile pointer might suffice, but this
* isn't a hot spot.
*/
/*
* We don't need to transfer OldestVisibleMXactId value, because the
- * transaction is not going to be looking at any more multixacts once
- * it's prepared.
+ * transaction is not going to be looking at any more multixacts once it's
+ * prepared.
*
* We assume that storing a MultiXactId is atomic and so we need not take
* MultiXactGenLock to do this.
void *recdata, uint32 len)
{
BackendId dummyBackendId = TwoPhaseGetDummyBackendId(xid);
- MultiXactId oldestMember;
+ MultiXactId oldestMember;
/*
- * Get the oldest member XID from the state file record, and set it in
- * the OldestMemberMXactId slot reserved for this prepared transaction.
+ * Get the oldest member XID from the state file record, and set it in the
+ * OldestMemberMXactId slot reserved for this prepared transaction.
*/
Assert(len == sizeof(MultiXactId));
- oldestMember = *((MultiXactId *)recdata);
+ oldestMember = *((MultiXactId *) recdata);
OldestMemberMXactId[dummyBackendId] = oldestMember;
}
*/
void
multixact_twophase_postabort(TransactionId xid, uint16 info,
- void *recdata, uint32 len)
+ void *recdata, uint32 len)
{
multixact_twophase_postcommit(xid, info, recdata, len);
}
max_xid = xids[i];
}
- /* We don't expect anyone else to modify nextXid, hence startup process
- * doesn't need to hold a lock while checking this. We still acquire
- * the lock to modify it, though.
+ /*
+ * We don't expect anyone else to modify nextXid, hence startup
+ * process doesn't need to hold a lock while checking this. We still
+ * acquire the lock to modify it, though.
*/
if (TransactionIdFollowsOrEquals(max_xid,
ShmemVariableCache->nextXid))
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.26 2010/01/02 16:57:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.27 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Current state should be 0 */
Assert(*ptr == InvalidTransactionId ||
- (*ptr == parent && overwriteOK));
+ (*ptr == parent && overwriteOK));
*ptr = parent;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.58 2010/01/02 16:57:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.59 2010/02/26 02:00:34 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
typedef struct GlobalTransactionData
{
PGPROC proc; /* dummy proc */
- BackendId dummyBackendId; /* similar to backend id for backends */
+ BackendId dummyBackendId; /* similar to backend id for backends */
TimestampTz prepared_at; /* time of preparation */
XLogRecPtr prepare_lsn; /* XLOG offset of prepare record */
Oid owner; /* ID of user that executed the xact */
/*
* Assign a unique ID for each dummy proc, so that the range of
* dummy backend IDs immediately follows the range of normal
- * backend IDs. We don't dare to assign a real backend ID to
- * dummy procs, because prepared transactions don't take part in
- * cache invalidation like a real backend ID would imply, but
- * having a unique ID for them is nevertheless handy. This
- * arrangement allows you to allocate an array of size
- * (MaxBackends + max_prepared_xacts + 1), and have a slot for
- * every backend and prepared transaction. Currently multixact.c
- * uses that technique.
+ * backend IDs. We don't dare to assign a real backend ID to dummy
+ * procs, because prepared transactions don't take part in cache
+ * invalidation like a real backend ID would imply, but having a
+ * unique ID for them is nevertheless handy. This arrangement
+ * allows you to allocate an array of size (MaxBackends +
+ * max_prepared_xacts + 1), and have a slot for every backend and
+ * prepared transaction. Currently multixact.c uses that
+ * technique.
*/
gxacts[i].dummyBackendId = MaxBackends + 1 + i;
}
BackendId
TwoPhaseGetDummyBackendId(TransactionId xid)
{
- PGPROC *proc = TwoPhaseGetDummyProc(xid);
+ PGPROC *proc = TwoPhaseGetDummyProc(xid);
return ((GlobalTransaction) proc)->dummyBackendId;
}
save_state_data(&hdr, sizeof(TwoPhaseFileHeader));
/*
- * Add the additional info about subxacts, deletable files and
- * cache invalidation messages.
+ * Add the additional info about subxacts, deletable files and cache
+ * invalidation messages.
*/
if (hdr.nsubxacts > 0)
{
/*
* Handle cache invalidation messages.
*
- * Relcache init file invalidation requires processing both
- * before and after we send the SI messages. See AtEOXact_Inval()
+ * Relcache init file invalidation requires processing both before and
+ * after we send the SI messages. See AtEOXact_Inval()
*/
if (hdr->initfileinval)
RelationCacheInitFileInvalidate(true);
bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
/*
- * It's possible that SubTransSetParent has been set before, if the
- * prepared transaction generated xid assignment records. Test
+ * It's possible that SubTransSetParent has been set before, if
+ * the prepared transaction generated xid assignment records. Test
* here must match one used in AssignTransactionId().
*/
if (InHotStandby && hdr->nsubxacts >= PGPROC_MAX_CACHED_SUBXIDS)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.13 2010/02/16 22:34:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.14 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
NULL, /* END ID */
lock_twophase_postcommit, /* Lock */
- pgstat_twophase_postcommit, /* pgstat */
- multixact_twophase_postcommit /* MultiXact */
+ pgstat_twophase_postcommit, /* pgstat */
+ multixact_twophase_postcommit /* MultiXact */
};
const TwoPhaseCallback twophase_postabort_callbacks[TWOPHASE_RM_MAX_ID + 1] =
NULL, /* END ID */
lock_twophase_postabort, /* Lock */
pgstat_twophase_postabort, /* pgstat */
- multixact_twophase_postabort /* MultiXact */
+ multixact_twophase_postabort /* MultiXact */
};
const TwoPhaseCallback twophase_standby_recover_callbacks[TWOPHASE_RM_MAX_ID + 1] =
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.90 2010/02/20 21:24:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.91 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* Note: when this is called, we are actually already inside a valid
* transaction, since XIDs are now not allocated until the transaction
- * does something. So it is safe to do a database lookup if we want to
+ * does something. So it is safe to do a database lookup if we want to
* issue a warning about XID wrap.
*/
TransactionId
* For safety's sake, we release XidGenLock while sending signals,
* warnings, etc. This is not so much because we care about
* preserving concurrency in this situation, as to avoid any
- * possibility of deadlock while doing get_database_name().
- * First, copy all the shared values we'll need in this path.
+ * possibility of deadlock while doing get_database_name(). First,
+ * copy all the shared values we'll need in this path.
*/
TransactionId xidWarnLimit = ShmemVariableCache->xidWarnLimit;
TransactionId xidStopLimit = ShmemVariableCache->xidStopLimit;
TransactionId xidWrapLimit = ShmemVariableCache->xidWrapLimit;
- Oid oldest_datoid = ShmemVariableCache->oldestXidDB;
+ Oid oldest_datoid = ShmemVariableCache->oldestXidDB;
LWLockRelease(XidGenLock);
if (IsUnderPostmaster &&
TransactionIdFollowsOrEquals(xid, xidStopLimit))
{
- char *oldest_datname = get_database_name(oldest_datoid);
+ char *oldest_datname = get_database_name(oldest_datoid);
/* complain even if that DB has disappeared */
if (oldest_datname)
}
else if (TransactionIdFollowsOrEquals(xid, xidWarnLimit))
{
- char *oldest_datname = get_database_name(oldest_datoid);
+ char *oldest_datname = get_database_name(oldest_datoid);
/* complain even if that DB has disappeared */
if (oldest_datname)
/* Log the info */
ereport(DEBUG1,
- (errmsg("transaction ID wrap limit is %u, limited by database with OID %u",
- xidWrapLimit, oldest_datoid)));
+ (errmsg("transaction ID wrap limit is %u, limited by database with OID %u",
+ xidWrapLimit, oldest_datoid)));
/*
* If past the autovacuum force point, immediately signal an autovac
/* Give an immediate warning if past the wrap warn point */
if (TransactionIdFollowsOrEquals(curXid, xidWarnLimit) && !InRecovery)
{
- char *oldest_datname = get_database_name(oldest_datoid);
+ char *oldest_datname = get_database_name(oldest_datoid);
/*
* Note: it's possible that get_database_name fails and returns NULL,
*/
if (oldest_datname)
ereport(WARNING,
- (errmsg("database \"%s\" must be vacuumed within %u transactions",
- oldest_datname,
- xidWrapLimit - curXid),
- errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ (errmsg("database \"%s\" must be vacuumed within %u transactions",
+ oldest_datname,
+ xidWrapLimit - curXid),
+ errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
else
ereport(WARNING,
(errmsg("database with OID %u must be vacuumed within %u transactions",
* We primarily check whether oldestXidDB is valid. The cases we have in
* mind are that that database was dropped, or the field was reset to zero
* by pg_resetxlog. In either case we should force recalculation of the
- * wrap limit. Also do it if oldestXid is old enough to be forcing
+ * wrap limit. Also do it if oldestXid is old enough to be forcing
* autovacuums or other actions; this ensures we update our state as soon
* as possible once extra overhead is being incurred.
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.288 2010/02/20 21:24:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.289 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int nChildXids; /* # of subcommitted child XIDs */
int maxChildXids; /* allocated size of childXids[] */
Oid prevUser; /* previous CurrentUserId setting */
- int prevSecContext; /* previous SecurityRestrictionContext */
+ int prevSecContext; /* previous SecurityRestrictionContext */
bool prevXactReadOnly; /* entry-time xact r/o state */
- bool startedInRecovery; /* did we start in recovery? */
+ bool startedInRecovery; /* did we start in recovery? */
struct TransactionStateData *parent; /* back link to parent */
} TransactionStateData;
* unreportedXids holds XIDs of all subtransactions that have not yet been
* reported in a XLOG_XACT_ASSIGNMENT record.
*/
-static int nUnreportedXids;
+static int nUnreportedXids;
static TransactionId unreportedXids[PGPROC_MAX_CACHED_SUBXIDS];
static TransactionState CurrentTransactionState = &TopTransactionStateData;
* include the top-level xid and all the subxids that have not yet been
* reported using XLOG_XACT_ASSIGNMENT records.
*
- * This is required to limit the amount of shared memory required in a
- * hot standby server to keep track of in-progress XIDs. See notes for
+ * This is required to limit the amount of shared memory required in a hot
+ * standby server to keep track of in-progress XIDs. See notes for
* RecordKnownAssignedTransactionIds().
*
- * We don't keep track of the immediate parent of each subxid,
- * only the top-level transaction that each subxact belongs to. This
- * is correct in recovery only because aborted subtransactions are
- * separately WAL logged.
+ * We don't keep track of the immediate parent of each subxid, only the
+ * top-level transaction that each subxact belongs to. This is correct in
+ * recovery only because aborted subtransactions are separately WAL
+ * logged.
*/
if (isSubXact && XLogStandbyInfoActive())
{
unreportedXids[nUnreportedXids] = s->transactionId;
nUnreportedXids++;
- /* ensure this test matches similar one in RecoverPreparedTransactions() */
+ /*
+ * ensure this test matches similar one in
+ * RecoverPreparedTransactions()
+ */
if (nUnreportedXids >= PGPROC_MAX_CACHED_SUBXIDS)
{
XLogRecData rdata[2];
- xl_xact_assignment xlrec;
+ xl_xact_assignment xlrec;
/*
* xtop is always set by now because we recurse up transaction
nchildren = xactGetCommittedChildren(&children);
nmsgs = xactGetCommittedInvalidationMessages(&invalMessages,
&RelcacheInitFileInval);
+
/*
* If we haven't been assigned an XID yet, we neither can, nor do we want
* to write a COMMIT record.
AtCCI_LocalCache(void)
{
/*
- * Make any pending relation map changes visible. We must do this
- * before processing local sinval messages, so that the map changes
- * will get reflected into the relcache when relcache invals are
- * processed.
+ * Make any pending relation map changes visible. We must do this before
+ * processing local sinval messages, so that the map changes will get
+ * reflected into the relcache when relcache invals are processed.
*/
AtCCI_RelationMap();
*
* Note: We rely on the fact that the XID of a child always follows that
* of its parent. By copying the XID of this subtransaction before the
- * XIDs of its children, we ensure that the array stays ordered.
- * Likewise, all XIDs already in the array belong to subtransactions
- * started and subcommitted before us, so their XIDs must precede ours.
+ * XIDs of its children, we ensure that the array stays ordered. Likewise,
+ * all XIDs already in the array belong to subtransactions started and
+ * subcommitted before us, so their XIDs must precede ours.
*/
s->parent->childXids[s->parent->nChildXids] = s->transactionId;
s->maxChildXids = 0;
/*
- * We could prune the unreportedXids array here. But we don't bother.
- * That would potentially reduce number of XLOG_XACT_ASSIGNMENT records
- * but it would likely introduce more CPU time into the more common
- * paths, so we choose not to do that.
+ * We could prune the unreportedXids array here. But we don't bother. That
+ * would potentially reduce number of XLOG_XACT_ASSIGNMENT records but it
+ * would likely introduce more CPU time into the more common paths, so we
+ * choose not to do that.
*/
}
/*
* do abort processing
*/
- AfterTriggerEndXact(false); /* 'false' means it's abort */
+ AfterTriggerEndXact(false); /* 'false' means it's abort */
AtAbort_Portals();
AtEOXact_LargeObject(false);
AtAbort_Notify();
/*
* Make sure nextXid is beyond any XID mentioned in the record.
*
- * We don't expect anyone else to modify nextXid, hence we
- * don't need to hold a lock while checking this. We still acquire
- * the lock to modify it, though.
+ * We don't expect anyone else to modify nextXid, hence we don't need to
+ * hold a lock while checking this. We still acquire the lock to modify
+ * it, though.
*/
if (TransactionIdFollowsOrEquals(max_xid,
ShmemVariableCache->nextXid))
* protocol during recovery to provide information on database
* consistency for when users try to set hint bits. It is important
* that we do not set hint bits until the minRecoveryPoint is past
- * this commit record. This ensures that if we crash we don't see
- * hint bits set on changes made by transactions that haven't yet
+ * this commit record. This ensures that if we crash we don't see hint
+ * bits set on changes made by transactions that haven't yet
* recovered. It's unlikely but it's good to be safe.
*/
TransactionIdAsyncCommitTree(xid, xlrec->nsubxacts, sub_xids, lsn);
/*
* Send any cache invalidations attached to the commit. We must
- * maintain the same order of invalidation then release locks
- * as occurs in .
+ * maintain the same order of invalidation then release locks as
+ * occurs in CommitTransaction().
*/
ProcessCommittedInvalidationMessages(inval_msgs, xlrec->nmsgs,
- XactCompletionRelcacheInitFileInval(xlrec),
- xlrec->dbId, xlrec->tsId);
+ XactCompletionRelcacheInitFileInval(xlrec),
+ xlrec->dbId, xlrec->tsId);
/*
- * Release locks, if any. We do this for both two phase and normal
- * one phase transactions. In effect we are ignoring the prepare
- * phase and just going straight to lock release.
+ * Release locks, if any. We do this for both two phase and normal one
+ * phase transactions. In effect we are ignoring the prepare phase and
+ * just going straight to lock release.
*/
StandbyReleaseLockTree(xid, xlrec->nsubxacts, sub_xids);
}
}
/*
- * We issue an XLogFlush() for the same reason we emit ForceSyncCommit() in
- * normal operation. For example, in DROP DATABASE, we delete all the files
- * belonging to the database, and then commit the transaction. If we crash
- * after all the files have been deleted but before the commit, you have an
- * entry in pg_database without any files. To minimize the window for that,
- * we use ForceSyncCommit() to rush the commit record to disk as quick as
- * possible. We have the same window during recovery, and forcing an
- * XLogFlush() (which updates minRecoveryPoint during recovery) helps
- * to reduce that problem window, for any user that requested ForceSyncCommit().
+ * We issue an XLogFlush() for the same reason we emit ForceSyncCommit()
+ * in normal operation. For example, in DROP DATABASE, we delete all the
+ * files belonging to the database, and then commit the transaction. If we
+ * crash after all the files have been deleted but before the commit, you
+ * have an entry in pg_database without any files. To minimize the window
+ * for that, we use ForceSyncCommit() to rush the commit record to disk as
+ * quick as possible. We have the same window during recovery, and forcing
+ * an XLogFlush() (which updates minRecoveryPoint during recovery) helps
+ * to reduce that problem window, for any user that requested
+ * ForceSyncCommit().
*/
if (XactCompletionForceSyncCommit(xlrec))
XLogFlush(lsn);
max_xid = TransactionIdLatest(xid, xlrec->nsubxacts, sub_xids);
/* Make sure nextXid is beyond any XID mentioned in the record */
- /* We don't expect anyone else to modify nextXid, hence we
- * don't need to hold a lock while checking this. We still acquire
- * the lock to modify it, though.
+
+ /*
+ * We don't expect anyone else to modify nextXid, hence we don't need to
+ * hold a lock while checking this. We still acquire the lock to modify
+ * it, though.
*/
if (TransactionIdFollowsOrEquals(max_xid,
ShmemVariableCache->nextXid))
if (InHotStandby)
{
/*
- * If a transaction completion record arrives that has as-yet unobserved
- * subtransactions then this will not have been fully handled by the call
- * to RecordKnownAssignedTransactionIds() in the main recovery loop in
- * xlog.c. So we need to do bookkeeping again to cover that case. This is
- * confusing and it is easy to think this call is irrelevant, which has
- * happened three times in development already. Leave it in.
+ * If a transaction completion record arrives that has as-yet
+ * unobserved subtransactions then this will not have been fully
+ * handled by the call to RecordKnownAssignedTransactionIds() in the
+ * main recovery loop in xlog.c. So we need to do bookkeeping again to
+ * cover that case. This is confusing and it is easy to think this
+ * call is irrelevant, which has happened three times in development
+ * already. Leave it in.
*/
RecordKnownAssignedTransactionIds(max_xid);
}
msgs = (SharedInvalidationMessage *) &xacts[xlrec->nsubxacts];
if (XactCompletionRelcacheInitFileInval(xlrec))
- appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u",
- xlrec->dbId, xlrec->tsId);
+ appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u",
+ xlrec->dbId, xlrec->tsId);
appendStringInfo(buf, "; inval msgs:");
for (i = 0; i < xlrec->nmsgs; i++)
/*
* Note that we ignore the WAL record's xid, since we're more
- * interested in the top-level xid that issued the record
- * and which xids are being reported here.
+ * interested in the top-level xid that issued the record and which
+ * xids are being reported here.
*/
appendStringInfo(buf, "xid assignment xtop %u: ", xlrec->xtop);
xact_desc_assignment(buf, xlrec);
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.378 2010/02/25 02:17:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.379 2010/02/26 02:00:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int XLogArchiveTimeout = 0;
bool XLogArchiveMode = false;
char *XLogArchiveCommand = NULL;
-bool XLogRequestRecoveryConnections = true;
+bool XLogRequestRecoveryConnections = true;
int MaxStandbyDelay = 30;
bool fullPageWrites = true;
bool log_checkpoints = false;
bool InRecovery = false;
/* Are we in Hot Standby mode? Only valid in startup process, see xlog.h */
-HotStandbyState standbyState = STANDBY_DISABLED;
+HotStandbyState standbyState = STANDBY_DISABLED;
-static XLogRecPtr LastRec;
+static XLogRecPtr LastRec;
/*
* Local copy of SharedRecoveryInProgress variable. True actually means "not
* 0: unconditionally not allowed to insert XLOG
* -1: must check RecoveryInProgress(); disallow until it is false
* Most processes start with -1 and transition to 1 after seeing that recovery
- * is not in progress. But we can also force the value for special cases.
+ * is not in progress. But we can also force the value for special cases.
* The coding in XLogInsertAllowed() depends on the first two of these states
* being numerically the same as bool true and false.
*/
/* options taken from recovery.conf for XLOG streaming */
static bool StandbyMode = false;
static char *PrimaryConnInfo = NULL;
-char *TriggerFile = NULL;
+char *TriggerFile = NULL;
/* if recoveryStopsHere returns true, it saves actual stop xid/time here */
static TransactionId recoveryStopXid;
/* end+1 of the last record replayed (or being replayed) */
XLogRecPtr replayEndRecPtr;
/* timestamp of last record replayed (or being replayed) */
- TimestampTz recoveryLastXTime;
+ TimestampTz recoveryLastXTime;
/* end+1 of the last record replayed */
XLogRecPtr recoveryLastRecPtr;
static uint32 readSeg = 0;
static uint32 readOff = 0;
static uint32 readLen = 0;
+
/* Is the currently open segment being streamed from primary? */
static bool readStreamed = false;
static bool InstallXLogFileSegment(uint32 *log, uint32 *seg, char *tmppath,
bool find_free, int *max_advance,
bool use_lock);
-static int XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
+static int XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
bool fromArchive, bool notexistOk);
-static int XLogFileReadAnyTLI(uint32 log, uint32 seg, int emode,
+static int XLogFileReadAnyTLI(uint32 log, uint32 seg, int emode,
bool fromArchive);
static bool XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
bool randAccess);
* to not need a valid 'lsn' value.
*
* Another important reason for doing it this way is that the passed
- * 'lsn' value could be bogus, i.e., past the end of available WAL,
- * if the caller got it from a corrupted heap page. Accepting such
- * a value as the min recovery point would prevent us from coming up
- * at all. Instead, we just log a warning and continue with recovery.
+ * 'lsn' value could be bogus, i.e., past the end of available WAL, if
+ * the caller got it from a corrupted heap page. Accepting such a
+ * value as the min recovery point would prevent us from coming up at
+ * all. Instead, we just log a warning and continue with recovery.
* (See also the comments about corrupt LSNs in XLogFlush.)
*/
SpinLockAcquire(&xlogctl->info_lck);
if (!force && XLByteLT(newMinRecoveryPoint, lsn))
elog(WARNING,
- "xlog min recovery request %X/%X is past current point %X/%X",
+ "xlog min recovery request %X/%X is past current point %X/%X",
lsn.xlogid, lsn.xrecoff,
newMinRecoveryPoint.xlogid, newMinRecoveryPoint.xrecoff);
/*
* During REDO, we are reading not writing WAL. Therefore, instead of
- * trying to flush the WAL, we should update minRecoveryPoint instead.
- * We test XLogInsertAllowed(), not InRecovery, because we need the
- * bgwriter to act this way too, and because when the bgwriter tries
- * to write the end-of-recovery checkpoint, it should indeed flush.
+ * trying to flush the WAL, we should update minRecoveryPoint instead. We
+ * test XLogInsertAllowed(), not InRecovery, because we need the bgwriter
+ * to act this way too, and because when the bgwriter tries to write the
+ * end-of-recovery checkpoint, it should indeed flush.
*/
if (!XLogInsertAllowed())
{
* the whole system due to corruption on one data page. In particular, if
* the bad page is encountered again during recovery then we would be
* unable to restart the database at all! (This scenario actually
- * happened in the field several times with 7.1 releases.) As of 8.4,
- * bad LSNs encountered during recovery are UpdateMinRecoveryPoint's
- * problem; the only time we can reach here during recovery is while
- * flushing the end-of-recovery checkpoint record, and we don't expect
- * that to have a bad LSN.
+ * happened in the field several times with 7.1 releases.) As of 8.4, bad
+ * LSNs encountered during recovery are UpdateMinRecoveryPoint's problem;
+ * the only time we can reach here during recovery is while flushing the
+ * end-of-recovery checkpoint record, and we don't expect that to have a
+ * bad LSN.
*
- * Note that for calls from xact.c, the ERROR will
- * be promoted to PANIC since xact.c calls this routine inside a critical
- * section. However, calls from bufmgr.c are not within critical sections
- * and so we will not force a restart for a bad LSN on a data page.
+ * Note that for calls from xact.c, the ERROR will be promoted to PANIC
+ * since xact.c calls this routine inside a critical section. However,
+ * calls from bufmgr.c are not within critical sections and so we will not
+ * force a restart for a bad LSN on a data page.
*/
if (XLByteLT(LogwrtResult.Flush, record))
elog(ERROR,
LWLockRelease(ControlFileLock);
/*
- * An invalid minRecoveryPoint means that we need to recover all the WAL,
- * i.e., we're doing crash recovery. We never modify the control file's
- * value in that case, so we can short-circuit future checks here too.
+ * An invalid minRecoveryPoint means that we need to recover all the
+ * WAL, i.e., we're doing crash recovery. We never modify the control
+ * file's value in that case, so we can short-circuit future checks
+ * here too.
*/
if (minRecoveryPoint.xlogid == 0 && minRecoveryPoint.xrecoff == 0)
updateMinRecoveryPoint = false;
char path[MAXPGPATH];
int fd;
- XLogFileName(xlogfname, tli, log, seg);
+ XLogFileName(xlogfname, tli, log, seg);
- if (fromArchive)
- {
- /* Report recovery progress in PS display */
- snprintf(activitymsg, sizeof(activitymsg), "waiting for %s",
- xlogfname);
- set_ps_display(activitymsg, false);
-
- restoredFromArchive = RestoreArchivedFile(path, xlogfname,
- "RECOVERYXLOG",
- XLogSegSize);
- if (!restoredFromArchive)
- return -1;
- }
- else
- {
- XLogFilePath(path, tli, log, seg);
- restoredFromArchive = false;
- }
+ if (fromArchive)
+ {
+ /* Report recovery progress in PS display */
+ snprintf(activitymsg, sizeof(activitymsg), "waiting for %s",
+ xlogfname);
+ set_ps_display(activitymsg, false);
- fd = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0);
- if (fd >= 0)
- {
- /* Success! */
- curFileTLI = tli;
+ restoredFromArchive = RestoreArchivedFile(path, xlogfname,
+ "RECOVERYXLOG",
+ XLogSegSize);
+ if (!restoredFromArchive)
+ return -1;
+ }
+ else
+ {
+ XLogFilePath(path, tli, log, seg);
+ restoredFromArchive = false;
+ }
- /* Report recovery progress in PS display */
- snprintf(activitymsg, sizeof(activitymsg), "recovering %s",
- xlogfname);
- set_ps_display(activitymsg, false);
+ fd = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0);
+ if (fd >= 0)
+ {
+ /* Success! */
+ curFileTLI = tli;
- return fd;
- }
- if (errno != ENOENT || !notfoundOk) /* unexpected failure? */
- ereport(PANIC,
- (errcode_for_file_access(),
- errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
- path, log, seg)));
- return -1;
+ /* Report recovery progress in PS display */
+ snprintf(activitymsg, sizeof(activitymsg), "recovering %s",
+ xlogfname);
+ set_ps_display(activitymsg, false);
+
+ return fd;
+ }
+ if (errno != ENOENT || !notfoundOk) /* unexpected failure? */
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+ path, log, seg)));
+ return -1;
}
/*
* If not in StandbyMode, fall back to searching pg_xlog. In
* StandbyMode we're streaming segments from the primary to pg_xlog,
* and we mustn't confuse the (possibly partial) segments in pg_xlog
- * with complete segments ready to be applied. We rather wait for
- * the records to arrive through streaming.
+ * with complete segments ready to be applied. We rather wait for the
+ * records to arrive through streaming.
*/
if (!StandbyMode && fromArchive)
{
/*
* WAL segment files will not be re-read in normal operation, so we advise
* the OS to release any cached pages. But do not do so if WAL archiving
- * or streaming is active, because archiver and walsender process could use
- * the cache to read the WAL segment.
+ * or streaming is active, because archiver and walsender process could
+ * use the cache to read the WAL segment.
*/
#if defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
if (!XLogIsNeeded())
{
if (expectedSize > 0 && stat_buf.st_size != expectedSize)
{
- int elevel;
+ int elevel;
/*
* If we find a partial file in standby mode, we assume it's
* trying.
*
* Otherwise treat a wrong-sized file as FATAL to ensure the
- * DBA would notice it, but is that too strong? We could try
+ * DBA would notice it, but is that too strong? We could try
* to plow ahead with a local copy of the file ... but the
* problem is that there probably isn't one, and we'd
- * incorrectly conclude we've reached the end of WAL and
- * we're done recovering ...
+ * incorrectly conclude we've reached the end of WAL and we're
+ * done recovering ...
*/
if (StandbyMode && stat_buf.st_size < expectedSize)
elevel = DEBUG1;
xlogfname, rc)));
not_available:
+
/*
* if an archived file is not available, there might still be a version of
* this file in XLOGDIR, so return that as the filename to open.
struct dirent *xlde;
char lastoff[MAXFNAMELEN];
char path[MAXPGPATH];
+
#ifdef WIN32
char newpath[MAXPGPATH];
#endif
else
{
/* No need for any more future segments... */
- int rc;
+ int rc;
ereport(DEBUG2,
(errmsg("removing transaction log file \"%s\"",
xlde->d_name)));
#ifdef WIN32
+
/*
* On Windows, if another process (e.g another backend)
* holds the file open in FILE_SHARE_DELETE mode, unlink
* will succeed, but the file will still show up in
- * directory listing until the last handle is closed.
- * To avoid confusing the lingering deleted file for a
- * live WAL file that needs to be archived, rename it
- * before deleting it.
+ * directory listing until the last handle is closed. To
+ * avoid confusing the lingering deleted file for a live
+ * WAL file that needs to be archived, rename it before
+ * deleting it.
*
* If another process holds the file open without
* FILE_SHARE_DELETE flag, rename will fail. We'll try
RecPtr = &tmpRecPtr;
/*
- * Align recptr to next page if no more records can fit on the
- * current page.
+ * Align recptr to next page if no more records can fit on the current
+ * page.
*/
if (XLOG_BLCKSZ - (RecPtr->xrecoff % XLOG_BLCKSZ) < SizeOfXLogRecord)
{
UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
/*
- * If the ending log segment is still open, close it (to avoid
- * problems on Windows with trying to rename or delete an open file).
+ * If the ending log segment is still open, close it (to avoid problems on
+ * Windows with trying to rename or delete an open file).
*/
if (readFile >= 0)
{
{
/* We ignore autovacuum_max_workers when we make this test. */
RecoveryRequiresIntParameter("max_connections",
- MaxConnections, checkPoint.MaxConnections);
+ MaxConnections, checkPoint.MaxConnections);
RecoveryRequiresIntParameter("max_prepared_xacts",
- max_prepared_xacts, checkPoint.max_prepared_xacts);
+ max_prepared_xacts, checkPoint.max_prepared_xacts);
RecoveryRequiresIntParameter("max_locks_per_xact",
- max_locks_per_xact, checkPoint.max_locks_per_xact);
+ max_locks_per_xact, checkPoint.max_locks_per_xact);
if (!checkPoint.XLogStandbyInfoMode)
ereport(ERROR,
- (errmsg("recovery connections cannot start because the recovery_connections "
- "parameter is disabled on the WAL source server")));
+ (errmsg("recovery connections cannot start because the recovery_connections "
+ "parameter is disabled on the WAL source server")));
}
/*
ValidateXLOGDirectoryStructure();
/*
- * Clear out any old relcache cache files. This is *necessary* if we
- * do any WAL replay, since that would probably result in the cache files
- * being out of sync with database reality. In theory we could leave
- * them in place if the database had been cleanly shut down, but it
- * seems safest to just remove them always and let them be rebuilt
- * during the first backend startup.
+ * Clear out any old relcache cache files. This is *necessary* if we do
+ * any WAL replay, since that would probably result in the cache files
+ * being out of sync with database reality. In theory we could leave them
+ * in place if the database had been cleanly shut down, but it seems
+ * safest to just remove them always and let them be rebuilt during the
+ * first backend startup.
*/
RelationCacheInitFileRemove();
{
if (recoveryTargetExact)
ereport(LOG,
- (errmsg("starting point-in-time recovery to XID %u",
- recoveryTargetXid)));
+ (errmsg("starting point-in-time recovery to XID %u",
+ recoveryTargetXid)));
else
ereport(LOG,
(errmsg("starting point-in-time recovery to %s",
if (XLByteLT(ControlFile->minRecoveryPoint, checkPoint.redo))
ControlFile->minRecoveryPoint = checkPoint.redo;
}
+
/*
* set backupStartupPoint if we're starting archive recovery from a
* base backup
/*
* Initialize recovery connections, if enabled. We won't let backends
- * in yet, not until we've reached the min recovery point specified
- * in control file and we've established a recovery snapshot from a
+ * in yet, not until we've reached the min recovery point specified in
+ * control file and we've established a recovery snapshot from a
* running-xacts WAL record.
*/
if (InArchiveRecovery && XLogRequestRecoveryConnections)
{
TransactionId *xids;
- int nxids;
+ int nxids;
CheckRequiredParameterValues(checkPoint);
{
#ifdef WAL_DEBUG
if (XLOG_DEBUG ||
- (rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
+ (rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
(rmid != RM_XACT_ID && trace_recovery_messages <= DEBUG3))
{
StringInfoData buf;
{
reachedMinRecoveryPoint = true;
ereport(LOG,
- (errmsg("consistent recovery state reached at %X/%X",
- EndRecPtr.xlogid, EndRecPtr.xrecoff)));
+ (errmsg("consistent recovery state reached at %X/%X",
+ EndRecPtr.xlogid, EndRecPtr.xrecoff)));
}
/*
* Have we got a valid starting snapshot that will allow
- * queries to be run? If so, we can tell postmaster that
- * the database is consistent now, enabling connections.
+ * queries to be run? If so, we can tell postmaster that the
+ * database is consistent now, enabling connections.
*/
if (standbyState == STANDBY_SNAPSHOT_READY &&
!backendsAllowed &&
/*
* We are now done reading the xlog from stream. Turn off streaming
- * recovery to force fetching the files (which would be required
- * at end of recovery, e.g., timeline history file) from archive or
- * pg_xlog.
+ * recovery to force fetching the files (which would be required at end of
+ * recovery, e.g., timeline history file) from archive or pg_xlog.
*/
StandbyMode = false;
TransactionIdRetreat(ShmemVariableCache->latestCompletedXid);
/*
- * Start up the commit log and related stuff, too. In hot standby mode
- * we did this already before WAL replay.
+ * Start up the commit log and related stuff, too. In hot standby mode we
+ * did this already before WAL replay.
*/
if (standbyState == STANDBY_DISABLED)
{
}
/*
- * All done. Allow backends to write WAL. (Although the bool flag is
+ * All done. Allow backends to write WAL. (Although the bool flag is
* probably atomic in itself, we use the info_lck here to ensure that
* there are no race conditions concerning visibility of other recent
* updates to shared memory.)
RecoveryInProgress(void)
{
/*
- * We check shared state each time only until we leave recovery mode.
- * We can't re-enter recovery, so there's no need to keep checking after
- * the shared variable has once been seen false.
+ * We check shared state each time only until we leave recovery mode. We
+ * can't re-enter recovery, so there's no need to keep checking after the
+ * shared variable has once been seen false.
*/
if (!LocalRecoveryInProgress)
return false;
/*
* Initialize TimeLineID and RedoRecPtr when we discover that recovery
* is finished. InitPostgres() relies upon this behaviour to ensure
- * that InitXLOGAccess() is called at backend startup. (If you change
+ * that InitXLOGAccess() is called at backend startup. (If you change
* this, see also LocalSetXLogInsertAllowed.)
*/
if (!LocalRecoveryInProgress)
XLogInsertAllowed(void)
{
/*
- * If value is "unconditionally true" or "unconditionally false",
- * just return it. This provides the normal fast path once recovery
- * is known done.
+ * If value is "unconditionally true" or "unconditionally false", just
+ * return it. This provides the normal fast path once recovery is known
+ * done.
*/
if (LocalXLogInsertAllowed >= 0)
return (bool) LocalXLogInsertAllowed;
return false;
/*
- * On exit from recovery, reset to "unconditionally true", since there
- * is no need to keep checking.
+ * On exit from recovery, reset to "unconditionally true", since there is
+ * no need to keep checking.
*/
LocalXLogInsertAllowed = 1;
return true;
CheckPointGuts(checkPoint.redo, flags);
/*
- * Take a snapshot of running transactions and write this to WAL.
- * This allows us to reconstruct the state of running transactions
- * during archive recovery, if required. Skip, if this info disabled.
+ * Take a snapshot of running transactions and write this to WAL. This
+ * allows us to reconstruct the state of running transactions during
+ * archive recovery, if required. Skip, if this info disabled.
*
* If we are shutting down, or Startup process is completing crash
* recovery we don't need to write running xact data.
* Update checkPoint.nextXid since we have a later value
*/
if (!shutdown && XLogStandbyInfoActive())
- LogStandbySnapshot(&checkPoint.oldestActiveXid, &checkPoint.nextXid);
+ LogStandbySnapshot(&checkPoint.oldestActiveXid, &checkPoint.nextXid);
else
checkPoint.oldestActiveXid = InvalidTransactionId;
XLogFlush(recptr);
/*
- * We mustn't write any new WAL after a shutdown checkpoint, or it will
- * be overwritten at next startup. No-one should even try, this just
- * allows sanity-checking. In the case of an end-of-recovery checkpoint,
- * we want to just temporarily disable writing until the system has exited
+ * We mustn't write any new WAL after a shutdown checkpoint, or it will be
+ * overwritten at next startup. No-one should even try, this just allows
+ * sanity-checking. In the case of an end-of-recovery checkpoint, we want
+ * to just temporarily disable writing until the system has exited
* recovery.
*/
if (shutdown)
{
if (flags & CHECKPOINT_END_OF_RECOVERY)
- LocalXLogInsertAllowed = -1; /* return to "check" state */
+ LocalXLogInsertAllowed = -1; /* return to "check" state */
else
- LocalXLogInsertAllowed = 0; /* never again write WAL */
+ LocalXLogInsertAllowed = 0; /* never again write WAL */
}
/*
smgrpostckpt();
/*
- * If there's connected standby servers doing XLOG streaming, don't
- * delete XLOG files that have not been streamed to all of them yet.
- * This does nothing to prevent them from being deleted when the
- * standby is disconnected (e.g because of network problems), but at
- * least it avoids an open replication connection from failing because
- * of that.
+ * If there's connected standby servers doing XLOG streaming, don't delete
+ * XLOG files that have not been streamed to all of them yet. This does
+ * nothing to prevent them from being deleted when the standby is
+ * disconnected (e.g because of network problems), but at least it avoids
+ * an open replication connection from failing because of that.
*/
if ((_logId || _logSeg) && MaxWalSenders > 0)
{
- XLogRecPtr oldest;
- uint32 log;
- uint32 seg;
+ XLogRecPtr oldest;
+ uint32 log;
+ uint32 seg;
oldest = GetOldestWALSendPointer();
if (oldest.xlogid != 0 || oldest.xrecoff != 0)
XLByteToSeg(oldest, log, seg);
if (log < _logId || (log == _logId && seg < _logSeg))
{
- _logId = log;
- _logSeg = seg;
+ _logId = log;
+ _logSeg = seg;
}
}
}
/*
- * Delete old log files (those no longer needed even for
- * previous checkpoint or the standbys in XLOG streaming).
+ * Delete old log files (those no longer needed even for previous
+ * checkpoint or the standbys in XLOG streaming).
*/
if (_logId || _logSeg)
{
/*
* Update pg_control, using current time. Check that it still shows
* IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
- * this is a quick hack to make sure nothing really bad happens if
- * somehow we get here after the end-of-recovery checkpoint.
+ * this is a quick hack to make sure nothing really bad happens if somehow
+ * we get here after the end-of-recovery checkpoint.
*/
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY &&
LogCheckpointEnd(true);
ereport((log_checkpoints ? LOG : DEBUG2),
- (errmsg("recovery restart point at %X/%X with latest known log time %s",
- lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff,
- timestamptz_to_str(GetLatestXLogTime()))));
+ (errmsg("recovery restart point at %X/%X with latest known log time %s",
+ lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff,
+ timestamptz_to_str(GetLatestXLogTime()))));
LWLockRelease(CheckpointLock);
return true;
else if (info == XLOG_BACKUP_END)
{
XLogRecPtr startpoint;
+
memcpy(&startpoint, XLogRecGetData(record), sizeof(startpoint));
if (XLByteEQ(ControlFile->backupStartPoint, startpoint))
if (InArchiveRecovery)
{
/*
- * Note: We don't print the reason string from the record,
- * because that gets added as a line using xlog_desc()
+ * Note: We don't print the reason string from the record, because
+ * that gets added as a line using xlog_desc()
*/
ereport(WARNING,
- (errmsg("unlogged operation performed, data may be missing"),
- errhint("This can happen if you temporarily disable archive_mode without taking a new base backup.")));
+ (errmsg("unlogged operation performed, data may be missing"),
+ errhint("This can happen if you temporarily disable archive_mode without taking a new base backup.")));
}
}
}
}
else if (info == XLOG_BACKUP_END)
{
- XLogRecPtr startpoint;
+ XLogRecPtr startpoint;
memcpy(&startpoint, rec, sizeof(XLogRecPtr));
appendStringInfo(buf, "backup end: %X/%X",
}
else if (info == XLOG_UNLOGGED)
{
- char *reason = rec;
+ char *reason = rec;
appendStringInfo(buf, "unlogged operation: %s", reason);
}
static int
get_sync_bit(int method)
{
- int o_direct_flag = 0;
+ int o_direct_flag = 0;
/* If fsync is disabled, never open in sync mode */
if (!enableFsync)
/*
* Optimize writes by bypassing kernel cache with O_DIRECT when using
* O_SYNC, O_DSYNC or O_FSYNC. But only if archiving and streaming are
- * disabled, otherwise the archive command or walsender process will
- * read the WAL soon after writing it, which is guaranteed to cause a
- * physical read if we bypassed the kernel cache. We also skip the
- * posix_fadvise(POSIX_FADV_DONTNEED) call in XLogFileClose() for the
- * same reason.
+ * disabled, otherwise the archive command or walsender process will read
+ * the WAL soon after writing it, which is guaranteed to cause a physical
+ * read if we bypassed the kernel cache. We also skip the
+ * posix_fadvise(POSIX_FADV_DONTNEED) call in XLogFileClose() for the same
+ * reason.
*
* Never use O_DIRECT in walreceiver process for similar reasons; the WAL
* written by walreceiver is normally read by the startup process soon
{
XLogRecPtr startpoint;
XLogRecPtr stoppoint;
- XLogRecData rdata;
+ XLogRecData rdata;
pg_time_t stamp_time;
char strfbuf[128];
char histfilepath[MAXPGPATH];
*
* We wait forever, since archive_command is supposed to work and we
* assume the admin wanted his backup to work completely. If you don't
- * wish to wait, you can set statement_timeout. Also, some notices
- * are issued to clue in anyone who might be doing this interactively.
+ * wish to wait, you can set statement_timeout. Also, some notices are
+ * issued to clue in anyone who might be doing this interactively.
*/
XLByteToPrevSeg(stoppoint, _logId, _logSeg);
XLogFileName(lastxlogfilename, ThisTimeLineID, _logId, _logSeg);
ereport(WARNING,
(errmsg("pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)",
waits),
- errhint("Check that your archive_command is executing properly. "
- "pg_stop_backup can be cancelled safely, "
- "but the database backup will not be usable without all the WAL segments.")));
+ errhint("Check that your archive_command is executing properly. "
+ "pg_stop_backup can be cancelled safely, "
+ "but the database backup will not be usable without all the WAL segments.")));
}
}
got_SIGHUP = false;
ProcessConfigFile(PGC_SIGHUP);
}
+
/*
* Check if we were requested to exit without finishing recovery.
*/
*/
pqsignal(SIGHUP, StartupProcSigHupHandler); /* reload config file */
pqsignal(SIGINT, SIG_IGN); /* ignore query cancel */
- pqsignal(SIGTERM, StartupProcShutdownHandler); /* request shutdown */
- pqsignal(SIGQUIT, startupproc_quickdie); /* hard crash time */
+ pqsignal(SIGTERM, StartupProcShutdownHandler); /* request shutdown */
+ pqsignal(SIGQUIT, startupproc_quickdie); /* hard crash time */
if (XLogRequestRecoveryConnections)
- pqsignal(SIGALRM, handle_standby_sig_alarm); /* ignored unless InHotStandby */
+ pqsignal(SIGALRM, handle_standby_sig_alarm); /* ignored unless
+ * InHotStandby */
else
pqsignal(SIGALRM, SIG_IGN);
pqsignal(SIGPIPE, SIG_IGN);
{
if (StandbyMode)
{
- bool last_restore_failed = false;
+ bool last_restore_failed = false;
/*
* In standby mode, wait for the requested record to become
- * available, either via restore_command succeeding to restore
- * the segment, or via walreceiver having streamed the record.
+ * available, either via restore_command succeeding to restore the
+ * segment, or via walreceiver having streamed the record.
*/
for (;;)
{
if (WalRcvInProgress())
{
/*
- * While walreceiver is active, wait for new WAL to
- * arrive from primary.
+ * While walreceiver is active, wait for new WAL to arrive
+ * from primary.
*/
receivedUpto = GetWalRcvWriteRecPtr();
if (XLByteLT(*RecPtr, receivedUpto))
/*
* If we succeeded restoring some segments from archive
- * since the last connection attempt (or we haven't
- * tried streaming yet, retry immediately. But if we
- * haven't, assume the problem is persistent, so be
- * less aggressive.
+ * since the last connection attempt (or we haven't tried
+ * streaming yet, retry immediately. But if we haven't,
+ * assume the problem is persistent, so be less
+ * aggressive.
*/
if (last_restore_failed)
{
*/
if (CheckForStandbyTrigger())
goto next_record_is_invalid;
- pg_usleep(5000000L); /* 5 seconds */
+ pg_usleep(5000000L); /* 5 seconds */
}
last_restore_failed = true;
}
/*
- * This possibly-long loop needs to handle interrupts of startup
- * process.
+ * This possibly-long loop needs to handle interrupts of
+ * startup process.
*/
HandleStartupProcInterrupts();
}
}
/*
- * At this point, we have the right segment open and we know the
- * requested record is in it.
+ * At this point, we have the right segment open and we know the requested
+ * record is in it.
*/
Assert(readFile != -1);
/*
- * If the current segment is being streamed from master, calculate
- * how much of the current page we have received already. We know the
- * requested record has been received, but this is for the benefit
- * of future calls, to allow quick exit at the top of this function.
+ * If the current segment is being streamed from master, calculate how
+ * much of the current page we have received already. We know the
+ * requested record has been received, but this is for the benefit of
+ * future calls, to allow quick exit at the top of this function.
*/
if (readStreamed)
{
{
ereport(emode,
(errcode_for_file_access(),
- errmsg("could not seek in log file %u, segment %u to offset %u: %m",
- readId, readSeg, readOff)));
+ errmsg("could not seek in log file %u, segment %u to offset %u: %m",
+ readId, readSeg, readOff)));
goto next_record_is_invalid;
}
if (read(readFile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
{
ereport(emode,
(errcode_for_file_access(),
- errmsg("could not read from log file %u, segment %u, offset %u: %m",
- readId, readSeg, readOff)));
+ errmsg("could not read from log file %u, segment %u, offset %u: %m",
+ readId, readSeg, readOff)));
goto next_record_is_invalid;
}
if (!ValidXLOGHeader((XLogPageHeader) readBuf, emode))
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.259 2010/02/07 20:48:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.260 2010/02/26 02:00:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#endif
/*
- * Assign the ProcSignalSlot for an auxiliary process. Since it
+ * Assign the ProcSignalSlot for an auxiliary process. Since it
* doesn't have a BackendId, the slot is statically allocated based on
* the auxiliary process type (auxType). Backends use slots indexed
* in the range from 1 to MaxBackends (inclusive), so we use
boot_yyparse();
/*
- * We should now know about all mapped relations, so it's okay to
- * write out the initial relation mapping files.
+ * We should now know about all mapped relations, so it's okay to write
+ * out the initial relation mapping files.
*/
RelationMapFinishBootstrap();
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.162 2010/02/14 18:42:12 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.163 2010/02/26 02:00:35 momjian Exp $
*
* NOTES
* See acl.h.
*/
typedef struct
{
- Oid roleid; /* owning role */
- Oid nspid; /* namespace, or InvalidOid if none */
+ Oid roleid; /* owning role */
+ Oid nspid; /* namespace, or InvalidOid if none */
/* remaining fields are same as in InternalGrant: */
bool is_grant;
GrantObjectType objtype;
case ACL_TARGET_ALL_IN_SCHEMA:
istmt.objects = objectsInSchemaToOids(stmt->objtype, stmt->objects);
break;
- /* ACL_TARGET_DEFAULTS should not be seen here */
+ /* ACL_TARGET_DEFAULTS should not be seen here */
default:
elog(ERROR, "unrecognized GrantStmt.targtype: %d",
(int) stmt->targtype);
case ACL_OBJECT_LARGEOBJECT:
foreach(cell, objnames)
{
- Oid lobjOid = intVal(lfirst(cell));
+ Oid lobjOid = intVal(lfirst(cell));
if (!LargeObjectExists(lobjOid))
ereport(ERROR,
}
/*
- * Convert action->privileges, a list of privilege strings,
- * into an AclMode bitmask.
+ * Convert action->privileges, a list of privilege strings, into an
+ * AclMode bitmask.
*/
switch (action->objtype)
{
if (privnode->cols)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- errmsg("default privileges cannot be set for columns")));
+ errmsg("default privileges cannot be set for columns")));
if (privnode->priv_name == NULL) /* parser mistake? */
elog(ERROR, "AccessPriv node must specify privilege");
iacls.roleid = get_roleid_checked(rolename);
/*
- * We insist that calling user be a member of each target role.
- * If he has that, he could become that role anyway via SET ROLE,
- * so FOR ROLE is just a syntactic convenience and doesn't give
- * any special privileges.
+ * We insist that calling user be a member of each target role. If
+ * he has that, he could become that role anyway via SET ROLE, so
+ * FOR ROLE is just a syntactic convenience and doesn't give any
+ * special privileges.
*/
check_is_member_of_role(GetUserId(), iacls.roleid);
rel = heap_open(DefaultAclRelationId, RowExclusiveLock);
/*
- * Convert ACL object type to pg_default_acl object type
- * and handle all_privs option
+ * Convert ACL object type to pg_default_acl object type and handle
+ * all_privs option
*/
switch (iacls->objtype)
{
tuple = SearchSysCache3(DEFACLROLENSPOBJ,
ObjectIdGetDatum(iacls->roleid),
ObjectIdGetDatum(iacls->nspid),
- CharGetDatum(objtype));
+ CharGetDatum(objtype));
if (HeapTupleIsValid(tuple))
{
{
/*
* If we are creating a global entry, start with the hard-wired
- * defaults and modify as per command. Otherwise, start with an empty
- * ACL and modify that. This is needed because global entries
- * replace the hard-wired defaults, while others do not.
+ * defaults and modify as per command. Otherwise, start with an empty
+ * ACL and modify that. This is needed because global entries replace
+ * the hard-wired defaults, while others do not.
*/
if (!OidIsValid(iacls->nspid))
old_acl = acldefault(iacls->objtype, iacls->roleid);
noldmembers = aclmembers(old_acl, &oldmembers);
/*
- * Generate new ACL. Grantor of rights is always the same as the
- * target role.
+ * Generate new ACL. Grantor of rights is always the same as the target
+ * role.
*/
new_acl = merge_acl_with_grant(old_acl,
iacls->is_grant,
if (OidIsValid(iacls->nspid))
{
ObjectAddress myself,
- referenced;
+ referenced;
myself.classId = DefaultAclRelationId;
myself.objectId = HeapTupleGetOid(newtuple);
Oid *newmembers;
tuple = SearchSysCache1(FOREIGNDATAWRAPPEROID,
- ObjectIdGetDatum(fdwid));
+ ObjectIdGetDatum(fdwid));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for foreign-data wrapper %u", fdwid);
foreach(cell, istmt->objects)
{
Oid loid = lfirst_oid(cell);
- Form_pg_largeobject_metadata form_lo_meta;
+ Form_pg_largeobject_metadata form_lo_meta;
char loname[NAMEDATALEN];
Datum aclDatum;
bool isNull;
int nnewmembers;
Oid *oldmembers;
Oid *newmembers;
- ScanKeyData entry[1];
- SysScanDesc scan;
+ ScanKeyData entry[1];
+ SysScanDesc scan;
HeapTuple tuple;
/* There's no syscache for pg_largeobject_metadata */
{
AclMode result;
Relation pg_lo_meta;
- ScanKeyData entry[1];
- SysScanDesc scan;
+ ScanKeyData entry[1];
+ SysScanDesc scan;
HeapTuple tuple;
Datum aclDatum;
bool isNull;
ownerId = ((Form_pg_tablespace) GETSTRUCT(tuple))->spcowner;
aclDatum = SysCacheGetAttr(TABLESPACEOID, tuple,
- Anum_pg_tablespace_spcacl,
- &isNull);
+ Anum_pg_tablespace_spcacl,
+ &isNull);
if (isNull)
{
pg_largeobject_ownercheck(Oid lobj_oid, Oid roleid)
{
Relation pg_lo_meta;
- ScanKeyData entry[1];
- SysScanDesc scan;
+ ScanKeyData entry[1];
+ SysScanDesc scan;
HeapTuple tuple;
Oid ownerId;
if (HeapTupleIsValid(tuple))
{
- Datum aclDatum;
- bool isNull;
+ Datum aclDatum;
+ bool isNull;
aclDatum = SysCacheGetAttr(DEFACLROLENSPOBJ, tuple,
Anum_pg_default_acl_defaclacl,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.88 2010/02/07 20:48:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.89 2010/02/26 02:00:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/* All other tablespaces are accessed via symlinks */
pathlen = 9 + 1 + OIDCHARS + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) +
- 1 + OIDCHARS + 1;
+ 1 + OIDCHARS + 1;
path = (char *) palloc(pathlen);
snprintf(path, pathlen, "pg_tblspc/%u/%s/%u",
spcNode, TABLESPACE_VERSION_DIRECTORY, dbNode);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.95 2010/02/14 18:42:12 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.96 2010/02/26 02:00:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
AuthIdRelationId, /* OCLASS_ROLE */
DatabaseRelationId, /* OCLASS_DATABASE */
TableSpaceRelationId, /* OCLASS_TBLSPACE */
- ForeignDataWrapperRelationId, /* OCLASS_FDW */
+ ForeignDataWrapperRelationId, /* OCLASS_FDW */
ForeignServerRelationId, /* OCLASS_FOREIGN_SERVER */
UserMappingRelationId, /* OCLASS_USER_MAPPING */
DefaultAclRelationId /* OCLASS_DEFACL */
break;
/*
- * OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE intentionally
- * not handled here
+ * OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE intentionally not
+ * handled here
*/
case OCLASS_FDW:
case DEFACLOBJ_RELATION:
appendStringInfo(&buffer,
_("default privileges on new relations belonging to role %s"),
- GetUserNameFromId(defacl->defaclrole));
+ GetUserNameFromId(defacl->defaclrole));
break;
case DEFACLOBJ_SEQUENCE:
appendStringInfo(&buffer,
_("default privileges on new sequences belonging to role %s"),
- GetUserNameFromId(defacl->defaclrole));
+ GetUserNameFromId(defacl->defaclrole));
break;
case DEFACLOBJ_FUNCTION:
appendStringInfo(&buffer,
_("default privileges on new functions belonging to role %s"),
- GetUserNameFromId(defacl->defaclrole));
+ GetUserNameFromId(defacl->defaclrole));
break;
default:
/* shouldn't get here */
appendStringInfo(&buffer,
- _("default privileges belonging to role %s"),
- GetUserNameFromId(defacl->defaclrole));
+ _("default privileges belonging to role %s"),
+ GetUserNameFromId(defacl->defaclrole));
break;
}
if (OidIsValid(defacl->defaclnamespace))
{
appendStringInfo(&buffer,
- _(" in schema %s"),
- get_namespace_name(defacl->defaclnamespace));
+ _(" in schema %s"),
+ get_namespace_name(defacl->defaclnamespace));
}
systable_endscan(rcscan);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.371 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.372 2010/02/26 02:00:36 momjian Exp $
*
*
* INTERFACE ROUTINES
/* Kluge for upgrade-in-place support */
-Oid binary_upgrade_next_heap_relfilenode = InvalidOid;
-Oid binary_upgrade_next_toast_relfilenode = InvalidOid;
+Oid binary_upgrade_next_heap_relfilenode = InvalidOid;
+Oid binary_upgrade_next_toast_relfilenode = InvalidOid;
static void AddNewRelationTuple(Relation pg_class_desc,
Relation new_rel_desc,
{
/*
* Refuse any attempt to create a pseudo-type column, except for a
- * special hack for pg_statistic: allow ANYARRAY when modifying
- * system catalogs (this allows creating pg_statistic and cloning it
- * during VACUUM FULL)
+ * special hack for pg_statistic: allow ANYARRAY when modifying system
+ * catalogs (this allows creating pg_statistic and cloning it during
+ * VACUUM FULL)
*/
if (atttypid != ANYARRAYOID || !allow_system_table_mods)
ereport(ERROR,
* Tuple data is taken from new_rel_desc->rd_rel, except for the
* variable-width fields which are not present in a cached reldesc.
* relacl and reloptions are passed in Datum form (to avoid having
- * to reference the data types in heap.h). Pass (Datum) 0 to set them
+ * to reference the data types in heap.h). Pass (Datum) 0 to set them
* to NULL.
* --------------------------------
*/
Oid new_array_type)
{
return
- TypeCreate(new_row_type, /* optional predetermined OID */
+ TypeCreate(new_row_type, /* optional predetermined OID */
typeName, /* type name */
typeNamespace, /* type namespace */
new_rel_oid, /* relation oid */
/*
* Since defining a relation also defines a complex type, we add a new
- * system type corresponding to the new relation. The OID of the type
- * can be preselected by the caller, but if reltypeid is InvalidOid,
- * we'll generate a new OID for it.
+ * system type corresponding to the new relation. The OID of the type can
+ * be preselected by the caller, but if reltypeid is InvalidOid, we'll
+ * generate a new OID for it.
*
* NOTE: we could get a unique-index failure here, in case someone else is
* creating the same type name in parallel but hadn't committed yet when
/*
* Make a dependency link to force the relation to be deleted if its
- * namespace is. Also make a dependency link to its owner, as well
- * as dependencies for any roles mentioned in the default ACL.
+ * namespace is. Also make a dependency link to its owner, as well as
+ * dependencies for any roles mentioned in the default ACL.
*
* For composite types, these dependencies are tracked for the pg_type
* entry, so we needn't record them here. Likewise, TOAST tables don't
* need a namespace dependency (they live in a pinned namespace) nor an
- * owner dependency (they depend indirectly through the parent table),
- * nor should they have any ACL entries.
+ * owner dependency (they depend indirectly through the parent table), nor
+ * should they have any ACL entries.
*
* Also, skip this in bootstrap mode, since we don't make dependencies
* while bootstrapping.
' ',
' ',
' ',
- NULL, /* not an exclusion constraint */
+ NULL, /* not an exclusion constraint */
expr, /* Tree form of check constraint */
ccbin, /* Binary form of check constraint */
ccsrc, /* Source form of check constraint */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.336 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.337 2010/02/26 02:00:36 momjian Exp $
*
*
* INTERFACE ROUTINES
/* Kluge for upgrade-in-place support */
-Oid binary_upgrade_next_index_relfilenode = InvalidOid;
+Oid binary_upgrade_next_index_relfilenode = InvalidOid;
/* state info for validate_index bulkdelete callback */
typedef struct
/*
* Set the attribute name as specified by caller.
*/
- if (colnames_item == NULL) /* shouldn't happen */
+ if (colnames_item == NULL) /* shouldn't happen */
elog(ERROR, "too few entries in colnames list");
namestrcpy(&to->attname, (const char *) lfirst(colnames_item));
colnames_item = lnext(colnames_item);
/*
* The index will be in the same namespace as its parent table, and is
- * shared across databases if and only if the parent is. Likewise,
- * it will use the relfilenode map if and only if the parent does.
+ * shared across databases if and only if the parent is. Likewise, it
+ * will use the relfilenode map if and only if the parent does.
*/
namespaceId = RelationGetNamespace(heapRelation);
shared_relation = heapRelation->rd_rel->relisshared;
errmsg("concurrent index creation on system catalog tables is not supported")));
/*
- * This case is currently not supported, but there's no way to ask for
- * it in the grammar anyway, so it can't happen.
+ * This case is currently not supported, but there's no way to ask for it
+ * in the grammar anyway, so it can't happen.
*/
if (concurrent && is_exclusion)
ereport(ERROR,
indexInfo->ii_KeyAttrNumbers,
indexInfo->ii_NumIndexAttrs,
InvalidOid, /* no domain */
- indexRelationId, /* index OID */
+ indexRelationId, /* index OID */
InvalidOid, /* no foreign key */
NULL,
NULL,
CreateTrigStmt *trigger;
heapRel = makeRangeVar(get_namespace_name(namespaceId),
- pstrdup(RelationGetRelationName(heapRelation)),
+ pstrdup(RelationGetRelationName(heapRelation)),
-1);
trigger = makeNode(CreateTrigStmt);
Assert(PointerIsValid(stats));
/*
- * If it's for an exclusion constraint, make a second pass over the
- * heap to verify that the constraint is satisfied.
+ * If it's for an exclusion constraint, make a second pass over the heap
+ * to verify that the constraint is satisfied.
*/
if (indexInfo->ii_ExclusionOps != NULL)
IndexCheckExclusion(heapRelation, indexRelation, indexInfo);
/*
* Since caller should hold ShareLock or better, normally
* the only way to see this is if it was inserted earlier
- * in our own transaction. However, it can happen in
+ * in our own transaction. However, it can happen in
* system catalogs, since we tend to release write lock
* before commit there. Give a warning if neither case
* applies.
/*
* If we are performing uniqueness checks, assuming
- * the tuple is dead could lead to missing a uniqueness
- * violation. In that case we wait for the deleting
- * transaction to finish and check again.
+ * the tuple is dead could lead to missing a
+ * uniqueness violation. In that case we wait for the
+ * deleting transaction to finish and check again.
*/
if (checking_uniqueness)
{
/*
* If the index is marked invalid or not ready (ie, it's from a failed
- * CREATE INDEX CONCURRENTLY), and we didn't skip a uniqueness check,
- * we can now mark it valid. This allows REINDEX to be used to clean up
- * in such cases.
+ * CREATE INDEX CONCURRENTLY), and we didn't skip a uniqueness check, we
+ * can now mark it valid. This allows REINDEX to be used to clean up in
+ * such cases.
*
* We can also reset indcheckxmin, because we have now done a
* non-concurrent index build, *except* in the case where index_build
* It is okay to not insert entries into the indexes we have not processed
* yet because all of this is transaction-safe. If we fail partway
* through, the updated rows are dead and it doesn't matter whether they
- * have index entries. Also, a new pg_class index will be created with a
+ * have index entries. Also, a new pg_class index will be created with a
* correct entry for its own pg_class row because we do
* RelationSetNewRelfilenode() before we do index_build().
*
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.124 2010/02/20 21:24:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.125 2010/02/26 02:00:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void RemoveTempRelationsCallback(int code, Datum arg);
static void NamespaceCallback(Datum arg, int cacheid, ItemPointer tuplePtr);
static bool MatchNamedCall(HeapTuple proctup, int nargs, List *argnames,
- int **argnumbers);
+ int **argnumbers);
/* These don't really need to appear in any header file */
Datum pg_table_is_visible(PG_FUNCTION_ARGS);
}
/* use exact schema given */
namespaceId = GetSysCacheOid1(NAMESPACENAME,
- CStringGetDatum(newRelation->schemaname));
+ CStringGetDatum(newRelation->schemaname));
if (!OidIsValid(namespaceId))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
/*
* Call uses named or mixed notation
*
- * Named or mixed notation can match a variadic function only
- * if expand_variadic is off; otherwise there is no way to match
- * the presumed-nameless parameters expanded from the variadic
- * array.
+ * Named or mixed notation can match a variadic function only if
+ * expand_variadic is off; otherwise there is no way to match the
+ * presumed-nameless parameters expanded from the variadic array.
*/
if (OidIsValid(procform->provariadic) && expand_variadic)
continue;
/*
* Check argument count.
*/
- Assert(nargs >= 0); /* -1 not supported with argnames */
+ Assert(nargs >= 0); /* -1 not supported with argnames */
if (pronargs > nargs && expand_defaults)
{
* Call uses positional notation
*
* Check if function is variadic, and get variadic element type if
- * so. If expand_variadic is false, we should just ignore
+ * so. If expand_variadic is false, we should just ignore
* variadic-ness.
*/
if (pronargs <= nargs && expand_variadic)
/* now examine the named args */
foreach(lc, argnames)
{
- char *argname = (char *) lfirst(lc);
- bool found;
- int i;
+ char *argname = (char *) lfirst(lc);
+ bool found;
+ int i;
pp = 0;
found = false;
/* Check for default arguments */
if (nargs < pronargs)
{
- int first_arg_with_default = pronargs - procform->pronargdefaults;
+ int first_arg_with_default = pronargs - procform->pronargdefaults;
for (pp = numposargs; pp < pronargs; pp++)
{
* Do not allow a Hot Standby slave session to make temp tables. Aside
* from problems with modifying the system catalogs, there is a naming
* conflict: pg_temp_N belongs to the session with BackendId N on the
- * master, not to a slave session with the same BackendId. We should
- * not be able to get here anyway due to XactReadOnly checks, but let's
- * just make real sure. Note that this also backstops various operations
- * that allow XactReadOnly transactions to modify temp tables; they'd need
+ * master, not to a slave session with the same BackendId. We should not
+ * be able to get here anyway due to XactReadOnly checks, but let's just
+ * make real sure. Note that this also backstops various operations that
+ * allow XactReadOnly transactions to modify temp tables; they'd need
* RecoveryInProgress checks if not for this.
*/
if (RecoveryInProgress())
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.105 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.106 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs,
- NIL, true_oid_array))));
+ func_signature_string(fnName, nargs,
+ NIL, true_oid_array))));
}
/* Check aggregate creator has permission to call the function */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.52 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.53 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/*
* Register normal dependency on the unique index that supports a
- * foreign-key constraint. (Note: for indexes associated with
- * unique or primary-key constraints, the dependency runs the other
- * way, and is not made here.)
+ * foreign-key constraint. (Note: for indexes associated with unique
+ * or primary-key constraints, the dependency runs the other way, and
+ * is not made here.)
*/
ObjectAddress relobject;
}
/*
- * We don't bother to register dependencies on the exclusion operators
- * of an exclusion constraint. We assume they are members of the opclass
- * supporting the index, so there's an indirect dependency via that.
- * (This would be pretty dicey for cross-type operators, but exclusion
- * operators can never be cross-type.)
+ * We don't bother to register dependencies on the exclusion operators of
+ * an exclusion constraint. We assume they are members of the opclass
+ * supporting the index, so there's an indirect dependency via that. (This
+ * would be pretty dicey for cross-type operators, but exclusion operators
+ * can never be cross-type.)
*/
if (conExpr != NULL)
if (OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("table \"%s\" has multiple constraints named \"%s\"",
- get_rel_name(relid), conname)));
+ errmsg("table \"%s\" has multiple constraints named \"%s\"",
+ get_rel_name(relid), conname)));
conOid = HeapTupleGetOid(tuple);
}
}
/*
* pg_db_role_setting.c
* Routines to support manipulation of the pg_db_role_setting relation
- *
+ *
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_db_role_setting.c,v 1.2 2010/01/02 16:57:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_db_role_setting.c,v 1.3 2010/02/26 02:00:37 momjian Exp $
*/
#include "postgres.h"
*
* - in RESET ALL, simply delete the pg_db_role_setting tuple (if any)
*
- * - in other commands, if there's a tuple in pg_db_role_setting, update it;
- * if it ends up empty, delete it
+ * - in other commands, if there's a tuple in pg_db_role_setting, update
+ * it; if it ends up empty, delete it
*
* - otherwise, insert a new pg_db_role_setting tuple, but only if the
- * command is not RESET
+ * command is not RESET
*/
if (setstmt->kind == VAR_RESET_ALL)
{
ArrayType *a;
memset(nulls, false, sizeof(nulls));
-
+
a = GUCArrayAdd(NULL, setstmt->name, valuestr);
values[Anum_pg_db_role_setting_setdatabase - 1] =
/*
* Drop some settings from the catalog. These can be for a particular
- * database, or for a particular role. (It is of course possible to do both
+ * database, or for a particular role. (It is of course possible to do both
* too, but it doesn't make sense for current uses.)
*/
void
DropSetting(Oid databaseid, Oid roleid)
{
- Relation relsetting;
- HeapScanDesc scan;
- ScanKeyData keys[2];
- HeapTuple tup;
- int numkeys = 0;
+ Relation relsetting;
+ HeapScanDesc scan;
+ ScanKeyData keys[2];
+ HeapTuple tup;
+ int numkeys = 0;
relsetting = heap_open(DbRoleSettingRelationId, RowExclusiveLock);
void
ApplySetting(Oid databaseid, Oid roleid, Relation relsetting, GucSource source)
{
- SysScanDesc scan;
- ScanKeyData keys[2];
- HeapTuple tup;
+ SysScanDesc scan;
+ ScanKeyData keys[2];
+ HeapTuple tup;
ScanKeyInit(&keys[0],
Anum_pg_db_role_setting_setdatabase,
SnapshotNow, 2, keys);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- bool isnull;
- Datum datum;
+ bool isnull;
+ Datum datum;
datum = heap_getattr(tup, Anum_pg_db_role_setting_setconfig,
RelationGetDescr(relsetting), &isnull);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_enum.c,v 1.13 2010/01/02 16:57:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_enum.c,v 1.14 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
tupDesc = pg_enum->rd_att;
/*
- * Allocate oids
+ * Allocate oids
*/
oids = (Oid *) palloc(num_elems * sizeof(Oid));
if (OidIsValid(binary_upgrade_next_pg_enum_oid))
{
- if (num_elems != 1)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("EnumValuesCreate() can only set a single OID")));
- oids[0] = binary_upgrade_next_pg_enum_oid;
- binary_upgrade_next_pg_enum_oid = InvalidOid;
- }
+ if (num_elems != 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("EnumValuesCreate() can only set a single OID")));
+ oids[0] = binary_upgrade_next_pg_enum_oid;
+ binary_upgrade_next_pg_enum_oid = InvalidOid;
+ }
else
{
/*
- * While this method does not absolutely guarantee that we generate
- * no duplicate oids (since we haven't entered each oid into the
- * table before allocating the next), trouble could only occur if
- * the oid counter wraps all the way around before we finish. Which
- * seems unlikely.
+ * While this method does not absolutely guarantee that we generate no
+ * duplicate oids (since we haven't entered each oid into the table
+ * before allocating the next), trouble could only occur if the oid
+ * counter wraps all the way around before we finish. Which seems
+ * unlikely.
*/
for (elemno = 0; elemno < num_elems; elemno++)
{
/*
- * The pg_enum.oid is stored in user tables. This oid must be
- * preserved by binary upgrades.
+ * The pg_enum.oid is stored in user tables. This oid must be
+ * preserved by binary upgrades.
*/
oids[elemno] = GetNewOid(pg_enum);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_inherits.c,v 1.7 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_inherits.c,v 1.8 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *
find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
{
- List *rels_list, *rel_numparents;
+ List *rels_list,
+ *rel_numparents;
ListCell *l;
/*
*/
foreach(lc, currentchildren)
{
- Oid child_oid = lfirst_oid(lc);
- bool found = false;
+ Oid child_oid = lfirst_oid(lc);
+ bool found = false;
ListCell *lo;
ListCell *li;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_largeobject.c,v 1.38 2010/02/17 04:19:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_largeobject.c,v 1.39 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Drop a large object having the given LO identifier. Both the data pages
+ * Drop a large object having the given LO identifier. Both the data pages
* and metadata must be dropped.
*/
void
ScanKeyInit(&skey[0],
ObjectIdAttributeNumber,
BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(loid));
+ ObjectIdGetDatum(loid));
scan = systable_beginscan(pg_lo_meta,
LargeObjectMetadataOidIndexId, true,
void
LargeObjectAlterOwner(Oid loid, Oid newOwnerId)
{
- Form_pg_largeobject_metadata form_lo_meta;
+ Form_pg_largeobject_metadata form_lo_meta;
Relation pg_lo_meta;
- ScanKeyData skey[1];
- SysScanDesc scan;
+ ScanKeyData skey[1];
+ SysScanDesc scan;
HeapTuple oldtup;
HeapTuple newtup;
if (!superuser())
{
/*
- * lo_compat_privileges is not checked here, because ALTER
- * LARGE OBJECT ... OWNER did not exist at all prior to
- * PostgreSQL 9.0.
+ * lo_compat_privileges is not checked here, because ALTER LARGE
+ * OBJECT ... OWNER did not exist at all prior to PostgreSQL 9.0.
*
* We must be the owner of the existing object.
*/
replaces[Anum_pg_largeobject_metadata_lomowner - 1] = true;
/*
- * Determine the modified ACL for the new owner.
- * This is only necessary when the ACL is non-null.
+ * Determine the modified ACL for the new owner. This is only
+ * necessary when the ACL is non-null.
*/
aclDatum = heap_getattr(oldtup,
Anum_pg_largeobject_metadata_lomacl,
LargeObjectExists(Oid loid)
{
Relation pg_lo_meta;
- ScanKeyData skey[1];
- SysScanDesc sd;
+ ScanKeyData skey[1];
+ SysScanDesc sd;
HeapTuple tuple;
bool retval = false;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.171 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.172 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If there were any named input parameters, check to make sure the
- * names have not been changed, as this could break existing calls.
- * We allow adding names to formerly unnamed parameters, though.
+ * names have not been changed, as this could break existing calls. We
+ * allow adding names to formerly unnamed parameters, though.
*/
proargnames = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup,
Anum_pg_proc_proargnames,
strcmp(old_arg_names[j], new_arg_names[j]) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("cannot change name of input parameter \"%s\"",
- old_arg_names[j]),
+ errmsg("cannot change name of input parameter \"%s\"",
+ old_arg_names[j]),
errhint("Use DROP FUNCTION first.")));
}
- }
+ }
/*
* If there are existing defaults, check compatibility: redefinition
/*
* Adjust a syntax error occurring inside the function body of a CREATE
- * FUNCTION or DO command. This can be used by any function validator or
+ * FUNCTION or DO command. This can be used by any function validator or
* anonymous-block handler, not only for SQL-language functions.
* It is assumed that the syntax error position is initially relative to the
* function body string (as passed in). If possible, we adjust the position
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.39 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.40 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
pfree(database);
break;
}
-
+
default:
elog(ERROR, "unrecognized shared classId: %u", classId);
break;
case DefaultAclRelationId:
+
/*
- * Ignore default ACLs; they should be handled by
- * DROP OWNED, not REASSIGN OWNED.
+ * Ignore default ACLs; they should be handled by DROP
+ * OWNED, not REASSIGN OWNED.
*/
break;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.132 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.133 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "utils/rel.h"
#include "utils/syscache.h"
-Oid binary_upgrade_next_pg_type_oid = InvalidOid;
+Oid binary_upgrade_next_pg_type_oid = InvalidOid;
/* ----------------------------------------------------------------
* TypeShellMake
binary_upgrade_next_pg_type_oid = InvalidOid;
}
/* else allow system to assign oid */
-
+
typeObjectId = simple_heap_insert(pg_type_desc, tup);
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.31 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.32 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "utils/syscache.h"
/* Kluges for upgrade-in-place support */
-extern Oid binary_upgrade_next_toast_relfilenode;
+extern Oid binary_upgrade_next_toast_relfilenode;
-Oid binary_upgrade_next_pg_type_toast_oid = InvalidOid;
+Oid binary_upgrade_next_pg_type_toast_oid = InvalidOid;
static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid,
Datum reloptions);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.34 2010/02/01 19:28:56 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.35 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
stmt->subname, /* old att name */
stmt->newname, /* new att name */
interpretInhOption(stmt->relation->inhOpt), /* recursive? */
- 0); /* expected inhcount */
+ 0); /* expected inhcount */
break;
case OBJECT_TRIGGER:
renametrig(relid,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.151 2010/02/14 18:42:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.152 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static double init_selection_state(int n);
static double get_next_S(double t, int n, double *stateptr);
static int compare_rows(const void *a, const void *b);
-static int acquire_inherited_sample_rows(Relation onerel,
+static int acquire_inherited_sample_rows(Relation onerel,
HeapTuple *rows, int targrows,
double *totalrows, double *totaldeadrows);
static void update_attstats(Oid relid, bool inh,
- int natts, VacAttrStats **vacattrstats);
+ int natts, VacAttrStats **vacattrstats);
static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
RelationGetRelationName(onerel))));
/*
- * Set up a working context so that we can easily free whatever junk
- * gets created.
+ * Set up a working context so that we can easily free whatever junk gets
+ * created.
*/
anl_context = AllocSetContextCreate(CurrentMemoryContext,
"Analyze",
* Open all indexes of the relation, and see if there are any analyzable
* columns in the indexes. We do not analyze index columns if there was
* an explicit column list in the ANALYZE command, however. If we are
- * doing a recursive scan, we don't want to touch the parent's indexes
- * at all.
+ * doing a recursive scan, we don't want to touch the parent's indexes at
+ * all.
*/
if (!inh)
vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);
{
VacAttrStats *stats = vacattrstats[i];
AttributeOpts *aopt =
- get_attribute_options(onerel->rd_id, stats->attr->attnum);
+ get_attribute_options(onerel->rd_id, stats->attr->attnum);
stats->rows = rows;
stats->tupDesc = onerel->rd_att;
*/
if (aopt != NULL)
{
- float8 n_distinct =
- inh ? aopt->n_distinct_inherited : aopt->n_distinct;
+ float8 n_distinct =
+ inh ? aopt->n_distinct_inherited : aopt->n_distinct;
+
if (n_distinct != 0.0)
stats->stadistinct = n_distinct;
}
}
/*
- * Update pages/tuples stats in pg_class, but not if we're inside a
- * VACUUM that got a more precise number.
+ * Update pages/tuples stats in pg_class, but not if we're inside a VACUUM
+ * that got a more precise number.
*/
if (update_reltuples)
vac_update_relstats(onerel,
}
/*
- * Report ANALYZE to the stats collector, too; likewise, tell it to
- * adopt these numbers only if we're not inside a VACUUM that got a
- * better number. However, a call with inh = true shouldn't reset
- * the stats.
+ * Report ANALYZE to the stats collector, too; likewise, tell it to adopt
+ * these numbers only if we're not inside a VACUUM that got a better
+ * number. However, a call with inh = true shouldn't reset the stats.
*/
if (!inh)
pgstat_report_analyze(onerel, update_reltuples,
{
VacAttrStats *stats = thisdata->vacattrstats[i];
AttributeOpts *aopt =
- get_attribute_options(stats->attr->attrelid,
- stats->attr->attnum);
+ get_attribute_options(stats->attr->attrelid,
+ stats->attr->attnum);
stats->exprvals = exprvals + i;
stats->exprnulls = exprnulls + i;
}
/*
- * Now sample rows from each relation, proportionally to its fraction
- * of the total block count. (This might be less than desirable if the
- * child rels have radically different free-space percentages, but it's
- * not clear that it's worth working harder.)
+ * Now sample rows from each relation, proportionally to its fraction of
+ * the total block count. (This might be less than desirable if the child
+ * rels have radically different free-space percentages, but it's not
+ * clear that it's worth working harder.)
*/
numrows = 0;
*totalrows = 0;
if (childblocks > 0)
{
- int childtargrows;
+ int childtargrows;
childtargrows = (int) rint(targrows * childblocks / totalblocks);
/* Make sure we don't overrun due to roundoff error */
map = convert_tuples_by_name(RelationGetDescr(childrel),
RelationGetDescr(onerel),
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
if (map != NULL)
{
- int j;
+ int j;
for (j = 0; j < childrows; j++)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.154 2010/02/20 21:24:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.155 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* This struct declaration has the maximal length, but in a real queue entry
* the data area is only big enough for the actual channel and payload strings
- * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
+ * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible
* entry size, if both channel and payload strings are empty (but note it
* doesn't include alignment padding).
*
*/
typedef struct AsyncQueueEntry
{
- int length; /* total allocated length of entry */
- Oid dboid; /* sender's database OID */
- TransactionId xid; /* sender's XID */
- int32 srcPid; /* sender's PID */
- char data[NAMEDATALEN + NOTIFY_PAYLOAD_MAX_LENGTH];
+ int length; /* total allocated length of entry */
+ Oid dboid; /* sender's database OID */
+ TransactionId xid; /* sender's XID */
+ int32 srcPid; /* sender's PID */
+ char data[NAMEDATALEN + NOTIFY_PAYLOAD_MAX_LENGTH];
} AsyncQueueEntry;
/* Currently, no field of AsyncQueueEntry requires more than int alignment */
*/
typedef struct QueuePosition
{
- int page; /* SLRU page number */
- int offset; /* byte offset within page */
+ int page; /* SLRU page number */
+ int offset; /* byte offset within page */
} QueuePosition;
#define QUEUE_POS_PAGE(x) ((x).page)
*/
typedef struct QueueBackendStatus
{
- int32 pid; /* either a PID or InvalidPid */
- QueuePosition pos; /* backend has read queue up to here */
+ int32 pid; /* either a PID or InvalidPid */
+ QueuePosition pos; /* backend has read queue up to here */
} QueueBackendStatus;
-#define InvalidPid (-1)
+#define InvalidPid (-1)
/*
* Shared memory state for LISTEN/NOTIFY (excluding its SLRU stuff)
*/
typedef struct AsyncQueueControl
{
- QueuePosition head; /* head points to the next free location */
- QueuePosition tail; /* the global tail is equivalent to the
- tail of the "slowest" backend */
- TimestampTz lastQueueFillWarn; /* time of last queue-full msg */
- QueueBackendStatus backend[1]; /* actually of length MaxBackends+1 */
+ QueuePosition head; /* head points to the next free location */
+ QueuePosition tail; /* the global tail is equivalent to the tail
+ * of the "slowest" backend */
+ TimestampTz lastQueueFillWarn; /* time of last queue-full msg */
+ QueueBackendStatus backend[1]; /* actually of length MaxBackends+1 */
/* DO NOT ADD FURTHER STRUCT MEMBERS HERE */
} AsyncQueueControl;
-static AsyncQueueControl *asyncQueueControl;
+static AsyncQueueControl *asyncQueueControl;
#define QUEUE_HEAD (asyncQueueControl->head)
#define QUEUE_TAIL (asyncQueueControl->tail)
/*
* The SLRU buffer area through which we access the notification queue
*/
-static SlruCtlData AsyncCtlData;
+static SlruCtlData AsyncCtlData;
#define AsyncCtl (&AsyncCtlData)
#define QUEUE_PAGESIZE BLCKSZ
-#define QUEUE_FULL_WARN_INTERVAL 5000 /* warn at most once every 5s */
+#define QUEUE_FULL_WARN_INTERVAL 5000 /* warn at most once every 5s */
/*
* slru.c currently assumes that all filenames are four characters of hex
*
* The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2
* pages, because more than that would confuse slru.c into thinking there
- * was a wraparound condition. With the default BLCKSZ this means there
+ * was a wraparound condition. With the default BLCKSZ this means there
* can be up to 8GB of queued-and-not-read data.
*
* Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of
/*
* State for outbound notifies consists of a list of all channels+payloads
- * NOTIFYed in the current transaction. We do not actually perform a NOTIFY
+ * NOTIFYed in the current transaction. We do not actually perform a NOTIFY
* until and unless the transaction commits. pendingNotifies is NIL if no
* NOTIFYs have been done in the current transaction.
*
*/
typedef struct Notification
{
- char *channel; /* channel name */
- char *payload; /* payload string (can be empty) */
+ char *channel; /* channel name */
+ char *payload; /* payload string (can be empty) */
} Notification;
-static List *pendingNotifies = NIL; /* list of Notifications */
+static List *pendingNotifies = NIL; /* list of Notifications */
static List *upperPendingNotifies = NIL; /* list of upper-xact lists */
/* True if we've registered an on_shmem_exit cleanup */
static bool unlistenExitRegistered = false;
+
/* has this backend sent notifications in the current transaction? */
static bool backendHasSentNotifications = false;
+
/* has this backend executed its first LISTEN in the current transaction? */
static bool backendHasExecutedInitialListen = false;
static void asyncQueueAdvanceTail(void);
static void ProcessIncomingNotify(void);
static void NotifyMyFrontEnd(const char *channel,
- const char *payload,
- int32 srcPid);
+ const char *payload,
+ int32 srcPid);
static bool AsyncExistsPendingNotify(const char *channel, const char *payload);
static void ClearPendingActionsAndNotifies(void);
int diff;
/*
- * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should
- * be in the range 0..QUEUE_MAX_PAGE.
+ * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be
+ * in the range 0..QUEUE_MAX_PAGE.
*/
Assert(p >= 0 && p <= QUEUE_MAX_PAGE);
Assert(q >= 0 && q <= QUEUE_MAX_PAGE);
diff = p - q;
- if (diff >= ((QUEUE_MAX_PAGE+1)/2))
- diff -= QUEUE_MAX_PAGE+1;
- else if (diff < -((QUEUE_MAX_PAGE+1)/2))
- diff += QUEUE_MAX_PAGE+1;
+ if (diff >= ((QUEUE_MAX_PAGE + 1) / 2))
+ diff -= QUEUE_MAX_PAGE + 1;
+ else if (diff < -((QUEUE_MAX_PAGE + 1) / 2))
+ diff += QUEUE_MAX_PAGE + 1;
return diff < 0;
}
Size
AsyncShmemSize(void)
{
- Size size;
+ Size size;
/* This had better match AsyncShmemInit */
size = mul_size(MaxBackends, sizeof(QueueBackendStatus));
void
AsyncShmemInit(void)
{
- bool found;
- int slotno;
- Size size;
+ bool found;
+ int slotno;
+ Size size;
/*
* Create or attach to the AsyncQueueControl structure.
if (!found)
{
/* First time through, so initialize it */
- int i;
+ int i;
SET_QUEUE_POS(QUEUE_HEAD, 0, 0);
SET_QUEUE_POS(QUEUE_TAIL, 0, 0);
n->payload = "";
/*
- * We want to preserve the order so we need to append every
- * notification. See comments at AsyncExistsPendingNotify().
+ * We want to preserve the order so we need to append every notification.
+ * See comments at AsyncExistsPendingNotify().
*/
pendingNotifies = lappend(pendingNotifies, n);
Datum
pg_listening_channels(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- ListCell **lcp;
+ FuncCallContext *funcctx;
+ ListCell **lcp;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
while (*lcp != NULL)
{
- char *channel = (char *) lfirst(*lcp);
+ char *channel = (char *) lfirst(*lcp);
*lcp = lnext(*lcp);
SRF_RETURN_NEXT(funcctx, CStringGetTextDatum(channel));
/*
* Make sure that we have an XID assigned to the current transaction.
- * GetCurrentTransactionId is cheap if we already have an XID, but
- * not so cheap if we don't, and we'd prefer not to do that work
- * while holding AsyncQueueLock.
+ * GetCurrentTransactionId is cheap if we already have an XID, but not
+ * so cheap if we don't, and we'd prefer not to do that work while
+ * holding AsyncQueueLock.
*/
(void) GetCurrentTransactionId();
while (nextNotify != NULL)
{
/*
- * Add the pending notifications to the queue. We acquire and
+ * Add the pending notifications to the queue. We acquire and
* release AsyncQueueLock once per page, which might be overkill
* but it does allow readers to get in while we're doing this.
*
if (asyncQueueIsFull())
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("too many notifications in the NOTIFY queue")));
+ errmsg("too many notifications in the NOTIFY queue")));
nextNotify = asyncQueueAddEntries(nextNotify);
LWLockRelease(AsyncQueueLock);
}
}
/*
- * If we did an initial LISTEN, listenChannels now has the entry, so
- * we no longer need or want the flag to be set.
+ * If we did an initial LISTEN, listenChannels now has the entry, so we no
+ * longer need or want the flag to be set.
*/
backendHasExecutedInitialListen = false;
elog(DEBUG1, "Exec_ListenPreCommit(%d)", MyProcPid);
/*
- * We need this variable to detect an aborted initial LISTEN.
- * In that case we would set up our pointer but not listen on any channel.
- * This flag gets cleared in AtCommit_Notify or AtAbort_Notify().
+ * We need this variable to detect an aborted initial LISTEN. In that case
+ * we would set up our pointer but not listen on any channel. This flag
+ * gets cleared in AtCommit_Notify or AtAbort_Notify().
*/
backendHasExecutedInitialListen = true;
/*
- * Before registering, make sure we will unlisten before dying.
- * (Note: this action does not get undone if we abort later.)
+ * Before registering, make sure we will unlisten before dying. (Note:
+ * this action does not get undone if we abort later.)
*/
if (!unlistenExitRegistered)
{
* already-committed notifications. Still, we could get notifications that
* have already committed before we started to LISTEN.
*
- * Note that we are not yet listening on anything, so we won't deliver
- * any notification to the frontend.
+ * Note that we are not yet listening on anything, so we won't deliver any
+ * notification to the frontend.
*
* This will also advance the global tail pointer if possible.
*/
static void
Exec_UnlistenCommit(const char *channel)
{
- ListCell *q;
- ListCell *prev;
+ ListCell *q;
+ ListCell *prev;
if (Trace_notify)
elog(DEBUG1, "Exec_UnlistenCommit(%s,%d)", channel, MyProcPid);
prev = NULL;
foreach(q, listenChannels)
{
- char *lchan = (char *) lfirst(q);
+ char *lchan = (char *) lfirst(q);
if (strcmp(lchan, channel) == 0)
{
* The reason that this is not done in AtCommit_Notify is that there is
* a nonzero chance of errors here (for example, encoding conversion errors
* while trying to format messages to our frontend). An error during
- * AtCommit_Notify would be a PANIC condition. The timing is also arranged
+ * AtCommit_Notify would be a PANIC condition. The timing is also arranged
* to ensure that a transaction's self-notifies are delivered to the frontend
* before it gets the terminating ReadyForQuery message.
*
* Note that we send signals and process the queue even if the transaction
- * eventually aborted. This is because we need to clean out whatever got
+ * eventually aborted. This is because we need to clean out whatever got
* added to the queue.
*
* NOTE: we are outside of any transaction here.
return;
/*
- * We reset the flag immediately; otherwise, if any sort of error
- * occurs below, we'd be locked up in an infinite loop, because
- * control will come right back here after error cleanup.
+ * We reset the flag immediately; otherwise, if any sort of error occurs
+ * below, we'd be locked up in an infinite loop, because control will come
+ * right back here after error cleanup.
*/
backendHasSentNotifications = false;
elog(DEBUG1, "ProcessCompletedNotifies");
/*
- * We must run asyncQueueReadAllNotifications inside a transaction,
- * else bad things happen if it gets an error.
+ * We must run asyncQueueReadAllNotifications inside a transaction, else
+ * bad things happen if it gets an error.
*/
StartTransactionCommand();
{
/*
* If we found no other listening backends, and we aren't listening
- * ourselves, then we must execute asyncQueueAdvanceTail to flush
- * the queue, because ain't nobody else gonna do it. This prevents
- * queue overflow when we're sending useless notifies to nobody.
- * (A new listener could have joined since we looked, but if so this
- * is harmless.)
+ * ourselves, then we must execute asyncQueueAdvanceTail to flush the
+ * queue, because ain't nobody else gonna do it. This prevents queue
+ * overflow when we're sending useless notifies to nobody. (A new
+ * listener could have joined since we looked, but if so this is
+ * harmless.)
*/
asyncQueueAdvanceTail();
}
/*
* Remove our entry from the listeners array when we are no longer listening
- * on any channel. NB: must not fail if we're already not listening.
+ * on any channel. NB: must not fail if we're already not listening.
*/
static void
asyncQueueUnregister(void)
{
- bool advanceTail;
+ bool advanceTail;
- Assert(listenChannels == NIL); /* else caller error */
+ Assert(listenChannels == NIL); /* else caller error */
LWLockAcquire(AsyncQueueLock, LW_SHARED);
/* check if entry is valid and oldest ... */
/*
* The queue is full if creating a new head page would create a page that
* logically precedes the current global tail pointer, ie, the head
- * pointer would wrap around compared to the tail. We cannot create such
+ * pointer would wrap around compared to the tail. We cannot create such
* a head page for fear of confusing slru.c. For safety we round the tail
* pointer back to a segment boundary (compare the truncation logic in
* asyncQueueAdvanceTail).
/*
* Advance the QueuePosition to the next entry, assuming that the current
- * entry is of length entryLength. If we jump to a new page the function
+ * entry is of length entryLength. If we jump to a new page the function
* returns true, else false.
*/
static bool
asyncQueueAdvance(QueuePosition *position, int entryLength)
{
- int pageno = QUEUE_POS_PAGE(*position);
- int offset = QUEUE_POS_OFFSET(*position);
- bool pageJump = false;
+ int pageno = QUEUE_POS_PAGE(*position);
+ int offset = QUEUE_POS_OFFSET(*position);
+ bool pageJump = false;
/*
* Move to the next writing position: First jump over what we have just
{
pageno++;
if (pageno > QUEUE_MAX_PAGE)
- pageno = 0; /* wrap around */
+ pageno = 0; /* wrap around */
offset = 0;
pageJump = true;
}
static void
asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe)
{
- size_t channellen = strlen(n->channel);
- size_t payloadlen = strlen(n->payload);
- int entryLength;
+ size_t channellen = strlen(n->channel);
+ size_t payloadlen = strlen(n->payload);
+ int entryLength;
Assert(channellen < NAMEDATALEN);
Assert(payloadlen < NOTIFY_PAYLOAD_MAX_LENGTH);
* the last byte which simplifies reading the page later.
*
* We are passed the list cell containing the next notification to write
- * and return the first still-unwritten cell back. Eventually we will return
+ * and return the first still-unwritten cell back. Eventually we will return
* NULL indicating all is done.
*
* We are holding AsyncQueueLock already from the caller and grab AsyncCtlLock
static ListCell *
asyncQueueAddEntries(ListCell *nextNotify)
{
- AsyncQueueEntry qe;
- int pageno;
- int offset;
- int slotno;
+ AsyncQueueEntry qe;
+ int pageno;
+ int offset;
+ int slotno;
/* We hold both AsyncQueueLock and AsyncCtlLock during this operation */
LWLockAcquire(AsyncCtlLock, LW_EXCLUSIVE);
while (nextNotify != NULL)
{
- Notification *n = (Notification *) lfirst(nextNotify);
+ Notification *n = (Notification *) lfirst(nextNotify);
/* Construct a valid queue entry in local variable qe */
asyncQueueNotificationToEntry(n, &qe);
*/
qe.length = QUEUE_PAGESIZE - offset;
qe.dboid = InvalidOid;
- qe.data[0] = '\0'; /* empty channel */
- qe.data[1] = '\0'; /* empty payload */
+ qe.data[0] = '\0'; /* empty channel */
+ qe.data[1] = '\0'; /* empty payload */
}
/* Now copy qe into the shared buffer page */
if (asyncQueueAdvance(&(QUEUE_HEAD), qe.length))
{
/*
- * Page is full, so we're done here, but first fill the next
- * page with zeroes. The reason to do this is to ensure that
- * slru.c's idea of the head page is always the same as ours,
- * which avoids boundary problems in SimpleLruTruncate. The
- * test in asyncQueueIsFull() ensured that there is room to
- * create this page without overrunning the queue.
+ * Page is full, so we're done here, but first fill the next page
+ * with zeroes. The reason to do this is to ensure that slru.c's
+ * idea of the head page is always the same as ours, which avoids
+ * boundary problems in SimpleLruTruncate. The test in
+ * asyncQueueIsFull() ensured that there is room to create this
+ * page without overrunning the queue.
*/
slotno = SimpleLruZeroPage(AsyncCtl, QUEUE_POS_PAGE(QUEUE_HEAD));
/* And exit the loop */
static void
asyncQueueFillWarning(void)
{
- int headPage = QUEUE_POS_PAGE(QUEUE_HEAD);
- int tailPage = QUEUE_POS_PAGE(QUEUE_TAIL);
- int occupied;
- double fillDegree;
- TimestampTz t;
+ int headPage = QUEUE_POS_PAGE(QUEUE_HEAD);
+ int tailPage = QUEUE_POS_PAGE(QUEUE_TAIL);
+ int occupied;
+ double fillDegree;
+ TimestampTz t;
occupied = headPage - tailPage;
if (occupied == 0)
return; /* fast exit for common case */
-
+
if (occupied < 0)
{
/* head has wrapped around, tail not yet */
- occupied += QUEUE_MAX_PAGE+1;
+ occupied += QUEUE_MAX_PAGE + 1;
}
- fillDegree = (double) occupied / (double) ((QUEUE_MAX_PAGE+1)/2);
+ fillDegree = (double) occupied / (double) ((QUEUE_MAX_PAGE + 1) / 2);
if (fillDegree < 0.5)
return;
if (TimestampDifferenceExceeds(asyncQueueControl->lastQueueFillWarn,
t, QUEUE_FULL_WARN_INTERVAL))
{
- QueuePosition min = QUEUE_HEAD;
- int32 minPid = InvalidPid;
- int i;
+ QueuePosition min = QUEUE_HEAD;
+ int32 minPid = InvalidPid;
+ int i;
for (i = 1; i <= MaxBackends; i++)
{
int32 pid;
/*
- * Identify all backends that are listening and not already up-to-date.
- * We don't want to send signals while holding the AsyncQueueLock, so
- * we just build a list of target PIDs.
+ * Identify all backends that are listening and not already up-to-date. We
+ * don't want to send signals while holding the AsyncQueueLock, so we just
+ * build a list of target PIDs.
*
- * XXX in principle these pallocs could fail, which would be bad.
- * Maybe preallocate the arrays? But in practice this is only run
- * in trivial transactions, so there should surely be space available.
+ * XXX in principle these pallocs could fail, which would be bad. Maybe
+ * preallocate the arrays? But in practice this is only run in trivial
+ * transactions, so there should surely be space available.
*/
pids = (int32 *) palloc(MaxBackends * sizeof(int32));
ids = (BackendId *) palloc(MaxBackends * sizeof(BackendId));
/*
* Note: assuming things aren't broken, a signal failure here could
* only occur if the target backend exited since we released
- * AsyncQueueLock; which is unlikely but certainly possible.
- * So we just log a low-level debug message if it happens.
+ * AsyncQueueLock; which is unlikely but certainly possible. So we
+ * just log a low-level debug message if it happens.
*/
if (SendProcSignal(pid, PROCSIG_NOTIFY_INTERRUPT, ids[i]) < 0)
elog(DEBUG3, "could not signal backend with PID %d: %m", pid);
{
/*
* If we LISTEN but then roll back the transaction we have set our pointer
- * but have not made any entry in listenChannels. In that case, remove
- * our pointer again.
+ * but have not made any entry in listenChannels. In that case, remove our
+ * pointer again.
*/
if (backendHasExecutedInitialListen)
{
* is disabled until the next EnableNotifyInterrupt call.
*
* The PROCSIG_CATCHUP_INTERRUPT signal handler also needs to call this,
- * so as to prevent conflicts if one signal interrupts the other. So we
+ * so as to prevent conflicts if one signal interrupts the other. So we
* must return the previous state of the flag.
*/
bool
static void
asyncQueueReadAllNotifications(void)
{
- QueuePosition pos;
- QueuePosition oldpos;
- QueuePosition head;
+ QueuePosition pos;
+ QueuePosition oldpos;
+ QueuePosition head;
bool advanceTail;
+
/* page_buffer must be adequately aligned, so use a union */
- union {
+ union
+ {
char buf[QUEUE_PAGESIZE];
AsyncQueueEntry align;
- } page_buffer;
+ } page_buffer;
/* Fetch current state */
LWLockAcquire(AsyncQueueLock, LW_SHARED);
* Especially we do not take into account different commit times.
* Consider the following example:
*
- * Backend 1: Backend 2:
+ * Backend 1: Backend 2:
*
* transaction starts
* NOTIFY foo;
* commit starts
- * transaction starts
- * LISTEN foo;
- * commit starts
+ * transaction starts
+ * LISTEN foo;
+ * commit starts
* commit to clog
- * commit to clog
+ * commit to clog
*
* It could happen that backend 2 sees the notification from backend 1 in
* the queue. Even though the notifying transaction committed before
{
bool reachedStop;
- do
+ do
{
int curpage = QUEUE_POS_PAGE(pos);
int curoffset = QUEUE_POS_OFFSET(pos);
/*
* We copy the data from SLRU into a local buffer, so as to avoid
* holding the AsyncCtlLock while we are examining the entries and
- * possibly transmitting them to our frontend. Copy only the part
+ * possibly transmitting them to our frontend. Copy only the part
* of the page we will actually inspect.
*/
slotno = SimpleLruReadPage_ReadOnly(AsyncCtl, curpage,
/* we only want to read as far as head */
copysize = QUEUE_POS_OFFSET(head) - curoffset;
if (copysize < 0)
- copysize = 0; /* just for safety */
+ copysize = 0; /* just for safety */
}
else
{
* uncommitted message.
*
* Our stop position is what we found to be the head's position
- * when we entered this function. It might have changed
- * already. But if it has, we will receive (or have already
- * received and queued) another signal and come here again.
+ * when we entered this function. It might have changed already.
+ * But if it has, we will receive (or have already received and
+ * queued) another signal and come here again.
*
* We are not holding AsyncQueueLock here! The queue can only
* extend beyond the head pointer (see above) and we leave our
* and deliver relevant ones to my frontend.
*
* The current page must have been fetched into page_buffer from shared
- * memory. (We could access the page right in shared memory, but that
+ * memory. (We could access the page right in shared memory, but that
* would imply holding the AsyncCtlLock throughout this routine.)
*
* We stop if we reach the "stop" position, or reach a notification from an
{
bool reachedStop = false;
bool reachedEndOfPage;
- AsyncQueueEntry *qe;
+ AsyncQueueEntry *qe;
do
{
- QueuePosition thisentry = *current;
+ QueuePosition thisentry = *current;
if (QUEUE_POS_EQUAL(thisentry, stop))
break;
qe = (AsyncQueueEntry *) (page_buffer + QUEUE_POS_OFFSET(thisentry));
/*
- * Advance *current over this message, possibly to the next page.
- * As noted in the comments for asyncQueueReadAllNotifications, we
- * must do this before possibly failing while processing the message.
+ * Advance *current over this message, possibly to the next page. As
+ * noted in the comments for asyncQueueReadAllNotifications, we must
+ * do this before possibly failing while processing the message.
*/
reachedEndOfPage = asyncQueueAdvance(current, qe->length);
if (TransactionIdDidCommit(qe->xid))
{
/* qe->data is the null-terminated channel name */
- char *channel = qe->data;
+ char *channel = qe->data;
if (IsListeningOn(channel))
{
/* payload follows channel name */
- char *payload = qe->data + strlen(channel) + 1;
+ char *payload = qe->data + strlen(channel) + 1;
NotifyMyFrontEnd(channel, payload, qe->srcPid);
}
{
/*
* The transaction has neither committed nor aborted so far,
- * so we can't process its message yet. Break out of the loop,
- * but first back up *current so we will reprocess the message
- * next time. (Note: it is unlikely but not impossible for
- * TransactionIdDidCommit to fail, so we can't really avoid
- * this advance-then-back-up behavior when dealing with an
- * uncommitted message.)
+ * so we can't process its message yet. Break out of the
+ * loop, but first back up *current so we will reprocess the
+ * message next time. (Note: it is unlikely but not
+ * impossible for TransactionIdDidCommit to fail, so we can't
+ * really avoid this advance-then-back-up behavior when
+ * dealing with an uncommitted message.)
*/
*current = thisentry;
reachedStop = true;
static void
asyncQueueAdvanceTail(void)
{
- QueuePosition min;
- int i;
- int oldtailpage;
- int newtailpage;
- int boundary;
+ QueuePosition min;
+ int i;
+ int oldtailpage;
+ int newtailpage;
+ int boundary;
LWLockAcquire(AsyncQueueLock, LW_EXCLUSIVE);
min = QUEUE_HEAD;
* We can truncate something if the global tail advanced across an SLRU
* segment boundary.
*
- * XXX it might be better to truncate only once every several segments,
- * to reduce the number of directory scans.
+ * XXX it might be better to truncate only once every several segments, to
+ * reduce the number of directory scans.
*/
newtailpage = QUEUE_POS_PAGE(min);
boundary = newtailpage - (newtailpage % SLRU_PAGES_PER_SEGMENT);
if (asyncQueuePagePrecedesLogically(oldtailpage, boundary))
{
/*
- * SimpleLruTruncate() will ask for AsyncCtlLock but will also
- * release the lock again.
+ * SimpleLruTruncate() will ask for AsyncCtlLock but will also release
+ * the lock again.
*/
SimpleLruTruncate(AsyncCtl, newtailpage);
}
notifyInterruptOccurred = 0;
/*
- * We must run asyncQueueReadAllNotifications inside a transaction,
- * else bad things happen if it gets an error.
+ * We must run asyncQueueReadAllNotifications inside a transaction, else
+ * bad things happen if it gets an error.
*/
StartTransactionCommand();
/*-------------------------------------------------------------------------
*
* cluster.c
- * CLUSTER a table on an index. This is now also used for VACUUM FULL.
+ * CLUSTER a table on an index. This is now also used for VACUUM FULL.
*
* There is hardly anything left of Paul Brown's original implementation...
*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.201 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.202 2010/02/26 02:00:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void rebuild_relation(Relation OldHeap, Oid indexOid,
- int freeze_min_age, int freeze_table_age);
+ int freeze_min_age, int freeze_table_age);
static void copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
int freeze_min_age, int freeze_table_age,
bool *pSwapToastByContent, TransactionId *pFreezeXid);
* them incrementally while we load the table.
*
* If indexOid is InvalidOid, the table will be rewritten in physical order
- * instead of index order. This is the new implementation of VACUUM FULL,
+ * instead of index order. This is the new implementation of VACUUM FULL,
* and error messages should refer to the operation as VACUUM not CLUSTER.
*/
void
* check in the "recheck" case is appropriate (which currently means
* somebody is executing a database-wide CLUSTER), because there is
* another check in cluster() which will stop any attempt to cluster
- * remote temp tables by name. There is another check in
- * cluster_rel which is redundant, but we leave it for extra safety.
+ * remote temp tables by name. There is another check in cluster_rel
+ * which is redundant, but we leave it for extra safety.
*/
if (RELATION_IS_OTHER_TEMP(OldHeap))
{
* Check that the index is still the one with indisclustered set.
*/
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(indexOid));
- if (!HeapTupleIsValid(tuple)) /* probably can't happen */
+ if (!HeapTupleIsValid(tuple)) /* probably can't happen */
{
relation_close(OldHeap, AccessExclusiveLock);
return;
errmsg("cannot cluster a shared catalog")));
/*
- * Don't process temp tables of other backends ... their local
- * buffer manager is not going to cope.
+ * Don't process temp tables of other backends ... their local buffer
+ * manager is not going to cope.
*/
if (RELATION_IS_OTHER_TEMP(OldHeap))
{
if (OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temporary tables of other sessions")));
+ errmsg("cannot cluster temporary tables of other sessions")));
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot vacuum temporary tables of other sessions")));
+ errmsg("cannot vacuum temporary tables of other sessions")));
}
/*
* the old, or we will have problems with the TEMP status of temp tables.
*
* Note: the new heap is not a shared relation, even if we are rebuilding
- * a shared rel. However, we do make the new heap mapped if the source
- * is mapped. This simplifies swap_relation_files, and is absolutely
+ * a shared rel. However, we do make the new heap mapped if the source is
+ * mapped. This simplifies swap_relation_files, and is absolutely
* necessary for rebuilding pg_class, for reasons explained there.
*/
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", OIDOldHeap);
* If necessary, create a TOAST table for the new relation.
*
* If the relation doesn't have a TOAST table already, we can't need one
- * for the new relation. The other way around is possible though: if
- * some wide columns have been dropped, AlterTableCreateToastTable
- * can decide that no TOAST table is needed for the new table.
+ * for the new relation. The other way around is possible though: if some
+ * wide columns have been dropped, AlterTableCreateToastTable can decide
+ * that no TOAST table is needed for the new table.
*
* Note that AlterTableCreateToastTable ends with CommandCounterIncrement,
* so that the TOAST table will be visible for insertion.
isnull = (bool *) palloc(natts * sizeof(bool));
/*
- * We need to log the copied data in WAL iff WAL archiving/streaming
- * is enabled AND it's not a temp rel.
+ * We need to log the copied data in WAL iff WAL archiving/streaming is
+ * enabled AND it's not a temp rel.
*/
use_wal = XLogIsNeeded() && !NewHeap->rd_istemp;
/*
- * Write an XLOG UNLOGGED record if WAL-logging was skipped because
- * WAL archiving is not enabled.
+ * Write an XLOG UNLOGGED record if WAL-logging was skipped because WAL
+ * archiving is not enabled.
*/
if (!use_wal && !NewHeap->rd_istemp)
{
- char reason[NAMEDATALEN + 32];
+ char reason[NAMEDATALEN + 32];
if (OldIndex != NULL)
snprintf(reason, sizeof(reason), "CLUSTER on \"%s\"",
/*
* If both tables have TOAST tables, perform toast swap by content. It is
* possible that the old table has a toast table but the new one doesn't,
- * if toastable columns have been dropped. In that case we have to do
+ * if toastable columns have been dropped. In that case we have to do
* swap by links. This is okay because swap by content is only essential
* for system catalogs, and we don't support schema changes for them.
*/
* data will eventually be found. Set this up by setting rd_toastoid.
* Note that we must hold NewHeap open until we are done writing data,
* since the relcache will not guarantee to remember this setting once
- * the relation is closed. Also, this technique depends on the fact
+ * the relation is closed. Also, this technique depends on the fact
* that no one will try to read from the NewHeap until after we've
* finished writing it and swapping the rels --- otherwise they could
* follow the toast pointers to the wrong place.
rwstate = begin_heap_rewrite(NewHeap, OldestXmin, FreezeXid, use_wal);
/*
- * Scan through the OldHeap, either in OldIndex order or sequentially,
- * and copy each tuple into the NewHeap. To ensure we see recently-dead
+ * Scan through the OldHeap, either in OldIndex order or sequentially, and
+ * copy each tuple into the NewHeap. To ensure we see recently-dead
* tuples that still need to be copied, we scan with SnapshotAny and use
* HeapTupleSatisfiesVacuum for the visibility test.
*/
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
- * Since we hold exclusive lock on the relation, normally
- * the only way to see this is if it was inserted earlier
- * in our own transaction. However, it can happen in system
+ * Since we hold exclusive lock on the relation, normally the
+ * only way to see this is if it was inserted earlier in our
+ * own transaction. However, it can happen in system
* catalogs, since we tend to release write lock before commit
- * there. Give a warning if neither case applies; but in
- * any case we had better copy it.
+ * there. Give a warning if neither case applies; but in any
+ * case we had better copy it.
*/
if (!is_system_catalog &&
!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
NameStr(relform2->relname), r2);
/*
- * Send replacement mappings to relmapper. Note these won't actually
+ * Send replacement mappings to relmapper. Note these won't actually
* take effect until CommandCounterIncrement.
*/
RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false);
/*
* In the case of a shared catalog, these next few steps will only affect
- * our own database's pg_class row; but that's okay, because they are
- * all noncritical updates. That's also an important fact for the case
- * of a mapped catalog, because it's possible that we'll commit the map
- * change and then fail to commit the pg_class update.
+ * our own database's pg_class row; but that's okay, because they are all
+ * noncritical updates. That's also an important fact for the case of a
+ * mapped catalog, because it's possible that we'll commit the map change
+ * and then fail to commit the pg_class update.
*/
/* set rel1's frozen Xid */
/*
* Update the tuples in pg_class --- unless the target relation of the
* swap is pg_class itself. In that case, there is zero point in making
- * changes because we'd be updating the old data that we're about to
- * throw away. Because the real work being done here for a mapped relation
- * is just to change the relation map settings, it's all right to not
- * update the pg_class rows in this case.
+ * changes because we'd be updating the old data that we're about to throw
+ * away. Because the real work being done here for a mapped relation is
+ * just to change the relation map settings, it's all right to not update
+ * the pg_class rows in this case.
*/
if (!target_is_pg_class)
{
/*
* We disallow this case for system catalogs, to avoid the
* possibility that the catalog we're rebuilding is one of the
- * ones the dependency changes would change. It's too late
- * to be making any data changes to the target catalog.
+ * ones the dependency changes would change. It's too late to be
+ * making any data changes to the target catalog.
*/
if (IsSystemClass(relform1))
elog(ERROR, "cannot swap toast files by links for system catalogs");
*/
if (swap_toast_by_content &&
relform1->reltoastidxid && relform2->reltoastidxid)
- swap_relation_files(relform1->reltoastidxid,
- relform2->reltoastidxid,
- target_is_pg_class,
- swap_toast_by_content,
- InvalidTransactionId,
- mapped_tables);
+ swap_relation_files(relform1->reltoastidxid,
+ relform2->reltoastidxid,
+ target_is_pg_class,
+ swap_toast_by_content,
+ InvalidTransactionId,
+ mapped_tables);
/* Clean up. */
heap_freetuple(reltup1);
* non-transient relation.)
*
* Caution: the placement of this step interacts with the decision to
- * handle toast rels by recursion. When we are trying to rebuild pg_class
+ * handle toast rels by recursion. When we are trying to rebuild pg_class
* itself, the smgr close on pg_class must happen after all accesses in
* this function.
*/
/*
* Rebuild each index on the relation (but not the toast table, which is
- * all-new at this point). It is important to do this before the DROP
+ * all-new at this point). It is important to do this before the DROP
* step because if we are processing a system catalog that will be used
- * during DROP, we want to have its indexes available. There is no
+ * during DROP, we want to have its indexes available. There is no
* advantage to the other order anyway because this is all transactional,
- * so no chance to reclaim disk space before commit. We do not need
- * a final CommandCounterIncrement() because reindex_relation does it.
+ * so no chance to reclaim disk space before commit. We do not need a
+ * final CommandCounterIncrement() because reindex_relation does it.
*/
reindex_relation(OIDOldHeap, false, true);
/*
* Now we must remove any relation mapping entries that we set up for the
- * transient table, as well as its toast table and toast index if any.
- * If we fail to do this before commit, the relmapper will complain about
- * new permanent map entries being added post-bootstrap.
+ * transient table, as well as its toast table and toast index if any. If
+ * we fail to do this before commit, the relmapper will complain about new
+ * permanent map entries being added post-bootstrap.
*/
for (i = 0; OidIsValid(mapped_tables[i]); i++)
RelationMapRemoveMapping(mapped_tables[i]);
* Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.113 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.114 2010/02/26 02:00:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
sd = systable_beginscan(description, DescriptionObjIndexId, true,
SnapshotNow, 3, skey);
- comment = NULL;
+ comment = NULL;
while ((tuple = systable_getnext(sd)) != NULL)
{
- Datum value;
- bool isnull;
+ Datum value;
+ bool isnull;
/* Found the tuple, get description field */
value = heap_getattr(tuple, Anum_pg_description_description, tupdesc, &isnull);
* Allow comments only on columns of tables, views, and composite types
* (which are the only relkinds for which pg_dump will dump per-column
* comments). In particular we wish to disallow comments on index
- * columns, because the naming of an index's columns may change across
- * PG versions, so dumping per-column comments could create reload
- * failures.
+ * columns, because the naming of an index's columns may change across PG
+ * versions, so dumping per-column comments could create reload failures.
*/
if (relation->rd_rel->relkind != RELKIND_RELATION &&
relation->rd_rel->relkind != RELKIND_VIEW &&
/* Find the rule's pg_rewrite tuple, get its OID */
tuple = SearchSysCache2(RULERELNAME,
- ObjectIdGetDatum(reloid),
+ ObjectIdGetDatum(reloid),
PointerGetDatum(rulename));
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
namespaceId = LookupExplicitNamespace(schemaname);
tuple = SearchSysCache3(OPFAMILYAMNAMENSP,
ObjectIdGetDatum(amID),
- PointerGetDatum(opfname),
+ PointerGetDatum(opfname),
ObjectIdGetDatum(namespaceId));
}
else
/*
* Call CreateComments() to create/drop the comments
*
- * See the comment in the inv_create() which describes
- * the reason why LargeObjectRelationId is used instead
- * of LargeObjectMetadataRelationId.
+ * See the comment in the inv_create() which describes the reason why
+ * LargeObjectRelationId is used instead of LargeObjectMetadataRelationId.
*/
CreateComments(loid, LargeObjectRelationId, 0, comment);
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/constraint.c,v 1.3 2010/01/02 16:57:37 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/constraint.c,v 1.4 2010/02/26 02:00:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool isnull[INDEX_MAX_KEYS];
/*
- * Make sure this is being called as an AFTER ROW trigger. Note:
- * translatable error strings are shared with ri_triggers.c, so
- * resist the temptation to fold the function name into them.
+ * Make sure this is being called as an AFTER ROW trigger. Note:
+ * translatable error strings are shared with ri_triggers.c, so resist the
+ * temptation to fold the function name into them.
*/
if (!CALLED_AS_TRIGGER(fcinfo))
ereport(ERROR,
* If the new_row is now dead (ie, inserted and then deleted within our
* transaction), we can skip the check. However, we have to be careful,
* because this trigger gets queued only in response to index insertions;
- * which means it does not get queued for HOT updates. The row we are
+ * which means it does not get queued for HOT updates. The row we are
* called for might now be dead, but have a live HOT child, in which case
* we still need to make the check. Therefore we have to use
* heap_hot_search, not just HeapTupleSatisfiesVisibility as is done in
}
/*
- * Open the index, acquiring a RowExclusiveLock, just as if we were
- * going to update it. (This protects against possible changes of the
- * index schema, not against concurrent updates.)
+ * Open the index, acquiring a RowExclusiveLock, just as if we were going
+ * to update it. (This protects against possible changes of the index
+ * schema, not against concurrent updates.)
*/
indexRel = index_open(trigdata->tg_trigger->tgconstrindid,
RowExclusiveLock);
ExecStoreTuple(new_row, slot, InvalidBuffer, false);
/*
- * Typically the index won't have expressions, but if it does we need
- * an EState to evaluate them. We need it for exclusion constraints
- * too, even if they are just on simple columns.
+ * Typically the index won't have expressions, but if it does we need an
+ * EState to evaluate them. We need it for exclusion constraints too,
+ * even if they are just on simple columns.
*/
if (indexInfo->ii_Expressions != NIL ||
indexInfo->ii_ExclusionOps != NULL)
estate = NULL;
/*
- * Form the index values and isnull flags for the index entry that
- * we need to check.
+ * Form the index values and isnull flags for the index entry that we need
+ * to check.
*
- * Note: if the index uses functions that are not as immutable as they
- * are supposed to be, this could produce an index tuple different from
- * the original. The index AM can catch such errors by verifying that
- * it finds a matching index entry with the tuple's TID. For exclusion
+ * Note: if the index uses functions that are not as immutable as they are
+ * supposed to be, this could produce an index tuple different from the
+ * original. The index AM can catch such errors by verifying that it
+ * finds a matching index entry with the tuple's TID. For exclusion
* constraints we check this in check_exclusion_constraint().
*/
FormIndexDatum(indexInfo, slot, estate, values, isnull);
else
{
/*
- * For exclusion constraints we just do the normal check, but now
- * it's okay to throw error.
+ * For exclusion constraints we just do the normal check, but now it's
+ * okay to throw error.
*/
check_exclusion_constraint(trigdata->tg_relation, indexRel, indexInfo,
&(new_row->t_self), values, isnull,
}
/*
- * If that worked, then this index entry is unique or non-excluded,
- * and we are done.
+ * If that worked, then this index entry is unique or non-excluded, and we
+ * are done.
*/
if (estate != NULL)
FreeExecutorState(estate);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.325 2010/02/20 21:24:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.326 2010/02/26 02:00:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (strcmp(defel->defname, "format") == 0)
{
- char *fmt = defGetString(defel);
+ char *fmt = defGetString(defel);
if (format_specified)
ereport(ERROR,
errmsg("conflicting or redundant options")));
format_specified = true;
if (strcmp(fmt, "text") == 0)
- /* default format */ ;
+ /* default format */ ;
else if (strcmp(fmt, "csv") == 0)
cstate->csv_mode = true;
else if (strcmp(fmt, "binary") == 0)
force_quote = (List *) defel->arg;
else
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument to option \"%s\" must be a list of column names",
- defel->defname)));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("argument to option \"%s\" must be a list of column names",
+ defel->defname)));
}
else if (strcmp(defel->defname, "force_not_null") == 0)
{
force_notnull = (List *) defel->arg;
else
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument to option \"%s\" must be a list of column names",
- defel->defname)));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("argument to option \"%s\" must be a list of column names",
+ defel->defname)));
}
else
ereport(ERROR,
cstate->force_quote_flags = (bool *) palloc0(num_phys_attrs * sizeof(bool));
if (force_quote_all)
{
- int i;
+ int i;
for (i = 0; i < num_phys_attrs; i++)
cstate->force_quote_flags[i] = true;
if (!skip_tuple)
{
- List *recheckIndexes = NIL;
+ List *recheckIndexes = NIL;
/* Place tuple in tuple slot */
ExecStoreTuple(tuple, slot, InvalidBuffer, false);
*/
if (hi_options & HEAP_INSERT_SKIP_WAL)
{
- char reason[NAMEDATALEN + 30];
+ char reason[NAMEDATALEN + 30];
+
snprintf(reason, sizeof(reason), "COPY FROM on \"%s\"",
RelationGetRelationName(cstate->rel));
XLogReportUnloggedStatement(reason);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.234 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.235 2010/02/26 02:00:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* fails when presented with data in an encoding it's not expecting. We
* allow mismatch in four cases:
*
- * 1. locale encoding = SQL_ASCII, which means that the locale is
- * C/POSIX which works with any encoding.
+ * 1. locale encoding = SQL_ASCII, which means that the locale is C/POSIX
+ * which works with any encoding.
*
- * 2. locale encoding = -1, which means that we couldn't determine
- * the locale's encoding and have to trust the user to get it right.
+ * 2. locale encoding = -1, which means that we couldn't determine the
+ * locale's encoding and have to trust the user to get it right.
*
* 3. selected encoding is UTF8 and platform is win32. This is because
* UTF8 is a pseudo codepage that is supported in all locales since it's
/*
* We deliberately set datacl to default (NULL), rather than copying it
- * from the template database. Copying it would be a bad idea when the
+ * from the template database. Copying it would be a bad idea when the
* owner is not the same as the template's owner.
*/
new_record_nulls[Anum_pg_database_datacl - 1] = true;
heap_close(pgdbrel, NoLock);
/*
- * Force synchronous commit, thus minimizing the window between removal
- * of the database files and commital of the transaction. If we crash
- * before committing, we'll have a DB that's gone on disk but still there
+ * Force synchronous commit, thus minimizing the window between removal of
+ * the database files and commital of the transaction. If we crash before
+ * committing, we'll have a DB that's gone on disk but still there
* according to pg_database, which is not good.
*/
ForceSyncCommit();
void
AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
{
- Oid datid = get_database_oid(stmt->dbname);
+ Oid datid = get_database_oid(stmt->dbname);
if (!OidIsValid(datid))
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("database \"%s\" does not exist", stmt->dbname)));
-
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_DATABASE),
+ errmsg("database \"%s\" does not exist", stmt->dbname)));
+
/*
* Obtain a lock on the database and make sure it didn't go away in the
* meantime.
shdepLockAndCheckObject(DatabaseRelationId, datid);
if (!pg_database_ownercheck(datid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
- stmt->dbname);
+ aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
+ stmt->dbname);
AlterSetting(datid, InvalidOid, stmt->setstmt);
-
+
UnlockSharedObject(DatabaseRelationId, datid, 0, AccessShareLock);
}
if (InHotStandby)
{
/*
- * Lock database while we resolve conflicts to ensure that InitPostgres()
- * cannot fully re-execute concurrently. This avoids backends re-connecting
- * automatically to same database, which can happen in some cases.
+ * Lock database while we resolve conflicts to ensure that
+ * InitPostgres() cannot fully re-execute concurrently. This
+ * avoids backends re-connecting automatically to same database,
+ * which can happen in some cases.
*/
LockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock);
ResolveRecoveryConflictWithDatabase(xlrec->db_id);
if (InHotStandby)
{
/*
- * Release locks prior to commit. XXX There is a race condition here that may allow
- * backends to reconnect, but the window for this is small because the gap between
- * here and commit is mostly fairly small and it is unlikely that people will be
- * dropping databases that we are trying to connect to anyway.
+ * Release locks prior to commit. XXX There is a race condition
+ * here that may allow backends to reconnect, but the window for
+ * this is small because the gap between here and commit is mostly
+ * fairly small and it is unlikely that people will be dropping
+ * databases that we are trying to connect to anyway.
*/
UnlockSharedObjectForSession(DatabaseRelationId, xlrec->db_id, 0, AccessExclusiveLock);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.107 2010/01/02 16:57:37 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.108 2010/02/26 02:00:38 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
char *sval = defGetString(def);
/*
- * The set of strings accepted here should match up with
- * the grammar's opt_boolean production.
+ * The set of strings accepted here should match up with the
+ * grammar's opt_boolean production.
*/
if (pg_strcasecmp(sval, "true") == 0)
return true;
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.203 2010/02/16 22:19:59 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.204 2010/02/26 02:00:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ExplainState *es);
static double elapsed_time(instr_time *starttime);
static void ExplainNode(Plan *plan, PlanState *planstate,
- Plan *outer_plan,
- const char *relationship, const char *plan_name,
- ExplainState *es);
+ Plan *outer_plan,
+ const char *relationship, const char *plan_name,
+ ExplainState *es);
static void show_plan_tlist(Plan *plan, ExplainState *es);
static void show_qual(List *qual, const char *qlabel, Plan *plan,
Plan *outer_plan, bool useprefix, ExplainState *es);
static void ExplainMemberNodes(List *plans, PlanState **planstate,
Plan *outer_plan, ExplainState *es);
static void ExplainSubPlans(List *plans, const char *relationship,
- ExplainState *es);
+ ExplainState *es);
static void ExplainPropertyList(const char *qlabel, List *data,
- ExplainState *es);
+ ExplainState *es);
static void ExplainProperty(const char *qlabel, const char *value,
- bool numeric, ExplainState *es);
-#define ExplainPropertyText(qlabel, value, es) \
+ bool numeric, ExplainState *es);
+
+#define ExplainPropertyText(qlabel, value, es) \
ExplainProperty(qlabel, value, false, es)
static void ExplainPropertyInteger(const char *qlabel, int value,
- ExplainState *es);
+ ExplainState *es);
static void ExplainPropertyLong(const char *qlabel, long value,
- ExplainState *es);
+ ExplainState *es);
static void ExplainPropertyFloat(const char *qlabel, double value, int ndigits,
- ExplainState *es);
+ ExplainState *es);
static void ExplainOpenGroup(const char *objtype, const char *labelname,
bool labeled, ExplainState *es);
static void ExplainCloseGroup(const char *objtype, const char *labelname,
- bool labeled, ExplainState *es);
+ bool labeled, ExplainState *es);
static void ExplainDummyGroup(const char *objtype, const char *labelname,
- ExplainState *es);
+ ExplainState *es);
static void ExplainXMLTag(const char *tagname, int flags, ExplainState *es);
static void ExplainJSONLineEnding(ExplainState *es);
static void ExplainYAMLLineStarting(ExplainState *es);
/* Parse options list. */
foreach(lc, stmt->options)
{
- DefElem *opt = (DefElem *) lfirst(lc);
+ DefElem *opt = (DefElem *) lfirst(lc);
if (strcmp(opt->defname, "analyze") == 0)
es.analyze = defGetBoolean(opt);
es.buffers = defGetBoolean(opt);
else if (strcmp(opt->defname, "format") == 0)
{
- char *p = defGetString(opt);
+ char *p = defGetString(opt);
if (strcmp(p, "text") == 0)
es.format = EXPLAIN_FORMAT_TEXT;
es.format = EXPLAIN_FORMAT_YAML;
else
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized value for EXPLAIN option \"%s\": \"%s\"",
- opt->defname, p)));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("unrecognized value for EXPLAIN option \"%s\": \"%s\"",
+ opt->defname, p)));
}
else
ereport(ERROR,
if (es.buffers && !es.analyze)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("EXPLAIN option BUFFERS requires ANALYZE")));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("EXPLAIN option BUFFERS requires ANALYZE")));
/*
* Parse analysis was done already, but we still have to run the rule
- * rewriter. We do not do AcquireRewriteLocks: we assume the query
- * either came straight from the parser, or suitable locks were
- * acquired by plancache.c.
+ * rewriter. We do not do AcquireRewriteLocks: we assume the query either
+ * came straight from the parser, or suitable locks were acquired by
+ * plancache.c.
*
* Because the rewriter and planner tend to scribble on the input, we make
* a preliminary copy of the source querytree. This prevents problems in
/* Check for XML format option */
foreach(lc, stmt->options)
{
- DefElem *opt = (DefElem *) lfirst(lc);
+ DefElem *opt = (DefElem *) lfirst(lc);
if (strcmp(opt->defname, "format") == 0)
{
- char *p = defGetString(opt);
+ char *p = defGetString(opt);
xml = (strcmp(p, "xml") == 0);
/* don't "break", as ExplainQuery will use the last value */
{
if (es->format == EXPLAIN_FORMAT_TEXT)
appendStringInfoString(es->str,
- "Utility statements have no plan structure\n");
+ "Utility statements have no plan structure\n");
else
ExplainDummyGroup("Utility Statement", NULL, es);
}
* convert a QueryDesc's plan tree to text and append it to es->str
*
* The caller should have set up the options fields of *es, as well as
- * initializing the output buffer es->str. Other fields in *es are
+ * initializing the output buffer es->str. Other fields in *es are
* initialized here.
*
* NB: will not work on utility statements
/*
* ExplainQueryText -
- * add a "Query Text" node that contains the actual text of the query
- *
+ * add a "Query Text" node that contains the actual text of the query
+ *
* The caller should have set up the options fields of *es, as well as
- * initializing the output buffer es->str.
+ * initializing the output buffer es->str.
*
*/
void
/*
* In text format, we avoid printing both the trigger name and the
- * constraint name unless VERBOSE is specified. In non-text
- * formats we just print everything.
+ * constraint name unless VERBOSE is specified. In non-text formats
+ * we just print everything.
*/
if (es->format == EXPLAIN_FORMAT_TEXT)
{
pname = sname = "Nested Loop";
break;
case T_MergeJoin:
- pname = "Merge"; /* "Join" gets added by jointype switch */
+ pname = "Merge"; /* "Join" gets added by jointype switch */
sname = "Merge Join";
break;
case T_HashJoin:
- pname = "Hash"; /* "Join" gets added by jointype switch */
+ pname = "Hash"; /* "Join" gets added by jointype switch */
sname = "Hash Join";
break;
case T_SeqScan:
{
case T_IndexScan:
{
- IndexScan *indexscan = (IndexScan *) plan;
+ IndexScan *indexscan = (IndexScan *) plan;
const char *indexname =
- explain_get_index_name(indexscan->indexid);
+ explain_get_index_name(indexscan->indexid);
if (es->format == EXPLAIN_FORMAT_TEXT)
{
{
BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan;
const char *indexname =
- explain_get_index_name(bitmapindexscan->indexid);
+ explain_get_index_name(bitmapindexscan->indexid);
if (es->format == EXPLAIN_FORMAT_TEXT)
appendStringInfo(es->str, " on %s", indexname);
if (es->format == EXPLAIN_FORMAT_TEXT)
{
- bool has_shared = (usage->shared_blks_hit > 0 ||
- usage->shared_blks_read > 0 ||
- usage->shared_blks_written);
- bool has_local = (usage->local_blks_hit > 0 ||
- usage->local_blks_read > 0 ||
- usage->local_blks_written);
- bool has_temp = (usage->temp_blks_read > 0 ||
- usage->temp_blks_written);
+ bool has_shared = (usage->shared_blks_hit > 0 ||
+ usage->shared_blks_read > 0 ||
+ usage->shared_blks_written);
+ bool has_local = (usage->local_blks_hit > 0 ||
+ usage->local_blks_read > 0 ||
+ usage->local_blks_written);
+ bool has_temp = (usage->temp_blks_read > 0 ||
+ usage->temp_blks_written);
/* Show only positive counter values. */
if (has_shared || has_local || has_temp)
appendStringInfoString(es->str, " shared");
if (usage->shared_blks_hit > 0)
appendStringInfo(es->str, " hit=%ld",
- usage->shared_blks_hit);
+ usage->shared_blks_hit);
if (usage->shared_blks_read > 0)
appendStringInfo(es->str, " read=%ld",
- usage->shared_blks_read);
+ usage->shared_blks_read);
if (usage->shared_blks_written > 0)
appendStringInfo(es->str, " written=%ld",
- usage->shared_blks_written);
+ usage->shared_blks_written);
if (has_local || has_temp)
appendStringInfoChar(es->str, ',');
}
appendStringInfoString(es->str, " local");
if (usage->local_blks_hit > 0)
appendStringInfo(es->str, " hit=%ld",
- usage->local_blks_hit);
+ usage->local_blks_hit);
if (usage->local_blks_read > 0)
appendStringInfo(es->str, " read=%ld",
- usage->local_blks_read);
+ usage->local_blks_read);
if (usage->local_blks_written > 0)
appendStringInfo(es->str, " written=%ld",
- usage->local_blks_written);
+ usage->local_blks_written);
if (has_temp)
appendStringInfoChar(es->str, ',');
}
appendStringInfoString(es->str, " temp");
if (usage->temp_blks_read > 0)
appendStringInfo(es->str, " read=%ld",
- usage->temp_blks_read);
+ usage->temp_blks_read);
if (usage->temp_blks_written > 0)
appendStringInfo(es->str, " written=%ld",
- usage->temp_blks_written);
+ usage->temp_blks_written);
}
appendStringInfoChar(es->str, '\n');
}
TargetEntry *tle = (TargetEntry *) lfirst(lc);
result = lappend(result,
- deparse_expression((Node *) tle->expr, context,
+ deparse_expression((Node *) tle->expr, context,
useprefix, false));
}
if (es->analyze && sortstate->sort_Done &&
sortstate->tuplesortstate != NULL)
{
- Tuplesortstate *state = (Tuplesortstate *) sortstate->tuplesortstate;
+ Tuplesortstate *state = (Tuplesortstate *) sortstate->tuplesortstate;
const char *sortMethod;
const char *spaceType;
long spaceUsed;
if (hashtable)
{
- long spacePeakKb = (hashtable->spacePeak + 1023) / 1024;
+ long spacePeakKb = (hashtable->spacePeak + 1023) / 1024;
+
if (es->format != EXPLAIN_FORMAT_TEXT)
{
ExplainPropertyLong("Hash Buckets", hashtable->nbuckets, es);
{
appendStringInfoSpaces(es->str, es->indent * 2);
appendStringInfo(es->str,
- "Buckets: %d Batches: %d (originally %d) Memory Usage: %ldkB\n",
+ "Buckets: %d Batches: %d (originally %d) Memory Usage: %ldkB\n",
hashtable->nbuckets, hashtable->nbatch,
hashtable->nbatch_original, spacePeakKb);
}
{
appendStringInfoSpaces(es->str, es->indent * 2);
appendStringInfo(es->str,
- "Buckets: %d Batches: %d Memory Usage: %ldkB\n",
+ "Buckets: %d Batches: %d Memory Usage: %ldkB\n",
hashtable->nbuckets, hashtable->nbatch,
spacePeakKb);
}
*/
static void
ExplainMemberNodes(List *plans, PlanState **planstate, Plan *outer_plan,
- ExplainState *es)
+ ExplainState *es)
{
ListCell *lst;
int j = 0;
ExplainXMLTag(qlabel, X_OPENING, es);
foreach(lc, data)
{
- char *str;
+ char *str;
appendStringInfoSpaces(es->str, es->indent * 2 + 2);
appendStringInfoString(es->str, "<Item>");
case EXPLAIN_FORMAT_XML:
{
- char *str;
+ char *str;
appendStringInfoSpaces(es->str, es->indent * 2);
ExplainXMLTag(qlabel, X_OPENING | X_NOWHITESPACE, es);
static void
ExplainPropertyInteger(const char *qlabel, int value, ExplainState *es)
{
- char buf[32];
+ char buf[32];
snprintf(buf, sizeof(buf), "%d", value);
ExplainProperty(qlabel, buf, true, es);
static void
ExplainPropertyLong(const char *qlabel, long value, ExplainState *es)
{
- char buf[32];
+ char buf[32];
snprintf(buf, sizeof(buf), "%ld", value);
ExplainProperty(qlabel, buf, true, es);
ExplainPropertyFloat(const char *qlabel, double value, int ndigits,
ExplainState *es)
{
- char buf[256];
+ char buf[256];
snprintf(buf, sizeof(buf), "%.*f", ndigits, value);
ExplainProperty(qlabel, buf, true, es);
/*
* In JSON format, the grouping_stack is an integer list. 0 means
* we've emitted nothing at this grouping level, 1 means we've
- * emitted something (and so the next item needs a comma).
- * See ExplainJSONLineEnding().
+ * emitted something (and so the next item needs a comma). See
+ * ExplainJSONLineEnding().
*/
es->grouping_stack = lcons_int(0, es->grouping_stack);
es->indent++;
case EXPLAIN_FORMAT_XML:
appendStringInfoString(es->str,
- "<explain xmlns=\"http://www.postgresql.org/2009/explain\">\n");
+ "<explain xmlns=\"http://www.postgresql.org/2009/explain\">\n");
es->indent++;
break;
/*
* Emit a JSON line ending.
*
- * JSON requires a comma after each property but the last. To facilitate this,
+ * JSON requires a comma after each property but the last. To facilitate this,
* in JSON format, the text emitted for each property begins just prior to the
* preceding line-break (and comma, if applicable).
*/
* YAML lines are ordinarily indented by two spaces per indentation level.
* The text emitted for each property begins just prior to the preceding
* line-break, except for the first property in an unlabelled group, for which
- * it begins immediately after the "- " that introduces the group. The first
+ * it begins immediately after the "- " that introduces the group. The first
* property of the group appears on the same line as the opening "- ".
*/
static void
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.117 2010/02/17 04:19:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.118 2010/02/26 02:00:39 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
strcmp(prevfp->name, fp->name) == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("parameter name \"%s\" used more than once",
- fp->name)));
+ errmsg("parameter name \"%s\" used more than once",
+ fp->name)));
}
paramNames[i] = CStringGetTextDatum(fp->name);
if (!OidIsValid(laninline))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("language \"%s\" does not support inline code execution",
- NameStr(languageStruct->lanname))));
+ errmsg("language \"%s\" does not support inline code execution",
+ NameStr(languageStruct->lanname))));
ReleaseSysCache(languageTuple);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.193 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.194 2010/02/26 02:00:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (exclusionOpNames != NIL && !OidIsValid(accessMethodForm->amgettuple))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("access method \"%s\" does not support exclusion constraints",
- accessMethodName)));
+ errmsg("access method \"%s\" does not support exclusion constraints",
+ accessMethodName)));
amcanorder = accessMethodForm->amcanorder;
amoptions = accessMethodForm->amoptions;
else
{
elog(ERROR, "unknown constraint type");
- constraint_type = NULL; /* keep compiler quiet */
+ constraint_type = NULL; /* keep compiler quiet */
}
ereport(NOTICE,
heap_close(rel, NoLock);
/*
- * Make the catalog entries for the index, including constraints.
- * Then, if not skip_build || concurrent, actually build the index.
+ * Make the catalog entries for the index, including constraints. Then, if
+ * not skip_build || concurrent, actually build the index.
*/
indexRelationId =
index_create(relationId, indexRelationName, indexRelationId,
/*
* For a concurrent build, it's important to make the catalog entries
- * visible to other transactions before we start to build the index.
- * That will prevent them from making incompatible HOT updates. The new
- * index will be marked not indisready and not indisvalid, so that no one
- * else tries to either insert into it or use it for queries.
+ * visible to other transactions before we start to build the index. That
+ * will prevent them from making incompatible HOT updates. The new index
+ * will be marked not indisready and not indisvalid, so that no one else
+ * tries to either insert into it or use it for queries.
*
* We must commit our current transaction so that the index becomes
* visible; then start another. Note that all the data structures we just
/* Allocate space for exclusion operator info, if needed */
if (exclusionOpNames)
{
- int ncols = list_length(attList);
+ int ncols = list_length(attList);
Assert(list_length(exclusionOpNames) == ncols);
indexInfo->ii_ExclusionOps = (Oid *) palloc(sizeof(Oid) * ncols);
*/
if (nextExclOp)
{
- List *opname = (List *) lfirst(nextExclOp);
- Oid opid;
- Oid opfamily;
- int strat;
+ List *opname = (List *) lfirst(nextExclOp);
+ Oid opid;
+ Oid opfamily;
+ int strat;
/*
* Find the operator --- it must accept the column datatype
strat = get_op_opfamily_strategy(opid, opfamily);
if (strat == 0)
{
- HeapTuple opftuple;
+ HeapTuple opftuple;
Form_pg_opfamily opfform;
/*
const char *name = (const char *) lfirst(lc);
if (buflen > 0)
- buf[buflen++] = '_'; /* insert _ between names */
+ buf[buflen++] = '_'; /* insert _ between names */
/*
* At this point we have buflen <= NAMEDATALEN. name should be less
/*
* Select the actual names to be used for the columns of an index, given the
- * list of IndexElems for the columns. This is mostly about ensuring the
+ * list of IndexElems for the columns. This is mostly about ensuring the
* names are unique so we don't get a conflicting-attribute-names error.
*
* Returns a List of plain strings (char *, not String nodes).
/* Get the preliminary name from the IndexElem */
if (ielem->indexcolname)
- origname = ielem->indexcolname; /* caller-specified name */
+ origname = ielem->indexcolname; /* caller-specified name */
else if (ielem->name)
- origname = ielem->name; /* simple column reference */
+ origname = ielem->name; /* simple column reference */
else
- origname = "expr"; /* default name for expression */
+ origname = "expr"; /* default name for expression */
/* If it conflicts with any previous column, tweak it */
curname = origname;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.28 2010/02/20 21:24:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/lockcmds.c,v 1.29 2010/02/26 02:00:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
reloid = RangeVarGetRelid(relation, false);
/*
- * During recovery we only accept these variations:
- * LOCK TABLE foo IN ACCESS SHARE MODE
- * LOCK TABLE foo IN ROW SHARE MODE
- * LOCK TABLE foo IN ROW EXCLUSIVE MODE
- * This test must match the restrictions defined in LockAcquire()
+ * During recovery we only accept these variations: LOCK TABLE foo IN
+ * ACCESS SHARE MODE LOCK TABLE foo IN ROW SHARE MODE LOCK TABLE foo
+ * IN ROW EXCLUSIVE MODE This test must match the restrictions defined
+ * in LockAcquire()
*/
if (lockstmt->mode > RowExclusiveLock)
PreventCommandDuringRecovery("LOCK TABLE");
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.90 2010/02/23 22:51:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.91 2010/02/26 02:00:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Likewise for the anonymous block handler, if required;
- * but we don't care about its return type.
+ * Likewise for the anonymous block handler, if required; but we don't
+ * care about its return type.
*/
if (pltemplate->tmplinline)
{
{
inlineOid = ProcedureCreate(pltemplate->tmplinline,
PG_CATALOG_NAMESPACE,
- false, /* replace */
- false, /* returnsSet */
+ false, /* replace */
+ false, /* returnsSet */
VOIDOID,
ClanguageId,
F_FMGR_C_VALIDATOR,
pltemplate->tmplinline,
pltemplate->tmpllibrary,
- false, /* isAgg */
- false, /* isWindowFunc */
- false, /* security_definer */
- true, /* isStrict */
+ false, /* isAgg */
+ false, /* isWindowFunc */
+ false, /* security_definer */
+ true, /* isStrict */
PROVOLATILE_VOLATILE,
buildoidvector(funcargtypes, 1),
PointerGetDatum(NULL),
false, /* isAgg */
false, /* isWindowFunc */
false, /* security_definer */
- true, /* isStrict */
+ true, /* isStrict */
PROVOLATILE_VOLATILE,
buildoidvector(funcargtypes, 1),
PointerGetDatum(NULL),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.56 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.57 2010/02/26 02:00:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
if (saved_uid != owner_uid)
SetUserIdAndSecContext(owner_uid,
- save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
+ save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
/* Create the schema's namespace */
namespaceId = NamespaceCreate(schemaName, owner_uid);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.326 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.327 2010/02/26 02:00:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
Constraint *fkconstraint);
static void ATExecDropConstraint(Relation rel, const char *constrName,
- DropBehavior behavior,
- bool recurse, bool recursing,
- bool missing_ok);
+ DropBehavior behavior,
+ bool recurse, bool recursing,
+ bool missing_ok);
static void ATPrepAlterColumnType(List **wqueue,
AlteredTableInfo *tab, Relation rel,
bool recurse, bool recursing,
Relation rel = (Relation) lfirst(cell);
/*
- * Normally, we need a transaction-safe truncation here. However,
- * if the table was either created in the current (sub)transaction
- * or has a new relfilenode in the current (sub)transaction, then
- * we can just truncate it in-place, because a rollback would
- * cause the whole table or the current physical file to be
- * thrown away anyway.
+ * Normally, we need a transaction-safe truncation here. However, if
+ * the table was either created in the current (sub)transaction or has
+ * a new relfilenode in the current (sub)transaction, then we can just
+ * truncate it in-place, because a rollback would cause the whole
+ * table or the current physical file to be thrown away anyway.
*/
if (rel->rd_createSubid == mySubid ||
rel->rd_newRelfilenodeSubid == mySubid)
/*
* storage_name
- * returns the name corresponding to a typstorage/attstorage enum value
+ * returns the name corresponding to a typstorage/attstorage enum value
*/
static const char *
storage_name(char c)
int parentsWithOids = 0;
bool have_bogus_defaults = false;
int child_attno;
- static Node bogus_marker = { 0 }; /* marks conflicting defaults */
+ static Node bogus_marker = {0}; /* marks conflicting defaults */
/*
* Check for and reject tables with too many columns. We perform this
ListCell *prev = entry;
if (coldef->typeName == NULL)
+
/*
- * Typed table column option that does not belong to a
- * column from the type. This works because the columns
- * from the type come first in the list.
+ * Typed table column option that does not belong to a column from
+ * the type. This works because the columns from the type come
+ * first in the list.
*/
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
while (rest != NULL)
{
ColumnDef *restdef = lfirst(rest);
- ListCell *next = lnext(rest); /* need to save it in case we delete it */
+ ListCell *next = lnext(rest); /* need to save it in case we
+ * delete it */
if (strcmp(coldef->colname, restdef->colname) == 0)
{
if (coldef->is_from_type)
{
- /* merge the column options into the column from
- * the type */
+ /*
+ * merge the column options into the column from the type
+ */
coldef->is_not_null = restdef->is_not_null;
coldef->raw_default = restdef->raw_default;
coldef->cooked_default = restdef->cooked_default;
else if (def->storage != attribute->attstorage)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("inherited column \"%s\" has a storage parameter conflict",
- attributeName),
- errdetail("%s versus %s",
- storage_name(def->storage),
- storage_name(attribute->attstorage))));
+ errmsg("inherited column \"%s\" has a storage parameter conflict",
+ attributeName),
+ errdetail("%s versus %s",
+ storage_name(def->storage),
+ storage_name(attribute->attstorage))));
def->inhcount++;
/* Merge of NOT NULL constraints = OR 'em together */
else if (newdef->storage != 0 && def->storage != newdef->storage)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" has a storage parameter conflict",
- attributeName),
- errdetail("%s versus %s",
- storage_name(def->storage),
- storage_name(newdef->storage))));
+ errmsg("column \"%s\" has a storage parameter conflict",
+ attributeName),
+ errdetail("%s versus %s",
+ storage_name(def->storage),
+ storage_name(newdef->storage))));
/* Mark the column as locally defined */
def->is_local = true;
*/
if (recurse)
{
- List *child_oids, *child_numparents;
- ListCell *lo, *li;
+ List *child_oids,
+ *child_numparents;
+ ListCell *lo,
+ *li;
/*
* we need the number of parents for each child so that the recursive
oldattname)));
/*
- * if the attribute is inherited, forbid the renaming. if this is a
+ * if the attribute is inherited, forbid the renaming. if this is a
* top-level call to renameatt(), then expected_parents will be 0, so the
* effect of this code will be to prohibit the renaming if the attribute
* is inherited at all. if this is a recursive call to renameatt(),
* expected_parents will be the number of parents the current relation has
- * within the inheritance hierarchy being processed, so we'll prohibit
- * the renaming only if there are additional parents from elsewhere.
+ * within the inheritance hierarchy being processed, so we'll prohibit the
+ * renaming only if there are additional parents from elsewhere.
*/
if (attform->attinhcount > expected_parents)
ereport(ERROR,
OldHeap = heap_open(tab->relid, NoLock);
/*
- * We don't support rewriting of system catalogs; there are
- * too many corner cases and too little benefit. In particular
- * this is certainly not going to work for mapped catalogs.
+ * We don't support rewriting of system catalogs; there are too
+ * many corner cases and too little benefit. In particular this
+ * is certainly not going to work for mapped catalogs.
*/
if (IsSystemRelation(OldHeap))
ereport(ERROR,
newrel = NULL;
/*
- * Prepare a BulkInsertState and options for heap_insert. Because
- * we're building a new heap, we can skip WAL-logging and fsync it
- * to disk at the end instead (unless WAL-logging is required for
- * archiving or streaming replication). The FSM is empty too,
- * so don't bother using it.
+ * Prepare a BulkInsertState and options for heap_insert. Because we're
+ * building a new heap, we can skip WAL-logging and fsync it to disk at
+ * the end instead (unless WAL-logging is required for archiving or
+ * streaming replication). The FSM is empty too, so don't bother using it.
*/
if (newrel)
{
/* If we skipped writing WAL, then we need to sync the heap. */
if (hi_options & HEAP_INSERT_SKIP_WAL)
{
- char reason[NAMEDATALEN + 30];
+ char reason[NAMEDATALEN + 30];
+
snprintf(reason, sizeof(reason), "table rewrite on \"%s\"",
RelationGetRelationName(newrel));
XLogReportUnloggedStatement(reason);
/* Generate new proposed attoptions (text array) */
Assert(IsA(options, List));
datum = SysCacheGetAttr(ATTNAME, tuple, Anum_pg_attribute_attoptions,
- &isnull);
+ &isnull);
newOptions = transformRelOptions(isnull ? (Datum) 0 : datum,
(List *) options, NULL, NULL, false,
isReset);
* get the number of the attribute
*/
tuple = SearchSysCacheAttName(RelationGetRelid(rel), colName);
- if (!HeapTupleIsValid(tuple)){
- if (!missing_ok){
+ if (!HeapTupleIsValid(tuple))
+ {
+ if (!missing_ok)
+ {
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" of relation \"%s\" does not exist",
break;
case CONSTR_FOREIGN:
+
/*
- * Note that we currently never recurse for FK constraints, so
- * the "recurse" flag is silently ignored.
+ * Note that we currently never recurse for FK constraints, so the
+ * "recurse" flag is silently ignored.
*
* Assign or validate constraint name
*/
else
newConstraint->conname =
ChooseConstraintName(RelationGetRelationName(rel),
- strVal(linitial(newConstraint->fk_attrs)),
+ strVal(linitial(newConstraint->fk_attrs)),
"fkey",
RelationGetNamespace(rel),
NIL);
if (indexStruct->indisprimary)
{
/*
- * Refuse to use a deferrable primary key. This is per SQL spec,
- * and there would be a lot of interesting semantic problems if
- * we tried to allow it.
+ * Refuse to use a deferrable primary key. This is per SQL spec,
+ * and there would be a lot of interesting semantic problems if we
+ * tried to allow it.
*/
if (!indexStruct->indimmediate)
ereport(ERROR,
}
/*
- * Refuse to use a deferrable unique/primary key. This is per
- * SQL spec, and there would be a lot of interesting semantic
- * problems if we tried to allow it.
+ * Refuse to use a deferrable unique/primary key. This is per SQL
+ * spec, and there would be a lot of interesting semantic problems
+ * if we tried to allow it.
*/
if (found && !indexStruct->indimmediate)
{
/*
- * Remember that we found an otherwise matching index, so
- * that we can generate a more appropriate error message.
+ * Remember that we found an otherwise matching index, so that
+ * we can generate a more appropriate error message.
*/
found_deferrable = true;
found = false;
systable_endscan(scan);
- if (!found){
- if (!missing_ok){
+ if (!found)
+ {
+ if (!missing_ok)
+ {
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" of relation \"%s\" does not exist",
- constrName, RelationGetRelationName(rel))));
+ errmsg("constraint \"%s\" of relation \"%s\" does not exist",
+ constrName, RelationGetRelationName(rel))));
}
else
{
return;
}
}
+
/*
* Propagate to children as appropriate. Unlike most other ALTER
* routines, we have to do this one level of recursion at a time; we can't
heap_close(pg_class, RowExclusiveLock);
/*
- * Write an XLOG UNLOGGED record if WAL-logging was skipped because
- * WAL archiving is not enabled.
+ * Write an XLOG UNLOGGED record if WAL-logging was skipped because WAL
+ * archiving is not enabled.
*/
if (!XLogIsNeeded() && !rel->rd_istemp)
{
- char reason[NAMEDATALEN + 40];
+ char reason[NAMEDATALEN + 40];
+
snprintf(reason, sizeof(reason), "ALTER TABLE SET TABLESPACE on \"%s\"",
RelationGetRelationName(rel));
* enabled AND it's not a temp rel.
*
* Note: If you change the conditions here, update the conditions in
- * ATExecSetTableSpace() for when an XLOG UNLOGGED record is written
- * to match.
+ * ATExecSetTableSpace() for when an XLOG UNLOGGED record is written to
+ * match.
*/
use_wal = XLogIsNeeded() && !istemp;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.73 2010/02/17 04:19:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.74 2010/02/26 02:00:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void create_tablespace_directories(const char *location,
- const Oid tablespaceoid);
+ const Oid tablespaceoid);
static bool destroy_tablespace_directories(Oid tablespaceoid, bool redo);
/*
* Parent directories are missing during WAL replay, so
- * continue by creating simple parent directories
- * rather than a symlink.
+ * continue by creating simple parent directories rather
+ * than a symlink.
*/
/* create two parents up if not exist */
/*
* Check that location isn't too long. Remember that we're going to append
- * 'PG_XXX/<dboid>/<relid>.<nnn>'. FYI, we never actually reference the
+ * 'PG_XXX/<dboid>/<relid>.<nnn>'. FYI, we never actually reference the
* whole path, but mkdir() uses the first two parts.
*/
if (strlen(location) + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 +
static void
create_tablespace_directories(const char *location, const Oid tablespaceoid)
{
- char *linkloc = palloc(OIDCHARS + OIDCHARS + 1);
- char *location_with_version_dir = palloc(strlen(location) + 1 +
- strlen(TABLESPACE_VERSION_DIRECTORY) + 1);
+ char *linkloc = palloc(OIDCHARS + OIDCHARS + 1);
+ char *location_with_version_dir = palloc(strlen(location) + 1 +
+ strlen(TABLESPACE_VERSION_DIRECTORY) + 1);
sprintf(linkloc, "pg_tblspc/%u", tablespaceoid);
sprintf(location_with_version_dir, "%s/%s", location,
- TABLESPACE_VERSION_DIRECTORY);
+ TABLESPACE_VERSION_DIRECTORY);
/*
* Attempt to coerce target directory to safe permissions. If this fails,
location)));
else
ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not set permissions on directory \"%s\": %m",
- location)));
+ (errcode_for_file_access(),
+ errmsg("could not set permissions on directory \"%s\": %m",
+ location)));
}
/*
- * The creation of the version directory prevents more than one
- * tablespace in a single location.
+ * The creation of the version directory prevents more than one tablespace
+ * in a single location.
*/
if (mkdir(location_with_version_dir, S_IRWXU) < 0)
{
else
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m",
- location_with_version_dir)));
+ errmsg("could not create directory \"%s\": %m",
+ location_with_version_dir)));
}
/*
struct stat st;
linkloc_with_version_dir = palloc(9 + 1 + OIDCHARS + 1 +
- strlen(TABLESPACE_VERSION_DIRECTORY));
+ strlen(TABLESPACE_VERSION_DIRECTORY));
sprintf(linkloc_with_version_dir, "pg_tblspc/%u/%s", tablespaceoid,
- TABLESPACE_VERSION_DIRECTORY);
+ TABLESPACE_VERSION_DIRECTORY);
/*
* Check if the tablespace still contains any files. We try to rmdir each
(errcode_for_file_access(),
errmsg("could not remove directory \"%s\": %m",
linkloc_with_version_dir)));
-
+
/*
- * Try to remove the symlink. We must however deal with the
- * possibility that it's a directory instead of a symlink --- this could
- * happen during WAL replay (see TablespaceCreateDbspace), and it is also
- * the case on Windows where junction points lstat() as directories.
+ * Try to remove the symlink. We must however deal with the possibility
+ * that it's a directory instead of a symlink --- this could happen during
+ * WAL replay (see TablespaceCreateDbspace), and it is also the case on
+ * Windows where junction points lstat() as directories.
*/
linkloc = pstrdup(linkloc_with_version_dir);
get_parent_directory(linkloc);
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("tablespace \"%s\" does not exist",
- stmt->tablespacename)));
+ stmt->tablespacename)));
/* Must be owner of the existing object */
if (!pg_tablespace_ownercheck(HeapTupleGetOid(tup), GetUserId()))
xl_tblspc_drop_rec *xlrec = (xl_tblspc_drop_rec *) XLogRecGetData(record);
/*
- * If we issued a WAL record for a drop tablespace it is
- * because there were no files in it at all. That means that
- * no permanent objects can exist in it at this point.
+ * If we issued a WAL record for a drop tablespace it is because there
+ * were no files in it at all. That means that no permanent objects
+ * can exist in it at this point.
*
- * It is possible for standby users to be using this tablespace
- * as a location for their temporary files, so if we fail to
- * remove all files then do conflict processing and try again,
- * if currently enabled.
+ * It is possible for standby users to be using this tablespace as a
+ * location for their temporary files, so if we fail to remove all
+ * files then do conflict processing and try again, if currently
+ * enabled.
*/
if (!destroy_tablespace_directories(xlrec->ts_id, true))
{
ResolveRecoveryConflictWithTablespace(xlrec->ts_id);
/*
- * If we did recovery processing then hopefully the
- * backends who wrote temp files should have cleaned up and
- * exited by now. So lets recheck before we throw an error.
- * If !process_conflicts then this will just fail again.
+ * If we did recovery processing then hopefully the backends who
+ * wrote temp files should have cleaned up and exited by now. So
+ * lets recheck before we throw an error. If !process_conflicts
+ * then this will just fail again.
*/
if (!destroy_tablespace_directories(xlrec->ts_id, true))
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("tablespace %u is not empty",
- xlrec->ts_id)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("tablespace %u is not empty",
+ xlrec->ts_id)));
}
}
else
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.261 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.262 2010/02/26 02:00:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
int16 tgtype;
int ncolumns;
- int2 *columns;
+ int2 *columns;
int2vector *tgattr;
Node *whenClause;
List *whenRtable;
*/
if (stmt->whenClause)
{
- ParseState *pstate;
+ ParseState *pstate;
RangeTblEntry *rte;
- List *varList;
- ListCell *lc;
+ List *varList;
+ ListCell *lc;
/* Set up a pstate to parse with */
pstate = make_parsestate(NULL);
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in trigger WHEN condition")));
+ errmsg("cannot use subquery in trigger WHEN condition")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
if (pstate->p_hasWindowFuncs)
ereport(ERROR,
(errcode(ERRCODE_WINDOWING_ERROR),
- errmsg("cannot use window function in trigger WHEN condition")));
+ errmsg("cannot use window function in trigger WHEN condition")));
/*
* Check for disallowed references to OLD/NEW.
stmt->deferrable,
stmt->initdeferred,
RelationGetRelid(rel),
- NULL, /* no conkey */
+ NULL, /* no conkey */
0,
- InvalidOid, /* no domain */
- InvalidOid, /* no index */
- InvalidOid, /* no foreign key */
+ InvalidOid, /* no domain */
+ InvalidOid, /* no index */
+ InvalidOid, /* no foreign key */
NULL,
NULL,
NULL,
NULL,
NULL,
true, /* islocal */
- 0); /* inhcount */
+ 0); /* inhcount */
}
/*
trigoid = GetNewOid(tgrel);
/*
- * If trigger is internally generated, modify the provided trigger name
- * to ensure uniqueness by appending the trigger OID. (Callers will
- * usually supply a simple constant trigger name in these cases.)
+ * If trigger is internally generated, modify the provided trigger name to
+ * ensure uniqueness by appending the trigger OID. (Callers will usually
+ * supply a simple constant trigger name in these cases.)
*/
if (isInternal)
{
/*
* Scan pg_trigger for existing triggers on relation. We do this only to
* give a nice error message if there's already a trigger of the same
- * name. (The unique index on tgrelid/tgname would complain anyway.)
- * We can skip this for internally generated triggers, since the name
+ * name. (The unique index on tgrelid/tgname would complain anyway.) We
+ * can skip this for internally generated triggers, since the name
* modification above should be sufficient.
*
* NOTE that this is cool only because we have AccessExclusiveLock on the
if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- trigname, stmt->relation->relname)));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ trigname, stmt->relation->relname)));
}
systable_endscan(tgscan);
}
columns = (int2 *) palloc(ncolumns * sizeof(int2));
foreach(cell, stmt->columns)
{
- char *name = strVal(lfirst(cell));
- int2 attnum;
- int j;
+ char *name = strVal(lfirst(cell));
+ int2 attnum;
+ int j;
- /* Lookup column name. System columns are not allowed */
+ /* Lookup column name. System columns are not allowed */
attnum = attnameAttNum(rel, name, false);
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- name, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ name, RelationGetRelationName(rel))));
/* Check for duplicates */
for (j = i - 1; j >= 0; j--)
else
{
/*
- * User CREATE TRIGGER, so place dependencies. We make trigger be
+ * User CREATE TRIGGER, so place dependencies. We make trigger be
* auto-dropped if its relation is dropped or if the FK relation is
* dropped. (Auto drop is compatible with our pre-7.3 behavior.)
*/
}
/* Not possible to have an index dependency in this case */
Assert(!OidIsValid(indexOid));
+
/*
* If it's a user-specified constraint trigger, make the constraint
* internally dependent on the trigger instead of vice versa.
/* If column-specific trigger, add normal dependencies on columns */
if (columns != NULL)
{
- int i;
+ int i;
referenced.classId = RelationRelationId;
referenced.objectId = RelationGetRelid(rel);
}
/*
- * If it has a WHEN clause, add dependencies on objects mentioned in
- * the expression (eg, functions, as well as any columns used).
+ * If it has a WHEN clause, add dependencies on objects mentioned in the
+ * expression (eg, functions, as well as any columns used).
*/
if (whenClause != NULL)
recordDependencyOnExpr(&myself, whenClause, whenRtable,
* comparison; so we just compare corresponding slots of the two sets.
*
* Note: comparing the stringToNode forms of the WHEN clauses means that
- * parse column locations will affect the result. This is okay as long
- * as this function is only used for detecting exact equality, as for
- * example in checking for staleness of a cache entry.
+ * parse column locations will affect the result. This is okay as long as
+ * this function is only used for detecting exact equality, as for example
+ * in checking for staleness of a cache entry.
*/
if (trigdesc1 != NULL)
{
if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
return false;
if (trig1->tgqual == NULL && trig2->tgqual == NULL)
- /* ok */ ;
+ /* ok */ ;
else if (trig1->tgqual == NULL || trig2->tgqual == NULL)
return false;
else if (strcmp(trig1->tgqual, trig2->tgqual) != 0)
- return false;
+ return false;
}
}
else if (trigdesc2 != NULL)
int *tgindx;
int i;
TriggerData LocTriggerData;
- Bitmapset *modifiedCols;
+ Bitmapset *modifiedCols;
trigdesc = relinfo->ri_TrigDesc;
HeapTuple intuple = newtuple;
TupleTableSlot *newSlot;
int i;
- Bitmapset *modifiedCols;
+ Bitmapset *modifiedCols;
trigtuple = GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
&newSlot);
/*
* EvalPlanQual already locked the tuple, but we
- * re-call heap_lock_tuple anyway as an easy way
- * of re-fetching the correct tuple. Speed is
- * hardly a criterion in this path anyhow.
+ * re-call heap_lock_tuple anyway as an easy way of
+ * re-fetching the correct tuple. Speed is hardly a
+ * criterion in this path anyhow.
*/
goto ltrmark;
}
Assert(estate != NULL);
/*
- * trigger is an element of relinfo->ri_TrigDesc->triggers[];
- * find the matching element of relinfo->ri_TrigWhenExprs[]
+ * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
+ * matching element of relinfo->ri_TrigWhenExprs[]
*/
i = trigger - relinfo->ri_TrigDesc->triggers;
predicate = &relinfo->ri_TrigWhenExprs[i];
*/
if (*predicate == NIL)
{
- Node *tgqual;
+ Node *tgqual;
oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
tgqual = stringToNode(trigger->tgqual);
* Handle SET CONSTRAINTS constraint-name [, ...]
*
* First, identify all the named constraints and make a list of their
- * OIDs. Since, unlike the SQL spec, we allow multiple constraints
- * of the same name within a schema, the specifications are not
- * necessarily unique. Our strategy is to target all matching
+ * OIDs. Since, unlike the SQL spec, we allow multiple constraints of
+ * the same name within a schema, the specifications are not
+ * necessarily unique. Our strategy is to target all matching
* constraints within the first search-path schema that has any
* matches, but disregard matches in schemas beyond the first match.
* (This is a bit odd but it's the historical behavior.)
Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
/*
- * Silently skip triggers that are marked as non-deferrable
- * in pg_trigger. This is not an error condition, since
- * a deferrable RI constraint may have some non-deferrable
+ * Silently skip triggers that are marked as non-deferrable in
+ * pg_trigger. This is not an error condition, since a
+ * deferrable RI constraint may have some non-deferrable
* actions.
*/
if (pg_trigger->tgdeferrable)
* be fired for an event.
*
* NOTE: this is called whenever there are any triggers associated with
- * the event (even if they are disabled). This function decides which
+ * the event (even if they are disabled). This function decides which
* triggers actually need to be queued.
* ----------
*/
int *tgindx;
/*
- * Check state. We use normal tests not Asserts because it is possible
- * to reach here in the wrong state given misconfigured RI triggers,
- * in particular deferring a cascade action trigger.
+ * Check state. We use normal tests not Asserts because it is possible to
+ * reach here in the wrong state given misconfigured RI triggers, in
+ * particular deferring a cascade action trigger.
*/
if (afterTriggers == NULL)
elog(ERROR, "AfterTriggerSaveEvent() called outside of transaction");
}
/*
- * If the trigger is a deferred unique constraint check trigger,
- * only queue it if the unique constraint was potentially violated,
- * which we know from index insertion time.
+ * If the trigger is a deferred unique constraint check trigger, only
+ * queue it if the unique constraint was potentially violated, which
+ * we know from index insertion time.
*/
if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.147 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.148 2010/02/26 02:00:40 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
/* atts[] is of allocated length RelationGetNumberOfAttributes(rel) */
} RelToCheck;
-Oid binary_upgrade_next_pg_type_array_oid = InvalidOid;
+Oid binary_upgrade_next_pg_type_array_oid = InvalidOid;
static Oid findTypeInputFunction(List *procname, Oid typeOid);
static Oid findTypeOutputFunction(List *procname, Oid typeOid);
* now have TypeCreate do all the real work.
*/
typoid =
- /*
- * The pg_type.oid is stored in user tables as array elements
- * (base types) in ArrayType and in composite types in
- * DatumTupleFields. This oid must be preserved by binary
- * upgrades.
- */
+
+ /*
+ * The pg_type.oid is stored in user tables as array elements (base types)
+ * in ArrayType and in composite types in DatumTupleFields. This oid must
+ * be preserved by binary upgrades.
+ */
TypeCreate(InvalidOid, /* no predetermined type OID */
typeName, /* type name */
typeNamespace, /* namespace */
case CONSTR_EXCLUSION:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("exclusion constraints not possible for domains")));
+ errmsg("exclusion constraints not possible for domains")));
break;
case CONSTR_FOREIGN:
Oid
AssignTypeArrayOid(void)
{
- Oid type_array_oid;
+ Oid type_array_oid;
/* Pre-assign the type's array OID for use in pg_type.typarray */
if (OidIsValid(binary_upgrade_next_pg_type_array_oid))
createStmt->tablespacename = NULL;
/*
- * Check for collision with an existing type name. If there is one
- * and it's an autogenerated array, we can rename it out of the
- * way. This check is here mainly to get a better error message
- * about a "type" instead of below about a "relation".
+ * Check for collision with an existing type name. If there is one and
+ * it's an autogenerated array, we can rename it out of the way. This
+ * check is here mainly to get a better error message about a "type"
+ * instead of below about a "relation".
*/
typeNamespace = RangeVarGetCreationNamespace(createStmt->relation);
old_type_oid =
case CONSTR_EXCLUSION:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("exclusion constraints not possible for domains")));
+ errmsg("exclusion constraints not possible for domains")));
break;
case CONSTR_FOREIGN:
' ',
' ',
' ',
- NULL, /* not an exclusion constraint */
+ NULL, /* not an exclusion constraint */
expr, /* Tree form of check constraint */
ccbin, /* Binary form of check constraint */
ccsrc, /* Source form of check constraint */
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.192 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.193 2010/02/26 02:00:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (check_password_hook && password)
(*check_password_hook) (stmt->role,
password,
- isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
+ isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
validUntil_datum,
validUntil_null);
if (check_password_hook && password)
(*check_password_hook) (stmt->role,
password,
- isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
+ isMD5(password) ? PASSWORD_TYPE_MD5 : PASSWORD_TYPE_PLAINTEXT,
validUntil_datum,
validUntil_null);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.409 2010/02/15 16:10:34 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.410 2010/02/26 02:00:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* We cannot run VACUUM inside a user transaction block; if we were inside
* a transaction, then our commit- and start-transaction-command calls
- * would not have the intended effect! There are numerous other subtle
+ * would not have the intended effect! There are numerous other subtle
* dependencies on this, too.
*
* ANALYZE (without VACUUM) can run either way.
heap_close(relation, RowExclusiveLock);
/*
- * If we were able to advance datfrozenxid, see if we can truncate pg_clog.
- * Also do it if the shared XID-wrap-limit info is stale, since this
- * action will update that too.
+ * If we were able to advance datfrozenxid, see if we can truncate
+ * pg_clog. Also do it if the shared XID-wrap-limit info is stale, since
+ * this action will update that too.
*/
if (dirty || ForceTransactionIdLimitUpdate())
vac_truncate_clog(newFrozenXid);
/*
* Switch to the table owner's userid, so that any index functions are run
* as that user. Also lock down security-restricted operations and
- * arrange to make GUC variable changes local to this command.
- * (This is unnecessary, but harmless, for lazy VACUUM.)
+ * arrange to make GUC variable changes local to this command. (This is
+ * unnecessary, but harmless, for lazy VACUUM.)
*/
GetUserIdAndSecContext(&save_userid, &save_sec_context);
SetUserIdAndSecContext(onerel->rd_rel->relowner,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.131 2010/02/09 21:43:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.132 2010/02/26 02:00:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
{
/*
- * No need to log changes for temp tables, they do not contain
- * data visible on the standby server.
+ * No need to log changes for temp tables, they do not contain data
+ * visible on the standby server.
*/
if (rel->rd_istemp || !XLogIsNeeded())
return;
{
lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
- &vacrelstats->latestRemovedXid);
+ &vacrelstats->latestRemovedXid);
tups_vacuumed += 1;
}
else
RelationTruncate(onerel, new_rel_pages);
/*
- * We can release the exclusive lock as soon as we have truncated. Other
+ * We can release the exclusive lock as soon as we have truncated. Other
* backends can't safely access the relation until they have processed the
* smgr invalidation that smgrtruncate sent out ... but that should happen
* as part of standard invalidation processing once they acquire lock on
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.347 2010/02/20 21:24:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.348 2010/02/26 02:00:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
- Plan *planTree);
+ Plan *planTree);
static void OpenIntoRel(QueryDesc *queryDesc);
static void CloseIntoRel(QueryDesc *queryDesc);
static void intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo);
/*
* CREATE TABLE AS or SELECT INTO?
*
- * XXX should we allow this if the destination is temp? Considering
- * that it would still require catalog changes, probably not.
+ * XXX should we allow this if the destination is temp? Considering that
+ * it would still require catalog changes, probably not.
*/
if (plannedstmt->intoClause != NULL)
PreventCommandIfReadOnly(CreateCommandTag((Node *) plannedstmt));
/*
* initialize result relation stuff, and open/lock the result rels.
*
- * We must do this before initializing the plan tree, else we might
- * try to do a lock upgrade if a result rel is also a source rel.
+ * We must do this before initializing the plan tree, else we might try to
+ * do a lock upgrade if a result rel is also a source rel.
*/
if (plannedstmt->resultRelations)
{
/*
* Similarly, we have to lock relations selected FOR UPDATE/FOR SHARE
- * before we initialize the plan tree, else we'd be risking lock
- * upgrades. While we are at it, build the ExecRowMark list.
+ * before we initialize the plan tree, else we'd be risking lock upgrades.
+ * While we are at it, build the ExecRowMark list.
*/
estate->es_rowMarks = NIL;
foreach(l, plannedstmt->rowMarks)
tupType = ExecGetResultType(planstate);
/*
- * Initialize the junk filter if needed. SELECT queries need a
- * filter if there are any junk attrs in the top-level tlist.
+ * Initialize the junk filter if needed. SELECT queries need a filter if
+ * there are any junk attrs in the top-level tlist.
*/
if (operation == CMD_SELECT)
{
/*
* destroy the executor's tuple table. Actually we only care about
- * releasing buffer pins and tupdesc refcounts; there's no need to
- * pfree the TupleTableSlots, since the containing memory context
- * is about to go away anyway.
+ * releasing buffer pins and tupdesc refcounts; there's no need to pfree
+ * the TupleTableSlots, since the containing memory context is about to go
+ * away anyway.
*/
ExecResetTupleTable(estate->es_tupleTable, false);
slot = ExecFilterJunk(estate->es_junkFilter, slot);
/*
- * If we are supposed to send the tuple somewhere, do so.
- * (In practice, this is probably always the case at this point.)
+ * If we are supposed to send the tuple somewhere, do so. (In
+ * practice, this is probably always the case at this point.)
*/
if (sendTuples)
(*dest->receiveSlot) (slot, dest);
EvalPlanQualBegin(epqstate, estate);
/*
- * Free old test tuple, if any, and store new tuple where relation's
- * scan node will see it
+ * Free old test tuple, if any, and store new tuple where relation's scan
+ * node will see it
*/
EvalPlanQualSetTuple(epqstate, rti, copyTuple);
slot = EvalPlanQualNext(epqstate);
/*
- * If we got a tuple, force the slot to materialize the tuple so that
- * it is not dependent on any local state in the EPQ query (in particular,
+ * If we got a tuple, force the slot to materialize the tuple so that it
+ * is not dependent on any local state in the EPQ query (in particular,
* it's highly likely that the slot contains references to any pass-by-ref
- * datums that may be present in copyTuple). As with the next step,
- * this is to guard against early re-use of the EPQ query.
+ * datums that may be present in copyTuple). As with the next step, this
+ * is to guard against early re-use of the EPQ query.
*/
if (!TupIsNull(slot))
(void) ExecMaterializeSlot(slot);
/*
- * Clear out the test tuple. This is needed in case the EPQ query
- * is re-used to test a tuple for a different relation. (Not clear
- * that can really happen, but let's be safe.)
+ * Clear out the test tuple. This is needed in case the EPQ query is
+ * re-used to test a tuple for a different relation. (Not clear that can
+ * really happen, but let's be safe.)
*/
EvalPlanQualSetTuple(epqstate, rti, NULL);
Assert(rti > 0);
/*
- * free old test tuple, if any, and store new tuple where relation's
- * scan node will see it
+ * free old test tuple, if any, and store new tuple where relation's scan
+ * node will see it
*/
if (estate->es_epqTuple[rti - 1] != NULL)
heap_freetuple(estate->es_epqTuple[rti - 1]);
/*
* Fetch the current row values for any non-locked relations that need
- * to be scanned by an EvalPlanQual operation. origslot must have been set
+ * to be scanned by an EvalPlanQual operation. origslot must have been set
* to contain the current result row (top-level row) that we need to recheck.
*/
void
/* Recopy current values of parent parameters */
if (parentestate->es_plannedstmt->nParamExec > 0)
{
- int i = parentestate->es_plannedstmt->nParamExec;
+ int i = parentestate->es_plannedstmt->nParamExec;
while (--i >= 0)
{
estate->es_param_list_info = parentestate->es_param_list_info;
if (parentestate->es_plannedstmt->nParamExec > 0)
{
- int i = parentestate->es_plannedstmt->nParamExec;
+ int i = parentestate->es_plannedstmt->nParamExec;
estate->es_param_exec_vals = (ParamExecData *)
palloc0(i * sizeof(ParamExecData));
/*
* Each EState must have its own es_epqScanDone state, but if we have
- * nested EPQ checks they should share es_epqTuple arrays. This allows
+ * nested EPQ checks they should share es_epqTuple arrays. This allows
* sub-rechecks to inherit the values being examined by an outer recheck.
*/
estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
/*
* Initialize private state information for each SubPlan. We must do this
* before running ExecInitNode on the main query tree, since
- * ExecInitSubPlan expects to be able to find these entries.
- * Some of the SubPlans might not be used in the part of the plan tree
- * we intend to run, but since it's not easy to tell which, we just
- * initialize them all.
+ * ExecInitSubPlan expects to be able to find these entries. Some of the
+ * SubPlans might not be used in the part of the plan tree we intend to
+ * run, but since it's not easy to tell which, we just initialize them
+ * all.
*/
Assert(estate->es_subplanstates == NIL);
foreach(l, parentestate->es_plannedstmt->subplans)
}
/*
- * Initialize the private state information for all the nodes in the
- * part of the plan tree we need to run. This opens files, allocates
- * storage and leaves us ready to start processing tuples.
+ * Initialize the private state information for all the nodes in the part
+ * of the plan tree we need to run. This opens files, allocates storage
+ * and leaves us ready to start processing tuples.
*/
epqstate->planstate = ExecInitNode(planTree, estate, 0);
Assert(into);
/*
- * XXX This code needs to be kept in sync with DefineRelation().
- * Maybe we should try to use that function instead.
+ * XXX This code needs to be kept in sync with DefineRelation(). Maybe we
+ * should try to use that function instead.
*/
/*
/* If we skipped using WAL, must heap_sync before commit */
if (myState->hi_options & HEAP_INSERT_SKIP_WAL)
{
- char reason[NAMEDATALEN + 30];
+ char reason[NAMEDATALEN + 30];
+
snprintf(reason, sizeof(reason), "SELECT INTO on \"%s\"",
RelationGetRelationName(myState->rel));
XLogReportUnloggedStatement(reason);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.262 2010/02/18 18:41:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.263 2010/02/26 02:00:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* We might have a nested-assignment situation, in which the
* refassgnexpr is itself a FieldStore or ArrayRef that needs to
* obtain and modify the previous value of the array element or slice
- * being replaced. If so, we have to extract that value from the
+ * being replaced. If so, we have to extract that value from the
* array and pass it down via the econtext's caseValue. It's safe to
* reuse the CASE mechanism because there cannot be a CASE between
* here and where the value would be needed, and an array assignment
astate->refelemlength,
astate->refelembyval,
astate->refelemalign,
- &econtext->caseValue_isNull);
+ &econtext->caseValue_isNull);
}
else
{
* We really only care about number of attributes and data type.
* Also, we can ignore type mismatch on columns that are dropped
* in the destination type, so long as (1) the physical storage
- * matches or (2) the actual column value is NULL. Case (1) is
+ * matches or (2) the actual column value is NULL. Case (1) is
* helpful in some cases involving out-of-date cached plans, while
* case (2) is expected behavior in situations such as an INSERT
* into a table with dropped columns (the planner typically
* holds, we have to use ExecEvalWholeRowSlow to check (2) for
* each row. Also, we have to allow the case that the slot has
* more columns than the Var's type, because we might be looking
- * at the output of a subplan that includes resjunk columns.
- * (XXX it would be nice to verify that the extra columns are all
+ * at the output of a subplan that includes resjunk columns. (XXX
+ * it would be nice to verify that the extra columns are all
* marked resjunk, but we haven't got access to the subplan
* targetlist here...) Resjunk columns should always be at the end
* of a targetlist, so it's sufficient to ignore them here; but we
slot_tupdesc->natts,
var_tupdesc->natts)));
else if (var_tupdesc->natts < slot_tupdesc->natts)
- needslow = true; /* need to trim trailing atts */
+ needslow = true; /* need to trim trailing atts */
for (i = 0; i < var_tupdesc->natts; i++)
{
if (vattr->attlen != sattr->attlen ||
vattr->attalign != sattr->attalign)
- needslow = true; /* need runtime check for null */
+ needslow = true; /* need runtime check for null */
}
ReleaseTupleDesc(var_tupdesc);
if (!vattr->attisdropped)
continue; /* already checked non-dropped cols */
- if (heap_attisnull(tuple, i+1))
+ if (heap_attisnull(tuple, i + 1))
continue; /* null is always okay */
if (vattr->attlen != sattr->attlen ||
vattr->attalign != sattr->attalign)
/* prepare map from old to new attribute numbers */
cstate->map = convert_tuples_by_name(cstate->indesc,
cstate->outdesc,
- gettext_noop("could not convert row type"));
+ gettext_noop("could not convert row type"));
cstate->initialized = true;
MemoryContextSwitchTo(old_cxt);
&fstate->argdesc, econtext);
/*
- * Find field's attr record. Note we don't support system columns here:
- * a datum tuple doesn't have valid values for most of the interesting
+ * Find field's attr record. Note we don't support system columns here: a
+ * datum tuple doesn't have valid values for most of the interesting
* system columns anyway.
*/
- if (fieldnum <= 0) /* should never happen */
+ if (fieldnum <= 0) /* should never happen */
elog(ERROR, "unsupported reference to system column %d in FieldSelect",
fieldnum);
if (fieldnum > tupDesc->natts) /* should never happen */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.48 2010/01/02 16:57:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.49 2010/02/26 02:00:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* ExecScanFetch -- fetch next potential tuple
*
* This routine is concerned with substituting a test tuple if we are
- * inside an EvalPlanQual recheck. If we aren't, just execute
+ * inside an EvalPlanQual recheck. If we aren't, just execute
* the access method's next-tuple routine.
*/
static inline TupleTableSlot *
ResetExprContext(econtext);
/*
- * get a tuple from the access method. Loop until we obtain a tuple that
+ * get a tuple from the access method. Loop until we obtain a tuple that
* passes the qualification.
*/
for (;;)
* Routines dealing with TupleTableSlots. These are used for resource
* management associated with tuples (eg, releasing buffer pins for
* tuples in disk buffers, or freeing the memory occupied by transient
- * tuples). Slots also provide access abstraction that lets us implement
+ * tuples). Slots also provide access abstraction that lets us implement
* "virtual" tuples to reduce data-copying overhead.
*
* Routines dealing with the type information for tuples. Currently,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.111 2010/01/02 16:57:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.112 2010/02/26 02:00:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
do_text_output_multiline(TupOutputState *tstate, char *text)
{
Datum values[1];
- bool isnull[1] = { false };
+ bool isnull[1] = {false};
while (*text)
{
if (eol)
{
len = eol - text;
+
eol++;
}
else
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.170 2010/02/08 04:33:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.171 2010/02/26 02:00:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* any previously computed pass-by-reference expression result will go away!
*
* If isCommit is false, we are being called in error cleanup, and should
- * not call callbacks but only release memory. (It might be better to call
+ * not call callbacks but only release memory. (It might be better to call
* the callbacks and pass the isCommit flag to them, but that would require
* more invasive code changes than currently seems justified.)
*
checkUnique = UNIQUE_CHECK_PARTIAL;
satisfiesConstraint =
- index_insert(indexRelation, /* index relation */
- values, /* array of index Datums */
- isnull, /* null flags */
+ index_insert(indexRelation, /* index relation */
+ values, /* array of index Datums */
+ isnull, /* null flags */
tupleid, /* tid of heap tuple */
heapRelation, /* heap relation */
checkUnique); /* type of uniqueness check to do */
/*
* If the index has an associated exclusion constraint, check that.
* This is simpler than the process for uniqueness checks since we
- * always insert first and then check. If the constraint is deferred,
+ * always insert first and then check. If the constraint is deferred,
* we check now anyway, but don't throw error on violation; instead
* we'll queue a recheck event.
*
*/
if (indexInfo->ii_ExclusionOps != NULL)
{
- bool errorOK = !indexRelation->rd_index->indimmediate;
+ bool errorOK = !indexRelation->rd_index->indimmediate;
satisfiesConstraint =
check_exclusion_constraint(heapRelation,
ItemPointer tupleid, Datum *values, bool *isnull,
EState *estate, bool newIndex, bool errorOK)
{
- Oid *constr_procs = indexInfo->ii_ExclusionProcs;
- uint16 *constr_strats = indexInfo->ii_ExclusionStrats;
- int index_natts = index->rd_index->indnatts;
- IndexScanDesc index_scan;
- HeapTuple tup;
- ScanKeyData scankeys[INDEX_MAX_KEYS];
- SnapshotData DirtySnapshot;
- int i;
- bool conflict;
- bool found_self;
- ExprContext *econtext;
+ Oid *constr_procs = indexInfo->ii_ExclusionProcs;
+ uint16 *constr_strats = indexInfo->ii_ExclusionStrats;
+ int index_natts = index->rd_index->indnatts;
+ IndexScanDesc index_scan;
+ HeapTuple tup;
+ ScanKeyData scankeys[INDEX_MAX_KEYS];
+ SnapshotData DirtySnapshot;
+ int i;
+ bool conflict;
+ bool found_self;
+ ExprContext *econtext;
TupleTableSlot *existing_slot;
TupleTableSlot *save_scantuple;
/*
- * If any of the input values are NULL, the constraint check is assumed
- * to pass (i.e., we assume the operators are strict).
+ * If any of the input values are NULL, the constraint check is assumed to
+ * pass (i.e., we assume the operators are strict).
*/
for (i = 0; i < index_natts; i++)
{
}
/*
- * Search the tuples that are in the index for any violations,
- * including tuples that aren't visible yet.
+ * Search the tuples that are in the index for any violations, including
+ * tuples that aren't visible yet.
*/
InitDirtySnapshot(DirtySnapshot);
econtext->ecxt_scantuple = existing_slot;
/*
- * May have to restart scan from this point if a potential
- * conflict is found.
+ * May have to restart scan from this point if a potential conflict is
+ * found.
*/
retry:
conflict = false;
while ((tup = index_getnext(index_scan,
ForwardScanDirection)) != NULL)
{
- TransactionId xwait;
+ TransactionId xwait;
Datum existing_values[INDEX_MAX_KEYS];
bool existing_isnull[INDEX_MAX_KEYS];
- char *error_new;
- char *error_existing;
+ char *error_new;
+ char *error_existing;
/*
* Ignore the entry for the tuple we're trying to check.
* Extract the index column values and isnull flags from the existing
* tuple.
*/
- ExecStoreTuple(tup, existing_slot, InvalidBuffer, false);
+ ExecStoreTuple(tup, existing_slot, InvalidBuffer, false);
FormIndexDatum(indexInfo, existing_slot, estate,
existing_values, existing_isnull);
existing_values,
existing_isnull,
values))
- continue; /* tuple doesn't actually match, so no conflict */
+ continue; /* tuple doesn't actually match, so no
+ * conflict */
}
/*
- * At this point we have either a conflict or a potential conflict.
- * If we're not supposed to raise error, just return the fact of the
+ * At this point we have either a conflict or a potential conflict. If
+ * we're not supposed to raise error, just return the fact of the
* potential conflict without waiting to see if it's real.
*/
if (errorOK)
/*
* If an in-progress transaction is affecting the visibility of this
- * tuple, we need to wait for it to complete and then recheck. For
+ * tuple, we need to wait for it to complete and then recheck. For
* simplicity we do rechecking by just restarting the whole scan ---
* this case probably doesn't happen often enough to be worth trying
* harder, and anyway we don't want to hold any index internal locks
index_endscan(index_scan);
/*
- * We should have found our tuple in the index, unless we exited the
- * loop early because of conflict. Complain if not.
+ * We should have found our tuple in the index, unless we exited the loop
+ * early because of conflict. Complain if not.
*/
if (!found_self && !conflict)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("failed to re-find tuple within index \"%s\"",
RelationGetRelationName(index)),
- errhint("This may be because of a non-immutable index expression.")));
+ errhint("This may be because of a non-immutable index expression.")));
econtext->ecxt_scantuple = save_scantuple;
/*
* Check existing tuple's index values to see if it really matches the
- * exclusion condition against the new_values. Returns true if conflict.
+ * exclusion condition against the new_values. Returns true if conflict.
*/
static bool
index_recheck_constraint(Relation index, Oid *constr_procs,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.141 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.142 2010/02/26 02:00:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* For simplicity, we require callers to support both set eval modes.
* There are cases where we must use one or must use the other, and
- * it's not really worthwhile to postpone the check till we know.
- * But note we do not require caller to provide an expectedDesc.
+ * it's not really worthwhile to postpone the check till we know. But
+ * note we do not require caller to provide an expectedDesc.
*/
if (!rsi || !IsA(rsi, ReturnSetInfo) ||
(rsi->allowedModes & SFRM_ValuePerCall) == 0 ||
AssertArg(!IsPolymorphicType(rettype));
if (modifyTargetList)
- *modifyTargetList = false; /* initialize for no change */
+ *modifyTargetList = false; /* initialize for no change */
if (junkFilter)
*junkFilter = NULL; /* initialize in case of VOID result */
/*
* Verify that the targetlist matches the return tuple type. We scan
* the non-deleted attributes to ensure that they match the datatypes
- * of the non-resjunk columns. For deleted attributes, insert NULL
+ * of the non-resjunk columns. For deleted attributes, insert NULL
* result columns if the caller asked for that.
*/
tupnatts = tupdesc->natts;
attr = tupdesc->attrs[colindex - 1];
if (attr->attisdropped && modifyTargetList)
{
- Expr *null_expr;
+ Expr *null_expr;
/* The type of the null we insert isn't important */
null_expr = (Expr *) makeConst(INT4OID,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Final statement returns too few columns.")));
+ errdetail("Final statement returns too few columns.")));
if (modifyTargetList)
{
- Expr *null_expr;
+ Expr *null_expr;
/* The type of the null we insert isn't important */
null_expr = (Expr *) makeConst(INT4OID,
-1,
sizeof(int32),
(Datum) 0,
- true, /* isnull */
+ true, /* isnull */
true /* byval */ );
newtlist = lappend(newtlist,
makeTargetEntry(null_expr,
* Copyright (c) 2001-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.24 2010/01/02 16:57:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/instrument.c,v 1.25 2010/02/26 02:00:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "executor/instrument.h"
-BufferUsage pgBufferUsage;
+BufferUsage pgBufferUsage;
static void BufferUsageAccumDiff(BufferUsage *dst,
- const BufferUsage *add, const BufferUsage *sub);
+ const BufferUsage *add, const BufferUsage *sub);
/* Allocate new instrumentation structure(s) */
Instrumentation *
instr = palloc0(n * sizeof(Instrumentation));
if (instrument_options & INSTRUMENT_BUFFERS)
{
- int i;
+ int i;
for (i = 0; i < n; i++)
instr[i].needs_bufusage = true;
/* Adds delta of buffer usage to node's count. */
if (instr->needs_bufusage)
BufferUsageAccumDiff(&instr->bufusage,
- &pgBufferUsage, &instr->bufusage_start);
+ &pgBufferUsage, &instr->bufusage_start);
/* Is this the first tuple of this cycle? */
if (!instr->running)
* it is completely forbidden for functions to modify pass-by-ref inputs,
* but in the aggregate case we know the left input is either the initial
* transition value or a previous function result, and in either case its
- * value need not be preserved. See int8inc() for an example. Notice that
+ * value need not be preserved. See int8inc() for an example. Notice that
* advance_transition_function() is coded to avoid a data copy step when
* the previous transition value pointer is returned. Also, some
* transition functions want to store working state in addition to the
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.174 2010/02/14 18:42:14 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.175 2010/02/26 02:00:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* number of sorting columns to consider in DISTINCT comparisons */
/* (this is either zero or the same as numSortCols) */
- int numDistinctCols;
+ int numDistinctCols;
/* deconstructed sorting information (arrays of length numSortCols) */
AttrNumber *sortColIdx;
- Oid *sortOperators;
- bool *sortNullsFirst;
+ Oid *sortOperators;
+ bool *sortNullsFirst;
/*
* fmgr lookup data for input columns' equality operators --- only
transtypeByVal;
/*
- * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but
+ * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but
*