/* Disallow '/a/b/data/..' */
if (path_contains_parent_reference(filename))
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("reference to parent directory (\"..\") not allowed"))));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errmsg("reference to parent directory (\"..\") not allowed"))));
+
/*
- * Allow absolute paths if within DataDir or Log_directory, even
- * though Log_directory might be outside DataDir.
+ * Allow absolute paths if within DataDir or Log_directory, even
+ * though Log_directory might be outside DataDir.
*/
if (!path_is_prefix_of_path(DataDir, filename) &&
(!logAllowed || !is_absolute_path(Log_directory) ||
!path_is_prefix_of_path(Log_directory, filename)))
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("absolute path not allowed"))));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errmsg("absolute path not allowed"))));
}
else if (!path_is_relative_and_below_cwd(filename))
ereport(ERROR,
PG_MODULE_MAGIC;
-void _PG_init(void);
+void _PG_init(void);
/* GUC Variables */
static int auth_delay_milliseconds;
/* Original Hook */
-static ClientAuthentication_hook_type original_client_auth_hook = NULL;
+static ClientAuthentication_hook_type original_client_auth_hook = NULL;
/*
* Check authentication
{
/* Define custom GUC variables */
DefineCustomIntVariable("auth_delay.milliseconds",
- "Milliseconds to delay before reporting authentication failure",
+ "Milliseconds to delay before reporting authentication failure",
NULL,
&auth_delay_milliseconds,
0,
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
gdb_date_dist(const void *a, const void *b)
{
/* we assume the difference can't overflow */
- Datum diff = DirectFunctionCall2(date_mi,
+ Datum diff = DirectFunctionCall2(date_mi,
DateADTGetDatum(*((const DateADT *) a)),
- DateADTGetDatum(*((const DateADT *) b)));
+ DateADTGetDatum(*((const DateADT *) b)));
return (float8) Abs(DatumGetInt32(diff));
}
PG_FUNCTION_INFO_V1(date_dist);
-Datum date_dist(PG_FUNCTION_ARGS);
+Datum date_dist(PG_FUNCTION_ARGS);
Datum
date_dist(PG_FUNCTION_ARGS)
{
/* we assume the difference can't overflow */
- Datum diff = DirectFunctionCall2(date_mi,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1));
+ Datum diff = DirectFunctionCall2(date_mi,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1));
PG_RETURN_INT32(Abs(DatumGetInt32(diff)));
}
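/*
 * Illustrative note, not part of the patch: date_mi yields the signed
 * difference in days, so date_dist('2011-01-10', '2011-01-01') works out
 * to Abs(9) = 9.  A minimal sketch of the same arithmetic on plain day
 * numbers (the names below are hypothetical):
 */
static int32
date_dist_sketch(int32 a_days, int32 b_days)
{
	int32		diff = a_days - b_days;	/* what date_mi computes, in days */

	return (diff < 0) ? -diff : diff;	/* what Abs() does */
}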
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
PG_FUNCTION_INFO_V1(float4_dist);
-Datum float4_dist(PG_FUNCTION_ARGS);
+Datum float4_dist(PG_FUNCTION_ARGS);
Datum
float4_dist(PG_FUNCTION_ARGS)
{
- float4 a = PG_GETARG_FLOAT4(0);
+ float4 a = PG_GETARG_FLOAT4(0);
float4 b = PG_GETARG_FLOAT4(1);
float4 r;
r = a - b;
CHECKFLOATVAL(r, isinf(a) || isinf(b), true);
- PG_RETURN_FLOAT4( Abs(r) );
+ PG_RETURN_FLOAT4(Abs(r));
}
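/*
 * Illustrative note, not part of the patch: as I read the guard above,
 * CHECKFLOATVAL only accepts an infinite subtraction result when one of
 * the inputs was itself infinite (isinf(a) || isinf(b)), and the trailing
 * "true" says a zero result is always acceptable; otherwise the usual
 * float overflow/underflow error is raised.
 */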
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
static float8
gbt_float8_dist(const void *a, const void *b)
{
- float8 arg1 = *(const float8 *)a;
- float8 arg2 = *(const float8 *)b;
+ float8 arg1 = *(const float8 *) a;
+ float8 arg2 = *(const float8 *) b;
float8 r;
r = arg1 - arg2;
PG_FUNCTION_INFO_V1(float8_dist);
-Datum float8_dist(PG_FUNCTION_ARGS);
+Datum float8_dist(PG_FUNCTION_ARGS);
Datum
float8_dist(PG_FUNCTION_ARGS)
{
r = a - b;
CHECKFLOATVAL(r, isinf(a) || isinf(b), true);
- PG_RETURN_FLOAT8( Abs(r) );
+ PG_RETURN_FLOAT8(Abs(r));
}
/**************************************************
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
PG_FUNCTION_INFO_V1(int2_dist);
-Datum int2_dist(PG_FUNCTION_ARGS);
+Datum int2_dist(PG_FUNCTION_ARGS);
Datum
int2_dist(PG_FUNCTION_ARGS)
{
- int2 a = PG_GETARG_INT16(0);
- int2 b = PG_GETARG_INT16(1);
+ int2 a = PG_GETARG_INT16(0);
+ int2 b = PG_GETARG_INT16(1);
int2 r;
int2 ra;
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
PG_FUNCTION_INFO_V1(int4_dist);
-Datum int4_dist(PG_FUNCTION_ARGS);
+Datum int4_dist(PG_FUNCTION_ARGS);
Datum
int4_dist(PG_FUNCTION_ARGS)
{
- int4 a = PG_GETARG_INT32(0);
- int4 b = PG_GETARG_INT32(1);
- int4 r;
- int4 ra;
+ int4 a = PG_GETARG_INT32(0);
+ int4 b = PG_GETARG_INT32(1);
+ int4 r;
+ int4 ra;
r = a - b;
ra = Abs(r);
if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("integer out of range")));
+ errmsg("integer out of range")));
PG_RETURN_INT32(ra);
}
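/*
 * Illustrative note, not part of the patch: the SAMESIGN test above
 * catches int4 subtraction overflow.  For example, with a = 2147483647
 * (INT_MAX) and b = -2, r = a - b wraps around to a negative value; a and
 * b have different signs and r does not share a's sign, so the function
 * reports "integer out of range" instead of returning a wrong distance.
 */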
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
PG_FUNCTION_INFO_V1(int8_dist);
-Datum int8_dist(PG_FUNCTION_ARGS);
+Datum int8_dist(PG_FUNCTION_ARGS);
Datum
int8_dist(PG_FUNCTION_ARGS)
{
- int64 a = PG_GETARG_INT64(0);
- int64 b = PG_GETARG_INT64(1);
- int64 r;
- int64 ra;
+ int64 a = PG_GETARG_INT64(0);
+ int64 b = PG_GETARG_INT64(1);
+ int64 r;
+ int64 ra;
r = a - b;
ra = Abs(r);
if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("bigint out of range")));
+ errmsg("bigint out of range")));
PG_RETURN_INT64(ra);
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
static float8
gbt_intv_dist(const void *a, const void *b)
{
- return (float8)Abs(intr2num((Interval*)a) - intr2num((Interval*)b));
+ return (float8) Abs(intr2num((Interval *) a) - intr2num((Interval *) b));
}
/*
}
PG_FUNCTION_INFO_V1(interval_dist);
-Datum interval_dist(PG_FUNCTION_ARGS);
+Datum interval_dist(PG_FUNCTION_ARGS);
Datum
interval_dist(PG_FUNCTION_ARGS)
{
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo)
);
}
PG_FUNCTION_INFO_V1(oid_dist);
-Datum oid_dist(PG_FUNCTION_ARGS);
+Datum oid_dist(PG_FUNCTION_ARGS);
Datum
oid_dist(PG_FUNCTION_ARGS)
{
- Oid a = PG_GETARG_OID(0);
- Oid b = PG_GETARG_OID(1);
- Oid res;
+ Oid a = PG_GETARG_OID(0);
+ Oid b = PG_GETARG_OID(1);
+ Oid res;
if (a < b)
res = b - a;
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
{
const TimeADT *aa = (const TimeADT *) a;
const TimeADT *bb = (const TimeADT *) b;
- Interval *i;
+ Interval *i;
i = DatumGetIntervalP(DirectFunctionCall2(time_mi_time,
TimeADTGetDatumFast(*aa),
PG_FUNCTION_INFO_V1(time_dist);
-Datum time_dist(PG_FUNCTION_ARGS);
+Datum time_dist(PG_FUNCTION_ARGS);
Datum
time_dist(PG_FUNCTION_ARGS)
{
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
{
const Timestamp *aa = (const Timestamp *) a;
const Timestamp *bb = (const Timestamp *) b;
- Interval *i;
+ Interval *i;
if (TIMESTAMP_NOT_FINITE(*aa) || TIMESTAMP_NOT_FINITE(*bb))
return get_float8_infinity();
PG_FUNCTION_INFO_V1(ts_dist);
-Datum ts_dist(PG_FUNCTION_ARGS);
+Datum ts_dist(PG_FUNCTION_ARGS);
Datum
ts_dist(PG_FUNCTION_ARGS)
{
Timestamp a = PG_GETARG_TIMESTAMP(0);
Timestamp b = PG_GETARG_TIMESTAMP(1);
- Interval *r;
+ Interval *r;
if (TIMESTAMP_NOT_FINITE(a) || TIMESTAMP_NOT_FINITE(b))
{
- Interval *p = palloc(sizeof(Interval));
+ Interval *p = palloc(sizeof(Interval));
p->day = INT_MAX;
p->month = INT_MAX;
PG_RETURN_INTERVAL_P(p);
}
else
-
- r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1)));
- PG_RETURN_INTERVAL_P( abs_interval(r) );
+ r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1)));
+ PG_RETURN_INTERVAL_P(abs_interval(r));
}
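/*
 * Illustrative note, not part of the patch: when either timestamp is not
 * finite, the branch above returns a palloc'd interval whose day and month
 * fields are pinned at INT_MAX, effectively the largest representable
 * interval, standing in for an "infinitely large" distance.
 */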
PG_FUNCTION_INFO_V1(tstz_dist);
-Datum tstz_dist(PG_FUNCTION_ARGS);
+Datum tstz_dist(PG_FUNCTION_ARGS);
Datum
tstz_dist(PG_FUNCTION_ARGS)
{
- TimestampTz a = PG_GETARG_TIMESTAMPTZ(0);
- TimestampTz b = PG_GETARG_TIMESTAMPTZ(1);
- Interval *r;
+ TimestampTz a = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz b = PG_GETARG_TIMESTAMPTZ(1);
+ Interval *r;
if (TIMESTAMP_NOT_FINITE(a) || TIMESTAMP_NOT_FINITE(b))
{
- Interval *p = palloc(sizeof(Interval));
+ Interval *p = palloc(sizeof(Interval));
p->day = INT_MAX;
p->month = INT_MAX;
r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
- PG_RETURN_INTERVAL_P( abs_interval(r) );
+ PG_RETURN_INTERVAL_P(abs_interval(r));
}
key.upper = (GBT_NUMKEY *) &kkk->upper;
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
);
}
qqq = tstz_to_ts_gmt(query);
PG_RETURN_FLOAT8(
- gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo)
+ gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo)
);
}
retval = (*tinfo->f_le) (query, key->upper);
break;
case BtreeGistNotEqualStrategyNumber:
- retval = (! ((*tinfo->f_eq) (query, key->lower) &&
- (*tinfo->f_eq) (query, key->upper))) ? true : false;
+ retval = (!((*tinfo->f_eq) (query, key->lower) &&
+ (*tinfo->f_eq) (query, key->upper))) ? true : false;
break;
default:
retval = false;
if (tinfo->f_dist == NULL)
elog(ERROR, "KNN search is not supported for btree_gist type %d",
(int) tinfo->t);
- if ( tinfo->f_le(query, key->lower) )
+ if (tinfo->f_le(query, key->lower))
retval = tinfo->f_dist(query, key->lower);
- else if ( tinfo->f_ge(query, key->upper) )
+ else if (tinfo->f_ge(query, key->upper))
retval = tinfo->f_dist(query, key->upper);
else
retval = 0.0;
bool (*f_le) (const void *, const void *); /* less or equal */
bool (*f_lt) (const void *, const void *); /* less than */
int (*f_cmp) (const void *, const void *); /* key compare function */
- float8 (*f_dist) (const void *, const void *); /* key distance function */
+ float8 (*f_dist) (const void *, const void *); /* key distance function */
} gbtree_ninfo;
#define GET_FLOAT_DISTANCE(t, arg1, arg2) Abs( ((float8) *((const t *) (arg1))) - ((float8) *((const t *) (arg2))) )
-#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))
+#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))
/*
* check to see if a float4/8 val has underflowed or overflowed
const gbtree_ninfo *tinfo);
extern float8 gbt_num_distance(const GBT_NUMKEY_R *key, const void *query,
- bool is_leaf, const gbtree_ninfo *tinfo);
+ bool is_leaf, const gbtree_ninfo *tinfo);
extern GIST_SPLITVEC *gbt_num_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v,
const gbtree_ninfo *tinfo);
|| gbt_var_node_pf_match(key, query, tinfo);
break;
case BtreeGistNotEqualStrategyNumber:
- retval = ! ((*tinfo->f_eq) (query, key->lower) && (*tinfo->f_eq) (query, key->upper));
+ retval = !((*tinfo->f_eq) (query, key->lower) && (*tinfo->f_eq) (query, key->upper));
break;
default:
retval = FALSE;
PG_MODULE_MAGIC;
/* Entrypoint of the module */
-void _PG_init(void);
+void _PG_init(void);
static void
dummy_object_relabel(const ObjectAddress *object, const char *seclabel)
*/
static struct FileFdwOption valid_options[] = {
/* File options */
- { "filename", ForeignTableRelationId },
+ {"filename", ForeignTableRelationId},
/* Format options */
/* oids option is not supported */
- { "format", ForeignTableRelationId },
- { "header", ForeignTableRelationId },
- { "delimiter", ForeignTableRelationId },
- { "quote", ForeignTableRelationId },
- { "escape", ForeignTableRelationId },
- { "null", ForeignTableRelationId },
- { "encoding", ForeignTableRelationId },
+ {"format", ForeignTableRelationId},
+ {"header", ForeignTableRelationId},
+ {"delimiter", ForeignTableRelationId},
+ {"quote", ForeignTableRelationId},
+ {"escape", ForeignTableRelationId},
+ {"null", ForeignTableRelationId},
+ {"encoding", ForeignTableRelationId},
/*
* force_quote is not supported by file_fdw because it's for COPY TO.
*/
/* Sentinel */
- { NULL, InvalidOid }
+ {NULL, InvalidOid}
};
/*
*/
typedef struct FileFdwExecutionState
{
- char *filename; /* file to read */
- List *options; /* merged COPY options, excluding filename */
- CopyState cstate; /* state of reading file */
+ char *filename; /* file to read */
+ List *options; /* merged COPY options, excluding filename */
+ CopyState cstate; /* state of reading file */
} FileFdwExecutionState;
/*
* FDW callback routines
*/
static FdwPlan *filePlanForeignScan(Oid foreigntableid,
- PlannerInfo *root,
- RelOptInfo *baserel);
+ PlannerInfo *root,
+ RelOptInfo *baserel);
static void fileExplainForeignScan(ForeignScanState *node, ExplainState *es);
static void fileBeginForeignScan(ForeignScanState *node, int eflags);
static TupleTableSlot *fileIterateForeignScan(ForeignScanState *node);
static void fileGetOptions(Oid foreigntableid,
char **filename, List **other_options);
static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
- const char *filename,
- Cost *startup_cost, Cost *total_cost);
+ const char *filename,
+ Cost *startup_cost, Cost *total_cost);
/*
/*
* Only superusers are allowed to set options of a file_fdw foreign table.
- * This is because the filename is one of those options, and we don't
- * want non-superusers to be able to determine which file gets read.
+ * This is because the filename is one of those options, and we don't want
+ * non-superusers to be able to determine which file gets read.
*
* Putting this sort of permissions check in a validator is a bit of a
* crock, but there doesn't seem to be any other place that can enforce
* the check more cleanly.
*
- * Note that the valid_options[] array disallows setting filename at
- * any options level other than foreign table --- otherwise there'd
- * still be a security hole.
+ * Note that the valid_options[] array disallows setting filename at any
+ * options level other than foreign table --- otherwise there'd still be a
+ * security hole.
*/
if (catalog == ForeignTableRelationId && !superuser())
ereport(ERROR,
*/
foreach(cell, options_list)
{
- DefElem *def = (DefElem *) lfirst(cell);
+ DefElem *def = (DefElem *) lfirst(cell);
if (!is_valid_option(def->defname, catalog))
{
prev = NULL;
foreach(lc, options)
{
- DefElem *def = (DefElem *) lfirst(lc);
+ DefElem *def = (DefElem *) lfirst(lc);
if (strcmp(def->defname, "filename") == 0)
{
PlannerInfo *root,
RelOptInfo *baserel)
{
- FdwPlan *fdwplan;
+ FdwPlan *fdwplan;
char *filename;
List *options;
fdwplan = makeNode(FdwPlan);
estimate_costs(root, baserel, filename,
&fdwplan->startup_cost, &fdwplan->total_cost);
- fdwplan->fdw_private = NIL; /* not used */
+ fdwplan->fdw_private = NIL; /* not used */
return fdwplan;
}
/* Suppress file size if we're not showing cost details */
if (es->costs)
{
- struct stat stat_buf;
+ struct stat stat_buf;
if (stat(filename, &stat_buf) == 0)
ExplainPropertyLong("Foreign File Size", (long) stat_buf.st_size,
&filename, &options);
/*
- * Create CopyState from FDW options. We always acquire all columns,
- * so as to match the expected ScanTupleSlot signature.
+ * Create CopyState from FDW options. We always acquire all columns, so
+ * as to match the expected ScanTupleSlot signature.
*/
cstate = BeginCopyFrom(node->ss.ss_currentRelation,
filename,
{
FileFdwExecutionState *festate = (FileFdwExecutionState *) node->fdw_state;
TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;
- bool found;
+ bool found;
ErrorContextCallback errcontext;
/* Set up callback to identify error line number. */
/*
* The protocol for loading a virtual tuple into a slot is first
* ExecClearTuple, then fill the values/isnull arrays, then
- * ExecStoreVirtualTuple. If we don't find another row in the file,
- * we just skip the last step, leaving the slot empty as required.
+ * ExecStoreVirtualTuple. If we don't find another row in the file, we
+ * just skip the last step, leaving the slot empty as required.
*
* We can pass ExprContext = NULL because we read all columns from the
* file, so no need to evaluate default expressions.
const char *filename,
Cost *startup_cost, Cost *total_cost)
{
- struct stat stat_buf;
- BlockNumber pages;
- int tuple_width;
- double ntuples;
- double nrows;
- Cost run_cost = 0;
- Cost cpu_per_tuple;
+ struct stat stat_buf;
+ BlockNumber pages;
+ int tuple_width;
+ double ntuples;
+ double nrows;
+ Cost run_cost = 0;
+ Cost cpu_per_tuple;
/*
- * Get size of the file. It might not be there at plan time, though,
- * in which case we have to use a default estimate.
+ * Get size of the file. It might not be there at plan time, though, in
+ * which case we have to use a default estimate.
*/
if (stat(filename, &stat_buf) < 0)
stat_buf.st_size = 10 * BLCKSZ;
/*
* Convert size to pages for use in I/O cost estimate below.
*/
- pages = (stat_buf.st_size + (BLCKSZ-1)) / BLCKSZ;
+ pages = (stat_buf.st_size + (BLCKSZ - 1)) / BLCKSZ;
if (pages < 1)
pages = 1;
ntuples = clamp_row_est((double) stat_buf.st_size / (double) tuple_width);
/*
- * Now estimate the number of rows returned by the scan after applying
- * the baserestrictinfo quals. This is pretty bogus too, since the
- * planner will have no stats about the relation, but it's better than
- * nothing.
+ * Now estimate the number of rows returned by the scan after applying the
+ * baserestrictinfo quals. This is pretty bogus too, since the planner
+ * will have no stats about the relation, but it's better than nothing.
*/
nrows = ntuples *
clauselist_selectivity(root,
baserel->rows = nrows;
/*
- * Now estimate costs. We estimate costs almost the same way as
+ * Now estimate costs. We estimate costs almost the same way as
* cost_seqscan(), thus assuming that I/O costs are equivalent to a
* regular table file of the same size. However, we take per-tuple CPU
* costs as 10x of a seqscan, to account for the cost of parsing records.
*/
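/*
 * Illustrative note, not part of the patch, putting assumed numbers into
 * the arithmetic above: with BLCKSZ = 8192, a 1,048,576-byte file gives
 * pages = (1048576 + 8191) / 8192 = 128; if tuple_width comes out to 128
 * bytes, ntuples = 1048576 / 128 = 8192, and each of those tuples is then
 * charged ten times the normal seqscan per-tuple CPU cost to cover the
 * parsing work.
 */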
#ifdef LEVENSHTEIN_LESS_EQUAL
static int levenshtein_less_equal_internal(text *s, text *t,
- int ins_c, int del_c, int sub_c, int max_d);
+ int ins_c, int del_c, int sub_c, int max_d);
#else
static int levenshtein_internal(text *s, text *t,
int ins_c, int del_c, int sub_c);
* array.
*
* If max_d >= 0, we only need to provide an accurate answer when that answer
- * is less than or equal to the bound. From any cell in the matrix, there is
+ * is less than or equal to the bound. From any cell in the matrix, there is
* theoretical "minimum residual distance" from that cell to the last column
* of the final row. This minimum residual distance is zero when the
* untransformed portions of the strings are of equal length (because we might
/*
* For levenshtein_less_equal_internal, we have real variables called
- * start_column and stop_column; otherwise it's just short-hand for 0
- * and m.
+ * start_column and stop_column; otherwise it's just short-hand for 0 and
+ * m.
*/
#ifdef LEVENSHTEIN_LESS_EQUAL
- int start_column, stop_column;
+ int start_column,
+ stop_column;
+
#undef START_COLUMN
#undef STOP_COLUMN
#define START_COLUMN start_column
stop_column = m + 1;
/*
- * If max_d >= 0, determine whether the bound is impossibly tight. If so,
+ * If max_d >= 0, determine whether the bound is impossibly tight. If so,
* return max_d + 1 immediately. Otherwise, determine whether it's tight
* enough to limit the computation we must perform. If so, figure out
* initial stop column.
*/
if (max_d >= 0)
{
- int min_theo_d; /* Theoretical minimum distance. */
- int max_theo_d; /* Theoretical maximum distance. */
- int net_inserts = n - m;
+ int min_theo_d; /* Theoretical minimum distance. */
+ int max_theo_d; /* Theoretical maximum distance. */
+ int net_inserts = n - m;
min_theo_d = net_inserts < 0 ?
-net_inserts * del_c : net_inserts * ins_c;
else if (ins_c + del_c > 0)
{
/*
- * Figure out how much of the first row of the notional matrix
- * we need to fill in. If the string is growing, the theoretical
+ * Figure out how much of the first row of the notional matrix we
+ * need to fill in. If the string is growing, the theoretical
* minimum distance already incorporates the cost of deleting the
- * number of characters necessary to make the two strings equal
- * in length. Each additional deletion forces another insertion,
- * so the best-case total cost increases by ins_c + del_c.
- * If the string is shrinking, the minimum theoretical cost
- * assumes no excess deletions; that is, we're starting no futher
- * right than column n - m. If we do start further right, the
- * best-case total cost increases by ins_c + del_c for each move
- * right.
+ * number of characters necessary to make the two strings equal in
+ * length. Each additional deletion forces another insertion, so
+ * the best-case total cost increases by ins_c + del_c. If the
+ * string is shrinking, the minimum theoretical cost assumes no
+ * excess deletions; that is, we're starting no further right than
+ * column n - m. If we do start further right, the best-case
+ * total cost increases by ins_c + del_c for each move right.
*/
- int slack_d = max_d - min_theo_d;
- int best_column = net_inserts < 0 ? -net_inserts : 0;
+ int slack_d = max_d - min_theo_d;
+ int best_column = net_inserts < 0 ? -net_inserts : 0;
+
stop_column = best_column + (slack_d / (ins_c + del_c)) + 1;
if (stop_column > m)
stop_column = m + 1;
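/*
 * Illustrative note, not part of the patch, with assumed inputs: for
 * m = 5, n = 3, ins_c = del_c = 1 and max_d = 4, net_inserts = -2, so
 * min_theo_d = 2, slack_d = 4 - 2 = 2, best_column = 2, and
 * stop_column = 2 + 2 / (1 + 1) + 1 = 4, i.e. only the first four
 * columns of the first row need to be filled in.
 */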
/*
* In order to avoid calling pg_mblen() repeatedly on each character in s,
- * we cache all the lengths before starting the main loop -- but if all the
- * characters in both strings are single byte, then we skip this and use
- * a fast-path in the main loop. If only one string contains multi-byte
- * characters, we still build the array, so that the fast-path needn't
- * deal with the case where the array hasn't been initialized.
+ * we cache all the lengths before starting the main loop -- but if all
+ * the characters in both strings are single byte, then we skip this and
+ * use a fast-path in the main loop. If only one string contains
+ * multi-byte characters, we still build the array, so that the fast-path
+ * needn't deal with the case where the array hasn't been initialized.
*/
if (m != s_bytes || n != t_bytes)
{
- int i;
+ int i;
const char *cp = s_data;
s_char_len = (int *) palloc((m + 1) * sizeof(int));
curr = prev + m;
/*
- * To transform the first i characters of s into the first 0 characters
- * of t, we must perform i deletions.
+ * To transform the first i characters of s into the first 0 characters of
+ * t, we must perform i deletions.
*/
for (i = START_COLUMN; i < STOP_COLUMN; i++)
prev[i] = i * del_c;
int y_char_len = n != t_bytes + 1 ? pg_mblen(y) : 1;
#ifdef LEVENSHTEIN_LESS_EQUAL
+
/*
* In the best case, values percolate down the diagonal unchanged, so
* we must increment stop_column unless it's already on the right end
}
/*
- * The main loop fills in curr, but curr[0] needs a special case:
- * to transform the first 0 characters of s into the first j
- * characters of t, we must perform j insertions. However, if
- * start_column > 0, this special case does not apply.
+ * The main loop fills in curr, but curr[0] needs a special case: to
+ * transform the first 0 characters of s into the first j characters
+ * of t, we must perform j insertions. However, if start_column > 0,
+ * this special case does not apply.
*/
if (start_column == 0)
{
*/
ins = prev[i] + ins_c;
del = curr[i - 1] + del_c;
- if (x[x_char_len-1] == y[y_char_len-1]
+ if (x[x_char_len - 1] == y[y_char_len - 1]
&& x_char_len == y_char_len &&
(x_char_len == 1 || rest_of_char_same(x, y, x_char_len)))
sub = prev[i - 1];
y += y_char_len;
#ifdef LEVENSHTEIN_LESS_EQUAL
+
/*
* This chunk of code represents a significant performance hit if used
* in the case where there is no max_d bound. This is probably not
* string, so we want to find the value for zp where (n - 1)
* - j = (m - 1) - zp.
*/
- int zp = j - (n - m);
+ int zp = j - (n - m);
/* Check whether the stop column can slide left. */
while (stop_column > 0)
{
- int ii = stop_column - 1;
- int net_inserts = ii - zp;
+ int ii = stop_column - 1;
+ int net_inserts = ii - zp;
+
if (prev[ii] + (net_inserts > 0 ? net_inserts * ins_c :
- -net_inserts * del_c) <= max_d)
+ -net_inserts * del_c) <= max_d)
break;
stop_column--;
}
/* Check whether the start column can slide right. */
while (start_column < stop_column)
{
- int net_inserts = start_column - zp;
+ int net_inserts = start_column - zp;
+
if (prev[start_column] +
(net_inserts > 0 ? net_inserts * ins_c :
- -net_inserts * del_c) <= max_d)
+ -net_inserts * del_c) <= max_d)
break;
+
/*
- * We'll never again update these values, so we must make
- * sure there's nothing here that could confuse any future
+ * We'll never again update these values, so we must make sure
+ * there's nothing here that could confuse any future
* iteration of the outer loop.
*/
prev[start_column] = max_d + 1;
/*
* When using a GIN index for hstore, we choose to index both keys and values.
* The storage format is "text" values, with K, V, or N prepended to the string
- * to indicate key, value, or null values. (As of 9.1 it might be better to
+ * to indicate key, value, or null values. (As of 9.1 it might be better to
* store null values as nulls, but we'll keep it this way for on-disk
* compatibility.)
*/
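/*
 * Illustrative note, not part of the patch: under this scheme the pair
 * ('a' => '1') would produce index entries along the lines of "Ka" and
 * "V1", while a key with a null value gets an "N"-flagged entry instead of
 * a "V" one.  This is a reading of the comment above, not a dump of actual
 * entries.
 */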
{
/*
* Index doesn't have information about correspondence of keys and
- * values, so we need recheck. However, if not all the keys are
+ * values, so we need recheck. However, if not all the keys are
* present, we can fail at once.
*/
*recheck = true;
if (snullval != HS_VALISNULL(es2, j)
|| (!snullval
&& (svallen != HS_VALLEN(es2, j)
- || memcmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0)))
+ || memcmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0)))
{
HS_COPYITEM(ed, bufd, pd,
HS_KEY(es, ps, i), HS_KEYLEN(es, i),
if (nullval != HS_VALISNULL(ve, idx)
|| (!nullval
&& (vallen != HS_VALLEN(ve, idx)
- || memcmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen))))
+ || memcmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen))))
res = false;
}
else
}
else
{
- long lval;
+ long lval;
nnn[innn] = '\0';
errno = 0;
return FALSE;
/*
- * Set up data for checkcondition_gin. This must agree with the
- * query extraction code in ginint4_queryextract.
+ * Set up data for checkcondition_gin. This must agree with the query
+ * extraction code in ginint4_queryextract.
*/
gcv.first = items;
gcv.mapped_check = (bool *) palloc(sizeof(bool) * query->size);
/*
* If the query doesn't have any required primitive values (for
- * instance, it's something like '! 42'), we have to do a full
- * index scan.
+ * instance, it's something like '! 42'), we have to do a full index
+ * scan.
*/
if (query_has_required_values(query))
*searchMode = GIN_SEARCH_MODE_DEFAULT;
case RTOldContainsStrategyNumber:
if (*nentries > 0)
*searchMode = GIN_SEARCH_MODE_DEFAULT;
- else /* everything contains the empty set */
+ else /* everything contains the empty set */
*searchMode = GIN_SEARCH_MODE_ALL;
break;
default:
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res = FALSE;
*size = (float) ARRNELEMS(a);
}
-/* Sort the given data (len >= 2). Return true if any duplicates found */
+/* Sort the given data (len >= 2). Return true if any duplicates found */
bool
isort(int4 *a, int len)
{
bool r = FALSE;
/*
- * We use a simple insertion sort. While this is O(N^2) in the worst
+ * We use a simple insertion sort. While this is O(N^2) in the worst
* case, it's quite fast if the input is already sorted or nearly so.
* Also, for not-too-large inputs it's faster than more complex methods
* anyhow.
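/*
 * Illustrative sketch, not part of the patch and not the actual isort()
 * body: a plain insertion sort over int4 values that also reports whether
 * any duplicates were seen, as the comment above describes.  The function
 * name is hypothetical.
 */
static bool
isort_sketch(int4 *a, int len)
{
	bool		dup = false;
	int			i,
				j;

	for (i = 1; i < len; i++)
	{
		int4		v = a[i];

		for (j = i; j > 0 && a[j - 1] >= v; j--)
		{
			if (a[j - 1] == v)
				dup = true;		/* equal neighbours => duplicate */
			a[j] = a[j - 1];	/* shift larger element right */
		}
		a[j] = v;
	}
	return dup;
}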
{"10-976000", "10-999999"},
{NULL, NULL},
};
-
#ifdef HAVE_GETOPT_H
#include <getopt.h>
#endif
-#else /* WIN32 */
+#else /* WIN32 */
extern int getopt(int argc, char *const argv[], const char *optstring);
-#endif /* ! WIN32 */
+#endif /* ! WIN32 */
extern char *optarg;
extern int optind;
PGSS_TRACK_NONE, /* track no statements */
PGSS_TRACK_TOP, /* only top level statements */
PGSS_TRACK_ALL /* all statements, including nested ones */
-} PGSSTrackLevel;
+} PGSSTrackLevel;
static const struct config_enum_entry track_options[] =
{
static const char *progname;
-static int ops_per_test = 2000;
-static char full_buf[XLOG_SEG_SIZE], *buf, *filename = FSYNC_FILENAME;
-static struct timeval start_t, stop_t;
-
-
-static void handle_args(int argc, char *argv[]);
-static void prepare_buf(void);
-static void test_open(void);
-static void test_non_sync(void);
-static void test_sync(int writes_per_op);
-static void test_open_syncs(void);
-static void test_open_sync(const char *msg, int writes_size);
-static void test_file_descriptor_sync(void);
+static int ops_per_test = 2000;
+static char full_buf[XLOG_SEG_SIZE],
+ *buf,
+ *filename = FSYNC_FILENAME;
+static struct timeval start_t,
+ stop_t;
+
+
+static void handle_args(int argc, char *argv[]);
+static void prepare_buf(void);
+static void test_open(void);
+static void test_non_sync(void);
+static void test_sync(int writes_per_op);
+static void test_open_syncs(void);
+static void test_open_sync(const char *msg, int writes_size);
+static void test_file_descriptor_sync(void);
+
#ifdef HAVE_FSYNC_WRITETHROUGH
static int pg_fsync_writethrough(int fd);
#endif
-static void print_elapse(struct timeval start_t, struct timeval stop_t);
-static void die(const char *str);
+static void print_elapse(struct timeval start_t, struct timeval stop_t);
+static void die(const char *str);
int
}
while ((option = getopt_long(argc, argv, "f:o:",
- long_options, &optindex)) != -1)
+ long_options, &optindex)) != -1)
{
switch (option)
{
static void
test_sync(int writes_per_op)
{
- int tmpfile, ops, writes;
+ int tmpfile,
+ ops,
+ writes;
bool fs_warning = false;
if (writes_per_op == 1)
static void
test_open_sync(const char *msg, int writes_size)
{
- int tmpfile, ops, writes;
+ int tmpfile,
+ ops,
+ writes;
printf(LABEL_FORMAT, msg);
fflush(stdout);
close(tmpfile);
print_elapse(start_t, stop_t);
}
-
#else
printf(NA_FORMAT, "n/a\n");
#endif
static void
test_file_descriptor_sync(void)
{
- int tmpfile, ops;
+ int tmpfile,
+ ops;
/*
- * Test whether fsync can sync data written on a different
- * descriptor for the same file. This checks the efficiency
- * of multi-process fsyncs against the same file.
- * Possibly this should be done with writethrough on platforms
- * which support it.
+ * Test whether fsync can sync data written on a different descriptor for
+ * the same file. This checks the efficiency of multi-process fsyncs
+ * against the same file. Possibly this should be done with writethrough
+ * on platforms which support it.
*/
printf("\nTest if fsync on non-write file descriptor is honored:\n");
printf("(If the times are similar, fsync() can sync data written\n");
printf("on a different descriptor.)\n");
/*
- * first write, fsync and close, which is the
- * normal behavior without multiple descriptors
+ * first write, fsync and close, which is the normal behavior without
+ * multiple descriptors
*/
printf(LABEL_FORMAT, "write, fsync, close");
fflush(stdout);
if (fsync(tmpfile) != 0)
die("fsync failed");
close(tmpfile);
+
/*
- * open and close the file again to be consistent
- * with the following test
+ * open and close the file again to be consistent with the following
+ * test
*/
if ((tmpfile = open(filename, O_RDWR, 0)) == -1)
die("could not open output file");
print_elapse(start_t, stop_t);
/*
- * Now open, write, close, open again and fsync
- * This simulates processes fsyncing each other's
- * writes.
+ * Now open, write, close, open again and fsync This simulates processes
+ * fsyncing each other's writes.
*/
printf(LABEL_FORMAT, "write, close, fsync");
fflush(stdout);
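/*
 * Illustrative sketch, not part of the patch, of the pattern the comments
 * above describe: write through one descriptor, then fsync through a
 * second descriptor opened on the same file, to see whether the kernel
 * honors the sync for data written elsewhere.  Error handling and timing
 * are omitted; the function and parameter names are hypothetical.
 */
static void
cross_descriptor_fsync_sketch(const char *path, const char *data, size_t len)
{
	int			wfd = open(path, O_RDWR, 0);	/* descriptor used for the write */
	int			sfd;

	write(wfd, data, len);
	close(wfd);

	sfd = open(path, O_RDWR, 0);	/* separate descriptor, same file */
	fsync(sfd);					/* does this flush the data written above? */
	close(sfd);
}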
static void
test_non_sync(void)
{
- int tmpfile, ops;
+ int tmpfile,
+ ops;
/*
* Test a simple write without fsync
return -1;
#endif
}
-
#endif
/*
#endif
#define ISPRINTABLETRGM(t) ( ISPRINTABLECHAR( ((char*)(t)) ) && ISPRINTABLECHAR( ((char*)(t))+1 ) && ISPRINTABLECHAR( ((char*)(t))+2 ) )
-#define ISESCAPECHAR(x) (*(x) == '\\') /* Wildcard escape character */
-#define ISWILDCARDCHAR(x) (*(x) == '_' || *(x) == '%') /* Wildcard meta-character */
+#define ISESCAPECHAR(x) (*(x) == '\\') /* Wildcard escape character */
+#define ISWILDCARDCHAR(x) (*(x) == '_' || *(x) == '%') /* Wildcard
+ * meta-character */
typedef struct
{
float4 cnt_sml(TRGM *trg1, TRGM *trg2);
bool trgm_contained_by(TRGM *trg1, TRGM *trg2);
-#endif /* __TRGM_H__ */
+#endif /* __TRGM_H__ */
ptr = GETARR(trg);
for (i = 0; i < trglen; i++)
{
- int32 item = trgm2int(ptr);
+ int32 item = trgm2int(ptr);
entries[i] = Int32GetDatum(item);
ptr++;
text *val = (text *) PG_GETARG_TEXT_P(0);
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
StrategyNumber strategy = PG_GETARG_UINT16(2);
- /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */
- /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
- /* bool **nullFlags = (bool **) PG_GETARG_POINTER(5); */
- int32 *searchMode = (int32 *) PG_GETARG_POINTER(6);
+
+ /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */
+ /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
+ /* bool **nullFlags = (bool **) PG_GETARG_POINTER(5); */
+ int32 *searchMode = (int32 *) PG_GETARG_POINTER(6);
Datum *entries = NULL;
TRGM *trg;
int32 trglen;
#endif
/* FALL THRU */
case LikeStrategyNumber:
+
/*
* For wildcard search we extract all the trigrams that every
* potentially-matching string must include.
break;
default:
elog(ERROR, "unrecognized strategy number: %d", strategy);
- trg = NULL; /* keep compiler quiet */
+ trg = NULL; /* keep compiler quiet */
break;
}
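/*
 * Illustrative note, not part of the patch: for a pattern such as
 * '%foobar%', any matching string must contain "foobar", so it must at
 * least contain the interior trigrams "foo", "oob", "oba" and "bar";
 * whether padded boundary trigrams are added as well depends on where the
 * wildcard characters sit.
 */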
ptr = GETARR(trg);
for (i = 0; i < trglen; i++)
{
- int32 item = trgm2int(ptr);
+ int32 item = trgm2int(ptr);
entries[i] = Int32GetDatum(item);
ptr++;
{
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* text *query = PG_GETARG_TEXT_P(2); */
int32 nkeys = PG_GETARG_INT32(3);
- /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
+
+ /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res;
int32 i,
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
text *query = PG_GETARG_TEXT_P(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
bool *recheck = (bool *) PG_GETARG_POINTER(4);
TRGM *key = (TRGM *) DatumGetPointer(entry->key);
TRGM *qtrg;
bool res;
char *cache = (char *) fcinfo->flinfo->fn_extra,
- *cacheContents = cache + MAXALIGN(sizeof(StrategyNumber));
+ *cacheContents = cache + MAXALIGN(sizeof(StrategyNumber));
/*
* Store both the strategy number and extracted trigrams in cache, because
- * trigram extraction is relatively CPU-expensive. We must include
+ * trigram extraction is relatively CPU-expensive. We must include
* strategy number because trigram extraction depends on strategy.
*/
if (cache == NULL || strategy != *((StrategyNumber *) cache) ||
break;
default:
elog(ERROR, "unrecognized strategy number: %d", strategy);
- qtrg = NULL; /* keep compiler quiet */
+ qtrg = NULL; /* keep compiler quiet */
break;
}
*recheck = false;
if (GIST_LEAF(entry))
- { /* all leafs contains orig trgm */
- float4 tmpsml = cnt_sml(key, qtrg);
+ { /* all leafs contains orig trgm */
+ float4 tmpsml = cnt_sml(key, qtrg);
/* strange bug at freebsd 5.2.1 and gcc 3.3.3 */
res = (*(int *) &tmpsml == *(int *) &trgm_limit || tmpsml > trgm_limit) ? true : false;
}
else if (ISALLTRUE(key))
- { /* non-leaf contains signature */
+ { /* non-leaf contains signature */
res = true;
}
else
- { /* non-leaf contains signature */
- int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
- int4 len = ARRNELEM(qtrg);
+ { /* non-leaf contains signature */
+ int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
+ int4 len = ARRNELEM(qtrg);
if (len == 0)
res = false;
* nodes.
*/
if (GIST_LEAF(entry))
- { /* all leafs contains orig trgm */
+ { /* all leafs contains orig trgm */
res = trgm_contained_by(qtrg, key);
}
else if (ISALLTRUE(key))
- { /* non-leaf contains signature */
+ { /* non-leaf contains signature */
res = true;
}
else
- { /* non-leaf contains signature */
- int32 k,
- tmp = 0,
- len = ARRNELEM(qtrg);
- trgm *ptr = GETARR(qtrg);
- BITVECP sign = GETSIGN(key);
+ { /* non-leaf contains signature */
+ int32 k,
+ tmp = 0,
+ len = ARRNELEM(qtrg);
+ trgm *ptr = GETARR(qtrg);
+ BITVECP sign = GETSIGN(key);
res = true;
for (k = 0; k < len; k++)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
text *query = PG_GETARG_TEXT_P(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
TRGM *key = (TRGM *) DatumGetPointer(entry->key);
TRGM *qtrg;
{
case DistanceStrategyNumber:
if (GIST_LEAF(entry))
- { /* all leafs contains orig trgm */
+ { /* all leafs contains orig trgm */
res = 1.0 - cnt_sml(key, qtrg);
}
else if (ISALLTRUE(key))
- { /* all leafs contains orig trgm */
+ { /* all leafs contains orig trgm */
res = 0.0;
}
else
- { /* non-leaf contains signature */
- int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
- int4 len = ARRNELEM(qtrg);
+ { /* non-leaf contains signature */
+ int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
+ int4 len = ARRNELEM(qtrg);
res = (len == 0) ? -1.0 : 1.0 - ((float8) count) / ((float8) len);
}
const char *beginword = str;
const char *endword;
char *s = buf;
- bool in_wildcard_meta = false;
- bool in_escape = false;
- int clen;
+ bool in_wildcard_meta = false;
+ bool in_escape = false;
+ int clen;
/*
* Find the first word character remembering whether last character was
{
TRGM *trg;
char *buf,
- *buf2;
+ *buf2;
trgm *tptr;
int len,
charlen,
bytelen;
const char *eword;
- trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) * 3);
+ trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) *3);
trg->flag = ARRKEY;
SET_VARSIZE(trg, TRGMHDRSIZE);
float4 res = DatumGetFloat4(DirectFunctionCall2(similarity,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)));
+
PG_RETURN_FLOAT4(1.0 - res);
}
old_cluster.major_version = get_major_server_version(&old_cluster);
new_cluster.major_version = get_major_server_version(&new_cluster);
- /* We allow upgrades from/to the same major version for alpha/beta upgrades */
+ /*
+ * We allow upgrades from/to the same major version for alpha/beta
+ * upgrades
+ */
if (GET_MAJOR_VERSION(old_cluster.major_version) < 803)
pg_log(PG_FATAL, "This utility can only upgrade from PostgreSQL version 8.3 and later.\n");
}
if (script)
- fclose(script);
+ fclose(script);
if (found)
{
"\nOld and new pg_controldata date/time storage types do not match.\n");
/*
- * This is a common 8.3 -> 8.4 upgrade problem, so we are more
- * verbose
+ * This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
*/
pg_log(PG_FATAL,
"You will need to rebuild the new server with configure\n"
static void check_data_dir(const char *pg_data);
static void check_bin_dir(ClusterInfo *cluster);
-static void validate_exec(const char *dir, const char *cmdName);
+static void validate_exec(const char *dir, const char *cmdName);
/*
else
return 0;
}
+
#endif
void
install_support_functions_in_new_db(const char *db_name)
{
- PGconn *conn = connectToServer(&new_cluster, db_name);
-
+ PGconn *conn = connectToServer(&new_cluster, db_name);
+
/* suppress NOTICE of dropped objects */
PQclear(executeQueryOrDie(conn,
"SET client_min_messages = warning;"));
PQclear(executeQueryOrDie(conn,
- "DROP SCHEMA IF EXISTS binary_upgrade CASCADE;"));
+ "DROP SCHEMA IF EXISTS binary_upgrade CASCADE;"));
PQclear(executeQueryOrDie(conn,
"RESET client_min_messages;"));
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
- "binary_upgrade.set_next_array_pg_type_oid(OID) "
+ "binary_upgrade.set_next_array_pg_type_oid(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
- "binary_upgrade.set_next_toast_pg_type_oid(OID) "
+ "binary_upgrade.set_next_toast_pg_type_oid(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
- "binary_upgrade.set_next_heap_pg_class_oid(OID) "
+ "binary_upgrade.set_next_heap_pg_class_oid(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
- "binary_upgrade.set_next_index_pg_class_oid(OID) "
+ "binary_upgrade.set_next_index_pg_class_oid(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
PQclear(executeQueryOrDie(conn,
"CREATE OR REPLACE FUNCTION "
- "binary_upgrade.set_next_toast_pg_class_oid(OID) "
+ "binary_upgrade.set_next_toast_pg_class_oid(OID) "
"RETURNS VOID "
"AS '$libdir/pg_upgrade_support' "
"LANGUAGE C STRICT;"));
static void create_rel_filename_map(const char *old_data, const char *new_data,
- const DbInfo *old_db, const DbInfo *new_db,
- const RelInfo *old_rel, const RelInfo *new_rel,
- FileNameMap *map);
+ const DbInfo *old_db, const DbInfo *new_db,
+ const RelInfo *old_rel, const RelInfo *new_rel,
+ FileNameMap *map);
static void get_db_infos(ClusterInfo *cluster);
static void get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo);
static void free_rel_infos(RelInfoArr *rel_arr);
if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
pg_log(PG_FATAL, "old and new databases \"%s\" have a different number of relations\n",
- old_db->db_name);
+ old_db->db_name);
maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) *
old_db->rel_arr.nrels);
if (old_rel->reloid != new_rel->reloid)
pg_log(PG_FATAL, "Mismatch of relation id: database \"%s\", old relid %d, new relid %d\n",
- old_db->db_name, old_rel->reloid, new_rel->reloid);
+ old_db->db_name, old_rel->reloid, new_rel->reloid);
/*
- * In pre-8.4, TOAST table names change during CLUSTER; in >= 8.4
- * TOAST relation names always use heap table oids, hence we
- * cannot check relation names when upgrading from pre-8.4.
+ * In pre-8.4, TOAST table names change during CLUSTER; in >= 8.4
+ * TOAST relation names always use heap table oids, hence we cannot
+ * check relation names when upgrading from pre-8.4.
*/
if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
((GET_MAJOR_VERSION(old_cluster.major_version) >= 804 ||
strcmp(old_rel->nspname, "pg_toast") != 0) &&
strcmp(old_rel->relname, new_rel->relname) != 0))
pg_log(PG_FATAL, "Mismatch of relation names: database \"%s\", "
- "old rel %s.%s, new rel %s.%s\n",
- old_db->db_name, old_rel->nspname, old_rel->relname,
- new_rel->nspname, new_rel->relname);
+ "old rel %s.%s, new rel %s.%s\n",
+ old_db->db_name, old_rel->nspname, old_rel->relname,
+ new_rel->nspname, new_rel->relname);
create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db,
- old_rel, new_rel, maps + num_maps);
+ old_rel, new_rel, maps + num_maps);
num_maps++;
}
*/
static void
create_rel_filename_map(const char *old_data, const char *new_data,
- const DbInfo *old_db, const DbInfo *new_db,
- const RelInfo *old_rel, const RelInfo *new_rel,
- FileNameMap *map)
+ const DbInfo *old_db, const DbInfo *new_db,
+ const RelInfo *old_rel, const RelInfo *new_rel,
+ FileNameMap *map)
{
if (strlen(old_rel->tablespace) == 0)
{
}
/*
- * old_relfilenode might differ from pg_class.oid (and hence
- * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL.
+ * old_relfilenode might differ from pg_class.oid (and hence
+ * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL.
*/
map->old_relfilenode = old_rel->relfilenode;
int ntups;
int tupnum;
DbInfo *dbinfos;
- int i_datname, i_oid, i_spclocation;
+ int i_datname,
+ i_oid,
+ i_spclocation;
res = executeQueryOrDie(conn,
"SELECT d.oid, d.datname, t.spclocation "
int num_rels = 0;
char *nspname = NULL;
char *relname = NULL;
- int i_spclocation, i_nspname, i_relname, i_oid, i_relfilenode;
+ int i_spclocation,
+ i_nspname,
+ i_relname,
+ i_oid,
+ i_relfilenode;
char query[QUERY_ALLOC];
/*
* pg_largeobject contains user data that does not appear in pg_dumpall
* --schema-only output, so we have to copy that system table heap and
- * index. We could grab the pg_largeobject oids from template1, but
- * it is easy to treat it as a normal table.
- * Order by oid so we can join old/new structures efficiently.
+ * index. We could grab the pg_largeobject oids from template1, but it is
+ * easy to treat it as a normal table. Order by oid so we can join old/new
+ * structures efficiently.
*/
snprintf(query, sizeof(query),
" ((n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND "
" c.oid >= %u) "
" OR (n.nspname = 'pg_catalog' AND "
- " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) "
+ " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) "
/* we preserve pg_class.oid so we sort by it to match old/new */
"ORDER BY 1;",
/* see the comment at the top of old_8_3_create_sequence_script() */
FirstNormalObjectId,
/* does pg_largeobject_metadata need to be migrated? */
(GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
- "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'");
+ "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'");
res = executeQueryOrDie(conn, query);
* FYI, while pg_class.oid and pg_class.relfilenode are initially the same
* in a cluster, but they can diverge due to CLUSTER, REINDEX, or VACUUM
* FULL. The new cluster will have matching pg_class.oid and
- * pg_class.relfilenode values and be based on the old oid value. This can
+ * pg_class.relfilenode values and be based on the old oid value. This can
* cause the old and new pg_class.relfilenode values to differ. In summary,
* old and new pg_class.oid and new pg_class.relfilenode will have the
* same value, and old pg_class.relfilenode might differ.
*/
-
+
#include "pg_upgrade.h"
#ifdef HAVE_LANGINFO_H
/* This is the database used by pg_dumpall to restore global tables */
#define GLOBAL_DUMP_DB "postgres"
-ClusterInfo old_cluster, new_cluster;
+ClusterInfo old_cluster,
+ new_cluster;
OSInfo os_info;
int
exec_prog(true,
SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
"--all --analyze >> %s 2>&1" SYSTEMQUOTE,
- new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
+ new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
check_ok();
/*
exec_prog(true,
SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
"--all --freeze >> %s 2>&1" SYSTEMQUOTE,
- new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
+ new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
check_ok();
get_pg_database_relfilenode(&new_cluster);
prep_status("Creating databases in the new cluster");
/*
- * Install support functions in the global-restore database
- * to preserve pg_authid.oid.
+ * Install support functions in the global-restore database to preserve
+ * pg_authid.oid.
*/
install_support_functions_in_new_db(GLOBAL_DUMP_DB);
/*
* We have to create the databases first so we can install support
- * functions in all the other databases. Ideally we could create
- * the support functions in template1 but pg_dumpall creates database
- * using the template0 template.
+ * functions in all the other databases. Ideally we could create the
+ * support functions in template1 but pg_dumpall creates database using
+ * the template0 template.
*/
exec_prog(true,
SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on "
{
char old_dir[MAXPGPATH];
char new_dir[MAXPGPATH];
+
/*
* old/new relfilenodes might differ for pg_largeobject(_metadata) indexes
* due to VACUUM FULL or REINDEX. Other relfilenodes are preserved.
Oid old_relfilenode;
Oid new_relfilenode;
/* the rest are used only for logging and error reporting */
- char nspname[NAMEDATALEN]; /* namespaces */
+ char nspname[NAMEDATALEN]; /* namespaces */
char relname[NAMEDATALEN];
} FileNameMap;
char *bindir; /* pathname for cluster's executable directory */
unsigned short port; /* port number where postmaster is waiting */
uint32 major_version; /* PG_VERSION of cluster */
- char major_version_str[64]; /* string PG_VERSION of cluster */
+ char major_version_str[64]; /* string PG_VERSION of cluster */
Oid pg_database_oid; /* OID of pg_database relation */
char *libpath; /* pathname for cluster's pkglibdir */
char *tablespace_suffix; /* directory specification */
/*
* Global variables
*/
-extern LogOpts log_opts;
+extern LogOpts log_opts;
extern UserOpts user_opts;
-extern ClusterInfo old_cluster, new_cluster;
+extern ClusterInfo old_cluster,
+ new_cluster;
extern OSInfo os_info;
extern char scandir_file_pattern[];
char **sequence_script_file_name);
void check_new_cluster(void);
void report_clusters_compatible(void);
-void issue_warnings(char *sequence_script_file_name);
-void output_completion_banner(char *deletion_script_file_name);
+void issue_warnings(char *sequence_script_file_name);
+void output_completion_banner(char *deletion_script_file_name);
void check_cluster_versions(void);
void check_cluster_compatibility(bool live_check);
void create_script_for_old_cluster_deletion(char **deletion_script_file_name);
int dir_matching_filenames(const struct dirent * scan_ent);
int pg_scandir(const char *dirname, struct dirent *** namelist,
- int (*selector) (const struct dirent *));
+ int (*selector) (const struct dirent *));
const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
const char *dst, bool force);
const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
- const char *dst);
+ const char *dst);
void check_hard_link(void);
FileNameMap *gen_db_file_maps(DbInfo *old_db,
DbInfo *new_db, int *nmaps, const char *old_pgdata,
const char *new_pgdata);
-void get_db_and_rel_infos(ClusterInfo *cluster);
+void get_db_and_rel_infos(ClusterInfo *cluster);
void free_db_and_rel_infos(DbInfoArr *db_arr);
-void print_maps(FileNameMap *maps, int n,
- const char *db_name);
+void print_maps(FileNameMap *maps, int n,
+ const char *db_name);
/* option.c */
/* server.c */
-PGconn *connectToServer(ClusterInfo *cluster, const char *db_name);
-PGresult *executeQueryOrDie(PGconn *conn, const char *fmt,...);
+PGconn *connectToServer(ClusterInfo *cluster, const char *db_name);
+PGresult *executeQueryOrDie(PGconn *conn, const char *fmt,...);
void start_postmaster(ClusterInfo *cluster, bool quiet);
void stop_postmaster(bool fast, bool quiet);
-uint32 get_major_server_version(ClusterInfo *cluster);
+uint32 get_major_server_version(ClusterInfo *cluster);
void check_for_libpq_envvars(void);
/* version.c */
void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster,
- bool check_mode);
+ bool check_mode);
/* version_old_8_3.c */
void old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster);
void old_8_3_check_for_tsquery_usage(ClusterInfo *cluster);
-void old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode);
-void old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode);
+void old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode);
+void old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode);
void old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
- bool check_mode);
+ bool check_mode);
char *old_8_3_create_sequence_script(ClusterInfo *cluster);
*/
const char *
transfer_all_new_dbs(DbInfoArr *old_db_arr,
- DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
+ DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
{
int dbnum;
const char *msg = NULL;
if (old_db_arr->ndbs != new_db_arr->ndbs)
pg_log(PG_FATAL, "old and new clusters have a different number of databases\n");
-
+
for (dbnum = 0; dbnum < old_db_arr->ndbs; dbnum++)
{
DbInfo *old_db = &old_db_arr->dbs[dbnum];
if (strcmp(old_db->db_name, new_db->db_name) != 0)
pg_log(PG_FATAL, "old and new databases have different names: old \"%s\", new \"%s\"\n",
- old_db->db_name, new_db->db_name);
-
+ old_db->db_name, new_db->db_name);
+
n_maps = 0;
mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata,
new_pgdata);
for (fileno = 0; fileno < numFiles; fileno++)
{
if (strncmp(namelist[fileno]->d_name, scandir_file_pattern,
- strlen(scandir_file_pattern)) == 0)
+ strlen(scandir_file_pattern)) == 0)
{
snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir,
namelist[fileno]->d_name);
unlink(new_file);
transfer_relfile(pageConverter, old_file, new_file,
- maps[mapnum].nspname, maps[mapnum].relname);
+ maps[mapnum].nspname, maps[mapnum].relname);
}
}
}
for (fileno = 0; fileno < numFiles; fileno++)
{
if (strncmp(namelist[fileno]->d_name, scandir_file_pattern,
- strlen(scandir_file_pattern)) == 0)
+ strlen(scandir_file_pattern)) == 0)
{
snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir,
namelist[fileno]->d_name);
unlink(new_file);
transfer_relfile(pageConverter, old_file, new_file,
- maps[mapnum].nspname, maps[mapnum].relname);
+ maps[mapnum].nspname, maps[mapnum].relname);
}
}
}
*/
static void
transfer_relfile(pageCnvCtx *pageConverter, const char *old_file,
- const char *new_file, const char *nspname, const char *relname)
+ const char *new_file, const char *nspname, const char *relname)
{
const char *msg;
if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
pg_log(PG_FATAL,
- "error while creating link from %s.%s (%s to %s): %s\n",
+ "error while creating link from %s.%s (%s to %s): %s\n",
nspname, relname, old_file, new_file, msg);
}
return;
* because it is being used by another process." so we have to send all
* other output to 'nul'.
*
- * Using autovacuum=off disables cleanup vacuum and analyze, but
- * freeze vacuums can still happen, so we set
- * autovacuum_freeze_max_age to its maximum. We assume all datfrozenxid
- * and relfrozen values are less than a gap of 2000000000 from the current
- * xid counter, so autovacuum will not touch them.
- */
+ * Using autovacuum=off disables cleanup vacuum and analyze, but freeze
+ * vacuums can still happen, so we set autovacuum_freeze_max_age to its
+ * maximum. We assume all datfrozenxid and relfrozen values are less than
+ * a gap of 2000000000 from the current xid counter, so autovacuum will
+ * not touch them.
+ */
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" "
"-o \"-p %d -c autovacuum=off "
"\"%s\" 2>&1" SYSTEMQUOTE,
bindir,
#ifndef WIN32
- log_opts.filename, datadir, fast ? "-m fast" : "", log_opts.filename);
+ log_opts.filename, datadir, fast ? "-m fast" : "", log_opts.filename);
#else
DEVNULL, datadir, fast ? "-m fast" : "", DEVNULL);
#endif
{
/* This cluster has a version-specific subdirectory */
cluster->tablespace_suffix = pg_malloc(4 +
- strlen(cluster->major_version_str) +
- 10 /* OIDCHARS */ + 1);
+ strlen(cluster->major_version_str) +
+ 10 /* OIDCHARS */ + 1);
/* The leading slash is needed to start a new directory. */
sprintf(cluster->tablespace_suffix, "/PG_%s_%d", cluster->major_version_str,
#include <signal.h>
-LogOpts log_opts;
+LogOpts log_opts;
/*
* report_status()
/* Rebuild all tsvector columns with one ALTER TABLE command */
if (strcmp(PQgetvalue(res, rowno, i_nspname), nspname) != 0 ||
- strcmp(PQgetvalue(res, rowno, i_relname), relname) != 0)
+ strcmp(PQgetvalue(res, rowno, i_relname), relname) != 0)
{
if (strlen(nspname) != 0 || strlen(relname) != 0)
fprintf(script, ";\n\n");
&textDatums, NULL, &ndatums);
for (i = 0; i < ndatums; i++)
{
- text *txtname = DatumGetTextPP(textDatums[i]);
- char *extName = text_to_cstring(txtname);
- Oid extOid = get_extension_oid(extName, false);
+ text *txtname = DatumGetTextPP(textDatums[i]);
+ char *extName = text_to_cstring(txtname);
+ Oid extOid = get_extension_oid(extName, false);
requiredExtensions = lappend_oid(requiredExtensions, extOid);
}
InsertExtensionTuple(text_to_cstring(extName),
GetUserId(),
- get_namespace_oid(text_to_cstring(schemaName), false),
+ get_namespace_oid(text_to_cstring(schemaName), false),
relocatable,
text_to_cstring(extVersion),
extConfig,
typedef struct win32_pthread *pthread_t;
typedef int pthread_attr_t;
-static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
+static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return);
#elif defined(ENABLE_THREAD_SAFETY)
/* Use platform-dependent pthread capability */
typedef struct fork_pthread *pthread_t;
typedef int pthread_attr_t;
-static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
+static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return);
#endif
INSTR_TIME_SET_CURRENT(now);
INSTR_TIME_ACCUM_DIFF(thread->exec_elapsed[cnum],
- now, st->stmt_begin);
+ now, st->stmt_begin);
thread->exec_count[cnum]++;
}
if (commands[st->state]->type == SQL_COMMAND)
{
/*
- * Read and discard the query result; note this is not included
- * in the statement latency numbers.
+ * Read and discard the query result; note this is not included in
+ * the statement latency numbers.
*/
res = PQgetResult(st->con);
switch (PQresultStatus(res))
for (i = 0; i < num_files; i++)
{
- Command **commands;
+ Command **commands;
if (num_files > 1)
- printf("statement latencies in milliseconds, file %d:\n", i+1);
+ printf("statement latencies in milliseconds, file %d:\n", i + 1);
else
printf("statement latencies in milliseconds:\n");
for (commands = sql_files[i]; *commands != NULL; commands++)
{
- Command *command = *commands;
+ Command *command = *commands;
int cnum = command->command_num;
double total_time;
instr_time total_exec_elapsed;
total_exec_count = 0;
for (t = 0; t < nthreads; t++)
{
- TState *thread = &threads[t];
+ TState *thread = &threads[t];
INSTR_TIME_ADD(total_exec_elapsed,
thread->exec_elapsed[cnum]);
* is_latencies only works with multiple threads in thread-based
* implementations, not fork-based ones, because it supposes that the
* parent can see changes made to the per-thread execution stats by child
- * threads. It seems useful enough to accept despite this limitation,
- * but perhaps we should FIXME someday (by passing the stats data back
- * up through the parent-to-child pipes).
+ * threads. It seems useful enough to accept despite this limitation, but
+ * perhaps we should FIXME someday (by passing the stats data back up
+ * through the parent-to-child pipes).
*/
#ifndef ENABLE_THREAD_SAFETY
if (is_latencies && nthreads > 1)
threads = (TState *) xmalloc(sizeof(TState) * nthreads);
for (i = 0; i < nthreads; i++)
{
- TState *thread = &threads[i];
+ TState *thread = &threads[i];
thread->tid = i;
thread->state = &state[nclients / nthreads * i];
if (is_latencies)
{
/* Reserve memory for the thread to store per-command latencies */
- int t;
+ int t;
thread->exec_elapsed = (instr_time *)
xmalloc(sizeof(instr_time) * num_commands);
/* start threads */
for (i = 0; i < nthreads; i++)
{
- TState *thread = &threads[i];
+ TState *thread = &threads[i];
INSTR_TIME_SET_CURRENT(thread->start_time);
static int
pthread_create(pthread_t *thread,
- pthread_attr_t *attr,
+ pthread_attr_t * attr,
void *(*start_routine) (void *),
void *arg)
{
void *(*routine) (void *);
void *arg;
void *result;
-} win32_pthread;
+} win32_pthread;
static unsigned __stdcall
win32_pthread_run(void *arg)
static int
pthread_create(pthread_t *thread,
- pthread_attr_t *attr,
+ pthread_attr_t * attr,
void *(*start_routine) (void *),
void *arg)
{
{
seg = (SEG *) DatumGetPointer(entryvec->vector[i].key);
/* center calculation is done this way to avoid possible overflow */
- sort_items[i - 1].center = seg->lower*0.5f + seg->upper*0.5f;
+ sort_items[i - 1].center = seg->lower * 0.5f + seg->upper * 0.5f;
sort_items[i - 1].index = i;
sort_items[i - 1].data = seg;
}
result = bms_copy(columns);
result = bms_del_member(result, index);
- for (attno=1; attno <= natts; attno++)
+ for (attno = 1; attno <= natts; attno++)
{
tuple = SearchSysCache2(ATTNUM,
ObjectIdGetDatum(relOid),
while ((index = bms_first_member(tmpset)) > 0)
{
attno = index + FirstLowInvalidHeapAttributeNumber;
+
/*
* whole-row-reference shall be fixed-up later
*/
bool result = true;
/*
- * Hardwired Policies:
- * SE-PostgreSQL enforces
- * - clients cannot modify system catalogs using DMLs
- * - clients cannot reference/modify toast relations using DMLs
+ * Hardwired Policies: SE-PostgreSQL enforces - clients cannot modify
+ * system catalogs using DMLs - clients cannot reference/modify toast
+ * relations using DMLs
*/
if (sepgsql_getenforce() > 0)
{
- Oid relnamespace = get_rel_namespace(relOid);
+ Oid relnamespace = get_rel_namespace(relOid);
if (IsSystemNamespace(relnamespace) &&
(required & (SEPG_DB_TABLE__UPDATE |
{
AttrNumber attnum;
uint32 column_perms = 0;
- ObjectAddress object;
+ ObjectAddress object;
if (bms_is_member(index, selected))
column_perms |= SEPG_DB_COLUMN__SELECT;
{
ListCell *lr;
- foreach (lr, rangeTabls)
+ foreach(lr, rangeTabls)
{
- RangeTblEntry *rte = lfirst(lr);
- uint32 required = 0;
- List *tableIds;
- ListCell *li;
+ RangeTblEntry *rte = lfirst(lr);
+ uint32 required = 0;
+ List *tableIds;
+ ListCell *li;
/*
* Only regular relations shall be checked
/*
* If this RangeTblEntry is also supposed to reference inherited
- * tables, we need to check security label of the child tables.
- * So, we expand rte->relid into list of OIDs of inheritance
- * hierarchy, then checker routine will be invoked for each
- * relations.
+ * tables, we need to check security label of the child tables. So, we
+ * expand rte->relid into list of OIDs of inheritance hierarchy, then
+ * checker routine will be invoked for each relations.
*/
if (!rte->inh)
tableIds = list_make1_oid(rte->relid);
else
tableIds = find_all_inheritors(rte->relid, NoLock, NULL);
- foreach (li, tableIds)
+ foreach(li, tableIds)
{
Oid tableOid = lfirst_oid(li);
Bitmapset *selectedCols;
Bitmapset *modifiedCols;
/*
- * child table has different attribute numbers, so we need
- * to fix up them.
+ * child table has different attribute numbers, so we need to fix
+ * up them.
*/
selectedCols = fixup_inherited_columns(rte->relid, tableOid,
rte->selectedCols);
/*
* Declarations
*/
-void _PG_init(void);
+void _PG_init(void);
/*
* Saved hook entries (if stacked)
*/
-static object_access_hook_type next_object_access_hook = NULL;
-static ClientAuthentication_hook_type next_client_auth_hook = NULL;
-static ExecutorCheckPerms_hook_type next_exec_check_perms_hook = NULL;
-static needs_fmgr_hook_type next_needs_fmgr_hook = NULL;
-static fmgr_hook_type next_fmgr_hook = NULL;
-static ProcessUtility_hook_type next_ProcessUtility_hook = NULL;
+static object_access_hook_type next_object_access_hook = NULL;
+static ClientAuthentication_hook_type next_client_auth_hook = NULL;
+static ExecutorCheckPerms_hook_type next_exec_check_perms_hook = NULL;
+static needs_fmgr_hook_type next_needs_fmgr_hook = NULL;
+static fmgr_hook_type next_fmgr_hook = NULL;
+static ProcessUtility_hook_type next_ProcessUtility_hook = NULL;
/*
* GUC: sepgsql.permissive = (on|off)
static void
sepgsql_client_auth(Port *port, int status)
{
- char *context;
+ char *context;
if (next_client_auth_hook)
- (*next_client_auth_hook)(port, status);
+ (*next_client_auth_hook) (port, status);
/*
- * In the case when authentication failed, the supplied socket
- * shall be closed soon, so we don't need to do anything here.
+ * In the case when authentication failed, the supplied socket shall be
+ * closed soon, so we don't need to do anything here.
*/
if (status != STATUS_OK)
return;
sepgsql_set_client_label(context);
/*
- * Switch the current performing mode from INTERNAL to either
- * DEFAULT or PERMISSIVE.
+ * Switch the current performing mode from INTERNAL to either DEFAULT or
+ * PERMISSIVE.
*/
if (sepgsql_permissive)
sepgsql_set_mode(SEPGSQL_MODE_PERMISSIVE);
*/
static void
sepgsql_object_access(ObjectAccessType access,
- Oid classId,
- Oid objectId,
- int subId)
+ Oid classId,
+ Oid objectId,
+ int subId)
{
if (next_object_access_hook)
- (*next_object_access_hook)(access, classId, objectId, subId);
+ (*next_object_access_hook) (access, classId, objectId, subId);
switch (access)
{
break;
default:
- elog(ERROR, "unexpected object access type: %d", (int)access);
+ elog(ERROR, "unexpected object access type: %d", (int) access);
break;
}
}
sepgsql_exec_check_perms(List *rangeTabls, bool abort)
{
/*
- * If security provider is stacking and one of them replied 'false'
- * at least, we don't need to check any more.
+ * If security provider is stacking and one of them replied 'false' at
+ * least, we don't need to check any more.
*/
if (next_exec_check_perms_hook &&
- !(*next_exec_check_perms_hook)(rangeTabls, abort))
+ !(*next_exec_check_perms_hook) (rangeTabls, abort))
return false;
if (!sepgsql_dml_privileges(rangeTabls, abort))
static bool
sepgsql_needs_fmgr_hook(Oid functionId)
{
- char *old_label;
- char *new_label;
- char *function_label;
+ char *old_label;
+ char *new_label;
+ char *function_label;
if (next_needs_fmgr_hook &&
- (*next_needs_fmgr_hook)(functionId))
+ (*next_needs_fmgr_hook) (functionId))
return true;
/*
- * SELinux needs the function to be called via security_definer
- * wrapper, if this invocation will take a domain-transition.
- * We call these functions as trusted-procedure, if the security
- * policy has a rule that switches security label of the client
- * on execution.
+ * SELinux needs the function to be called via security_definer wrapper,
+ * if this invocation will take a domain-transition. We call these
+ * functions as trusted-procedure, if the security policy has a rule that
+ * switches security label of the client on execution.
*/
old_label = sepgsql_get_client_label();
new_label = sepgsql_proc_get_domtrans(functionId);
/*
* Even if not a trusted-procedure, this function should not be inlined
- * unless the client has db_procedure:{execute} permission.
- * Please note that it shall be actually failed later because of same
- * reason with ACL_EXECUTE.
+ * unless the client has db_procedure:{execute} permission. Please note
+ * that it shall be actually failed later because of same reason with
+ * ACL_EXECUTE.
*/
function_label = sepgsql_get_label(ProcedureRelationId, functionId, 0);
if (sepgsql_check_perms(sepgsql_get_client_label(),
sepgsql_fmgr_hook(FmgrHookEventType event,
FmgrInfo *flinfo, Datum *private)
{
- struct {
- char *old_label;
- char *new_label;
- Datum next_private;
- } *stack;
+ struct
+ {
+ char *old_label;
+ char *new_label;
+ Datum next_private;
+ } *stack;
switch (event)
{
case FHET_START:
- stack = (void *)DatumGetPointer(*private);
+ stack = (void *) DatumGetPointer(*private);
if (!stack)
{
- MemoryContext oldcxt;
- const char *cur_label = sepgsql_get_client_label();
+ MemoryContext oldcxt;
+ const char *cur_label = sepgsql_get_client_label();
oldcxt = MemoryContextSwitchTo(flinfo->fn_mcxt);
stack = palloc(sizeof(*stack));
{
/*
* process:transition permission between old and new
- * label, when user tries to switch security label of
- * the client on execution of trusted procedure.
+ * label, when user tries to switch security label of the
+ * client on execution of trusted procedure.
*/
sepgsql_check_perms(cur_label, stack->new_label,
SEPG_CLASS_PROCESS,
stack->old_label = sepgsql_set_client_label(stack->new_label);
if (next_fmgr_hook)
- (*next_fmgr_hook)(event, flinfo, &stack->next_private);
+ (*next_fmgr_hook) (event, flinfo, &stack->next_private);
break;
case FHET_END:
case FHET_ABORT:
- stack = (void *)DatumGetPointer(*private);
+ stack = (void *) DatumGetPointer(*private);
if (next_fmgr_hook)
- (*next_fmgr_hook)(event, flinfo, &stack->next_private);
+ (*next_fmgr_hook) (event, flinfo, &stack->next_private);
sepgsql_set_client_label(stack->old_label);
stack->old_label = NULL;
break;
default:
- elog(ERROR, "unexpected event type: %d", (int)event);
+ elog(ERROR, "unexpected event type: %d", (int) event);
break;
}
}
char *completionTag)
{
if (next_ProcessUtility_hook)
- (*next_ProcessUtility_hook)(parsetree, queryString, params,
- isTopLevel, dest, completionTag);
+ (*next_ProcessUtility_hook) (parsetree, queryString, params,
+ isTopLevel, dest, completionTag);
/*
* Check command tag to avoid nefarious operations
switch (nodeTag(parsetree))
{
case T_LoadStmt:
+
/*
* We reject LOAD command across the board on enforcing mode,
* because a binary module can arbitrarily override hooks.
}
break;
default:
+
/*
- * Right now we don't check any other utility commands,
- * because it needs more detailed information to make
- * access control decision here, but we don't want to
- * have two parse and analyze routines individually.
+ * Right now we don't check any other utility commands, because it
+ * needs more detailed information to make access control decision
+ * here, but we don't want to have two parse and analyze routines
+ * individually.
*/
break;
}
void
_PG_init(void)
{
- char *context;
+ char *context;
/*
* We allow to load the SE-PostgreSQL module on single-user-mode or
if (IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("sepgsql must be loaded via shared_preload_libraries")));
+ errmsg("sepgsql must be loaded via shared_preload_libraries")));
/*
- * Check availability of SELinux on the platform.
- * If disabled, we cannot activate any SE-PostgreSQL features,
- * and we have to skip rest of initialization.
+ * Check availability of SELinux on the platform. If disabled, we cannot
+ * activate any SE-PostgreSQL features, and we have to skip rest of
+ * initialization.
*/
if (is_selinux_enabled() < 1)
{
/*
* sepgsql.permissive = (on|off)
*
- * This variable controls performing mode of SE-PostgreSQL
- * on user's session.
+ * This variable controls performing mode of SE-PostgreSQL on user's
+ * session.
*/
DefineCustomBoolVariable("sepgsql.permissive",
"Turn on/off permissive mode in SE-PostgreSQL",
/*
* sepgsql.debug_audit = (on|off)
*
- * This variable allows users to turn on/off audit logs on access
- * control decisions, independent from auditallow/auditdeny setting
- * in the security policy.
- * We intend to use this option for debugging purpose.
+ * This variable allows users to turn on/off audit logs on access control
+ * decisions, independent from auditallow/auditdeny setting in the
+ * security policy. We intend to use this option for debugging purpose.
*/
DefineCustomBoolVariable("sepgsql.debug_audit",
"Turn on/off debug audit messages",
/*
* Set up dummy client label.
*
- * XXX - note that PostgreSQL launches background worker process
- * like autovacuum without authentication steps. So, we initialize
- * sepgsql_mode with SEPGSQL_MODE_INTERNAL, and client_label with
- * the security context of server process.
- * Later, it also launches background of user session. In this case,
- * the process is always hooked on post-authentication, and we can
- * initialize the sepgsql_mode and client_label correctly.
+ * XXX - note that PostgreSQL launches background worker process like
+ * autovacuum without authentication steps. So, we initialize sepgsql_mode
+ * with SEPGSQL_MODE_INTERNAL, and client_label with the security context
+ * of server process. Later, it also launches background of user session.
+ * In this case, the process is always hooked on post-authentication, and
+ * we can initialize the sepgsql_mode and client_label correctly.
*/
if (getcon_raw(&context) < 0)
ereport(ERROR,
*
* security label of the client process
*/
-static char *client_label = NULL;
+static char *client_label = NULL;
char *
sepgsql_get_client_label(void)
char *
sepgsql_set_client_label(char *new_label)
{
- char *old_label = client_label;
+ char *old_label = client_label;
client_label = new_label;
char *
sepgsql_get_label(Oid classId, Oid objectId, int32 subId)
{
- ObjectAddress object;
- char *label;
+ ObjectAddress object;
+ char *label;
- object.classId = classId;
- object.objectId = objectId;
- object.objectSubId = subId;
+ object.classId = classId;
+ object.objectId = objectId;
+ object.objectSubId = subId;
label = GetSecurityLabel(&object, SEPGSQL_LABEL_TAG);
- if (!label || security_check_context_raw((security_context_t)label))
+ if (!label || security_check_context_raw((security_context_t) label))
{
- security_context_t unlabeled;
+ security_context_t unlabeled;
if (security_get_initial_context_raw("unlabeled", &unlabeled) < 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("SELinux: failed to get initial security label: %m")));
+ errmsg("SELinux: failed to get initial security label: %m")));
PG_TRY();
{
label = pstrdup(unlabeled);
sepgsql_object_relabel(const ObjectAddress *object, const char *seclabel)
{
/*
- * validate format of the supplied security label,
- * if it is security context of selinux.
+ * validate format of the supplied security label, if it is security
+ * context of selinux.
*/
if (seclabel &&
security_check_context_raw((security_context_t) seclabel) < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("SELinux: invalid security label: \"%s\"", seclabel)));
+ errmsg("SELinux: invalid security label: \"%s\"", seclabel)));
+
/*
* Do actual permission checks for each object classes
*/
switch (object->classId)
{
case NamespaceRelationId:
- sepgsql_schema_relabel(object->objectId, seclabel);
+ sepgsql_schema_relabel(object->objectId, seclabel);
break;
case RelationRelationId:
if (object->objectSubId == 0)
Datum
sepgsql_getcon(PG_FUNCTION_ARGS)
{
- char *client_label;
+ char *client_label;
if (!sepgsql_is_enabled())
PG_RETURN_NULL();
Datum
sepgsql_mcstrans_in(PG_FUNCTION_ARGS)
{
- text *label = PG_GETARG_TEXT_P(0);
- char *raw_label;
- char *result;
+ text *label = PG_GETARG_TEXT_P(0);
+ char *raw_label;
+ char *result;
if (!sepgsql_is_enabled())
ereport(ERROR,
Datum
sepgsql_mcstrans_out(PG_FUNCTION_ARGS)
{
- text *label = PG_GETARG_TEXT_P(0);
- char *qual_label;
- char *result;
+ text *label = PG_GETARG_TEXT_P(0);
+ char *qual_label;
+ char *result;
if (!sepgsql_is_enabled())
ereport(ERROR,
quote_object_name(const char *src1, const char *src2,
const char *src3, const char *src4)
{
- StringInfoData result;
- const char *temp;
+ StringInfoData result;
+ const char *temp;
initStringInfo(&result);
temp = quote_identifier(src1);
appendStringInfo(&result, "%s", temp);
if (src1 != temp)
- pfree((void *)temp);
+ pfree((void *) temp);
}
if (src2)
{
temp = quote_identifier(src2);
appendStringInfo(&result, ".%s", temp);
if (src2 != temp)
- pfree((void *)temp);
+ pfree((void *) temp);
}
if (src3)
{
temp = quote_identifier(src3);
appendStringInfo(&result, ".%s", temp);
if (src3 != temp)
- pfree((void *)temp);
+ pfree((void *) temp);
}
if (src4)
{
temp = quote_identifier(src4);
appendStringInfo(&result, ".%s", temp);
if (src4 != temp)
- pfree((void *)temp);
+ pfree((void *) temp);
}
return result.data;
}
* catalog OID.
*/
static void
-exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
+exec_object_restorecon(struct selabel_handle * sehnd, Oid catalogId)
{
- Relation rel;
- SysScanDesc sscan;
- HeapTuple tuple;
- char *database_name = get_database_name(MyDatabaseId);
- char *namespace_name;
- Oid namespace_id;
- char *relation_name;
+ Relation rel;
+ SysScanDesc sscan;
+ HeapTuple tuple;
+ char *database_name = get_database_name(MyDatabaseId);
+ char *namespace_name;
+ Oid namespace_id;
+ char *relation_name;
/*
- * Open the target catalog. We don't want to allow writable
- * accesses by other session during initial labeling.
+ * Open the target catalog. We don't want to allow writable accesses by
+ * other session during initial labeling.
*/
rel = heap_open(catalogId, AccessShareLock);
SnapshotNow, 0, NULL);
while (HeapTupleIsValid(tuple = systable_getnext(sscan)))
{
- Form_pg_namespace nspForm;
- Form_pg_class relForm;
- Form_pg_attribute attForm;
- Form_pg_proc proForm;
- char *objname;
- int objtype = 1234;
- ObjectAddress object;
- security_context_t context;
+ Form_pg_namespace nspForm;
+ Form_pg_class relForm;
+ Form_pg_attribute attForm;
+ Form_pg_proc proForm;
+ char *objname;
+ int objtype = 1234;
+ ObjectAddress object;
+ security_context_t context;
/*
- * The way to determine object name depends on object classes.
- * So, any branches set up `objtype', `objname' and `object' here.
+ * The way to determine object name depends on object classes. So, any
+ * branches set up `objtype', `objname' and `object' here.
*/
switch (catalogId)
{
default:
elog(ERROR, "unexpected catalog id: %u", catalogId);
- objname = NULL; /* for compiler quiet */
+ objname = NULL; /* for compiler quiet */
break;
}
Datum
sepgsql_restorecon(PG_FUNCTION_ARGS)
{
- struct selabel_handle *sehnd;
- struct selinux_opt seopts;
+ struct selabel_handle *sehnd;
+ struct selinux_opt seopts;
/*
* SELinux has to be enabled on the running platform.
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("sepgsql is not currently enabled")));
+
/*
- * Check DAC permission. Only superuser can set up initial
- * security labels, like root-user in filesystems
+ * Check DAC permission. Only superuser can set up initial security
+ * labels, like root-user in filesystems
*/
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("SELinux: must be superuser to restore initial contexts")));
+ errmsg("SELinux: must be superuser to restore initial contexts")));
/*
- * Open selabel_lookup(3) stuff. It provides a set of mapping
- * between an initial security label and object class/name due
- * to the system setting.
+ * Open selabel_lookup(3) stuff. It provides a set of mapping between an
+ * initial security label and object class/name due to the system setting.
*/
if (PG_ARGISNULL(0))
{
if (!sehnd)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("SELinux: failed to initialize labeling handle: %m")));
+ errmsg("SELinux: failed to initialize labeling handle: %m")));
PG_TRY();
{
/*
- * Right now, we have no support labeling on the shared
- * database objects, such as database, role, or tablespace.
+ * Right now, we have no support labeling on the shared database
+ * objects, such as database, role, or tablespace.
*/
exec_object_restorecon(sehnd, NamespaceRelationId);
exec_object_restorecon(sehnd, RelationRelationId);
selabel_close(sehnd);
PG_RE_THROW();
}
- PG_END_TRY();
+ PG_END_TRY();
selabel_close(sehnd);
void
sepgsql_proc_post_create(Oid functionId)
{
- Relation rel;
- ScanKeyData skey;
- SysScanDesc sscan;
- HeapTuple tuple;
- Oid namespaceId;
- ObjectAddress object;
- char *scontext;
- char *tcontext;
- char *ncontext;
+ Relation rel;
+ ScanKeyData skey;
+ SysScanDesc sscan;
+ HeapTuple tuple;
+ Oid namespaceId;
+ ObjectAddress object;
+ char *scontext;
+ char *tcontext;
+ char *ncontext;
/*
* Fetch namespace of the new procedure. Because pg_proc entry is not
heap_close(rel, AccessShareLock);
/*
- * Compute a default security label when we create a new procedure
- * object under the specified namespace.
+ * Compute a default security label when we create a new procedure object
+ * under the specified namespace.
*/
scontext = sepgsql_get_client_label();
tcontext = sepgsql_get_label(NamespaceRelationId, namespaceId, 0);
char *
sepgsql_proc_get_domtrans(Oid functionId)
{
- char *scontext = sepgsql_get_client_label();
- char *tcontext;
- char *ncontext;
+ char *scontext = sepgsql_get_client_label();
+ char *tcontext;
+ char *ncontext;
tcontext = sepgsql_get_label(ProcedureRelationId, functionId, 0);
void
sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
{
- char *scontext = sepgsql_get_client_label();
- char *tcontext;
- char *ncontext;
- ObjectAddress object;
+ char *scontext = sepgsql_get_client_label();
+ char *tcontext;
+ char *ncontext;
+ ObjectAddress object;
/*
- * Only attributes within regular relation have individual
- * security labels.
+ * Only attributes within regular relation have individual security
+ * labels.
*/
if (get_rel_relkind(relOid) != RELKIND_RELATION)
return;
/*
- * Compute a default security label when we create a new procedure
- * object under the specified namespace.
+ * Compute a default security label when we create a new procedure object
+ * under the specified namespace.
*/
scontext = sepgsql_get_client_label();
tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
ncontext = sepgsql_compute_create(scontext, tcontext,
SEPG_CLASS_DB_COLUMN);
+
/*
* Assign the default security label on a new procedure
*/
char *scontext = sepgsql_get_client_label();
char *tcontext;
char *audit_name;
- ObjectAddress object;
+ ObjectAddress object;
if (get_rel_relkind(relOid) != RELKIND_RELATION)
ereport(ERROR,
void
sepgsql_relation_post_create(Oid relOid)
{
- Relation rel;
- ScanKeyData skey;
- SysScanDesc sscan;
- HeapTuple tuple;
- Form_pg_class classForm;
- ObjectAddress object;
- uint16 tclass;
- char *scontext; /* subject */
- char *tcontext; /* schema */
- char *rcontext; /* relation */
- char *ccontext; /* column */
+ Relation rel;
+ ScanKeyData skey;
+ SysScanDesc sscan;
+ HeapTuple tuple;
+ Form_pg_class classForm;
+ ObjectAddress object;
+ uint16 tclass;
+ char *scontext; /* subject */
+ char *tcontext; /* schema */
+ char *rcontext; /* relation */
+ char *ccontext; /* column */
/*
- * Fetch catalog record of the new relation. Because pg_class entry is
- * not visible right now, we need to scan the catalog using SnapshotSelf.
+ * Fetch catalog record of the new relation. Because pg_class entry is not
+ * visible right now, we need to scan the catalog using SnapshotSelf.
*/
rel = heap_open(RelationRelationId, AccessShareLock);
else if (classForm->relkind == RELKIND_VIEW)
tclass = SEPG_CLASS_DB_VIEW;
else
- goto out; /* No need to assign individual labels */
+ goto out; /* No need to assign individual labels */
/*
- * Compute a default security label when we create a new relation
- * object under the specified namespace.
+ * Compute a default security label when we create a new relation object
+ * under the specified namespace.
*/
scontext = sepgsql_get_client_label();
tcontext = sepgsql_get_label(NamespaceRelationId,
SetSecurityLabel(&object, SEPGSQL_LABEL_TAG, rcontext);
/*
- * We also assigns a default security label on columns of the new
- * regular tables.
+ * We also assigns a default security label on columns of the new regular
+ * tables.
*/
if (classForm->relkind == RELKIND_RELATION)
{
void
sepgsql_schema_post_create(Oid namespaceId)
{
- char *scontext = sepgsql_get_client_label();
- char *tcontext;
- char *ncontext;
- ObjectAddress object;
+ char *scontext = sepgsql_get_client_label();
+ char *tcontext;
+ char *ncontext;
+ ObjectAddress object;
/*
- * FIXME: Right now, we assume pg_database object has a fixed
- * security label, because pg_seclabel does not support to store
- * label of shared database objects.
+ * FIXME: Right now, we assume pg_database object has a fixed security
+ * label, because pg_seclabel does not support to store label of shared
+ * database objects.
*/
tcontext = "system_u:object_r:sepgsql_db_t:s0";
/*
- * Compute a default security label when we create a new schema
- * object under the working database.
+ * Compute a default security label when we create a new schema object
+ * under the working database.
*/
ncontext = sepgsql_compute_create(scontext, tcontext,
SEPG_CLASS_DB_SCHEMA);
*/
static struct
{
- const char *class_name;
- uint16 class_code;
+ const char *class_name;
+ uint16 class_code;
struct
{
- const char *av_name;
- uint32 av_code;
- } av[32];
-} selinux_catalog[] = {
+ const char *av_name;
+ uint32 av_code;
+ } av[32];
+} selinux_catalog[] =
+
+{
{
- "process", SEPG_CLASS_PROCESS,
+ "process", SEPG_CLASS_PROCESS,
{
- { "transition", SEPG_PROCESS__TRANSITION },
- { NULL, 0UL }
+ {
+ "transition", SEPG_PROCESS__TRANSITION
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
- "file", SEPG_CLASS_FILE,
+ "file", SEPG_CLASS_FILE,
{
- { "read", SEPG_FILE__READ },
- { "write", SEPG_FILE__WRITE },
- { "create", SEPG_FILE__CREATE },
- { "getattr", SEPG_FILE__GETATTR },
- { "unlink", SEPG_FILE__UNLINK },
- { "rename", SEPG_FILE__RENAME },
- { "append", SEPG_FILE__APPEND },
- { NULL, 0UL }
+ {
+ "read", SEPG_FILE__READ
+ },
+ {
+ "write", SEPG_FILE__WRITE
+ },
+ {
+ "create", SEPG_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_FILE__RENAME
+ },
+ {
+ "append", SEPG_FILE__APPEND
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
- "dir", SEPG_CLASS_DIR,
+ "dir", SEPG_CLASS_DIR,
{
- { "read", SEPG_DIR__READ },
- { "write", SEPG_DIR__WRITE },
- { "create", SEPG_DIR__CREATE },
- { "getattr", SEPG_DIR__GETATTR },
- { "unlink", SEPG_DIR__UNLINK },
- { "rename", SEPG_DIR__RENAME },
- { "search", SEPG_DIR__SEARCH },
- { "add_name", SEPG_DIR__ADD_NAME },
- { "remove_name", SEPG_DIR__REMOVE_NAME },
- { "rmdir", SEPG_DIR__RMDIR },
- { "reparent", SEPG_DIR__REPARENT },
- { NULL, 0UL }
+ {
+ "read", SEPG_DIR__READ
+ },
+ {
+ "write", SEPG_DIR__WRITE
+ },
+ {
+ "create", SEPG_DIR__CREATE
+ },
+ {
+ "getattr", SEPG_DIR__GETATTR
+ },
+ {
+ "unlink", SEPG_DIR__UNLINK
+ },
+ {
+ "rename", SEPG_DIR__RENAME
+ },
+ {
+ "search", SEPG_DIR__SEARCH
+ },
+ {
+ "add_name", SEPG_DIR__ADD_NAME
+ },
+ {
+ "remove_name", SEPG_DIR__REMOVE_NAME
+ },
+ {
+ "rmdir", SEPG_DIR__RMDIR
+ },
+ {
+ "reparent", SEPG_DIR__REPARENT
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
- "lnk_file", SEPG_CLASS_LNK_FILE,
+ "lnk_file", SEPG_CLASS_LNK_FILE,
{
- { "read", SEPG_LNK_FILE__READ },
- { "write", SEPG_LNK_FILE__WRITE },
- { "create", SEPG_LNK_FILE__CREATE },
- { "getattr", SEPG_LNK_FILE__GETATTR },
- { "unlink", SEPG_LNK_FILE__UNLINK },
- { "rename", SEPG_LNK_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_LNK_FILE__READ
+ },
+ {
+ "write", SEPG_LNK_FILE__WRITE
+ },
+ {
+ "create", SEPG_LNK_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_LNK_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_LNK_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_LNK_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
- "chr_file", SEPG_CLASS_CHR_FILE,
+ "chr_file", SEPG_CLASS_CHR_FILE,
{
- { "read", SEPG_CHR_FILE__READ },
- { "write", SEPG_CHR_FILE__WRITE },
- { "create", SEPG_CHR_FILE__CREATE },
- { "getattr", SEPG_CHR_FILE__GETATTR },
- { "unlink", SEPG_CHR_FILE__UNLINK },
- { "rename", SEPG_CHR_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_CHR_FILE__READ
+ },
+ {
+ "write", SEPG_CHR_FILE__WRITE
+ },
+ {
+ "create", SEPG_CHR_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_CHR_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_CHR_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_CHR_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
- "blk_file", SEPG_CLASS_BLK_FILE,
+ "blk_file", SEPG_CLASS_BLK_FILE,
{
- { "read", SEPG_BLK_FILE__READ },
- { "write", SEPG_BLK_FILE__WRITE },
- { "create", SEPG_BLK_FILE__CREATE },
- { "getattr", SEPG_BLK_FILE__GETATTR },
- { "unlink", SEPG_BLK_FILE__UNLINK },
- { "rename", SEPG_BLK_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_BLK_FILE__READ
+ },
+ {
+ "write", SEPG_BLK_FILE__WRITE
+ },
+ {
+ "create", SEPG_BLK_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_BLK_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_BLK_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_BLK_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
- "sock_file", SEPG_CLASS_SOCK_FILE,
+ "sock_file", SEPG_CLASS_SOCK_FILE,
{
- { "read", SEPG_SOCK_FILE__READ },
- { "write", SEPG_SOCK_FILE__WRITE },
- { "create", SEPG_SOCK_FILE__CREATE },
- { "getattr", SEPG_SOCK_FILE__GETATTR },
- { "unlink", SEPG_SOCK_FILE__UNLINK },
- { "rename", SEPG_SOCK_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_SOCK_FILE__READ
+ },
+ {
+ "write", SEPG_SOCK_FILE__WRITE
+ },
+ {
+ "create", SEPG_SOCK_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_SOCK_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_SOCK_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_SOCK_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
- "fifo_file", SEPG_CLASS_FIFO_FILE,
+ "fifo_file", SEPG_CLASS_FIFO_FILE,
{
- { "read", SEPG_FIFO_FILE__READ },
- { "write", SEPG_FIFO_FILE__WRITE },
- { "create", SEPG_FIFO_FILE__CREATE },
- { "getattr", SEPG_FIFO_FILE__GETATTR },
- { "unlink", SEPG_FIFO_FILE__UNLINK },
- { "rename", SEPG_FIFO_FILE__RENAME },
- { NULL, 0UL }
+ {
+ "read", SEPG_FIFO_FILE__READ
+ },
+ {
+ "write", SEPG_FIFO_FILE__WRITE
+ },
+ {
+ "create", SEPG_FIFO_FILE__CREATE
+ },
+ {
+ "getattr", SEPG_FIFO_FILE__GETATTR
+ },
+ {
+ "unlink", SEPG_FIFO_FILE__UNLINK
+ },
+ {
+ "rename", SEPG_FIFO_FILE__RENAME
+ },
+ {
+ NULL, 0UL
+ }
}
},
{
- "db_database", SEPG_CLASS_DB_DATABASE,
+ "db_database", SEPG_CLASS_DB_DATABASE,
{
- { "create", SEPG_DB_DATABASE__CREATE },
- { "drop", SEPG_DB_DATABASE__DROP },
- { "getattr", SEPG_DB_DATABASE__GETATTR },
- { "setattr", SEPG_DB_DATABASE__SETATTR },
- { "relabelfrom", SEPG_DB_DATABASE__RELABELFROM },
- { "relabelto", SEPG_DB_DATABASE__RELABELTO },
- { "access", SEPG_DB_DATABASE__ACCESS },
- { "load_module", SEPG_DB_DATABASE__LOAD_MODULE },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_DATABASE__CREATE
+ },
+ {
+ "drop", SEPG_DB_DATABASE__DROP
+ },
+ {
+ "getattr", SEPG_DB_DATABASE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_DATABASE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_DATABASE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_DATABASE__RELABELTO
+ },
+ {
+ "access", SEPG_DB_DATABASE__ACCESS
+ },
+ {
+ "load_module", SEPG_DB_DATABASE__LOAD_MODULE
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
- "db_schema", SEPG_CLASS_DB_SCHEMA,
+ "db_schema", SEPG_CLASS_DB_SCHEMA,
{
- { "create", SEPG_DB_SCHEMA__CREATE },
- { "drop", SEPG_DB_SCHEMA__DROP },
- { "getattr", SEPG_DB_SCHEMA__GETATTR },
- { "setattr", SEPG_DB_SCHEMA__SETATTR },
- { "relabelfrom", SEPG_DB_SCHEMA__RELABELFROM },
- { "relabelto", SEPG_DB_SCHEMA__RELABELTO },
- { "search", SEPG_DB_SCHEMA__SEARCH },
- { "add_name", SEPG_DB_SCHEMA__ADD_NAME },
- { "remove_name", SEPG_DB_SCHEMA__REMOVE_NAME },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_SCHEMA__CREATE
+ },
+ {
+ "drop", SEPG_DB_SCHEMA__DROP
+ },
+ {
+ "getattr", SEPG_DB_SCHEMA__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_SCHEMA__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_SCHEMA__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_SCHEMA__RELABELTO
+ },
+ {
+ "search", SEPG_DB_SCHEMA__SEARCH
+ },
+ {
+ "add_name", SEPG_DB_SCHEMA__ADD_NAME
+ },
+ {
+ "remove_name", SEPG_DB_SCHEMA__REMOVE_NAME
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
- "db_table", SEPG_CLASS_DB_TABLE,
+ "db_table", SEPG_CLASS_DB_TABLE,
{
- { "create", SEPG_DB_TABLE__CREATE },
- { "drop", SEPG_DB_TABLE__DROP },
- { "getattr", SEPG_DB_TABLE__GETATTR },
- { "setattr", SEPG_DB_TABLE__SETATTR },
- { "relabelfrom", SEPG_DB_TABLE__RELABELFROM },
- { "relabelto", SEPG_DB_TABLE__RELABELTO },
- { "select", SEPG_DB_TABLE__SELECT },
- { "update", SEPG_DB_TABLE__UPDATE },
- { "insert", SEPG_DB_TABLE__INSERT },
- { "delete", SEPG_DB_TABLE__DELETE },
- { "lock", SEPG_DB_TABLE__LOCK },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_TABLE__CREATE
+ },
+ {
+ "drop", SEPG_DB_TABLE__DROP
+ },
+ {
+ "getattr", SEPG_DB_TABLE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_TABLE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_TABLE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_TABLE__RELABELTO
+ },
+ {
+ "select", SEPG_DB_TABLE__SELECT
+ },
+ {
+ "update", SEPG_DB_TABLE__UPDATE
+ },
+ {
+ "insert", SEPG_DB_TABLE__INSERT
+ },
+ {
+ "delete", SEPG_DB_TABLE__DELETE
+ },
+ {
+ "lock", SEPG_DB_TABLE__LOCK
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
- "db_sequence", SEPG_CLASS_DB_SEQUENCE,
+ "db_sequence", SEPG_CLASS_DB_SEQUENCE,
{
- { "create", SEPG_DB_SEQUENCE__CREATE },
- { "drop", SEPG_DB_SEQUENCE__DROP },
- { "getattr", SEPG_DB_SEQUENCE__GETATTR },
- { "setattr", SEPG_DB_SEQUENCE__SETATTR },
- { "relabelfrom", SEPG_DB_SEQUENCE__RELABELFROM },
- { "relabelto", SEPG_DB_SEQUENCE__RELABELTO },
- { "get_value", SEPG_DB_SEQUENCE__GET_VALUE },
- { "next_value", SEPG_DB_SEQUENCE__NEXT_VALUE },
- { "set_value", SEPG_DB_SEQUENCE__SET_VALUE },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_SEQUENCE__CREATE
+ },
+ {
+ "drop", SEPG_DB_SEQUENCE__DROP
+ },
+ {
+ "getattr", SEPG_DB_SEQUENCE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_SEQUENCE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_SEQUENCE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_SEQUENCE__RELABELTO
+ },
+ {
+ "get_value", SEPG_DB_SEQUENCE__GET_VALUE
+ },
+ {
+ "next_value", SEPG_DB_SEQUENCE__NEXT_VALUE
+ },
+ {
+ "set_value", SEPG_DB_SEQUENCE__SET_VALUE
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
- "db_procedure", SEPG_CLASS_DB_PROCEDURE,
+ "db_procedure", SEPG_CLASS_DB_PROCEDURE,
{
- { "create", SEPG_DB_PROCEDURE__CREATE },
- { "drop", SEPG_DB_PROCEDURE__DROP },
- { "getattr", SEPG_DB_PROCEDURE__GETATTR },
- { "setattr", SEPG_DB_PROCEDURE__SETATTR },
- { "relabelfrom", SEPG_DB_PROCEDURE__RELABELFROM },
- { "relabelto", SEPG_DB_PROCEDURE__RELABELTO },
- { "execute", SEPG_DB_PROCEDURE__EXECUTE },
- { "entrypoint", SEPG_DB_PROCEDURE__ENTRYPOINT },
- { "install", SEPG_DB_PROCEDURE__INSTALL },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_PROCEDURE__CREATE
+ },
+ {
+ "drop", SEPG_DB_PROCEDURE__DROP
+ },
+ {
+ "getattr", SEPG_DB_PROCEDURE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_PROCEDURE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_PROCEDURE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_PROCEDURE__RELABELTO
+ },
+ {
+ "execute", SEPG_DB_PROCEDURE__EXECUTE
+ },
+ {
+ "entrypoint", SEPG_DB_PROCEDURE__ENTRYPOINT
+ },
+ {
+ "install", SEPG_DB_PROCEDURE__INSTALL
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
- "db_column", SEPG_CLASS_DB_COLUMN,
+ "db_column", SEPG_CLASS_DB_COLUMN,
{
- { "create", SEPG_DB_COLUMN__CREATE },
- { "drop", SEPG_DB_COLUMN__DROP },
- { "getattr", SEPG_DB_COLUMN__GETATTR },
- { "setattr", SEPG_DB_COLUMN__SETATTR },
- { "relabelfrom", SEPG_DB_COLUMN__RELABELFROM },
- { "relabelto", SEPG_DB_COLUMN__RELABELTO },
- { "select", SEPG_DB_COLUMN__SELECT },
- { "update", SEPG_DB_COLUMN__UPDATE },
- { "insert", SEPG_DB_COLUMN__INSERT },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_COLUMN__CREATE
+ },
+ {
+ "drop", SEPG_DB_COLUMN__DROP
+ },
+ {
+ "getattr", SEPG_DB_COLUMN__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_COLUMN__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_COLUMN__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_COLUMN__RELABELTO
+ },
+ {
+ "select", SEPG_DB_COLUMN__SELECT
+ },
+ {
+ "update", SEPG_DB_COLUMN__UPDATE
+ },
+ {
+ "insert", SEPG_DB_COLUMN__INSERT
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
- "db_tuple", SEPG_CLASS_DB_TUPLE,
+ "db_tuple", SEPG_CLASS_DB_TUPLE,
{
- { "relabelfrom", SEPG_DB_TUPLE__RELABELFROM },
- { "relabelto", SEPG_DB_TUPLE__RELABELTO },
- { "select", SEPG_DB_TUPLE__SELECT },
- { "update", SEPG_DB_TUPLE__UPDATE },
- { "insert", SEPG_DB_TUPLE__INSERT },
- { "delete", SEPG_DB_TUPLE__DELETE },
- { NULL, 0UL },
+ {
+ "relabelfrom", SEPG_DB_TUPLE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_TUPLE__RELABELTO
+ },
+ {
+ "select", SEPG_DB_TUPLE__SELECT
+ },
+ {
+ "update", SEPG_DB_TUPLE__UPDATE
+ },
+ {
+ "insert", SEPG_DB_TUPLE__INSERT
+ },
+ {
+ "delete", SEPG_DB_TUPLE__DELETE
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
- "db_blob", SEPG_CLASS_DB_BLOB,
+ "db_blob", SEPG_CLASS_DB_BLOB,
{
- { "create", SEPG_DB_BLOB__CREATE },
- { "drop", SEPG_DB_BLOB__DROP },
- { "getattr", SEPG_DB_BLOB__GETATTR },
- { "setattr", SEPG_DB_BLOB__SETATTR },
- { "relabelfrom", SEPG_DB_BLOB__RELABELFROM },
- { "relabelto", SEPG_DB_BLOB__RELABELTO },
- { "read", SEPG_DB_BLOB__READ },
- { "write", SEPG_DB_BLOB__WRITE },
- { "import", SEPG_DB_BLOB__IMPORT },
- { "export", SEPG_DB_BLOB__EXPORT },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_BLOB__CREATE
+ },
+ {
+ "drop", SEPG_DB_BLOB__DROP
+ },
+ {
+ "getattr", SEPG_DB_BLOB__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_BLOB__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_BLOB__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_BLOB__RELABELTO
+ },
+ {
+ "read", SEPG_DB_BLOB__READ
+ },
+ {
+ "write", SEPG_DB_BLOB__WRITE
+ },
+ {
+ "import", SEPG_DB_BLOB__IMPORT
+ },
+ {
+ "export", SEPG_DB_BLOB__EXPORT
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
- "db_language", SEPG_CLASS_DB_LANGUAGE,
+ "db_language", SEPG_CLASS_DB_LANGUAGE,
{
- { "create", SEPG_DB_LANGUAGE__CREATE },
- { "drop", SEPG_DB_LANGUAGE__DROP },
- { "getattr", SEPG_DB_LANGUAGE__GETATTR },
- { "setattr", SEPG_DB_LANGUAGE__SETATTR },
- { "relabelfrom", SEPG_DB_LANGUAGE__RELABELFROM },
- { "relabelto", SEPG_DB_LANGUAGE__RELABELTO },
- { "implement", SEPG_DB_LANGUAGE__IMPLEMENT },
- { "execute", SEPG_DB_LANGUAGE__EXECUTE },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_LANGUAGE__CREATE
+ },
+ {
+ "drop", SEPG_DB_LANGUAGE__DROP
+ },
+ {
+ "getattr", SEPG_DB_LANGUAGE__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_LANGUAGE__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_LANGUAGE__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_LANGUAGE__RELABELTO
+ },
+ {
+ "implement", SEPG_DB_LANGUAGE__IMPLEMENT
+ },
+ {
+ "execute", SEPG_DB_LANGUAGE__EXECUTE
+ },
+ {
+ NULL, 0UL
+ },
}
},
{
- "db_view", SEPG_CLASS_DB_VIEW,
+ "db_view", SEPG_CLASS_DB_VIEW,
{
- { "create", SEPG_DB_VIEW__CREATE },
- { "drop", SEPG_DB_VIEW__DROP },
- { "getattr", SEPG_DB_VIEW__GETATTR },
- { "setattr", SEPG_DB_VIEW__SETATTR },
- { "relabelfrom", SEPG_DB_VIEW__RELABELFROM },
- { "relabelto", SEPG_DB_VIEW__RELABELTO },
- { "expand", SEPG_DB_VIEW__EXPAND },
- { NULL, 0UL },
+ {
+ "create", SEPG_DB_VIEW__CREATE
+ },
+ {
+ "drop", SEPG_DB_VIEW__DROP
+ },
+ {
+ "getattr", SEPG_DB_VIEW__GETATTR
+ },
+ {
+ "setattr", SEPG_DB_VIEW__SETATTR
+ },
+ {
+ "relabelfrom", SEPG_DB_VIEW__RELABELFROM
+ },
+ {
+ "relabelto", SEPG_DB_VIEW__RELABELTO
+ },
+ {
+ "expand", SEPG_DB_VIEW__EXPAND
+ },
+ {
+ NULL, 0UL
+ },
}
},
};
int
sepgsql_set_mode(int new_mode)
{
- int old_mode = sepgsql_mode;
+ int old_mode = sepgsql_mode;
sepgsql_mode = new_mode;
uint32 audited,
const char *audit_name)
{
- StringInfoData buf;
- const char *class_name;
- const char *av_name;
- int i;
+ StringInfoData buf;
+ const char *class_name;
+ const char *av_name;
+ int i;
/* lookup name of the object class */
Assert(tclass < SEPG_CLASS_MAX);
initStringInfo(&buf);
appendStringInfo(&buf, "%s {",
(denied ? "denied" : "allowed"));
- for (i=0; selinux_catalog[tclass].av[i].av_name; i++)
+ for (i = 0; selinux_catalog[tclass].av[i].av_name; i++)
{
if (audited & (1UL << i))
{
sepgsql_compute_avd(const char *scontext,
const char *tcontext,
uint16 tclass,
- struct av_decision *avd)
+ struct av_decision * avd)
{
- const char *tclass_name;
- security_class_t tclass_ex;
- struct av_decision avd_ex;
- int i, deny_unknown = security_deny_unknown();
+ const char *tclass_name;
+ security_class_t tclass_ex;
+ struct av_decision avd_ex;
+ int i,
+ deny_unknown = security_deny_unknown();
- /* Get external code of the object class*/
+ /* Get external code of the object class */
Assert(tclass < SEPG_CLASS_MAX);
Assert(tclass == selinux_catalog[tclass].class_code);
{
/*
* If the current security policy does not support permissions
- * corresponding to database objects, we fill up them with dummy
- * data.
+ * corresponding to database objects, we fill up them with dummy data.
* If security_deny_unknown() returns positive value, undefined
* permissions should be denied. Otherwise, allowed
*/
avd->allowed = (security_deny_unknown() > 0 ? 0 : ~0);
avd->auditallow = 0U;
- avd->auditdeny = ~0U;
+ avd->auditdeny = ~0U;
avd->flags = 0;
return;
* Ask SELinux what is allowed set of permissions on a pair of the
* security contexts and the given object class.
*/
- if (security_compute_av_flags_raw((security_context_t)scontext,
- (security_context_t)tcontext,
+ if (security_compute_av_flags_raw((security_context_t) scontext,
+ (security_context_t) tcontext,
tclass_ex, 0, &avd_ex) < 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
/*
* SELinux returns its access control decision as a set of permissions
- * represented in external code which depends on run-time environment.
- * So, we need to translate it to the internal representation before
- * returning results for the caller.
+ * represented in external code which depends on run-time environment. So,
+ * we need to translate it to the internal representation before returning
+ * results for the caller.
*/
memset(avd, 0, sizeof(struct av_decision));
- for (i=0; selinux_catalog[tclass].av[i].av_name; i++)
+ for (i = 0; selinux_catalog[tclass].av[i].av_name; i++)
{
- access_vector_t av_code_ex;
- const char *av_name = selinux_catalog[tclass].av[i].av_name;
- uint32 av_code = selinux_catalog[tclass].av[i].av_code;
+ access_vector_t av_code_ex;
+ const char *av_name = selinux_catalog[tclass].av[i].av_name;
+ uint32 av_code = selinux_catalog[tclass].av[i].av_code;
av_code_ex = string_to_av_perm(tclass_ex, av_name);
if (av_code_ex == 0)
const char *tcontext,
uint16 tclass)
{
- security_context_t ncontext;
- security_class_t tclass_ex;
- const char *tclass_name;
- char *result;
+ security_context_t ncontext;
+ security_class_t tclass_ex;
+ const char *tclass_name;
+ char *result;
- /* Get external code of the object class*/
+ /* Get external code of the object class */
Assert(tclass < SEPG_CLASS_MAX);
tclass_name = selinux_catalog[tclass].class_name;
tclass_ex = string_to_security_class(tclass_name);
/*
- * Ask SELinux what is the default context for the given object class
- * on a pair of security contexts
+ * Ask SELinux what is the default context for the given object class on a
+ * pair of security contexts
*/
- if (security_compute_create_raw((security_context_t)scontext,
- (security_context_t)tcontext,
+ if (security_compute_create_raw((security_context_t) scontext,
+ (security_context_t) tcontext,
tclass_ex, &ncontext) < 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
scontext, tcontext, tclass_name)));
/*
- * libselinux returns malloc()'ed string, so we need to copy it
- * on the palloc()'ed region.
+ * libselinux returns malloc()'ed string, so we need to copy it on the
+ * palloc()'ed region.
*/
PG_TRY();
{
const char *audit_name,
bool abort)
{
- struct av_decision avd;
+ struct av_decision avd;
uint32 denied;
uint32 audited;
bool result = true;
audited = (denied ? denied : required);
else
audited = (denied ? (denied & avd.auditdeny)
- : (required & avd.auditallow));
+ : (required & avd.auditallow));
if (denied &&
sepgsql_getenforce() > 0 &&
result = false;
/*
- * It records a security audit for the request, if needed.
- * But, when SE-PgSQL performs 'internal' mode, it needs to keep silent.
+ * It records a security audit for the request, if needed. But, when
+ * SE-PgSQL performs 'internal' mode, it needs to keep silent.
*/
if (audited && sepgsql_mode != SEPGSQL_MODE_INTERNAL)
{
/*
* selinux.c
*/
-extern bool sepgsql_is_enabled(void);
+extern bool sepgsql_is_enabled(void);
extern int sepgsql_get_mode(void);
extern int sepgsql_set_mode(int new_mode);
extern bool sepgsql_getenforce(void);
extern void sepgsql_audit_log(bool denied,
- const char *scontext,
- const char *tcontext,
- uint16 tclass,
- uint32 audited,
- const char *audit_name);
+ const char *scontext,
+ const char *tcontext,
+ uint16 tclass,
+ uint32 audited,
+ const char *audit_name);
extern void sepgsql_compute_avd(const char *scontext,
- const char *tcontext,
- uint16 tclass,
- struct av_decision *avd);
+ const char *tcontext,
+ uint16 tclass,
+ struct av_decision * avd);
extern char *sepgsql_compute_create(const char *scontext,
- const char *tcontext,
- uint16 tclass);
+ const char *tcontext,
+ uint16 tclass);
extern bool sepgsql_check_perms(const char *scontext,
- const char *tcontext,
- uint16 tclass,
- uint32 required,
- const char *audit_name,
- bool abort);
+ const char *tcontext,
+ uint16 tclass,
+ uint32 required,
+ const char *audit_name,
+ bool abort);
+
/*
* label.c
*/
extern char *sepgsql_set_client_label(char *new_label);
extern char *sepgsql_get_label(Oid relOid, Oid objOid, int32 subId);
-extern void sepgsql_object_relabel(const ObjectAddress *object,
- const char *seclabel);
+extern void sepgsql_object_relabel(const ObjectAddress *object,
+ const char *seclabel);
extern Datum sepgsql_getcon(PG_FUNCTION_ARGS);
extern Datum sepgsql_mcstrans_in(PG_FUNCTION_ARGS);
*/
extern void sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum);
extern void sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
- const char *seclabel);
+ const char *seclabel);
extern void sepgsql_relation_post_create(Oid relOid);
extern void sepgsql_relation_relabel(Oid relOid, const char *seclabel);
extern void sepgsql_proc_relabel(Oid functionId, const char *seclabel);
extern char *sepgsql_proc_get_domtrans(Oid functionId);
-#endif /* SEPGSQL_H */
+#endif /* SEPGSQL_H */
/*
* This is where we check to see if the field we are supposed to update
- * even exists. The above function must return -1 if name not found?
+ * even exists. The above function must return -1 if name not found?
*/
if (attnum < 0)
ereport(ERROR,
static xmlChar *pgxml_texttoxmlchar(text *textstring);
static xmlXPathObjectPtr pgxml_xpath(text *document, xmlChar *xpath,
- xpath_workspace *workspace);
+ xpath_workspace *workspace);
static void cleanup_workspace(xpath_workspace *workspace);
xpath_nodeset(PG_FUNCTION_ARGS)
{
text *document = PG_GETARG_TEXT_P(0);
- text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
+ text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
xmlChar *toptag = pgxml_texttoxmlchar(PG_GETARG_TEXT_P(2));
xmlChar *septag = pgxml_texttoxmlchar(PG_GETARG_TEXT_P(3));
xmlChar *xpath;
xpath_list(PG_FUNCTION_ARGS)
{
text *document = PG_GETARG_TEXT_P(0);
- text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
+ text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
xmlChar *plainsep = pgxml_texttoxmlchar(PG_GETARG_TEXT_P(2));
xmlChar *xpath;
text *xpres;
xpath_string(PG_FUNCTION_ARGS)
{
text *document = PG_GETARG_TEXT_P(0);
- text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
+ text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
xmlChar *xpath;
int32 pathsize;
text *xpres;
xpath_number(PG_FUNCTION_ARGS)
{
text *document = PG_GETARG_TEXT_P(0);
- text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
+ text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
xmlChar *xpath;
float4 fRes;
xmlXPathObjectPtr res;
xpath_bool(PG_FUNCTION_ARGS)
{
text *document = PG_GETARG_TEXT_P(0);
- text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
+ text *xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
xmlChar *xpath;
int bRes;
xmlXPathObjectPtr res;
/* local defs */
static const char **parse_params(text *paramstr);
-
#endif /* USE_LIBXSLT */
{
max_params *= 2;
params = (const char **) repalloc(params,
- (max_params + 1) * sizeof(char *));
+ (max_params + 1) * sizeof(char *));
}
params[nparams++] = pos;
pos = strstr(pos, nvsep);
*
* check to see if any preceding bits are null...
*/
- int byte = attnum >> 3;
+ int byte = attnum >> 3;
int finalbit = attnum & 0x07;
/* check for nulls "before" final bit of last byte */
* Now check to see if any preceding bits are null...
*/
{
- int byte = attnum >> 3;
+ int byte = attnum >> 3;
int finalbit = attnum & 0x07;
/* check for nulls "before" final bit of last byte */
ArrayType *array = PG_GETARG_ARRAYTYPE_P_COPY(0);
int32 *nkeys = (int32 *) PG_GETARG_POINTER(1);
StrategyNumber strategy = PG_GETARG_UINT16(2);
- /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */
+
+ /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool **nullFlags = (bool **) PG_GETARG_POINTER(5);
int32 *searchMode = (int32 *) PG_GETARG_POINTER(6);
case GinContainsStrategy:
if (nelems > 0)
*searchMode = GIN_SEARCH_MODE_DEFAULT;
- else /* everything contains the empty set */
+ else /* everything contains the empty set */
*searchMode = GIN_SEARCH_MODE_ALL;
break;
case GinContainedStrategy:
{
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* ArrayType *query = PG_GETARG_ARRAYTYPE_P(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
+
/* Datum *queryKeys = (Datum *) PG_GETARG_POINTER(6); */
bool *nullFlags = (bool *) PG_GETARG_POINTER(7);
bool res;
case GinEqualStrategy:
/* we will need recheck */
*recheck = true;
+
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare
- * and array_eq handle nulls differently ...
+ * against nulls here. This is because array_contain_compare and
+ * array_eq handle nulls differently ...
*/
res = true;
for (i = 0; i < nkeys; i++)
GinEntryAccumulator *ea;
/*
- * Allocate memory by rather big chunks to decrease overhead. We have
- * no need to reclaim RBNodes individually, so this costs nothing.
+ * Allocate memory by rather big chunks to decrease overhead. We have no
+ * need to reclaim RBNodes individually, so this costs nothing.
*/
if (accum->entryallocator == NULL || accum->eas_used >= DEF_NENTRY)
{
cmpEntryAccumulator,
ginCombineData,
ginAllocEntryAccumulator,
- NULL, /* no freefunc needed */
+ NULL, /* no freefunc needed */
(void *) accum);
}
bool isNew;
/*
- * For the moment, fill only the fields of eatmp that will be looked at
- * by cmpEntryAccumulator or ginCombineData.
+ * For the moment, fill only the fields of eatmp that will be looked at by
+ * cmpEntryAccumulator or ginCombineData.
*/
eatmp.attnum = attnum;
eatmp.key = key;
int
ginCompareItemPointers(ItemPointer a, ItemPointer b)
{
- BlockNumber ba = GinItemPointerGetBlockNumber(a);
- BlockNumber bb = GinItemPointerGetBlockNumber(b);
+ BlockNumber ba = GinItemPointerGetBlockNumber(a);
+ BlockNumber bb = GinItemPointerGetBlockNumber(b);
if (ba == bb)
{
- OffsetNumber oa = GinItemPointerGetOffsetNumber(a);
- OffsetNumber ob = GinItemPointerGetOffsetNumber(b);
+ OffsetNumber oa = GinItemPointerGetOffsetNumber(a);
+ OffsetNumber ob = GinItemPointerGetOffsetNumber(b);
if (oa == ob)
return 0;
Page page = BufferGetPage(buf);
int sizeofitem = GinSizeOfDataPageItem(page);
int cnt = 0;
+
/* these must be static so they can be returned to caller */
static XLogRecData rdata[3];
static ginxlogInsert data;
Size pageSize = PageGetPageSize(lpage);
Size freeSpace;
uint32 nCopied = 1;
+
/* these must be static so they can be returned to caller */
static ginxlogSplit data;
static XLogRecData rdata[4];
if (errorTooBig)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) newsize,
- (unsigned long) Min(INDEX_SIZE_MASK,
- GinMaxItemSize),
- RelationGetRelationName(ginstate->index))));
+ errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+ (unsigned long) newsize,
+ (unsigned long) Min(INDEX_SIZE_MASK,
+ GinMaxItemSize),
+ RelationGetRelationName(ginstate->index))));
pfree(itup);
return NULL;
}
* Form a non-leaf entry tuple by copying the key data from the given tuple,
* which can be either a leaf or non-leaf entry tuple.
*
- * Any posting list in the source tuple is not copied. The specified child
+ * Any posting list in the source tuple is not copied. The specified child
* block number is inserted into t_tid.
*/
static IndexTuple
key = gintuple_get_key(btree->ginstate, itup, &category);
if (ginCompareAttEntries(btree->ginstate,
- btree->entryAttnum, btree->entryKey, btree->entryCategory,
+ btree->entryAttnum, btree->entryKey, btree->entryCategory,
attnum, key, category) > 0)
return TRUE;
Page page = BufferGetPage(buf);
OffsetNumber placed;
int cnt = 0;
+
/* these must be static so they can be returned to caller */
static XLogRecData rdata[3];
static ginxlogInsert data;
Page lpage = PageGetTempPageCopy(BufferGetPage(lbuf));
Page rpage = BufferGetPage(rbuf);
Size pageSize = PageGetPageSize(lpage);
+
/* these must be static so they can be returned to caller */
static XLogRecData rdata[2];
static ginxlogSplit data;
GinPageGetOpaque(page)->rightlink = rightlink;
/*
- * tail page may contain only whole row(s) or final part of row placed
- * on previous pages (a "row" here meaning all the index tuples generated
- * for one heap tuple)
+ * tail page may contain only whole row(s) or final part of row placed on
+ * previous pages (a "row" here meaning all the index tuples generated for
+ * one heap tuple)
*/
if (rightlink == InvalidBlockNumber)
{
* Create temporary index tuples for a single indexable item (one index column
* for the heap tuple specified by ht_ctid), and append them to the array
* in *collector. They will subsequently be written out using
- * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
+ * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
* temp tuples for a given heap tuple must be written in one call to
* ginHeapTupleFastInsert.
*/
}
/*
- * Build an index tuple for each key value, and add to array. In
- * pending tuples we just stick the heap TID into t_tid.
+ * Build an index tuple for each key value, and add to array. In pending
+ * tuples we just stick the heap TID into t_tid.
*/
for (i = 0; i < nentries; i++)
{
{
IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
OffsetNumber curattnum;
- Datum curkey;
+ Datum curkey;
GinNullCategory curcategory;
/* Check for change of heap TID or attnum */
*/
ginBeginBAScan(&accum);
while ((list = ginGetBAEntry(&accum,
- &attnum, &key, &category, &nlist)) != NULL)
+ &attnum, &key, &category, &nlist)) != NULL)
{
ginEntryInsert(ginstate, attnum, key, category,
list, nlist, NULL);
ginBeginBAScan(&accum);
while ((list = ginGetBAEntry(&accum,
- &attnum, &key, &category, &nlist)) != NULL)
+ &attnum, &key, &category, &nlist)) != NULL)
ginEntryInsert(ginstate, attnum, key, category,
list, nlist, NULL);
}
callConsistentFn(GinState *ginstate, GinScanKey key)
{
/*
- * If we're dealing with a dummy EVERYTHING key, we don't want to call
- * the consistentFn; just claim it matches.
+ * If we're dealing with a dummy EVERYTHING key, we don't want to call the
+ * consistentFn; just claim it matches.
*/
if (key->searchMode == GIN_SEARCH_MODE_EVERYTHING)
{
/*
* Collects TIDs into scanEntry->matchBitmap for all heap tuples that
- * match the search entry. This supports three different match modes:
+ * match the search entry. This supports three different match modes:
*
* 1. Partial-match support: scan from current point until the
- * comparePartialFn says we're done.
+ * comparePartialFn says we're done.
* 2. SEARCH_MODE_ALL: scan from current point (which should be first
- * key for the current attnum) until we hit null items or end of attnum
+ * key for the current attnum) until we hit null items or end of attnum
* 3. SEARCH_MODE_EVERYTHING: scan from current point (which should be first
- * key for the current attnum) until we hit end of attnum
+ * key for the current attnum) until we hit end of attnum
*
* Returns true if done, false if it's necessary to restart scan from scratch
*/
collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
GinScanEntry scanEntry)
{
- OffsetNumber attnum;
+ OffsetNumber attnum;
Form_pg_attribute attr;
/* Initialize empty bitmap result */
cmp = DatumGetInt32(FunctionCall4(&btree->ginstate->comparePartialFn[attnum - 1],
scanEntry->queryKey,
idatum,
- UInt16GetDatum(scanEntry->strategy),
- PointerGetDatum(scanEntry->extra_data)));
+ UInt16GetDatum(scanEntry->strategy),
+ PointerGetDatum(scanEntry->extra_data)));
if (cmp > 0)
return true;
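/*
 * Illustrative sketch (not part of the patch): the partial-match protocol
 * used above -- 0 means "matches, keep scanning", a positive result means
 * "past the range, we're done", negative means "no match yet, keep going" --
 * reduced to a standalone prefix scan over a sorted string array.  All
 * demo_* names are made up.
 */
#include <string.h>

static int
demo_compare_partial(const char *query, const char *entry)
{
	size_t		qlen = strlen(query);
	int			cmp = strncmp(entry, query, qlen);

	if (cmp == 0)
		return 0;				/* prefix matches: collect and continue */
	return (cmp > 0) ? 1 : -1;	/* >0: sorted past the range, stop */
}

static int
demo_collect_matches(const char *query, const char **sorted, int n,
					 const char **out)
{
	int			i;
	int			nout = 0;

	for (i = 0; i < n; i++)
	{
		int			cmp = demo_compare_partial(query, sorted[i]);

		if (cmp > 0)
			break;				/* the compare function says we're done */
		if (cmp == 0)
			out[nout++] = sorted[i];
	}
	return nout;
}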
/*
* In ALL mode, we are not interested in null items, so we can
* stop if we get to a null-item placeholder (which will be the
- * last entry for a given attnum). We do want to include NULL_KEY
+ * last entry for a given attnum). We do want to include NULL_KEY
* and EMPTY_ITEM entries, though.
*/
if (icategory == GIN_CAT_NULL_ITEM)
* We should unlock current page (but not unpin) during tree scan
* to prevent deadlock with vacuum processes.
*
- * We save current entry value (idatum) to be able to re-find
- * our tuple after re-locking
+ * We save current entry value (idatum) to be able to re-find our
+ * tuple after re-locking
*/
if (icategory == GIN_CAT_NORM_KEY)
idatum = datumCopy(idatum, attr->attbyval, attr->attlen);
Page page;
/*
- * We should unlock entry page before touching posting tree
- * to prevent deadlocks with vacuum processes. Because entry is
- * never deleted from page and posting tree is never reduced to
- * the posting list, we can unlock page after getting BlockNumber
- * of root of posting tree.
+ * We should unlock entry page before touching posting tree to
+ * prevent deadlocks with vacuum processes. Because entry is never
+ * deleted from page and posting tree is never reduced to the
+ * posting list, we can unlock page after getting BlockNumber of
+ * root of posting tree.
*/
LockBuffer(stackEntry->buffer, GIN_UNLOCK);
needUnlock = FALSE;
if (!ItemPointerIsValid(&entry->curItem) ||
ginCompareItemPointers(&entry->curItem,
- entry->list + entry->offset - 1) == 0)
+ entry->list + entry->offset - 1) == 0)
{
/*
* First pages are deleted or empty, or we found exact
}
/*
- * Reset counter to the beginning of entry->matchResult.
- * Note: entry->offset is still greater than
- * matchResult->ntuples if matchResult is lossy. So, on next
- * call we will get next result from TIDBitmap.
+ * Reset counter to the beginning of entry->matchResult. Note:
+ * entry->offset is still greater than matchResult->ntuples if
+ * matchResult is lossy. So, on next call we will get next
+ * result from TIDBitmap.
*/
entry->offset = 0;
}
/*
* Find the minimum of the active entry curItems.
*
- * Note: a lossy-page entry is encoded by a ItemPointer with max value
- * for offset (0xffff), so that it will sort after any exact entries
- * for the same page. So we'll prefer to return exact pointers not
- * lossy pointers, which is good.
+ * Note: a lossy-page entry is encoded by a ItemPointer with max value for
+ * offset (0xffff), so that it will sort after any exact entries for the
+ * same page. So we'll prefer to return exact pointers not lossy
+ * pointers, which is good.
*/
ItemPointerSetMax(&minItem);
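/*
 * Illustrative sketch (not part of the patch): the encoding trick described
 * above -- a lossy whole-page pointer carries the maximum offset (0xffff),
 * so ordering by (block, offset) makes it sort after every exact pointer on
 * the same page.  Standalone stand-in types, not the real ItemPointerData.
 */
#include <stdint.h>
#include <stdbool.h>

#define DEMO_LOSSY_OFFSET 0xffff

typedef struct DemoItemPtr
{
	uint32_t	blkno;
	uint16_t	offset;			/* DEMO_LOSSY_OFFSET means "whole page" */
} DemoItemPtr;

static int
demo_cmp_itemptr(const DemoItemPtr *a, const DemoItemPtr *b)
{
	if (a->blkno != b->blkno)
		return (a->blkno < b->blkno) ? -1 : 1;
	if (a->offset != b->offset)
		return (a->offset < b->offset) ? -1 : 1;
	return 0;
}

static bool
demo_is_lossy(const DemoItemPtr *p)
{
	return p->offset == DEMO_LOSSY_OFFSET;
}

/*
 * Taking the minimum over entries ordered this way therefore prefers exact
 * pointers to a lossy pointer for the same page, as the comment above notes.
 */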
/*
* Lossy-page entries pose a problem, since we don't know the correct
- * entryRes state to pass to the consistentFn, and we also don't know
- * what its combining logic will be (could be AND, OR, or even NOT).
- * If the logic is OR then the consistentFn might succeed for all
- * items in the lossy page even when none of the other entries match.
+ * entryRes state to pass to the consistentFn, and we also don't know what
+ * its combining logic will be (could be AND, OR, or even NOT). If the
+ * logic is OR then the consistentFn might succeed for all items in the
+ * lossy page even when none of the other entries match.
*
* If we have a single lossy-page entry then we check to see if the
- * consistentFn will succeed with only that entry TRUE. If so,
- * we return a lossy-page pointer to indicate that the whole heap
- * page must be checked. (On subsequent calls, we'll do nothing until
- * minItem is past the page altogether, thus ensuring that we never return
- * both regular and lossy pointers for the same page.)
+ * consistentFn will succeed with only that entry TRUE. If so, we return
+ * a lossy-page pointer to indicate that the whole heap page must be
+ * checked. (On subsequent calls, we'll do nothing until minItem is past
+ * the page altogether, thus ensuring that we never return both regular
+ * and lossy pointers for the same page.)
*
- * This idea could be generalized to more than one lossy-page entry,
- * but ideally lossy-page entries should be infrequent so it would
- * seldom be the case that we have more than one at once. So it
- * doesn't seem worth the extra complexity to optimize that case.
- * If we do find more than one, we just punt and return a lossy-page
- * pointer always.
+ * This idea could be generalized to more than one lossy-page entry, but
+ * ideally lossy-page entries should be infrequent so it would seldom be
+ * the case that we have more than one at once. So it doesn't seem worth
+ * the extra complexity to optimize that case. If we do find more than
+ * one, we just punt and return a lossy-page pointer always.
*
- * Note that only lossy-page entries pointing to the current item's
- * page should trigger this processing; we might have future lossy
- * pages in the entry array, but they aren't relevant yet.
+ * Note that only lossy-page entries pointing to the current item's page
+ * should trigger this processing; we might have future lossy pages in the
+ * entry array, but they aren't relevant yet.
*/
ItemPointerSetLossyPage(&curPageLossy,
GinItemPointerGetBlockNumber(&key->curItem));
}
/*
- * At this point we know that we don't need to return a lossy
- * whole-page pointer, but we might have matches for individual exact
- * item pointers, possibly in combination with a lossy pointer. Our
- * strategy if there's a lossy pointer is to try the consistentFn both
- * ways and return a hit if it accepts either one (forcing the hit to
- * be marked lossy so it will be rechecked). An exception is that
- * we don't need to try it both ways if the lossy pointer is in a
- * "hidden" entry, because the consistentFn's result can't depend on
- * that.
+ * At this point we know that we don't need to return a lossy whole-page
+ * pointer, but we might have matches for individual exact item pointers,
+ * possibly in combination with a lossy pointer. Our strategy if there's
+ * a lossy pointer is to try the consistentFn both ways and return a hit
+ * if it accepts either one (forcing the hit to be marked lossy so it will
+ * be rechecked). An exception is that we don't need to try it both ways
+ * if the lossy pointer is in a "hidden" entry, because the consistentFn's
+ * result can't depend on that.
*
* Prepare entryRes array to be passed to consistentFn.
*/
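/*
 * Illustrative sketch (not part of the patch): the "try the consistentFn
 * both ways" strategy described above, with a stand-in callback.  If either
 * setting of the lossy entry's flag satisfies the key, a match is reported
 * and marked for recheck.  demo_* names are made up; the real code operates
 * on key->entryRes and the key's consistent function.
 */
#include <stdbool.h>

typedef bool (*demo_consistent_fn) (const bool *entryRes, int nentries);

static bool
demo_match_with_lossy_entry(demo_consistent_fn consistent,
							bool *entryRes, int nentries,
							int lossyIndex, bool *recheck)
{
	entryRes[lossyIndex] = true;
	if (consistent(entryRes, nentries))
	{
		*recheck = true;		/* the hit may depend on the lossy page */
		return true;
	}

	entryRes[lossyIndex] = false;
	if (consistent(entryRes, nentries))
	{
		*recheck = true;		/* still lossy: the page must be rechecked */
		return true;
	}

	return false;
}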
keyGetItem(&so->ginstate, so->tempCtx, key);
if (key->isFinished)
- return false; /* finished one of keys */
+ return false; /* finished one of keys */
if (ginCompareItemPointers(&key->curItem, item) < 0)
*item = key->curItem;
* that exact TID, or a lossy reference to the same page.
*
* This logic works only if a keyGetItem stream can never contain both
- * exact and lossy pointers for the same page. Else we could have a
+ * exact and lossy pointers for the same page. Else we could have a
* case like
*
* stream 1 stream 2
break;
/*
- * No hit. Update myAdvancePast to this TID, so that on the next
- * pass we'll move to the next possible entry.
+ * No hit. Update myAdvancePast to this TID, so that on the next pass
+ * we'll move to the next possible entry.
*/
myAdvancePast = *item;
}
/*
* Now pos->firstOffset points to the first tuple of current heap
- * row, pos->lastOffset points to the first tuple of next heap
- * row (or to the end of page)
+ * row, pos->lastOffset points to the first tuple of next heap row
+ * (or to the end of page)
*/
break;
}
entry->queryKey,
datum[off - 1],
UInt16GetDatum(entry->strategy),
- PointerGetDatum(entry->extra_data)));
+ PointerGetDatum(entry->extra_data)));
if (cmp == 0)
return true;
else if (cmp > 0)
memset(pos->hasMatchKey, FALSE, so->nkeys);
/*
- * Outer loop iterates over multiple pending-list pages when a single
- * heap row has entries spanning those pages.
+ * Outer loop iterates over multiple pending-list pages when a single heap
+ * row has entries spanning those pages.
*/
for (;;)
{
if (res == 0)
{
/*
- * Found exact match (there can be only one, except
- * in EMPTY_QUERY mode).
+ * Found exact match (there can be only one, except in
+ * EMPTY_QUERY mode).
*
- * If doing partial match, scan forward from
- * here to end of page to check for matches.
+ * If doing partial match, scan forward from here to
+ * end of page to check for matches.
*
* See comment above about tuple's ordering.
*/
if (StopLow >= StopHigh && entry->isPartialMatch)
{
/*
- * No exact match on this page. If doing partial
- * match, scan from the first tuple greater than
- * target value to end of page. Note that since we
- * don't remember whether the comparePartialFn told us
- * to stop early on a previous page, we will uselessly
- * apply comparePartialFn to the first tuple on each
- * subsequent page.
+ * No exact match on this page. If doing partial match,
+ * scan from the first tuple greater than target value to
+ * end of page. Note that since we don't remember whether
+ * the comparePartialFn told us to stop early on a
+ * previous page, we will uselessly apply comparePartialFn
+ * to the first tuple on each subsequent page.
*/
key->entryRes[j] =
matchPartialInPendingList(&so->ginstate,
* Adds array of item pointers to tuple's posting list, or
* creates posting tree and tuple pointing to tree in case
* of not enough space. Max size of tuple is defined in
- * GinFormTuple(). Returns a new, modified index tuple.
+ * GinFormTuple(). Returns a new, modified index tuple.
* items[] must be in sorted order with no duplicates.
*/
static IndexTuple
BlockNumber postingRoot;
/*
- * Build posting-tree-only result tuple. We do this first so as
- * to fail quickly if the key is too big.
+ * Build posting-tree-only result tuple. We do this first so as to
+ * fail quickly if the key is too big.
*/
res = GinFormTuple(ginstate, attnum, key, category, NULL, 0, true);
/*
- * Initialize posting tree with as many TIDs as will fit on the
- * first page.
+ * Initialize posting tree with as many TIDs as will fit on the first
+ * page.
*/
postingRoot = createPostingTree(ginstate->index,
items,
ginBeginBAScan(&buildstate->accum);
while ((list = ginGetBAEntry(&buildstate->accum,
- &attnum, &key, &category, &nlist)) != NULL)
+ &attnum, &key, &category, &nlist)) != NULL)
{
/* there could be many entries, so be willing to abort here */
CHECK_FOR_INTERRUPTS();
break;
default:
elog(ERROR, "unexpected searchMode: %d", searchMode);
- queryCategory = 0; /* keep compiler quiet */
+ queryCategory = 0; /* keep compiler quiet */
break;
}
isPartialMatch = false;
int32 searchMode = GIN_SEARCH_MODE_DEFAULT;
/*
- * We assume that GIN-indexable operators are strict, so a null
- * query argument means an unsatisfiable query.
+ * We assume that GIN-indexable operators are strict, so a null query
+ * argument means an unsatisfiable query.
*/
if (skey->sk_flags & SK_ISNULL)
{
PointerGetDatum(&searchMode)));
/*
- * If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL;
- * note in particular we don't allow extractQueryFn to select
+ * If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL; note
+ * in particular we don't allow extractQueryFn to select
* GIN_SEARCH_MODE_EVERYTHING.
*/
if (searchMode < GIN_SEARCH_MODE_DEFAULT ||
* If the extractQueryFn didn't create a nullFlags array, create one,
* assuming that everything's non-null. Otherwise, run through the
* array and make sure each value is exactly 0 or 1; this ensures
- * binary compatibility with the GinNullCategory representation.
- * While at it, detect whether any null keys are present.
+ * binary compatibility with the GinNullCategory representation. While
+ * at it, detect whether any null keys are present.
*/
if (nullFlags == NULL)
nullFlags = (bool *) palloc0(nQueryValues * sizeof(bool));
else
{
- int32 j;
+ int32 j;
for (j = 0; j < nQueryValues; j++)
{
if (nullFlags[j])
{
- nullFlags[j] = true; /* not any other nonzero value */
+ nullFlags[j] = true; /* not any other nonzero value */
hasNullQuery = true;
}
}
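/*
 * Illustrative sketch (not part of the patch): the normalization step
 * described above in isolation -- allocate an all-false array when the
 * support function supplied no nullFlags, otherwise force every element to
 * exactly 0 or 1 and remember whether any null key was seen.  calloc stands
 * in for palloc0; demo_* names are made up.
 */
#include <stdbool.h>
#include <stdlib.h>

static bool *
demo_normalize_null_flags(bool *nullFlags, int nkeys, bool *hasNulls)
{
	int			j;

	*hasNulls = false;
	if (nullFlags == NULL)
		return (bool *) calloc(nkeys, sizeof(bool));

	for (j = 0; j < nkeys; j++)
	{
		if (nullFlags[j])
		{
			nullFlags[j] = true;	/* squash any other nonzero value */
			*hasNulls = true;
		}
	}
	return nullFlags;
}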
/*
* If the index is version 0, it may be missing null and placeholder
* entries, which would render searches for nulls and full-index scans
- * unreliable. Throw an error if so.
+ * unreliable. Throw an error if so.
*/
if (hasNullQuery && !so->isVoidRes)
{
- GinStatsData ginStats;
+ GinStatsData ginStats;
ginGetStats(scan->indexRelation, &ginStats);
if (ginStats.ginVersion < 1)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1);
+
/* remaining arguments are ignored */
GinScanOpaque so = (GinScanOpaque) scan->opaque;
* However, we may have a collatable storage type for a noncollatable
* indexed data type (for instance, hstore uses text index entries).
* If there's no index collation then specify default collation in
- * case the comparison function needs one. This is harmless if the
+ * case the comparison function needs one. This is harmless if the
* comparison function doesn't care about collation, so we just do it
* unconditionally. (We could alternatively call get_typcollation,
* but that seems like expensive overkill --- there aren't going to be
aa->datum, bb->datum));
/*
- * Detect if we have any duplicates. If there are equal keys, qsort
- * must compare them at some point, else it wouldn't know whether one
- * should go before or after the other.
+ * Detect if we have any duplicates. If there are equal keys, qsort must
+ * compare them at some point, else it wouldn't know whether one should go
+ * before or after the other.
*/
if (res == 0)
data->haveDups = true;
/*
* If the extractValueFn didn't create a nullFlags array, create one,
- * assuming that everything's non-null. Otherwise, run through the
- * array and make sure each value is exactly 0 or 1; this ensures
- * binary compatibility with the GinNullCategory representation.
+ * assuming that everything's non-null. Otherwise, run through the array
+ * and make sure each value is exactly 0 or 1; this ensures binary
+ * compatibility with the GinNullCategory representation.
*/
if (nullFlags == NULL)
nullFlags = (bool *) palloc0(*nentries * sizeof(bool));
* If there's more than one key, sort and unique-ify.
*
* XXX Using qsort here is notationally painful, and the overhead is
- * pretty bad too. For small numbers of keys it'd likely be better to
- * use a simple insertion sort.
+ * pretty bad too. For small numbers of keys it'd likely be better to use
+ * a simple insertion sort.
*/
if (*nentries > 1)
{
j = 1;
for (i = 1; i < *nentries; i++)
{
- if (cmpEntries(&keydata[i-1], &keydata[i], &arg) != 0)
+ if (cmpEntries(&keydata[i - 1], &keydata[i], &arg) != 0)
{
entries[j] = keydata[i].datum;
nullFlags[j] = keydata[i].isnull;
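/*
 * Illustrative sketch (not part of the patch): the "simple insertion sort"
 * that the XXX comment above suggests for small key counts, shown for a
 * plain int array together with the same unique-ify pass.  The real code
 * would sort keyEntryData items with cmpEntries(); this only shows the shape
 * of the alternative.  demo_sort_unique is a made-up name.
 */
static int
demo_sort_unique(int *keys, int nkeys)
{
	int			i,
				j;

	if (nkeys < 2)
		return nkeys;

	/* insertion sort: no callback plumbing, cheap for a handful of keys */
	for (i = 1; i < nkeys; i++)
	{
		int			cur = keys[i];

		for (j = i; j > 0 && keys[j - 1] > cur; j--)
			keys[j] = keys[j - 1];
		keys[j] = cur;
	}

	/* unique-ify, keeping the first of each run of equal keys */
	j = 1;
	for (i = 1; i < nkeys; i++)
	{
		if (keys[i - 1] != keys[i])
			keys[j++] = keys[i];
	}
	return j;
}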
void
ginGetStats(Relation index, GinStatsData *stats)
{
- Buffer metabuffer;
- Page metapage;
- GinMetaPageData *metadata;
+ Buffer metabuffer;
+ Page metapage;
+ GinMetaPageData *metadata;
metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
LockBuffer(metabuffer, GIN_SHARE);
void
ginUpdateStats(Relation index, const GinStatsData *stats)
{
- Buffer metabuffer;
- Page metapage;
- GinMetaPageData *metadata;
+ Buffer metabuffer;
+ Page metapage;
+ GinMetaPageData *metadata;
metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
LockBuffer(metabuffer, GIN_EXCLUSIVE);
if (RelationNeedsWAL(index))
{
- XLogRecPtr recptr;
- ginxlogUpdateMeta data;
- XLogRecData rdata;
+ XLogRecPtr recptr;
+ ginxlogUpdateMeta data;
+ XLogRecData rdata;
data.node = index->rd_node;
data.ntuples = 0;
{
idxStat.nEntryPages++;
- if ( GinPageIsLeaf(page) )
+ if (GinPageIsLeaf(page))
idxStat.nEntries += PageGetMaxOffsetNumber(page);
}
else
{
OffsetNumber i,
- *tod;
+ *tod;
IndexTuple itup = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogVacuumPage));
tod = (OffsetNumber *) palloc(sizeof(OffsetNumber) * PageGetMaxOffsetNumber(page));
if (!XLByteLE(lsn, PageGetLSN(page)))
{
OffsetNumber l,
- off = (PageIsEmpty(page)) ? FirstOffsetNumber :
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ off = (PageIsEmpty(page)) ? FirstOffsetNumber :
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
int i,
- tupsize;
+ tupsize;
IndexTuple tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta));
for (i = 0; i < data->ntuples; i++)
/* A List of these is used represent a split-in-progress. */
typedef struct
{
- Buffer buf; /* the split page "half" */
- IndexTuple downlink; /* downlink for this half. */
+ Buffer buf; /* the split page "half" */
+ IndexTuple downlink; /* downlink for this half. */
} GISTPageSplitInfo;
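/*
 * Illustrative sketch (not part of the patch): how a split-in-progress is
 * represented with this struct -- one GISTPageSplitInfo per page "half",
 * collected into a List the same way gistplacetopage does further down
 * (palloc + lappend), for the caller to walk when it inserts the downlinks.
 * demo_collect_split_halves is a made-up name.
 */
static List *
demo_collect_split_halves(SplitedPageLayout *dist)
{
	List	   *splitinfo = NIL;
	SplitedPageLayout *ptr;

	for (ptr = dist; ptr; ptr = ptr->next)
	{
		GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));

		si->buf = ptr->buffer;		/* the split page "half" */
		si->downlink = ptr->itup;	/* downlink to insert into the parent */
		splitinfo = lappend(splitinfo, si);
	}
	return splitinfo;
}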
/* non-export function prototypes */
bool is_split;
/*
- * Refuse to modify a page that's incompletely split. This should
- * not happen because we finish any incomplete splits while we walk
- * down the tree. However, it's remotely possible that another
- * concurrent inserter splits a parent page, and errors out before
- * completing the split. We will just throw an error in that case,
- * and leave any split we had in progress unfinished too. The next
- * insert that comes along will clean up the mess.
+ * Refuse to modify a page that's incompletely split. This should not
+ * happen because we finish any incomplete splits while we walk down the
+ * tree. However, it's remotely possible that another concurrent inserter
+ * splits a parent page, and errors out before completing the split. We
+ * will just throw an error in that case, and leave any split we had in
+ * progress unfinished too. The next insert that comes along will clean up
+ * the mess.
*/
if (GistFollowRight(page))
elog(ERROR, "concurrent GiST page split was incomplete");
SplitedPageLayout *dist = NULL,
*ptr;
BlockNumber oldrlink = InvalidBlockNumber;
- GistNSN oldnsn = { 0, 0 };
+ GistNSN oldnsn = {0, 0};
SplitedPageLayout rootpg;
BlockNumber blkno = BufferGetBlockNumber(buffer);
bool is_rootsplit;
/*
* Set up pages to work with. Allocate new buffers for all but the
- * leftmost page. The original page becomes the new leftmost page,
- * and is just replaced with the new contents.
+ * leftmost page. The original page becomes the new leftmost page, and
+ * is just replaced with the new contents.
*
* For a root-split, allocate new buffers for all child pages, the
* original page is overwritten with new root page containing
if (is_rootsplit)
{
IndexTuple *downlinks;
- int ndownlinks = 0;
- int i;
+ int ndownlinks = 0;
+ int i;
rootpg.buffer = buffer;
rootpg.page = PageGetTempPageCopySpecial(BufferGetPage(rootpg.buffer));
for (ptr = dist; ptr; ptr = ptr->next)
{
GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
+
si->buf = ptr->buffer;
si->downlink = ptr->itup;
*splitinfo = lappend(*splitinfo, si);
*/
for (ptr = dist; ptr; ptr = ptr->next)
{
- char *data = (char *) (ptr->list);
+ char *data = (char *) (ptr->list);
+
for (i = 0; i < ptr->block.num; i++)
{
if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, false, false) == InvalidOffsetNumber)
MarkBufferDirty(leftchildbuf);
/*
- * The first page in the chain was a temporary working copy meant
- * to replace the old page. Copy it over the old page.
+ * The first page in the chain was a temporary working copy meant to
+ * replace the old page. Copy it over the old page.
*/
PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
dist->page = BufferGetPage(dist->buffer);
* Return the new child buffers to the caller.
*
* If this was a root split, we've already inserted the downlink
- * pointers, in the form of a new root page. Therefore we can
- * release all the new buffers, and keep just the root page locked.
+ * pointers, in the form of a new root page. Therefore we can release
+ * all the new buffers, and keep just the root page locked.
*/
if (is_rootsplit)
{
/*
* If we inserted the downlink for a child page, set NSN and clear
- * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know
- * to follow the rightlink if and only if they looked at the parent page
+ * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to
+ * follow the rightlink if and only if they looked at the parent page
* before we inserted the downlink.
*
* Note that we do this *after* writing the WAL record. That means that
- * the possible full page image in the WAL record does not include
- * these changes, and they must be replayed even if the page is restored
- * from the full page image. There's a chicken-and-egg problem: if we
- * updated the child pages first, we wouldn't know the recptr of the WAL
- * record we're about to write.
+ * the possible full page image in the WAL record does not include these
+ * changes, and they must be replayed even if the page is restored from
+ * the full page image. There's a chicken-and-egg problem: if we updated
+ * the child pages first, we wouldn't know the recptr of the WAL record
+ * we're about to write.
*/
if (BufferIsValid(leftchildbuf))
{
- Page leftpg = BufferGetPage(leftchildbuf);
+ Page leftpg = BufferGetPage(leftchildbuf);
GistPageGetOpaque(leftpg)->nsn = recptr;
GistClearFollowRight(leftpg);
stack->buffer = ReadBuffer(state.r, stack->blkno);
/*
- * Be optimistic and grab shared lock first. Swap it for an
- * exclusive lock later if we need to update the page.
+ * Be optimistic and grab shared lock first. Swap it for an exclusive
+ * lock later if we need to update the page.
*/
if (!xlocked)
{
Assert(!RelationNeedsWAL(state.r) || !XLogRecPtrIsInvalid(stack->lsn));
/*
- * If this page was split but the downlink was never inserted to
- * the parent because the inserting backend crashed before doing
- * that, fix that now.
+ * If this page was split but the downlink was never inserted to the
+ * parent because the inserting backend crashed before doing that, fix
+ * that now.
*/
if (GistFollowRight(stack->page))
{
/*
* Concurrent split detected. There's no guarantee that the
* downlink for this page is consistent with the tuple we're
- * inserting anymore, so go back to parent and rechoose the
- * best child.
+ * inserting anymore, so go back to parent and rechoose the best
+ * child.
*/
UnlockReleaseBuffer(stack->buffer);
xlocked = false;
* Find the child node that has the minimum insertion penalty.
*/
BlockNumber childblkno;
- IndexTuple newtup;
+ IndexTuple newtup;
GISTInsertStack *item;
stack->childoffnum = gistchoose(state.r, stack->page, itup, giststate);
if (newtup)
{
/*
- * Swap shared lock for an exclusive one. Beware, the page
- * may change while we unlock/lock the page...
+ * Swap shared lock for an exclusive one. Beware, the page may
+ * change while we unlock/lock the page...
*/
if (!xlocked)
{
continue;
}
}
+
/*
* Update the tuple.
*
stack->childoffnum, InvalidBuffer))
{
/*
- * If this was a root split, the root page continues to
- * be the parent and the updated tuple went to one of the
+ * If this was a root split, the root page continues to be
+ * the parent and the updated tuple went to one of the
* child pages, so we just need to retry from the root
* page.
*/
{
/*
* Leaf page. Insert the new key. We've already updated all the
- * parents on the way down, but we might have to split the page
- * if it doesn't fit. gistinserthere() will take care of that.
+ * parents on the way down, but we might have to split the page if
+ * it doesn't fit. gistinserthere() will take care of that.
*/
/*
- * Swap shared lock for an exclusive one. Be careful, the page
- * may change while we unlock/lock the page...
+ * Swap shared lock for an exclusive one. Be careful, the page may
+ * change while we unlock/lock the page...
*/
if (!xlocked)
{
if (stack->blkno == GIST_ROOT_BLKNO)
{
/*
- * the only page that can become inner instead of leaf
- * is the root page, so for root we should recheck it
+ * the only page that can become inner instead of leaf is
+ * the root page, so for root we should recheck it
*/
if (!GistPageIsLeaf(stack->page))
{
gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
GISTInsertStack *stack)
{
- Page page = BufferGetPage(buf);
+ Page page = BufferGetPage(buf);
OffsetNumber maxoff;
OffsetNumber offset;
- IndexTuple downlink = NULL;
+ IndexTuple downlink = NULL;
maxoff = PageGetMaxOffsetNumber(page);
for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
{
IndexTuple ituple = (IndexTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
+
if (downlink == NULL)
downlink = CopyIndexTuple(ituple);
else
{
- IndexTuple newdownlink;
+ IndexTuple newdownlink;
+
newdownlink = gistgetadjusted(rel, downlink, ituple,
giststate);
if (newdownlink)
}
/*
- * If the page is completely empty, we can't form a meaningful
- * downlink for it. But we have to insert a downlink for the page.
- * Any key will do, as long as its consistent with the downlink of
- * parent page, so that we can legally insert it to the parent.
- * A minimal one that matches as few scans as possible would be best,
- * to keep scans from doing useless work, but we don't know how to
- * construct that. So we just use the downlink of the original page
- * that was split - that's as far from optimal as it can get but will
- * do..
+ * If the page is completely empty, we can't form a meaningful downlink
+ * for it. But we have to insert a downlink for the page. Any key will do,
+ * as long as it's consistent with the downlink of parent page, so that we
+ * can legally insert it to the parent. A minimal one that matches as few
+ * scans as possible would be best, to keep scans from doing useless work,
+ * but we don't know how to construct that. So we just use the downlink of
+ * the original page that was split - that's as far from optimal as it can
+ * get but will do..
*/
if (!downlink)
{
- ItemId iid;
+ ItemId iid;
LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
gistFindCorrectParent(rel, stack);
buf = stack->buffer;
/*
- * Read the chain of split pages, following the rightlinks. Construct
- * a downlink tuple for each page.
+ * Read the chain of split pages, following the rightlinks. Construct a
+ * downlink tuple for each page.
*/
for (;;)
{
GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
- IndexTuple downlink;
+ IndexTuple downlink;
page = BufferGetPage(buf);
IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
Buffer leftchild)
{
- List *splitinfo;
- bool is_split;
+ List *splitinfo;
+ bool is_split;
is_split = gistplacetopage(state, giststate, stack->buffer,
tuples, ntup, oldoffnum,
gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
GISTSTATE *giststate, List *splitinfo)
{
- ListCell *lc;
- List *reversed;
+ ListCell *lc;
+ List *reversed;
GISTPageSplitInfo *right;
GISTPageSplitInfo *left;
- IndexTuple tuples[2];
+ IndexTuple tuples[2];
/* A split always contains at least two halves */
Assert(list_length(splitinfo) >= 2);
/*
- * We need to insert downlinks for each new page, and update the
- * downlink for the original (leftmost) page in the split. Begin at
- * the rightmost page, inserting one downlink at a time until there's
- * only two pages left. Finally insert the downlink for the last new
- * page and update the downlink for the original page as one operation.
+ * We need to insert downlinks for each new page, and update the downlink
+ * for the original (leftmost) page in the split. Begin at the rightmost
+ * page, inserting one downlink at a time until there's only two pages
+ * left. Finally insert the downlink for the last new page and update the
+ * downlink for the original page as one operation.
*/
/* for convenience, create a copy of the list in reverse order */
LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
gistFindCorrectParent(state->r, stack);
- while(list_length(reversed) > 2)
+ while (list_length(reversed) > 2)
{
right = (GISTPageSplitInfo *) linitial(reversed);
left = (GISTPageSplitInfo *) lsecond(reversed);
/* opclasses are not required to provide a Distance method */
if (OidIsValid(index_getprocid(index, i + 1, GIST_DISTANCE_PROC)))
fmgr_info_copy(&(giststate->distanceFn[i]),
- index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC),
+ index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC),
CurrentMemoryContext);
else
giststate->distanceFn[i].fn_oid = InvalidOid;
*
* On success return for a heap tuple, *recheck_p is set to indicate
* whether recheck is needed. We recheck if any of the consistent() functions
- * request it. recheck is not interesting when examining a non-leaf entry,
+ * request it. recheck is not interesting when examining a non-leaf entry,
* since we must visit the lower index page if there's any doubt.
*
* If we are doing an ordered scan, so->distances[] is filled with distance
*recheck_p = false;
/*
- * If it's a leftover invalid tuple from pre-9.1, treat it as a match
- * with minimum possible distances. This means we'll always follow it
- * to the referenced page.
+ * If it's a leftover invalid tuple from pre-9.1, treat it as a match with
+ * minimum possible distances. This means we'll always follow it to the
+ * referenced page.
*/
if (GistTupleIsInvalid(tuple))
{
- int i;
+ int i;
- if (GistPageIsLeaf(page)) /* shouldn't happen */
+ if (GistPageIsLeaf(page)) /* shouldn't happen */
elog(ERROR, "invalid GIST tuple found on leaf page");
for (i = 0; i < scan->numberOfOrderBys; i++)
so->distances[i] = -get_float8_infinity();
* always be zero, but might as well pass it for possible future
* use.)
*
- * Note that Distance functions don't get a recheck argument.
- * We can't tolerate lossy distance calculations on leaf tuples;
+ * Note that Distance functions don't get a recheck argument. We
+ * can't tolerate lossy distance calculations on leaf tuples;
* there is no opportunity to re-sort the tuples afterwards.
*/
dist = FunctionCall4(&key->sk_func,
* ntids: if not NULL, gistgetbitmap's output tuple counter
*
* If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
- * tuples should be reported directly into the bitmap. If they are NULL,
+ * tuples should be reported directly into the bitmap. If they are NULL,
* we're doing a plain or ordered indexscan. For a plain indexscan, heap
* tuple TIDs are returned into so->pageData[]. For an ordered indexscan,
* heap tuple TIDs are pushed into individual search queue items.
/*
* While scanning a leaf page, ItemPointers of matching heap
* tuples are stored in so->pageData. If there are any on
- * this page, we fall out of the inner "do" and loop around
- * to return them.
+ * this page, we fall out of the inner "do" and loop around to
+ * return them.
*/
gistScanPage(scan, item, so->curTreeItem->distances, NULL, NULL);
PG_RETURN_POINTER(entry);
}
-#define point_point_distance(p1,p2) \
+#define point_point_distance(p1,p2) \
DatumGetFloat8(DirectFunctionCall2(point_distance, \
PointPGetDatum(p1), PointPGetDatum(p2)))
else
{
/* closest point will be a vertex */
- Point p;
- double subresult;
+ Point p;
+ double subresult;
result = point_point_distance(point, &box->low);
/*
* If new item is heap tuple, it goes to front of chain; otherwise insert
- * it before the first index-page item, so that index pages are visited
- * in LIFO order, ensuring depth-first search of index pages. See
- * comments in gist_private.h.
+ * it before the first index-page item, so that index pages are visited in
+ * LIFO order, ensuring depth-first search of index pages. See comments
+ * in gist_private.h.
*/
if (GISTSearchItemIsHeap(*newitem))
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanKey key = (ScanKey) PG_GETARG_POINTER(1);
ScanKey orderbys = (ScanKey) PG_GETARG_POINTER(3);
+
/* nkeys and norderbys arguments are ignored */
GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
int i;
scan->numberOfKeys * sizeof(ScanKeyData));
/*
- * Modify the scan key so that the Consistent method is called for
- * all comparisons. The original operator is passed to the Consistent
+ * Modify the scan key so that the Consistent method is called for all
+ * comparisons. The original operator is passed to the Consistent
* function in the form of its strategy number, which is available
* from the sk_strategy field, and its subtype from the sk_subtype
* field. Also, preserve sk_func.fn_collation which is the input
}
res = index_form_tuple(giststate->tupdesc, compatt, isnull);
+
/*
* The offset number on tuples on internal pages is unused. For historical
* reasons, it is set 0xffff.
*/
- ItemPointerSetOffsetNumber( &(res->t_tid), 0xffff);
+ ItemPointerSetOffsetNumber(&(res->t_tid), 0xffff);
return res;
}
gistRedoClearFollowRight(RelFileNode node, XLogRecPtr lsn,
BlockNumber leftblkno)
{
- Buffer buffer;
+ Buffer buffer;
buffer = XLogReadBuffer(node, leftblkno, false);
if (BufferIsValid(buffer))
{
- Page page = (Page) BufferGetPage(buffer);
+ Page page = (Page) BufferGetPage(buffer);
/*
* Note that we still update the page even if page LSN is equal to the
{
int i;
OffsetNumber *todelete = (OffsetNumber *) data;
+
data += sizeof(OffsetNumber) * xldata->ntodelete;
for (i = 0; i < xldata->ntodelete; i++)
if (data - begin < record->xl_len)
{
OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
+
while (data - begin < record->xl_len)
{
- IndexTuple itup = (IndexTuple) data;
+ IndexTuple itup = (IndexTuple) data;
Size sz = IndexTupleSize(itup);
OffsetNumber l;
+
data += sz;
l = PageAddItem(page, (Item) itup, sz, off, false, false);
SplitedPageLayout *ptr;
int npage = 0,
cur;
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
for (ptr = dist; ptr; ptr = ptr->next)
npage++;
}
/*
- * Include a full page image of the child buf. (only necessary if
- * a checkpoint happened since the child page was split)
+ * Include a full page image of the child buf. (only necessary if a
+ * checkpoint happened since the child page was split)
*/
if (BufferIsValid(leftchildbuf))
{
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1);
+
/* remaining arguments are ignored */
HashScanOpaque so = (HashScanOpaque) scan->opaque;
Relation rel = scan->indexRelation;
* This is essentially relation_open plus check that the relation
* is not an index nor a composite type. (The caller should also
* check that it's not a view or foreign table before assuming it has
- * storage.)
+ * storage.)
* ----------------
*/
Relation
/*
* We're about to do the actual insert -- check for conflict at the
- * relation or buffer level first, to avoid possibly having to roll
- * back work we've just done.
+ * relation or buffer level first, to avoid possibly having to roll back
+ * work we've just done.
*/
CheckForSerializableConflictIn(relation, NULL, buffer);
}
/*
- * We're about to do the actual delete -- check for conflict first,
- * to avoid possibly having to roll back work we've just done.
+ * We're about to do the actual delete -- check for conflict first, to
+ * avoid possibly having to roll back work we've just done.
*/
CheckForSerializableConflictIn(relation, &tp, buffer);
}
/*
- * We're about to do the actual update -- check for conflict first,
- * to avoid possibly having to roll back work we've just done.
+ * We're about to do the actual update -- check for conflict first, to
+ * avoid possibly having to roll back work we've just done.
*/
CheckForSerializableConflictIn(relation, &oldtup, buffer);
}
/*
- * We're about to create the new tuple -- check for conflict first,
- * to avoid possibly having to roll back work we've just done.
+ * We're about to create the new tuple -- check for conflict first, to
+ * avoid possibly having to roll back work we've just done.
*
* NOTE: For a tuple insert, we only need to check for table locks, since
* predicate locking at the index level will cover ranges for anything
}
/*
- * Ignore tuples inserted by an aborted transaction or
- * if the tuple was updated/deleted by the inserting transaction.
+ * Ignore tuples inserted by an aborted transaction or if the tuple was
+ * updated/deleted by the inserting transaction.
*
* Look for a committed hint bit, or if no xmin bit is set, check clog.
- * This needs to work on both master and standby, where it is used
- * to assess btree delete records.
+ * This needs to work on both master and standby, where it is used to
+ * assess btree delete records.
*/
if ((tuple->t_infomask & HEAP_XMIN_COMMITTED) ||
(!(tuple->t_infomask & HEAP_XMIN_COMMITTED) &&
{
if (xmax != xmin &&
TransactionIdFollows(xmax, *latestRemovedXid))
- *latestRemovedXid = xmax;
+ *latestRemovedXid = xmax;
}
/* *latestRemovedXid may still be invalid at end */
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);
/*
- * The page may be uninitialized. If so, we can't set the LSN
- * and TLI because that would corrupt the page.
+ * The page may be uninitialized. If so, we can't set the LSN and TLI
+ * because that would corrupt the page.
*/
if (!PageIsNew(page))
{
memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);
/*
- * The page may be uninitialized. If so, we can't set the LSN
- * and TLI because that would corrupt the page.
+ * The page may be uninitialized. If so, we can't set the LSN and TLI
+ * because that would corrupt the page.
*/
if (!PageIsNew(page))
{
Buffer
RelationGetBufferForTuple(Relation relation, Size len,
Buffer otherBuffer, int options,
- struct BulkInsertStateData *bistate)
+ struct BulkInsertStateData * bistate)
{
bool use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
Buffer buffer = InvalidBuffer;
* them */
HTAB *rs_unresolved_tups; /* unmatched A tuples */
HTAB *rs_old_new_tid_map; /* unmatched B tuples */
-} RewriteStateData;
+} RewriteStateData;
/*
* The lookup keys for the hash tables are tuple TID and xmin (we must check
}
/*
- * If the rel is WAL-logged, must fsync before commit. We use heap_sync
+ * If the rel is WAL-logged, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too.
*
* It's obvious that we must do this when not WAL-logging. It's less
procnum, attnum, RelationGetRelationName(irel));
fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt);
- fmgr_info_set_collation(irel->rd_indcollation[attnum-1], locinfo);
+ fmgr_info_set_collation(irel->rd_indcollation[attnum - 1], locinfo);
}
return locinfo;
* The only conflict predicate locking cares about for indexes is when
* an index tuple insert conflicts with an existing lock. Since the
* actual location of the insert is hard to predict because of the
- * random search used to prevent O(N^2) performance when there are many
- * duplicate entries, we can just use the "first valid" page.
+ * random search used to prevent O(N^2) performance when there are
+ * many duplicate entries, we can just use the "first valid" page.
*/
CheckForSerializableConflictIn(rel, NULL, buf);
/* do the insertion */
/*
* origpage is the original page to be split. leftpage is a temporary
* buffer that receives the left-sibling data, which will be copied back
- * into origpage on success. rightpage is the new page that receives
- * the right-sibling data. If we fail before reaching the critical
- * section, origpage hasn't been modified and leftpage is only workspace.
- * In principle we shouldn't need to worry about rightpage either,
- * because it hasn't been linked into the btree page structure; but to
- * avoid leaving possibly-confusing junk behind, we are careful to rewrite
- * rightpage as zeroes before throwing any error.
+ * into origpage on success. rightpage is the new page that receives the
+ * right-sibling data. If we fail before reaching the critical section,
+ * origpage hasn't been modified and leftpage is only workspace. In
+ * principle we shouldn't need to worry about rightpage either, because it
+ * hasn't been linked into the btree page structure; but to avoid leaving
+ * possibly-confusing junk behind, we are careful to rewrite rightpage as
+ * zeroes before throwing any error.
*/
origpage = BufferGetPage(buf);
leftpage = PageGetTempPage(origpage);
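/*
 * Illustrative sketch (not part of the patch): the temp-page discipline the
 * comment above describes, reduced to its skeleton.  New contents are staged
 * on a scratch copy, and only once nothing can fail any more is the copy
 * written back over the original, so an error part-way through leaves the
 * original page untouched.  demo_modify_page_safely is a made-up name and
 * the page-building and WAL steps are elided.
 */
static void
demo_modify_page_safely(Buffer buf)
{
	Page		origpage = BufferGetPage(buf);
	Page		temppage = PageGetTempPageCopy(origpage);

	/* ... rearrange temppage as needed; this part may still elog(ERROR) ... */

	START_CRIT_SECTION();
	PageRestoreTempPage(temppage, origpage);	/* copies back, frees temppage */
	MarkBufferDirty(buf);
	/* ... insert the WAL record for the change here ... */
	END_CRIT_SECTION();
}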
{
memset(rightpage, 0, BufferGetPageSize(rbuf));
elog(ERROR, "right sibling's left-link doesn't match: "
- "block %u links to %u instead of expected %u in index \"%s\"",
+ "block %u links to %u instead of expected %u in index \"%s\"",
oopaque->btpo_next, sopaque->btpo_prev, origpagenumber,
RelationGetRelationName(rel));
}
/*
* Check that the parent-page index items we're about to delete/overwrite
- * contain what we expect. This can fail if the index has become
- * corrupt for some reason. We want to throw any error before entering
- * the critical section --- otherwise it'd be a PANIC.
+ * contain what we expect. This can fail if the index has become corrupt
+ * for some reason. We want to throw any error before entering the
+ * critical section --- otherwise it'd be a PANIC.
*
* The test on the target item is just an Assert because _bt_getstackbuf
* should have guaranteed it has the expected contents. The test on the
metapage = (Page) palloc(BLCKSZ);
_bt_initmetapage(metapage, P_NONE, 0);
- /* Write the page. If archiving/streaming, XLOG it. */
+ /* Write the page. If archiving/streaming, XLOG it. */
smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
(char *) metapage, true);
if (XLogIsNeeded())
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1);
+
/* remaining arguments are ignored */
BTScanOpaque so = (BTScanOpaque) scan->opaque;
/* If index is empty and access = BT_READ, no root page is created. */
if (!BufferIsValid(*bufP))
{
- PredicateLockRelation(rel); /* Nothing finer to lock exists. */
+ PredicateLockRelation(rel); /* Nothing finer to lock exists. */
return (BTStack) NULL;
}
if (!BufferIsValid(buf))
{
/* empty index... */
- PredicateLockRelation(rel); /* Nothing finer to lock exists. */
+ PredicateLockRelation(rel); /* Nothing finer to lock exists. */
return InvalidBuffer;
}
if (!BufferIsValid(buf))
{
/* empty index... */
- PredicateLockRelation(rel); /* Nothing finer to lock exists. */
+ PredicateLockRelation(rel); /* Nothing finer to lock exists. */
so->currPos.buf = InvalidBuffer;
return false;
}
/*
* If the index is WAL-logged, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a non-WAL-logged index we don't
+ * safe to commit the transaction. (For a non-WAL-logged index we don't
* care since the index will be uninteresting after a crash anyway.)
*
* It's obvious that we must do this when not WAL-logging the build. It's
/*
* We can use the cached (default) support procs since no cross-type
- * comparison can be needed. The cached support proc entries have
- * the right collation for the index, too.
+ * comparison can be needed. The cached support proc entries have the
+ * right collation for the index, too.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
arg = index_getattr(itup, i + 1, itupdesc, &null);
/*
* We can use the cached (default) support procs since no cross-type
- * comparison can be needed. The cached support proc entries have
- * the right collation for the index, too.
+ * comparison can be needed. The cached support proc entries have the
+ * right collation for the index, too.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
flags = SK_ISNULL | (indoption[i] << SK_BT_INDOPTION_SHIFT);
TransactionId locking_xid; /* top-level XID of backend working on xact */
bool valid; /* TRUE if fully prepared */
char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
-} GlobalTransactionData;
+} GlobalTransactionData;
/*
* Two Phase Commit shared state. Access to this struct is protected
/* If we crash now, we have prepared: WAL replay will fix things */
/*
- * Wake up all walsenders to send WAL up to the PREPARE record
- * immediately if replication is enabled
+ * Wake up all walsenders to send WAL up to the PREPARE record immediately
+ * if replication is enabled
*/
if (max_wal_senders > 0)
WalSndWakeup();
/*
* Wait for synchronous replication, if required.
*
- * Note that at this stage we have marked clog, but still show as
- * running in the procarray and continue to hold locks.
+ * Note that at this stage we have marked clog, but still show as running
+ * in the procarray and continue to hold locks.
*/
SyncRepWaitForLSN(recptr);
}
/*
* Wait for synchronous replication, if required.
*
- * Note that at this stage we have marked clog, but still show as
- * running in the procarray and continue to hold locks.
+ * Note that at this stage we have marked clog, but still show as running
+ * in the procarray and continue to hold locks.
*/
SyncRepWaitForLSN(recptr);
}
char *oldest_datname;
/*
- * We can be called when not inside a transaction, for example
- * during StartupXLOG(). In such a case we cannot do database
- * access, so we must just report the oldest DB's OID.
+ * We can be called when not inside a transaction, for example during
+ * StartupXLOG(). In such a case we cannot do database access, so we
+ * must just report the oldest DB's OID.
*
* Note: it's also possible that get_database_name fails and returns
* NULL, for example because the database just got dropped. We'll
*/
if (isSubXact && !TransactionIdIsValid(s->parent->transactionId))
{
- TransactionState p = s->parent;
- TransactionState *parents;
- size_t parentOffset = 0;
+ TransactionState p = s->parent;
+ TransactionState *parents;
+ size_t parentOffset = 0;
- parents = palloc(sizeof(TransactionState) * s->nestingLevel);
+ parents = palloc(sizeof(TransactionState) * s->nestingLevel);
while (p != NULL && !TransactionIdIsValid(p->transactionId))
{
parents[parentOffset++] = p;
}
/*
- * This is technically a recursive call, but the recursion will
- * never be more than one layer deep.
+ * This is technically a recursive call, but the recursion will never
+ * be more than one layer deep.
*/
while (parentOffset != 0)
AssignTransactionId(parents[--parentOffset]);
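/*
 * Illustrative sketch (not part of the patch): the "assign the parents
 * first, outermost-in" pattern used above, on a standalone node type.
 * Walking up the chain collects every ancestor that still lacks an id;
 * processing that array backwards assigns ids parent-before-child, so the
 * recursive call never has to recurse more than one level, as the comment
 * above notes.  All demo_* names are made up.
 */
#include <stdlib.h>

typedef struct DemoTxn
{
	struct DemoTxn *parent;
	int			id;				/* 0 means "not assigned yet" */
} DemoTxn;

static int	demo_next_id = 1;

static void
demo_assign_id(DemoTxn *txn)
{
	if (txn->parent != NULL && txn->parent->id == 0)
	{
		DemoTxn    *p;
		DemoTxn   **parents;
		size_t		nparents = 0;

		/* collect all ancestors that still need an id, innermost first */
		for (p = txn->parent; p != NULL && p->id == 0; p = p->parent)
			nparents++;
		parents = malloc(sizeof(DemoTxn *) * nparents);
		nparents = 0;
		for (p = txn->parent; p != NULL && p->id == 0; p = p->parent)
			parents[nparents++] = p;

		/* assign the outermost ancestor first, then work back down */
		while (nparents != 0)
			demo_assign_id(parents[--nparents]);
		free(parents);
	}

	txn->id = demo_next_id++;
}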
/*
* Check if we want to commit asynchronously. We can allow the XLOG flush
* to happen asynchronously if synchronous_commit=off, or if the current
- * transaction has not performed any WAL-logged operation. The latter case
- * can arise if the current transaction wrote only to temporary and/or
- * unlogged tables. In case of a crash, the loss of such a transaction
- * will be irrelevant since temp tables will be lost anyway, and unlogged
- * tables will be truncated. (Given the foregoing, you might think that it
- * would be unnecessary to emit the XLOG record at all in this case, but we
- * don't currently try to do that. It would certainly cause problems at
- * least in Hot Standby mode, where the KnownAssignedXids machinery
- * requires tracking every XID assignment. It might be OK to skip it only
- * when wal_level < hot_standby, but for now we don't.)
+ * transaction has not performed any WAL-logged operation. The latter
+ * case can arise if the current transaction wrote only to temporary
+ * and/or unlogged tables. In case of a crash, the loss of such a
+ * transaction will be irrelevant since temp tables will be lost anyway,
+ * and unlogged tables will be truncated. (Given the foregoing, you might
+ * think that it would be unnecessary to emit the XLOG record at all in
+ * this case, but we don't currently try to do that. It would certainly
+ * cause problems at least in Hot Standby mode, where the
+ * KnownAssignedXids machinery requires tracking every XID assignment. It
+ * might be OK to skip it only when wal_level < hot_standby, but for now
+ * we don't.)
*
* However, if we're doing cleanup of any non-temp rels or committing any
* command that wanted to force sync commit, then we must flush XLOG
/*
* Wait for synchronous replication, if required.
*
- * Note that at this stage we have marked clog, but still show as
- * running in the procarray and continue to hold locks.
+ * Note that at this stage we have marked clog, but still show as running
+ * in the procarray and continue to hold locks.
*/
SyncRepWaitForLSN(XactLastRecEnd);
}
/*
- * The remaining actions cannot call any user-defined code, so it's
- * safe to start shutting down within-transaction services. But note
- * that most of this stuff could still throw an error, which would
- * switch us into the transaction-abort path.
+ * The remaining actions cannot call any user-defined code, so it's safe
+ * to start shutting down within-transaction services. But note that most
+ * of this stuff could still throw an error, which would switch us into
+ * the transaction-abort path.
*/
/* Shut down the deferred-trigger manager */
/*
* Mark serializable transaction as complete for predicate locking
- * purposes. This should be done as late as we can put it and still
- * allow errors to be raised for failure patterns found at commit.
+ * purposes. This should be done as late as we can put it and still allow
+ * errors to be raised for failure patterns found at commit.
*/
PreCommit_CheckForSerializationFailure();
}
/*
- * The remaining actions cannot call any user-defined code, so it's
- * safe to start shutting down within-transaction services. But note
- * that most of this stuff could still throw an error, which would
- * switch us into the transaction-abort path.
+ * The remaining actions cannot call any user-defined code, so it's safe
+ * to start shutting down within-transaction services. But note that most
+ * of this stuff could still throw an error, which would switch us into
+ * the transaction-abort path.
*/
/* Shut down the deferred-trigger manager */
/*
* Mark serializable transaction as complete for predicate locking
- * purposes. This should be done as late as we can put it and still
- * allow errors to be raised for failure patterns found at commit.
+ * purposes. This should be done as late as we can put it and still allow
+ * errors to be raised for failure patterns found at commit.
*/
PreCommit_CheckForSerializationFailure();
/* File path names (all relative to $PGDATA) */
#define RECOVERY_COMMAND_FILE "recovery.conf"
#define RECOVERY_COMMAND_DONE "recovery.done"
-#define PROMOTE_SIGNAL_FILE "promote"
+#define PROMOTE_SIGNAL_FILE "promote"
/* User-settable parameters */
* known, need to check the shared state".
*/
static bool LocalRecoveryInProgress = true;
+
/*
* Local copy of SharedHotStandbyActive variable. False actually means "not
* known, need to check the shared state".
/*
* exclusiveBackup is true if a backup started with pg_start_backup() is
* in progress, and nonExclusiveBackups is a counter indicating the number
- * of streaming base backups currently in progress. forcePageWrites is
- * set to true when either of these is non-zero. lastBackupStart is the
- * latest checkpoint redo location used as a starting point for an online
- * backup.
+ * of streaming base backups currently in progress. forcePageWrites is set
+ * to true when either of these is non-zero. lastBackupStart is the latest
+ * checkpoint redo location used as a starting point for an online backup.
*/
bool exclusiveBackup;
int nonExclusiveBackups;
XLogwrtResult LogwrtResult;
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
- XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
+ XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
uint32 lastRemovedLog; /* latest removed/recycled XLOG segment */
uint32 lastRemovedSeg;
bool SharedHotStandbyActive;
/*
- * recoveryWakeupLatch is used to wake up the startup process to
- * continue WAL replay, if it is waiting for WAL to arrive or failover
- * trigger file to appear.
+ * recoveryWakeupLatch is used to wake up the startup process to continue
+ * WAL replay, if it is waiting for WAL to arrive or failover trigger file
+ * to appear.
*/
Latch recoveryWakeupLatch;
/* logs restore point */
typedef struct xl_restore_point
{
- TimestampTz rp_time;
+ TimestampTz rp_time;
char rp_name[MAXFNAMELEN];
} xl_restore_point;
static bool
rescanLatestTimeLine(void)
{
- TimeLineID newtarget;
+ TimeLineID newtarget;
+
newtarget = findNewestTimeLine(recoveryTargetTLI);
if (newtarget != recoveryTargetTLI)
{
/*
* Determine the list of expected TLIs for the new TLI
*/
- List *newExpectedTLIs;
+ List *newExpectedTLIs;
+
newExpectedTLIs = readTimeLineHistory(newtarget);
/*
- * If the current timeline is not part of the history of the
- * new timeline, we cannot proceed to it.
+ * If the current timeline is not part of the history of the new
+ * timeline, we cannot proceed to it.
*
* XXX This isn't foolproof: The new timeline might have forked from
* the current one, but before the current recovery location. In that
* case we will still switch to the new timeline and proceed replaying
* from it even though the history doesn't match what we already
* replayed. That's not good. We will likely notice at the next online
- * checkpoint, as the TLI won't match what we expected, but it's
- * not guaranteed. The admin needs to make sure that doesn't happen.
+ * checkpoint, as the TLI won't match what we expected, but it's not
+ * guaranteed. The admin needs to make sure that doesn't happen.
*/
if (!list_member_int(newExpectedTLIs,
(int) recoveryTargetTLI))
timestamptz_to_str(recoveryStopTime));
else if (recoveryTarget == RECOVERY_TARGET_NAME)
snprintf(buffer, sizeof(buffer),
- "%s%u\t%s\tat restore point \"%s\"\n",
+ "%s%u\t%s\tat restore point \"%s\"\n",
(srcfd < 0) ? "" : "\n",
parentTLI,
xlogfname,
{
/*
* If we haven't yet changed the boot_val default of -1, just let it
- * be. We'll fix it when XLOGShmemSize is called.
+ * be. We'll fix it when XLOGShmemSize is called.
*/
if (XLOGbuffers == -1)
return true;
/*
* If the value of wal_buffers is -1, use the preferred auto-tune value.
* This isn't an amazingly clean place to do this, but we must wait till
- * NBuffers has received its final value, and must do it before using
- * the value of XLOGbuffers to do anything important.
+ * NBuffers has received its final value, and must do it before using the
+ * value of XLOGbuffers to do anything important.
*/
if (XLOGbuffers == -1)
{
/*
* Set up information for the initial checkpoint record
*
- * The initial checkpoint record is written to the beginning of the
- * WAL segment with logid=0 logseg=1. The very first WAL segment, 0/0, is
- * not used, so that we can use 0/0 to mean "before any valid WAL segment".
+ * The initial checkpoint record is written to the beginning of the WAL
+ * segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not
+ * used, so that we can use 0/0 to mean "before any valid WAL segment".
*/
checkPoint.redo.xlogid = 0;
checkPoint.redo.xrecoff = XLogSegSize + SizeOfXLogLongPHD;
TimeLineID rtli = 0;
bool rtliGiven = false;
ConfigVariable *item,
- *head = NULL,
- *tail = NULL;
+ *head = NULL,
+ *tail = NULL;
fd = AllocateFile(RECOVERY_COMMAND_FILE, "r");
if (fd == NULL)
/*
* Since we're asking ParseConfigFp() to error out at FATAL, there's no
* need to check the return value.
- */
+ */
ParseConfigFp(fd, RECOVERY_COMMAND_FILE, 0, FATAL, &head, &tail);
for (item = head; item; item = item->next)
* this overrides recovery_target_time
*/
if (recoveryTarget == RECOVERY_TARGET_XID ||
- recoveryTarget == RECOVERY_TARGET_NAME)
+ recoveryTarget == RECOVERY_TARGET_NAME)
continue;
recoveryTarget = RECOVERY_TARGET_TIME;
*/
recoveryTargetTime =
DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in,
- CStringGetDatum(item->value),
+ CStringGetDatum(item->value),
ObjectIdGetDatum(InvalidOid),
Int32GetDatum(-1)));
ereport(DEBUG2,
if (recoveryTarget == RECOVERY_TARGET_UNSET)
{
/*
- * Save timestamp of latest transaction commit/abort if this is
- * a transaction record
+ * Save timestamp of latest transaction commit/abort if this is a
+ * transaction record
*/
if (record->xl_rmid == RM_XACT_ID)
SetLatestXTime(recordXtime);
else if (recoveryTarget == RECOVERY_TARGET_NAME)
{
/*
- * There can be many restore points that share the same name, so we stop
- * at the first one
+ * There can be many restore points that share the same name, so we
+ * stop at the first one
*/
stopsHere = (strcmp(recordRPName, recoveryTargetName) == 0);
strncpy(recoveryStopName, recordRPName, MAXFNAMELEN);
ereport(LOG,
- (errmsg("recovery stopping at restore point \"%s\", time %s",
- recoveryStopName,
- timestamptz_to_str(recoveryStopTime))));
+ (errmsg("recovery stopping at restore point \"%s\", time %s",
+ recoveryStopName,
+ timestamptz_to_str(recoveryStopTime))));
}
/*
- * Note that if we use a RECOVERY_TARGET_TIME then we can stop
- * at a restore point since they are timestamped, though the latest
+ * Note that if we use a RECOVERY_TARGET_TIME then we can stop at a
+ * restore point since they are timestamped, though the latest
* transaction time is not updated.
*/
if (record->xl_rmid == RM_XACT_ID && recoveryStopAfter)
while (RecoveryIsPaused())
{
- pg_usleep(1000000L); /* 1000 ms */
+ pg_usleep(1000000L); /* 1000 ms */
HandleStartupProcInterrupts();
}
}
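
A minimal standalone sketch of the poll-and-sleep pattern used in the loop above, with a hypothetical flag standing in for the shared recoveryPause state; the 1000000-microsecond sleep is the 1000 ms the comment refers to:

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

/* hypothetical stand-in for the shared recovery-pause flag */
static volatile bool recovery_paused = false;

static void
wait_while_paused(void)
{
    /* re-check roughly once per second, as the startup process does */
    while (recovery_paused)
        usleep(1000000);
}

int
main(void)
{
    wait_while_paused();
    printf("recovery resumed\n");
    return 0;
}
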
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- bool recoveryPause;
+ bool recoveryPause;
SpinLockAcquire(&xlogctl->info_lck);
recoveryPause = xlogctl->recoveryPause;
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to control recovery"))));
+ (errmsg("must be superuser to control recovery"))));
if (!RecoveryInProgress())
ereport(ERROR,
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to control recovery"))));
+ (errmsg("must be superuser to control recovery"))));
if (!RecoveryInProgress())
ereport(ERROR,
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to control recovery"))));
+ (errmsg("must be superuser to control recovery"))));
if (!RecoveryInProgress())
ereport(ERROR,
Datum
pg_last_xact_replay_timestamp(PG_FUNCTION_ARGS)
{
- TimestampTz xtime;
+ TimestampTz xtime;
xtime = GetLatestXTime();
if (xtime == 0)
InRecovery = true; /* force recovery even if SHUTDOWNED */
/*
- * Make sure that REDO location exists. This may not be
- * the case if there was a crash during an online backup,
- * which left a backup_label around that references a WAL
- * segment that's already been archived.
+ * Make sure that REDO location exists. This may not be the case
+ * if there was a crash during an online backup, which left a
+ * backup_label around that references a WAL segment that's
+ * already been archived.
*/
if (XLByteLT(checkPoint.redo, checkPointLoc))
{
ereport(FATAL,
(errmsg("could not locate required checkpoint record"),
errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir)));
- wasShutdown = false; /* keep compiler quiet */
+ wasShutdown = false; /* keep compiler quiet */
}
/* set flag to delete it later */
haveBackupLabel = true;
/*
* We're in recovery, so unlogged relations may be trashed
- * and must be reset. This should be done BEFORE allowing Hot
- * Standby connections, so that read-only backends don't try to
- * read whatever garbage is left over from before.
+ * and must be reset. This should be done BEFORE allowing Hot Standby
+ * connections, so that read-only backends don't try to read whatever
+ * garbage is left over from before.
*/
ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
if (recoveryStopsHere(record, &recoveryApply))
{
/*
- * Pause only if users can connect to send a resume message
+ * Pause only if users can connect to send a resume
+ * message
*/
if (recoveryPauseAtTarget && standbyState == STANDBY_SNAPSHOT_READY)
{
{
/*
* We check shared state each time only until Hot Standby is active. We
- * can't de-activate Hot Standby, so there's no need to keep checking after
- * the shared variable has once been seen true.
+ * can't de-activate Hot Standby, so there's no need to keep checking
+ * after the shared variable has once been seen true.
*/
if (LocalHotStandbyActive)
return true;
*/
longest_secs = (long) (CheckpointStats.ckpt_longest_sync / 1000000);
longest_usecs = CheckpointStats.ckpt_longest_sync -
- (uint64) longest_secs * 1000000;
+ (uint64) longest_secs *1000000;
average_sync_time = 0;
- if (CheckpointStats.ckpt_sync_rels > 0)
+ if (CheckpointStats.ckpt_sync_rels > 0)
average_sync_time = CheckpointStats.ckpt_agg_sync_time /
CheckpointStats.ckpt_sync_rels;
average_secs = (long) (average_sync_time / 1000000);
- average_usecs = average_sync_time - (uint64) average_secs * 1000000;
+ average_usecs = average_sync_time - (uint64) average_secs *1000000;
if (restartpoint)
elog(LOG, "restartpoint complete: wrote %d buffers (%.1f%%); "
XLogRecPtr
XLogRestorePoint(const char *rpName)
{
- XLogRecPtr RecPtr;
- XLogRecData rdata;
- xl_restore_point xlrec;
+ XLogRecPtr RecPtr;
+ XLogRecData rdata;
+ xl_restore_point xlrec;
xlrec.rp_time = GetCurrentTimestamp();
strncpy(xlrec.rp_name, rpName, MAXFNAMELEN);
ereport(LOG,
(errmsg("restore point \"%s\" created at %X/%X",
- rpName, RecPtr.xlogid, RecPtr.xrecoff)));
+ rpName, RecPtr.xlogid, RecPtr.xrecoff)));
return RecPtr;
}
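
The restore-point messages above print a WAL location by splitting it into its 32-bit xlogid/xrecoff halves and formatting them as "%X/%X". A self-contained sketch of that formatting, using hypothetical values:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    /* hypothetical WAL position: high and low 32-bit halves */
    uint32_t xlogid = 0x1;
    uint32_t xrecoff = 0x2A426C8;
    char location[32];

    /* the same "%X/%X" notation used in the log message above and in
     * pg_create_restore_point's return value */
    snprintf(location, sizeof(location), "%X/%X", xlogid, xrecoff);
    printf("restore point created at %s\n", location);
    return 0;
}
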
/*
* Optimize writes by bypassing kernel cache with O_DIRECT when using
- * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
+ * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
* disabled, otherwise the archive command or walsender process will read
* the WAL soon after writing it, which is guaranteed to cause a physical
* read if we bypassed the kernel cache. We also skip the
text *backupid = PG_GETARG_TEXT_P(0);
bool fast = PG_GETARG_BOOL(1);
char *backupidstr;
- XLogRecPtr startpoint;
+ XLogRecPtr startpoint;
char startxlogstr[MAXFNAMELEN];
backupidstr = text_to_cstring(backupid);
* do_pg_start_backup is the workhorse of the user-visible pg_start_backup()
* function. It creates the necessary starting checkpoint and constructs the
* backup label file.
- *
+ *
* There are two kinds of backups: exclusive and non-exclusive. An exclusive
* backup is started with pg_start_backup(), and there can be only one active
* at a time. The backup label file of an exclusive backup is written to
if (!superuser() && !is_authenticated_user_replication_role())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser or replication role to run a backup")));
+ errmsg("must be superuser or replication role to run a backup")));
if (RecoveryInProgress())
ereport(ERROR,
/* Ensure we release forcePageWrites if fail below */
PG_ENSURE_ERROR_CLEANUP(pg_start_backup_callback, (Datum) BoolGetDatum(exclusive));
{
- bool gotUniqueStartpoint = false;
+ bool gotUniqueStartpoint = false;
+
do
{
/*
* Force a CHECKPOINT. Aside from being necessary to prevent torn
- * page problems, this guarantees that two successive backup runs will
- * have different checkpoint positions and hence different history
- * file names, even if nothing happened in between.
+ * page problems, this guarantees that two successive backup runs
+ * will have different checkpoint positions and hence different
+ * history file names, even if nothing happened in between.
*
- * We use CHECKPOINT_IMMEDIATE only if requested by user (via passing
- * fast = true). Otherwise this can take awhile.
+ * We use CHECKPOINT_IMMEDIATE only if requested by user (via
+ * passing fast = true). Otherwise this can take a while.
*/
RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT |
(fast ? CHECKPOINT_IMMEDIATE : 0));
/*
- * Now we need to fetch the checkpoint record location, and also its
- * REDO pointer. The oldest point in WAL that would be needed to
- * restore starting from the checkpoint is precisely the REDO pointer.
+ * Now we need to fetch the checkpoint record location, and also
+ * its REDO pointer. The oldest point in WAL that would be needed
+ * to restore starting from the checkpoint is precisely the REDO
+ * pointer.
*/
LWLockAcquire(ControlFileLock, LW_SHARED);
checkpointloc = ControlFile->checkPoint;
LWLockRelease(ControlFileLock);
/*
- * If two base backups are started at the same time (in WAL
- * sender processes), we need to make sure that they use
- * different checkpoints as starting locations, because we use
- * the starting WAL location as a unique identifier for the base
- * backup in the end-of-backup WAL record and when we write the
- * backup history file. Perhaps it would be better generate a
- * separate unique ID for each backup instead of forcing another
- * checkpoint, but taking a checkpoint right after another is
- * not that expensive either because only few buffers have been
- * dirtied yet.
+ * If two base backups are started at the same time (in WAL sender
+ * processes), we need to make sure that they use different
+ * checkpoints as starting locations, because we use the starting
+ * WAL location as a unique identifier for the base backup in the
+ * end-of-backup WAL record and when we write the backup history
+ * file. Perhaps it would be better to generate a separate unique ID
+ * for each backup instead of forcing another checkpoint, but
+ * taking a checkpoint right after another is not that expensive
+ * either because only a few buffers have been dirtied yet.
*/
LWLockAcquire(WALInsertLock, LW_SHARED);
if (XLByteLT(XLogCtl->Insert.lastBackupStart, startpoint))
gotUniqueStartpoint = true;
}
LWLockRelease(WALInsertLock);
- } while(!gotUniqueStartpoint);
+ } while (!gotUniqueStartpoint);
XLByteToSeg(startpoint, _logId, _logSeg);
XLogFileName(xlogfilename, ThisTimeLineID, _logId, _logSeg);
/*
- * Construct backup label file
+ * Construct backup label file
*/
initStringInfo(&labelfbuf);
{
/*
* Check for existing backup label --- implies a backup is already
- * running. (XXX given that we checked exclusiveBackup above, maybe
- * it would be OK to just unlink any such label file?)
+ * running. (XXX given that we checked exclusiveBackup above,
+ * maybe it would be OK to just unlink any such label file?)
*/
if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
{
static void
pg_start_backup_callback(int code, Datum arg)
{
- bool exclusive = DatumGetBool(arg);
+ bool exclusive = DatumGetBool(arg);
/* Update backup counters and forcePageWrites on failure */
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
if (!superuser() && !is_authenticated_user_replication_role())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser or replication role to run a backup"))));
+ (errmsg("must be superuser or replication role to run a backup"))));
if (RecoveryInProgress())
ereport(ERROR,
/*
* Read the existing label file into memory.
*/
- struct stat statbuf;
- int r;
+ struct stat statbuf;
+ int r;
if (stat(BACKUP_LABEL_FILE, &statbuf))
{
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
- remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */
+ remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */
/*
* Write the backup-end xlog record
Datum
pg_create_restore_point(PG_FUNCTION_ARGS)
{
- text *restore_name = PG_GETARG_TEXT_P(0);
- char *restore_name_str;
+ text *restore_name = PG_GETARG_TEXT_P(0);
+ char *restore_name_str;
XLogRecPtr restorepoint;
char location[MAXFNAMELEN];
if (!XLogIsNeeded())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL level not sufficient for creating a restore point"),
+ errmsg("WAL level not sufficient for creating a restore point"),
errhint("wal_level must be set to \"archive\" or \"hot_standby\" at server start.")));
restore_name_str = text_to_cstring(restore_name);
* As a convenience, return the WAL location of the restore point record
*/
snprintf(location, sizeof(location), "%X/%X",
- restorepoint.xlogid, restorepoint.xrecoff);
+ restorepoint.xlogid, restorepoint.xrecoff);
PG_RETURN_TEXT_P(cstring_to_text(location));
}
}
/*
- * If it hasn't been long since last attempt, sleep
- * to avoid busy-waiting.
+ * If it hasn't been long since last attempt, sleep to
+ * avoid busy-waiting.
*/
now = (pg_time_t) time(NULL);
if ((now - last_fail_time) < 5)
CheckForStandbyTrigger(void)
{
struct stat stat_buf;
- static bool triggered = false;
+ static bool triggered = false;
if (triggered)
return true;
if (stat(PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
{
/*
- * Since we are in a signal handler, it's not safe
- * to elog. We silently ignore any error from unlink.
+ * Since we are in a signal handler, it's not safe to elog. We
+ * silently ignore any error from unlink.
*/
unlink(PROMOTE_SIGNAL_FILE);
return true;
/*
* Note that we must do the permissions check against the target
- * role not the calling user. We require CREATE privileges,
- * since without CREATE you won't be able to do anything using the
+ * role not the calling user. We require CREATE privileges, since
+ * without CREATE you won't be able to do anything using the
* default privs anyway.
*/
iacls->nspid = get_namespace_oid(nspname, false);
pg_class_tuple->relkind != RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a foreign table",
+ errmsg("\"%s\" is not a foreign table",
NameStr(pg_class_tuple->relname))));
/* Adjust the default permissions based on object type */
this_privileges &= (AclMode) ACL_SELECT;
}
else if (pg_class_tuple->relkind == RELKIND_FOREIGN_TABLE &&
- this_privileges & ~((AclMode) ACL_SELECT))
+ this_privileges & ~((AclMode) ACL_SELECT))
{
/* Foreign tables have the same restriction as sequences. */
ereport(WARNING,
- (errcode(ERRCODE_INVALID_GRANT_OPERATION),
- errmsg("foreign table \"%s\" only supports SELECT column privileges",
- NameStr(pg_class_tuple->relname))));
+ (errcode(ERRCODE_INVALID_GRANT_OPERATION),
+ errmsg("foreign table \"%s\" only supports SELECT column privileges",
+ NameStr(pg_class_tuple->relname))));
this_privileges &= (AclMode) ACL_SELECT;
}
* Note: roles do not have owners per se; instead we use this test in
* places where an ownership-like permissions test is needed for a role.
* Be sure to apply it to the role trying to do the operation, not the
- * role being operated on! Also note that this generally should not be
+ * role being operated on! Also note that this generally should not be
* considered enough privilege if the target role is a superuser.
* (We don't handle that consideration here because we want to give a
* separate error message for such cases, so the caller has to deal with it.)
/*
* forkname_chars
- * We use this to figure out whether a filename could be a relation
- * fork (as opposed to an oddly named stray file that somehow ended
- * up in the database directory). If the passed string begins with
- * a fork name (other than the main fork name), we return its length,
- * and set *fork (if not NULL) to the fork number. If not, we return 0.
+ * We use this to figure out whether a filename could be a relation
+ * fork (as opposed to an oddly named stray file that somehow ended
+ * up in the database directory). If the passed string begins with
+ * a fork name (other than the main fork name), we return its length,
+ * and set *fork (if not NULL) to the fork number. If not, we return 0.
*
* Note that the present coding assumes that there are no fork names which
* are prefixes of other fork names.
for (forkNum = 1; forkNum <= MAX_FORKNUM; forkNum++)
{
- int len = strlen(forkNames[forkNum]);
+ int len = strlen(forkNames[forkNum]);
+
if (strncmp(forkNames[forkNum], str, len) == 0)
{
if (fork)
{
/* OIDCHARS will suffice for an integer, too */
pathlen = 5 + OIDCHARS + 2 + OIDCHARS + 1 + OIDCHARS + 1
- + FORKNAMECHARS + 1;
+ + FORKNAMECHARS + 1;
path = (char *) palloc(pathlen);
if (forknum != MAIN_FORKNUM)
snprintf(path, pathlen, "base/%u/t%d_%u_%s",
if (backend == InvalidBackendId)
{
pathlen = 9 + 1 + OIDCHARS + 1
- + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 1
- + OIDCHARS + 1 + FORKNAMECHARS + 1;
+ + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 1
+ + OIDCHARS + 1 + FORKNAMECHARS + 1;
path = (char *) palloc(pathlen);
if (forknum != MAIN_FORKNUM)
snprintf(path, pathlen, "pg_tblspc/%u/%s/%u/%u_%s",
{
/* OIDCHARS will suffice for an integer, too */
pathlen = 9 + 1 + OIDCHARS + 1
- + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 2
- + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1;
+ + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 2
+ + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1;
path = (char *) palloc(pathlen);
if (forknum != MAIN_FORKNUM)
snprintf(path, pathlen, "pg_tblspc/%u/%s/%u/t%d_%u_%s",
ForeignServerRelationId, /* OCLASS_FOREIGN_SERVER */
UserMappingRelationId, /* OCLASS_USER_MAPPING */
DefaultAclRelationId, /* OCLASS_DEFACL */
- ExtensionRelationId /* OCLASS_EXTENSION */
+ ExtensionRelationId /* OCLASS_EXTENSION */
};
/*
* Delete any comments or security labels associated with this object.
- * (This is a convenient place to do these things, rather than having every
- * object type know to do it.)
+ * (This is a convenient place to do these things, rather than having
+ * every object type know to do it.)
*/
DeleteComments(object->objectId, object->classId, object->objectSubId);
DeleteSecurityLabel(object);
* whereas 'behavior' is used for everything else.
*
* NOTE: the caller should ensure that a whole-table dependency on the
- * specified relation is created separately, if one is needed. In particular,
+ * specified relation is created separately, if one is needed. In particular,
* a whole-row Var "relation.*" will not cause this routine to emit any
* dependency item. This is appropriate behavior for subexpressions of an
* ordinary query, so other cases need to cope as necessary.
/*
* A whole-row Var references no specific columns, so adds no new
- * dependency. (We assume that there is a whole-table dependency
+ * dependency. (We assume that there is a whole-table dependency
* arising from each underlying rangetable entry. While we could
* record such a dependency when finding a whole-row Var that
* references a relation directly, it's quite unclear how to extend
/*
* We must also depend on the constant's collation: it could be
- * different from the datatype's, if a CollateExpr was const-folded
- * to a simple constant. However we can save work in the most common
+ * different from the datatype's, if a CollateExpr was const-folded to
+ * a simple constant. However we can save work in the most common
* case where the collation is "default", since we know that's pinned.
*/
if (OidIsValid(con->constcollid) &&
}
foreach(ct, rte->funccolcollations)
{
- Oid collid = lfirst_oid(ct);
+ Oid collid = lfirst_oid(ct);
if (OidIsValid(collid) &&
collid != DEFAULT_COLLATION_OID)
HeapTuple collTup;
collTup = SearchSysCache1(COLLOID,
- ObjectIdGetDatum(object->objectId));
+ ObjectIdGetDatum(object->objectId));
if (!HeapTupleIsValid(collTup))
elog(ERROR, "cache lookup failed for collation %u",
object->objectId);
appendStringInfo(&buffer, _("collation %s"),
- NameStr(((Form_pg_collation) GETSTRUCT(collTup))->collname));
+ NameStr(((Form_pg_collation) GETSTRUCT(collTup))->collname));
ReleaseSysCache(collTup);
break;
}
char *
getObjectDescriptionOids(Oid classid, Oid objid)
{
- ObjectAddress address;
+ ObjectAddress address;
address.classId = classid;
address.objectId = objid;
CheckAttributeType(NameStr(tupdesc->attrs[i]->attname),
tupdesc->attrs[i]->atttypid,
tupdesc->attrs[i]->attcollation,
- NIL, /* assume we're creating a new rowtype */
+ NIL, /* assume we're creating a new rowtype */
allow_system_table_mods);
}
}
int i;
/*
- * Check for self-containment. Eventually we might be able to allow
+ * Check for self-containment. Eventually we might be able to allow
* this (just return without complaint, if so) but it's not clear how
* many other places would require anti-recursion defenses before it
* would be safe to allow tables to contain their own rowtype.
if (list_member_oid(containing_rowtypes, atttypid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("composite type %s cannot be made a member of itself",
- format_type_be(atttypid))));
+ errmsg("composite type %s cannot be made a member of itself",
+ format_type_be(atttypid))));
containing_rowtypes = lcons_oid(atttypid, containing_rowtypes);
}
/*
- * This might not be strictly invalid per SQL standard, but it is
- * pretty useless, and it cannot be dumped, so we must disallow it.
+ * This might not be strictly invalid per SQL standard, but it is pretty
+ * useless, and it cannot be dumped, so we must disallow it.
*/
if (!OidIsValid(attcollation) && type_is_collatable(atttypid))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("no collation was derived for column \"%s\" with collatable type %s",
- attname, format_type_be(atttypid)),
- errhint("Use the COLLATE clause to set the collation explicitly.")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+ errmsg("no collation was derived for column \"%s\" with collatable type %s",
+ attname, format_type_be(atttypid)),
+ errhint("Use the COLLATE clause to set the collation explicitly.")));
}
/*
-1, /* typmod */
0, /* array dimensions for typBaseType */
false, /* Type NOT NULL */
- InvalidOid); /* typcollation */
+ InvalidOid); /* typcollation */
}
/* --------------------------------
CheckAttributeNamesTypes(tupdesc, relkind, allow_system_table_mods);
/*
- * If the relation already exists, it's an error, unless the user specifies
- * "IF NOT EXISTS". In that case, we just print a notice and do nothing
- * further.
+ * If the relation already exists, it's an error, unless the user
+ * specifies "IF NOT EXISTS". In that case, we just print a notice and do
+ * nothing further.
*/
existing_relid = get_relname_relid(relname, relnamespace);
if (existing_relid != InvalidOid)
ereport(NOTICE,
(errcode(ERRCODE_DUPLICATE_TABLE),
errmsg("relation \"%s\" already exists, skipping",
- relname)));
+ relname)));
heap_close(pg_class_desc, RowExclusiveLock);
return InvalidOid;
}
if (!OidIsValid(relid))
{
/*
- * Use binary-upgrade override for pg_class.oid/relfilenode,
- * if supplied.
+ * Use binary-upgrade override for pg_class.oid/relfilenode, if
+ * supplied.
*/
if (OidIsValid(binary_upgrade_next_heap_pg_class_oid) &&
(relkind == RELKIND_RELATION || relkind == RELKIND_SEQUENCE ||
-1, /* typmod */
0, /* array dimensions for typBaseType */
false, /* Type NOT NULL */
- InvalidOid); /* typcollation */
+ InvalidOid); /* typcollation */
pfree(relarrayname);
}
register_on_commit_action(relid, oncommit);
/*
- * If this is an unlogged relation, it needs an init fork so that it
- * can be correctly reinitialized on restart. Since we're going to
- * do an immediate sync, we ony need to xlog this if archiving or
- * streaming is enabled. And the immediate sync is required, because
- * otherwise there's no guarantee that this will hit the disk before
- * the next checkpoint moves the redo pointer.
+ * If this is an unlogged relation, it needs an init fork so that it can
+ * be correctly reinitialized on restart. Since we're going to do an
+ * immediate sync, we only need to xlog this if archiving or streaming is
+ * enabled. And the immediate sync is required, because otherwise there's
+ * no guarantee that this will hit the disk before the next checkpoint
+ * moves the redo pointer.
*/
if (relpersistence == RELPERSISTENCE_UNLOGGED)
{
/*
* There can no longer be anyone *else* touching the relation, but we
- * might still have open queries or cursors, or pending trigger events,
- * in our own session.
+ * might still have open queries or cursors, or pending trigger events, in
+ * our own session.
*/
CheckTableNotInUse(rel, "DROP TABLE");
*/
if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
{
- Relation rel;
- HeapTuple tuple;
+ Relation rel;
+ HeapTuple tuple;
rel = heap_open(ForeignTableRelationId, RowExclusiveLock);
CONSTRAINT_CHECK, /* Constraint Type */
false, /* Is Deferrable */
false, /* Is Deferred */
- true, /* Is Validated */
+ true, /* Is Validated */
RelationGetRelid(rel), /* relation */
attNos, /* attrs in the constraint */
keycount, /* # attrs in the constraint */
int i;
/*
- * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In
- * CREATE TABLE, we have faith that the parser rejected multiple pkey
- * clauses; and CREATE INDEX doesn't have a way to say PRIMARY KEY, so
- * it's no problem either.
+ * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In CREATE
+ * TABLE, we have faith that the parser rejected multiple pkey clauses;
+ * and CREATE INDEX doesn't have a way to say PRIMARY KEY, so it's no
+ * problem either.
*/
if (is_alter_table &&
relationHasPrimaryKey(heapRel))
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("multiple primary keys for table \"%s\" are not allowed",
- RelationGetRelationName(heapRel))));
+ errmsg("multiple primary keys for table \"%s\" are not allowed",
+ RelationGetRelationName(heapRel))));
}
/*
continue;
atttuple = SearchSysCache2(ATTNUM,
- ObjectIdGetDatum(RelationGetRelid(heapRel)),
+ ObjectIdGetDatum(RelationGetRelid(heapRel)),
Int16GetDatum(attnum));
if (!HeapTupleIsValid(atttuple))
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
}
/*
- * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child
- * tables? Currently, since the PRIMARY KEY itself doesn't cascade,
- * we don't cascade the notnull constraint(s) either; but this is
- * pretty debatable.
+ * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child tables?
+ * Currently, since the PRIMARY KEY itself doesn't cascade, we don't
+ * cascade the notnull constraint(s) either; but this is pretty debatable.
*
- * XXX: possible future improvement: when being called from ALTER
- * TABLE, it would be more efficient to merge this with the outer
- * ALTER TABLE, so as to avoid two scans. But that seems to
- * complicate DefineIndex's API unduly.
+ * XXX: possible future improvement: when being called from ALTER TABLE,
+ * it would be more efficient to merge this with the outer ALTER TABLE, so
+ * as to avoid two scans. But that seems to complicate DefineIndex's API
+ * unduly.
*/
if (cmds)
AlterTableInternal(RelationGetRelid(heapRel), cmds, false);
if (!OidIsValid(indexRelationId))
{
/*
- * Use binary-upgrade override for pg_class.oid/relfilenode,
- * if supplied.
+ * Use binary-upgrade override for pg_class.oid/relfilenode, if
+ * supplied.
*/
if (OidIsValid(binary_upgrade_next_index_pg_class_oid))
{
* ----------------
*/
UpdateIndexRelation(indexRelationId, heapRelationId, indexInfo,
- collationObjectId, classObjectId, coloptions, isprimary, is_exclusion,
+ collationObjectId, classObjectId, coloptions, isprimary, is_exclusion,
!deferrable,
!concurrent);
/*
* If there are no simply-referenced columns, give the index an
- * auto dependency on the whole table. In most cases, this will
+ * auto dependency on the whole table. In most cases, this will
* be redundant, but it might not be if the index expressions and
* predicate contain no Vars or only whole-row Vars.
*/
/*
* Close the index; but we keep the lock that we acquired above until end
- * of transaction. Closing the heap is caller's responsibility.
+ * of transaction. Closing the heap is caller's responsibility.
*/
index_close(indexRelation, NoLock);
/*
* If the constraint is deferrable, create the deferred uniqueness
- * checking trigger. (The trigger will be given an internal
- * dependency on the constraint by CreateTrigger.)
+ * checking trigger. (The trigger will be given an internal dependency on
+ * the constraint by CreateTrigger.)
*/
if (deferrable)
{
* have been so marked already, so no need to clear the flag in the other
* case.
*
- * Note: this might better be done by callers. We do it here to avoid
+ * Note: this might better be done by callers. We do it here to avoid
* exposing index_update_stats() globally, but that wouldn't be necessary
* if relhaspkey went away.
*/
*/
if (update_pgindex && (mark_as_primary || deferrable))
{
- Relation pg_index;
- HeapTuple indexTuple;
- Form_pg_index indexForm;
- bool dirty = false;
+ Relation pg_index;
+ HeapTuple indexTuple;
+ Form_pg_index indexForm;
+ bool dirty = false;
pg_index = heap_open(IndexRelationId, RowExclusiveLock);
userIndexRelation = index_open(indexId, AccessExclusiveLock);
/*
- * There can no longer be anyone *else* touching the index, but we
- * might still have open queries using it in our own session.
+ * There can no longer be anyone *else* touching the index, but we might
+ * still have open queries using it in our own session.
*/
CheckTableNotInUse(userIndexRelation, "DROP INDEX");
*/
if (heapRelation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED)
{
- RegProcedure ambuildempty = indexRelation->rd_am->ambuildempty;
+ RegProcedure ambuildempty = indexRelation->rd_am->ambuildempty;
+
RelationOpenSmgr(indexRelation);
smgrcreate(indexRelation->rd_smgr, INIT_FORKNUM, false);
OidFunctionCall1(ambuildempty, PointerGetDatum(indexRelation));
ivinfo.strategy = NULL;
state.tuplesort = tuplesort_begin_datum(TIDOID,
- TIDLessOperator, InvalidOid, false,
+ TIDLessOperator, InvalidOid, false,
maintenance_work_mem,
false);
state.htups = state.itups = state.tups_inserted = 0;
* use catalog indexes while collecting the list.)
*
* To avoid deadlocks, VACUUM FULL or CLUSTER on a system catalog must omit the
- * REINDEX_CHECK_CONSTRAINTS flag. REINDEX should be used to rebuild an index
+ * REINDEX_CHECK_CONSTRAINTS flag. REINDEX should be used to rebuild an index
* if constraint inconsistency is suspected. For optimal performance, other
* callers should include the flag only after transforming the data in a manner
* that risks a change in constraint validity.
if (oldNspOid == nspOid)
ereport(ERROR,
(classid == RelationRelationId ?
- errcode(ERRCODE_DUPLICATE_TABLE) :
+ errcode(ERRCODE_DUPLICATE_TABLE) :
classid == ProcedureRelationId ?
- errcode(ERRCODE_DUPLICATE_FUNCTION) :
- errcode(ERRCODE_DUPLICATE_OBJECT),
+ errcode(ERRCODE_DUPLICATE_FUNCTION) :
+ errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("%s is already in schema \"%s\"",
getObjectDescriptionOids(classid, objid),
get_namespace_name(nspOid))));
if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move objects into or out of temporary schemas")));
+ errmsg("cannot move objects into or out of temporary schemas")));
/* same for TOAST schema */
if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE)
/*
* get_namespace_oid - given a namespace name, look up the OID
*
- * If missing_ok is false, throw an error if namespace name not found. If
+ * If missing_ok is false, throw an error if namespace name not found. If
* true, just return InvalidOid.
*/
Oid
oid = GetSysCacheOid1(NAMESPACENAME, CStringGetDatum(nspname));
if (!OidIsValid(oid) && !missing_ok)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_SCHEMA),
- errmsg("schema \"%s\" does not exist", nspname)));
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_SCHEMA),
+ errmsg("schema \"%s\" does not exist", nspname)));
return oid;
}
/* See if the namespace name starts with "pg_temp_" or "pg_toast_temp_" */
nspname = get_namespace_name(namespaceId);
if (!nspname)
- return InvalidBackendId; /* no such namespace? */
+ return InvalidBackendId; /* no such namespace? */
if (strncmp(nspname, "pg_temp_", 8) == 0)
result = atoi(nspname + 8);
else if (strncmp(nspname, "pg_toast_temp_", 14) == 0)
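
The check above relies on a fixed prefix followed by the backend id in decimal. A self-contained sketch of the same prefix-and-atoi parsing, with a hypothetical namespace name and a plain int in place of InvalidBackendId:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    const char *nspname = "pg_toast_temp_7";    /* hypothetical input */
    int backend = -1;                           /* stand-in for InvalidBackendId */

    if (strncmp(nspname, "pg_temp_", 8) == 0)
        backend = atoi(nspname + 8);
    else if (strncmp(nspname, "pg_toast_temp_", 14) == 0)
        backend = atoi(nspname + 14);

    printf("backend id: %d\n", backend);
    return 0;
}
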
*
* It's possible that newpath->useTemp is set but there is no longer any
* active temp namespace, if the path was saved during a transaction that
- * created a temp namespace and was later rolled back. In that case we just
- * ignore useTemp. A plausible alternative would be to create a new temp
+ * created a temp namespace and was later rolled back. In that case we just
+ * ignore useTemp. A plausible alternative would be to create a new temp
* namespace, but for existing callers that's not necessary because an empty
* temp namespace wouldn't affect their results anyway.
*
if (source == PGC_S_TEST)
ereport(NOTICE,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
- errmsg("schema \"%s\" does not exist", curname)));
+ errmsg("schema \"%s\" does not exist", curname)));
else
{
GUC_check_errdetail("schema \"%s\" does not exist", curname);
static ObjectAddress get_object_address_relobject(ObjectType objtype,
List *objname, Relation *relp);
static ObjectAddress get_object_address_attribute(ObjectType objtype,
- List *objname, Relation *relp, LOCKMODE lockmode);
+ List *objname, Relation *relp, LOCKMODE lockmode);
static ObjectAddress get_object_address_opcf(ObjectType objtype, List *objname,
List *objargs);
static bool object_exists(ObjectAddress address);
get_object_address(ObjectType objtype, List *objname, List *objargs,
Relation *relp, LOCKMODE lockmode)
{
- ObjectAddress address;
- Relation relation = NULL;
+ ObjectAddress address;
+ Relation relation = NULL;
/* Some kind of lock must be taken. */
Assert(lockmode != NoLock);
case OBJECT_COLUMN:
address =
get_object_address_attribute(objtype, objname, &relation,
- lockmode);
+ lockmode);
break;
case OBJECT_RULE:
case OBJECT_TRIGGER:
break;
case OBJECT_CAST:
{
- TypeName *sourcetype = (TypeName *) linitial(objname);
- TypeName *targettype = (TypeName *) linitial(objargs);
- Oid sourcetypeid = typenameTypeId(NULL, sourcetype);
- Oid targettypeid = typenameTypeId(NULL, targettype);
+ TypeName *sourcetype = (TypeName *) linitial(objname);
+ TypeName *targettype = (TypeName *) linitial(objargs);
+ Oid sourcetypeid = typenameTypeId(NULL, sourcetype);
+ Oid targettypeid = typenameTypeId(NULL, targettype);
address.classId = CastRelationId;
address.objectId =
/*
* If we're dealing with a relation or attribute, then the relation is
- * already locked. If we're dealing with any other type of object, we need
- * to lock it and then verify that it still exists.
+ * already locked. If we're dealing with any other type of object, we
+ * need to lock it and then verify that it still exists.
*/
if (address.classId != RelationRelationId)
{
break;
default:
elog(ERROR, "unrecognized objtype: %d", (int) objtype);
- msg = NULL; /* placate compiler */
+ msg = NULL; /* placate compiler */
}
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
get_relation_by_qualified_name(ObjectType objtype, List *objname,
LOCKMODE lockmode)
{
- Relation relation;
+ Relation relation;
relation = relation_openrv(makeRangeVarFromNameList(objname), lockmode);
switch (objtype)
nnames = list_length(objname);
if (nnames < 2)
{
- Oid reloid;
+ Oid reloid;
/*
* For compatibility with very old releases, we sometimes allow users
get_object_address_attribute(ObjectType objtype, List *objname,
Relation *relp, LOCKMODE lockmode)
{
- ObjectAddress address;
+ ObjectAddress address;
List *relname;
Oid reloid;
Relation relation;
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" of relation \"%s\" does not exist",
- attname, RelationGetRelationName(relation))));
+ attname, RelationGetRelationName(relation))));
*relp = relation;
return address;
int cache = -1;
Oid indexoid = InvalidOid;
Relation rel;
- ScanKeyData skey[1];
- SysScanDesc sd;
+ ScanKeyData skey[1];
+ SysScanDesc sd;
bool found;
/* Sub-objects require special treatment. */
/*
* For object types that have a relevant syscache, we use it; for
- * everything else, we'll have to do an index-scan. This switch
- * sets either the cache to be used for the syscache lookup, or the
- * index to be used for the index scan.
+ * everything else, we'll have to do an index-scan. This switch sets
+ * either the cache to be used for the syscache lookup, or the index to be
+ * used for the index scan.
*/
switch (address.classId)
{
cache = OPFAMILYOID;
break;
case LargeObjectRelationId:
+
/*
* Weird backward compatibility hack: ObjectAddress notation uses
* LargeObjectRelationId for large objects, but since PostgreSQL
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be owner of large object %u",
- address.objectId)));
+ address.objectId)));
break;
case OBJECT_CAST:
{
/* We can only check permissions on the source/target types */
- TypeName *sourcetype = (TypeName *) linitial(objname);
- TypeName *targettype = (TypeName *) linitial(objargs);
- Oid sourcetypeid = typenameTypeId(NULL, sourcetype);
- Oid targettypeid = typenameTypeId(NULL, targettype);
+ TypeName *sourcetype = (TypeName *) linitial(objname);
+ TypeName *targettype = (TypeName *) linitial(objargs);
+ Oid sourcetypeid = typenameTypeId(NULL, sourcetype);
+ Oid targettypeid = typenameTypeId(NULL, targettype);
if (!pg_type_ownercheck(sourcetypeid, roleid)
&& !pg_type_ownercheck(targettypeid, roleid))
NameListToString(objname));
break;
case OBJECT_ROLE:
+
/*
* We treat roles as being "owned" by those with CREATEROLE priv,
* except that superusers are only owned by superusers.
HeapTuple tup;
Datum values[Natts_pg_collation];
bool nulls[Natts_pg_collation];
- NameData name_name, name_collate, name_ctype;
+ NameData name_name,
+ name_collate,
+ name_ctype;
Oid oid;
ObjectAddress myself,
referenced;
/*
* Make sure there is no existing collation of same name & encoding.
*
- * This would be caught by the unique index anyway; we're just giving
- * a friendlier error message. The unique index provides a backstop
- * against race conditions.
+ * This would be caught by the unique index anyway; we're just giving a
+ * friendlier error message. The unique index provides a backstop against
+ * race conditions.
*/
if (SearchSysCacheExists3(COLLNAMEENCNSP,
PointerGetDatum(collname),
collname, pg_encoding_to_char(collencoding))));
/*
- * Also forbid matching an any-encoding entry. This test of course is
- * not backed up by the unique index, but it's not a problem since we
- * don't support adding any-encoding entries after initdb.
+ * Also forbid matching an any-encoding entry. This test of course is not
+ * backed up by the unique index, but it's not a problem since we don't
+ * support adding any-encoding entries after initdb.
*/
if (SearchSysCacheExists3(COLLNAMEENCNSP,
PointerGetDatum(collname),
* the rel of interest are Vars with the indicated varno/varlevelsup.
*
* Currently we only check to see if the rel has a primary key that is a
- * subset of the grouping_columns. We could also use plain unique constraints
+ * subset of the grouping_columns. We could also use plain unique constraints
* if all their columns are known not null, but there's a problem: we need
* to be able to represent the not-null-ness as part of the constraints added
- * to *constraintDeps. FIXME whenever not-null constraints get represented
+ * to *constraintDeps. FIXME whenever not-null constraints get represented
* in pg_constraint.
*/
bool
if (isNull)
elog(ERROR, "null conkey for constraint %u",
HeapTupleGetOid(tuple));
- arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */
numkeys = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numkeys < 0 ||
/*
* If we are executing a CREATE EXTENSION operation, mark the given object
- * as being a member of the extension. Otherwise, do nothing.
+ * as being a member of the extension. Otherwise, do nothing.
*
* This must be called during creation of any user-definable object type
* that could be a member of an extension.
{
if (creating_extension)
{
- ObjectAddress extension;
+ ObjectAddress extension;
extension.classId = ExtensionRelationId;
extension.objectId = CurrentExtensionObject;
* (possibly with some differences from before).
*
* If skipExtensionDeps is true, we do not delete any dependencies that
- * show that the given object is a member of an extension. This avoids
+ * show that the given object is a member of an extension. This avoids
* needing a lot of extra logic to fetch and recreate that dependency.
*/
long
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
if (skipExtensionDeps &&
- ((Form_pg_depend) GETSTRUCT(tup))->deptype == DEPENDENCY_EXTENSION)
+ ((Form_pg_depend) GETSTRUCT(tup))->deptype == DEPENDENCY_EXTENSION)
continue;
simple_heap_delete(depRel, &tup->t_self);
/* Potentially set by contrib/pg_upgrade_support functions */
-Oid binary_upgrade_next_pg_enum_oid = InvalidOid;
+Oid binary_upgrade_next_pg_enum_oid = InvalidOid;
static void RenumberEnumType(Relation pg_enum, HeapTuple *existing, int nelems);
static int oid_cmp(const void *p1, const void *p2);
num_elems = list_length(vals);
/*
- * We do not bother to check the list of values for duplicates --- if
- * you have any, you'll get a less-than-friendly unique-index violation.
- * It is probably not worth trying harder.
+ * We do not bother to check the list of values for duplicates --- if you
+ * have any, you'll get a less-than-friendly unique-index violation. It is
+ * probably not worth trying harder.
*/
pg_enum = heap_open(EnumRelationId, RowExclusiveLock);
* Allocate OIDs for the enum's members.
*
* While this method does not absolutely guarantee that we generate no
- * duplicate OIDs (since we haven't entered each oid into the table
- * before allocating the next), trouble could only occur if the OID
- * counter wraps all the way around before we finish. Which seems
- * unlikely.
+ * duplicate OIDs (since we haven't entered each oid into the table before
+ * allocating the next), trouble could only occur if the OID counter wraps
+ * all the way around before we finish. Which seems unlikely.
*/
oids = (Oid *) palloc(num_elems * sizeof(Oid));
* tells the comparison functions the OIDs are in the correct sort
* order and can be compared directly.
*/
- Oid new_oid;
+ Oid new_oid;
- do {
+ do
+ {
new_oid = GetNewOid(pg_enum);
} while (new_oid & 1);
oids[elemno] = new_oid;
/*
* Acquire a lock on the enum type, which we won't release until commit.
* This ensures that two backends aren't concurrently modifying the same
- * enum type. Without that, we couldn't be sure to get a consistent
- * view of the enum members via the syscache. Note that this does not
- * block other backends from inspecting the type; see comments for
+ * enum type. Without that, we couldn't be sure to get a consistent view
+ * of the enum members via the syscache. Note that this does not block
+ * other backends from inspecting the type; see comments for
* RenumberEnumType.
*/
LockDatabaseObject(TypeRelationId, enumTypeOid, 0, ExclusiveLock);
/* Get the list of existing members of the enum */
list = SearchSysCacheList1(ENUMTYPOIDNAME,
ObjectIdGetDatum(enumTypeOid));
- nelems = list->n_members;
+ nelems = list->n_members;
/* Sort the existing members by enumsortorder */
existing = (HeapTuple *) palloc(nelems * sizeof(HeapTuple));
if (neighbor == NULL)
{
/*
- * Put the new label at the end of the list.
- * No change to existing tuples is required.
+ * Put the new label at the end of the list. No change to existing
+ * tuples is required.
*/
if (nelems > 0)
{
else
{
/* BEFORE or AFTER was specified */
- int nbr_index;
- int other_nbr_index;
- Form_pg_enum nbr_en;
- Form_pg_enum other_nbr_en;
+ int nbr_index;
+ int other_nbr_index;
+ Form_pg_enum nbr_en;
+ Form_pg_enum other_nbr_en;
/* Locate the neighbor element */
for (nbr_index = 0; nbr_index < nelems; nbr_index++)
nbr_en = (Form_pg_enum) GETSTRUCT(existing[nbr_index]);
/*
- * Attempt to assign an appropriate enumsortorder value: one less
- * than the smallest member, one more than the largest member,
- * or halfway between two existing members.
+ * Attempt to assign an appropriate enumsortorder value: one less than
+ * the smallest member, one more than the largest member, or halfway
+ * between two existing members.
*
* In the "halfway" case, because of the finite precision of float4,
- * we might compute a value that's actually equal to one or the
- * other of its neighbors. In that case we renumber the existing
- * members and try again.
+ * we might compute a value that's actually equal to one or the other
+ * of its neighbors. In that case we renumber the existing members
+ * and try again.
*/
if (newValIsAfter)
other_nbr_index = nbr_index + 1;
/*
* On some machines, newelemorder may be in a register that's
- * wider than float4. We need to force it to be rounded to
- * float4 precision before making the following comparisons,
- * or we'll get wrong results. (Such behavior violates the C
- * standard, but fixing the compilers is out of our reach.)
+ * wider than float4. We need to force it to be rounded to float4
+ * precision before making the following comparisons, or we'll get
+ * wrong results. (Such behavior violates the C standard, but
+ * fixing the compilers is out of our reach.)
*/
newelemorder = DatumGetFloat4(Float4GetDatum(newelemorder));
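
The forced round-trip through Float4GetDatum/DatumGetFloat4 matters because a midpoint computed in a wider register may not be representable in float4 and collapses onto one of its neighbors once stored. A standalone sketch of that collapse, using plain float and hypothetical sort-order values:

#include <stdio.h>

int
main(void)
{
    /* hypothetical adjacent sort-order values with no float between them */
    float low = 16777216.0f;            /* 2^24 */
    float high = 16777218.0f;           /* next representable even integer */
    volatile float mid = (low + high) / 2.0f;   /* force rounding to float */

    if (mid == low || mid == high)
        printf("midpoint %g collides with a neighbor; renumber and retry\n", mid);
    else
        printf("midpoint %g fits between %g and %g\n", mid, low, high);
    return 0;
}
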
if (OidIsValid(binary_upgrade_next_pg_enum_oid))
{
/*
- * Use binary-upgrade override for pg_enum.oid, if supplied.
- * During binary upgrade, all pg_enum.oid's are set this way
- * so they are guaranteed to be consistent.
+ * Use binary-upgrade override for pg_enum.oid, if supplied. During
+ * binary upgrade, all pg_enum.oid's are set this way so they are
+ * guaranteed to be consistent.
*/
if (neighbor != NULL)
ereport(ERROR,
*/
for (;;)
{
- bool sorts_ok;
+ bool sorts_ok;
/* Get a new OID (different from all existing pg_enum tuples) */
newOid = GetNewOid(pg_enum);
/*
* Detect whether it sorts correctly relative to existing
* even-numbered labels of the enum. We can ignore existing
- * labels with odd Oids, since a comparison involving one of
- * those will not take the fast path anyway.
+ * labels with odd Oids, since a comparison involving one of those
+ * will not take the fast path anyway.
*/
sorts_ok = true;
for (i = 0; i < nelems; i++)
break;
/*
- * If it's odd, and sorts OK, loop back to get another OID
- * and try again. Probably, the next available even OID
- * will sort correctly too, so it's worth trying.
+ * If it's odd, and sorts OK, loop back to get another OID and
+ * try again. Probably, the next available even OID will sort
+ * correctly too, so it's worth trying.
*/
}
else
* We avoid doing this unless absolutely necessary; in most installations
* it will never happen. The reason is that updating existing pg_enum
* entries creates hazards for other backends that are concurrently reading
- * pg_enum with SnapshotNow semantics. A concurrent SnapshotNow scan could
+ * pg_enum with SnapshotNow semantics. A concurrent SnapshotNow scan could
* see both old and new versions of an updated row as valid, or neither of
* them, if the commit happens between scanning the two versions. It's
* also quite likely for a concurrent scan to see an inconsistent set of
static int
sort_order_cmp(const void *p1, const void *p2)
{
- HeapTuple v1 = *((const HeapTuple *) p1);
- HeapTuple v2 = *((const HeapTuple *) p2);
- Form_pg_enum en1 = (Form_pg_enum) GETSTRUCT(v1);
- Form_pg_enum en2 = (Form_pg_enum) GETSTRUCT(v2);
+ HeapTuple v1 = *((const HeapTuple *) p1);
+ HeapTuple v2 = *((const HeapTuple *) p2);
+ Form_pg_enum en1 = (Form_pg_enum) GETSTRUCT(v1);
+ Form_pg_enum en2 = (Form_pg_enum) GETSTRUCT(v2);
if (en1->enumsortorder < en2->enumsortorder)
return -1;
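
sort_order_cmp is a standard three-way qsort comparator over enumsortorder. A minimal standalone version of the same pattern, sorting plain float keys instead of pg_enum tuples:

#include <stdio.h>
#include <stdlib.h>

static int
order_cmp(const void *p1, const void *p2)
{
    float a = *(const float *) p1;
    float b = *(const float *) p2;

    if (a < b)
        return -1;
    if (a > b)
        return 1;
    return 0;
}

int
main(void)
{
    float order[] = {3.5f, 1.0f, 2.25f};    /* hypothetical sort-order values */

    qsort(order, 3, sizeof(float), order_cmp);
    printf("%g %g %g\n", order[0], order[1], order[2]);
    return 0;
}
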
if (!haspolyarg)
{
/*
- * OK to do full precheck: analyze and rewrite the queries,
- * then verify the result type.
+ * OK to do full precheck: analyze and rewrite the queries, then
+ * verify the result type.
*/
SQLFunctionParseInfoPtr pinfo;
querytree_sublist = pg_analyze_and_rewrite_params(parsetree,
prosrc,
- (ParserSetupHook) sql_fn_parser_setup,
+ (ParserSetupHook) sql_fn_parser_setup,
pinfo);
querytree_list = list_concat(querytree_list,
querytree_sublist);
values[i++] = ObjectIdGetDatum(InvalidOid); /* typbasetype */
values[i++] = Int32GetDatum(-1); /* typtypmod */
values[i++] = Int32GetDatum(0); /* typndims */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typcollation */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typcollation */
nulls[i++] = true; /* typdefaultbin */
nulls[i++] = true; /* typdefault */
values[i++] = ObjectIdGetDatum(baseType); /* typbasetype */
values[i++] = Int32GetDatum(typeMod); /* typtypmod */
values[i++] = Int32GetDatum(typNDims); /* typndims */
- values[i++] = ObjectIdGetDatum(typeCollation); /* typcollation */
+ values[i++] = ObjectIdGetDatum(typeCollation); /* typcollation */
/*
* initialize the default binary value for this type. Check for nulls of
break;
default:
elog(ERROR, "invalid relpersistence: %c", relpersistence);
- return; /* placate compiler */
+ return; /* placate compiler */
}
srel = smgropen(rnode, backend);
* *ptr is set to point to a freshly-palloc'd array of RelFileNodes.
* If there are no relations to be deleted, *ptr is set to NULL.
*
- * Only non-temporary relations are included in the returned list. This is OK
+ * Only non-temporary relations are included in the returned list. This is OK
* because the list is used only in contexts where temporary relations don't
* matter: we're either writing to the two-phase state file (and transactions
* that have touched temp tables can't be prepared) or we're writing to xlog
list_make2("chunk_id", "chunk_seq"),
BTREE_AM_OID,
rel->rd_rel->reltablespace,
- collationObjectId, classObjectId, coloptions, (Datum) 0,
+ collationObjectId, classObjectId, coloptions, (Datum) 0,
true, false, false, false,
true, false, false);
switch (getObjectClass(&dep))
{
case OCLASS_CLASS:
- {
- Relation rel;
- Relation classRel;
+ {
+ Relation rel;
+ Relation classRel;
- rel = relation_open(objid, AccessExclusiveLock);
- oldNspOid = RelationGetNamespace(rel);
+ rel = relation_open(objid, AccessExclusiveLock);
+ oldNspOid = RelationGetNamespace(rel);
- classRel = heap_open(RelationRelationId, RowExclusiveLock);
+ classRel = heap_open(RelationRelationId, RowExclusiveLock);
- AlterRelationNamespaceInternal(classRel,
- objid,
- oldNspOid,
- nspOid,
- true);
+ AlterRelationNamespaceInternal(classRel,
+ objid,
+ oldNspOid,
+ nspOid,
+ true);
- heap_close(classRel, RowExclusiveLock);
+ heap_close(classRel, RowExclusiveLock);
- relation_close(rel, NoLock);
- break;
- }
+ relation_close(rel, NoLock);
+ break;
+ }
case OCLASS_PROC:
oldNspOid = AlterFunctionNamespace_oid(objid, nspOid);
{
Oid classId = RelationGetRelid(rel);
Oid oldNspOid;
- Datum name, namespace;
- bool isnull;
- HeapTuple tup, newtup;
+ Datum name,
+ namespace;
+ bool isnull;
+ HeapTuple tup,
+ newtup;
Datum *values;
bool *nulls;
bool *replaces;
/* Permission checks ... superusers can always do it */
if (!superuser())
{
- Datum owner;
+ Datum owner;
Oid ownerId;
AclResult aclresult;
HeapTuple *rows, int numrows,
MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum,
- Node *index_expr);
+ Node *index_expr);
static int acquire_sample_rows(Relation onerel, HeapTuple *rows,
int targrows, double *totalrows, double *totaldeadrows);
static double random_fract(void);
if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
ereport(LOG,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("skipping analyze of \"%s\" --- lock not available",
- vacstmt->relation->relname)));
+ errmsg("skipping analyze of \"%s\" --- lock not available",
+ vacstmt->relation->relname)));
}
if (!onerel)
return;
/*
* When analyzing an expression index, believe the expression tree's type
* not the column datatype --- the latter might be the opckeytype storage
- * type of the opclass, which is not interesting for our purposes. (Note:
+ * type of the opclass, which is not interesting for our purposes. (Note:
* if we did anything with non-expression index columns, we'd need to
* figure out where to get the correct type info from, but for now that's
- * not a problem.) It's not clear whether anyone will care about the
+ * not a problem.) It's not clear whether anyone will care about the
* typmod, but we store that too just in case.
*/
if (index_expr)
TransactionId OldestXmin;
TransactionId FreezeXid;
RewriteState rwstate;
- bool use_sort;
+ bool use_sort;
Tuplesortstate *tuplesort;
double num_tuples = 0,
tups_vacuumed = 0,
rwstate = begin_heap_rewrite(NewHeap, OldestXmin, FreezeXid, use_wal);
/*
- * Decide whether to use an indexscan or seqscan-and-optional-sort to
- * scan the OldHeap. We know how to use a sort to duplicate the ordering
- * of a btree index, and will use seqscan-and-sort for that case if the
- * planner tells us it's cheaper. Otherwise, always indexscan if an
- * index is provided, else plain seqscan.
+ * Decide whether to use an indexscan or seqscan-and-optional-sort to scan
+ * the OldHeap. We know how to use a sort to duplicate the ordering of a
+ * btree index, and will use seqscan-and-sort for that case if the planner
+ * tells us it's cheaper. Otherwise, always indexscan if an index is
+ * provided, else plain seqscan.
*/
if (OldIndex != NULL && OldIndex->rd_rel->relam == BTREE_AM_OID)
use_sort = plan_cluster_use_sort(OIDOldHeap, OIDOldIndex);
/*
* Scan through the OldHeap, either in OldIndex order or sequentially;
* copy each tuple into the NewHeap, or transiently to the tuplesort
- * module. Note that we don't bother sorting dead tuples (they won't
- * get to the new table anyway).
+ * module. Note that we don't bother sorting dead tuples (they won't get
+ * to the new table anyway).
*/
for (;;)
{
heap_endscan(heapScan);
/*
- * In scan-and-sort mode, complete the sort, then read out all live
- * tuples from the tuplestore and write them to the new relation.
+ * In scan-and-sort mode, complete the sort, then read out all live tuples
+ * from the tuplestore and write them to the new relation.
*/
if (tuplesort != NULL)
{
bool newRelHasOids, RewriteState rwstate)
{
HeapTuple copiedTuple;
- int i;
+ int i;
heap_deform_tuple(tuple, oldTupDesc, values, isnull);
#include "utils/syscache.h"
static void AlterCollationOwner_internal(Relation rel, Oid collationOid,
- Oid newOwnerId);
+ Oid newOwnerId);
/*
* CREATE COLLATION
Oid collNamespace;
AclResult aclresult;
ListCell *pl;
- DefElem *fromEl = NULL;
- DefElem *localeEl = NULL;
- DefElem *lccollateEl = NULL;
- DefElem *lcctypeEl = NULL;
+ DefElem *fromEl = NULL;
+ DefElem *localeEl = NULL;
+ DefElem *lccollateEl = NULL;
+ DefElem *lcctypeEl = NULL;
char *collcollate = NULL;
char *collctype = NULL;
Oid newoid;
foreach(pl, parameters)
{
- DefElem *defel = (DefElem *) lfirst(pl);
+ DefElem *defel = (DefElem *) lfirst(pl);
DefElem **defelp;
if (pg_strcasecmp(defel->defname, "from") == 0)
Oid collid;
HeapTuple tp;
- collid = get_collation_oid(defGetQualifiedName(fromEl), false);
+ collid = get_collation_oid(defGetQualifiedName(fromEl), false);
tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
if (!HeapTupleIsValid(tp))
elog(ERROR, "cache lookup failed for collation %u", collid);
if (!collcollate)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("parameter \"lc_collate\" parameter must be specified")));
+ errmsg("parameter \"lc_collate\" parameter must be specified")));
if (!collctype)
ereport(ERROR,
Oid
AlterCollationNamespace_oid(Oid collOid, Oid newNspOid)
{
- Oid oldNspOid;
+ Oid oldNspOid;
Relation rel;
char *collation_name;
void
CommentObject(CommentStmt *stmt)
{
- ObjectAddress address;
- Relation relation;
+ ObjectAddress address;
+ Relation relation;
/*
* When loading a dump, we may see a COMMENT ON DATABASE for the old name
* (which is really pg_restore's fault, but for now we will work around
* the problem here). Consensus is that the best fix is to treat wrong
* database name as a WARNING not an ERROR; hence, the following special
- * case. (If the length of stmt->objname is not 1, get_object_address will
- * throw an error below; that's OK.)
+ * case. (If the length of stmt->objname is not 1, get_object_address
+ * will throw an error below; that's OK.)
*/
if (stmt->objtype == OBJECT_DATABASE && list_length(stmt->objname) == 1)
{
- char *database = strVal(linitial(stmt->objname));
+ char *database = strVal(linitial(stmt->objname));
+
if (!OidIsValid(get_database_oid(database, true)))
{
ereport(WARNING,
}
/*
- * Translate the parser representation that identifies this object into
- * an ObjectAddress. get_object_address() will throw an error if the
- * object does not exist, and will also acquire a lock on the target
- * to guard against concurrent DROP operations.
+ * Translate the parser representation that identifies this object into an
+ * ObjectAddress. get_object_address() will throw an error if the object
+ * does not exist, and will also acquire a lock on the target to guard
+ * against concurrent DROP operations.
*/
address = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
&relation, ShareUpdateExclusiveLock);
switch (stmt->objtype)
{
case OBJECT_COLUMN:
+
/*
* Allow comments only on columns of tables, views, composite
* types, and foreign tables (which are the only relkinds for
void
AlterConversionNamespace(List *name, const char *newschema)
{
- Oid convOid, nspOid;
+ Oid convOid,
+ nspOid;
Relation rel;
rel = heap_open(ConversionRelationId, RowExclusiveLock);
Oid
AlterConversionNamespace_oid(Oid convOid, Oid newNspOid)
{
- Oid oldNspOid;
+ Oid oldNspOid;
Relation rel;
rel = heap_open(ConversionRelationId, RowExclusiveLock);
char *quote; /* CSV quote char (must be 1 byte) */
char *escape; /* CSV escape char (must be 1 byte) */
List *force_quote; /* list of column names */
- bool force_quote_all; /* FORCE QUOTE *? */
+ bool force_quote_all; /* FORCE QUOTE *? */
bool *force_quote_flags; /* per-column CSV FQ flags */
List *force_notnull; /* list of column names */
bool *force_notnull_flags; /* per-column CSV FNN flags */
/* field raw data pointers found by COPY FROM */
- int max_fields;
- char ** raw_fields;
+ int max_fields;
+ char **raw_fields;
/*
* Similarly, line_buf holds the whole input line being processed. The
/* non-export function prototypes */
static CopyState BeginCopy(bool is_from, Relation rel, Node *raw_query,
- const char *queryString, List *attnamelist, List *options);
+ const char *queryString, List *attnamelist, List *options);
static void EndCopy(CopyState cstate);
static CopyState BeginCopyTo(Relation rel, Node *query, const char *queryString,
- const char *filename, List *attnamelist, List *options);
+ const char *filename, List *attnamelist, List *options);
static void EndCopyTo(CopyState cstate);
static uint64 DoCopyTo(CopyState cstate);
static uint64 CopyTo(CopyState cstate);
static uint64 CopyFrom(CopyState cstate);
static bool CopyReadLine(CopyState cstate);
static bool CopyReadLineText(CopyState cstate);
-static int CopyReadAttributesText(CopyState cstate);
-static int CopyReadAttributesCSV(CopyState cstate);
+static int CopyReadAttributesText(CopyState cstate);
+static int CopyReadAttributesCSV(CopyState cstate);
static Datum CopyReadBinaryAttribute(CopyState cstate,
int column_no, FmgrInfo *flinfo,
Oid typioparam, int32 typmod,
if (stmt->relation)
{
- TupleDesc tupDesc;
- AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT);
- RangeTblEntry *rte;
- List *attnums;
- ListCell *cur;
+ TupleDesc tupDesc;
+ AclMode required_access = (is_from ? ACL_INSERT : ACL_SELECT);
+ RangeTblEntry *rte;
+ List *attnums;
+ ListCell *cur;
Assert(!stmt->query);
/* Open and lock the relation, using the appropriate lock type. */
rel = heap_openrv(stmt->relation,
- (is_from ? RowExclusiveLock : AccessShareLock));
+ (is_from ? RowExclusiveLock : AccessShareLock));
rte = makeNode(RangeTblEntry);
rte->rtekind = RTE_RELATION;
attnums = CopyGetAttnums(tupDesc, rel, stmt->attlist);
foreach(cur, attnums)
{
- int attno = lfirst_int(cur) -
- FirstLowInvalidHeapAttributeNumber;
+ int attno = lfirst_int(cur) -
+ FirstLowInvalidHeapAttributeNumber;
if (is_from)
rte->modifiedCols = bms_add_member(rte->modifiedCols, attno);
cstate = (CopyStateData *) palloc0(sizeof(CopyStateData));
/*
- * We allocate everything used by a cstate in a new memory context.
- * This avoids memory leaks during repeated use of COPY in a query.
+ * We allocate everything used by a cstate in a new memory context. This
+ * avoids memory leaks during repeated use of COPY in a query.
*/
cstate->copycontext = AllocSetContextCreate(CurrentMemoryContext,
"COPY",
cstate->file_encoding = pg_get_client_encoding();
/*
- * Set up encoding conversion info. Even if the file and server
- * encodings are the same, we must apply pg_any_to_server() to validate
- * data in multibyte encodings.
+ * Set up encoding conversion info. Even if the file and server encodings
+ * are the same, we must apply pg_any_to_server() to validate data in
+ * multibyte encodings.
*/
cstate->need_transcoding =
(cstate->file_encoding != GetDatabaseEncoding() ||
*/
if (cstate->need_transcoding)
cstate->null_print_client = pg_server_to_any(cstate->null_print,
- cstate->null_print_len,
- cstate->file_encoding);
+ cstate->null_print_len,
+ cstate->file_encoding);
/* if a header has been requested send the line */
if (cstate->header_line)
{
slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
- if (slot == NULL) /* "do nothing" */
+ if (slot == NULL) /* "do nothing" */
skip_tuple = true;
- else /* trigger might have changed tuple */
+ else /* trigger might have changed tuple */
tuple = ExecMaterializeSlot(slot);
}
{
/* Initialize expressions in copycontext. */
defexprs[num_defaults] = ExecInitExpr(
- expression_planner((Expr *) defexpr), NULL);
+ expression_planner((Expr *) defexpr), NULL);
defmap[num_defaults] = attnum - 1;
num_defaults++;
}
if (!cstate->binary)
{
AttrNumber attr_count = list_length(cstate->attnumlist);
- int nfields = cstate->file_has_oids ? (attr_count + 1) : attr_count;
+ int nfields = cstate->file_has_oids ? (attr_count + 1) : attr_count;
cstate->max_fields = nfields;
cstate->raw_fields = (char **) palloc(nfields * sizeof(char *));
{
cstate->cur_lineno++;
if (CopyReadLine(cstate))
- return false; /* done */
+ return false; /* done */
}
cstate->cur_lineno++;
done = CopyReadLine(cstate);
/*
- * EOF at start of line means we're done. If we see EOF after
- * some characters, we act as though it was newline followed by
- * EOF, ie, process the line and then exit loop on next iteration.
+ * EOF at start of line means we're done. If we see EOF after some
+ * characters, we act as though it was newline followed by EOF, ie,
+ * process the line and then exit loop on next iteration.
*/
if (done && cstate->line_buf.len == 0)
return false;
FmgrInfo *in_functions = cstate->in_functions;
Oid *typioparams = cstate->typioparams;
int i;
- int nfields;
+ int nfields;
bool isnull;
bool file_has_oids = cstate->file_has_oids;
int *defmap = cstate->defmap;
if (fld_count == -1)
{
/*
- * Received EOF marker. In a V3-protocol copy, wait for
- * the protocol-level EOF, and complain if it doesn't come
- * immediately. This ensures that we correctly handle
- * CopyFail, if client chooses to send that now.
+ * Received EOF marker. In a V3-protocol copy, wait for the
+ * protocol-level EOF, and complain if it doesn't come
+ * immediately. This ensures that we correctly handle CopyFail,
+ * if client chooses to send that now.
*
- * Note that we MUST NOT try to read more data in an
- * old-protocol copy, since there is no protocol-level EOF
- * marker then. We could go either way for copy from file,
- * but choose to throw error if there's data after the EOF
- * marker, for consistency with the new-protocol case.
+ * Note that we MUST NOT try to read more data in an old-protocol
+ * copy, since there is no protocol-level EOF marker then. We
+ * could go either way for copy from file, but choose to throw
+ * error if there's data after the EOF marker, for consistency
+ * with the new-protocol case.
*/
- char dummy;
+ char dummy;
if (cstate->copy_dest != COPY_OLD_FE &&
CopyGetData(cstate, &dummy, 1, 1) > 0)
if (file_has_oids)
{
- Oid loaded_oid;
+ Oid loaded_oid;
cstate->cur_attname = "oid";
loaded_oid =
DatumGetObjectId(CopyReadBinaryAttribute(cstate,
0,
- &cstate->oid_in_function,
- cstate->oid_typioparam,
+ &cstate->oid_in_function,
+ cstate->oid_typioparam,
-1,
&isnull));
if (isnull || loaded_oid == InvalidOid)
/*
* Now compute and insert any defaults available for the columns not
- * provided by the input data. Anything not processed here or above
- * will remain NULL.
+ * provided by the input data. Anything not processed here or above will
+ * remain NULL.
*/
for (i = 0; i < num_defaults; i++)
{
* performing de-escaping as needed.
*
* The input is in line_buf. We use attribute_buf to hold the result
- * strings. cstate->raw_fields[k] is set to point to the k'th attribute
- * string, or NULL when the input matches the null marker string.
+ * strings. cstate->raw_fields[k] is set to point to the k'th attribute
+ * string, or NULL when the input matches the null marker string.
* This array is expanded as necessary.
*
- * (Note that the caller cannot check for nulls since the returned
- * string would be the post-de-escaping equivalent, which may look
+ * (Note that the caller cannot check for nulls since the returned
+ * string would be the post-de-escaping equivalent, which may look
* the same as some valid data string.)
*
* delim is the column delimiter string (must be just one byte for now).
if (fieldno >= cstate->max_fields)
{
cstate->max_fields *= 2;
- cstate->raw_fields =
- repalloc(cstate->raw_fields, cstate->max_fields*sizeof(char *));
+ cstate->raw_fields =
+ repalloc(cstate->raw_fields, cstate->max_fields * sizeof(char *));
}
/* Remember start of field on both input and output sides */
if (fieldno >= cstate->max_fields)
{
cstate->max_fields *= 2;
- cstate->raw_fields =
- repalloc(cstate->raw_fields, cstate->max_fields*sizeof(char *));
+ cstate->raw_fields =
+ repalloc(cstate->raw_fields, cstate->max_fields * sizeof(char *));
}
/* Remember start of field on both input and output sides */
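/*
 * A minimal standalone sketch of the doubling-growth pattern used for
 * raw_fields above.  The names (field_buf, grow_fields) are invented for
 * the example, and plain malloc/realloc stand in for palloc/repalloc.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	char	  **raw_fields;
	int			max_fields;
} field_buf;

static void
grow_fields(field_buf *buf, int fieldno)
{
	/* double the array whenever the next field index would overflow it */
	if (fieldno >= buf->max_fields)
	{
		buf->max_fields *= 2;
		buf->raw_fields = realloc(buf->raw_fields,
								  buf->max_fields * sizeof(char *));
	}
}

int
main(void)
{
	field_buf	buf;
	int			i;

	buf.max_fields = 4;
	buf.raw_fields = malloc(buf.max_fields * sizeof(char *));

	for (i = 0; i < 100; i++)
	{
		grow_fields(&buf, i);
		buf.raw_fields[i] = NULL;	/* stands in for a parsed field pointer */
	}
	printf("capacity grew to %d\n", buf.max_fields);
	free(buf.raw_fields);
	return 0;
}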
void
check_encoding_locale_matches(int encoding, const char *collate, const char *ctype)
{
- int ctype_encoding = pg_get_encoding_from_locale(ctype, true);
- int collate_encoding = pg_get_encoding_from_locale(collate, true);
+ int ctype_encoding = pg_get_encoding_from_locale(ctype, true);
+ int collate_encoding = pg_get_encoding_from_locale(collate, true);
if (!(ctype_encoding == encoding ||
ctype_encoding == PG_SQL_ASCII ||
heap_close(pg_database, AccessShareLock);
if (!OidIsValid(oid) && !missing_ok)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("database \"%s\" does not exist",
- dbname)));
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_DATABASE),
+ errmsg("database \"%s\" does not exist",
+ dbname)));
return oid;
}
const char *relationship, const char *plan_name,
ExplainState *es);
static void show_plan_tlist(PlanState *planstate, List *ancestors,
- ExplainState *es);
+ ExplainState *es);
static void show_expression(Node *node, const char *qlabel,
PlanState *planstate, List *ancestors,
bool useprefix, ExplainState *es);
static void show_qual(List *qual, const char *qlabel,
- PlanState *planstate, List *ancestors,
- bool useprefix, ExplainState *es);
+ PlanState *planstate, List *ancestors,
+ bool useprefix, ExplainState *es);
static void show_scan_qual(List *qual, const char *qlabel,
- PlanState *planstate, List *ancestors,
- ExplainState *es);
+ PlanState *planstate, List *ancestors,
+ ExplainState *es);
static void show_upper_qual(List *qual, const char *qlabel,
- PlanState *planstate, List *ancestors,
- ExplainState *es);
+ PlanState *planstate, List *ancestors,
+ ExplainState *es);
static void show_sort_keys(SortState *sortstate, List *ancestors,
- ExplainState *es);
+ ExplainState *es);
static void show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
- ExplainState *es);
+ ExplainState *es);
static void show_sort_keys_common(PlanState *planstate,
- int nkeys, AttrNumber *keycols,
- List *ancestors, ExplainState *es);
+ int nkeys, AttrNumber *keycols,
+ List *ancestors, ExplainState *es);
static void show_sort_info(SortState *sortstate, ExplainState *es);
static void show_hash_info(HashState *hashstate, ExplainState *es);
static void show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es);
static void ExplainMemberNodes(List *plans, PlanState **planstates,
List *ancestors, ExplainState *es);
static void ExplainSubPlans(List *plans, List *ancestors,
- const char *relationship, ExplainState *es);
+ const char *relationship, ExplainState *es);
static void ExplainProperty(const char *qlabel, const char *value,
bool numeric, ExplainState *es);
static void ExplainOpenGroup(const char *objtype, const char *labelname,
{
bool useprefix;
- useprefix = (IsA(planstate->plan, SubqueryScan) || es->verbose);
+ useprefix = (IsA(planstate->plan, SubqueryScan) || es->verbose);
show_qual(qual, qlabel, planstate, ancestors, useprefix, es);
}
/* Globally visible state variables */
-bool creating_extension = false;
-Oid CurrentExtensionObject = InvalidOid;
+bool creating_extension = false;
+Oid CurrentExtensionObject = InvalidOid;
/*
* Internal data structure to hold the results of parsing a control file
{
char *name; /* name of the extension */
char *directory; /* directory for script files */
- char *default_version; /* default install target version, if any */
- char *module_pathname; /* string to substitute for MODULE_PATHNAME */
+ char *default_version; /* default install target version, if any */
+ char *module_pathname; /* string to substitute for MODULE_PATHNAME */
char *comment; /* comment, if any */
char *schema; /* target schema (allowed if !relocatable) */
bool relocatable; /* is ALTER EXTENSION SET SCHEMA supported? */
List *reachable; /* List of ExtensionVersionInfo's */
bool installable; /* does this version have an install script? */
/* working state for Dijkstra's algorithm: */
- bool distance_known; /* is distance from start known yet? */
+ bool distance_known; /* is distance from start known yet? */
int distance; /* current worst-case distance estimate */
- struct ExtensionVersionInfo *previous; /* current best predecessor */
+ struct ExtensionVersionInfo *previous; /* current best predecessor */
} ExtensionVersionInfo;
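/*
 * A standalone sketch of the unit-weight shortest-path search that the
 * distance_known / distance / previous fields above support.  The tiny
 * adjacency matrix and the version labels are invented for illustration;
 * the real code walks ExtensionVersionInfo nodes built from script names.
 */
#include <stdio.h>
#include <limits.h>
#include <stdbool.h>

#define NVER 4

static const char *names[NVER] = {"1.0", "1.1", "1.2", "2.0"};
/* edge[i][j] is true when an update script from version i to j exists */
static const bool edge[NVER][NVER] = {
	{false, true, false, false},	/* 1.0 -> 1.1 */
	{false, false, true, false},	/* 1.1 -> 1.2 */
	{false, false, false, true},	/* 1.2 -> 2.0 */
	{false, false, false, false}
};

int
main(void)
{
	int			distance[NVER];
	int			previous[NVER];
	bool		known[NVER];
	int			i,
				v;

	for (i = 0; i < NVER; i++)
	{
		distance[i] = INT_MAX;
		previous[i] = -1;
		known[i] = false;
	}
	distance[0] = 0;			/* start from version "1.0" */

	for (;;)
	{
		int			best = -1;

		/* pick the nearest not-yet-finalized node */
		for (i = 0; i < NVER; i++)
			if (!known[i] && distance[i] != INT_MAX &&
				(best < 0 || distance[i] < distance[best]))
				best = i;
		if (best < 0)
			break;
		known[best] = true;

		/* relax every outgoing update edge, each with cost 1 */
		for (i = 0; i < NVER; i++)
			if (edge[best][i] && distance[best] + 1 < distance[i])
			{
				distance[i] = distance[best] + 1;
				previous[i] = best;
			}
	}

	/* print the path to "2.0" backwards via previous[] */
	for (v = NVER - 1; v >= 0; v = previous[v])
	{
		printf("%s%s", names[v], previous[v] >= 0 ? " <- " : "\n");
		if (previous[v] < 0)
			break;
	}
	return 0;
}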
/* Local functions */
/*
* get_extension_oid - given an extension name, look up the OID
*
- * If missing_ok is false, throw an error if extension name not found. If
+ * If missing_ok is false, throw an error if extension name not found. If
* true, just return InvalidOid.
*/
Oid
heap_close(rel, AccessShareLock);
if (!OidIsValid(result) && !missing_ok)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("extension \"%s\" does not exist",
- extname)));
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("extension \"%s\" does not exist",
+ extname)));
return result;
}
int namelen = strlen(extensionname);
/*
- * Disallow empty names (the parser rejects empty identifiers anyway,
- * but let's check).
+ * Disallow empty names (the parser rejects empty identifiers anyway, but
+ * let's check).
*/
if (namelen == 0)
ereport(ERROR,
errdetail("Extension names must not contain \"--\".")));
/*
- * No leading or trailing dash either. (We could probably allow this,
- * but it would require much care in filename parsing and would make
- * filenames visually if not formally ambiguous. Since there's no
- * real-world use case, let's just forbid it.)
+ * No leading or trailing dash either. (We could probably allow this, but
+ * it would require much care in filename parsing and would make filenames
+ * visually if not formally ambiguous. Since there's no real-world use
+ * case, let's just forbid it.)
*/
if (extensionname[0] == '-' || extensionname[namelen - 1] == '-')
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid extension name: \"%s\"", extensionname),
- errdetail("Extension names must not begin or end with \"-\".")));
+ errdetail("Extension names must not begin or end with \"-\".")));
/*
* No directory separators either (this is sufficient to prevent ".."
if (namelen == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid extension version name: \"%s\"", versionname),
+ errmsg("invalid extension version name: \"%s\"", versionname),
errdetail("Version names must not be empty.")));
/*
if (strstr(versionname, "--"))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid extension version name: \"%s\"", versionname),
+ errmsg("invalid extension version name: \"%s\"", versionname),
errdetail("Version names must not contain \"--\".")));
/*
if (versionname[0] == '-' || versionname[namelen - 1] == '-')
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid extension version name: \"%s\"", versionname),
- errdetail("Version names must not begin or end with \"-\".")));
+ errmsg("invalid extension version name: \"%s\"", versionname),
+ errdetail("Version names must not begin or end with \"-\".")));
/*
* No directory separators either (this is sufficient to prevent ".."
if (first_dir_separator(versionname) != NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid extension version name: \"%s\"", versionname),
+ errmsg("invalid extension version name: \"%s\"", versionname),
errdetail("Version names must not contain directory separator characters.")));
}
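/*
 * A standalone sketch of the naming rules spelled out by the errdetail
 * messages above: non-empty, no "--", no leading or trailing "-", and no
 * directory separator characters.  The helper name check_name is invented,
 * and a simple strchr() of '/' stands in for first_dir_separator().
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool
check_name(const char *name)
{
	size_t		len = strlen(name);

	if (len == 0)
		return false;			/* must not be empty */
	if (strstr(name, "--") != NULL)
		return false;			/* must not contain "--" */
	if (name[0] == '-' || name[len - 1] == '-')
		return false;			/* must not begin or end with "-" */
	if (strchr(name, '/') != NULL)
		return false;			/* must not contain directory separators */
	return true;
}

int
main(void)
{
	const char *samples[] = {"hstore", "1.0--1.1", "-bad", "good-1.0", "a/b"};
	int			i;

	for (i = 0; i < 5; i++)
		printf("%-10s %s\n", samples[i],
			   check_name(samples[i]) ? "ok" : "rejected");
	return 0;
}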
get_share_path(my_exec_path, sharepath);
result = (char *) palloc(MAXPGPATH);
- snprintf(result, MAXPGPATH, "%s/%s", sharepath, control->directory);
+ snprintf(result, MAXPGPATH, "%s/%s", sharepath, control->directory);
return result;
}
/*
* Parse contents of primary or auxiliary control file, and fill in
- * fields of *control. We parse primary file if version == NULL,
+ * fields of *control. We parse primary file if version == NULL,
* else the optional auxiliary file for that version.
*
* Control files are supposed to be very short, half a dozen lines,
char *filename;
FILE *file;
ConfigVariable *item,
- *head = NULL,
- *tail = NULL;
+ *head = NULL,
+ *tail = NULL;
/*
* Locate the file to read. Auxiliary files are optional.
/* syntax error in name list */
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" must be a list of extension names",
- item->name)));
+ errmsg("parameter \"%s\" must be a list of extension names",
+ item->name)));
}
}
else
read_extension_script_file(const ExtensionControlFile *control,
const char *filename)
{
- int src_encoding;
- int dest_encoding = GetDatabaseEncoding();
- bytea *content;
+ int src_encoding;
+ int dest_encoding = GetDatabaseEncoding();
+ bytea *content;
char *src_str;
- char *dest_str;
- int len;
+ char *dest_str;
+ int len;
content = read_binary_file(filename, 0, -1);
* filename is used only to report errors.
*
* Note: it's tempting to just use SPI to execute the string, but that does
- * not work very well. The really serious problem is that SPI will parse,
+ * not work very well. The really serious problem is that SPI will parse,
* analyze, and plan the whole string before executing any of it; of course
* this fails if there are any plannable statements referring to objects
* created earlier in the script. A lesser annoyance is that SPI insists
List *requiredSchemas,
const char *schemaName, Oid schemaOid)
{
- char *filename;
+ char *filename;
char *save_client_min_messages,
*save_log_min_messages,
*save_search_path;
* so that we won't spam the user with useless NOTICE messages from common
* script actions like creating shell types.
*
- * We use the equivalent of SET LOCAL to ensure the setting is undone
- * upon error.
+ * We use the equivalent of SET LOCAL to ensure the setting is undone upon
+ * error.
*/
save_client_min_messages =
pstrdup(GetConfigOption("client_min_messages", false));
* makes the target schema be the default creation target namespace.
*
* Note: it might look tempting to use PushOverrideSearchPath for this,
- * but we cannot do that. We have to actually set the search_path GUC
- * in case the extension script examines or changes it.
+ * but we cannot do that. We have to actually set the search_path GUC in
+ * case the extension script examines or changes it.
*/
save_search_path = pstrdup(GetConfigOption("search_path", false));
/*
* Set creating_extension and related variables so that
* recordDependencyOnCurrentExtension and other functions do the right
- * things. On failure, ensure we reset these variables.
+ * things. On failure, ensure we reset these variables.
*/
creating_extension = true;
CurrentExtensionObject = extensionOid;
PG_TRY();
{
- char *sql = read_extension_script_file(control, filename);
+ char *sql = read_extension_script_file(control, filename);
/*
* If it's not relocatable, substitute the target schema name for
* occurrences of @extschema@.
*
- * For a relocatable extension, we just run the script as-is.
- * There cannot be any need for @extschema@, else it wouldn't
- * be relocatable.
+ * For a relocatable extension, we just run the script as-is. There
+ * cannot be any need for @extschema@, else it wouldn't be
+ * relocatable.
*/
if (!control->relocatable)
{
- const char *qSchemaName = quote_identifier(schemaName);
+ const char *qSchemaName = quote_identifier(schemaName);
sql = text_to_cstring(
- DatumGetTextPP(
- DirectFunctionCall3(replace_text,
- CStringGetTextDatum(sql),
- CStringGetTextDatum("@extschema@"),
- CStringGetTextDatum(qSchemaName))));
+ DatumGetTextPP(
+ DirectFunctionCall3(replace_text,
+ CStringGetTextDatum(sql),
+ CStringGetTextDatum("@extschema@"),
+ CStringGetTextDatum(qSchemaName))));
}
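/*
 * A standalone sketch of the @extschema@ substitution described above,
 * using a naive replace-all over a plain C string instead of the
 * replace_text() call.  replace_all() and the sample script line are
 * invented for the example, and identifier quoting is omitted.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
replace_all(const char *src, const char *from, const char *to)
{
	size_t		fromlen = strlen(from);
	size_t		tolen = strlen(to);
	/* worst case: every character starts a match that grows to tolen */
	size_t		cap = strlen(src) * (tolen > fromlen ? tolen : 1) + 1;
	char	   *result = malloc(cap);
	char	   *out = result;

	while (*src)
	{
		if (strncmp(src, from, fromlen) == 0)
		{
			memcpy(out, to, tolen);
			out += tolen;
			src += fromlen;
		}
		else
			*out++ = *src++;
	}
	*out = '\0';
	return result;
}

int
main(void)
{
	const char *sql = "CREATE FUNCTION @extschema@.f() RETURNS int ...";
	char	   *rewritten = replace_all(sql, "@extschema@", "myschema");

	puts(rewritten);
	free(rewritten);
	return 0;
}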
/*
if (control->module_pathname)
{
sql = text_to_cstring(
- DatumGetTextPP(
- DirectFunctionCall3(replace_text,
- CStringGetTextDatum(sql),
- CStringGetTextDatum("MODULE_PATHNAME"),
- CStringGetTextDatum(control->module_pathname))));
+ DatumGetTextPP(
+ DirectFunctionCall3(replace_text,
+ CStringGetTextDatum(sql),
+ CStringGetTextDatum("MODULE_PATHNAME"),
+ CStringGetTextDatum(control->module_pathname))));
}
execute_sql_string(sql, filename);
struct dirent *de;
location = get_extension_script_directory(control);
- dir = AllocateDir(location);
+ dir = AllocateDir(location);
while ((de = ReadDir(dir, location)) != NULL)
{
char *vername;
* is still good.
*
* Result is a List of names of versions to transition through (the initial
- * version is *not* included). Returns NIL if no such path.
+ * version is *not* included). Returns NIL if no such path.
*/
static List *
find_update_path(List *evi_list,
foreach(lc, evi->reachable)
{
ExtensionVersionInfo *evi2 = (ExtensionVersionInfo *) lfirst(lc);
- int newdist;
+ int newdist;
newdist = evi->distance + 1;
if (newdist < evi2->distance)
DefElem *d_schema = NULL;
DefElem *d_new_version = NULL;
DefElem *d_old_version = NULL;
- char *schemaName;
+ char *schemaName;
Oid schemaOid;
- char *versionName;
- char *oldVersionName;
+ char *versionName;
+ char *oldVersionName;
Oid extowner = GetUserId();
ExtensionControlFile *pcontrol;
ExtensionControlFile *control;
check_valid_extension_name(stmt->extname);
/*
- * Check for duplicate extension name. The unique index on
+ * Check for duplicate extension name. The unique index on
* pg_extension.extname would catch this anyway, and serves as a backstop
- * in case of race conditions; but this is a friendlier error message,
- * and besides we need a check to support IF NOT EXISTS.
+ * in case of race conditions; but this is a friendlier error message, and
+ * besides we need a check to support IF NOT EXISTS.
*/
if (get_extension_oid(stmt->extname, true) != InvalidOid)
{
}
/*
- * We use global variables to track the extension being created, so we
- * can create only one extension at the same time.
+ * We use global variables to track the extension being created, so we can
+ * create only one extension at the same time.
*/
if (creating_extension)
ereport(ERROR,
if (list_length(updateVersions) == 1)
{
/*
- * Simple case where there's just one update script to run.
- * We will not need any follow-on update steps.
+ * Simple case where there's just one update script to run. We
+ * will not need any follow-on update steps.
*/
Assert(strcmp((char *) linitial(updateVersions), versionName) == 0);
updateVersions = NIL;
strcmp(control->schema, schemaName) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("extension \"%s\" must be installed in schema \"%s\"",
- control->name,
- control->schema)));
+ errmsg("extension \"%s\" must be installed in schema \"%s\"",
+ control->name,
+ control->schema)));
/* If the user is giving us the schema name, it must exist already */
schemaOid = get_namespace_oid(schemaName, false);
{
/*
* The extension is not relocatable and the author gave us a schema
- * for it. We create the schema here if it does not already exist.
+ * for it. We create the schema here if it does not already exist.
*/
schemaName = control->schema;
schemaOid = get_namespace_oid(schemaName, true);
* Else, use the current default creation namespace, which is the
* first explicit entry in the search_path.
*/
- List *search_path = fetch_search_path(false);
+ List *search_path = fetch_search_path(false);
- if (search_path == NIL) /* probably can't happen */
+ if (search_path == NIL) /* probably can't happen */
elog(ERROR, "there is no default creation target");
schemaOid = linitial_oid(search_path);
schemaName = get_namespace_name(schemaOid);
- if (schemaName == NULL) /* recently-deleted namespace? */
+ if (schemaName == NULL) /* recently-deleted namespace? */
elog(ERROR, "there is no default creation target");
list_free(search_path);
* extension script actually creates any objects there, it will fail if
* the user doesn't have such permissions. But there are cases such as
* procedural languages where it's convenient to set schema = pg_catalog
- * yet we don't want to restrict the command to users with ACL_CREATE
- * for pg_catalog.
+ * yet we don't want to restrict the command to users with ACL_CREATE for
+ * pg_catalog.
*/
/*
- * Look up the prerequisite extensions, and build lists of their OIDs
- * and the OIDs of their target schemas.
+ * Look up the prerequisite extensions, and build lists of their OIDs and
+ * the OIDs of their target schemas.
*/
requiredExtensions = NIL;
requiredSchemas = NIL;
schemaName, schemaOid);
/*
- * If additional update scripts have to be executed, apply the updates
- * as though a series of ALTER EXTENSION UPDATE commands were given
+ * If additional update scripts have to be executed, apply the updates as
+ * though a series of ALTER EXTENSION UPDATE commands were given
*/
ApplyExtensionUpdates(extensionOid, pcontrol,
versionName, updateVersions);
/*
* This function lists the available extensions (one row per primary control
- * file in the control directory). We parse each control file and report the
+ * file in the control directory). We parse each control file and report the
* interesting fields.
*
* The system view pg_available_extensions provides a user interface to this
Datum
pg_available_extensions(PG_FUNCTION_ARGS)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
- Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- char *location;
- DIR *dir;
- struct dirent *de;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ char *location;
+ DIR *dir;
+ struct dirent *de;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
MemoryContextSwitchTo(oldcontext);
location = get_extension_control_directory();
- dir = AllocateDir(location);
+ dir = AllocateDir(location);
/*
- * If the control directory doesn't exist, we want to silently return
- * an empty set. Any other error will be reported by ReadDir.
+ * If the control directory doesn't exist, we want to silently return an
+ * empty set. Any other error will be reported by ReadDir.
*/
if (dir == NULL && errno == ENOENT)
{
/*
* This function lists the available extension versions (one row per
- * extension installation script). For each version, we parse the related
+ * extension installation script). For each version, we parse the related
* control file(s) and report the interesting fields.
*
* The system view pg_available_extension_versions provides a user interface
Datum
pg_available_extension_versions(PG_FUNCTION_ARGS)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
- Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- char *location;
- DIR *dir;
- struct dirent *de;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ char *location;
+ DIR *dir;
+ struct dirent *de;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
MemoryContextSwitchTo(oldcontext);
location = get_extension_control_directory();
- dir = AllocateDir(location);
+ dir = AllocateDir(location);
/*
- * If the control directory doesn't exist, we want to silently return
- * an empty set. Any other error will be reported by ReadDir.
+ * If the control directory doesn't exist, we want to silently return an
+ * empty set. Any other error will be reported by ReadDir.
*/
if (dir == NULL && errno == ENOENT)
{
struct dirent *de;
location = get_extension_script_directory(pcontrol);
- dir = AllocateDir(location);
+ dir = AllocateDir(location);
/* Note this will fail if script directory doesn't exist */
while ((de = ReadDir(dir, location)) != NULL)
{
pg_extension_update_paths(PG_FUNCTION_ARGS)
{
Name extname = PG_GETARG_NAME(0);
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
- Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
List *evi_list;
ExtensionControlFile *control;
ListCell *lc1;
text *wherecond = PG_GETARG_TEXT_P(1);
char *tablename;
Relation extRel;
- ScanKeyData key[1];
- SysScanDesc extScan;
+ ScanKeyData key[1];
+ SysScanDesc extScan;
HeapTuple extTup;
Datum arrayDatum;
Datum elementDatum;
ArrayType *a;
/*
- * We only allow this to be called from an extension's SQL script.
- * We shouldn't need any permissions check beyond that.
+ * We only allow this to be called from an extension's SQL script. We
+ * shouldn't need any permissions check beyond that.
*/
if (!creating_extension)
ereport(ERROR,
/*
* Check that the table exists and is a member of the extension being
- * created. This ensures that we don't need to register a dependency
- * to protect the extconfig entry.
+ * created. This ensures that we don't need to register a dependency to
+ * protect the extconfig entry.
*/
tablename = get_rel_name(tableoid);
if (tablename == NULL)
CurrentExtensionObject)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("table \"%s\" is not a member of the extension being created",
- tablename)));
+ errmsg("table \"%s\" is not a member of the extension being created",
+ tablename)));
/*
- * Add the table OID and WHERE condition to the extension's extconfig
- * and extcondition arrays.
+ * Add the table OID and WHERE condition to the extension's extconfig and
+ * extcondition arrays.
*/
/* Find the pg_extension tuple */
extTup = systable_getnext(extScan);
- if (!HeapTupleIsValid(extTup)) /* should not happen */
+ if (!HeapTupleIsValid(extTup)) /* should not happen */
elog(ERROR, "extension with oid %u does not exist",
CurrentExtensionObject);
Assert(ARR_NDIM(a) == 1);
Assert(ARR_LBOUND(a)[0] == 1);
- arrayIndex = ARR_DIMS(a)[0] + 1; /* add after end */
+ arrayIndex = ARR_DIMS(a)[0] + 1; /* add after end */
a = array_set(a, 1, &arrayIndex,
elementDatum,
Assert(ARR_NDIM(a) == 1);
Assert(ARR_LBOUND(a)[0] == 1);
- arrayIndex = ARR_DIMS(a)[0] + 1; /* add after end */
+ arrayIndex = ARR_DIMS(a)[0] + 1; /* add after end */
a = array_set(a, 1, &arrayIndex,
elementDatum,
Oid oldNspOid = InvalidOid;
AclResult aclresult;
Relation extRel;
- ScanKeyData key[2];
- SysScanDesc extScan;
+ ScanKeyData key[2];
+ SysScanDesc extScan;
HeapTuple extTup;
Form_pg_extension extForm;
Relation depRel;
- SysScanDesc depScan;
+ SysScanDesc depScan;
HeapTuple depTup;
if (list_length(names) != 1)
extTup = systable_getnext(extScan);
- if (!HeapTupleIsValid(extTup)) /* should not happen */
+ if (!HeapTupleIsValid(extTup)) /* should not happen */
elog(ERROR, "extension with oid %u does not exist", extensionOid);
/* Copy tuple so we can modify it below */
systable_endscan(extScan);
/*
- * If the extension is already in the target schema, just silently
- * do nothing.
+ * If the extension is already in the target schema, just silently do
+ * nothing.
*/
if (extForm->extnamespace == nspOid)
{
{
Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
ObjectAddress dep;
- Oid dep_oldNspOid;
+ Oid dep_oldNspOid;
/*
- * Ignore non-membership dependencies. (Currently, the only other
+ * Ignore non-membership dependencies. (Currently, the only other
* case we could see here is a normal dependency from another
* extension.)
*/
ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
{
DefElem *d_new_version = NULL;
- char *versionName;
- char *oldVersionName;
+ char *versionName;
+ char *oldVersionName;
ExtensionControlFile *control;
Oid extensionOid;
Relation extRel;
- ScanKeyData key[1];
- SysScanDesc extScan;
+ ScanKeyData key[1];
+ SysScanDesc extScan;
HeapTuple extTup;
List *updateVersions;
Datum datum;
ListCell *lc;
/*
- * We use global variables to track the extension being created, so we
- * can create/update only one extension at the same time.
+ * We use global variables to track the extension being created, so we can
+ * create/update only one extension at the same time.
*/
if (creating_extension)
ereport(ERROR,
extTup = systable_getnext(extScan);
if (!HeapTupleIsValid(extTup))
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("extension \"%s\" does not exist",
- stmt->extname)));
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("extension \"%s\" does not exist",
+ stmt->extname)));
extensionOid = HeapTupleGetOid(extTup);
if (strcmp(oldVersionName, versionName) == 0)
{
ereport(NOTICE,
- (errmsg("version \"%s\" of extension \"%s\" is already installed",
- versionName, stmt->extname)));
+ (errmsg("version \"%s\" of extension \"%s\" is already installed",
+ versionName, stmt->extname)));
return;
}
List *requiredExtensions;
List *requiredSchemas;
Relation extRel;
- ScanKeyData key[1];
- SysScanDesc extScan;
+ ScanKeyData key[1];
+ SysScanDesc extScan;
HeapTuple extTup;
Form_pg_extension extForm;
Datum values[Natts_pg_extension];
extTup = systable_getnext(extScan);
- if (!HeapTupleIsValid(extTup)) /* should not happen */
+ if (!HeapTupleIsValid(extTup)) /* should not happen */
elog(ERROR, "extension with oid %u does not exist",
extensionOid);
schemaName, schemaOid);
/*
- * Update prior-version name and loop around. Since execute_sql_string
- * did a final CommandCounterIncrement, we can update the pg_extension
- * row again.
+ * Update prior-version name and loop around. Since
+ * execute_sql_string did a final CommandCounterIncrement, we can
+ * update the pg_extension row again.
*/
oldVersionName = versionName;
}
void
ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)
{
- ObjectAddress extension;
- ObjectAddress object;
- Relation relation;
- Oid oldExtension;
+ ObjectAddress extension;
+ ObjectAddress object;
+ Relation relation;
+ Oid oldExtension;
extension.classId = ExtensionRelationId;
extension.objectId = get_extension_oid(stmt->extname, false);
stmt->extname);
/*
- * Translate the parser representation that identifies the object into
- * an ObjectAddress. get_object_address() will throw an error if the
- * object does not exist, and will also acquire a lock on the object to
- * guard against concurrent DROP and ALTER EXTENSION ADD/DROP operations.
+ * Translate the parser representation that identifies the object into an
+ * ObjectAddress. get_object_address() will throw an error if the object
+ * does not exist, and will also acquire a lock on the object to guard
+ * against concurrent DROP and ALTER EXTENSION ADD/DROP operations.
*/
object = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
&relation, ShareUpdateExclusiveLock);
*/
if (OidIsValid(fdwvalidator))
ereport(WARNING,
- (errmsg("changing the foreign-data wrapper validator can cause "
- "the options for dependent objects to become invalid")));
+ (errmsg("changing the foreign-data wrapper validator can cause "
+ "the options for dependent objects to become invalid")));
}
else
{
ObjectAddress referenced;
/*
- * Flush all existing dependency records of this FDW on functions;
- * we assume there can be none other than the ones we are fixing.
+ * Flush all existing dependency records of this FDW on functions; we
+ * assume there can be none other than the ones we are fixing.
*/
deleteDependencyRecordsForClass(ForeignDataWrapperRelationId,
fdwId,
* We also disallow creating binary-compatibility casts involving
* domains. Casting from a domain to its base type is already
* allowed, and casting the other way ought to go through domain
- * coercion to permit constraint checking. Again, if you're intent on
+ * coercion to permit constraint checking. Again, if you're intent on
* having your own semantics for that, create a no-op cast function.
*
* NOTE: if we were to relax this, the above checks for composites
Oid
get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok)
{
- Oid oid;
+ Oid oid;
oid = GetSysCacheOid2(CASTSOURCETARGET,
ObjectIdGetDatum(sourcetypeid),
indexRelationId =
index_create(rel, indexRelationName, indexRelationId,
indexInfo, indexColNames,
- accessMethodId, tablespaceId, collationObjectId, classObjectId,
+ accessMethodId, tablespaceId, collationObjectId, classObjectId,
coloptions, reloptions, primary,
isconstraint, deferrable, initdeferred,
allowSystemTableMods,
else
{
/* Index expression */
- Node *expr = attribute->expr;
+ Node *expr = attribute->expr;
Assert(expr != NULL);
atttype = exprType(expr);
attcollation = exprCollation(expr);
/*
- * Strip any top-level COLLATE clause. This ensures that we treat
+ * Strip any top-level COLLATE clause. This ensures that we treat
* "x COLLATE y" and "(x COLLATE y)" alike.
*/
while (IsA(expr, CollateExpr))
}
else
{
- indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
+ indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
expr);
if (contain_subplans(expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in index expression")));
+ errmsg("cannot use subquery in index expression")));
if (contain_agg_clause(expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
/*
* Check we have a collation iff it's a collatable type. The only
* expected failures here are (1) COLLATE applied to a noncollatable
- * type, or (2) index expression had an unresolved collation. But
- * we might as well code this to be a complete consistency check.
+ * type, or (2) index expression had an unresolved collation. But we
+ * might as well code this to be a complete consistency check.
*/
if (type_is_collatable(atttype))
{
if (!HeapTupleIsValid(htup) && !missing_ok)
{
- HeapTuple amtup;
+ HeapTuple amtup;
amtup = SearchSysCache1(AMOID, ObjectIdGetDatum(amID));
if (!HeapTupleIsValid(amtup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator family \"%s\" does not exist for access method \"%s\"",
- NameListToString(opfamilyname),
- NameStr(((Form_pg_am) GETSTRUCT(amtup))->amname))));
+ NameListToString(opfamilyname),
+ NameStr(((Form_pg_am) GETSTRUCT(amtup))->amname))));
}
return htup;
/*
* get_opfamily_oid
- * find an opfamily OID by possibly qualified name
+ * find an opfamily OID by possibly qualified name
*
* If not found, returns InvalidOid if missing_ok, else throws error.
*/
if (!HeapTupleIsValid(htup) && !missing_ok)
{
- HeapTuple amtup;
+ HeapTuple amtup;
amtup = SearchSysCache1(AMOID, ObjectIdGetDatum(amID));
if (!HeapTupleIsValid(amtup))
/*
* get_opclass_oid
- * find an opclass OID by possibly qualified name
+ * find an opclass OID by possibly qualified name
*
* If not found, returns InvalidOid if missing_ok, else throws error.
*/
if (OidIsValid(member->sortfamily))
{
/*
- * Ordering op, check index supports that. (We could perhaps also
+ * Ordering op, check index supports that. (We could perhaps also
* check that the operator returns a type supported by the sortfamily,
* but that seems more trouble than it's worth here. If it does not,
- * the operator will never be matchable to any ORDER BY clause, but
- * no worse consequences can ensue. Also, trying to check that would
+ * the operator will never be matchable to any ORDER BY clause, but no
+ * worse consequences can ensue. Also, trying to check that would
* create an ordering hazard during dump/reload: it's possible that
* the family has been created but not yet populated with the required
* operators.)
if (!pg_am->amcanorderbyop)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("access method \"%s\" does not support ordering operators",
- NameStr(pg_am->amname))));
+ errmsg("access method \"%s\" does not support ordering operators",
+ NameStr(pg_am->amname))));
ReleaseSysCache(amtup);
}
foreach(l, operators)
{
OpFamilyMember *op = (OpFamilyMember *) lfirst(l);
- char oppurpose;
+ char oppurpose;
/*
* If adding to an existing family, check for conflict with an
{
ereport(NOTICE,
(errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
return;
}
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator family \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opfamilyname), stmt->amname)));
+ NameListToString(stmt->opfamilyname), stmt->amname)));
return;
}
Oid
AlterOpClassNamespace_oid(Oid opclassOid, Oid newNspOid)
{
- Oid oldNspOid;
+ Oid oldNspOid;
Relation rel;
rel = heap_open(OperatorClassRelationId, RowExclusiveLock);
Oid
AlterOpFamilyNamespace_oid(Oid opfamilyOid, Oid newNspOid)
{
- Oid oldNspOid;
+ Oid oldNspOid;
Relation rel;
rel = heap_open(OperatorFamilyRelationId, RowExclusiveLock);
List *operatorName = names;
TypeName *typeName1 = (TypeName *) linitial(argtypes);
TypeName *typeName2 = (TypeName *) lsecond(argtypes);
- Oid operOid, nspOid;
+ Oid operOid,
+ nspOid;
Relation rel;
rel = heap_open(OperatorRelationId, RowExclusiveLock);
Oid
AlterOperatorNamespace_oid(Oid operOid, Oid newNspOid)
{
- Oid oldNspOid;
+ Oid oldNspOid;
Relation rel;
rel = heap_open(OperatorRelationId, RowExclusiveLock);
if (queryDesc)
{
/*
- * Reset the queryDesc before anything else. This prevents us
- * from trying to shut down the executor twice, in case of an
- * error below. The transaction abort mechanisms will take care
- * of resource cleanup in such a case.
+ * Reset the queryDesc before anything else. This prevents us from
+ * trying to shut down the executor twice, in case of an error below.
+ * The transaction abort mechanisms will take care of resource cleanup
+ * in such a case.
*/
portal->queryDesc = NULL;
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo)
palloc(sizeof(ParamListInfoData) +
- (num_params - 1) *sizeof(ParamExternData));
+ (num_params - 1) * sizeof(ParamExternData));
/* we have static list of params, so no hooks needed */
paramLI->paramFetch = NULL;
paramLI->paramFetchArg = NULL;
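/*
 * A standalone sketch of the allocation idiom used for ParamListInfoData
 * above: sizeof(the struct) already covers the first element of the
 * trailing array, so only (num_params - 1) extra elements are added.  The
 * param_list / param_slot types are invented for the example, and malloc
 * stands in for palloc.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	int			value;
} param_slot;

typedef struct
{
	int			numParams;
	param_slot	params[1];		/* actually variable length */
} param_list;

int
main(void)
{
	int			num_params = 8;
	param_list *list;
	int			i;

	/* sizeof(param_list) includes the first array element */
	list = malloc(sizeof(param_list) +
				  (num_params - 1) * sizeof(param_slot));
	list->numParams = num_params;
	for (i = 0; i < num_params; i++)
		list->params[i].value = i;

	printf("allocated %d params\n", list->numParams);
	free(list);
	return 0;
}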
/* -------------------------------------------------------------------------
*
* seclabel.c
- * routines to support security label feature.
+ * routines to support security label feature.
*
* Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
typedef struct
{
const char *provider_name;
- check_object_relabel_type hook;
+ check_object_relabel_type hook;
} LabelProvider;
static List *label_provider_list = NIL;
ExecSecLabelStmt(SecLabelStmt *stmt)
{
LabelProvider *provider = NULL;
- ObjectAddress address;
- Relation relation;
- ListCell *lc;
+ ObjectAddress address;
+ Relation relation;
+ ListCell *lc;
/*
* Find the named label provider, or if none specified, check whether
if (label_provider_list == NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("no security label providers have been loaded")));
+ errmsg("no security label providers have been loaded")));
if (lnext(list_head(label_provider_list)) != NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("must specify provider when multiple security label providers have been loaded")));
+ errmsg("must specify provider when multiple security label providers have been loaded")));
provider = (LabelProvider *) linitial(label_provider_list);
}
else
{
- foreach (lc, label_provider_list)
+ foreach(lc, label_provider_list)
{
LabelProvider *lp = lfirst(lc);
}
/*
- * Translate the parser representation which identifies this object
- * into an ObjectAddress. get_object_address() will throw an error if
- * the object does not exist, and will also acquire a lock on the
- * target to guard against concurrent modifications.
+ * Translate the parser representation which identifies this object into
+ * an ObjectAddress. get_object_address() will throw an error if the
+ * object does not exist, and will also acquire a lock on the target to
+ * guard against concurrent modifications.
*/
address = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
&relation, ShareUpdateExclusiveLock);
switch (stmt->objtype)
{
case OBJECT_COLUMN:
+
/*
* Allow security labels only on columns of tables, views,
* composite types, and foreign tables (which are the only
}
/* Provider gets control here, may throw ERROR to veto new label. */
- (*provider->hook)(&address, stmt->label);
+ (*provider->hook) (&address, stmt->label);
/* Apply new label. */
SetSecurityLabel(&address, provider->provider_name, stmt->label);
GetSecurityLabel(const ObjectAddress *object, const char *provider)
{
Relation pg_seclabel;
- ScanKeyData keys[4];
- SysScanDesc scan;
+ ScanKeyData keys[4];
+ SysScanDesc scan;
HeapTuple tuple;
Datum datum;
bool isnull;
const char *provider, const char *label)
{
Relation pg_seclabel;
- ScanKeyData keys[4];
- SysScanDesc scan;
+ ScanKeyData keys[4];
+ SysScanDesc scan;
HeapTuple oldtup;
HeapTuple newtup = NULL;
Datum values[Natts_pg_seclabel];
DeleteSecurityLabel(const ObjectAddress *object)
{
Relation pg_seclabel;
- ScanKeyData skey[3];
- SysScanDesc scan;
+ ScanKeyData skey[3];
+ SysScanDesc scan;
HeapTuple oldtup;
int nkeys;
void
register_label_provider(const char *provider_name, check_object_relabel_type hook)
{
- LabelProvider *provider;
- MemoryContext oldcxt;
+ LabelProvider *provider;
+ MemoryContext oldcxt;
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
provider = palloc(sizeof(LabelProvider));
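/*
 * A standalone sketch of the provider-registration pattern visible above:
 * each provider pairs a name with a callback, is appended to a list at
 * registration time, and is later looked up by name when a label is
 * applied.  The types and names (relabel_hook, provider_list) are invented,
 * and a small fixed-size array stands in for the backend's List kept in
 * TopMemoryContext.
 */
#include <stdio.h>
#include <string.h>

typedef void (*relabel_hook) (const char *object, const char *label);

typedef struct
{
	const char *provider_name;
	relabel_hook hook;
} label_provider;

static label_provider provider_list[8];
static int	nproviders = 0;

static void
register_provider(const char *name, relabel_hook hook)
{
	provider_list[nproviders].provider_name = name;
	provider_list[nproviders].hook = hook;
	nproviders++;
}

static void
demo_hook(const char *object, const char *label)
{
	printf("labelling %s as %s\n", object, label);
}

int
main(void)
{
	int			i;

	register_provider("demo", demo_hook);

	/* find the named provider and let it veto or apply the label */
	for (i = 0; i < nproviders; i++)
		if (strcmp(provider_list[i].provider_name, "demo") == 0)
			(*provider_list[i].hook) ("mytable.col", "classified");
	return 0;
}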
seq->log_cnt = 1;
/*
- * Create a new storage file for the sequence. We want to keep the
+ * Create a new storage file for the sequence. We want to keep the
* sequence's relfrozenxid at 0, since it won't contain any unfrozen XIDs.
*/
RelationSetNewRelfilenode(seq_rel, InvalidTransactionId);
/*
* If the sequence has been transactionally replaced since we last saw it,
- * discard any cached-but-unissued values. We do not touch the currval()
+ * discard any cached-but-unissued values. We do not touch the currval()
* state, however.
*/
if (seqrel->rd_rel->relfilenode != elm->filenode)
static void ATSimpleRecursion(List **wqueue, Relation rel,
AlterTableCmd *cmd, bool recurse, LOCKMODE lockmode);
static void ATTypedTableRecursion(List **wqueue, Relation rel, AlterTableCmd *cmd,
- LOCKMODE lockmode);
+ LOCKMODE lockmode);
static List *find_typed_table_dependencies(Oid typeOid, const char *typeName,
- DropBehavior behavior);
+ DropBehavior behavior);
static void ATPrepAddColumn(List **wqueue, Relation rel, bool recurse, bool recursing,
AlterTableCmd *cmd, LOCKMODE lockmode);
static void ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
static void ATExecSetStorage(Relation rel, const char *colName,
Node *newValue, LOCKMODE lockmode);
static void ATPrepDropColumn(List **wqueue, Relation rel, bool recurse, bool recursing,
- AlterTableCmd *cmd, LOCKMODE lockmode);
+ AlterTableCmd *cmd, LOCKMODE lockmode);
static void ATExecDropColumn(List **wqueue, Relation rel, const char *colName,
DropBehavior behavior,
bool recurse, bool recursing,
IndexStmt *stmt, bool is_rebuild, LOCKMODE lockmode);
static void ATExecAddConstraint(List **wqueue,
AlteredTableInfo *tab, Relation rel,
- Constraint *newConstraint, bool recurse, LOCKMODE lockmode);
+ Constraint *newConstraint, bool recurse, LOCKMODE lockmode);
static void ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
- IndexStmt *stmt, LOCKMODE lockmode);
+ IndexStmt *stmt, LOCKMODE lockmode);
static void ATAddCheckConstraint(List **wqueue,
AlteredTableInfo *tab, Relation rel,
Constraint *constr,
AlterTableCmd *cmd, LOCKMODE lockmode);
static bool ATColumnChangeRequiresRewrite(Node *expr, AttrNumber varattno);
static void ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
- AlterTableCmd *cmd, LOCKMODE lockmode);
+ AlterTableCmd *cmd, LOCKMODE lockmode);
static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode);
static void ATPostAlterTypeParse(char *cmd, List **wqueue, LOCKMODE lockmode);
static void change_owner_recurse_to_sequences(Oid relationOid,
static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode);
static void ATExecSetRelOptions(Relation rel, List *defList, bool isReset, LOCKMODE lockmode);
static void ATExecEnableDisableTrigger(Relation rel, char *trigname,
- char fires_when, bool skip_system, LOCKMODE lockmode);
+ char fires_when, bool skip_system, LOCKMODE lockmode);
static void ATExecEnableDisableRule(Relation rel, char *rulename,
char fires_when, LOCKMODE lockmode);
static void ATPrepAddInherit(Relation child_rel);
/*
* Check consistency of arguments
*/
- if (stmt->oncommit != ONCOMMIT_NOOP
+ if (stmt->oncommit != ONCOMMIT_NOOP
&& stmt->relation->relpersistence != RELPERSISTENCE_TEMP)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
if (relkind == RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("default values on foreign tables are not supported")));
+ errmsg("default values on foreign tables are not supported")));
Assert(colDef->cooked_default == NULL);
/*
* RemoveRelations
* Implements DROP TABLE, DROP INDEX, DROP SEQUENCE, DROP VIEW,
- * DROP FOREIGN TABLE
+ * DROP FOREIGN TABLE
*/
void
RemoveRelations(DropStmt *drop)
if (defCollId != attribute->attcollation)
ereport(ERROR,
(errcode(ERRCODE_COLLATION_MISMATCH),
- errmsg("inherited column \"%s\" has a collation conflict",
- attributeName),
+ errmsg("inherited column \"%s\" has a collation conflict",
+ attributeName),
errdetail("\"%s\" versus \"%s\"",
get_collation_name(defCollId),
- get_collation_name(attribute->attcollation))));
+ get_collation_name(attribute->attcollation))));
/* Copy storage parameter */
if (def->storage == 0)
relkind != RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table, view, composite type, index or foreign table",
- RelationGetRelationName(targetrelation))));
+ errmsg("\"%s\" is not a table, view, composite type, index or foreign table",
+ RelationGetRelationName(targetrelation))));
/*
* permissions checking. only the owner of a class can change its schema.
ListCell *lo;
child_oids = find_typed_table_dependencies(targetrelation->rd_rel->reltype,
- RelationGetRelationName(targetrelation),
+ RelationGetRelationName(targetrelation),
behavior);
foreach(lo, child_oids)
renameatt(Oid myrelid, RenameStmt *stmt)
{
renameatt_internal(myrelid,
- stmt->subname, /* old att name */
- stmt->newname, /* new att name */
- interpretInhOption(stmt->relation->inhOpt), /* recursive? */
- false, /* recursing? */
- 0, /* expected inhcount */
+ stmt->subname, /* old att name */
+ stmt->newname, /* new att name */
+ interpretInhOption(stmt->relation->inhOpt), /* recursive? */
+ false, /* recursing? */
+ 0, /* expected inhcount */
stmt->behavior);
}
AlterTable(AlterTableStmt *stmt)
{
Relation rel;
- LOCKMODE lockmode = AlterTableGetLockLevel(stmt->cmds);
+ LOCKMODE lockmode = AlterTableGetLockLevel(stmt->cmds);
/*
* Acquire same level of lock as already acquired during parsing.
}
ATController(rel, stmt->cmds, interpretInhOption(stmt->relation->inhOpt),
- lockmode);
+ lockmode);
}
/*
AlterTableInternal(Oid relid, List *cmds, bool recurse)
{
Relation rel;
- LOCKMODE lockmode = AlterTableGetLockLevel(cmds);
+ LOCKMODE lockmode = AlterTableGetLockLevel(cmds);
rel = relation_open(relid, lockmode);
AlterTableGetLockLevel(List *cmds)
{
ListCell *lcmd;
- LOCKMODE lockmode = ShareUpdateExclusiveLock;
+ LOCKMODE lockmode = ShareUpdateExclusiveLock;
foreach(lcmd, cmds)
{
AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd);
- LOCKMODE cmd_lockmode = AccessExclusiveLock; /* default for compiler */
+ LOCKMODE cmd_lockmode = AccessExclusiveLock; /* default for compiler */
switch (cmd->subtype)
{
- /*
- * Need AccessExclusiveLock for these subcommands because they
- * affect or potentially affect both read and write operations.
- *
- * New subcommand types should be added here by default.
- */
- case AT_AddColumn: /* may rewrite heap, in some cases and visible to SELECT */
- case AT_DropColumn: /* change visible to SELECT */
+ /*
+ * Need AccessExclusiveLock for these subcommands because they
+ * affect or potentially affect both read and write
+ * operations.
+ *
+ * New subcommand types should be added here by default.
+ */
+ case AT_AddColumn: /* may rewrite heap, in some cases and visible
+ * to SELECT */
+ case AT_DropColumn: /* change visible to SELECT */
case AT_AddColumnToView: /* CREATE VIEW */
case AT_AlterColumnType: /* must rewrite heap */
case AT_DropConstraint: /* as DROP INDEX */
- case AT_AddOids: /* must rewrite heap */
- case AT_DropOids: /* calls AT_DropColumn */
+ case AT_AddOids: /* must rewrite heap */
+ case AT_DropOids: /* calls AT_DropColumn */
case AT_EnableAlwaysRule: /* may change SELECT rules */
case AT_EnableReplicaRule: /* may change SELECT rules */
- case AT_EnableRule: /* may change SELECT rules */
+ case AT_EnableRule: /* may change SELECT rules */
case AT_DisableRule: /* may change SELECT rules */
case AT_ChangeOwner: /* change visible to SELECT */
case AT_SetTableSpace: /* must rewrite heap */
cmd_lockmode = AccessExclusiveLock;
break;
- /*
- * These subcommands affect write operations only.
- */
+ /*
+ * These subcommands affect write operations only.
+ */
case AT_ColumnDefault:
- case AT_ProcessedConstraint: /* becomes AT_AddConstraint */
- case AT_AddConstraintRecurse: /* becomes AT_AddConstraint */
+ case AT_ProcessedConstraint: /* becomes AT_AddConstraint */
+ case AT_AddConstraintRecurse: /* becomes AT_AddConstraint */
case AT_EnableTrig:
case AT_EnableAlwaysTrig:
case AT_EnableReplicaTrig:
case AT_DisableTrig:
case AT_DisableTrigAll:
case AT_DisableTrigUser:
- case AT_AddIndex: /* from ADD CONSTRAINT */
+ case AT_AddIndex: /* from ADD CONSTRAINT */
case AT_AddIndexConstraint:
cmd_lockmode = ShareRowExclusiveLock;
break;
case CONSTR_EXCLUSION:
case CONSTR_PRIMARY:
case CONSTR_UNIQUE:
+
/*
* Cases essentially the same as CREATE INDEX. We
- * could reduce the lock strength to ShareLock if we
- * can work out how to allow concurrent catalog updates.
+ * could reduce the lock strength to ShareLock if
+ * we can work out how to allow concurrent catalog
+ * updates.
*/
cmd_lockmode = ShareRowExclusiveLock;
break;
case CONSTR_FOREIGN:
+
/*
* We add triggers to both tables when we add a
* Foreign Key, so the lock level must be at least
}
break;
- /*
- * These subcommands affect inheritance behaviour. Queries started before us
- * will continue to see the old inheritance behaviour, while queries started
- * after we commit will see new behaviour. No need to prevent reads or writes
- * to the subtable while we hook it up though. In both cases the parent table
- * is locked with AccessShareLock.
- */
+ /*
+ * These subcommands affect inheritance behaviour. Queries
+ * started before us will continue to see the old inheritance
+ * behaviour, while queries started after we commit will see
+ * new behaviour. No need to prevent reads or writes to the
+ * subtable while we hook it up though. In both cases the
+ * parent table is locked with AccessShareLock.
+ */
case AT_AddInherit:
case AT_DropInherit:
cmd_lockmode = ShareUpdateExclusiveLock;
break;
- /*
- * These subcommands affect general strategies for performance and maintenance,
- * though don't change the semantic results from normal data reads and writes.
- * Delaying an ALTER TABLE behind currently active writes only delays the point
- * where the new strategy begins to take effect, so there is no benefit in waiting.
- * In this case the minimum restriction applies: we don't currently allow
- * concurrent catalog updates.
- */
+ /*
+ * These subcommands affect general strategies for performance
+ * and maintenance, though don't change the semantic results
+ * from normal data reads and writes. Delaying an ALTER TABLE
+ * behind currently active writes only delays the point where
+ * the new strategy begins to take effect, so there is no
+ * benefit in waiting. In this case the minimum restriction
+ * applies: we don't currently allow concurrent catalog
+ * updates.
+ */
case AT_SetStatistics:
case AT_ClusterOn:
case AT_DropCluster:
cmd_lockmode = ShareUpdateExclusiveLock;
break;
- default: /* oops */
+ default: /* oops */
elog(ERROR, "unrecognized alter table type: %d",
(int) cmd->subtype);
break;
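
/*
 * Illustrative sketch (not part of the patched sources): the per-subcommand
 * lock levels chosen above are combined by keeping the strongest level that
 * any subcommand of the ALTER TABLE statement asks for.  Taking Max() is
 * valid because, among the levels used here, stronger locks have larger
 * numeric values.  The subtypes shown and the helper name are examples only.
 */
static LOCKMODE
strongest_lockmode(List *cmds)
{
	LOCKMODE	lockmode = ShareUpdateExclusiveLock;	/* weakest level used */
	ListCell   *lc;

	foreach(lc, cmds)
	{
		AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc);
		LOCKMODE	cmd_lockmode;

		switch (cmd->subtype)
		{
			case AT_SetStatistics:	/* maintenance-only group above */
				cmd_lockmode = ShareUpdateExclusiveLock;
				break;
			case AT_ColumnDefault:	/* write-only group above */
				cmd_lockmode = ShareRowExclusiveLock;
				break;
			default:			/* anything else: be conservative */
				cmd_lockmode = AccessExclusiveLock;
				break;
		}
		lockmode = Max(lockmode, cmd_lockmode);
	}
	return lockmode;
}
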
{
case AT_AddColumn: /* ADD COLUMN */
ATSimplePermissions(rel,
- ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE);
+ ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE);
ATPrepAddColumn(wqueue, rel, recurse, recursing, cmd, lockmode);
/* Recursion occurs during execution phase */
pass = AT_PASS_ADD_COL;
* substitutes default values into INSERTs before it expands
* rules.
*/
- ATSimplePermissions(rel, ATT_TABLE|ATT_VIEW);
+ ATSimplePermissions(rel, ATT_TABLE | ATT_VIEW);
ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode);
/* No command-specific prep needed */
pass = cmd->def ? AT_PASS_ADD_CONSTR : AT_PASS_DROP;
break;
case AT_DropNotNull: /* ALTER COLUMN DROP NOT NULL */
- ATSimplePermissions(rel, ATT_TABLE|ATT_FOREIGN_TABLE);
+ ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE);
ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode);
/* No command-specific prep needed */
pass = AT_PASS_DROP;
break;
case AT_SetNotNull: /* ALTER COLUMN SET NOT NULL */
- ATSimplePermissions(rel, ATT_TABLE|ATT_FOREIGN_TABLE);
+ ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE);
ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode);
/* No command-specific prep needed */
pass = AT_PASS_ADD_CONSTR;
break;
case AT_SetOptions: /* ALTER COLUMN SET ( options ) */
case AT_ResetOptions: /* ALTER COLUMN RESET ( options ) */
- ATSimplePermissions(rel, ATT_TABLE|ATT_INDEX);
+ ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX);
/* This command never recurses */
pass = AT_PASS_MISC;
break;
break;
case AT_DropColumn: /* DROP COLUMN */
ATSimplePermissions(rel,
- ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE);
+ ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE);
ATPrepDropColumn(wqueue, rel, recurse, recursing, cmd, lockmode);
/* Recursion occurs during execution phase */
pass = AT_PASS_DROP;
cmd->subtype = AT_AddConstraintRecurse;
pass = AT_PASS_ADD_CONSTR;
break;
- case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */
+ case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */
ATSimplePermissions(rel, ATT_TABLE);
/* This command never recurses */
/* No command-specific prep needed */
break;
case AT_AlterColumnType: /* ALTER COLUMN TYPE */
ATSimplePermissions(rel,
- ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE);
+ ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE);
/* Performs own recursion */
ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd, lockmode);
pass = AT_PASS_ALTER_TYPE;
pass = AT_PASS_DROP;
break;
case AT_SetTableSpace: /* SET TABLESPACE */
- ATSimplePermissions(rel, ATT_TABLE|ATT_INDEX);
+ ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX);
/* This command never recurses */
ATPrepSetTableSpace(tab, rel, cmd->name, lockmode);
pass = AT_PASS_MISC; /* doesn't actually matter */
break;
case AT_SetRelOptions: /* SET (...) */
case AT_ResetRelOptions: /* RESET (...) */
- ATSimplePermissions(rel, ATT_TABLE|ATT_INDEX);
+ ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX);
/* This command never recurses */
/* No command-specific prep needed */
pass = AT_PASS_MISC;
break;
case AT_DropColumn: /* DROP COLUMN */
ATExecDropColumn(wqueue, rel, cmd->name,
- cmd->behavior, false, false, cmd->missing_ok, lockmode);
+ cmd->behavior, false, false, cmd->missing_ok, lockmode);
break;
case AT_DropColumnRecurse: /* DROP COLUMN with recursion */
ATExecDropColumn(wqueue, rel, cmd->name,
- cmd->behavior, true, false, cmd->missing_ok, lockmode);
+ cmd->behavior, true, false, cmd->missing_ok, lockmode);
break;
case AT_AddIndex: /* ADD INDEX */
ATExecAddIndex(tab, rel, (IndexStmt *) cmd->def, false, lockmode);
ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def,
true, lockmode);
break;
- case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */
+ case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */
ATExecAddIndexConstraint(tab, rel, (IndexStmt *) cmd->def, lockmode);
break;
case AT_ValidateConstraint:
case AT_EnableTrig: /* ENABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
+ TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
break;
case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
break;
case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
- TRIGGER_FIRES_ON_REPLICA, false, lockmode);
+ TRIGGER_FIRES_ON_REPLICA, false, lockmode);
break;
case AT_DisableTrig: /* DISABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
break;
case AT_EnableTrigAll: /* ENABLE TRIGGER ALL */
ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
+ TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
break;
case AT_DisableTrigAll: /* DISABLE TRIGGER ALL */
ATExecEnableDisableTrigger(rel, NULL,
break;
case AT_EnableTrigUser: /* ENABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL,
- TRIGGER_FIRES_ON_ORIGIN, true, lockmode);
+ TRIGGER_FIRES_ON_ORIGIN, true, lockmode);
break;
case AT_DisableTrigUser: /* DISABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL,
* (Eventually we'll probably need to check for composite type
* dependencies even when we're just scanning the table without a
* rewrite, but at the moment a composite type does not enforce any
- * constraints, so it's not necessary/appropriate to enforce them
- * just during ALTER.)
+ * constraints, so it's not necessary/appropriate to enforce them just
+ * during ALTER.)
*/
if (tab->newvals != NIL || tab->rewrite)
{
con->conid);
/*
- * No need to mark the constraint row as validated,
- * we did that when we inserted the row earlier.
+ * No need to mark the constraint row as validated, we did
+ * that when we inserted the row earlier.
*/
heap_close(refrel, NoLock);
static void
ATSimplePermissions(Relation rel, int allowed_targets)
{
- int actual_target;
+ int actual_target;
switch (rel->rd_rel->relkind)
{
case ATT_TABLE:
msg = _("\"%s\" is not a table");
break;
- case ATT_TABLE|ATT_INDEX:
+ case ATT_TABLE | ATT_INDEX:
msg = _("\"%s\" is not a table or index");
break;
- case ATT_TABLE|ATT_VIEW:
+ case ATT_TABLE | ATT_VIEW:
msg = _("\"%s\" is not a table or view");
break;
- case ATT_TABLE|ATT_FOREIGN_TABLE:
+ case ATT_TABLE | ATT_FOREIGN_TABLE:
msg = _("\"%s\" is not a table or foreign table");
break;
- case ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE:
+ case ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE:
msg = _("\"%s\" is not a table, composite type, or foreign table");
break;
case ATT_VIEW:
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot alter type \"%s\" because it is the type of a typed table",
typeName),
- errhint("Use ALTER ... CASCADE to alter the typed tables too.")));
+ errhint("Use ALTER ... CASCADE to alter the typed tables too.")));
else
result = lappend_oid(result, HeapTupleGetOid(tuple));
}
/*
* Are we adding the column to a recursion child? If so, check whether to
- * merge with an existing definition for the column. If we do merge,
- * we must not recurse. Children will already have the column, and
- * recursing into them would mess up attinhcount.
+ * merge with an existing definition for the column. If we do merge, we
+ * must not recurse. Children will already have the column, and recursing
+ * into them would mess up attinhcount.
*/
if (colDef->inhcount > 0)
{
ereport(ERROR,
(errcode(ERRCODE_COLLATION_MISMATCH),
errmsg("child table \"%s\" has different collation for column \"%s\"",
- RelationGetRelationName(rel), colDef->colname),
+ RelationGetRelationName(rel), colDef->colname),
errdetail("\"%s\" versus \"%s\"",
get_collation_name(ccollid),
- get_collation_name(childatt->attcollation))));
+ get_collation_name(childatt->attcollation))));
/* If it's OID, child column must actually be OID */
if (isOid && childatt->attnum != ObjectIdAttributeNumber)
if (relkind == RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("default values on foreign tables are not supported")));
+ errmsg("default values on foreign tables are not supported")));
rawEnt = (RawColumnDefault *) palloc(sizeof(RawColumnDefault));
rawEnt->attnum = attribute.attnum;
elog(ERROR, "index \"%s\" is not unique", indexName);
/*
- * Determine name to assign to constraint. We require a constraint to
+ * Determine name to assign to constraint. We require a constraint to
* have the same name as the underlying index; therefore, use the index's
- * existing name as the default constraint name, and if the user explicitly
- * gives some other name for the constraint, rename the index to match.
+ * existing name as the default constraint name, and if the user
+ * explicitly gives some other name for the constraint, rename the index
+ * to match.
*/
constraintName = stmt->idxname;
if (constraintName == NULL)
*/
static void
ATExecAddConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
- Constraint *newConstraint, bool recurse, LOCKMODE lockmode)
+ Constraint *newConstraint, bool recurse, LOCKMODE lockmode)
{
Assert(IsA(newConstraint, Constraint));
/*
* If the constraint got merged with an existing constraint, we're done.
- * We mustn't recurse to child tables in this case, because they've already
- * got the constraint, and visiting them again would lead to an incorrect
- * value for coninhcount.
+ * We mustn't recurse to child tables in this case, because they've
+ * already got the constraint, and visiting them again would lead to an
+ * incorrect value for coninhcount.
*/
if (newcons == NIL)
return;
/*
* Tell Phase 3 to check that the constraint is satisfied by existing rows
- * We can skip this during table creation or if requested explicitly
- * by specifying NOT VALID on an alter table statement.
+ * We can skip this during table creation or if requested explicitly by
+ * specifying NOT VALID on an alter table statement.
*/
if (!fkconstraint->skip_validation)
{
if (!found)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("foreign key constraint \"%s\" of relation \"%s\" does not exist",
- constrName, RelationGetRelationName(rel))));
+ errmsg("foreign key constraint \"%s\" of relation \"%s\" does not exist",
+ constrName, RelationGetRelationName(rel))));
if (!con->convalidated)
{
Relation refrel;
/*
- * Triggers are already in place on both tables, so a
- * concurrent write that alters the result here is not
- * possible. Normally we can run a query here to do the
- * validation, which would only require AccessShareLock.
- * In some cases, it is possible that we might need to
- * fire triggers to perform the check, so we take a lock
- * at RowShareLock level just in case.
+ * Triggers are already in place on both tables, so a concurrent write
+ * that alters the result here is not possible. Normally we can run a
+ * query here to do the validation, which would only require
+ * AccessShareLock. In some cases, it is possible that we might need
+ * to fire triggers to perform the check, so we take a lock at
+ * RowShareLock level just in case.
*/
refrel = heap_open(con->confrelid, RowShareLock);
- validateForeignKeyConstraint((char *)constrName, rel, refrel,
+ validateForeignKeyConstraint((char *) constrName, rel, refrel,
con->conindid,
conid);
if (tab->relkind == RELKIND_RELATION)
{
/*
- * Set up an expression to transform the old data value to the new type.
- * If a USING option was given, transform and use that expression, else
- * just take the old value and try to coerce it. We do this first so that
- * type incompatibility can be detected before we waste effort, and
- * because we need the expression to be parsed against the original table
- * rowtype.
+ * Set up an expression to transform the old data value to the new
+ * type. If a USING option was given, transform and use that
+ * expression, else just take the old value and try to coerce it. We
+ * do this first so that type incompatibility can be detected before
+ * we waste effort, and because we need the expression to be parsed
+ * against the original table rowtype.
*/
if (transform)
{
if (expression_returns_set(transform))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("transform expression must not return a set")));
+ errmsg("transform expression must not return a set")));
/* No subplans or aggregates, either... */
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in transform expression")));
+ errmsg("cannot use subquery in transform expression")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
else
{
transform = (Node *) makeVar(1, attnum,
- attTup->atttypid, attTup->atttypmod, attTup->attcollation,
+ attTup->atttypid, attTup->atttypmod, attTup->attcollation,
0);
}
else if (transform)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("ALTER TYPE USING is only supported on plain tables")));
+ errmsg("ALTER TYPE USING is only supported on plain tables")));
if (tab->relkind == RELKIND_COMPOSITE_TYPE ||
tab->relkind == RELKIND_FOREIGN_TABLE)
{
/*
- * For composite types, do this check now. Tables will check
- * it later when the table is being rewritten.
+ * For composite types, do this check now. Tables will check it later
+ * when the table is being rewritten.
*/
find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL);
}
for (;;)
{
/* only one varno, so no need to check that */
- if (IsA(expr, Var) && ((Var *) expr)->varattno == varattno)
+ if (IsA(expr, Var) &&((Var *) expr)->varattno == varattno)
return false;
else if (IsA(expr, RelabelType))
expr = (Node *) ((RelabelType *) expr)->arg;
break;
case OCLASS_TRIGGER:
+
/*
* A trigger can depend on a column because the column is
* specified as an update target, or because the column is
* used in the trigger's WHEN condition. The first case would
* not require any extra work, but the second case would
* require updating the WHEN expression, which will take a
- * significant amount of new code. Since we can't easily tell
+ * significant amount of new code. Since we can't easily tell
* which case applies, we punt for both. FIXME someday.
*/
ereport(ERROR,
*/
static void
ATExecEnableDisableTrigger(Relation rel, char *trigname,
- char fires_when, bool skip_system, LOCKMODE lockmode)
+ char fires_when, bool skip_system, LOCKMODE lockmode)
{
EnableDisableTrigger(rel, trigname, fires_when, skip_system);
}
static void
ATExecGenericOptions(Relation rel, List *options)
{
- Relation ftrel;
- ForeignServer *server;
+ Relation ftrel;
+ ForeignServer *server;
ForeignDataWrapper *fdw;
- HeapTuple tuple;
- bool isnull;
- Datum repl_val[Natts_pg_foreign_table];
- bool repl_null[Natts_pg_foreign_table];
- bool repl_repl[Natts_pg_foreign_table];
- Datum datum;
- Form_pg_foreign_table tableform;
-
- if (options == NIL)
+ HeapTuple tuple;
+ bool isnull;
+ Datum repl_val[Natts_pg_foreign_table];
+ bool repl_null[Natts_pg_foreign_table];
+ bool repl_repl[Natts_pg_foreign_table];
+ Datum datum;
+ Form_pg_foreign_table tableform;
+
+ if (options == NIL)
return;
ftrel = heap_open(ForeignTableRelationId, RowExclusiveLock);
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("foreign table \"%s\" does not exist",
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
tableform = (Form_pg_foreign_table) GETSTRUCT(tuple);
server = GetForeignServer(tableform->ftserver);
fdw = GetForeignDataWrapper(server->fdwid);
default:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table, view, sequence, or foreign table",
- RelationGetRelationName(rel))));
+ errmsg("\"%s\" is not a table, view, sequence, or foreign table",
+ RelationGetRelationName(rel))));
}
/* get schema OID and check its permissions */
*/
static void
AlterSeqNamespaces(Relation classRel, Relation rel,
- Oid oldNspOid, Oid newNspOid, const char *newNspName, LOCKMODE lockmode)
+ Oid oldNspOid, Oid newNspOid, const char *newNspName, LOCKMODE lockmode)
{
Relation depRel;
SysScanDesc scan;
(errcode(ERRCODE_UNDEFINED_FILE),
errmsg("directory \"%s\" does not exist", location),
InRecovery ? errhint("Create this directory for the tablespace before "
- "restarting the server."): 0));
+ "restarting the server.") : 0));
else
ereport(ERROR,
(errcode_for_file_access(),
/*
* Our theory for replaying a CREATE is to forcibly drop the target
- * subdirectory if present, and then recreate it. This may be
- * more work than needed, but it is simple to implement.
+ * subdirectory if present, and then recreate it. This may be more
+ * work than needed, but it is simple to implement.
*/
if (stat(location_with_version_dir, &st) == 0 && S_ISDIR(st.st_mode))
{
heap_close(rel, AccessShareLock);
if (!OidIsValid(result) && !missing_ok)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace \"%s\" does not exist",
- tablespacename)));
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("tablespace \"%s\" does not exist",
+ tablespacename)));
return result;
}
referenced;
/*
- * ShareRowExclusiveLock is sufficient to prevent concurrent write activity
- * to the relation, and thus to lock out any operations that might want to
- * fire triggers on the relation. If we had ON SELECT triggers we would
- * need to take an AccessExclusiveLock to add one of those, just as we do
- * with ON SELECT rules.
+ * ShareRowExclusiveLock is sufficient to prevent concurrent write
+ * activity to the relation, and thus to lock out any operations that
+ * might want to fire triggers on the relation. If we had ON SELECT
+ * triggers we would need to take an AccessExclusiveLock to add one of
+ * those, just as we do with ON SELECT rules.
*/
rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
if (stmt->whenClause)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
+ errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
if (stmt->columns != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
* can skip this for internally generated triggers, since the name
* modification above should be sufficient.
*
- * NOTE that this is cool only because we have ShareRowExclusiveLock on the
- * relation, so the trigger set won't be changing underneath us.
+ * NOTE that this is cool only because we have ShareRowExclusiveLock on
+ * the relation, so the trigger set won't be changing underneath us.
*/
if (!isInternal)
{
if (!OidIsValid(object.objectId))
{
ereport(NOTICE,
- (errmsg("trigger \"%s\" for table \"%s\" does not exist, skipping",
- trigname, get_rel_name(relid))));
+ (errmsg("trigger \"%s\" for table \"%s\" does not exist, skipping",
+ trigname, get_rel_name(relid))));
return;
}
/*
* Open and lock the relation the trigger belongs to. As in
- * CreateTrigger, this is sufficient to lock out all operations that
- * could fire or add triggers; but it would need to be revisited if
- * we had ON SELECT triggers.
+ * CreateTrigger, this is sufficient to lock out all operations that could
+ * fire or add triggers; but it would need to be revisited if we had ON
+ * SELECT triggers.
*/
relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
if (newtuple != slottuple)
{
/*
- * Return the modified tuple using the es_trig_tuple_slot. We assume
+ * Return the modified tuple using the es_trig_tuple_slot. We assume
* the tuple was allocated in per-tuple memory context, and therefore
* will go away by itself. The tuple table slot should not try to
* clear it.
{
TriggerFlags ate_flags; /* status bits and offset to shared data */
ItemPointerData ate_ctid1; /* inserted, deleted, or old updated tuple */
-} AfterTriggerEventDataOneCtid;
+} AfterTriggerEventDataOneCtid;
#define SizeofTriggerEvent(evt) \
(((evt)->ate_flags & AFTER_TRIGGER_2CTIDS) ? \
void
AlterTSParserNamespace(List *name, const char *newschema)
{
- Oid prsId, nspOid;
+ Oid prsId,
+ nspOid;
Relation rel;
rel = heap_open(TSParserRelationId, RowExclusiveLock);
Oid
AlterTSParserNamespace_oid(Oid prsId, Oid newNspOid)
{
- Oid oldNspOid;
+ Oid oldNspOid;
Relation rel;
rel = heap_open(TSParserRelationId, RowExclusiveLock);
void
AlterTSDictionaryNamespace(List *name, const char *newschema)
{
- Oid dictId, nspOid;
+ Oid dictId,
+ nspOid;
Relation rel;
rel = heap_open(TSDictionaryRelationId, RowExclusiveLock);
Oid
AlterTSDictionaryNamespace_oid(Oid dictId, Oid newNspOid)
{
- Oid oldNspOid;
+ Oid oldNspOid;
Relation rel;
rel = heap_open(TSDictionaryRelationId, RowExclusiveLock);
void
AlterTSTemplateNamespace(List *name, const char *newschema)
{
- Oid tmplId, nspOid;
+ Oid tmplId,
+ nspOid;
Relation rel;
rel = heap_open(TSTemplateRelationId, RowExclusiveLock);
Oid
AlterTSTemplateNamespace_oid(Oid tmplId, Oid newNspOid)
{
- Oid oldNspOid;
+ Oid oldNspOid;
Relation rel;
rel = heap_open(TSTemplateRelationId, RowExclusiveLock);
void
AlterTSConfigurationNamespace(List *name, const char *newschema)
{
- Oid cfgId, nspOid;
+ Oid cfgId,
+ nspOid;
Relation rel;
rel = heap_open(TSConfigRelationId, RowExclusiveLock);
Oid
AlterTSConfigurationNamespace_oid(Oid cfgId, Oid newNspOid)
{
- Oid oldNspOid;
+ Oid oldNspOid;
Relation rel;
rel = heap_open(TSConfigRelationId, RowExclusiveLock);
DefElem *byValueEl = NULL;
DefElem *alignmentEl = NULL;
DefElem *storageEl = NULL;
- DefElem *collatableEl = NULL;
+ DefElem *collatableEl = NULL;
Oid inputOid;
Oid outputOid;
Oid receiveOid = InvalidOid;
* now have TypeCreate do all the real work.
*
* Note: the pg_type.oid is stored in user tables as array elements (base
- * types) in ArrayType and in composite types in DatumTupleFields. This
+ * types) in ArrayType and in composite types in DatumTupleFields. This
* oid must be preserved by binary upgrades.
*/
typoid =
-1, /* typMod (Domains only) */
0, /* Array dimensions of typbasetype */
false, /* Type NOT NULL */
- InvalidOid); /* typcollation */
+ InvalidOid); /* typcollation */
/* Enter the enum's values into pg_enum */
EnumValuesCreate(enumTypeOid, stmt->vals);
CONSTRAINT_CHECK, /* Constraint Type */
false, /* Is Deferrable */
false, /* Is Deferred */
- true, /* Is Validated */
+ true, /* Is Validated */
InvalidOid, /* not a relation constraint */
NULL,
0,
bool createrole = false; /* Can this user create roles? */
bool createdb = false; /* Can the user create databases? */
bool canlogin = false; /* Can this user login? */
- bool isreplication = false; /* Is this a replication role? */
+ bool isreplication = false; /* Is this a replication role? */
int connlimit = -1; /* maximum connections allowed */
List *addroleto = NIL; /* roles to make this a member of */
List *rolemembers = NIL; /* roles to be members of this role */
DefElem *dcreaterole = NULL;
DefElem *dcreatedb = NULL;
DefElem *dcanlogin = NULL;
- DefElem *disreplication = NULL;
+ DefElem *disreplication = NULL;
DefElem *dconnlimit = NULL;
DefElem *daddroleto = NULL;
DefElem *drolemembers = NULL;
if (dissuper)
{
issuper = intVal(dissuper->arg) != 0;
+
/*
- * Superusers get replication by default, but only if
- * NOREPLICATION wasn't explicitly mentioned
+ * Superusers get replication by default, but only if NOREPLICATION
+ * wasn't explicitly mentioned
*/
if (!(disreplication && intVal(disreplication->arg) == 0))
isreplication = 1;
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create replication users")));
+ errmsg("must be superuser to create replication users")));
}
else
{
tuple = heap_form_tuple(pg_authid_dsc, new_record, new_record_nulls);
/*
- * pg_largeobject_metadata contains pg_authid.oid's, so we
- * use the binary-upgrade override, if specified.
+ * pg_largeobject_metadata contains pg_authid.oid's, so we use the
+ * binary-upgrade override, if specified.
*/
if (OidIsValid(binary_upgrade_next_pg_authid_oid))
{
int createrole = -1; /* Can this user create roles? */
int createdb = -1; /* Can the user create databases? */
int canlogin = -1; /* Can this user login? */
- int isreplication = -1; /* Is this a replication role? */
+ int isreplication = -1; /* Is this a replication role? */
int connlimit = -1; /* maximum connections allowed */
List *rolemembers = NIL; /* roles to be added/removed */
char *validUntil = NULL; /* time the login is valid until */
DefElem *dcreaterole = NULL;
DefElem *dcreatedb = NULL;
DefElem *dcanlogin = NULL;
- DefElem *disreplication = NULL;
+ DefElem *disreplication = NULL;
DefElem *dconnlimit = NULL;
DefElem *drolemembers = NULL;
DefElem *dvalidUntil = NULL;
/*
* If we have discovered that there are no indexes, then there's no
- * primary key either. This could be done more thoroughly...
+ * primary key either. This could be done more thoroughly...
*/
if (pgcform->relhaspkey && !hasindex)
{
* There's a race condition here: the rel may have gone away since the
* last time we saw it. If so, we don't need to vacuum it.
*
- * If we've been asked not to wait for the relation lock, acquire it
- * first in non-blocking mode, before calling try_relation_open().
+ * If we've been asked not to wait for the relation lock, acquire it first
+ * in non-blocking mode, before calling try_relation_open().
*/
if (!(vacstmt->options & VACOPT_NOWAIT))
onerel = try_relation_open(relid, lmode);
if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
ereport(LOG,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("skipping vacuum of \"%s\" --- lock not available",
- vacstmt->relation->relname)));
+ errmsg("skipping vacuum of \"%s\" --- lock not available",
+ vacstmt->relation->relname)));
}
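
	/*
	 * Sketch of the non-blocking acquisition described in the comment above
	 * (illustrative only, assuming ConditionalLockRelationOid as the
	 * conditional lock primitive; not necessarily the exact code elided from
	 * this hunk).
	 */
	if (!(vacstmt->options & VACOPT_NOWAIT))
		onerel = try_relation_open(relid, lmode);	/* may wait for the lock */
	else if (ConditionalLockRelationOid(relid, lmode))
		onerel = try_relation_open(relid, NoLock);	/* lock already acquired */
	else
		onerel = NULL;			/* lock unavailable: skip this relation */
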
if (!onerel)
PageSetAllVisible(page);
SetBufferCommitInfoNeedsSave(buf);
}
+
/*
* It's possible for the value returned by GetOldestXmin() to move
* backwards, so it's not wrong for us to see tuples that appear to
* not be visible to everyone yet, while PD_ALL_VISIBLE is already
* set. The real safe xmin value never moves backwards, but
* GetOldestXmin() is conservative and sometimes returns a value
- * that's unnecessarily small, so if we see that contradiction it
- * just means that the tuples that we think are not visible to
- * everyone yet actually are, and the PD_ALL_VISIBLE flag is correct.
+ * that's unnecessarily small, so if we see that contradiction it just
+ * means that the tuples that we think are not visible to everyone yet
+ * actually are, and the PD_ALL_VISIBLE flag is correct.
*
* There should never be dead tuples on a page with PD_ALL_VISIBLE
* set, however.
* We can't simply "return check_datestyle(...)" because we need
* to handle constructs like "DEFAULT, ISO".
*/
- char *subval;
- void *subextra = NULL;
+ char *subval;
+ void *subextra = NULL;
subval = strdup(GetConfigOptionResetString("datestyle"));
if (!subval)
{
/*
* The boot_val given for TimeZone in guc.c is NULL. When we see this
- * we just do nothing. If this isn't overridden from the config file
+ * we just do nothing. If this isn't overridden from the config file
* then pg_timezone_initialize() will eventually select a default
- * value from the environment. This hack has two purposes: to avoid
+ * value from the environment. This hack has two purposes: to avoid
* wasting cycles loading values that might soon be overridden from
* the config file, and to avoid trying to read the timezone files
* during InitializeGUCOptions(). The latter doesn't work in an
if (pg_strncasecmp(*newval, "interval", 8) == 0)
{
/*
- * Support INTERVAL 'foo'. This is for SQL spec compliance, not
+ * Support INTERVAL 'foo'. This is for SQL spec compliance, not
* because it has any actual real-world usefulness.
*/
const char *valueptr = *newval;
*
* Note: the result string should be something that we'd accept as input.
* We use the numeric format for interval cases, because it's simpler to
- * reload. In the named-timezone case, *newval is already OK and need not
+ * reload. In the named-timezone case, *newval is already OK and need not
* be changed; it might not have the canonical casing, but that's taken
* care of by show_timezone.
*/
if (myextra.HasCTZSet)
{
- char *result = (char *) malloc(64);
+ char *result = (char *) malloc(64);
if (!result)
return false;
* We allow idempotent changes (r/w -> r/w and r/o -> r/o) at any time, and
* we also always allow changes from read-write to read-only. However,
* read-only may be changed to read-write only when in a top-level transaction
- * that has not yet taken an initial snapshot. Can't do it in a hot standby
+ * that has not yet taken an initial snapshot. Can't do it in a hot standby
* slave, either.
*/
bool
*
* We can't roll back the random sequence on error, and we don't want
* config file reloads to affect it, so we only want interactive SET SEED
- * commands to set it. We use the "extra" storage to ensure that rollbacks
+ * commands to set it. We use the "extra" storage to ensure that rollbacks
* don't try to do the operation again.
*/
{
/*
* Can't do catalog lookups, so fail. The result of this is that
- * session_authorization cannot be set in postgresql.conf, which
- * seems like a good thing anyway, so we don't work hard to avoid it.
+ * session_authorization cannot be set in postgresql.conf, which seems
+ * like a good thing anyway, so we don't work hard to avoid it.
*/
return false;
}
show_role(void)
{
/*
- * Check whether SET ROLE is active; if not return "none". This is a
+ * Check whether SET ROLE is active; if not return "none". This is a
* kluge to deal with the fact that SET SESSION AUTHORIZATION logically
* resets SET ROLE to NONE, but we cannot set the GUC role variable from
* assign_session_authorization (because we haven't got enough info to
def->colname = pstrdup(tle->resname);
def->typeName = makeTypeNameFromOid(exprType((Node *) tle->expr),
- exprTypmod((Node *) tle->expr));
+ exprTypmod((Node *) tle->expr));
def->inhcount = 0;
def->is_local = true;
def->is_not_null = false;
def->cooked_default = NULL;
def->collClause = NULL;
def->collOid = exprCollation((Node *) tle->expr);
+
/*
* It's possible that the column is of a collatable type but the
* collation could not be resolved, so double-check.
}
else
{
- Oid relid;
+ Oid relid;
/*
* now set the parameters for keys/inheritance etc. All of these are
/*
* Check for unsupported cases. These tests are redundant with ones in
- * DefineQueryRewrite(), but that function will complain about a bogus
- * ON SELECT rule, and we'd rather the message complain about a view.
+ * DefineQueryRewrite(), but that function will complain about a bogus ON
+ * SELECT rule, and we'd rather the message complain about a view.
*/
if (viewParse->intoClause != NULL)
ereport(ERROR,
if (viewParse->hasModifyingCTE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("views must not contain data-modifying statements in WITH")));
+ errmsg("views must not contain data-modifying statements in WITH")));
/*
* If a list of column names was given, run through and insert these into
if (view->relpersistence == RELPERSISTENCE_UNLOGGED)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("views cannot be unlogged because they do not have storage")));
+ errmsg("views cannot be unlogged because they do not have storage")));
/*
* Create the view relation
* ExecutorRun accepts direction and count arguments that specify whether
* the plan is to be executed forwards, backwards, and for how many tuples.
* In some cases ExecutorRun may be called multiple times to process all
- * the tuples for a plan. It is also acceptable to stop short of executing
+ * the tuples for a plan. It is also acceptable to stop short of executing
* the whole plan (but only if it is a SELECT).
*
* ExecutorFinish must be called after the final ExecutorRun call and
switch (queryDesc->operation)
{
case CMD_SELECT:
+
/*
* SELECT INTO, SELECT FOR UPDATE/SHARE and modifying CTEs need to
* mark tuples
* ExecutorFinish
*
* This routine must be called after the last ExecutorRun call.
- * It performs cleanup such as firing AFTER triggers. It is
+ * It performs cleanup such as firing AFTER triggers. It is
* separate from ExecutorEnd because EXPLAIN ANALYZE needs to
* include these actions in the total runtime.
*
* We provide a function hook variable that lets loadable plugins
- * get control when ExecutorFinish is called. Such a plugin would
+ * get control when ExecutorFinish is called. Such a plugin would
* normally call standard_ExecutorFinish().
*
* ----------------------------------------------------------------
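
/*
 * For orientation (illustrative only, not from the patch): a caller that
 * runs a query to completion steps through the sequence below, assuming
 * queryDesc was created beforehand; a count of 0 means "fetch all tuples".
 */
	ExecutorStart(queryDesc, 0);
	ExecutorRun(queryDesc, ForwardScanDirection, 0L);
	ExecutorFinish(queryDesc);	/* fires AFTER triggers, etc. */
	ExecutorEnd(queryDesc);
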
Assert(estate != NULL);
/*
- * Check that ExecutorFinish was called, unless in EXPLAIN-only mode.
- * This Assert is needed because ExecutorFinish is new as of 9.1, and
- * callers might forget to call it.
+ * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
+ * Assert is needed because ExecutorFinish is new as of 9.1, and callers
+ * might forget to call it.
*/
Assert(estate->es_finished ||
(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
foreach(l, rangeTable)
{
- RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
result = ExecCheckRTEPerms(rte);
if (!result)
}
if (ExecutorCheckPerms_hook)
- result = (*ExecutorCheckPerms_hook)(rangeTable,
- ereport_on_violation);
+ result = (*ExecutorCheckPerms_hook) (rangeTable,
+ ereport_on_violation);
return result;
}
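
/*
 * Illustrative sketch of a module that installs the hook called above
 * (hypothetical extension; the 64-relation limit and all names below are
 * invented).  It follows the usual pattern: remember any previous hook and
 * chain to it before applying its own check.
 */
#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

static ExecutorCheckPerms_hook_type prev_check_perms_hook = NULL;

static bool
limit_rangetable_size(List *rangeTable, bool ereport_on_violation)
{
	/* chain to any previously installed hook first */
	if (prev_check_perms_hook &&
		!(*prev_check_perms_hook) (rangeTable, ereport_on_violation))
		return false;

	if (list_length(rangeTable) > 64)
	{
		if (ereport_on_violation)
			ereport(ERROR,
					(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
					 errmsg("query touches too many relations")));
		return false;
	}
	return true;
}

void
_PG_init(void)
{
	prev_check_perms_hook = ExecutorCheckPerms_hook;
	ExecutorCheckPerms_hook = limit_rangetable_size;
}
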
void
CheckValidResultRel(Relation resultRel, CmdType operation)
{
- TriggerDesc *trigDesc = resultRel->trigdesc;
+ TriggerDesc *trigDesc = resultRel->trigdesc;
switch (resultRel->rd_rel->relkind)
{
case CMD_INSERT:
if (!trigDesc || !trigDesc->trig_insert_instead_row)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot insert into view \"%s\"",
- RelationGetRelationName(resultRel)),
- errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot insert into view \"%s\"",
+ RelationGetRelationName(resultRel)),
+ errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger.")));
break;
case CMD_UPDATE:
if (!trigDesc || !trigDesc->trig_update_instead_row)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot update view \"%s\"",
- RelationGetRelationName(resultRel)),
- errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot update view \"%s\"",
+ RelationGetRelationName(resultRel)),
+ errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger.")));
break;
case CMD_DELETE:
if (!trigDesc || !trigDesc->trig_delete_instead_row)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot delete from view \"%s\"",
- RelationGetRelationName(resultRel)),
- errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot delete from view \"%s\"",
+ RelationGetRelationName(resultRel)),
+ errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger.")));
break;
default:
elog(ERROR, "unrecognized CmdType: %d", (int) operation);
/*
* Open the target relation's relcache entry. We assume that an
* appropriate lock is still held by the backend from whenever the trigger
- * event got queued, so we need take no new lock here. Also, we need
- * not recheck the relkind, so no need for CheckValidResultRel.
+ * event got queued, so we need take no new lock here. Also, we need not
+ * recheck the relkind, so no need for CheckValidResultRel.
*/
rel = heap_open(relid, NoLock);
/*
* Run any secondary ModifyTable nodes to completion, in case the main
- * query did not fetch all rows from them. (We do this to ensure that
+ * query did not fetch all rows from them. (We do this to ensure that
* such nodes have predictable results.)
*/
foreach(lc, estate->es_auxmodifytables)
{
- PlanState *ps = (PlanState *) lfirst(lc);
+ PlanState *ps = (PlanState *) lfirst(lc);
for (;;)
{
* ExecInitSubPlan expects to be able to find these entries. Some of the
* SubPlans might not be used in the part of the plan tree we intend to
* run, but since it's not easy to tell which, we just initialize them
- * all. (However, if the subplan is headed by a ModifyTable node, then
- * it must be a data-modifying CTE, which we will certainly not need to
- * re-run, so we can skip initializing it. This is just an efficiency
+ * all. (However, if the subplan is headed by a ModifyTable node, then it
+ * must be a data-modifying CTE, which we will certainly not need to
+ * re-run, so we can skip initializing it. This is just an efficiency
* hack; it won't skip data-modifying CTEs for which the ModifyTable node
* is not at the top.)
*/
static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalParamExec(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalParamExtern(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static void init_fcache(Oid foid, Oid input_collation, FuncExprState *fcache,
MemoryContext fcacheCxt, bool needDescForSets);
static void ShutdownFuncExpr(Datum arg);
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("no value found for parameter %d", thisParamId)));
- return (Datum) 0; /* keep compiler quiet */
+ return (Datum) 0; /* keep compiler quiet */
}
/*
* Ordinarily, at this point the search should have found the originally
* inserted tuple, unless we exited the loop early because of conflict.
- * However, it is possible to define exclusion constraints for which
- * that wouldn't be true --- for instance, if the operator is <>.
- * So we no longer complain if found_self is still false.
+ * However, it is possible to define exclusion constraints for which that
+ * wouldn't be true --- for instance, if the operator is <>. So we no
+ * longer complain if found_self is still false.
*/
econtext->ecxt_scantuple = save_scantuple;
char *fname; /* function name (for error msgs) */
char *src; /* function body text (for error msgs) */
- SQLFunctionParseInfoPtr pinfo; /* data for parser callback hooks */
+ SQLFunctionParseInfoPtr pinfo; /* data for parser callback hooks */
Oid rettype; /* actual return type */
int16 typlen; /* length of the return type */
Oid *argtypes; /* resolved types of input arguments */
int nargs; /* number of input arguments */
Oid collation; /* function's input collation, if known */
-} SQLFunctionParseInfo;
+} SQLFunctionParseInfo;
/* non-export function prototypes */
* Set up the per-query execution_state records for a SQL function.
*
* The input is a List of Lists of parsed and rewritten, but not planned,
- * querytrees. The sublist structure denotes the original query boundaries.
+ * querytrees. The sublist structure denotes the original query boundaries.
*/
static List *
init_execution_state(List *queryTree_list,
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/* translator: %s is a SQL statement name */
- errmsg("%s is not allowed in a non-volatile function",
- CreateCommandTag(stmt))));
+ errmsg("%s is not allowed in a non-volatile function",
+ CreateCommandTag(stmt))));
/* OK, build the execution_state for this query */
newes = (execution_state *) palloc(sizeof(execution_state));
newes->next = NULL;
newes->status = F_EXEC_START;
- newes->setsResult = false; /* might change below */
- newes->lazyEval = false; /* might change below */
+ newes->setsResult = false; /* might change below */
+ newes->lazyEval = false; /* might change below */
newes->stmt = stmt;
newes->qd = NULL;
fcache->src = TextDatumGetCString(tmp);
/*
- * Parse and rewrite the queries in the function text. Use sublists to
+ * Parse and rewrite the queries in the function text. Use sublists to
* keep track of the original query boundaries. But we also build a
* "flat" list of the rewritten queries to pass to check_sql_fn_retval.
* This is because the last canSetTag query determines the result type
queryTree_sublist = pg_analyze_and_rewrite_params(parsetree,
fcache->src,
- (ParserSetupHook) sql_fn_parser_setup,
+ (ParserSetupHook) sql_fn_parser_setup,
fcache->pinfo);
queryTree_list = lappend(queryTree_list, queryTree_sublist);
flat_query_list = list_concat(flat_query_list,
{
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (nargs - 1) *sizeof(ParamExternData));
+ (nargs - 1) * sizeof(ParamExternData));
/* we have static list of params, so no hooks needed */
paramLI->paramFetch = NULL;
paramLI->paramFetchArg = NULL;
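
/*
 * The "(n - 1) * sizeof(...)" arithmetic above (and in the hash-aggregate
 * entry sizing later in this patch) works because the struct declares a
 * one-element array as its last member, so sizeof() already covers the first
 * element.  Standalone sketch of the same trick with invented types:
 */
#include <stdlib.h>

typedef struct
{
	int			value;
} Item;

typedef struct
{
	int			numItems;
	Item		items[1];		/* variable length; must be last */
} ItemList;

static ItemList *
make_item_list(int n)
{
	ItemList   *list = malloc(sizeof(ItemList) + (n - 1) * sizeof(Item));

	list->numItems = n;
	return list;
}
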
execution_state *es;
TupleTableSlot *slot;
Datum result;
- List *eslist;
- ListCell *eslc;
+ List *eslist;
+ ListCell *eslc;
/*
* Switch to context in which the fcache lives. This ensures that
*
* In a non-read-only function, we rely on the fact that we'll never
* suspend execution between queries of the function: the only reason to
- * suspend execution before completion is if we are returning a row from
- * a lazily-evaluated SELECT. So, when first entering this loop, we'll
+ * suspend execution before completion is if we are returning a row from a
+ * lazily-evaluated SELECT. So, when first entering this loop, we'll
* either start a new query (and push a fresh snapshot) or re-establish
- * the active snapshot from the existing query descriptor. If we need to
+ * the active snapshot from the existing query descriptor. If we need to
* start a new query in a subsequent execution of the loop, either we need
* a fresh snapshot (and pushed_snapshot is false) or the existing
* snapshot is on the active stack and we can just bump its command ID.
es = (execution_state *) lfirst(eslc);
/*
- * Flush the current snapshot so that we will take a new one
- * for the new query list. This ensures that new snaps are
- * taken at original-query boundaries, matching the behavior
- * of interactive execution.
+ * Flush the current snapshot so that we will take a new one for
+ * the new query list. This ensures that new snaps are taken at
+ * original-query boundaries, matching the behavior of interactive
+ * execution.
*/
if (pushed_snapshot)
{
{
SQLFunctionCachePtr fcache = (SQLFunctionCachePtr) DatumGetPointer(arg);
execution_state *es;
- ListCell *lc;
+ ListCell *lc;
foreach(lc, fcache->func_state)
{
* the function that's calling it.
*
* XXX Note that if rettype is RECORD, the IsBinaryCoercible check
- * will succeed for any composite restype. For the moment we rely on
+ * will succeed for any composite restype. For the moment we rely on
* runtime type checking to catch any discrepancy, but it'd be nice to
* do better at parse time.
*/
tle->expr = (Expr *) makeRelabelType(tle->expr,
rettype,
-1,
- get_typcollation(rettype),
+ get_typcollation(rettype),
COERCE_DONTCARE);
/* Relabel is dangerous if sort/group or setop column */
if (tle->ressortgroupref != 0 || parse->setOperations)
tle->expr = (Expr *) makeRelabelType(tle->expr,
atttype,
-1,
- get_typcollation(atttype),
+ get_typcollation(atttype),
COERCE_DONTCARE);
/* Relabel is dangerous if sort/group or setop column */
if (tle->ressortgroupref != 0 || parse->setOperations)
*/
Tuplesortstate *sortstate; /* sort object, if DISTINCT or ORDER BY */
-} AggStatePerAggData;
+} AggStatePerAggData;
/*
* AggStatePerGroupData - per-aggregate-per-group working state
TupleHashEntryData shared; /* common header for hash table entries */
/* per-aggregate transition status array - must be last! */
AggStatePerGroupData pergroup[1]; /* VARIABLE LENGTH ARRAY */
-} AggHashEntryData; /* VARIABLE LENGTH STRUCT */
+} AggHashEntryData; /* VARIABLE LENGTH STRUCT */
static void initialize_aggregates(AggState *aggstate,
Assert(node->numGroups > 0);
entrysize = sizeof(AggHashEntryData) +
- (aggstate->numaggs - 1) *sizeof(AggStatePerGroupData);
+ (aggstate->numaggs - 1) * sizeof(AggStatePerGroupData);
aggstate->hashtable = BuildTupleHashTable(node->numCols,
node->grpColIdx,
/* This must match build_hash_table */
entrysize = sizeof(AggHashEntryData) +
- (numAggs - 1) *sizeof(AggStatePerGroupData);
+ (numAggs - 1) * sizeof(AggStatePerGroupData);
entrysize = MAXALIGN(entrysize);
/* Account for hashtable overhead (assuming fill factor = 1) */
entrysize += 3 * sizeof(void *);
indexstate->biss_NumScanKeys);
/*
- * If no run-time keys to calculate, go ahead and pass the scankeys to
- * the index AM.
+ * If no run-time keys to calculate, go ahead and pass the scankeys to the
+ * index AM.
*/
if (indexstate->biss_NumRuntimeKeys == 0 &&
indexstate->biss_NumArrayKeys == 0)
ForeignNext(ForeignScanState *node)
{
TupleTableSlot *slot;
- ForeignScan *plan = (ForeignScan *) node->ss.ps.plan;
+ ForeignScan *plan = (ForeignScan *) node->ss.ps.plan;
ExprContext *econtext = node->ss.ps.ps_ExprContext;
MemoryContext oldcontext;
ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
{
/*
- *----------
- * During this scan we use the HashJoinState fields as follows:
+	 * ----------
+	 * During this scan we use the HashJoinState fields as follows:
*
- * hj_CurBucketNo: next regular bucket to scan
- * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
- * hj_CurTuple: last tuple returned, or NULL to start next bucket
- *----------
+	 * hj_CurBucketNo: next regular bucket to scan
+	 * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
+	 * hj_CurTuple: last tuple returned, or NULL to start next bucket
+	 * ----------
*/
hjstate->hj_CurBucketNo = 0;
hjstate->hj_CurSkewBucketNo = 0;
}
else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
{
- int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
+ int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
hashTuple = hashtable->skewBucket[j]->tuples;
hjstate->hj_CurSkewBucketNo++;
/* insert hashtable's tuple into exec slot */
inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
hjstate->hj_HashTupleSlot,
- false); /* do not pfree */
+ false); /* do not pfree */
econtext->ecxt_innertuple = inntuple;
/*
/* ... and the same for the skew buckets, if any */
for (i = 0; i < hashtable->nSkewBuckets; i++)
{
- int j = hashtable->skewBucketNums[i];
+ int j = hashtable->skewBucketNums[i];
HashSkewBucket *skewBucket = hashtable->skewBucket[j];
for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next)
switch (node->hj_JoinState)
{
case HJ_BUILD_HASHTABLE:
+
/*
* First time through: build hash table for inner relation.
*/
* right/full join, we can quit without building the hash
* table. However, for an inner join it is only a win to
* check this when the outer relation's startup cost is less
- * than the projected cost of building the hash
- * table. Otherwise it's best to build the hash table first
- * and see if the inner relation is empty. (When it's a left
- * join, we should always make this check, since we aren't
- * going to be able to skip the join on the strength of an
- * empty inner relation anyway.)
+ * than the projected cost of building the hash table.
+ * Otherwise it's best to build the hash table first and see
+ * if the inner relation is empty. (When it's a left join, we
+ * should always make this check, since we aren't going to be
+ * able to skip the join on the strength of an empty inner
+ * relation anyway.)
*
* If we are rescanning the join, we make use of information
* gained on the previous scan: don't bother to try the
return NULL;
/*
- * need to remember whether nbatch has increased since we began
- * scanning the outer relation
+ * need to remember whether nbatch has increased since we
+ * began scanning the outer relation
*/
hashtable->nbatch_outstart = hashtable->nbatch;
/* FALL THRU */
case HJ_NEED_NEW_OUTER:
+
/*
* We don't have an outer tuple, try to get the next one
*/
Assert(batchno > hashtable->curbatch);
ExecHashJoinSaveTuple(ExecFetchSlotMinimalTuple(outerTupleSlot),
hashvalue,
- &hashtable->outerBatchFile[batchno]);
+ &hashtable->outerBatchFile[batchno]);
/* Loop around, staying in HJ_NEED_NEW_OUTER state */
continue;
}
/* FALL THRU */
case HJ_SCAN_BUCKET:
+
/*
* Scan the selected hash bucket for matches to current outer
*/
}
/*
- * In a semijoin, we'll consider returning the first match,
- * but after that we're done with this outer tuple.
+ * In a semijoin, we'll consider returning the first
+ * match, but after that we're done with this outer tuple.
*/
if (node->js.jointype == JOIN_SEMI)
node->hj_JoinState = HJ_NEED_NEW_OUTER;
break;
case HJ_FILL_OUTER_TUPLE:
+
/*
* The current outer tuple has run out of matches, so check
- * whether to emit a dummy outer-join tuple. Whether we
- * emit one or not, the next state is NEED_NEW_OUTER.
+ * whether to emit a dummy outer-join tuple. Whether we emit
+ * one or not, the next state is NEED_NEW_OUTER.
*/
node->hj_JoinState = HJ_NEED_NEW_OUTER;
break;
case HJ_FILL_INNER_TUPLES:
+
/*
* We have finished a batch, but we are doing right/full join,
* so any unmatched inner tuples in the hashtable have to be
break;
case HJ_NEED_NEW_BATCH:
+
/*
* Try to advance to next batch. Done if there are no more.
*/
if (!ExecHashJoinNewBatch(node))
- return NULL; /* end of join */
+ return NULL; /* end of join */
node->hj_JoinState = HJ_NEED_NEW_OUTER;
break;
}
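
/*
 * Toy model of the state machine above (illustrative only): a single-batch,
 * in-memory hash join is "build a hash table from the inner side once, then
 * probe it for every outer tuple".  Integer keys, inner join semantics, no
 * batching or skew handling; all names below are invented.
 */
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 8

typedef struct HashEntry
{
	int			key;
	const char *payload;
	struct HashEntry *next;
} HashEntry;

static HashEntry *buckets[NBUCKETS];

static void
build_inner(int key, const char *payload)
{
	HashEntry  *e = malloc(sizeof(HashEntry));
	int			b = key % NBUCKETS;

	e->key = key;
	e->payload = payload;
	e->next = buckets[b];		/* push onto this bucket's chain */
	buckets[b] = e;
}

static void
probe_outer(int key, const char *payload)
{
	HashEntry  *e;

	for (e = buckets[key % NBUCKETS]; e != NULL; e = e->next)
		if (e->key == key)
			printf("match: outer(%s) inner(%s)\n", payload, e->payload);
}

int
main(void)
{
	/* HJ_BUILD_HASHTABLE: scan the inner relation once */
	build_inner(1, "one");
	build_inner(2, "two");

	/* HJ_NEED_NEW_OUTER / HJ_SCAN_BUCKET: probe once per outer tuple */
	probe_outer(2, "deux");
	probe_outer(3, "trois");	/* no match; an outer join would null-extend here */
	return 0;
}
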
if (curbatch >= nbatch)
- return false; /* no more batches */
+ return false; /* no more batches */
hashtable->curbatch = curbatch;
if (BufFileSeek(hashtable->outerBatchFile[curbatch], 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not rewind hash-join temporary file: %m")));
+ errmsg("could not rewind hash-join temporary file: %m")));
}
return true;
ExecHashTableResetMatchFlags(node->hj_HashTable);
/*
- * Also, we need to reset our state about the emptiness of
- * the outer relation, so that the new scan of the outer will
- * update it correctly if it turns out to be empty this time.
- * (There's no harm in clearing it now because ExecHashJoin won't
- * need the info. In the other cases, where the hash table
- * doesn't exist or we are destroying it, we leave this state
- * alone because ExecHashJoin will need it the first time
- * through.)
+ * Also, we need to reset our state about the emptiness of the
+ * outer relation, so that the new scan of the outer will update
+ * it correctly if it turns out to be empty this time. (There's no
+ * harm in clearing it now because ExecHashJoin won't need the
+ * info. In the other cases, where the hash table doesn't exist
+ * or we are destroying it, we leave this state alone because
+ * ExecHashJoin will need it the first time through.)
*/
node->hj_OuterNotEmpty = false;
/*
* For each run-time key, extract the run-time expression and evaluate
- * it with respect to the current context. We then stick the result
+ * it with respect to the current context. We then stick the result
* into the proper scan key.
*
* Note: the result of the eval could be a pass-by-ref value that's
indexstate->iss_RelationDesc,
estate->es_snapshot,
indexstate->iss_NumScanKeys,
- indexstate->iss_NumOrderByKeys);
+ indexstate->iss_NumOrderByKeys);
/*
- * If no run-time keys to calculate, go ahead and pass the scankeys to
- * the index AM.
+ * If no run-time keys to calculate, go ahead and pass the scankeys to the
+ * index AM.
*/
if (indexstate->iss_NumRuntimeKeys == 0)
index_rescan(indexstate->iss_ScanDesc,
indexstate->iss_ScanKeys, indexstate->iss_NumScanKeys,
- indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys);
+ indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys);
/*
* all done.
scan_keys = (ScanKey) palloc(n_scan_keys * sizeof(ScanKeyData));
/*
- * runtime_keys array is dynamically resized as needed. We handle it
- * this way so that the same runtime keys array can be shared between
- * indexquals and indexorderbys, which will be processed in separate
- * calls of this function. Caller must be sure to pass in NULL/0 for
- * first call.
+ * runtime_keys array is dynamically resized as needed. We handle it this
+ * way so that the same runtime keys array can be shared between
+ * indexquals and indexorderbys, which will be processed in separate calls
+ * of this function. Caller must be sure to pass in NULL/0 for first
+ * call.
*/
runtime_keys = *runtimeKeys;
n_runtime_keys = max_runtime_keys = *numRuntimeKeys;
else if (IsA(child_node, ResultState))
{
/*
- * An extra consideration here is that if the Result is projecting
- * a targetlist that contains any SRFs, we can't assume that every
- * input tuple generates an output tuple, so a Sort underneath
- * might need to return more than N tuples to satisfy LIMIT N.
- * So we cannot use bounded sort.
+ * An extra consideration here is that if the Result is projecting a
+ * targetlist that contains any SRFs, we can't assume that every input
+ * tuple generates an output tuple, so a Sort underneath might need to
+ * return more than N tuples to satisfy LIMIT N. So we cannot use
+ * bounded sort.
*
- * If Result supported qual checking, we'd have to punt on seeing
- * a qual, too. Note that having a resconstantqual is not a
+ * If Result supported qual checking, we'd have to punt on seeing a
+ * qual, too. Note that having a resconstantqual is not a
* showstopper: if that fails we're not getting any rows at all.
*/
if (outerPlanState(child_node) &&
/*
* Locate the ExecRowMark(s) that this node is responsible for, and
- * construct ExecAuxRowMarks for them. (InitPlan should already have
+ * construct ExecAuxRowMarks for them. (InitPlan should already have
* built the global list of ExecRowMarks.)
*/
lrstate->lr_arowMarks = NIL;
* contains integers which index into the slots array. These typedefs try to
* clear it up, but they're only documentation.
*/
-typedef int SlotNumber;
-typedef int HeapPosition;
+typedef int SlotNumber;
+typedef int HeapPosition;
static void heap_insert_slot(MergeAppendState *node, SlotNumber new_slot);
static void heap_siftup_slot(MergeAppendState *node);
* initialize sort-key information
*/
mergestate->ms_nkeys = node->numCols;
- mergestate->ms_scankeys = palloc0(sizeof(ScanKeyData) * node->numCols);
+ mergestate->ms_scankeys = palloc0(sizeof(ScanKeyData) * node->numCols);
for (i = 0; i < node->numCols; i++)
{
- Oid sortFunction;
- bool reverse;
- int flags;
+ Oid sortFunction;
+ bool reverse;
+ int flags;
if (!get_compare_function_for_ordering_op(node->sortOperators[i],
&sortFunction, &reverse))
if (!node->ms_initialized)
{
/*
- * First time through: pull the first tuple from each subplan,
- * and set up the heap.
+ * First time through: pull the first tuple from each subplan, and set
+ * up the heap.
*/
for (i = 0; i < node->ms_nplans; i++)
{
j = node->ms_heap_size++; /* j is where the "hole" is */
while (j > 0)
{
- int i = (j-1)/2;
+ int i = (j - 1) / 2;
if (heap_compare_slots(node, new_slot, node->ms_heap[i]) >= 0)
break;
i = 0; /* i is where the "hole" is */
for (;;)
{
- int j = 2 * i + 1;
+ int j = 2 * i + 1;
if (j >= n)
break;
- if (j+1 < n && heap_compare_slots(node, heap[j], heap[j+1]) > 0)
+ if (j + 1 < n && heap_compare_slots(node, heap[j], heap[j + 1]) > 0)
j++;
if (heap_compare_slots(node, heap[n], heap[j]) <= 0)
break;
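
/*
 * The "(j - 1) / 2" and "2 * i + 1" expressions above are the parent/child
 * index arithmetic of an array-backed binary heap.  Standalone toy version of
 * the same insert and sift-down steps, with plain ints standing in for tuple
 * slots (illustrative only; all names invented):
 */
#include <stdio.h>

static int	heap[16];
static int	heap_size = 0;

static void
heap_insert(int value)
{
	int			j = heap_size++;	/* j is where the "hole" is */

	while (j > 0)
	{
		int			i = (j - 1) / 2;	/* parent of j */

		if (value >= heap[i])
			break;
		heap[j] = heap[i];		/* pull the parent down into the hole */
		j = i;
	}
	heap[j] = value;
}

static void
heap_siftup(void)
{
	int			n = --heap_size;	/* last element gets reinserted */
	int			i = 0;			/* i is where the "hole" is */

	for (;;)
	{
		int			j = 2 * i + 1;	/* left child */

		if (j >= n)
			break;
		if (j + 1 < n && heap[j + 1] < heap[j])
			j++;				/* pick the smaller child */
		if (heap[n] <= heap[j])
			break;
		heap[i] = heap[j];
		i = j;
	}
	heap[i] = heap[n];
}

int
main(void)
{
	int			vals[] = {5, 1, 4, 2, 3};
	int			k;

	for (k = 0; k < 5; k++)
		heap_insert(vals[k]);
	while (heap_size > 0)
	{
		printf("%d ", heap[0]);	/* smallest first: prints 1 2 3 4 5 */
		heap_siftup();
	}
	printf("\n");
	return 0;
}
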
for (nkey = 0; nkey < node->ms_nkeys; nkey++)
{
- ScanKey scankey = node->ms_scankeys + nkey;
- AttrNumber attno = scankey->sk_attno;
- Datum datum1,
- datum2;
- bool isNull1,
- isNull2;
- int32 compare;
+ ScanKey scankey = node->ms_scankeys + nkey;
+ AttrNumber attno = scankey->sk_attno;
+ Datum datum1,
+ datum2;
+ bool isNull1,
+ isNull2;
+ int32 compare;
datum1 = slot_getattr(s1, attno, &isNull1);
datum2 = slot_getattr(s2, attno, &isNull2);
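[Editorial note: the comparison loop above walks the sort keys in order, fetching both values and their null flags for each key and stopping at the first key that differs. A rough standalone sketch of that multi-key comparison, using a hypothetical row layout with explicit null flags:]

#include <stdbool.h>

#define NKEYS 2

typedef struct
{
    int     val[NKEYS];
    bool    isnull[NKEYS];
} Row;

typedef struct
{
    bool    reverse;        /* descending order? */
    bool    nulls_first;    /* do nulls sort before non-nulls? */
} SortKey;

static int
compare_rows(const Row *a, const Row *b, const SortKey *keys)
{
    int     k;

    for (k = 0; k < NKEYS; k++)
    {
        int     c;

        if (a->isnull[k] && b->isnull[k])
            continue;                       /* both null: equal on this key */
        if (a->isnull[k])
            c = keys[k].nulls_first ? -1 : 1;
        else if (b->isnull[k])
            c = keys[k].nulls_first ? 1 : -1;
        else
        {
            c = (a->val[k] > b->val[k]) - (a->val[k] < b->val[k]);
            if (keys[k].reverse)
                c = -c;
        }
        if (c != 0)
            return c;                       /* first differing key decides */
    }
    return 0;
}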
bool reverse; /* if true, negate the cmpfn's output */
bool nulls_first; /* if true, nulls sort low */
FmgrInfo cmpfinfo;
-} MergeJoinClauseData;
+} MergeJoinClauseData;
/* Result type for MJEvalOuterValues and MJEvalInnerValues */
typedef enum
*
* If we generate a new candidate tuple after EvalPlanQual testing, we
* must loop back here and recheck constraints. (We don't need to
- * redo triggers, however. If there are any BEFORE triggers then
+ * redo triggers, however. If there are any BEFORE triggers then
* trigger.c will have done heap_lock_tuple to lock the correct tuple,
* so there's no need to do them again.)
*/
/*
* Note: instead of having to update the old index tuples associated
- * with the heap tuple, all we do is form and insert new index
- * tuples. This is because UPDATEs are actually DELETEs and INSERTs,
- * and index tuple deletion is done later by VACUUM (see notes in
- * ExecDelete). All we do here is insert new index tuples. -cim
- * 9/27/89
+ * with the heap tuple, all we do is form and insert new index tuples.
+ * This is because UPDATEs are actually DELETEs and INSERTs, and index
+ * tuple deletion is done later by VACUUM (see notes in ExecDelete).
+ * All we do here is insert new index tuples. -cim 9/27/89
*/
/*
TupleTableSlot *planSlot;
ItemPointer tupleid = NULL;
ItemPointerData tuple_ctid;
- HeapTupleHeader oldtuple = NULL;
+ HeapTupleHeader oldtuple = NULL;
/*
* If we've already completed processing, don't try to do more. We need
/*
* es_result_relation_info must point to the currently active result
- * relation while we are within this ModifyTable node. Even though
+ * relation while we are within this ModifyTable node. Even though
* ModifyTable nodes can't be nested statically, they can be nested
* dynamically (since our subplan could include a reference to a modifying
* CTE). So we have to save and restore the caller's value.
for (;;)
{
/*
- * Reset the per-output-tuple exprcontext. This is needed because
+ * Reset the per-output-tuple exprcontext. This is needed because
* triggers expect to use that context as workspace. It's a bit ugly
* to do this below the top level of the plan, however. We might need
* to rethink this later.
elog(ERROR, "ctid is NULL");
tupleid = (ItemPointer) DatumGetPointer(datum);
- tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
+ tuple_ctid = *tupleid; /* be sure we don't free
+ * ctid!! */
tupleid = &tuple_ctid;
}
else
break;
case CMD_UPDATE:
slot = ExecUpdate(tupleid, oldtuple, slot, planSlot,
- &node->mt_epqstate, estate, node->canSetTag);
+ &node->mt_epqstate, estate, node->canSetTag);
break;
case CMD_DELETE:
slot = ExecDelete(tupleid, oldtuple, planSlot,
- &node->mt_epqstate, estate, node->canSetTag);
+ &node->mt_epqstate, estate, node->canSetTag);
break;
default:
elog(ERROR, "unknown operation");
/*
* call ExecInitNode on each of the plans to be executed and save the
- * results into the array "mt_plans". This is also a convenient place
- * to verify that the proposed target relations are valid and open their
- * indexes for insertion of new index entries. Note we *must* set
+ * results into the array "mt_plans". This is also a convenient place to
+ * verify that the proposed target relations are valid and open their
+ * indexes for insertion of new index entries. Note we *must* set
* estate->es_result_relation_info correctly while we initialize each
* sub-plan; ExecContextForcesOids depends on that!
*/
/*
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
- * index entries for the tuples we add/update. We need not do this
+ * index entries for the tuples we add/update. We need not do this
* for a DELETE, however, since deletion doesn't affect indexes.
*/
if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
* Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
* to estate->es_auxmodifytables so that it will be run to completion by
* ExecPostprocessPlan. (It'd actually work fine to add the primary
- * ModifyTable node too, but there's no need.) Note the use of lcons
- * not lappend: we need later-initialized ModifyTable nodes to be shut
- * down before earlier ones. This ensures that we don't throw away
- * RETURNING rows that need to be seen by a later CTE subplan.
+ * ModifyTable node too, but there's no need.) Note the use of lcons not
+ * lappend: we need later-initialized ModifyTable nodes to be shut down
+ * before earlier ones. This ensures that we don't throw away RETURNING
+ * rows that need to be seen by a later CTE subplan.
*/
if (!mtstate->canSetTag)
estate->es_auxmodifytables = lcons(mtstate,
node->nl_MatchedOuter = false;
/*
- * fetch the values of any outer Vars that must be passed to
- * the inner scan, and store them in the appropriate PARAM_EXEC
- * slots.
+ * fetch the values of any outer Vars that must be passed to the
+ * inner scan, and store them in the appropriate PARAM_EXEC slots.
*/
foreach(lc, nl->nestParams)
{
*
* If we have no parameters to pass into the inner rel from the outer,
* tell the inner child that cheap rescans would be good. If we do have
- * such parameters, then there is no point in REWIND support at all in
- * the inner child, because it will always be rescanned with fresh
- * parameter values.
+ * such parameters, then there is no point in REWIND support at all in the
+ * inner child, because it will always be rescanned with fresh parameter
+ * values.
*/
outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags);
if (node->nestParams == NIL)
typedef struct RUHashEntryData
{
TupleHashEntryData shared; /* common header for hash table entries */
-} RUHashEntryData;
+} RUHashEntryData;
/*
{
TupleHashEntryData shared; /* common header for hash table entries */
SetOpStatePerGroupData pergroup;
-} SetOpHashEntryData;
+} SetOpHashEntryData;
static TupleTableSlot *setop_retrieve_direct(SetOpState *setopstate);
int aggno; /* if so, index of its PerAggData */
WindowObject winobj; /* object used in window function API */
-} WindowStatePerFuncData;
+} WindowStatePerFuncData;
/*
* For plain aggregate window functions, we also have one of these.
* snapshot != InvalidSnapshot, read_only = true: use exactly the given
* snapshot.
*
- * snapshot != InvalidSnapshot, read_only = false: use the given
- * snapshot, modified by advancing its command ID before each querytree.
+ * snapshot != InvalidSnapshot, read_only = false: use the given snapshot,
+ * modified by advancing its command ID before each querytree.
*
* snapshot == InvalidSnapshot, read_only = true: use the entry-time
* ActiveSnapshot, if any (if there isn't one, we run with no snapshot).
* snapshot for each user command, and advance its command ID before each
* querytree within the command.
*
- * In the first two cases, we can just push the snap onto the stack
- * once for the whole plan list.
+ * In the first two cases, we can just push the snap onto the stack once
+ * for the whole plan list.
*/
if (snapshot != InvalidSnapshot)
{
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (nargs - 1) *sizeof(ParamExternData));
+ (nargs - 1) * sizeof(ParamExternData));
/* we have static list of params, so no hooks needed */
paramLI->paramFetch = NULL;
paramLI->paramFetchArg = NULL;
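[Editorial note: the "(nargs - 1) * sizeof(...)" arithmetic works because, as the comment says, sizeof() of the header struct already accounts for the first array element. A minimal sketch of that allocation idiom with hypothetical structs; modern C would use a flexible array member instead:]

#include <stdlib.h>

typedef struct { int value; } Elem;

typedef struct
{
    int     nelems;
    Elem    elems[1];       /* variable length; first element included in sizeof */
} Header;

static Header *
alloc_header(int nelems)
{
    /* the header already contains one element, so add space for nelems - 1 more */
    Header *h = malloc(sizeof(Header) + (nelems - 1) * sizeof(Elem));

    if (h)
        h->nelems = nelems;
    return h;
}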
#define IDENT_PORT 113
static int ident_inet(hbaPort *port);
+
#ifdef HAVE_UNIX_SOCKETS
static int auth_peer(hbaPort *port);
#endif
*----------------------------------------------------------------
*/
#ifdef ENABLE_SSPI
-typedef SECURITY_STATUS
+typedef SECURITY_STATUS
(WINAPI * QUERY_SECURITY_CONTEXT_TOKEN_FN) (
PCtxtHandle, void **);
static int pg_SSPI_recvauth(Port *port);
}
#endif
status = auth_peer(port);
-#else /* HAVE_UNIX_SOCKETS */
+#else /* HAVE_UNIX_SOCKETS */
Assert(false);
#endif
break;
}
if (ClientAuthentication_hook)
- (*ClientAuthentication_hook)(port, status);
+ (*ClientAuthentication_hook) (port, status);
if (status == STATUS_OK)
sendAuthRequest(port, AUTH_REQ_OK);
return ret;
retval = krb5_recvauth(pg_krb5_context, &auth_context,
- (krb5_pointer) & port->sock, pg_krb_srvnam,
+ (krb5_pointer) &port->sock, pg_krb_srvnam,
pg_krb5_server, 0, pg_krb5_keytab, &ticket);
if (retval)
{
}
strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
-
#elif defined(SO_PEERCRED)
/* Linux style: use getsockopt(SO_PEERCRED) */
struct ucred peercred;
}
strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
-
#elif defined(HAVE_GETPEERUCRED)
/* Solaris > 10 */
uid_t uid;
}
strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
-
#elif defined(HAVE_STRUCT_CMSGCRED) || defined(HAVE_STRUCT_FCRED) || (defined(HAVE_STRUCT_SOCKCRED) && defined(LOCAL_CREDS))
struct msghdr msg;
}
strlcpy(ident_user, pw->pw_name, IDENT_USERNAME_MAX + 1);
-
#else
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
pg_freeaddrinfo_all(hint.ai_family, serveraddrs);
/*
- * Figure out at what time we should time out. We can't just use
- * a single call to select() with a timeout, since somebody can
- * be sending invalid packets to our port thus causing us to
- * retry in a loop and never time out.
+ * Figure out at what time we should time out. We can't just use a single
+ * call to select() with a timeout, since somebody can be sending invalid
+ * packets to our port thus causing us to retry in a loop and never time
+ * out.
*/
gettimeofday(&endtime, NULL);
endtime.tv_sec += RADIUS_TIMEOUT;
{
struct timeval timeout;
struct timeval now;
- int64 timeoutval;
+ int64 timeoutval;
gettimeofday(&now, NULL);
timeoutval = (endtime.tv_sec * 1000000 + endtime.tv_usec) - (now.tv_sec * 1000000 + now.tv_usec);
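[Editorial note: the loop above recomputes the remaining time before each select(), so early garbage packets cannot push back the overall deadline. A self-contained sketch of that computation with gettimeofday(); names are illustrative, not the server's:]

#include <stdbool.h>
#include <sys/time.h>

/*
 * Given an absolute deadline, compute the timeout still remaining for the
 * next select() call.  Returns false once the deadline has passed.
 */
static bool
remaining_timeout(const struct timeval *deadline, struct timeval *timeout)
{
    struct timeval  now;
    long long       usec;

    gettimeofday(&now, NULL);
    usec = ((long long) deadline->tv_sec * 1000000 + deadline->tv_usec) -
           ((long long) now.tv_sec * 1000000 + now.tv_usec);
    if (usec <= 0)
        return false;               /* deadline already reached */
    timeout->tv_sec = usec / 1000000;
    timeout->tv_usec = usec % 1000000;
    return true;
}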
/*
* Attempt to read the response packet, and verify the contents.
*
- * Any packet that's not actually a RADIUS packet, or otherwise
- * does not validate as an explicit reject, is just ignored and
- * we retry for another packet (until we reach the timeout). This
- * is to avoid the possibility to denial-of-service the login by
- * flooding the server with invalid packets on the port that
- * we're expecting the RADIUS response on.
+ * Any packet that's not actually a RADIUS packet, or otherwise does
+ * not validate as an explicit reject, is just ignored and we retry
+ * for another packet (until we reach the timeout). This is to avoid
+ * flooding the server with invalid packets on the port that we're
+ * expecting the RADIUS response on.
+ * the possibility of a denial-of-service attack on the login by
*/
addrsize = sizeof(remoteaddr);
{
#ifdef HAVE_IPV6
ereport(LOG,
- (errmsg("RADIUS response was sent from incorrect port: %i",
- ntohs(remoteaddr.sin6_port))));
+ (errmsg("RADIUS response was sent from incorrect port: %i",
+ ntohs(remoteaddr.sin6_port))));
#else
ereport(LOG,
- (errmsg("RADIUS response was sent from incorrect port: %i",
- ntohs(remoteaddr.sin_port))));
+ (errmsg("RADIUS response was sent from incorrect port: %i",
+ ntohs(remoteaddr.sin_port))));
#endif
continue;
}
*/
cryptvector = palloc(packetlength + strlen(port->hba->radiussecret));
- memcpy(cryptvector, receivepacket, 4); /* code+id+length */
- memcpy(cryptvector + 4, packet->vector, RADIUS_VECTOR_LENGTH); /* request
- * authenticator, from
- * original packet */
- if (packetlength > RADIUS_HEADER_LENGTH) /* there may be no attributes
- * at all */
+ memcpy(cryptvector, receivepacket, 4); /* code+id+length */
+ memcpy(cryptvector + 4, packet->vector, RADIUS_VECTOR_LENGTH); /* request
+ * authenticator, from
+ * original packet */
+ if (packetlength > RADIUS_HEADER_LENGTH) /* there may be no
+ * attributes at all */
memcpy(cryptvector + RADIUS_HEADER_LENGTH, receive_buffer + RADIUS_HEADER_LENGTH, packetlength - RADIUS_HEADER_LENGTH);
memcpy(cryptvector + packetlength, port->hba->radiussecret, strlen(port->hba->radiussecret));
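[Editorial note: per RFC 2865 the response authenticator is the MD5 of code+id+length, the request authenticator from the original packet, any attributes, and the shared secret, which is what the memcpy sequence above assembles before hashing. A rough standalone sketch of that buffer layout; the helper name and parameters are hypothetical and the MD5 step itself is omitted:]

#include <stdlib.h>
#include <string.h>

#define RADIUS_VECTOR_LENGTH 16
#define RADIUS_HEADER_LENGTH 20

/*
 * Assemble the buffer whose MD5 digest must equal the response
 * authenticator: code+id+length (4 bytes), the request authenticator
 * from the original request, the response attributes (if any), and
 * finally the shared secret.  Returns a malloc'd buffer; *outlen is set
 * to its length.
 */
static unsigned char *
build_check_buffer(const unsigned char *response, size_t resplen,
                   const unsigned char *request_auth,
                   const char *secret, size_t *outlen)
{
    size_t          secretlen = strlen(secret);
    unsigned char  *buf = malloc(resplen + secretlen);

    if (buf == NULL)
        return NULL;
    memcpy(buf, response, 4);                               /* code+id+length */
    memcpy(buf + 4, request_auth, RADIUS_VECTOR_LENGTH);    /* request authenticator */
    if (resplen > RADIUS_HEADER_LENGTH)                     /* attributes, if any */
        memcpy(buf + RADIUS_HEADER_LENGTH,
               response + RADIUS_HEADER_LENGTH,
               resplen - RADIUS_HEADER_LENGTH);
    memcpy(buf + resplen, secret, secretlen);               /* shared secret */
    *outlen = resplen + secretlen;
    return buf;
}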
encryptedpassword))
{
ereport(LOG,
- (errmsg("could not perform MD5 encryption of received packet")));
+ (errmsg("could not perform MD5 encryption of received packet")));
pfree(cryptvector);
continue;
}
else
{
ereport(LOG,
- (errmsg("RADIUS response has invalid code (%i) for user \"%s\"",
- receivepacket->code, port->user_name)));
+ (errmsg("RADIUS response has invalid code (%i) for user \"%s\"",
+ receivepacket->code, port->user_name)));
continue;
}
- } /* while (true) */
+ } /* while (true) */
}
}
static bool
-ipv4eq(struct sockaddr_in *a, struct sockaddr_in *b)
+ipv4eq(struct sockaddr_in * a, struct sockaddr_in * b)
{
return (a->sin_addr.s_addr == b->sin_addr.s_addr);
}
#ifdef HAVE_IPV6
static bool
-ipv6eq(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
+ipv6eq(struct sockaddr_in6 * a, struct sockaddr_in6 * b)
{
- int i;
+ int i;
for (i = 0; i < 16; i++)
if (a->sin6_addr.s6_addr[i] != b->sin6_addr.s6_addr[i])
return true;
}
-
-#endif /* HAVE_IPV6 */
+#endif /* HAVE_IPV6 */
/*
* Check whether host name matches pattern.
{
if (pattern[0] == '.') /* suffix match */
{
- size_t plen = strlen(pattern);
- size_t hlen = strlen(actual_hostname);
+ size_t plen = strlen(pattern);
+ size_t hlen = strlen(actual_hostname);
if (hlen < plen)
return false;
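[Editorial note: the suffix-match branch above compares the lengths first and then the tail of the actual host name against the pattern. A minimal standalone sketch of that check; the function name is illustrative:]

#include <stdbool.h>
#include <string.h>
#include <strings.h>

/*
 * If pattern starts with '.', match it as a domain suffix (".example.com"
 * matches "db1.example.com"); otherwise require a full, case-insensitive
 * match.
 */
static bool
hostname_matches(const char *actual, const char *pattern)
{
    if (pattern[0] == '.')
    {
        size_t  plen = strlen(pattern);
        size_t  hlen = strlen(actual);

        if (hlen < plen)
            return false;
        return strcasecmp(actual + (hlen - plen), pattern) == 0;
    }
    return strcasecmp(actual, pattern) == 0;
}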
static bool
check_hostname(hbaPort *port, const char *hostname)
{
- struct addrinfo *gai_result, *gai;
+ struct addrinfo *gai_result,
+ *gai;
int ret;
bool found;
if (gai->ai_addr->sa_family == AF_INET)
{
if (ipv4eq((struct sockaddr_in *) gai->ai_addr,
- (struct sockaddr_in *) &port->raddr.addr))
+ (struct sockaddr_in *) & port->raddr.addr))
{
found = true;
break;
else if (gai->ai_addr->sa_family == AF_INET6)
{
if (ipv6eq((struct sockaddr_in6 *) gai->ai_addr,
- (struct sockaddr_in6 *) &port->raddr.addr))
+ (struct sockaddr_in6 *) & port->raddr.addr))
{
found = true;
break;
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("specifying both host name and CIDR mask is invalid: \"%s\"",
token),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
pfree(token);
return false;
}
#ifdef HAVE_UTIME_H
#include <utime.h>
#endif
-#ifdef WIN32_ONLY_COMPILER /* mstcpip.h is missing on mingw */
+#ifdef WIN32_ONLY_COMPILER /* mstcpip.h is missing on mingw */
#include <mstcpip.h>
#endif
*/
/* --------------------------------
- * pq_set_nonblocking - set socket blocking/non-blocking
+ * pq_set_nonblocking - set socket blocking/non-blocking
*
* Sets the socket non-blocking if nonblocking is TRUE, or sets it
* blocking otherwise.
#ifdef WIN32
pgwin32_noblock = nonblocking ? 1 : 0;
#else
+
/*
- * Use COMMERROR on failure, because ERROR would try to send the error
- * to the client, which might require changing the mode again, leading
- * to infinite recursion.
+ * Use COMMERROR on failure, because ERROR would try to send the error to
+ * the client, which might require changing the mode again, leading to
+ * infinite recursion.
*/
if (nonblocking)
{
if (!pg_set_noblock(MyProcPort->sock))
ereport(COMMERROR,
- (errmsg("could not set socket to non-blocking mode: %m")));
+ (errmsg("could not set socket to non-blocking mode: %m")));
}
else
{
{
/*
* Ok if no data available without blocking or interrupted (though
- * EINTR really shouldn't happen with a non-blocking socket).
- * Report other errors.
+ * EINTR really shouldn't happen with a non-blocking socket). Report
+ * other errors.
*/
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
r = 0;
else
{
/*
- * Careful: an ereport() that tries to write to the client
- * would cause recursion to here, leading to stack overflow
- * and core dump! This message must go *only* to the
- * postmaster log.
+ * Careful: an ereport() that tries to write to the client would
+ * cause recursion to here, leading to stack overflow and core
+ * dump! This message must go *only* to the postmaster log.
*/
ereport(COMMERROR,
(errcode_for_socket_access(),
continue; /* Ok if we were interrupted */
/*
- * Ok if no data writable without blocking, and the socket
- * is in non-blocking mode.
+ * Ok if no data writable without blocking, and the socket is in
+ * non-blocking mode.
*/
if (errno == EAGAIN ||
errno == EWOULDBLOCK)
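[Editorial note: on a socket in non-blocking mode, a send() that would otherwise wait fails with EAGAIN or EWOULDBLOCK, which is why the code above treats those errno values as "nothing wrong, try again later". A small standalone sketch of that pattern, not the server's actual buffering logic:]

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

/*
 * Try to send as much of buf as the kernel will accept right now.
 * Returns the number of bytes accepted, 0 if the call would have
 * blocked (or was interrupted), or -1 on a real error.
 */
static ssize_t
send_some(int sock, const char *buf, size_t len)
{
    ssize_t     n = send(sock, buf, len, 0);

    if (n < 0)
    {
        if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
            return 0;           /* nothing sent, but not an error; retry later */
        return -1;              /* genuine failure */
    }
    return n;
}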
void
pq_putmessage_noblock(char msgtype, const char *s, size_t len)
{
- int res;
- int required;
+ int res;
+ int required;
/*
* Ensure we have enough space in the output buffer for the message header
PqSendBufferSize = required;
}
res = pq_putmessage(msgtype, s, len);
- Assert(res == 0); /* should not fail when the message fits in buffer */
+ Assert(res == 0); /* should not fail when the message fits in
+ * buffer */
}
static int
pq_setkeepaliveswin32(Port *port, int idle, int interval)
{
- struct tcp_keepalive ka;
- DWORD retsize;
+ struct tcp_keepalive ka;
+ DWORD retsize;
if (idle <= 0)
- idle = 2 * 60 * 60; /* default = 2 hours */
+ idle = 2 * 60 * 60; /* default = 2 hours */
if (interval <= 0)
- interval = 1; /* default = 1 second */
+ interval = 1; /* default = 1 second */
ka.onoff = 1;
ka.keepalivetime = idle * 1000;
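[Editorial note: the Windows path above packs the idle time and interval, in milliseconds, into a struct tcp_keepalive for WSAIoctl(SIO_KEEPALIVE_VALS). A rough sketch of the equivalent settings on a Linux-style socket using setsockopt() with TCP_KEEPIDLE/TCP_KEEPINTVL in seconds; this is an assumed platform variant, not what the function in the diff does:]

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Enable keepalive probes after 'idle' seconds, repeated every 'interval' seconds. */
static int
set_keepalive(int sock, int idle, int interval)
{
    int     on = 1;

    if (idle <= 0)
        idle = 2 * 60 * 60;     /* default = 2 hours */
    if (interval <= 0)
        interval = 1;           /* default = 1 second */

    if (setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
        return -1;
#ifdef TCP_KEEPIDLE
    if (setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0)
        return -1;
#endif
#ifdef TCP_KEEPINTVL
    if (setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(interval)) < 0)
        return -1;
#endif
    return 0;
}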
elog(LOG, "getsockopt(TCP_KEEPALIVE) failed: %m");
port->default_keepalives_idle = -1; /* don't know */
}
-#endif /* TCP_KEEPIDLE */
-#else /* WIN32 */
+#endif /* TCP_KEEPIDLE */
+#else /* WIN32 */
/* We can't get the defaults on Windows, so return "don't know" */
port->default_keepalives_idle = -1;
-#endif /* WIN32 */
+#endif /* WIN32 */
}
return port->default_keepalives_idle;
#endif
port->keepalives_idle = idle;
-#else /* WIN32 */
+#else /* WIN32 */
return pq_setkeepaliveswin32(port, idle, port->keepalives_interval);
#endif
-#else /* TCP_KEEPIDLE || SIO_KEEPALIVE_VALS */
+#else /* TCP_KEEPIDLE || SIO_KEEPALIVE_VALS */
if (idle != 0)
{
elog(LOG, "setting the keepalive idle time is not supported");
#else
/* We can't get the defaults on Windows, so return "don't know" */
port->default_keepalives_interval = -1;
-#endif /* WIN32 */
+#endif /* WIN32 */
}
return port->default_keepalives_interval;
}
port->keepalives_interval = interval;
-#else /* WIN32 */
+#else /* WIN32 */
return pq_setkeepaliveswin32(port, port->keepalives_idle, interval);
#endif
#else
/*
* Place platform-specific startup hacks here. This is the right
* place to put code that must be executed early in the launch of any new
- * server process. Note that this code will NOT be executed when a backend
+ * server process. Note that this code will NOT be executed when a backend
* or sub-bootstrap process is forked, unless we are in a fork/exec
* environment (ie EXEC_BACKEND is defined).
*
/*
* On some platforms, unaligned memory accesses result in a kernel trap;
* the default kernel behavior is to emulate the memory access, but this
- * results in a significant performance penalty. We want PG never to
- * make such unaligned memory accesses, so this code disables the kernel
+ * results in a significant performance penalty. We want PG never to make
+ * such unaligned memory accesses, so this code disables the kernel
* emulation: unaligned accesses will result in SIGBUS instead.
*/
#ifdef NOFIXADE
#if defined(__alpha) /* no __alpha__ ? */
{
- int buffer[] = {SSIN_UACPROC, UAC_SIGBUS | UAC_NOPRINT};
+ int buffer[] = {SSIN_UACPROC, UAC_SIGBUS | UAC_NOPRINT};
if (setsysinfo(SSI_NVPAIRS, buffer, 1, (caddr_t) NULL,
(unsigned long) NULL) < 0)
progname, strerror(errno));
}
#endif /* __alpha */
-
#endif /* NOFIXADE */
/*
static FdwPlan *
_copyFdwPlan(FdwPlan *from)
{
- FdwPlan *newnode = makeNode(FdwPlan);
+ FdwPlan *newnode = makeNode(FdwPlan);
COPY_SCALAR_FIELD(startup_cost);
COPY_SCALAR_FIELD(total_cost);
static CollateExpr *
_copyCollateExpr(CollateExpr *from)
{
- CollateExpr *newnode = makeNode(CollateExpr);
+ CollateExpr *newnode = makeNode(CollateExpr);
COPY_NODE_FIELD(arg);
COPY_SCALAR_FIELD(collOid);
static CollateClause *
_copyCollateClause(CollateClause *from)
{
- CollateClause *newnode = makeNode(CollateClause);
+ CollateClause *newnode = makeNode(CollateClause);
COPY_NODE_FIELD(arg);
COPY_NODE_FIELD(collname);
coll = ((NullIfExpr *) expr)->opcollid;
break;
case T_ScalarArrayOpExpr:
- coll = InvalidOid; /* result is always boolean */
+ coll = InvalidOid; /* result is always boolean */
break;
case T_BoolExpr:
- coll = InvalidOid; /* result is always boolean */
+ coll = InvalidOid; /* result is always boolean */
break;
case T_SubLink:
{
coll = ((FieldSelect *) expr)->resultcollid;
break;
case T_FieldStore:
- coll = InvalidOid; /* result is always composite */
+ coll = InvalidOid; /* result is always composite */
break;
case T_RelabelType:
coll = ((RelabelType *) expr)->resultcollid;
coll = ((ArrayCoerceExpr *) expr)->resultcollid;
break;
case T_ConvertRowtypeExpr:
- coll = InvalidOid; /* result is always composite */
+ coll = InvalidOid; /* result is always composite */
break;
case T_CollateExpr:
coll = ((CollateExpr *) expr)->collOid;
coll = ((ArrayExpr *) expr)->array_collid;
break;
case T_RowExpr:
- coll = InvalidOid; /* result is always composite */
+ coll = InvalidOid; /* result is always composite */
break;
case T_RowCompareExpr:
- coll = InvalidOid; /* result is always boolean */
+ coll = InvalidOid; /* result is always boolean */
break;
case T_CoalesceExpr:
coll = ((CoalesceExpr *) expr)->coalescecollid;
coll = ((MinMaxExpr *) expr)->minmaxcollid;
break;
case T_XmlExpr:
+
/*
* XMLSERIALIZE returns text from non-collatable inputs, so its
- * collation is always default. The other cases return boolean
- * or XML, which are non-collatable.
+ * collation is always default. The other cases return boolean or
+ * XML, which are non-collatable.
*/
if (((XmlExpr *) expr)->op == IS_XMLSERIALIZE)
coll = DEFAULT_COLLATION_OID;
coll = InvalidOid;
break;
case T_NullTest:
- coll = InvalidOid; /* result is always boolean */
+ coll = InvalidOid; /* result is always boolean */
break;
case T_BooleanTest:
- coll = InvalidOid; /* result is always boolean */
+ coll = InvalidOid; /* result is always boolean */
break;
case T_CoerceToDomain:
coll = ((CoerceToDomain *) expr)->resultcollid;
coll = ((SetToDefault *) expr)->collation;
break;
case T_CurrentOfExpr:
- coll = InvalidOid; /* result is always boolean */
+ coll = InvalidOid; /* result is always boolean */
break;
case T_PlaceHolderVar:
coll = exprCollation((Node *) ((PlaceHolderVar *) expr)->phexpr);
((NullIfExpr *) expr)->opcollid = collation;
break;
case T_ScalarArrayOpExpr:
- Assert(!OidIsValid(collation)); /* result is always boolean */
+ Assert(!OidIsValid(collation)); /* result is always boolean */
break;
case T_BoolExpr:
- Assert(!OidIsValid(collation)); /* result is always boolean */
+ Assert(!OidIsValid(collation)); /* result is always boolean */
break;
case T_SubLink:
#ifdef USE_ASSERT_CHECKING
Assert(!OidIsValid(collation));
}
}
-#endif /* USE_ASSERT_CHECKING */
+#endif /* USE_ASSERT_CHECKING */
break;
case T_FieldSelect:
((FieldSelect *) expr)->resultcollid = collation;
break;
case T_FieldStore:
- Assert(!OidIsValid(collation)); /* result is always composite */
+ Assert(!OidIsValid(collation)); /* result is always composite */
break;
case T_RelabelType:
((RelabelType *) expr)->resultcollid = collation;
((ArrayCoerceExpr *) expr)->resultcollid = collation;
break;
case T_ConvertRowtypeExpr:
- Assert(!OidIsValid(collation)); /* result is always composite */
+ Assert(!OidIsValid(collation)); /* result is always composite */
break;
case T_CaseExpr:
((CaseExpr *) expr)->casecollid = collation;
((ArrayExpr *) expr)->array_collid = collation;
break;
case T_RowExpr:
- Assert(!OidIsValid(collation)); /* result is always composite */
+ Assert(!OidIsValid(collation)); /* result is always composite */
break;
case T_RowCompareExpr:
- Assert(!OidIsValid(collation)); /* result is always boolean */
+ Assert(!OidIsValid(collation)); /* result is always boolean */
break;
case T_CoalesceExpr:
((CoalesceExpr *) expr)->coalescecollid = collation;
(collation == InvalidOid));
break;
case T_NullTest:
- Assert(!OidIsValid(collation)); /* result is always boolean */
+ Assert(!OidIsValid(collation)); /* result is always boolean */
break;
case T_BooleanTest:
- Assert(!OidIsValid(collation)); /* result is always boolean */
+ Assert(!OidIsValid(collation)); /* result is always boolean */
break;
case T_CoerceToDomain:
((CoerceToDomain *) expr)->resultcollid = collation;
((SetToDefault *) expr)->collation = collation;
break;
case T_CurrentOfExpr:
- Assert(!OidIsValid(collation)); /* result is always boolean */
+ Assert(!OidIsValid(collation)); /* result is always boolean */
break;
default:
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr));
/* sizeof(ParamListInfoData) includes the first array element */
size = sizeof(ParamListInfoData) +
- (from->numParams - 1) *sizeof(ParamExternData);
+ (from->numParams - 1) * sizeof(ParamExternData);
retval = (ParamListInfo) palloc(size);
retval->paramFetch = NULL;
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
bool *differentTypes);
/*
* We have to make child entries in the EquivalenceClass data
- * structures as well. This is needed either if the parent
- * participates in some eclass joins (because we will want to
- * consider inner-indexscan joins on the individual children)
- * or if the parent has useful pathkeys (because we should try
- * to build MergeAppend paths that produce those sort orderings).
+ * structures as well. This is needed either if the parent
+ * participates in some eclass joins (because we will want to consider
+ * inner-indexscan joins on the individual children) or if the parent
+ * has useful pathkeys (because we should try to build MergeAppend
+ * paths that produce those sort orderings).
*/
if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
add_child_rel_equivalences(root, appinfo, rel, childrel);
/* Have we already seen this ordering? */
foreach(lpk, all_child_pathkeys)
{
- List *existing_pathkeys = (List *) lfirst(lpk);
+ List *existing_pathkeys = (List *) lfirst(lpk);
if (compare_pathkeys(existing_pathkeys,
childkeys) == PATHKEYS_EQUAL)
/*
* Next, build MergeAppend paths based on the collected list of child
- * pathkeys. We consider both cheapest-startup and cheapest-total
- * cases, ie, for each interesting ordering, collect all the cheapest
- * startup subpaths and all the cheapest total paths, and build a
- * MergeAppend path for each list.
+ * pathkeys. We consider both cheapest-startup and cheapest-total cases,
+ * ie, for each interesting ordering, collect all the cheapest startup
+ * subpaths and all the cheapest total paths, and build a MergeAppend path
+ * for each list.
*/
foreach(l, all_child_pathkeys)
{
- List *pathkeys = (List *) lfirst(l);
- List *startup_subpaths = NIL;
- List *total_subpaths = NIL;
- bool startup_neq_total = false;
- ListCell *lcr;
+ List *pathkeys = (List *) lfirst(l);
+ List *startup_subpaths = NIL;
+ List *total_subpaths = NIL;
+ bool startup_neq_total = false;
+ ListCell *lcr;
/* Select the child paths for this ordering... */
foreach(lcr, live_childrels)
/*
* Notice whether we actually have different paths for the
- * "cheapest" and "total" cases; frequently there will be no
- * point in two create_merge_append_path() calls.
+ * "cheapest" and "total" cases; frequently there will be no point
+ * in two create_merge_append_path() calls.
*/
if (cheapest_startup != cheapest_total)
startup_neq_total = true;
{
if (IsA(path, AppendPath))
{
- AppendPath *apath = (AppendPath *) path;
+ AppendPath *apath = (AppendPath *) path;
/* list_copy is important here to avoid sharing list substructure */
return list_concat(subpaths, list_copy(apath->subpaths));
* accesses (XXX can't we refine that guess?)
*
* By default, we charge two operator evals per tuple comparison, which should
- * be in the right ballpark in most cases. The caller can tweak this by
+ * be in the right ballpark in most cases. The caller can tweak this by
* specifying nonzero comparison_cost; typically that's used for any extra
* work that has to be done to prepare the inputs to the comparison operators.
*
* Determines and returns the cost of a MergeAppend node.
*
* MergeAppend merges several pre-sorted input streams, using a heap that
- * at any given instant holds the next tuple from each stream. If there
+ * at any given instant holds the next tuple from each stream. If there
* are N streams, we need about N*log2(N) tuple comparisons to construct
* the heap at startup, and then for each output tuple, about log2(N)
* comparisons to delete the top heap entry and another log2(N) comparisons
List *nrclauses;
nrclauses = select_nonredundant_join_clauses(root,
- path->joinrestrictinfo,
+ path->joinrestrictinfo,
path->innerjoinpath);
*indexed_join_quals = (nrclauses == NIL);
}
/*
* Compute per-output-column width estimates by examining the subquery's
- * targetlist. For any output that is a plain Var, get the width estimate
+ * targetlist. For any output that is a plain Var, get the width estimate
* that was made while planning the subquery. Otherwise, fall back on a
* datatype-based estimate.
*/
if (IsA(texpr, Var) &&
subroot->parse->setOperations == NULL)
{
- Var *var = (Var *) texpr;
+ Var *var = (Var *) texpr;
RelOptInfo *subrel = find_base_rel(subroot, var->varno);
item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
* of estimating baserestrictcost, so we set that, and we also set up width
* using what will be purely datatype-driven estimates from the targetlist.
* There is no way to do anything sane with the rows value, so we just put
- * a default estimate and hope that the wrapper can improve on it. The
+ * a default estimate and hope that the wrapper can improve on it. The
* wrapper's PlanForeignScan function will be called momentarily.
*
* The rel's targetlist and restrictinfo list must have been constructed
ndx = var->varattno - rel->min_attr;
/*
- * If it's a whole-row Var, we'll deal with it below after we
- * have already cached as many attr widths as possible.
+ * If it's a whole-row Var, we'll deal with it below after we have
+ * already cached as many attr widths as possible.
*/
if (var->varattno == 0)
{
}
/*
- * The width may have been cached already (especially if it's
- * a subquery), so don't duplicate effort.
+ * The width may have been cached already (especially if it's a
+ * subquery), so don't duplicate effort.
*/
if (rel->attr_widths[ndx] > 0)
{
*/
if (have_wholerow_var)
{
- int32 wholerow_width = sizeof(HeapTupleHeaderData);
+ int32 wholerow_width = sizeof(HeapTupleHeaderData);
if (reloid != InvalidOid)
{
/* Real relation, so estimate true tuple width */
wholerow_width += get_relation_data_width(reloid,
- rel->attr_widths - rel->min_attr);
+ rel->attr_widths - rel->min_attr);
}
else
{
rel->attr_widths[0 - rel->min_attr] = wholerow_width;
/*
- * Include the whole-row Var as part of the output tuple. Yes,
- * that really is what happens at runtime.
+ * Include the whole-row Var as part of the output tuple. Yes, that
+ * really is what happens at runtime.
*/
tuple_width += wholerow_width;
}
* Also, the expression's exposed collation must match the EC's collation.
* This is important because in comparisons like "foo < bar COLLATE baz",
* only one of the expressions has the correct exposed collation as we receive
- * it from the parser. Forcing both of them to have it ensures that all
+ * it from the parser. Forcing both of them to have it ensures that all
* variant spellings of such a construct behave the same. Again, we can
* stick on a RelabelType to force the right exposed collation. (It might
* work to not label the collation at all in EC members, but this is risky
exprCollation((Node *) expr) != req_collation)
{
/*
- * Strip any existing RelabelType, then add a new one if needed.
- * This is to preserve the invariant of no redundant RelabelTypes.
+ * Strip any existing RelabelType, then add a new one if needed. This
+ * is to preserve the invariant of no redundant RelabelTypes.
*
* If we have to change the exposed type of the stripped expression,
* set typmod to -1 (since the new type may not have the same typmod
- * interpretation). If we only have to change collation, preserve
- * the exposed typmod.
+ * interpretation). If we only have to change collation, preserve the
+ * exposed typmod.
*/
while (expr && IsA(expr, RelabelType))
expr = (Expr *) ((RelabelType *) expr)->arg;
ListCell *lc2;
/*
- * If this EC contains a constant, then it's not useful for sorting
- * or driving an inner index-scan, so we skip generating child EMs.
+ * If this EC contains a constant, then it's not useful for sorting or
+ * driving an inner index-scan, so we skip generating child EMs.
*
* If this EC contains a volatile expression, then generating child
* EMs would be downright dangerous. We rely on a volatile EC having
static Expr *expand_boolean_index_clause(Node *clause, int indexcol,
IndexOptInfo *index);
static List *expand_indexqual_opclause(RestrictInfo *rinfo,
- Oid opfamily, Oid idxcollation);
+ Oid opfamily, Oid idxcollation);
static RestrictInfo *expand_indexqual_rowcompare(RestrictInfo *rinfo,
IndexOptInfo *index,
int indexcol);
* (2) must contain an operator which is in the same family as the index
* operator for this column, or is a "special" operator as recognized
* by match_special_index_operator();
- * and
- * (3) must match the collation of the index, if collation is relevant.
+ * and
+ * (3) must match the collation of the index, if collation is relevant.
*
* Our definition of "const" is pretty liberal: we allow Vars belonging
* to the caller-specified outer_relids relations (which had better not
* is a "special" indexable operator.
*/
if (plain_op &&
- match_special_index_operator(clause, opfamily, idxcollation, true))
+ match_special_index_operator(clause, opfamily, idxcollation, true))
return true;
return false;
}
/****************************************************************************
- * ---- ROUTINES TO CHECK ORDERING OPERATORS ----
+ * ---- ROUTINES TO CHECK ORDERING OPERATORS ----
****************************************************************************/
/*
foreach(lc1, pathkeys)
{
- PathKey *pathkey = (PathKey *) lfirst(lc1);
+ PathKey *pathkey = (PathKey *) lfirst(lc1);
bool found = false;
ListCell *lc2;
foreach(lc2, pathkey->pk_eclass->ec_members)
{
EquivalenceMember *member = (EquivalenceMember *) lfirst(lc2);
- int indexcol;
+ int indexcol;
/* No possibility of match if it references other relations */
if (!bms_equal(member->em_relids, index->rel->relids))
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
{
- Expr *expr;
+ Expr *expr;
expr = match_clause_to_ordering_op(index,
indexcol,
* Note that we currently do not consider the collation of the ordering
* operator's result. In practical cases the result type will be numeric
* and thus have no collation, and it's not very clear what to match to
- * if it did have a collation. The index's collation should match the
+ * if it did have a collation. The index's collation should match the
* ordering operator's input collation, not its result.
*
* If successful, return 'clause' as-is if the indexkey is on the left,
return NULL;
/*
- * Is the (commuted) operator an ordering operator for the opfamily?
- * And if so, does it yield the right sorting semantics?
+ * Is the (commuted) operator an ordering operator for the opfamily? And
+ * if so, does it yield the right sorting semantics?
*/
sortfamily = get_op_opfamily_sortfamily(expr_op, opfamily);
if (sortfamily != pk_opfamily)
continue;
/*
- * XXX at some point we may need to check collations here
- * too. For the moment we assume all collations reduce to
- * the same notion of equality.
+ * XXX at some point we may need to check collations here too.
+ * For the moment we assume all collations reduce to the same
+ * notion of equality.
*/
/* OK, see if the condition operand matches the index key */
*
* The non-pattern opclasses will not sort the way we need in most non-C
* locales. We can use such an index anyway for an exact match (simple
- * equality), but not for prefix-match cases. Note that we are looking
- * at the index's collation, not the expression's collation -- this test
- * is not dependent on the LIKE/regex operator's collation (which would
- * only affect case folding behavior of ILIKE, anyway).
+ * equality), but not for prefix-match cases. Note that we are looking at
+ * the index's collation, not the expression's collation -- this test is
+ * not dependent on the LIKE/regex operator's collation (which would only
+ * affect case folding behavior of ILIKE, anyway).
*/
switch (expr_op)
{
resultquals = list_concat(resultquals,
expand_indexqual_opclause(rinfo,
curFamily,
- curCollation));
+ curCollation));
}
else if (IsA(clause, ScalarArrayOpExpr))
{
expr = make_opclause(opr1oid, BOOLOID, false,
(Expr *) leftop,
(Expr *) makeConst(datatype, -1,
- InvalidOid, /* not collatable */
+ InvalidOid, /* not collatable */
-1, opr1right,
false, false),
InvalidOid, InvalidOid);
expr = make_opclause(opr2oid, BOOLOID, false,
(Expr *) leftop,
(Expr *) makeConst(datatype, -1,
- InvalidOid, /* not collatable */
+ InvalidOid, /* not collatable */
-1, opr2right,
false, false),
InvalidOid, InvalidOid);
/*
* 1. Consider mergejoin paths where both relations must be explicitly
- * sorted. Skip this if we can't mergejoin.
+ * sorted. Skip this if we can't mergejoin.
*/
if (mergejoin_allowed)
sort_inner_and_outer(root, joinrel, outerrel, innerrel,
- restrictlist, mergeclause_list, jointype, sjinfo);
+ restrictlist, mergeclause_list, jointype, sjinfo);
/*
* 2. Consider paths where the outer relation need not be explicitly
*/
if (mergejoin_allowed)
match_unsorted_outer(root, joinrel, outerrel, innerrel,
- restrictlist, mergeclause_list, jointype, sjinfo);
+ restrictlist, mergeclause_list, jointype, sjinfo);
#ifdef NOT_USED
*/
if (mergejoin_allowed)
match_unsorted_inner(root, joinrel, outerrel, innerrel,
- restrictlist, mergeclause_list, jointype, sjinfo);
+ restrictlist, mergeclause_list, jointype, sjinfo);
#endif
/*
static bool is_dummy_rel(RelOptInfo *rel);
static void mark_dummy_rel(RelOptInfo *rel);
static bool restriction_is_constant_false(List *restrictlist,
- bool only_pushed_down);
+ bool only_pushed_down);
/*
*
* Also, a provably constant-false join restriction typically means that
* we can skip evaluating one or both sides of the join. We do this by
- * marking the appropriate rel as dummy. For outer joins, a constant-false
- * restriction that is pushed down still means the whole join is dummy,
- * while a non-pushed-down one means that no inner rows will join so we
- * can treat the inner rel as dummy.
+ * marking the appropriate rel as dummy. For outer joins, a
+ * constant-false restriction that is pushed down still means the whole
+ * join is dummy, while a non-pushed-down one means that no inner rows
+ * will join so we can treat the inner rel as dummy.
*
* We need only consider the jointypes that appear in join_info_list, plus
* JOIN_INNER.
/*
* EquivalenceClasses need to contain opfamily lists based on the family
* membership of mergejoinable equality operators, which could belong to
- * more than one opfamily. So we have to look up the opfamily's equality
+ * more than one opfamily. So we have to look up the opfamily's equality
* operator and get its membership.
*/
equality_op = get_opfamily_member(opfamily,
true);
/*
- * If the sort key isn't already present in any EquivalenceClass,
- * then it's not an interesting sort order for this query. So
- * we can stop now --- lower-order sort keys aren't useful either.
+ * If the sort key isn't already present in any EquivalenceClass, then
+ * it's not an interesting sort order for this query. So we can stop
+ * now --- lower-order sort keys aren't useful either.
*/
if (!cpathkey)
break;
continue;
/*
- * Build a representation of this targetlist entry as
- * an outer Var.
+ * Build a representation of this targetlist entry as an
+ * outer Var.
*/
outer_expr = (Expr *) makeVarFromTargetEntry(rel->relid,
tle);
* right sides.
*
* Note this is called before EC merging is complete, so the links won't
- * necessarily point to canonical ECs. Before they are actually used for
+ * necessarily point to canonical ECs. Before they are actually used for
* anything, update_mergeclause_eclasses must be called to ensure that
* they've been updated to point to canonical ECs.
*/
/* local functions */
static bool join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo);
static void remove_rel_from_query(PlannerInfo *root, int relid,
- Relids joinrelids);
+ Relids joinrelids);
static List *remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved);
!bms_equal(restrictinfo->required_relids, joinrelids))
{
/*
- * If such a clause actually references the inner rel then
- * join removal has to be disallowed. We have to check this
- * despite the previous attr_needed checks because of the
- * possibility of pushed-down clauses referencing the rel.
+ * If such a clause actually references the inner rel then join
+ * removal has to be disallowed. We have to check this despite
+ * the previous attr_needed checks because of the possibility of
+ * pushed-down clauses referencing the rel.
*/
if (bms_is_member(innerrelid, restrictinfo->clause_relids))
return false;
* Likewise remove references from SpecialJoinInfo data structures.
*
* This is relevant in case the outer join we're deleting is nested inside
- * other outer joins: the upper joins' relid sets have to be adjusted.
- * The RHS of the target outer join will be made empty here, but that's OK
+ * other outer joins: the upper joins' relid sets have to be adjusted. The
+ * RHS of the target outer join will be made empty here, but that's OK
* since caller will delete that SpecialJoinInfo entirely.
*/
foreach(l, root->join_info_list)
{
/* Recheck that qual doesn't actually reference the target rel */
Assert(!bms_is_member(relid, rinfo->clause_relids));
+
/*
* The required_relids probably aren't shared with anything else,
* but let's copy them just to be sure.
List *tidquals);
static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
Index scanrelid, Node *funcexpr, List *funccolnames,
- List *funccoltypes, List *funccoltypmods, List *funccolcollations);
+ List *funccoltypes, List *funccoltypmods, List *funccolcollations);
static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
Index scanrelid, List *values_lists);
static CteScan *make_ctescan(List *qptlist, List *qpqual,
bool *mergenullsfirst,
Plan *lefttree, Plan *righttree,
JoinType jointype);
-static Sort *make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
- AttrNumber *sortColIdx, Oid *sortOperators, Oid *collations, bool *nullsFirst,
+static Sort *
+make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
+AttrNumber *sortColIdx, Oid *sortOperators, Oid *collations, bool *nullsFirst,
double limit_tuples);
static Plan *prepare_sort_from_pathkeys(PlannerInfo *root,
- Plan *lefttree, List *pathkeys,
- bool adjust_tlist_in_place,
- int *p_numsortkeys,
- AttrNumber **p_sortColIdx,
- Oid **p_sortOperators,
- Oid **p_collations,
- bool **p_nullsFirst);
+ Plan *lefttree, List *pathkeys,
+ bool adjust_tlist_in_place,
+ int *p_numsortkeys,
+ AttrNumber **p_sortColIdx,
+ Oid **p_sortOperators,
+ Oid **p_collations,
+ bool **p_nullsFirst);
static Material *make_material(Plan *lefttree);
/*
* create_plan
* Creates the access plan for a query by recursively processing the
- * desired tree of pathnodes, starting at the node 'best_path'. For
+ * desired tree of pathnodes, starting at the node 'best_path'. For
* every pathnode found, we create a corresponding plan node containing
* appropriate id, target list, and qualification information.
*
/* Now, insert a Sort node if subplan isn't sufficiently ordered */
if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
subplan = (Plan *) make_sort(root, subplan, numsortkeys,
- sortColIdx, sortOperators, collations, nullsFirst,
+ sortColIdx, sortOperators, collations, nullsFirst,
best_path->limit_tuples);
subplans = lappend(subplans, subplan);
sortcl->eqop = eqop;
sortcl->sortop = sortop;
sortcl->nulls_first = false;
- sortcl->hashable = false; /* no need to make this accurate */
+ sortcl->hashable = false; /* no need to make this accurate */
sortList = lappend(sortList, sortcl);
groupColPos++;
}
qpqual = extract_actual_clauses(qpqual, false);
/*
- * We have to replace any outer-relation variables with nestloop params
- * in the indexqualorig, qpqual, and indexorderbyorig expressions. A bit
+ * We have to replace any outer-relation variables with nestloop params in
+ * the indexqualorig, qpqual, and indexorderbyorig expressions. A bit
* annoying to have to do this separately from the processing in
* fix_indexqual_references --- rethink this when generalizing the inner
* indexscan support. But note we can't really do this earlier because
*indexqual = lappend(*indexqual, pred);
}
}
+
/*
* Replace outer-relation variables with nestloop params, but only
* after doing the above comparisons to index predicates.
return NULL;
if (IsA(node, Var))
{
- Var *var = (Var *) node;
- Param *param;
+ Var *var = (Var *) node;
+ Param *param;
NestLoopParam *nlp;
- ListCell *lc;
+ ListCell *lc;
/* Upper-level Vars should be long gone at this point */
Assert(var->varlevelsup == 0);
*
* This is a simplified version of fix_indexqual_references. The input does
* not have RestrictInfo nodes, and we assume that indxqual.c already
- * commuted the clauses to put the index keys on the left. Also, we don't
+ * commuted the clauses to put the index keys on the left. Also, we don't
* bother to support any cases except simple OpExprs, since nothing else
* is allowed for ordering operators.
*/
* If you change this, see also create_append_path(). Also, the size
* calculations should match set_append_rel_pathlist(). It'd be better
* not to duplicate all this logic, but some callers of this function
- * aren't working from an appendrel or AppendPath, so there's noplace
- * to copy the data from.
+ * aren't working from an appendrel or AppendPath, so there's no place
+ * to copy the data from.
*/
plan->startup_cost = 0;
plan->total_cost = 0;
*/
static Sort *
make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
- AttrNumber *sortColIdx, Oid *sortOperators, Oid *collations, bool *nullsFirst,
+AttrNumber *sortColIdx, Oid *sortOperators, Oid *collations, bool *nullsFirst,
double limit_tuples)
{
Sort *node = makeNode(Sort);
* prepare_sort_from_pathkeys
* Prepare to sort according to given pathkeys
*
- * This is used to set up for both Sort and MergeAppend nodes. It calculates
+ * This is used to set up for both Sort and MergeAppend nodes. It calculates
* the executor's representation of the sort key information, and adjusts the
* plan targetlist if needed to add resjunk sort columns.
*
pathkey->pk_eclass->ec_collation,
pathkey->pk_nulls_first,
numsortkeys,
- sortColIdx, sortOperators, collations, nullsFirst);
+ sortColIdx, sortOperators, collations, nullsFirst);
}
Assert(numsortkeys > 0);
/* Now build the Sort node */
return make_sort(root, lefttree, numsortkeys,
- sortColIdx, sortOperators, collations, nullsFirst, limit_tuples);
+ sortColIdx, sortOperators, collations, nullsFirst, limit_tuples);
}
/*
exprCollation((Node *) tle->expr),
sortcl->nulls_first,
numsortkeys,
- sortColIdx, sortOperators, collations, nullsFirst);
+ sortColIdx, sortOperators, collations, nullsFirst);
}
Assert(numsortkeys > 0);
exprCollation((Node *) tle->expr),
grpcl->nulls_first,
numsortkeys,
- sortColIdx, sortOperators, collations, nullsFirst);
+ sortColIdx, sortOperators, collations, nullsFirst);
grpno++;
}
phinfo->ph_needed = bms_add_members(phinfo->ph_needed,
where_needed);
+
/*
- * Update ph_may_need too. This is currently only necessary
- * when being called from build_base_rel_tlists, but we may as
- * well do it always.
+ * Update ph_may_need too. This is currently only necessary when
+ * being called from build_base_rel_tlists, but we may as well do
+ * it always.
*/
phinfo->ph_may_need = bms_add_members(phinfo->ph_may_need,
where_needed);
* this join's nullable side, and it may get used above this join, then
* ensure that min_righthand contains the full eval_at set of the PHV.
* This ensures that the PHV actually can be evaluated within the RHS.
- * Note that this works only because we should already have determined
- * the final eval_at level for any PHV syntactically within this join.
+ * Note that this works only because we should already have determined the
+ * final eval_at level for any PHV syntactically within this join.
*/
foreach(l, root->placeholder_list)
{
*
* In all cases, it's important to initialize the left_ec and right_ec
* fields of a mergejoinable clause, so that all possibly mergejoinable
- * expressions have representations in EquivalenceClasses. If
+ * expressions have representations in EquivalenceClasses. If
* process_equivalence is successful, it will take care of that;
* otherwise, we have to call initialize_mergeclause_eclasses to do it.
*/
* ORDER BY col ASC/DESC
* LIMIT 1)
* Given a suitable index on tab.col, this can be much faster than the
- * generic scan-all-the-rows aggregation plan. We can handle multiple
+ * generic scan-all-the-rows aggregation plan. We can handle multiple
* MIN/MAX aggregates by generating multiple subqueries, and their
- * orderings can be different. However, if the query contains any
+ * orderings can be different. However, if the query contains any
* non-optimizable aggregates, there's no point since we'll have to
* scan all the rows anyway.
*
*
* We don't handle GROUP BY or windowing, because our current
* implementations of grouping require looking at all the rows anyway, and
- * so there's not much point in optimizing MIN/MAX. (Note: relaxing
- * this would likely require some restructuring in grouping_planner(),
- * since it performs assorted processing related to these features between
- * calling preprocess_minmax_aggregates and optimize_minmax_aggregates.)
+ * so there's not much point in optimizing MIN/MAX. (Note: relaxing this
+ * would likely require some restructuring in grouping_planner(), since it
+ * performs assorted processing related to these features between calling
+ * preprocess_minmax_aggregates and optimize_minmax_aggregates.)
*/
if (parse->groupClause || parse->hasWindowFuncs)
return;
/*
* Scan the tlist and HAVING qual to find all the aggregates and verify
- * all are MIN/MAX aggregates. Stop as soon as we find one that isn't.
+ * all are MIN/MAX aggregates. Stop as soon as we find one that isn't.
*/
aggs_list = NIL;
if (find_minmax_aggs_walker((Node *) tlist, &aggs_list))
* ordering operator.
*/
eqop = get_equality_op_for_ordering_op(mminfo->aggsortop, &reverse);
- if (!OidIsValid(eqop)) /* shouldn't happen */
+ if (!OidIsValid(eqop)) /* shouldn't happen */
elog(ERROR, "could not find equality operator for ordering operator %u",
mminfo->aggsortop);
* We can use either an ordering that gives NULLS FIRST or one that
* gives NULLS LAST; furthermore there's unlikely to be much
* performance difference between them, so it doesn't seem worth
- * costing out both ways if we get a hit on the first one. NULLS
+ * costing out both ways if we get a hit on the first one. NULLS
* FIRST is more likely to be available if the operator is a
* reverse-sort operator, so try that first if reverse.
*/
/*
* We're done until path generation is complete. Save info for later.
- * (Setting root->minmax_aggs non-NIL signals we succeeded in making
- * index access paths for all the aggregates.)
+ * (Setting root->minmax_aggs non-NIL signals we succeeded in making index
+ * access paths for all the aggregates.)
*/
root->minmax_aggs = aggs_list;
}
mminfo->aggfnoid = aggref->aggfnoid;
mminfo->aggsortop = aggsortop;
mminfo->target = curTarget->expr;
- mminfo->subroot = NULL; /* don't compute path yet */
+ mminfo->subroot = NULL; /* don't compute path yet */
mminfo->path = NULL;
mminfo->pathcost = 0;
mminfo->param = NULL;
sortcl->eqop = eqop;
sortcl->sortop = sortop;
sortcl->nulls_first = nulls_first;
- sortcl->hashable = false; /* no need to make this accurate */
+ sortcl->hashable = false; /* no need to make this accurate */
parse->sortClause = list_make1(sortcl);
/* set up expressions for LIMIT 1 */
subroot->query_pathkeys = subroot->sort_pathkeys;
/*
- * Generate the best paths for this query, telling query_planner that
- * we have LIMIT 1.
+ * Generate the best paths for this query, telling query_planner that we
+ * have LIMIT 1.
*/
query_planner(subroot, parse->targetList, 1.0, 1.0,
&cheapest_path, &sorted_path, &dNumGroups);
exprCollation((Node *) mminfo->target));
/*
- * Make sure the initplan gets into the outer PlannerInfo, along with
- * any other initplans generated by the sub-planning run. We had to
- * include the outer PlannerInfo's pre-existing initplans into the
- * inner one's init_plans list earlier, so make sure we don't put back
- * any duplicate entries.
+ * Make sure the initplan gets into the outer PlannerInfo, along with any
+ * other initplans generated by the sub-planning run. We had to include
+ * the outer PlannerInfo's pre-existing initplans into the inner one's
+ * init_plans list earlier, so make sure we don't put back any duplicate
+ * entries.
*/
root->init_plans = list_concat_unique_ptr(root->init_plans,
subroot->init_plans);
/*
* Examine the targetlist and join tree, adding entries to baserel
* targetlists for all referenced Vars, and generating PlaceHolderInfo
- * entries for all referenced PlaceHolderVars. Restrict and join clauses
- * are added to appropriate lists belonging to the mentioned relations.
- * We also build EquivalenceClasses for provably equivalent expressions.
- * The SpecialJoinInfo list is also built to hold information about join
- * order restrictions. Finally, we form a target joinlist for
- * make_one_rel() to work from.
+ * entries for all referenced PlaceHolderVars. Restrict and join clauses
+ * are added to appropriate lists belonging to the mentioned relations. We
+ * also build EquivalenceClasses for provably equivalent expressions. The
+ * SpecialJoinInfo list is also built to hold information about join order
+ * restrictions. Finally, we form a target joinlist for make_one_rel() to
+ * work from.
*/
build_base_rel_tlists(root, tlist);
/*
* Examine any "placeholder" expressions generated during subquery pullup.
* Make sure that the Vars they need are marked as needed at the relevant
- * join level. This must be done before join removal because it might
+ * join level. This must be done before join removal because it might
* cause Vars or placeholders to be needed above a join when they weren't
* so marked before.
*/
inline_set_returning_functions(root);
/*
- * Check to see if any subqueries in the jointree can be merged into
- * this query.
+ * Check to see if any subqueries in the jointree can be merged into this
+ * query.
*/
parse->jointree = (FromExpr *)
pull_up_subqueries(root, (Node *) parse->jointree, NULL, NULL);
/*
- * If this is a simple UNION ALL query, flatten it into an appendrel.
- * We do this now because it requires applying pull_up_subqueries to the
- * leaf queries of the UNION ALL, which weren't touched above because they
+ * If this is a simple UNION ALL query, flatten it into an appendrel. We
+ * do this now because it requires applying pull_up_subqueries to the leaf
+ * queries of the UNION ALL, which weren't touched above because they
* weren't referenced by the jointree (they will be after we do this).
*/
if (parse->setOperations)
plan = (Plan *) make_modifytable(parse->commandType,
parse->canSetTag,
- list_make1_int(parse->resultRelation),
+ list_make1_int(parse->resultRelation),
list_make1(plan),
returningLists,
rowMarks,
/*
* Determine eval cost of the index expressions, if any. We need to
- * charge twice that amount for each tuple comparison that happens
- * during the sort, since tuplesort.c will have to re-evaluate the
- * index expressions each time. (XXX that's pretty inefficient...)
+ * charge twice that amount for each tuple comparison that happens during
+ * the sort, since tuplesort.c will have to re-evaluate the index
+ * expressions each time. (XXX that's pretty inefficient...)
*/
cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
*
* If a query's setOperations tree consists entirely of simple UNION ALL
* operations, flatten it into an append relation, which we can process more
- * intelligently than the general setops case. Otherwise, do nothing.
+ * intelligently than the general setops case. Otherwise, do nothing.
*
* In most cases, this can succeed only for a top-level query, because for a
* subquery in FROM, the parent query's invocation of pull_up_subqueries would
/*
* Make a copy of the leftmost RTE and add it to the rtable. This copy
- * will represent the leftmost leaf query in its capacity as a member
- * of the appendrel. The original will represent the appendrel as a
- * whole. (We must do things this way because the upper query's Vars
- * have to be seen as referring to the whole appendrel.)
+ * will represent the leftmost leaf query in its capacity as a member of
+ * the appendrel. The original will represent the appendrel as a whole.
+ * (We must do things this way because the upper query's Vars have to be
+ * seen as referring to the whole appendrel.)
*/
childRTE = copyObject(leftmostRTE);
parse->rtable = lappend(parse->rtable, childRTE);
parse->jointree->fromlist = list_make1(rtr);
/*
- * Now pretend the query has no setops. We must do this before trying
- * to do subquery pullup, because of Assert in pull_up_simple_subquery.
+ * Now pretend the query has no setops. We must do this before trying to
+ * do subquery pullup, because of Assert in pull_up_simple_subquery.
*/
parse->setOperations = NULL;
* never both, to the children of an outer join.
*
* Note that a SEMI join works like an inner join here: it's okay
- * to pass down both local and upper constraints. (There can't
- * be any upper constraints affecting its inner side, but it's
- * not worth having a separate code path to avoid passing them.)
+ * to pass down both local and upper constraints. (There can't be
+ * any upper constraints affecting its inner side, but it's not
+ * worth having a separate code path to avoid passing them.)
*
* At a FULL join we just punt and pass nothing down --- is it
* possible to be smarter?
pass_nonnullable_vars = local_nonnullable_vars;
pass_forced_null_vars = local_forced_null_vars;
}
- else if (jointype != JOIN_FULL) /* ie, LEFT or ANTI */
+ else if (jointype != JOIN_FULL) /* ie, LEFT or ANTI */
{
/* can't pass local constraints to non-nullable side */
pass_nonnullable_rels = nonnullable_rels;
* Although this can be invoked on its own, it's mainly intended as a helper
* for eval_const_expressions(), and that context drives several design
* decisions. In particular, if the input is already AND/OR flat, we must
- * preserve that property. We also don't bother to recurse in situations
+ * preserve that property. We also don't bother to recurse in situations
* where we can assume that lower-level executions of eval_const_expressions
* would already have simplified sub-clauses of the input.
*
* The difference between this and a simple make_notclause() is that this
- * tries to get rid of the NOT node by logical simplification. It's clearly
+ * tries to get rid of the NOT node by logical simplification. It's clearly
* always a win if the NOT node can be eliminated altogether. However, our
* use of DeMorgan's laws could result in having more NOT nodes rather than
* fewer. We do that unconditionally anyway, because in WHERE clauses it's
switch (expr->boolop)
{
- /*--------------------
- * Apply DeMorgan's Laws:
- * (NOT (AND A B)) => (OR (NOT A) (NOT B))
- * (NOT (OR A B)) => (AND (NOT A) (NOT B))
- * i.e., swap AND for OR and negate each subclause.
- *
- * If the input is already AND/OR flat and has no NOT
- * directly above AND or OR, this transformation preserves
- * those properties. For example, if no direct child of
- * the given AND clause is an AND or a NOT-above-OR, then
- * the recursive calls of negate_clause() can't return any
- * OR clauses. So we needn't call pull_ors() before
- * building a new OR clause. Similarly for the OR case.
- *--------------------
- */
+ /*--------------------
+ * Apply DeMorgan's Laws:
+ * (NOT (AND A B)) => (OR (NOT A) (NOT B))
+ * (NOT (OR A B)) => (AND (NOT A) (NOT B))
+ * i.e., swap AND for OR and negate each subclause.
+ *
+ * If the input is already AND/OR flat and has no NOT
+ * directly above AND or OR, this transformation preserves
+ * those properties. For example, if no direct child of
+ * the given AND clause is an AND or a NOT-above-OR, then
+ * the recursive calls of negate_clause() can't return any
+ * OR clauses. So we needn't call pull_ors() before
+ * building a new OR clause. Similarly for the OR case.
+ *--------------------
+ */
case AND_EXPR:
{
List *nargs = NIL;
}
break;
case NOT_EXPR:
+
/*
* NOT underneath NOT: they cancel. We assume the
* input is already simplified, so no need to recurse.
break;
case T_BooleanTest:
{
- BooleanTest *expr = (BooleanTest *) node;
- BooleanTest *newexpr = makeNode(BooleanTest);
+ BooleanTest *expr = (BooleanTest *) node;
+ BooleanTest *newexpr = makeNode(BooleanTest);
newexpr->arg = expr->arg;
switch (expr->booltesttype)
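(Illustrative aside, not part of the patch: a self-contained sketch of the DeMorgan transformation that negate_clause() applies, shown on a toy boolean AST. The ToyExpr type and helper functions are hypothetical and are not PostgreSQL APIs.)

#include <stdio.h>
#include <stdlib.h>

typedef enum {T_TOYVAR, T_TOYNOT, T_TOYAND, T_TOYOR} ToyKind;

typedef struct ToyExpr
{
	ToyKind		kind;
	const char *name;			/* used only for T_TOYVAR */
	struct ToyExpr *left;		/* only child for T_TOYNOT */
	struct ToyExpr *right;
} ToyExpr;

static ToyExpr *
mk(ToyKind kind, const char *name, ToyExpr *l, ToyExpr *r)
{
	ToyExpr    *e = malloc(sizeof(ToyExpr));

	e->kind = kind;
	e->name = name;
	e->left = l;
	e->right = r;
	return e;
}

/* NOT (A AND B) => (NOT A) OR (NOT B); NOT (A OR B) => (NOT A) AND (NOT B) */
static ToyExpr *
negate(ToyExpr *e)
{
	switch (e->kind)
	{
		case T_TOYVAR:
			return mk(T_TOYNOT, NULL, e, NULL);
		case T_TOYNOT:
			return e->left;		/* NOT underneath NOT: they cancel */
		case T_TOYAND:
			return mk(T_TOYOR, NULL, negate(e->left), negate(e->right));
		case T_TOYOR:
			return mk(T_TOYAND, NULL, negate(e->left), negate(e->right));
	}
	return NULL;				/* not reached */
}

static void
print_expr(const ToyExpr *e)
{
	switch (e->kind)
	{
		case T_TOYVAR:
			printf("%s", e->name);
			break;
		case T_TOYNOT:
			printf("NOT ");
			print_expr(e->left);
			break;
		case T_TOYAND:
		case T_TOYOR:
			printf("(");
			print_expr(e->left);
			printf(e->kind == T_TOYAND ? " AND " : " OR ");
			print_expr(e->right);
			printf(")");
			break;
	}
}

int
main(void)
{
	ToyExpr    *a = mk(T_TOYVAR, "a", NULL, NULL);
	ToyExpr    *b = mk(T_TOYVAR, "b", NULL, NULL);
	ToyExpr    *and_ab = mk(T_TOYAND, NULL, a, b);

	print_expr(negate(and_ab));	/* prints (NOT a OR NOT b) */
	printf("\n");
	return 0;
}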
* Routines to preprocess the parse tree target list
*
* For INSERT and UPDATE queries, the targetlist must contain an entry for
- * each attribute of the target relation in the correct order. For all query
+ * each attribute of the target relation in the correct order. For all query
* types, we may need to add junk tlist entries for Vars used in the RETURNING
* list and row ID information needed for EvalPlanQual checking.
*
/*
* Add necessary junk columns for rowmarked rels. These values are needed
* for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual
- * rechecking. See comments for PlanRowMark in plannodes.h.
+ * rechecking. See comments for PlanRowMark in plannodes.h.
*/
foreach(lc, root->rowMarks)
{
* The Vars are always generated with varno 0.
*/
static List *
-generate_append_tlist(List *colTypes, List*colCollations, bool flag,
+generate_append_tlist(List *colTypes, List *colCollations, bool flag,
List *input_plans,
List *refnames_tlist)
{
*
* Whenever a function is eliminated from the expression by means of
* constant-expression evaluation or inlining, we add the function to
- * root->glob->invalItems. This ensures the plan is known to depend on
+ * root->glob->invalItems. This ensures the plan is known to depend on
* such functions, even though they aren't referenced anymore.
*
* We assume that the tree has already been type-checked and contains
context);
/*
- * Use negate_clause() to see if we can simplify away
- * the NOT.
+ * Use negate_clause() to see if we can simplify away the
+ * NOT.
*/
return negate_clause(arg);
}
makeConst(OIDOID, -1, InvalidOid, sizeof(Oid),
ObjectIdGetDatum(intypioparam),
false, true),
- makeConst(INT4OID, -1, InvalidOid, sizeof(int32),
- Int32GetDatum(-1),
- false, true));
+ makeConst(INT4OID, -1, InvalidOid, sizeof(int32),
+ Int32GetDatum(-1),
+ false, true));
simple = simplify_function(infunc,
expr->resulttype, -1,
/*
* If we can simplify the input to a constant, then we don't need the
* CollateExpr node at all: just change the constcollid field of the
- * Const node. Otherwise, replace the CollateExpr with a RelabelType.
- * (We do that so as to improve uniformity of expression representation
- * and thus simplify comparison of expressions.)
+ * Const node. Otherwise, replace the CollateExpr with a RelabelType.
+ * (We do that so as to improve uniformity of expression
+ * representation and thus simplify comparison of expressions.)
*/
CollateExpr *collate = (CollateExpr *) node;
Node *arg;
* placeholder nodes, so that we have the opportunity to reduce
* constant test conditions. For example this allows
* CASE 0 WHEN 0 THEN 1 ELSE 1/0 END
- * to reduce to 1 rather than drawing a divide-by-0 error. Note
+ * to reduce to 1 rather than drawing a divide-by-0 error. Note
* that when the test expression is constant, we don't have to
* include it in the resulting CASE; for example
* CASE 0 WHEN x THEN y ELSE z END
/*
* We can remove null constants from the list. For a non-null
* constant, if it has not been preceded by any other
- * non-null-constant expressions then it is the result. Otherwise,
- * it's the next argument, but we can drop following arguments
- * since they will never be reached.
+ * non-null-constant expressions then it is the result.
+ * Otherwise, it's the next argument, but we can drop following
+ * arguments since they will never be reached.
*/
if (IsA(e, Const))
{
if (DatumGetBool(((Const *) leftop)->constvalue))
return rightop; /* true = foo */
else
- return negate_clause(rightop); /* false = foo */
+ return negate_clause(rightop); /* false = foo */
}
else
{
if (DatumGetBool(((Const *) leftop)->constvalue))
- return negate_clause(rightop); /* true <> foo */
+ return negate_clause(rightop); /* true <> foo */
else
return rightop; /* false <> foo */
}
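(Illustrative aside, not part of the patch: the folding rule applied above for boolean equality and inequality against a constant, restated as a tiny standalone program. Representing the non-constant operand as a string is an assumption for demonstration; the real code negates planner expressions via negate_clause().)

#include <stdio.h>
#include <stdbool.h>

/*
 * Fold "const_val = expr" (is_equality) or "const_val <> expr" into either
 * the expression itself or its negation, printing the result.
 */
static void
fold_bool_comparison(bool const_val, bool is_equality, const char *expr)
{
	bool		keep_as_is = (const_val == is_equality);

	/* true = foo  -> foo,      false = foo  -> NOT foo */
	/* true <> foo -> NOT foo,  false <> foo -> foo     */
	if (keep_as_is)
		printf("%s\n", expr);
	else
		printf("NOT %s\n", expr);
}

int
main(void)
{
	fold_bool_comparison(true, true, "foo");	/* foo */
	fold_bool_comparison(false, true, "foo");	/* NOT foo */
	fold_bool_comparison(true, false, "foo");	/* NOT foo */
	fold_bool_comparison(false, false, "foo");	/* foo */
	return 0;
}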
fexpr->funcresulttype = result_type;
fexpr->funcretset = false;
fexpr->funcformat = COERCE_DONTCARE; /* doesn't matter */
- fexpr->funccollid = result_collid; /* doesn't matter */
+ fexpr->funccollid = result_collid; /* doesn't matter */
fexpr->inputcollid = input_collid;
fexpr->args = args;
fexpr->location = -1;
MemoryContextDelete(mycxt);
/*
- * If the result is of a collatable type, force the result to expose
- * the correct collation. In most cases this does not matter, but
- * it's possible that the function result is used directly as a sort key
- * or in other places where we expect exprCollation() to tell the truth.
+ * If the result is of a collatable type, force the result to expose the
+ * correct collation. In most cases this does not matter, but it's
+ * possible that the function result is used directly as a sort key or in
+ * other places where we expect exprCollation() to tell the truth.
*/
if (OidIsValid(result_collid))
{
- Oid exprcoll = exprCollation(newexpr);
+ Oid exprcoll = exprCollation(newexpr);
if (OidIsValid(exprcoll) && exprcoll != result_collid)
{
- CollateExpr *newnode = makeNode(CollateExpr);
+ CollateExpr *newnode = makeNode(CollateExpr);
newnode->arg = (Expr *) newexpr;
newnode->collOid = result_collid;
oldcxt = MemoryContextSwitchTo(mycxt);
/*
- * When we call eval_const_expressions below, it might try to add items
- * to root->glob->invalItems. Since it is running in the temp context,
- * those items will be in that context, and will need to be copied out
- * if we're successful. Temporarily reset the list so that we can keep
- * those items separate from the pre-existing list contents.
+ * When we call eval_const_expressions below, it might try to add items to
+ * root->glob->invalItems. Since it is running in the temp context, those
+ * items will be in that context, and will need to be copied out if we're
+ * successful. Temporarily reset the list so that we can keep those items
+ * separate from the pre-existing list contents.
*/
saveInvalItems = root->glob->invalItems;
root->glob->invalItems = NIL;
goto fail;
/*
- * Set up to handle parameters while parsing the function body. We
- * can use the FuncExpr just created as the input for
+ * Set up to handle parameters while parsing the function body. We can
+ * use the FuncExpr just created as the input for
* prepare_sql_fn_parse_info.
*/
pinfo = prepare_sql_fn_parse_info(func_tuple,
querytree_list = pg_analyze_and_rewrite_params(linitial(raw_parsetree_list),
src,
- (ParserSetupHook) sql_fn_parser_setup,
+ (ParserSetupHook) sql_fn_parser_setup,
pinfo);
if (list_length(querytree_list) != 1)
goto fail;
ReleaseSysCache(func_tuple);
/*
- * We don't have to fix collations here because the upper query is
- * already parsed, ie, the collations in the RTE are what count.
+ * We don't have to fix collations here because the upper query is already
+ * parsed, ie, the collations in the RTE are what count.
*/
/*
else
{
/* We'll need to insert a Sort node, so include cost for that */
- Path sort_path; /* dummy for result of cost_sort */
+ Path sort_path; /* dummy for result of cost_sort */
cost_sort(&sort_path,
root,
ForeignPath *pathnode = makeNode(ForeignPath);
RangeTblEntry *rte;
FdwRoutine *fdwroutine;
- FdwPlan *fdwplan;
+ FdwPlan *fdwplan;
pathnode->path.pathtype = T_ForeignScan;
pathnode->path.parent = rel;
- pathnode->path.pathkeys = NIL; /* result is always unordered */
+ pathnode->path.pathkeys = NIL; /* result is always unordered */
/* Get FDW's callback info */
rte = planner_rt_fetch(rel->relid, root);
/* Local functions */
static Relids find_placeholders_recurse(PlannerInfo *root, Node *jtnode);
static void find_placeholders_in_qual(PlannerInfo *root, Node *qual,
- Relids relids);
+ Relids relids);
/*
{
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(jtnode));
- jtrelids = NULL; /* keep compiler quiet */
+ jtrelids = NULL; /* keep compiler quiet */
}
return jtrelids;
}
else if (OidIsValid(clause_op_negator))
{
clause_tuple = SearchSysCache3(AMOPOPID,
- ObjectIdGetDatum(clause_op_negator),
+ ObjectIdGetDatum(clause_op_negator),
CharGetDatum(AMOP_SEARCH),
ObjectIdGetDatum(opfamily_id));
if (HeapTupleIsValid(clause_tuple))
* entries might now be arbitrary expressions, not just Vars. This affects
* this function in one important way: we might find ourselves inserting
* SubLink expressions into subqueries, and we must make sure that their
- * Query.hasSubLinks fields get set to TRUE if so. If there are any
+ * Query.hasSubLinks fields get set to TRUE if so. If there are any
* SubLinks in the join alias lists, the outer Query should already have
* hasSubLinks = TRUE, so this is only relevant to un-flattened subqueries.
*
* columns. Add a suitable hint if that seems to be the problem,
* because the main error message is quite misleading for this case.
* (If there's no stmtcols, you'll get something about data type
- * mismatch, which is less misleading so we don't worry about giving
- * a hint in that case.)
+ * mismatch, which is less misleading so we don't worry about giving a
+ * hint in that case.)
*/
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
* return -1 if expression isn't a RowExpr or a Var referencing one.
*
* This is currently used only for hint purposes, so we aren't terribly
- * tense about recognizing all possible cases. The Var case is interesting
+ * tense about recognizing all possible cases. The Var case is interesting
* because that's what we'll get in the INSERT ... SELECT (...) case.
*/
static int
/*
* We must assign collations now because assign_query_collations
* doesn't process rangetable entries. We just assign all the
- * collations independently in each row, and don't worry about
- * whether they are consistent vertically either.
+ * collations independently in each row, and don't worry about whether
+ * they are consistent vertically either.
*/
assign_list_collations(pstate, newsublist);
* Recursively transform leaves and internal nodes of a set-op tree
*
* In addition to returning the transformed node, if targetlist isn't NULL
- * then we return a list of its non-resjunk TargetEntry nodes. For a leaf
+ * then we return a list of its non-resjunk TargetEntry nodes. For a leaf
* set-op node these are the actual targetlist entries; otherwise they are
* dummy entries created to carry the type, typmod, collation, and location
* (for error messages) of each output column of the set-op node. This info
* child query's semantics.
*
* If a child expression is an UNKNOWN-type Const or Param, we
- * want to replace it with the coerced expression. This can only
+ * want to replace it with the coerced expression. This can only
* happen when the child is a leaf set-op node. It's safe to
* replace the expression because if the child query's semantics
* depended on the type of this output column, it'd have already
* collation.)
*/
rescolcoll = select_common_collation(pstate,
- list_make2(lcolnode, rcolnode),
- (op->op == SETOP_UNION && op->all));
+ list_make2(lcolnode, rcolnode),
+ (op->op == SETOP_UNION && op->all));
/* emit results */
op->colTypes = lappend_oid(op->colTypes, rescoltype);
rescolnode->collation = rescolcoll;
rescolnode->location = bestlocation;
restle = makeTargetEntry((Expr *) rescolnode,
- 0, /* no need to set resno */
+ 0, /* no need to set resno */
NULL,
false);
*targetlist = lappend(*targetlist, restle);
case RTE_RELATION:
if (rte->relkind == RELKIND_FOREIGN_TABLE)
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE cannot be used with foreign table \"%s\"",
- rte->eref->aliasname),
- parser_errposition(pstate, thisrel->location)));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE/SHARE cannot be used with foreign table \"%s\"",
+ rte->eref->aliasname),
+ parser_errposition(pstate, thisrel->location)));
applyLockingClause(qry, i,
lc->forUpdate, lc->noWait,
pushedDown);
/*
* Check whether the Var is known functionally dependent on the GROUP
- * BY columns. If so, we can allow the Var to be used, because the
+ * BY columns. If so, we can allow the Var to be used, because the
* grouping is really a no-op for this table. However, this deduction
* depends on one or more constraints of the table, so we have to add
* those constraints to the query's constraintDeps list, because it's
* Because this is a pretty expensive check, and will have the same
* outcome for all columns of a table, we remember which RTEs we've
* already proven functional dependency for in the func_grouped_rels
- * list. This test also prevents us from adding duplicate entries
- * to the constraintDeps list.
+ * list. This test also prevents us from adding duplicate entries to
+ * the constraintDeps list.
*/
if (list_member_int(*context->func_grouped_rels, var->varno))
- return false; /* previously proven acceptable */
+ return false; /* previously proven acceptable */
Assert(var->varno > 0 &&
(int) var->varno <= list_length(context->pstate->p_rtable));
{
*context->func_grouped_rels =
lappend_int(*context->func_grouped_rels, var->varno);
- return false; /* acceptable */
+ return false; /* acceptable */
}
}
else if (l_colvar->vartypmod != outcoltypmod)
l_node = (Node *) makeRelabelType((Expr *) l_colvar,
outcoltype, outcoltypmod,
- InvalidOid, /* fixed below */
+ InvalidOid, /* fixed below */
COERCE_IMPLICIT_CAST);
else
l_node = (Node *) l_colvar;
else if (r_colvar->vartypmod != outcoltypmod)
r_node = (Node *) makeRelabelType((Expr *) r_colvar,
outcoltype, outcoltypmod,
- InvalidOid, /* fixed below */
+ InvalidOid, /* fixed below */
COERCE_IMPLICIT_CAST);
else
r_node = (Node *) r_colvar;
/*
* Apply assign_expr_collations to fix up the collation info in the
- * coercion and CoalesceExpr nodes, if we made any. This must be done
- * now so that the join node's alias vars show correct collation info.
+ * coercion and CoalesceExpr nodes, if we made any. This must be done now
+ * so that the join node's alias vars show correct collation info.
*/
assign_expr_collations(pstate, res_node);
{
/*
* If we have a COLLATE clause, we have to push the coercion
- * underneath the COLLATE. This is really ugly, but there is little
+ * underneath the COLLATE. This is really ugly, but there is little
* choice because the above hacks on Consts and Params wouldn't happen
* otherwise.
*/
Oid sourceElem;
if ((targetElem = get_element_type(targetTypeId)) != InvalidOid &&
- (sourceElem = get_base_element_type(sourceTypeId)) != InvalidOid)
+ (sourceElem = get_base_element_type(sourceTypeId)) != InvalidOid)
{
CoercionPathType elempathtype;
Oid elemfuncid;
static bool
typeIsOfTypedTable(Oid reltypeId, Oid reloftypeId)
{
- Oid relid = typeidTypeRelid(reltypeId);
- bool result = false;
+ Oid relid = typeidTypeRelid(reltypeId);
+ bool result = false;
if (relid)
{
* 1. The output collation of each expression node, or InvalidOid if it
* returns a noncollatable data type. This can also be InvalidOid if the
* result type is collatable but the collation is indeterminate.
- * 2. The collation to be used in executing each function. InvalidOid means
+ * 2. The collation to be used in executing each function. InvalidOid means
* that there are no collatable inputs or their collation is indeterminate.
* This value is only stored in node types that might call collation-using
* functions.
*
* You might think we could get away with storing only one collation per
- * node, but the two concepts really need to be kept distinct. Otherwise
+ * node, but the two concepts really need to be kept distinct. Otherwise
* it's too confusing when a function produces a collatable output type but
* has no collatable inputs or produces noncollatable output from collatable
* inputs.
*
* Cases with indeterminate collation might result in an error being thrown
- * at runtime. If we knew exactly which functions require collation
+ * at runtime. If we knew exactly which functions require collation
* information, we could throw those errors at parse time instead.
*
* Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
static bool assign_query_collations_walker(Node *node, ParseState *pstate);
static bool assign_collations_walker(Node *node,
- assign_collations_context *context);
+ assign_collations_context *context);
/*
return false;
/*
- * We don't want to recurse into a set-operations tree; it's already
- * been fully processed in transformSetOperationStmt.
+ * We don't want to recurse into a set-operations tree; it's already been
+ * fully processed in transformSetOperationStmt.
*/
if (IsA(node, SetOperationStmt))
return false;
foreach(lc, exprs)
{
- Node *node = (Node *) lfirst(lc);
+ Node *node = (Node *) lfirst(lc);
assign_expr_collations(pstate, node);
}
* Recursive guts of collation processing.
*
* Nodes with no children (eg, Vars, Consts, Params) must have been marked
- * when built. All upper-level nodes are marked here.
+ * when built. All upper-level nodes are marked here.
*
* Note: if this is invoked directly on a List, it will attempt to infer a
* common collation for all the list members. In particular, it will throw
return false;
/*
- * Prepare for recursion. For most node types, though not all, the
- * first thing we do is recurse to process all nodes below this one.
- * Each level of the tree has its own local context.
+ * Prepare for recursion. For most node types, though not all, the first
+ * thing we do is recurse to process all nodes below this one. Each level
+ * of the tree has its own local context.
*/
loccontext.pstate = context->pstate;
loccontext.collation = InvalidOid;
{
/*
* CaseExpr is a special case because we do not want to
- * recurse into the test expression (if any). It was
- * already marked with collations during transformCaseExpr,
- * and furthermore its collation is not relevant to the
- * result of the CASE --- only the output expressions are.
- * So we can't use expression_tree_walker here.
+ * recurse into the test expression (if any). It was already
+ * marked with collations during transformCaseExpr, and
+ * furthermore its collation is not relevant to the result of
+ * the CASE --- only the output expressions are. So we can't
+ * use expression_tree_walker here.
*/
CaseExpr *expr = (CaseExpr *) node;
Oid typcollation;
CaseWhen *when = (CaseWhen *) lfirst(lc);
Assert(IsA(when, CaseWhen));
+
/*
* The condition expressions mustn't affect the CASE's
* result collation either; but since they are known to
case T_RowExpr:
{
/*
- * RowExpr is a special case because the subexpressions
- * are independent: we don't want to complain if some of
- * them have incompatible explicit collations.
+ * RowExpr is a special case because the subexpressions are
+ * independent: we don't want to complain if some of them have
+ * incompatible explicit collations.
*/
- RowExpr *expr = (RowExpr *) node;
+ RowExpr *expr = (RowExpr *) node;
assign_list_collations(context->pstate, expr->args);
* has a collation, we can just stop here: this node has no
* impact on the collation of its parent.
*/
- return false; /* done */
+ return false; /* done */
}
case T_RowCompareExpr:
{
forboth(l, expr->largs, r, expr->rargs)
{
- Node *le = (Node *) lfirst(l);
- Node *re = (Node *) lfirst(r);
- Oid coll;
+ Node *le = (Node *) lfirst(l);
+ Node *re = (Node *) lfirst(r);
+ Oid coll;
coll = select_common_collation(context->pstate,
list_make2(le, re),
expr->inputcollids = colls;
/*
- * Since the result is always boolean and therefore never
- * has a collation, we can just stop here: this node has no
- * impact on the collation of its parent.
+ * Since the result is always boolean and therefore never has
+ * a collation, we can just stop here: this node has no impact
+ * on the collation of its parent.
*/
- return false; /* done */
+ return false; /* done */
}
case T_CoerceToDomain:
{
* If the domain declaration included a non-default COLLATE
* spec, then use that collation as the output collation of
* the coercion. Otherwise allow the input collation to
- * bubble up. (The input should be of the domain's base
- * type, therefore we don't need to worry about it not being
+ * bubble up. (The input should be of the domain's base type,
+ * therefore we don't need to worry about it not being
* collatable when the domain is.)
*/
CoerceToDomain *expr = (CoerceToDomain *) node;
- Oid typcollation = get_typcollation(expr->resulttype);
+ Oid typcollation = get_typcollation(expr->resulttype);
/* ... but first, recurse */
(void) expression_tree_walker(node,
/*
* TargetEntry can have only one child, and should bubble that
- * state up to its parent. We can't use the general-case code
+ * state up to its parent. We can't use the general-case code
* below because exprType and friends don't work on TargetEntry.
*/
collation = loccontext.collation;
* There are some cases where there might not be a failure, for
* example if the planner chooses to use hash aggregation instead
* of sorting for grouping; but it seems better to predictably
- * throw an error. (Compare transformSetOperationTree, which will
- * throw error for indeterminate collation of set-op columns,
- * even though the planner might be able to implement the set-op
+ * throw an error. (Compare transformSetOperationTree, which will
+ * throw error for indeterminate collation of set-op columns, even
+ * though the planner might be able to implement the set-op
* without sorting.)
*/
if (strength == COLLATE_CONFLICT &&
(void) expression_tree_walker(node,
assign_collations_walker,
(void *) &loccontext);
+
/*
* When we're invoked on a query's jointree, we don't need to do
* anything with join nodes except recurse through them to process
case T_CaseTestExpr:
case T_SetToDefault:
case T_CurrentOfExpr:
+
/*
* General case for childless expression nodes. These should
* already have a collation assigned; it is not this function's
/*
* Note: in most cases, there will be an assigned collation
* whenever type_is_collatable(exprType(node)); but an exception
- * occurs for a Var referencing a subquery output column for
- * which a unique collation was not determinable. That may lead
- * to a runtime failure if a collation-sensitive function is
- * applied to the Var.
+ * occurs for a Var referencing a subquery output column for which
+ * a unique collation was not determinable. That may lead to a
+ * runtime failure if a collation-sensitive function is applied to
+ * the Var.
*/
if (OidIsValid(collation))
default:
{
/*
- * General case for most expression nodes with children.
- * First recurse, then figure out what to assign here.
+ * General case for most expression nodes with children. First
+ * recurse, then figure out what to assign here.
*/
- Oid typcollation;
+ Oid typcollation;
(void) expression_tree_walker(node,
assign_collations_walker,
}
/*
- * Save the result collation into the expression node.
- * If the state is COLLATE_CONFLICT, we'll set the collation
- * to InvalidOid, which might result in an error at runtime.
+ * Save the result collation into the expression node. If the
+ * state is COLLATE_CONFLICT, we'll set the collation to
+ * InvalidOid, which might result in an error at runtime.
*/
if (strength == COLLATE_CONFLICT)
exprSetCollation(node, InvalidOid);
cte->ctequery = (Node *) query;
/*
- * Check that we got something reasonable. These first two cases should
+ * Check that we got something reasonable. These first two cases should
* be prevented by the grammar.
*/
if (!IsA(query, Query))
continue;
varattno++;
Assert(varattno == te->resno);
- if (lctyp == NULL || lctypmod == NULL || lccoll == NULL) /* shouldn't happen */
+ if (lctyp == NULL || lctypmod == NULL || lccoll == NULL) /* shouldn't happen */
elog(ERROR, "wrong number of output columns in WITH");
texpr = (Node *) te->expr;
if (exprType(texpr) != lfirst_oid(lctyp) ||
lctypmod = lnext(lctypmod);
lccoll = lnext(lccoll);
}
- if (lctyp != NULL || lctypmod != NULL || lccoll != NULL) /* shouldn't happen */
+ if (lctyp != NULL || lctypmod != NULL || lccoll != NULL) /* shouldn't happen */
elog(ERROR, "wrong number of output columns in WITH");
}
}
CommonTableExpr *cte = cstate->items[i].cte;
SelectStmt *stmt = (SelectStmt *) cte->ctequery;
- Assert(!IsA(stmt, Query)); /* not analyzed yet */
+ Assert(!IsA(stmt, Query)); /* not analyzed yet */
/* Ignore items that weren't found to be recursive */
if (!cte->cterecursive)
typenameTypeIdAndMod(pstate, tc->typeName,
&targetType, &targetTypmod);
+
/*
* If target is a domain over array, work with the base
* array type here. transformTypeCast below will cast the
/*
* Run collation assignment on the test expression so that we know
- * what collation to mark the placeholder with. In principle we
- * could leave it to parse_collate.c to do that later, but propagating
- * the result to the CaseTestExpr would be unnecessarily complicated.
+ * what collation to mark the placeholder with. In principle we could
+ * leave it to parse_collate.c to do that later, but propagating the
+ * result to the CaseTestExpr would be unnecessarily complicated.
*/
assign_expr_collations(pstate, arg);
transformCollateClause(ParseState *pstate, CollateClause *c)
{
CollateExpr *newc;
- Oid argtype;
+ Oid argtype;
newc = makeNode(CollateExpr);
newc->arg = (Expr *) transformExpr(pstate, c->arg);
argtype = exprType((Node *) newc->arg);
+
/*
- * The unknown type is not collatable, but coerce_type() takes
- * care of it separately, so we'll let it go here.
+ * The unknown type is not collatable, but coerce_type() takes care of it
+ * separately, so we'll let it go here.
*/
if (!type_is_collatable(argtype) && argtype != UNKNOWNOID)
ereport(ERROR,
rcexpr->rctype = rctype;
rcexpr->opnos = opnos;
rcexpr->opfamilies = opfamilies;
- rcexpr->inputcollids = NIL; /* assign_expr_collations will fix this */
+ rcexpr->inputcollids = NIL; /* assign_expr_collations will fix this */
rcexpr->largs = largs;
rcexpr->rargs = rargs;
errmsg("function %s does not exist",
func_signature_string(funcname, nargs, argnames,
actual_arg_types)),
- errhint("No aggregate function matches the given name and argument types. "
- "Perhaps you misplaced ORDER BY; ORDER BY must appear "
- "after all regular arguments of the aggregate."),
+ errhint("No aggregate function matches the given name and argument types. "
+ "Perhaps you misplaced ORDER BY; ORDER BY must appear "
+ "after all regular arguments of the aggregate."),
parser_errposition(pstate, location)));
}
else
case COERCION_PATH_COERCEVIAIO:
if ((sourceType == RECORDOID ||
ISCOMPLEX(sourceType)) &&
- TypeCategory(targetType) == TYPCATEGORY_STRING)
+ TypeCategory(targetType) == TYPCATEGORY_STRING)
iscoercion = false;
else
iscoercion = true;
* If the input is a domain, smash to base type, and extract the actual
* typmod to be applied to the base type. Subscripting a domain is an
* operation that necessarily works on the base array type, not the domain
- * itself. (Note that we provide no method whereby the creator of a
+ * itself. (Note that we provide no method whereby the creator of a
* domain over an array type could hide its ability to be subscripted.)
*/
*arrayType = getBaseTypeAndTypmod(*arrayType, arrayTypmod);
/*
* Caller may or may not have bothered to determine elementType. Note
- * that if the caller did do so, arrayType/arrayTypMod must be as
- * modified by transformArrayType, ie, smash domain to base type.
+ * that if the caller did do so, arrayType/arrayTypMod must be as modified
+ * by transformArrayType, ie, smash domain to base type.
*/
if (!OidIsValid(elementType))
elementType = transformArrayType(&arrayType, &arrayTypMod);
con = makeConst(typeid,
-1, /* typmod -1 is OK for all cases */
- InvalidOid, /* all cases are uncollatable types */
+ InvalidOid, /* all cases are uncollatable types */
typelen,
val,
false,
/*
* If the datatype is an array, then we can use array_lt and friends ...
* but only if there are suitable operators for the element type.
- * Likewise, array types are only hashable if the element type is.
- * Testing all three operator IDs here should be redundant, but let's do
- * it anyway.
+ * Likewise, array types are only hashable if the element type is. Testing
+ * all three operator IDs here should be redundant, but let's do it
+ * anyway.
*/
if (lt_opr == ARRAY_LT_OP ||
eq_opr == ARRAY_EQ_OP ||
/*
* This module always sets a Param's collation to be the default for
- * its datatype. If that's not what you want, you should be using
- * the more general parser substitution hooks.
+ * its datatype. If that's not what you want, you should be using the
+ * more general parser substitution hooks.
*/
param->paramcollid = get_typcollation(param->paramtype);
*/
if (IsA(cte->ctequery, Query))
{
- Query *ctequery = (Query *) cte->ctequery;
+ Query *ctequery = (Query *) cte->ctequery;
if (ctequery->commandType != CMD_SELECT &&
ctequery->returningList == NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("WITH query \"%s\" does not have a RETURNING clause",
- cte->ctename),
+ errmsg("WITH query \"%s\" does not have a RETURNING clause",
+ cte->ctename),
parser_errposition(pstate, rv->location)));
}
* what type the Const claims to be.
*/
*colvars = lappend(*colvars,
- makeNullConst(INT4OID, -1, InvalidOid));
+ makeNullConst(INT4OID, -1, InvalidOid));
}
}
continue;
Var *varnode;
varnode = makeVar(rtindex, attr->attnum,
- attr->atttypid, attr->atttypmod, attr->attcollation,
+ attr->atttypid, attr->atttypmod, attr->attcollation,
sublevels_up);
varnode->location = location;
parser_errposition(pstate, location)));
get_atttypetypmodcoll(typrelid, attnum,
- &fieldTypeId, &fieldTypMod, &fieldCollation);
+ &fieldTypeId, &fieldTypMod, &fieldCollation);
/* recurse to create appropriate RHS for field assign */
rhs = transformAssignmentIndirection(pstate,
/*
* Array normally has same collation as elements, but there's an
- * exception: we might be subscripting a domain over an array type.
- * In that case use collation of the base type.
+ * exception: we might be subscripting a domain over an array type. In
+ * that case use collation of the base type.
*/
if (arrayType == targetTypeId)
collationNeeded = targetCollation;
bool skipValidation,
bool isAddConstraint);
static void transformConstraintAttrs(CreateStmtContext *cxt,
- List *constraintList);
+ List *constraintList);
static void transformColumnType(CreateStmtContext *cxt, ColumnDef *column);
static void setSchemaName(char *context_schema, char **stmt_schema_name);
* If this is ALTER ADD COLUMN, make sure the sequence will be owned
* by the table's owner. The current user might be someone else
* (perhaps a superuser, or someone who's only a member of the owning
- * role), but the SEQUENCE OWNED BY mechanisms will bleat unless
- * table and sequence have exactly the same owning role.
+ * role), but the SEQUENCE OWNED BY mechanisms will bleat unless table
+ * and sequence have exactly the same owning role.
*/
if (cxt->rel)
seqstmt->ownerId = cxt->rel->rd_rel->relowner;
/* Copy comment on constraint */
if ((inhRelation->options & CREATE_TABLE_LIKE_COMMENTS) &&
(comment = GetComment(get_constraint_oid(RelationGetRelid(relation),
- n->conname, false),
+ n->conname, false),
ConstraintRelationId,
0)) != NULL)
{
/*
* If it's ALTER TABLE ADD CONSTRAINT USING INDEX, look up the index and
* verify it's usable, then extract the implied column name list. (We
- * will not actually need the column name list at runtime, but we need
- * it now to check for duplicate column entries below.)
+ * will not actually need the column name list at runtime, but we need it
+ * now to check for duplicate column entries below.)
*/
if (constraint->indexname != NULL)
{
if (OidIsValid(get_index_constraint(index_oid)))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("index \"%s\" is already associated with a constraint",
- index_name),
+ errmsg("index \"%s\" is already associated with a constraint",
+ index_name),
parser_errposition(cxt->pstate, constraint->location)));
/* Perform validity checks on the index */
parser_errposition(cxt->pstate, constraint->location)));
/*
- * It's probably unsafe to change a deferred index to non-deferred.
- * (A non-constraint index couldn't be deferred anyway, so this case
+ * It's probably unsafe to change a deferred index to non-deferred. (A
+ * non-constraint index couldn't be deferred anyway, so this case
* should never occur; no need to sweat, but let's check it.)
*/
if (!index_form->indimmediate && !constraint->deferrable)
parser_errposition(cxt->pstate, constraint->location)));
/*
- * Insist on it being a btree. That's the only kind that supports
+ * Insist on it being a btree. That's the only kind that supports
* uniqueness at the moment anyway; but we must have an index that
* exactly matches what you'd get from plain ADD CONSTRAINT syntax,
* else dump and reload will produce a different index (breaking
for (i = 0; i < index_form->indnatts; i++)
{
- int2 attnum = index_form->indkey.values[i];
+ int2 attnum = index_form->indkey.values[i];
Form_pg_attribute attform;
- char *attname;
- Oid defopclass;
+ char *attname;
+ Oid defopclass;
/*
* We shouldn't see attnum == 0 here, since we already rejected
- * expression indexes. If we do, SystemAttributeDefinition
- * will throw an error.
+ * expression indexes. If we do, SystemAttributeDefinition will
+ * throw an error.
*/
if (attnum > 0)
{
}
else
attform = SystemAttributeDefinition(attnum,
- heap_rel->rd_rel->relhasoids);
+ heap_rel->rd_rel->relhasoids);
attname = pstrdup(NameStr(attform->attname));
/*
- * Insist on default opclass and sort options. While the index
+ * Insist on default opclass and sort options. While the index
* would still work as a constraint with non-default settings, it
* might not provide exactly the same uniqueness semantics as
* you'd get from a normally-created constraint; and there's also
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("index \"%s\" does not have default sorting behavior", index_name),
errdetail("Cannot create a PRIMARY KEY or UNIQUE constraint using such an index."),
- parser_errposition(cxt->pstate, constraint->location)));
+ parser_errposition(cxt->pstate, constraint->location)));
constraint->keys = lappend(constraint->keys, makeString(attname));
}
(errcode(ERRCODE_DUPLICATE_COLUMN),
errmsg("column \"%s\" appears twice in primary key constraint",
key),
- parser_errposition(cxt->pstate, constraint->location)));
+ parser_errposition(cxt->pstate, constraint->location)));
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
errmsg("column \"%s\" appears twice in unique constraint",
key),
- parser_errposition(cxt->pstate, constraint->location)));
+ parser_errposition(cxt->pstate, constraint->location)));
}
}
Constraint *constraint = (Constraint *) lfirst(fkclist);
constraint->skip_validation = true;
- constraint->initially_valid = true;
+ constraint->initially_valid = true;
}
}
* However, they were already in the outer rangetable when we
* analyzed the query, so we have to check.
*
- * Note that in the INSERT...SELECT case, we need to examine
- * the CTE lists of both top_subqry and sub_qry.
+ * Note that in the INSERT...SELECT case, we need to examine the
+ * CTE lists of both top_subqry and sub_qry.
*
- * Note that we aren't digging into the body of the query
- * looking for WITHs in nested sub-SELECTs. A WITH down there
- * can legitimately refer to OLD/NEW, because it'd be an
+ * Note that we aren't digging into the body of the query looking
+ * for WITHs in nested sub-SELECTs. A WITH down there can
+ * legitimately refer to OLD/NEW, because it'd be an
* indirect-correlated outer reference.
*/
if (rangeTableEntry_used((Node *) top_subqry->cteList,
PRS2_OLD_VARNO, 0) ||
rangeTableEntry_used((Node *) sub_qry->cteList,
- PRS2_OLD_VARNO, 0))
+ PRS2_OLD_VARNO, 0))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot refer to OLD within WITH query")));
lockmode = AlterTableGetLockLevel(stmt->cmds);
/*
- * Acquire appropriate lock on the target relation, which will be held until
- * end of transaction. This ensures any decisions we make here based on
- * the state of the relation will still be good at execution. We must get
- * lock now because execution will later require it; taking a lower grade lock
- * now and trying to upgrade later risks deadlock. Any new commands we add
- * after this must not upgrade the lock level requested here.
+ * Acquire appropriate lock on the target relation, which will be held
+ * until end of transaction. This ensures any decisions we make here
+ * based on the state of the relation will still be good at execution. We
+ * must get lock now because execution will later require it; taking a
+ * lower grade lock now and trying to upgrade later risks deadlock. Any
+ * new commands we add after this must not upgrade the lock level
+ * requested here.
*/
rel = relation_openrv(stmt->relation, lockmode);
if (column->collClause)
{
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(ctype);
- Oid collOid;
+ Oid collOid;
collOid = LookupCollation(cxt->pstate,
column->collClause->collname,
snprintf(buf, sizeof(buf), "_%s", name);
name = buf;
}
-#endif /* !__ELF__ */
+#endif /* !__ELF__ */
if ((vp = dlsym(handle, (char *) name)) == NULL)
snprintf(error_message, sizeof(error_message),
"dlsym (%s) failed", name);
snprintf(buf, sizeof(buf), "_%s", name);
name = buf;
}
-#endif /* !__ELF__ */
+#endif /* !__ELF__ */
if ((vp = dlsym(handle, (char *) name)) == NULL)
snprintf(error_message, sizeof(error_message),
"dlsym (%s) failed", name);
snprintf(buf, sizeof(buf), "_%s", name);
name = buf;
}
-#endif /* !__ELF__ */
+#endif /* !__ELF__ */
if ((vp = dlsym(handle, (char *) name)) == NULL)
snprintf(error_message, sizeof(error_message),
"dlsym (%s) failed", name);
serv_addr.sin_family = AF_INET;
serv_addr.sin_port = htons(0);
serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- if (bind(s, (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
+ if (bind(s, (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
{
ereport(LOG, (errmsg_internal("pgpipe failed to bind: %ui", WSAGetLastError())));
closesocket(s);
closesocket(s);
return -1;
}
- if (getsockname(s, (SOCKADDR *) & serv_addr, &len) == SOCKET_ERROR)
+ if (getsockname(s, (SOCKADDR *) &serv_addr, &len) == SOCKET_ERROR)
{
ereport(LOG, (errmsg_internal("pgpipe failed to getsockname: %ui", WSAGetLastError())));
closesocket(s);
return -1;
}
- if (connect(handles[1], (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
+ if (connect(handles[1], (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
{
ereport(LOG, (errmsg_internal("pgpipe failed to connect socket: %ui", WSAGetLastError())));
closesocket(s);
return -1;
}
- if ((handles[0] = accept(s, (SOCKADDR *) & serv_addr, &len)) == INVALID_SOCKET)
+ if ((handles[0] = accept(s, (SOCKADDR *) &serv_addr, &len)) == INVALID_SOCKET)
{
ereport(LOG, (errmsg_internal("pgpipe failed to accept socket: %ui", WSAGetLastError())));
closesocket(handles[1]);