cmp;
cmp = DatumGetInt32(DirectFunctionCall2Coll(
- data->typecmp,
- PG_GET_COLLATION(),
- (data->strategy == BTLessStrategyNumber ||
- data->strategy == BTLessEqualStrategyNumber)
- ? data->datum : a,
- b));
+ data->typecmp,
+ PG_GET_COLLATION(),
+ (data->strategy == BTLessStrategyNumber ||
+ data->strategy == BTLessEqualStrategyNumber)
+ ? data->datum : a,
+ b));
switch (data->strategy)
{
gin_extract_value_##type(PG_FUNCTION_ARGS) \
{ \
return gin_btree_extract_value(fcinfo, is_varlena); \
-} \
+} \
PG_FUNCTION_INFO_V1(gin_extract_query_##type); \
Datum \
gin_extract_query_##type(PG_FUNCTION_ARGS) \
{ \
return gin_btree_extract_query(fcinfo, \
is_varlena, leftmostvalue, typecmp); \
-} \
+} \
PG_FUNCTION_INFO_V1(gin_compare_prefix_##type); \
Datum \
gin_compare_prefix_##type(PG_FUNCTION_ARGS) \
{
return Int16GetDatum(SHRT_MIN);
}
+
GIN_SUPPORT(int2, false, leftmostvalue_int2, btint2cmp)
static Datum
{
return Int32GetDatum(INT_MIN);
}
+
GIN_SUPPORT(int4, false, leftmostvalue_int4, btint4cmp)
static Datum
*/
return Int64GetDatum(SEQ_MINVALUE);
}
+
GIN_SUPPORT(int8, false, leftmostvalue_int8, btint8cmp)
static Datum
{
return Float4GetDatum(-get_float4_infinity());
}
+
GIN_SUPPORT(float4, false, leftmostvalue_float4, btfloat4cmp)
static Datum
{
return Float8GetDatum(-get_float8_infinity());
}
+
GIN_SUPPORT(float8, false, leftmostvalue_float8, btfloat8cmp)
static Datum
*/
return Int64GetDatum(SEQ_MINVALUE);
}
+
GIN_SUPPORT(money, false, leftmostvalue_money, cash_cmp)
static Datum
{
return ObjectIdGetDatum(0);
}
+
GIN_SUPPORT(oid, false, leftmostvalue_oid, btoidcmp)
static Datum
{
return TimestampGetDatum(DT_NOBEGIN);
}
+
GIN_SUPPORT(timestamp, false, leftmostvalue_timestamp, timestamp_cmp)
GIN_SUPPORT(timestamptz, false, leftmostvalue_timestamp, timestamp_cmp)
{
return TimeADTGetDatum(0);
}
+
GIN_SUPPORT(time, false, leftmostvalue_time, time_cmp)
static Datum
return TimeTzADTPGetDatum(v);
}
+
GIN_SUPPORT(timetz, false, leftmostvalue_timetz, timetz_cmp)
static Datum
{
return DateADTGetDatum(DATEVAL_NOBEGIN);
}
+
GIN_SUPPORT(date, false, leftmostvalue_date, date_cmp)
static Datum
v->month = 0;
return IntervalPGetDatum(v);
}
+
GIN_SUPPORT(interval, false, leftmostvalue_interval, interval_cmp)
static Datum
return MacaddrPGetDatum(v);
}
+
GIN_SUPPORT(macaddr, false, leftmostvalue_macaddr, macaddr_cmp)
static Datum
{
return DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0/0"));
}
+
GIN_SUPPORT(inet, true, leftmostvalue_inet, network_cmp)
GIN_SUPPORT(cidr, true, leftmostvalue_inet, network_cmp)
{
return PointerGetDatum(cstring_to_text_with_len("", 0));
}
+
GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp)
static Datum
{
return CharGetDatum(SCHAR_MIN);
}
+
GIN_SUPPORT(char, false, leftmostvalue_char, btcharcmp)
GIN_SUPPORT(bytea, true, leftmostvalue_text, byteacmp)
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
+
GIN_SUPPORT(bit, true, leftmostvalue_bit, bitcmp)
static Datum
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
+
GIN_SUPPORT(varbit, true, leftmostvalue_varbit, bitcmp)
/*
{
return PointerGetDatum(NULL);
}
+
GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
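All of the GIN_SUPPORT invocations above follow one pattern: each type contributes only a leftmost-value generator (the scan start for partial-match operators such as < and <=) and its btree comparator, and the macro stamps out the extract_value/extract_query/compare_prefix wrappers. A minimal standalone sketch of the same macro technique (illustrative names only, not the btree_gin API):

#include <stdio.h>
#include <limits.h>

typedef long Datum;            /* stand-in for PostgreSQL's Datum */

/* Per-type inputs: the smallest possible key (the scan start for
 * partial-match queries such as < and <=) and a 3-way comparator. */
static Datum leftmostvalue_int4(void) { return INT_MIN; }
static int   cmp_int4(Datum a, Datum b) { return (a > b) - (a < b); }

/* One macro invocation generates the per-type boilerplate wrappers. */
#define GIN_SUPPORT(type) \
    static Datum gin_extract_query_##type(void) \
    { \
        return leftmostvalue_##type(); \
    } \
    static int gin_compare_##type(Datum a, Datum b) \
    { \
        return cmp_##type(a, b); \
    }

GIN_SUPPORT(int4)

int main(void)
{
    printf("scan start: %ld, cmp(1,2): %d\n",
           (long) gin_extract_query_int4(), gin_compare_int4(1, 2));
    return 0;
}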
GISTENTRY *
gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo)
{
- GISTENTRY *retval;
+ GISTENTRY *retval;
if (entry->leafkey)
{
* Create a leaf-entry to store in the index, from a single Datum.
*/
static GBT_VARKEY *
-gbt_var_key_from_datum(const struct varlena *u)
+gbt_var_key_from_datum(const struct varlena * u)
{
int32 lowersize = VARSIZE(u);
GBT_VARKEY *r;
* in a case like this.
*/
-#define META_FREE(x) ((void)true) /* pfree((x)) */
+#define META_FREE(x) ((void)true) /* pfree((x)) */
#else /* not defined DMETAPHONE_MAIN */
/* use the standard malloc library when not running in PostgreSQL */
static pg_crc32
crc32_sz(char *buf, int size)
{
- pg_crc32 crc;
+ pg_crc32 crc;
INIT_TRADITIONAL_CRC32(crc);
COMP_TRADITIONAL_CRC32(crc, buf, size);
PG_FUNCTION_INFO_V1(hstore_to_plperl);
-Datum hstore_to_plperl(PG_FUNCTION_ARGS);
+Datum hstore_to_plperl(PG_FUNCTION_ARGS);
Datum
hstore_to_plperl(PG_FUNCTION_ARGS)
for (i = 0; i < count; i++)
{
const char *key;
- SV *value;
+ SV *value;
key = pnstrdup(HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
- value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base,i), HS_VALLEN(entries, i)));
+ value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base, i), HS_VALLEN(entries, i)));
(void) hv_store(hv, key, strlen(key), value, 0);
}
PG_FUNCTION_INFO_V1(plperl_to_hstore);
-Datum plperl_to_hstore(PG_FUNCTION_ARGS);
+Datum plperl_to_hstore(PG_FUNCTION_ARGS);
Datum
plperl_to_hstore(PG_FUNCTION_ARGS)
i = 0;
while ((he = hv_iternext(hv)))
{
- char *key = sv2cstr(HeSVKEY_force(he));
- SV *value = HeVAL(he);
+ char *key = sv2cstr(HeSVKEY_force(he));
+ SV *value = HeVAL(he);
pairs[i].key = pstrdup(key);
pairs[i].keylen = hstoreCheckKeyLen(strlen(pairs[i].key));
PG_FUNCTION_INFO_V1(hstore_to_plpython);
-Datum hstore_to_plpython(PG_FUNCTION_ARGS);
+Datum hstore_to_plpython(PG_FUNCTION_ARGS);
Datum
hstore_to_plpython(PG_FUNCTION_ARGS)
PyDict_SetItem(dict, key, Py_None);
else
{
- PyObject *value;
+ PyObject *value;
- value = PyString_FromStringAndSize(HS_VAL(entries, base,i), HS_VALLEN(entries, i));
+ value = PyString_FromStringAndSize(HS_VAL(entries, base, i), HS_VALLEN(entries, i));
PyDict_SetItem(dict, key, value);
Py_XDECREF(value);
}
PG_FUNCTION_INFO_V1(plpython_to_hstore);
-Datum plpython_to_hstore(PG_FUNCTION_ARGS);
+Datum plpython_to_hstore(PG_FUNCTION_ARGS);
Datum
plpython_to_hstore(PG_FUNCTION_ARGS)
for (i = 0; i < pcount; i++)
{
- PyObject *tuple;
- PyObject *key;
- PyObject *value;
+ PyObject *tuple;
+ PyObject *key;
+ PyObject *value;
tuple = PyList_GetItem(items, i);
key = PyTuple_GetItem(tuple, 0);
unsigned int
ltree_crc32_sz(char *buf, int size)
{
- pg_crc32 crc;
+ pg_crc32 crc;
char *p = buf;
INIT_TRADITIONAL_CRC32(crc);
while (size > 0)
{
- char c = (char) TOLOWER(*p);
+ char c = (char) TOLOWER(*p);
+
COMP_TRADITIONAL_CRC32(crc, &c, 1);
size--;
p++;
PG_FUNCTION_INFO_V1(ltree_to_plpython);
-Datum ltree_to_plpython(PG_FUNCTION_ARGS);
+Datum ltree_to_plpython(PG_FUNCTION_ARGS);
Datum
ltree_to_plpython(PG_FUNCTION_ARGS)
{
bytea *raw_page = PG_GETARG_BYTEA_P(0);
Page page = VARDATA(raw_page);
- char *type;
+ char *type;
switch (BrinPageType(page))
{
static Page
verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
{
- Page page;
- int raw_page_size;
+ Page page;
+ int raw_page_size;
raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("input page too small"),
- errdetail("Expected size %d, got %d", BLCKSZ, raw_page_size)));
+ errdetail("Expected size %d, got %d", BLCKSZ, raw_page_size)));
page = VARDATA(raw_page);
indexRel = index_open(indexRelid, AccessShareLock);
state = palloc(offsetof(brin_page_state, columns) +
- sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
+ sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
state->bdesc = brin_build_desc(indexRel);
state->page = page;
*/
for (attno = 1; attno <= state->bdesc->bd_tupdesc->natts; attno++)
{
- Oid output;
- bool isVarlena;
+ Oid output;
+ bool isVarlena;
BrinOpcInfo *opcinfo;
- int i;
+ int i;
brin_column_state *column;
opcinfo = state->bdesc->bd_info[attno - 1];
*/
if (state->dtup == NULL)
{
- BrinTuple *tup;
+ BrinTuple *tup;
MemoryContext mctx;
ItemId itemId;
if (ItemIdIsUsed(itemId))
{
tup = (BrinTuple *) PageGetItem(state->page,
- PageGetItemId(state->page,
- state->offset));
+ PageGetItemId(state->page,
+ state->offset));
state->dtup = brin_deform_tuple(state->bdesc, tup);
state->attno = 1;
state->unusedItem = false;
}
else
{
- int att = state->attno - 1;
+ int att = state->attno - 1;
values[0] = UInt16GetDatum(state->offset);
values[1] = UInt32GetDatum(state->dtup->bt_blkno);
values[5] = BoolGetDatum(state->dtup->bt_placeholder);
if (!state->dtup->bt_columns[att].bv_allnulls)
{
- BrinValues *bvalues = &state->dtup->bt_columns[att];
- StringInfoData s;
+ BrinValues *bvalues = &state->dtup->bt_columns[att];
+ StringInfoData s;
bool first;
int i;
first = true;
for (i = 0; i < state->columns[att]->nstored; i++)
{
- char *val;
+ char *val;
if (!first)
appendStringInfoString(&s, " .. ");
}
/*
- * If we're beyond the end of the page, set flag to end the function in
- * the following iteration.
+ * If we're beyond the end of the page, set flag to end the function
+ * in the following iteration.
*/
if (state->offset > PageGetMaxOffsetNumber(state->page))
state->done = true;
struct
{
ItemPointerData *tids;
- int idx;
- } *state;
+ int idx;
+ } *state;
FuncCallContext *fctx;
if (!superuser())
TupleDesc tupd;
GinPostingList *seg;
GinPostingList *lastseg;
-} gin_leafpage_items_state;
+} gin_leafpage_items_state;
Datum
gin_leafpage_items(PG_FUNCTION_ARGS)
PG_MODULE_MAGIC;
-void _PG_init(void);
+void _PG_init(void);
/* Prototypes for functions used with event triggers */
-Datum pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
-Datum pg_audit_sql_drop(PG_FUNCTION_ARGS);
+Datum pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
+Datum pg_audit_sql_drop(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(pg_audit_ddl_command_end);
PG_FUNCTION_INFO_V1(pg_audit_sql_drop);
#define LOG_ROLE (1 << 4) /* GRANT/REVOKE, CREATE/ALTER/DROP ROLE */
#define LOG_WRITE (1 << 5) /* INSERT, UPDATE, DELETE, TRUNCATE */
-#define LOG_NONE 0 /* nothing */
+#define LOG_NONE 0 /* nothing */
#define LOG_ALL (0xFFFFFFFF) /* All */
/* GUC variable for pg_audit.log, which defines the classes to log. */
-char *auditLog = NULL;
+char *auditLog = NULL;
/* Bitmap of classes selected */
-static int auditLogBitmap = LOG_NONE;
+static int auditLogBitmap = LOG_NONE;
/*
* String constants for log classes - used when processing tokens in the
* the query are in pg_catalog. Interactive sessions (eg: psql) can cause
* a lot of noise in the logs which might be uninteresting.
*/
-bool auditLogCatalog = true;
+bool auditLogCatalog = true;
/*
* GUC variable for pg_audit.log_level
* at. The default level is LOG, which goes into the server log but does
* not go to the client. Set to NOTICE in the regression tests.
*/
-char *auditLogLevelString = NULL;
-int auditLogLevel = LOG;
+char *auditLogLevelString = NULL;
+int auditLogLevel = LOG;
/*
* GUC variable for pg_audit.log_parameter
* Administrators can choose if parameters passed into a statement are
* included in the audit log.
*/
-bool auditLogParameter = false;
+bool auditLogParameter = false;
/*
* GUC variable for pg_audit.log_relation
* in READ/WRITE class queries. By default, SESSION logs include the query but
* do not have a log entry for each relation.
*/
-bool auditLogRelation = false;
+bool auditLogRelation = false;
/*
* GUC variable for pg_audit.log_statement_once
* the audit log to facilitate searching, but this can cause the log to be
* unnecessarily bloated in some environments.
*/
-bool auditLogStatementOnce = false;
+bool auditLogStatementOnce = false;
/*
* GUC variable for pg_audit.role
* Object-level auditing uses the privileges which are granted to this role to
* determine if a statement should be logged.
*/
-char *auditRole = NULL;
+char *auditRole = NULL;
/*
* String constants for the audit log fields.
*/
typedef struct
{
- int64 statementId; /* Simple counter */
- int64 substatementId; /* Simple counter */
+ int64 statementId; /* Simple counter */
+ int64 substatementId; /* Simple counter */
LogStmtLevel logStmtLevel; /* From GetCommandLogLevel when possible, */
- /* generated when not. */
- NodeTag commandTag; /* same here */
+ /* generated when not. */
+ NodeTag commandTag; /* same here */
const char *command; /* same here */
const char *objectType; /* From event trigger when possible */
- /* generated when not. */
- char *objectName; /* Fully qualified object identification */
+ /* generated when not. */
+ char *objectName; /* Fully qualified object identification */
const char *commandText; /* sourceText / queryString */
ParamListInfo paramList; /* QueryDesc/ProcessUtility parameters */
- bool granted; /* Audit role has object permissions? */
- bool logged; /* Track if we have logged this event, used */
- /* post-ProcessUtility to make sure we log */
- bool statementLogged; /* Track if we have logged the statement */
+ bool granted; /* Audit role has object permissions? */
+ bool logged; /* Track if we have logged this event, used */
+ /* post-ProcessUtility to make sure we log */
+ bool statementLogged; /* Track if we have logged the statement */
} AuditEvent;
/*
{
struct AuditEventStackItem *next;
- AuditEvent auditEvent;
+ AuditEvent auditEvent;
- int64 stackId;
+ int64 stackId;
MemoryContext contextAudit;
MemoryContextCallback contextCallback;
while (nextItem != NULL)
{
/* Check if this item matches the item to be freed */
- if (nextItem == (AuditEventStackItem *)stackFree)
+ if (nextItem == (AuditEventStackItem *) stackFree)
{
/* Move top of stack to the item after the freed item */
auditEventStack = nextItem->next;
substatementTotal = 0;
/*
- * Reset statement logged so that next statement will be logged.
+ * Reset statement logged so that next statement will be
+ * logged.
*/
statementLogged = false;
}
* the stack at this item.
*/
stackItem->contextCallback.func = stack_free;
- stackItem->contextCallback.arg = (void *)stackItem;
+ stackItem->contextCallback.arg = (void *) stackItem;
MemoryContextRegisterResetCallback(contextAudit,
&stackItem->contextCallback);
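The context-callback registration above is what keeps the audit stack consistent: stack_free is attached to the item's memory context, so a reset of that context unwinds the stack automatically. A generic standalone sketch of the callback-on-teardown pattern (hypothetical types, not the PostgreSQL MemoryContext API):

#include <stdio.h>
#include <stdlib.h>

typedef struct Callback
{
    void (*func) (void *arg);   /* registered cleanup function */
    void  *arg;                 /* its argument, here a stack item */
} Callback;

typedef struct StackItem
{
    struct StackItem *next;
    Callback cb;
} StackItem;

static StackItem *stack = NULL;

static void
stack_free(void *arg)
{
    /* Unlink everything down to and including the freed item. */
    StackItem *item = (StackItem *) arg;

    while (stack != NULL)
    {
        StackItem *next = stack->next;
        int done = (stack == item);

        free(stack);
        stack = next;
        if (done)
            break;
    }
}

int main(void)
{
    StackItem *item = malloc(sizeof(StackItem));

    item->next = stack;
    item->cb.func = stack_free;
    item->cb.arg = item;
    stack = item;

    /* Simulate the owning context being reset: fire the callback. */
    item->cb.func(item->cb.arg);
    printf("stack empty: %s\n", stack == NULL ? "yes" : "no");
    return 0;
}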
for (pChar = appendStr; *pChar; pChar++)
{
- if (*pChar == '"') /* double the double quotes */
+ if (*pChar == '"') /* double the double quotes */
appendStringInfoCharMacro(buffer, *pChar);
appendStringInfoCharMacro(buffer, *pChar);
log_audit_event(AuditEventStackItem *stackItem)
{
/* By default, put everything in the MISC class. */
- int class = LOG_MISC;
- const char *className = CLASS_MISC;
- MemoryContext contextOld;
- StringInfoData auditStr;
+ int class = LOG_MISC;
+ const char *className = CLASS_MISC;
+ MemoryContext contextOld;
+ StringInfoData auditStr;
/* Classify the statement using log stmt level and the command tag */
switch (stackItem->auditEvent.logStmtLevel)
{
- /* All mods go in WRITE class, except EXECUTE */
+ /* All mods go in WRITE class, except EXECUTE */
case LOGSTMT_MOD:
className = CLASS_WRITE;
class = LOG_WRITE;
switch (stackItem->auditEvent.commandTag)
{
- /* Currently, only EXECUTE is different */
+ /* Currently, only EXECUTE is different */
case T_ExecuteStmt:
className = CLASS_MISC;
class = LOG_MISC;
}
break;
- /* These are DDL, unless they are ROLE */
+ /* These are DDL, unless they are ROLE */
case LOGSTMT_DDL:
className = CLASS_DDL;
class = LOG_DDL;
/* Identify role statements */
switch (stackItem->auditEvent.commandTag)
{
- /* We know these are all role statements */
+ /* We know these are all role statements */
case T_GrantStmt:
case T_GrantRoleStmt:
case T_CreateRoleStmt:
className = CLASS_ROLE;
class = LOG_ROLE;
break;
- /*
- * Rename and Drop are general and therefore we have to do an
- * additional check against the command string to see if they
- * are role or regular DDL.
- */
+
+ /*
+ * Rename and Drop are general and therefore we have to do
+ * an additional check against the command string to see
+ * if they are role or regular DDL.
+ */
case T_RenameStmt:
case T_DropStmt:
if (pg_strcasecmp(stackItem->auditEvent.command,
}
break;
- /* Classify the rest */
+ /* Classify the rest */
case LOGSTMT_ALL:
switch (stackItem->auditEvent.commandTag)
{
- /* READ statements */
+ /* READ statements */
case T_CopyStmt:
case T_SelectStmt:
case T_PrepareStmt:
class = LOG_READ;
break;
- /* FUNCTION statements */
+ /* FUNCTION statements */
case T_DoStmt:
className = CLASS_FUNCTION;
class = LOG_FUNCTION;
/*
* Only log the statement if:
*
- * 1. If object was selected for audit logging (granted)
- * 2. The statement belongs to a class that is being logged
+ * 1. If object was selected for audit logging (granted) 2. The statement
+ * belongs to a class that is being logged
*
* If neither of these is true, return.
*/
/* Handle parameter logging, if enabled. */
if (auditLogParameter)
{
- int paramIdx;
- int numParams;
- StringInfoData paramStrResult;
- ParamListInfo paramList = stackItem->auditEvent.paramList;
+ int paramIdx;
+ int numParams;
+ StringInfoData paramStrResult;
+ ParamListInfo paramList = stackItem->auditEvent.paramList;
numParams = paramList == NULL ? 0 : paramList->numParams;
paramIdx++)
{
ParamExternData *prm = ¶mList->params[paramIdx];
- Oid typeOutput;
- bool typeIsVarLena;
- char *paramStr;
+ Oid typeOutput;
+ bool typeIsVarLena;
+ char *paramStr;
/* Add a comma for each param */
if (paramIdx != 0)
else
/* we were asked to not log it */
appendStringInfoString(&auditStr,
- "<previously logged>,<previously logged>");
+ "<previously logged>,<previously logged>");
/*
* Log the audit entry. Note: use of INT64_FORMAT here is bad for
{
bool result = false;
Acl *acl;
- AclItem *aclItemData;
+ AclItem *aclItemData;
int aclIndex;
int aclTotal;
/* Check privileges granted directly to auditOid */
for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
{
- AclItem *aclItem = &aclItemData[aclIndex];
+ AclItem *aclItem = &aclItemData[aclIndex];
if (aclItem->ai_grantee == auditOid &&
aclItem->ai_privs & mask)
{
for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
{
- AclItem *aclItem = &aclItemData[aclIndex];
+ AclItem *aclItem = &aclItemData[aclIndex];
/* Don't test public or auditOid (it has been tested already) */
if (aclItem->ai_grantee == ACL_ID_PUBLIC ||
Bitmapset *attributeSet,
AclMode mode)
{
- bool result = false;
- AttrNumber col;
- Bitmapset *tmpSet;
+ bool result = false;
+ AttrNumber col;
+ Bitmapset *tmpSet;
/* If bms is empty then check for any column match */
if (bms_is_empty(attributeSet))
static void
log_select_dml(Oid auditOid, List *rangeTabls)
{
- ListCell *lr;
- bool first = true;
- bool found = false;
+ ListCell *lr;
+ bool first = true;
+ bool found = false;
/* Do not log if this is an internal statement */
if (internalStatement)
foreach(lr, rangeTabls)
{
- Oid relOid;
- Relation rel;
+ Oid relOid;
+ Relation rel;
RangeTblEntry *rte = lfirst(lr);
/* We only care about tables, and can ignore subqueries etc. */
found = true;
/*
- * If we are not logging all-catalog queries (auditLogCatalog is false)
- * then filter out any system relations here.
+ * If we are not logging all-catalog queries (auditLogCatalog is
+ * false) then filter out any system relations here.
*/
relOid = rte->relid;
rel = relation_open(relOid, NoLock);
{
case RELKIND_RELATION:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_TABLE;
+ OBJECT_TYPE_TABLE;
+
break;
case RELKIND_INDEX:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_INDEX;
+ OBJECT_TYPE_INDEX;
+
break;
case RELKIND_SEQUENCE:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_SEQUENCE;
+ OBJECT_TYPE_SEQUENCE;
+
break;
case RELKIND_TOASTVALUE:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_TOASTVALUE;
+ OBJECT_TYPE_TOASTVALUE;
+
break;
case RELKIND_VIEW:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_VIEW;
+ OBJECT_TYPE_VIEW;
+
break;
case RELKIND_COMPOSITE_TYPE:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_COMPOSITE_TYPE;
+ OBJECT_TYPE_COMPOSITE_TYPE;
+
break;
case RELKIND_FOREIGN_TABLE:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_FOREIGN_TABLE;
+ OBJECT_TYPE_FOREIGN_TABLE;
+
break;
case RELKIND_MATVIEW:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_MATVIEW;
+ OBJECT_TYPE_MATVIEW;
+
break;
default:
auditEventStack->auditEvent.objectType =
- OBJECT_TYPE_UNKNOWN;
+ OBJECT_TYPE_UNKNOWN;
+
break;
}
/* Get a copy of the relation name and assign it to object name */
auditEventStack->auditEvent.objectName =
quote_qualified_identifier(get_namespace_name(
- RelationGetNamespace(rel)),
+ RelationGetNamespace(rel)),
RelationGetRelationName(rel));
relation_close(rel, NoLock);
/* Perform object auditing only if the audit role is valid */
if (auditOid != InvalidOid)
{
- AclMode auditPerms =
- (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
- rte->requiredPerms;
+ AclMode auditPerms =
+ (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
+ rte->requiredPerms;
/*
* If any of the required permissions for the relation are granted
/*
* If no tables were found that means that RangeTbls was empty or all
- * relations were in the system schema. In that case still log a
- * session record.
+ * relations were in the system schema. In that case still log a session
+ * record.
*/
if (!found)
{
static void
log_function_execute(Oid objectId)
{
- HeapTuple proctup;
+ HeapTuple proctup;
Form_pg_proc proc;
AuditEventStackItem *stackItem;
stackItem->auditEvent.commandTag = T_DoStmt;
stackItem->auditEvent.command = COMMAND_EXECUTE;
stackItem->auditEvent.objectType = OBJECT_TYPE_FUNCTION;
+
stackItem->auditEvent.commandText = stackItem->next->auditEvent.commandText;
log_audit_event(stackItem);
standard_ExecutorStart(queryDesc, eflags);
/*
- * Move the stack memory context to the query memory context. This needs to
- * be done here because the query context does not exist before the call
- * to standard_ExecutorStart() but the stack item is required by
+ * Move the stack memory context to the query memory context. This needs
+ * to be done here because the query context does not exist before the
+ * call to standard_ExecutorStart() but the stack item is required by
* pg_audit_ExecutorCheckPerms_hook() which is called during
* standard_ExecutorStart().
*/
static bool
pg_audit_ExecutorCheckPerms_hook(List *rangeTabls, bool abort)
{
- Oid auditOid;
+ Oid auditOid;
/* Get the audit oid if the role exists */
auditOid = get_role_oid(auditRole, true);
char *completionTag)
{
AuditEventStackItem *stackItem = NULL;
- int64 stackId = 0;
+ int64 stackId = 0;
/*
* Don't audit substatements. All the substatements we care about should
params, dest, completionTag);
/*
- * Process the audit event if there is one. Also check that this event was
- * not popped off the stack by a memory context being free'd elsewhere.
+ * Process the audit event if there is one. Also check that this event
+ * was not popped off the stack by a memory context being free'd
+ * elsewhere.
*/
if (stackItem && !IsAbortedTransactionBlockState())
{
/*
- * Make sure the item we want to log is still on the stack - if not then
- * something has gone wrong and an error will be raised.
+ * Make sure the item we want to log is still on the stack - if not
+ * then something has gone wrong and an error will be raised.
*/
stack_valid(stackId);
- /* Log the utility command if logging is on, the command has not already
- * been logged by another hook, and the transaction is not aborted.
+ /*
+ * Log the utility command if logging is on, the command has not
+ * already been logged by another hook, and the transaction is not
+ * aborted.
*/
if (auditLogBitmap != 0 && !stackItem->auditEvent.logged)
log_audit_event(stackItem);
pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
{
EventTriggerData *eventData;
- int result, row;
- TupleDesc spiTupDesc;
- const char *query;
- MemoryContext contextQuery;
- MemoryContext contextOld;
+ int result,
+ row;
+ TupleDesc spiTupDesc;
+ const char *query;
+ MemoryContext contextQuery;
+ MemoryContext contextOld;
/* Continue only if session DDL logging is enabled */
if (~auditLogBitmap & LOG_DDL)
/* Be sure the module was loaded */
if (!auditEventStack)
elog(ERROR, "pg_audit not loaded before call to "
- "pg_audit_ddl_command_end()");
+ "pg_audit_ddl_command_end()");
/* This is an internal statement - do not log it */
internalStatement = true;
/* Switch memory context for query */
contextQuery = AllocSetContextCreate(
- CurrentMemoryContext,
- "pg_audit_func_ddl_command_end temporary context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ CurrentMemoryContext,
+ "pg_audit_func_ddl_command_end temporary context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
contextOld = MemoryContextSwitchTo(contextQuery);
/* Get information about triggered events */
/* Return objects affected by the (non drop) DDL statement */
query = "SELECT UPPER(object_type), object_identity\n"
- " FROM pg_event_trigger_ddl_commands()";
+ " FROM pg_event_trigger_ddl_commands()";
/* Attempt to connect */
result = SPI_connect();
if (result < 0)
elog(ERROR, "pg_audit_ddl_command_end: SPI_connect returned %d",
- result);
+ result);
/* Execute the query */
result = SPI_execute(query, true, 0);
if (result != SPI_OK_SELECT)
elog(ERROR, "pg_audit_ddl_command_end: SPI_execute returned %d",
- result);
+ result);
/* Iterate returned rows */
spiTupDesc = SPI_tuptable->tupdesc;
for (row = 0; row < SPI_processed; row++)
{
- HeapTuple spiTuple;
+ HeapTuple spiTuple;
spiTuple = SPI_tuptable->vals[row];
/* Supply object name and type for audit event */
auditEventStack->auditEvent.objectType =
- SPI_getvalue(spiTuple, spiTupDesc, 1);
+ SPI_getvalue(spiTuple, spiTupDesc, 1);
+
auditEventStack->auditEvent.objectName =
SPI_getvalue(spiTuple, spiTupDesc, 2);
Datum
pg_audit_sql_drop(PG_FUNCTION_ARGS)
{
- int result, row;
- TupleDesc spiTupDesc;
- const char *query;
- MemoryContext contextQuery;
- MemoryContext contextOld;
+ int result,
+ row;
+ TupleDesc spiTupDesc;
+ const char *query;
+ MemoryContext contextQuery;
+ MemoryContext contextOld;
if (~auditLogBitmap & LOG_DDL)
PG_RETURN_NULL();
/* Be sure the module was loaded */
if (!auditEventStack)
elog(ERROR, "pg_audit not loaded before call to "
- "pg_audit_sql_drop()");
+ "pg_audit_sql_drop()");
/* This is an internal statement - do not log it */
internalStatement = true;
/* Switch memory context for the query */
contextQuery = AllocSetContextCreate(
- CurrentMemoryContext,
- "pg_audit_func_ddl_command_end temporary context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ CurrentMemoryContext,
+ "pg_audit_func_ddl_command_end temporary context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
contextOld = MemoryContextSwitchTo(contextQuery);
/* Return objects affected by the drop statement */
query = "SELECT UPPER(object_type),\n"
- " object_identity\n"
- " FROM pg_event_trigger_dropped_objects()\n"
- " WHERE lower(object_type) <> 'type'\n"
- " AND schema_name <> 'pg_toast'";
+ " object_identity\n"
+ " FROM pg_event_trigger_dropped_objects()\n"
+ " WHERE lower(object_type) <> 'type'\n"
+ " AND schema_name <> 'pg_toast'";
/* Attempt to connect */
result = SPI_connect();
if (result < 0)
elog(ERROR, "pg_audit_ddl_drop: SPI_connect returned %d",
- result);
+ result);
/* Execute the query */
result = SPI_execute(query, true, 0);
if (result != SPI_OK_SELECT)
elog(ERROR, "pg_audit_ddl_drop: SPI_execute returned %d",
- result);
+ result);
/* Iterate returned rows */
spiTupDesc = SPI_tuptable->tupdesc;
for (row = 0; row < SPI_processed; row++)
{
- HeapTuple spiTuple;
+ HeapTuple spiTuple;
spiTuple = SPI_tuptable->vals[row];
auditEventStack->auditEvent.objectType =
- SPI_getvalue(spiTuple, spiTupDesc, 1);
+ SPI_getvalue(spiTuple, spiTupDesc, 1);
+
auditEventStack->auditEvent.objectName =
- SPI_getvalue(spiTuple, spiTupDesc, 2);
+ SPI_getvalue(spiTuple, spiTupDesc, 2);
log_audit_event(auditEventStack);
}
static bool
check_pg_audit_log(char **newVal, void **extra, GucSource source)
{
- List *flagRawList;
- char *rawVal;
- ListCell *lt;
- int *flags;
+ List *flagRawList;
+ char *rawVal;
+ ListCell *lt;
+ int *flags;
/* Make sure newval is a comma-separated list of tokens. */
rawVal = pstrdup(*newVal);
* Check that we recognise each token, and add it to the bitmap we're
* building up in a newly-allocated int *flags.
*/
- if (!(flags = (int *)malloc(sizeof(int))))
+ if (!(flags = (int *) malloc(sizeof(int))))
return false;
*flags = 0;
foreach(lt, flagRawList)
{
- bool subtract = false;
- int class;
+ bool subtract = false;
+ int class;
/* Retrieve a token */
- char *token = (char *)lfirst(lt);
+ char *token = (char *) lfirst(lt);
/* If token is preceded by -, then the token is subtractive */
if (strstr(token, "-") == token)
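The token loop above builds the class bitmap additively, with a leading '-' flipping a token to subtractive. A minimal standalone sketch of that scheme (illustrative only; the real check_pg_audit_log() handles more classes and uses PostgreSQL's list utilities):

#include <stdio.h>
#include <string.h>

#define LOG_MISC  (1 << 0)
#define LOG_READ  (1 << 1)
#define LOG_WRITE (1 << 2)

static int
parse_log_classes(char *spec)
{
    int flags = 0;
    char *token = strtok(spec, ",");

    while (token != NULL)
    {
        /* A leading '-' means the class is subtracted, not added. */
        int subtract = (*token == '-');
        int class = 0;
        const char *name = subtract ? token + 1 : token;

        if (strcmp(name, "misc") == 0)
            class = LOG_MISC;
        else if (strcmp(name, "read") == 0)
            class = LOG_READ;
        else if (strcmp(name, "write") == 0)
            class = LOG_WRITE;

        if (subtract)
            flags &= ~class;
        else
            flags |= class;

        token = strtok(NULL, ",");
    }
    return flags;
}

int main(void)
{
    char spec[] = "read,write,-misc";

    printf("bitmap: 0x%x\n", parse_log_classes(spec));
    return 0;
}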
assign_pg_audit_log(const char *newVal, void *extra)
{
if (extra)
- auditLogBitmap = *(int *)extra;
+ auditLogBitmap = *(int *) extra;
}
/*
static bool
check_pg_audit_log_level(char **newVal, void **extra, GucSource source)
{
- int *logLevel;
+ int *logLevel;
/* Allocate memory to store the log level */
- if (!(logLevel = (int *)malloc(sizeof(int))))
+ if (!(logLevel = (int *) malloc(sizeof(int))))
return false;
/* Find the log level enum */
assign_pg_audit_log_level(const char *newVal, void *extra)
{
if (extra)
- auditLogLevel = *(int *)extra;
+ auditLogLevel = *(int *) extra;
}
/*
{
/* Define pg_audit.log */
DefineCustomStringVariable(
- "pg_audit.log",
-
- "Specifies which classes of statements will be logged by session audit "
- "logging. Multiple classes can be provided using a comma-separated "
- "list and classes can be subtracted by prefacing the class with a "
- "- sign.",
-
- NULL,
- &auditLog,
- "none",
- PGC_SUSET,
- GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
- check_pg_audit_log,
- assign_pg_audit_log,
- NULL);
+ "pg_audit.log",
+
+ "Specifies which classes of statements will be logged by session audit "
+ "logging. Multiple classes can be provided using a comma-separated "
+ "list and classes can be subtracted by prefacing the class with a "
+ "- sign.",
+
+ NULL,
+ &auditLog,
+ "none",
+ PGC_SUSET,
+ GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
+ check_pg_audit_log,
+ assign_pg_audit_log,
+ NULL);
/* Define pg_audit.log_catalog */
DefineCustomBoolVariable(
- "pg_audit.log_catalog",
+ "pg_audit.log_catalog",
"Specifies that session logging should be enabled in the case where "
- "all relations in a statement are in pg_catalog. Disabling this "
- "setting will reduce noise in the log from tools like psql and PgAdmin "
- "that query the catalog heavily.",
+ "all relations in a statement are in pg_catalog. Disabling this "
+ "setting will reduce noise in the log from tools like psql and PgAdmin "
+ "that query the catalog heavily.",
- NULL,
- &auditLogCatalog,
- true,
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ NULL,
+ &auditLogCatalog,
+ true,
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/* Define pg_audit.log_level */
DefineCustomStringVariable(
- "pg_audit.log_level",
-
- "Specifies the log level that will be used for log entries. This "
- "setting is used for regression testing and may also be useful to end "
- "users for testing or other purposes. It is not intended to be used "
- "in a production environment as it may leak which statements are being "
- "logged to the user.",
-
- NULL,
- &auditLogLevelString,
- "log",
- PGC_SUSET,
- GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
- check_pg_audit_log_level,
- assign_pg_audit_log_level,
- NULL);
+ "pg_audit.log_level",
+
+ "Specifies the log level that will be used for log entries. This "
+ "setting is used for regression testing and may also be useful to end "
+ "users for testing or other purposes. It is not intended to be used "
+ "in a production environment as it may leak which statements are being "
+ "logged to the user.",
+
+ NULL,
+ &auditLogLevelString,
+ "log",
+ PGC_SUSET,
+ GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
+ check_pg_audit_log_level,
+ assign_pg_audit_log_level,
+ NULL);
/* Define pg_audit.log_parameter */
DefineCustomBoolVariable(
- "pg_audit.log_parameter",
+ "pg_audit.log_parameter",
- "Specifies that audit logging should include the parameters that were "
- "passed with the statement. When parameters are present they will be "
- "be included in CSV format after the statement text.",
+ "Specifies that audit logging should include the parameters that were "
+ "passed with the statement. When parameters are present they will be "
+ "be included in CSV format after the statement text.",
- NULL,
- &auditLogParameter,
- false,
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ NULL,
+ &auditLogParameter,
+ false,
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/* Define pg_audit.log_relation */
DefineCustomBoolVariable(
- "pg_audit.log_relation",
+ "pg_audit.log_relation",
- "Specifies whether session audit logging should create a separate log "
- "entry for each relation referenced in a SELECT or DML statement. "
- "This is a useful shortcut for exhaustive logging without using object "
- "audit logging.",
+ "Specifies whether session audit logging should create a separate log "
+ "entry for each relation referenced in a SELECT or DML statement. "
+ "This is a useful shortcut for exhaustive logging without using object "
+ "audit logging.",
- NULL,
- &auditLogRelation,
- false,
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ NULL,
+ &auditLogRelation,
+ false,
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/* Define pg_audit.log_statement_once */
DefineCustomBoolVariable(
- "pg_audit.log_statement_once",
-
- "Specifies whether logging will include the statement text and "
- "parameters with the first log entry for a statement/substatement "
- "combination or with every entry. Disabling this setting will result "
- "in less verbose logging but may make it more difficult to determine "
- "the statement that generated a log entry, though the "
- "statement/substatement pair along with the process id should suffice "
- "to identify the statement text logged with a previous entry.",
-
- NULL,
- &auditLogStatementOnce,
- false,
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ "pg_audit.log_statement_once",
+
+ "Specifies whether logging will include the statement text and "
+ "parameters with the first log entry for a statement/substatement "
+ "combination or with every entry. Disabling this setting will result "
+ "in less verbose logging but may make it more difficult to determine "
+ "the statement that generated a log entry, though the "
+ "statement/substatement pair along with the process id should suffice "
+ "to identify the statement text logged with a previous entry.",
+
+ NULL,
+ &auditLogStatementOnce,
+ false,
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/* Define pg_audit.role */
DefineCustomStringVariable(
- "pg_audit.role",
+ "pg_audit.role",
- "Specifies the master role to use for object audit logging. Muliple "
- "audit roles can be defined by granting them to the master role. This "
- "allows multiple groups to be in charge of different aspects of audit "
- "logging.",
+ "Specifies the master role to use for object audit logging. Muliple "
+ "audit roles can be defined by granting them to the master role. This "
+ "allows multiple groups to be in charge of different aspects of audit "
+ "logging.",
- NULL,
- &auditRole,
- "",
- PGC_SUSET,
- GUC_NOT_IN_SAMPLE,
- NULL, NULL, NULL);
+ NULL,
+ &auditRole,
+ "",
+ PGC_SUSET,
+ GUC_NOT_IN_SAMPLE,
+ NULL, NULL, NULL);
/*
- * Install our hook functions after saving the existing pointers to preserve
- * the chains.
+ * Install our hook functions after saving the existing pointers to
+ * preserve the chains.
*/
next_ExecutorStart_hook = ExecutorStart_hook;
ExecutorStart_hook = pg_audit_ExecutorStart_hook;
bool isvalid;
bool isdirty;
uint16 usagecount;
+
/*
* An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
* being pinned by too many backends and each backend will only pin once
{
int64 calls; /* # of times executed */
double total_time; /* total execution time, in msec */
- double min_time; /* minimum execution time in msec */
- double max_time; /* maximum execution time in msec */
- double mean_time; /* mean execution time in msec */
- double sum_var_time; /* sum of variances in execution time in msec */
+ double min_time; /* minimum execution time in msec */
+ double max_time; /* maximum execution time in msec */
+ double mean_time; /* mean execution time in msec */
+ double sum_var_time; /* sum of variances in execution time in msec */
int64 rows; /* total # of retrieved or affected rows */
int64 shared_blks_hit; /* # of shared buffer hits */
int64 shared_blks_read; /* # of shared disk blocks read */
else
{
/*
- * Welford's method for accurately computing variance.
- * See <http://www.johndcook.com/blog/standard_deviation/>
+ * Welford's method for accurately computing variance. See
+ * <http://www.johndcook.com/blog/standard_deviation/>
*/
- double old_mean = e->counters.mean_time;
+ double old_mean = e->counters.mean_time;
e->counters.mean_time +=
(total_time - old_mean) / e->counters.calls;
values[i++] = Float8GetDatumFast(tmp.min_time);
values[i++] = Float8GetDatumFast(tmp.max_time);
values[i++] = Float8GetDatumFast(tmp.mean_time);
+
/*
* Note we are calculating the population variance here, not the
- * sample variance, as we have data for the whole population,
- * so Bessel's correction is not used, and we don't divide by
+ * sample variance, as we have data for the whole population, so
+ * Bessel's correction is not used, and we don't divide by
* tmp.calls - 1.
*/
if (tmp.calls > 1)
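For reference, the running mean/variance bookkeeping above is Welford's update in isolation; a standalone sketch with invented data, including the population-variance division by n rather than n - 1:

#include <stdio.h>

int main(void)
{
    /* Welford's online update, as in the hunk above: each new value
     * adjusts the running mean, and sum_var accumulates squared
     * deviations measured against both the old and new mean. */
    double data[] = {10.0, 12.0, 23.0, 23.0, 16.0};
    double mean = 0.0, sum_var = 0.0;
    int n = 0;

    for (int i = 0; i < 5; i++)
    {
        double old_mean = mean;

        n++;
        mean += (data[i] - old_mean) / n;
        sum_var += (data[i] - old_mean) * (data[i] - mean);
    }

    /* Population variance: divide by n, not n - 1 (no Bessel
     * correction), since the counters cover every call made. */
    printf("mean=%g variance=%g\n", mean, sum_var / n);
    return 0;
}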
break;
case T_OnConflictExpr:
{
- OnConflictExpr *conf = (OnConflictExpr *) node;
+ OnConflictExpr *conf = (OnConflictExpr *) node;
APP_JUMB(conf->action);
JumbleExpr(jstate, (Node *) conf->arbiterElems);
JumbleExpr(jstate, conf->arbiterWhere);
- JumbleExpr(jstate, (Node *) conf->onConflictSet);
+ JumbleExpr(jstate, (Node *) conf->onConflictSet);
JumbleExpr(jstate, conf->onConflictWhere);
APP_JUMB(conf->constraint);
APP_JUMB(conf->exclRelIndex);
- JumbleExpr(jstate, (Node *) conf->exclRelTlist);
+ JumbleExpr(jstate, (Node *) conf->exclRelTlist);
}
break;
case T_List:
char *line;
char *nextline;
char *eol,
- *colon;
+ *colon;
int hlen;
char *buf;
int hdrlines;
res = pgp_set_convert_crlf(ctx, atoi(val));
else if (strcmp(key, "unicode-mode") == 0)
res = pgp_set_unicode_mode(ctx, atoi(val));
+
/*
* The remaining options are for debugging/testing and are therefore not
* documented in the user-facing docs.
parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
char ***p_keys, char ***p_values)
{
- int nkdims = ARR_NDIM(key_array);
- int nvdims = ARR_NDIM(val_array);
- char **keys,
- **values;
- Datum *key_datums,
- *val_datums;
- bool *key_nulls,
- *val_nulls;
- int key_count,
- val_count;
- int i;
+ int nkdims = ARR_NDIM(key_array);
+ int nvdims = ARR_NDIM(val_array);
+ char **keys,
+ **values;
+ Datum *key_datums,
+ *val_datums;
+ bool *key_nulls,
+ *val_nulls;
+ int key_count,
+ val_count;
+ int i;
if (nkdims > 1 || nkdims != nvdims)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("wrong number of array subscripts")));
+ errmsg("wrong number of array subscripts")));
if (nkdims == 0)
return 0;
for (i = 0; i < key_count; i++)
{
- char *v;
+ char *v;
/* Check that the key doesn't contain anything funny */
if (key_nulls[i])
if (!string_is_ascii(v))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("header key must not contain non-ASCII characters")));
+ errmsg("header key must not contain non-ASCII characters")));
if (strstr(v, ": "))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
if (!string_is_ascii(v))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("header value must not contain non-ASCII characters")));
+ errmsg("header value must not contain non-ASCII characters")));
if (strchr(v, '\n'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
SRF_RETURN_DONE(funcctx);
else
{
- char *values[2];
+ char *values[2];
/* we assume that the keys (and values) are in UTF-8. */
utf8key = state->keys[funcctx->call_cntr];
int pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
int pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
-void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
- int num_headers, char **keys, char **values);
+void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
+ int num_headers, char **keys, char **values);
int pgp_armor_decode(const uint8 *src, int len, StringInfo dst);
-int pgp_extract_armor_headers(const uint8 *src, unsigned len,
- int *nheaders, char ***keys, char ***values);
+int pgp_extract_armor_headers(const uint8 *src, unsigned len,
+ int *nheaders, char ***keys, char ***values);
int pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst);
int pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src);
CHECK_FOR_INTERRUPTS();
/*
- * If the page has only visible tuples, then we can find out the
- * free space from the FSM and move on.
+ * If the page has only visible tuples, then we can find out the free
+ * space from the FSM and move on.
*/
if (visibilitymap_test(rel, blkno, &vmbuffer))
{
page = BufferGetPage(buf);
/*
- * It's not safe to call PageGetHeapFreeSpace() on new pages, so
- * we treat them as being free space for our purposes.
+ * It's not safe to call PageGetHeapFreeSpace() on new pages, so we
+ * treat them as being free space for our purposes.
*/
if (!PageIsNew(page))
stat->free_space += PageGetHeapFreeSpace(page);
scanned++;
/*
- * Look at each tuple on the page and decide whether it's live
- * or dead, then count it and its size. Unlike lazy_scan_heap,
- * we can afford to ignore problems and special cases.
+ * Look at each tuple on the page and decide whether it's live or
+ * dead, then count it and its size. Unlike lazy_scan_heap, we can
+ * afford to ignore problems and special cases.
*/
maxoff = PageGetMaxOffsetNumber(page);
UnlockReleaseBuffer(buf);
}
- stat->table_len = (uint64) nblocks * BLCKSZ;
+ stat->table_len = (uint64) nblocks *BLCKSZ;
+
stat->tuple_count = vac_estimate_reltuples(rel, false, nblocks, scanned,
- stat->tuple_count+misc_count);
+ stat->tuple_count + misc_count);
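The tuple estimate above is, at its core, linear extrapolation from the pages actually visited (vac_estimate_reltuples also smooths against the previous reltuples value). A rough standalone sketch with invented numbers:

#include <stdio.h>

int main(void)
{
    /* Rough shape of the extrapolation above: scale the live tuples
     * seen on the scanned pages up to the whole relation.  Numbers
     * are made up for illustration; an 8kB BLCKSZ is assumed. */
    double nblocks = 4000.0;        /* relation size in pages */
    double scanned = 1000.0;        /* pages actually visited */
    double seen_tuples = 250000.0;  /* live tuples counted on them */

    double table_len = nblocks * 8192.0;
    double est_tuples = seen_tuples * (nblocks / scanned);

    printf("table_len=%.0f bytes, estimated tuples=%.0f\n",
           table_len, est_tuples);
    return 0;
}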
/*
* Calculate percentages if the relation has one or more pages.
errmsg("cannot access temporary tables of other sessions")));
/*
- * We support only ordinary relations and materialised views,
- * because we depend on the visibility map and free space map
- * for our estimates about unscanned pages.
+ * We support only ordinary relations and materialised views, because we
+ * depend on the visibility map and free space map for our estimates about
+ * unscanned pages.
*/
if (!(rel->rd_rel->relkind == RELKIND_RELATION ||
rel->rd_rel->relkind == RELKIND_MATVIEW))
values[i++] = Int64GetDatum(stat.free_space);
values[i++] = Float8GetDatum(stat.free_percent);
- ret = heap_form_tuple(tupdesc, values, nulls);
+ ret = heap_form_tuple(tupdesc, values, nulls);
return HeapTupleGetDatum(ret);
}
/* for random sampling */
double samplerows; /* # of rows fetched */
double rowstoskip; /* # of rows to skip before next sample */
- ReservoirStateData rstate; /* state for reservoir sampling*/
+ ReservoirStateData rstate; /* state for reservoir sampling */
/* working memory contexts */
MemoryContext anl_cxt; /* context for per-analyze lifespan data */
static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn);
static void pg_output_begin(LogicalDecodingContext *ctx,
- TestDecodingData *data,
- ReorderBufferTXN *txn,
- bool last_write);
+ TestDecodingData *data,
+ ReorderBufferTXN *txn,
+ bool last_write);
static void pg_decode_commit_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
static void pg_decode_change(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, Relation rel,
ReorderBufferChange *change);
static bool pg_decode_filter(LogicalDecodingContext *ctx,
- RepOriginId origin_id);
+ RepOriginId origin_id);
void
_PG_init(void)
typedef struct
{
SamplerRandomState randstate;
- uint32 seed; /* random seed */
- BlockNumber nblocks; /* number of blocks in relation */
- int32 ntuples; /* number of tuples to return */
- int32 donetuples; /* tuples already returned */
- OffsetNumber lt; /* last tuple returned from current block */
- BlockNumber step; /* step size */
- BlockNumber lb; /* last block visited */
- BlockNumber doneblocks; /* number of already returned blocks */
+ uint32 seed; /* random seed */
+ BlockNumber nblocks; /* number of blocks in relation */
+ int32 ntuples; /* number of tuples to return */
+ int32 donetuples; /* tuples already returned */
+ OffsetNumber lt; /* last tuple returned from current block */
+ BlockNumber step; /* step size */
+ BlockNumber lb; /* last block visited */
+ BlockNumber doneblocks; /* number of already returned blocks */
} SystemSamplerData;
Datum
tsm_system_rows_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- int32 ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
- HeapScanDesc scan = tsdesc->heapScan;
- SystemSamplerData *sampler;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ int32 ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+ HeapScanDesc scan = tsdesc->heapScan;
+ SystemSamplerData *sampler;
if (ntuples < 1)
ereport(ERROR,
/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.
Datum
tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
sampler->doneblocks++;
Datum
tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
Datum
tsm_system_rows_examinetuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- bool visible = PG_GETARG_BOOL(3);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ bool visible = PG_GETARG_BOOL(3);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
if (!visible)
PG_RETURN_BOOL(false);
Datum
tsm_system_rows_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
sampler->donetuples = 0;
Datum
tsm_system_rows_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *limitnode;
- int32 ntuples;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *limitnode;
+ int32 ntuples;
limitnode = linitial(args);
limitnode = estimate_expression_value(root, limitnode);
static uint32
-gcd (uint32 a, uint32 b)
+gcd(uint32 a, uint32 b)
{
- uint32 c;
+ uint32 c;
while (a != 0)
{
random_relative_prime(uint32 n, SamplerRandomState randstate)
{
/* Pick random starting number, with some limits on what it can be. */
- uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
- t;
+ uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+ t;
/*
* This should only take 2 or 3 iterations as the probability of 2 numbers
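The reason a relatively prime step size works for the linear probe: stepping by s with gcd(s, n) == 1 walks every residue modulo n exactly once before repeating. A standalone sketch of the gcd test and the resulting block walk (illustrative values):

#include <stdio.h>

static unsigned
gcd(unsigned a, unsigned b)
{
    while (a != 0)
    {
        unsigned c = a;

        a = b % a;
        b = c;
    }
    return b;
}

int main(void)
{
    /* A step size relatively prime to nblocks (gcd == 1) makes the
     * probe lb = (lb + step) % nblocks visit each block exactly once
     * per cycle, which is why the sampler hunts for one. */
    unsigned nblocks = 10, step = 7, lb = 3;

    printf("gcd(%u,%u)=%u, walk:", step, nblocks, gcd(step, nblocks));
    for (unsigned i = 0; i < nblocks; i++)
    {
        lb = (lb + step) % nblocks;
        printf(" %u", lb);
    }
    printf("\n");
    return 0;
}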
typedef struct
{
SamplerRandomState randstate;
- uint32 seed; /* random seed */
- BlockNumber nblocks; /* number of blocks in relation */
- int32 time; /* time limit for sampling */
- TimestampTz start_time; /* start time of sampling */
- TimestampTz end_time; /* end time of sampling */
- OffsetNumber lt; /* last tuple returned from current block */
- BlockNumber step; /* step size */
- BlockNumber lb; /* last block visited */
- BlockNumber estblocks; /* estimated number of returned blocks (moving) */
- BlockNumber doneblocks; /* number of already returned blocks */
+ uint32 seed; /* random seed */
+ BlockNumber nblocks; /* number of blocks in relation */
+ int32 time; /* time limit for sampling */
+ TimestampTz start_time; /* start time of sampling */
+ TimestampTz end_time; /* end time of sampling */
+ OffsetNumber lt; /* last tuple returned from current block */
+ BlockNumber step; /* step size */
+ BlockNumber lb; /* last block visited */
+ BlockNumber estblocks; /* estimated number of returned blocks
+ * (moving) */
+ BlockNumber doneblocks; /* number of already returned blocks */
} SystemSamplerData;
Datum
tsm_system_time_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- int32 time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
- HeapScanDesc scan = tsdesc->heapScan;
- SystemSamplerData *sampler;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ int32 time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+ HeapScanDesc scan = tsdesc->heapScan;
+ SystemSamplerData *sampler;
if (time < 1)
ereport(ERROR,
/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.
Datum
tsm_system_time_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
sampler->doneblocks++;
* Update the estimations for time limit at least 10 times per estimated
* number of returned blocks to handle variations in block read speed.
*/
- if (sampler->doneblocks % Max(sampler->estblocks/10, 1) == 0)
+ if (sampler->doneblocks % Max(sampler->estblocks / 10, 1) == 0)
{
- TimestampTz now = GetCurrentTimestamp();
- long secs;
- int usecs;
+ TimestampTz now = GetCurrentTimestamp();
+ long secs;
+ int usecs;
int usecs_remaining;
int time_per_block;
TimestampDifference(sampler->start_time, now, &secs, &usecs);
- usecs += (int) secs * 1000000;
+ usecs += (int) secs *1000000;
time_per_block = usecs / sampler->doneblocks;
PG_RETURN_UINT32(InvalidBlockNumber);
/* Remaining microseconds */
- usecs_remaining = usecs + (int) secs * 1000000;
+ usecs_remaining = usecs + (int) secs *1000000;
/* Recalculate estimated returned number of blocks */
if (time_per_block < usecs_remaining && time_per_block > 0)
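The re-estimation above is plain rate arithmetic: derive a per-block cost from the blocks read so far, then project how many more fit in the remaining budget. A standalone sketch with invented timings (the real code measures elapsed time with TimestampDifference):

#include <stdio.h>

int main(void)
{
    int time_limit_usecs = 1000000;  /* 1 second sampling budget */
    int elapsed_usecs = 200000;      /* spent so far */
    int doneblocks = 50;             /* blocks read so far */

    /* Per-block cost so far, then the projected total block count. */
    int time_per_block = elapsed_usecs / doneblocks;
    int usecs_remaining = time_limit_usecs - elapsed_usecs;
    int estblocks = doneblocks + usecs_remaining / time_per_block;

    printf("time/block=%dus, estimated total blocks=%d\n",
           time_per_block, estblocks);
    return 0;
}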
Datum
tsm_system_time_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
Datum
tsm_system_time_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
sampler->start_time = GetCurrentTimestamp();
Datum
tsm_system_time_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *limitnode;
- int32 time;
- BlockNumber relpages;
- double reltuples;
- double density;
- double spc_random_page_cost;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *limitnode;
+ int32 time;
+ BlockNumber relpages;
+ double reltuples;
+ double density;
+ double spc_random_page_cost;
limitnode = linitial(args);
limitnode = estimate_expression_value(root, limitnode);
/*
* Assumption here is that we'll never read less than 1% of table pages;
* this is here mainly because it is much less bad to overestimate than
- * underestimate and using just spc_random_page_cost will probably lead
- * to underestimations in general.
+ * underestimate and using just spc_random_page_cost will probably lead to
+ * underestimations in general.
*/
- *pages = Min(baserel->pages, Max(time/spc_random_page_cost, baserel->pages/100));
+ *pages = Min(baserel->pages, Max(time / spc_random_page_cost, baserel->pages / 100));
*tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
path->rows = *tuples;
}
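The clamp above in isolation: the page estimate is the time budget divided by the per-page cost, floored at 1% of the table and capped at the whole table. A standalone sketch with illustrative values:

#include <stdio.h>

static double Min(double a, double b) { return a < b ? a : b; }
static double Max(double a, double b) { return a > b ? a : b; }

int main(void)
{
    double relpages = 50000.0;           /* table size in pages */
    double time = 100.0;                 /* sampling budget, in msec */
    double spc_random_page_cost = 4.0;

    /* Budget-based estimate, but never below 1% of the table and
     * never above the whole table. */
    double pages = Min(relpages,
                       Max(time / spc_random_page_cost, relpages / 100));

    printf("pages=%g\n", pages);  /* the 1% floor wins here: 500 */
    return 0;
}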
static uint32
-gcd (uint32 a, uint32 b)
+gcd(uint32 a, uint32 b)
{
- uint32 c;
+ uint32 c;
while (a != 0)
{
random_relative_prime(uint32 n, SamplerRandomState randstate)
{
/* Pick random starting number, with some limits on what it can be. */
- uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
- t;
+ uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+ t;
/*
* This should only take 2 or 3 iterations as the probability of 2 numbers
*/
Assert((key->sk_flags & SK_ISNULL) ||
(key->sk_collation ==
- bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
+ bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
/* First time this column? look up consistent function */
if (consistentFn[keyattno - 1].fn_oid == InvalidOid)
thisblock = ItemPointerGetBlockNumber(&htup->t_self);
/*
- * If we're in a block that belongs to a future range, summarize what we've
- * got and start afresh. Note the scan might have skipped many pages,
- * if they were devoid of live tuples; make sure to insert index tuples
- * for those too.
+ * If we're in a block that belongs to a future range, summarize what
+ * we've got and start afresh. Note the scan might have skipped many
+ * pages, if they were devoid of live tuples; make sure to insert index
+ * tuples for those too.
*/
while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
{
Datum
brinbuildempty(PG_FUNCTION_ARGS)
{
-
Relation index = (Relation) PG_GETARG_POINTER(0);
Buffer metabuf;
{
/* other arguments are not currently used */
IndexBulkDeleteResult *stats =
- (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+ (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
/* allocate stats if first time through, else re-use existing struct */
if (stats == NULL)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
IndexBulkDeleteResult *stats =
- (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+ (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
Relation heapRel;
/* No-op in ANALYZE ONLY mode */
page = BufferGetPage(state->bs_currentInsertBuf);
RecordPageWithFreeSpace(state->bs_irel,
- BufferGetBlockNumber(state->bs_currentInsertBuf),
+ BufferGetBlockNumber(state->bs_currentInsertBuf),
PageGetFreeSpace(page));
ReleaseBuffer(state->bs_currentInsertBuf);
}
* 0 - the union of the values in the block range
* 1 - whether an empty value is present in any tuple in the block range
* 2 - whether the values in the block range cannot be merged (e.g. an IPv6
- * address amidst IPv4 addresses).
+ * address amidst IPv4 addresses).
*/
-#define INCLUSION_UNION 0
-#define INCLUSION_UNMERGEABLE 1
-#define INCLUSION_CONTAINS_EMPTY 2
+#define INCLUSION_UNION 0
+#define INCLUSION_UNMERGEABLE 1
+#define INCLUSION_CONTAINS_EMPTY 2
typedef struct InclusionOpaque
unionval = column->bv_values[INCLUSION_UNION];
switch (key->sk_strategy)
{
- /*
- * Placement strategies
- *
- * These are implemented by logically negating the result of the
- * converse placement operator; for this to work, the converse operator
- * must be part of the opclass. An error will be thrown by
- * inclusion_get_strategy_procinfo() if the required strategy is not
- * part of the opclass.
- *
- * These all return false if either argument is empty, so there is
- * no need to check for empty elements.
- */
+ /*
+ * Placement strategies
+ *
+ * These are implemented by logically negating the result of the
+ * converse placement operator; for this to work, the converse
+ * operator must be part of the opclass. An error will be thrown
+ * by inclusion_get_strategy_procinfo() if the required strategy
+ * is not part of the opclass.
+ *
+ * These all return false if either argument is empty, so there is
+ * no need to check for empty elements.
+ */
case RTLeftStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
- RTOverRightStrategyNumber);
+ RTOverRightStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
case RTBelowStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
- RTOverAboveStrategyNumber);
+ RTOverAboveStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
case RTAboveStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
- RTOverBelowStrategyNumber);
+ RTOverBelowStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
* strategies because some elements can be contained even though
* the union is not; instead we use the overlap operator.
*
- * We check for empty elements separately as they are not merged to
- * the union but contained by everything.
+ * We check for empty elements separately as they are not merged
+ * to the union but contained by everything.
*/
case RTContainedByStrategyNumber:
/*
* Adjacent strategy
*
- * We test for overlap first but to be safe we need to call
- * the actual adjacent operator also.
+ * We test for overlap first but to be safe we need to call the
+ * actual adjacent operator also.
*
* An empty element cannot be adjacent to any other, so there is
* no need to check for it.
* the contains operator. Generally, inequality strategies do not
* make much sense for the types which will be used with the
 * inclusion BRIN family of opclasses, but it is possible to
- * implement them with logical negation of the left-of and right-of
- * operators.
+ * implement them with logical negation of the left-of and
+ * right-of operators.
*
* NB: These strategies cannot be used with geometric datatypes
* that use comparison of areas! The only exception is the "same"
Datum brin_minmax_consistent(PG_FUNCTION_ARGS);
Datum brin_minmax_union(PG_FUNCTION_ARGS);
static FmgrInfo *minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno,
- Oid subtype, uint16 strategynum);
+ Oid subtype, uint16 strategynum);
Datum
break;
/* max() >= scankey */
finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
- BTGreaterEqualStrategyNumber);
+ BTGreaterEqualStrategyNumber);
matches = FunctionCall2Coll(finfo, colloid, column->bv_values[1],
value);
break;
attr = bdesc->bd_tupdesc->attrs[attno - 1];
/*
- * Adjust "allnulls". If A doesn't have values, just copy the values
- * from B into A, and we're done. We cannot run the operators in this
- * case, because values in A might contain garbage. Note we already
- * established that B contains values.
+ * Adjust "allnulls". If A doesn't have values, just copy the values from
+ * B into A, and we're done. We cannot run the operators in this case,
+ * because values in A might contain garbage. Note we already established
+ * that B contains values.
*/
if (col_a->bv_allnulls)
{
strategynum, attr->atttypid, subtype, opfamily);
oprid = DatumGetObjectId(SysCacheGetAttr(AMOPSTRATEGY, tuple,
- Anum_pg_amop_amopopr, &isNull));
+ Anum_pg_amop_amopopr, &isNull));
ReleaseSysCache(tuple);
Assert(!isNull && RegProcedureIsValid(oprid));
{
Relation rm_irel;
BlockNumber rm_pagesPerRange;
- BlockNumber rm_lastRevmapPage; /* cached from the metapage */
+ BlockNumber rm_lastRevmapPage; /* cached from the metapage */
Buffer rm_metaBuf;
Buffer rm_currBuf;
};
static BlockNumber revmap_get_blkno(BrinRevmap *revmap,
- BlockNumber heapBlk);
+ BlockNumber heapBlk);
static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap,
BlockNumber heapBlk);
void
brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
{
- BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
+ BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);
if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg_internal("corrupted BRIN index: inconsistent range map")));
+ errmsg_internal("corrupted BRIN index: inconsistent range map")));
previptr = *iptr;
blk = ItemPointerGetBlockNumber(iptr);
static BlockNumber
revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
{
- BlockNumber targetblk;
+ BlockNumber targetblk;
/* obtain revmap block number, skip 1 for metapage block */
targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
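
The "+ 1" skips the metapage at block 0; reduced to its essentials, the mapping looks like the sketch below, where REVMAP_ITEMS_PER_PAGE is an assumed constant for illustration (the real capacity depends on BLCKSZ):

#define REVMAP_ITEMS_PER_PAGE 1360   /* assumption, for illustration only */

/* Block 0 is the metapage, so revmap pages start at block 1. */
static unsigned
revmap_block_for(unsigned heap_blk, unsigned pages_per_range)
{
    unsigned range_no = heap_blk / pages_per_range;

    return range_no / REVMAP_ITEMS_PER_PAGE + 1;
}
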
if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
- errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
- BrinPageType(page),
- RelationGetRelationName(irel),
- BufferGetBlockNumber(buf))));
+ errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
+ BrinPageType(page),
+ RelationGetRelationName(irel),
+ BufferGetBlockNumber(buf))));
/* If the page is in use, evacuate it and restart */
if (brin_start_evacuating_page(irel, buf))
{
for (j = 0; j < brdesc->bd_info[i]->oi_nstored; j++)
TupleDescInitEntry(tupdesc, attno++, NULL,
- brdesc->bd_info[i]->oi_typcache[j]->type_id,
+ brdesc->bd_info[i]->oi_typcache[j]->type_id,
-1, 0);
}
/*
* Set up the scan keys, and check for unsatisfiable query.
*/
- ginFreeScanKeys(so); /* there should be no keys yet, but just to be sure */
+ ginFreeScanKeys(so); /* there should be no keys yet, but just to be
+ * sure */
ginNewScanKey(scan);
if (GinIsVoidRes(scan))
static const relopt_parse_elt tab[] = {
{"fastupdate", RELOPT_TYPE_BOOL, offsetof(GinOptions, useFastUpdate)},
{"gin_pending_list_limit", RELOPT_TYPE_INT, offsetof(GinOptions,
- pendingListCleanupSize)}
+ pendingListCleanupSize)}
};
options = parseRelOptions(reloptions, validate, RELOPT_KIND_GIN,
/* opclasses are not required to provide a Fetch method */
if (OidIsValid(index_getprocid(index, i + 1, GIST_FETCH_PROC)))
fmgr_info_copy(&(giststate->fetchFn[i]),
- index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
+ index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
scanCxt);
else
giststate->fetchFn[i].fn_oid = InvalidOid;
}
/*
- * If we're doing an index-only scan, on the first call, also initialize
- * a tuple descriptor to represent the returned index tuples and create a
+ * If we're doing an index-only scan, on the first call, also initialize a
+ * tuple descriptor to represent the returned index tuples and create a
* memory context to hold them during the scan.
*/
if (scan->xs_want_itup && !scan->xs_itupdesc)
* descriptor. Instead, construct a descriptor with the original data
* types.
*/
- natts = RelationGetNumberOfAttributes(scan->indexRelation);
+ natts = RelationGetNumberOfAttributes(scan->indexRelation);
so->giststate->fetchTupdesc = CreateTemplateTupleDesc(natts, false);
for (attno = 1; attno <= natts; attno++)
{
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
/*
- * Look up the datatype returned by the original ordering operator.
- * GiST always uses a float8 for the distance function, but the
- * ordering operator could be anything else.
+ * Look up the datatype returned by the original ordering
+ * operator. GiST always uses a float8 for the distance function,
+ * but the ordering operator could be anything else.
*
* XXX: The distance function is only allowed to be lossy if the
* ordering operator's result type is float4 or float8. Otherwise
isleaf);
cep = (GISTENTRY *)
DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[i],
- giststate->supportCollation[i],
+ giststate->supportCollation[i],
PointerGetDatum(&centry)));
compatt[i] = cep->key;
}
static HeapScanDesc heap_beginscan_internal(Relation relation,
Snapshot snapshot,
int nkeys, ScanKey key,
- bool allow_strat, bool allow_sync, bool allow_pagemode,
+ bool allow_strat, bool allow_sync, bool allow_pagemode,
bool is_bitmapscan, bool is_samplescan,
bool temp_snap);
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
static HeapScanDesc
heap_beginscan_internal(Relation relation, Snapshot snapshot,
int nkeys, ScanKey key,
- bool allow_strat, bool allow_sync, bool allow_pagemode,
- bool is_bitmapscan, bool is_samplescan, bool temp_snap)
+ bool allow_strat, bool allow_sync, bool allow_pagemode,
+ bool is_bitmapscan, bool is_samplescan, bool temp_snap)
{
HeapScanDesc scan;
{
/*
* For now, parallel operations are required to be strictly read-only.
- * Unlike heap_update() and heap_delete(), an insert should never create
- * a combo CID, so it might be possible to relax this restriction, but
- * not without more thought and testing.
+ * Unlike heap_update() and heap_delete(), an insert should never create a
+ * combo CID, so it might be possible to relax this restriction, but not
+ * without more thought and testing.
*/
if (IsInParallelMode())
ereport(ERROR,
infomask = tp.t_data->t_infomask;
/*
- * Sleep until concurrent transaction ends -- except when there's a single
- * locker and it's our own transaction. Note we don't care
+ * Sleep until concurrent transaction ends -- except when there's a
+ * single locker and it's our own transaction. Note we don't care
* which lock mode the locker has, because we need the strongest one.
*
* Before sleeping, we need to acquire tuple lock to establish our
else if (!TransactionIdIsCurrentTransactionId(xwait))
{
/*
- * Wait for regular transaction to end; but first, acquire
- * tuple lock.
+ * Wait for regular transaction to end; but first, acquire tuple
+ * lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
*
* Before sleeping, we need to acquire tuple lock to establish our
* priority for the tuple (see heap_lock_tuple). LockTuple will
- * release us when we are next-in-line for the tuple. Note we must not
- * acquire the tuple lock until we're sure we're going to sleep;
+ * release us when we are next-in-line for the tuple. Note we must
+ * not acquire the tuple lock until we're sure we're going to sleep;
* otherwise we're open for race conditions with other transactions
* holding the tuple lock which sleep on us.
*
*/
if (xmax_infomask_changed(oldtup.t_data->t_infomask,
infomask) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
}
else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
{
/*
- * If it's just a key-share locker, and we're not changing the
- * key columns, we don't need to wait for it to end; but we
- * need to preserve it as locker.
+ * If it's just a key-share locker, and we're not changing the key
+ * columns, we don't need to wait for it to end; but we need to
+ * preserve it as locker.
*/
checked_lockers = true;
locker_remains = true;
else
{
/*
- * Wait for regular transaction to end; but first, acquire
- * tuple lock.
+ * Wait for regular transaction to end; but first, acquire tuple
+ * lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
*/
if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
!TransactionIdEquals(xwait,
- HeapTupleHeaderGetRawXmax(oldtup.t_data)))
+ HeapTupleHeaderGetRawXmax(oldtup.t_data)))
goto l2;
/* Otherwise check if it committed or aborted */
HeapTupleClearHeapOnly(newtup);
}
- RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
if (!already_marked)
{
if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
TransactionIdIsCurrentTransactionId(xwait))
{
- /* ... but if the xmax changed in the meantime, start over */
+ /* ... but if the xmax changed in the meantime, start over */
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
* for the tuple. We must do this even if we are share-locking.
*
* If we are forced to "start over" below, we keep the tuple lock;
- * this arranges that we stay at the head of the line while rechecking
- * tuple state.
+ * this arranges that we stay at the head of the line while
+ * rechecking tuple state.
*/
if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
&have_tuple_lock))
{
case LockWaitBlock:
MultiXactIdWait((MultiXactId) xwait, status, infomask,
- relation, &tuple->t_self, XLTW_Lock, NULL);
+ relation, &tuple->t_self, XLTW_Lock, NULL);
break;
case LockWaitSkip:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
- status, infomask, relation,
+ status, infomask, relation,
NULL))
{
result = HeapTupleWouldBlock;
break;
case LockWaitError:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
- status, infomask, relation,
+ status, infomask, relation,
NULL))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ RelationGetRelationName(relation))));
break;
}
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ RelationGetRelationName(relation))));
break;
}
}
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * xwait is done, but if xwait had just locked the tuple then
- * some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * xwait is done, but if xwait had just locked the tuple then some
+ * other xact could update this tuple before we get to this point.
+ * Check for xmax change, and start over if so.
*/
if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
!TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
* Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that would have been handled above. So
- * that transaction must necessarily be gone by now. But don't
- * check for this in the multixact case, because some locker
- * transactions might still be running.
+ * that transaction must necessarily be gone by now. But
+ * don't check for this in the multixact case, because some
+ * locker transactions might still be running.
*/
UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
}
if (!ConditionalLockTupleTuplock(relation, tid, mode))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
break;
}
*have_tuple_lock = true;
MarkBufferDirty(buffer);
/*
- * Replace the speculative insertion token with a real t_ctid,
- * pointing to itself like it does on regular tuples.
+ * Replace the speculative insertion token with a real t_ctid, pointing to
+ * itself like it does on regular tuples.
*/
htup->t_ctid = tuple->t_self;
DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
LockTupleMode lockmode)
{
- bool allow_old;
- int nmembers;
+ bool allow_old;
+ int nmembers;
MultiXactMember *members;
- bool result = false;
- LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
+ bool result = false;
+ LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
HEAP_XMAX_IS_LOCKED_ONLY(infomask));
if (nmembers >= 0)
{
- int i;
+ int i;
for (i = 0; i < nmembers; i++)
{
- TransactionId memxid;
- LOCKMODE memlockmode;
+ TransactionId memxid;
+ LOCKMODE memlockmode;
memlockmode = LOCKMODE_from_mxstatus(members[i].status);
{
XLogRegisterBufData(0,
((char *) newtup->t_data) + SizeofHeapTupleHeader,
- newtup->t_len - SizeofHeapTupleHeader - suffixlen);
+ newtup->t_len - SizeofHeapTupleHeader - suffixlen);
}
else
{
if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
{
XLogRegisterBufData(0,
- ((char *) newtup->t_data) + SizeofHeapTupleHeader,
- newtup->t_data->t_hoff - SizeofHeapTupleHeader);
+ ((char *) newtup->t_data) + SizeofHeapTupleHeader,
+ newtup->t_data->t_hoff - SizeofHeapTupleHeader);
}
/* data after common prefix */
{
/*
* The OID column can appear in an index definition, but that's
- * OK, because we always copy the OID if present (see below). Other
- * system columns may not.
+ * OK, because we always copy the OID if present (see below).
+ * Other system columns may not.
*/
if (attno == ObjectIdAttributeNumber)
continue;
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
/*
- * Insert the correct position into CTID of the stored tuple, too
- * (unless this is a speculative insertion, in which case the token is
- * held in CTID field instead)
+ * Insert the correct position into CTID of the stored tuple, too (unless
+ * this is a speculative insertion, in which case the token is held in
+ * CTID field instead)
*/
if (!token)
{
 * Check permissions: if the user does not have access to view all of the
* key columns then return NULL to avoid leaking data.
*
- * First check if RLS is enabled for the relation. If so, return NULL
- * to avoid leaking data.
+ * First check if RLS is enabled for the relation. If so, return NULL to
+ * avoid leaking data.
*
- * Next we need to check table-level SELECT access and then, if
- * there is no access there, check column-level permissions.
+ * Next we need to check table-level SELECT access and then, if there is
+ * no access there, check column-level permissions.
*/
/*
if (aclresult != ACLCHECK_OK)
{
/*
- * No table-level access, so step through the columns in the
- * index and make sure the user has SELECT rights on all of them.
+ * No table-level access, so step through the columns in the index and
+ * make sure the user has SELECT rights on all of them.
*/
for (keyno = 0; keyno < idxrec->indnatts; keyno++)
{
AttrNumber attnum = idxrec->indkey.values[keyno];
/*
- * Note that if attnum == InvalidAttrNumber, then this is an
- * index based on an expression and we return no detail rather
- * than try to figure out what column(s) the expression includes
- * and if the user has SELECT rights on them.
+ * Note that if attnum == InvalidAttrNumber, then this is an index
+ * based on an expression and we return no detail rather than try
+ * to figure out what column(s) the expression includes and if the
+ * user has SELECT rights on them.
*/
if (attnum == InvalidAttrNumber ||
pg_attribute_aclcheck(indrelid, attnum, GetUserId(),
*/
if (checkUnique != UNIQUE_CHECK_NO)
{
- TransactionId xwait;
- uint32 speculativeToken;
+ TransactionId xwait;
+ uint32 speculativeToken;
offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey,
{
/* Have to wait for the other guy ... */
_bt_relbuf(rel, buf);
+
/*
- * If it's a speculative insertion, wait for it to finish (ie.
- * to go ahead with the insertion, or kill the tuple). Otherwise
+ * If it's a speculative insertion, wait for it to finish (ie. to
+ * go ahead with the insertion, or kill the tuple). Otherwise
* wait for the transaction to finish as usual.
*/
if (speculativeToken)
(errcode(ERRCODE_UNIQUE_VIOLATION),
errmsg("duplicate key value violates unique constraint \"%s\"",
RelationGetRelationName(rel)),
- key_desc ? errdetail("Key %s already exists.",
- key_desc) : 0,
+ key_desc ? errdetail("Key %s already exists.",
+ key_desc) : 0,
errtableconstraint(heapRel,
RelationGetRelationName(rel))));
}
lbuf = _bt_getbuf(rel, leftsib, BT_READ);
lpage = BufferGetPage(lbuf);
lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
+
/*
* If the left sibling is split again by another backend,
* after we released the lock, we know that the first
leafrightsib = opaque->btpo_next;
/*
- * Before attempting to lock the parent page, check that the right
- * sibling is not in half-dead state. A half-dead right sibling would
- * have no downlink in the parent, which would be highly confusing later
- * when we delete the downlink that follows the current page's downlink.
- * (I believe the deletion would work correctly, but it would fail the
+ * Before attempting to lock the parent page, check that the right sibling
+ * is not in half-dead state. A half-dead right sibling would have no
+ * downlink in the parent, which would be highly confusing later when we
+ * delete the downlink that follows the current page's downlink. (I
+ * believe the deletion would work correctly, but it would fail the
* cross-check we make that the following downlink points to the right
* sibling of the delete page.)
*/
BTSpool *spool;
/*
- * spool2 is needed only when the index is a unique index. Dead tuples
- * are put into spool2 instead of spool in order to avoid uniqueness
- * check.
+ * spool2 is needed only when the index is a unique index. Dead tuples are
+ * put into spool2 instead of spool in order to avoid uniqueness check.
*/
BTSpool *spool2;
double indtuples;
offnum = OffsetNumberPrev(offnum);
/*
- * By here the scan position is now set for the first key. If all
- * further tuples are expected to match we set the SK_BT_MATCHED flag
- * to avoid re-checking the scan key later. This is a big win for
- * slow key matches though is still significant even for fast datatypes.
+ * By here the scan position is now set for the first key. If all further
+ * tuples are expected to match we set the SK_BT_MATCHED flag to avoid
+ * re-checking the scan key later. This is a big win for slow key matches,
+ * though it is still significant even for fast datatypes.
*/
switch (startKeys[0]->sk_strategy)
{
{
for (i = 1; i <= keysz; i++)
{
- SortSupport entry;
+ SortSupport entry;
Datum attrDatum1,
attrDatum2;
bool isNull1,
Datum test;
/*
- * If the scan key has already matched we can skip this key, as
- * long as the index tuple does not contain NULL values.
+ * If the scan key has already matched we can skip this key, as long
+ * as the index tuple does not contain NULL values.
*/
if (key->sk_flags & SK_BT_MATCHED && !IndexTupleHasNulls(tuple))
continue;
* any items from the page, and so there is no need to search left from the
* recorded offset. (This observation also guarantees that the item is still
* the right one to delete, which might otherwise be questionable since heap
- * TIDs can get recycled.) This holds true even if the page has been modified
+ * TIDs can get recycled.) This holds true even if the page has been modified
* by inserts and page splits, so there is no need to consult the LSN.
*
* If the pin was released after reading the page, then we re-read it. If it
/*-------------------------------------------------------------------------
*
* committsdesc.c
- * rmgr descriptor routines for access/transam/commit_ts.c
+ * rmgr descriptor routines for access/transam/commit_ts.c
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/committsdesc.c
+ * src/backend/access/rmgrdesc/committsdesc.c
*
*-------------------------------------------------------------------------
*/
else if (info == COMMIT_TS_SETTS)
{
xl_commit_ts_set *xlrec = (xl_commit_ts_set *) rec;
- int nsubxids;
+ int nsubxids;
appendStringInfo(buf, "set %s/%d for: %u",
timestamptz_to_str(xlrec->timestamp),
sizeof(TransactionId));
if (nsubxids > 0)
{
- int i;
+ int i;
TransactionId *subxids;
subxids = palloc(sizeof(TransactionId) * nsubxids);
/*-------------------------------------------------------------------------
*
* replorigindesc.c
- * rmgr descriptor routines for replication/logical/replication_origin.c
+ * rmgr descriptor routines for replication/logical/replication_origin.c
*
* Portions Copyright (c) 2015, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/replorigindesc.c
+ * src/backend/access/rmgrdesc/replorigindesc.c
*
*-------------------------------------------------------------------------
*/
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec;
+
xlrec = (xl_replorigin_set *) rec;
appendStringInfo(buf, "set %u; lsn %X/%X; force: %d",
case XLOG_REPLORIGIN_DROP:
{
xl_replorigin_drop *xlrec;
+
xlrec = (xl_replorigin_drop *) rec;
appendStringInfo(buf, "drop %u", xlrec->node_id);
memset(parsed, 0, sizeof(*parsed));
- parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+ parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
+ * present */
parsed->xact_time = xlrec->xact_time;
if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
{
- xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
+ xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
parsed->nsubxacts = xl_subxacts->nsubxacts;
parsed->subxacts = xl_subxacts->subxacts;
memset(parsed, 0, sizeof(*parsed));
- parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+ parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
+ * present */
parsed->xact_time = xlrec->xact_time;
if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
{
- xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
+ xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
parsed->nsubxacts = xl_subxacts->nsubxacts;
parsed->subxacts = xl_subxacts->subxacts;
{
appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
origin_id,
- (uint32)(parsed.origin_lsn >> 32),
- (uint32)parsed.origin_lsn,
+ (uint32) (parsed.origin_lsn >> 32),
+ (uint32) parsed.origin_lsn,
timestamptz_to_str(parsed.origin_timestamp));
}
}
spgcanreturn(PG_FUNCTION_ARGS)
{
Relation index = (Relation) PG_GETARG_POINTER(0);
+
/* int i = PG_GETARG_INT32(1); */
SpGistCache *cache;
/* tsdesc */
typedef struct
{
- uint32 seed; /* random seed */
- BlockNumber startblock; /* starting block, we use ths for syncscan support */
+ uint32 seed; /* random seed */
+ BlockNumber startblock; /* starting block, we use this for syncscan
+ * support */
BlockNumber nblocks; /* number of blocks */
BlockNumber blockno; /* current block */
- float4 probability; /* probabilty that tuple will be returned (0.0-1.0) */
+ float4 probability; /* probability that a tuple will be returned
+ * (0.0-1.0) */
OffsetNumber lt; /* last tuple returned from current block */
- SamplerRandomState randstate; /* random generator tsdesc */
+ SamplerRandomState randstate; /* random generator state */
} BernoulliSamplerData;
/*
Datum
tsm_bernoulli_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
- HeapScanDesc scan = tsdesc->heapScan;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+ HeapScanDesc scan = tsdesc->heapScan;
BernoulliSamplerData *sampler;
if (percent < 0 || percent > 100)
Datum
tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- BernoulliSamplerData *sampler =
- (BernoulliSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ BernoulliSamplerData *sampler =
+ (BernoulliSamplerData *) tsdesc->tsmdata;
/*
- * Bernoulli sampling scans all blocks on the table and supports
- * syncscan so loop from startblock to startblock instead of
- * from 0 to nblocks.
+ * Bernoulli sampling scans all blocks on the table and supports syncscan
+ * so loop from startblock to startblock instead of from 0 to nblocks.
*/
if (sampler->blockno == InvalidBlockNumber)
sampler->blockno = sampler->startblock;
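
The wrap-around that the reflowed comment describes can be sketched on its own; the names are illustrative and (unsigned) -1 stands in for InvalidBlockNumber:

/* Advance one block in a circular scan that began at startblock;
 * report end-of-scan once we would land back on the start. */
static unsigned
next_block_circular(unsigned blockno, unsigned startblock, unsigned nblocks)
{
    unsigned next = (blockno + 1) % nblocks;

    return (next == startblock) ? (unsigned) -1 : next;
}
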
* tuples have same probability of being returned the visible and invisible
* tuples will be returned in same ratio as they have in the actual table.
* This means that there is no skew towards either visible or invisible tuples
- * and the number returned visible tuples to from the executor node is the
+ * and the number of visible tuples returned from the executor node is the
* fraction of visible tuples which was specified in input.
*
* This is faster than doing the coinflip in the examinetuple because we don't
Datum
tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- BernoulliSamplerData *sampler =
- (BernoulliSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
- float4 probability = sampler->probability;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ BernoulliSamplerData *sampler =
+ (BernoulliSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
+ float4 probability = sampler->probability;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
/*
* Loop over tuple offsets until the random generator returns value that
- * is within the probability of returning the tuple or until we reach
- * end of the block.
+ * is within the probability of returning the tuple or until we reach end
+ * of the block.
*
 * (This is our implementation of a Bernoulli trial.)
*/
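
A compact restatement of that loop, assuming a POSIX PRNG returning a uniform fraction in [0, 1) (drand48() stands in for sampler_random_fract(), and 0 for InvalidOffsetNumber):

#include <stdlib.h>

/* Skip offsets until one passes the coinflip or the block runs out. */
static unsigned short
bernoulli_next_offset(unsigned short tupoffset, unsigned short maxoffset,
                      double probability)
{
    while (tupoffset <= maxoffset && drand48() > probability)
        tupoffset++;
    return (tupoffset > maxoffset) ? 0 : tupoffset;
}
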
Datum
tsm_bernoulli_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- BernoulliSamplerData *sampler =
- (BernoulliSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ BernoulliSamplerData *sampler =
+ (BernoulliSamplerData *) tsdesc->tsmdata;
sampler->blockno = InvalidBlockNumber;
sampler->lt = InvalidOffsetNumber;
Datum
tsm_bernoulli_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *pctnode;
- float4 samplesize;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *pctnode;
+ float4 samplesize;
*pages = baserel->pages;
typedef struct
{
BlockSamplerData bs;
- uint32 seed; /* random seed */
+ uint32 seed; /* random seed */
BlockNumber nblocks; /* number of blocks in relation */
- int samplesize; /* number of blocks to return */
+ int samplesize; /* number of blocks to return */
OffsetNumber lt; /* last tuple returned from current block */
} SystemSamplerData;
Datum
tsm_system_init(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- uint32 seed = PG_GETARG_UINT32(1);
- float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
- HeapScanDesc scan = tsdesc->heapScan;
- SystemSamplerData *sampler;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ uint32 seed = PG_GETARG_UINT32(1);
+ float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+ HeapScanDesc scan = tsdesc->heapScan;
+ SystemSamplerData *sampler;
if (percent < 0 || percent > 100)
ereport(ERROR,
Datum
tsm_system_nextblock(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- BlockNumber blockno;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ BlockNumber blockno;
if (!BlockSampler_HasMore(&sampler->bs))
PG_RETURN_UINT32(InvalidBlockNumber);
Datum
tsm_system_nexttuple(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- OffsetNumber maxoffset = PG_GETARG_UINT16(2);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
- OffsetNumber tupoffset = sampler->lt;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
Datum
tsm_system_reset(PG_FUNCTION_ARGS)
{
- TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
- SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+ TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+ SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
BlockSampler_Init(&sampler->bs, sampler->nblocks, sampler->samplesize,
Datum
tsm_system_cost(PG_FUNCTION_ARGS)
{
- PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
- Path *path = (Path *) PG_GETARG_POINTER(1);
- RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
- List *args = (List *) PG_GETARG_POINTER(3);
- BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
- double *tuples = (double *) PG_GETARG_POINTER(5);
- Node *pctnode;
- float4 samplesize;
+ PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+ Path *path = (Path *) PG_GETARG_POINTER(1);
+ RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+ List *args = (List *) PG_GETARG_POINTER(3);
+ BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+ double *tuples = (double *) PG_GETARG_POINTER(5);
+ Node *pctnode;
+ float4 samplesize;
pctnode = linitial(args);
pctnode = estimate_expression_value(root, pctnode);
/*-------------------------------------------------------------------------
*
* tablesample.c
- * TABLESAMPLE internal API
+ * TABLESAMPLE internal API
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/tablesample/tablesample.c
+ * src/backend/access/tablesample/tablesample.c
*
 * TABLESAMPLE is the SQL standard clause for sampling relations.
*
List *args = tablesample->args;
ListCell *arg;
ExprContext *econtext = scanstate->ss.ps.ps_ExprContext;
- TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
+ TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
/* Load functions */
fmgr_info(tablesample->tsminit, &(tsdesc->tsminit));
fcinfo.argnull[0] = false;
/*
- * Second arg for init function is always REPEATABLE
- * When tablesample->repeatable is NULL then REPEATABLE clause was not
- * specified.
- * When specified, the expression cannot evaluate to NULL.
+ * Second arg for init function is always REPEATABLE. When
+ * tablesample->repeatable is NULL then REPEATABLE clause was not
+ * specified. When specified, the expression cannot evaluate to NULL.
*/
if (tablesample->repeatable)
{
ExprState *argstate = ExecInitExpr((Expr *) tablesample->repeatable,
(PlanState *) scanstate);
+
fcinfo.arg[1] = ExecEvalExpr(argstate, econtext,
&fcinfo.argnull[1], NULL);
if (fcinfo.argnull[1])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("REPEATABLE clause must be NOT NULL numeric value")));
+ errmsg("REPEATABLE clause must be NOT NULL numeric value")));
}
else
{
HeapTuple
tablesample_getnext(TableSampleDesc *desc)
{
- HeapScanDesc scan = desc->heapScan;
- HeapTuple tuple = &(scan->rs_ctup);
- bool pagemode = scan->rs_pageatatime;
- BlockNumber blockno;
- Page page;
- bool page_all_visible;
- ItemId itemid;
- OffsetNumber tupoffset,
- maxoffset;
+ HeapScanDesc scan = desc->heapScan;
+ HeapTuple tuple = &(scan->rs_ctup);
+ bool pagemode = scan->rs_pageatatime;
+ BlockNumber blockno;
+ Page page;
+ bool page_all_visible;
+ ItemId itemid;
+ OffsetNumber tupoffset,
+ maxoffset;
if (!scan->rs_inited)
{
return NULL;
}
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
- PointerGetDatum(desc)));
+ PointerGetDatum(desc)));
if (!BlockNumberIsValid(blockno))
{
tuple->t_data = NULL;
CHECK_FOR_INTERRUPTS();
tupoffset = DatumGetUInt16(FunctionCall3(&desc->tsmnexttuple,
- PointerGetDatum(desc),
- UInt32GetDatum(blockno),
- UInt16GetDatum(maxoffset)));
+ PointerGetDatum(desc),
+ UInt32GetDatum(blockno),
+ UInt16GetDatum(maxoffset)));
if (OffsetNumberIsValid(tupoffset))
{
- bool visible;
- bool found;
+ bool visible;
+ bool found;
/* Skip invalid tuple pointers. */
itemid = PageGetItemId(page, tupoffset);
visible = SampleTupleVisible(tuple, tupoffset, scan);
/*
- * Let the sampling method examine the actual tuple and decide if we
- * should return it.
+ * Let the sampling method examine the actual tuple and decide if
+ * we should return it.
*
* Note that we let it examine even invisible tuples for
* statistical purposes, but not return them since user should
if (OidIsValid(desc->tsmexaminetuple.fn_oid))
{
found = DatumGetBool(FunctionCall4(&desc->tsmexaminetuple,
- PointerGetDatum(desc),
- UInt32GetDatum(blockno),
- PointerGetDatum(tuple),
- BoolGetDatum(visible)));
+ PointerGetDatum(desc),
+ UInt32GetDatum(blockno),
+ PointerGetDatum(tuple),
+ BoolGetDatum(visible)));
/* Should not happen if sampling method is well written. */
if (found && !visible)
elog(ERROR, "Sampling method wanted to return invisible tuple");
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
- PointerGetDatum(desc)));
+ PointerGetDatum(desc)));
/*
- * Report our new scan position for synchronization purposes. We
- * don't do that when moving backwards, however. That would just
- * mess up any other forward-moving scanners.
+ * Report our new scan position for synchronization purposes. We don't
+ * do that when moving backwards, however. That would just mess up any
+ * other forward-moving scanners.
*
- * Note: we do this before checking for end of scan so that the
- * final state of the position hint is back at the start of the
- * rel. That's not strictly necessary, but otherwise when you run
- * the same query multiple times the starting position would shift
- * a little bit backwards on every invocation, which is confusing.
- * We don't guarantee any specific ordering in general, though.
+ * Note: we do this before checking for end of scan so that the final
+ * state of the position hint is back at the start of the rel. That's
+ * not strictly necessary, but otherwise when you run the same query
+ * multiple times the starting position would shift a little bit
+ * backwards on every invocation, which is confusing. We don't
+ * guarantee any specific ordering in general, though.
*/
if (scan->rs_syncscan)
ss_report_location(scan->rs_rd, BlockNumberIsValid(blockno) ?
{
/*
* If this scan is reading whole pages at a time, there is already
- * visibility info present in rs_vistuples so we can just search it
- * for the tupoffset.
+ * visibility info present in rs_vistuples so we can just search it for
+ * the tupoffset.
*/
if (scan->rs_pageatatime)
{
- int start = 0,
- end = scan->rs_ntuples - 1;
+ int start = 0,
+ end = scan->rs_ntuples - 1;
/*
* Do the binary search over rs_vistuples, it's already sorted by
* OffsetNumber so we don't need to do any sorting ourselves here.
*
- * We could use bsearch() here but it's slower for integers because
- * of the function call overhead and because it needs boiler plate code
+ * We could use bsearch() here, but it's slower for integers because of
+ * the function call overhead; given the boilerplate code it needs,
* it would not save us anything code-wise anyway.
*/
while (start <= end)
{
- int mid = start + (end - start) / 2;
+ int mid = start + (end - start) / 2;
OffsetNumber curoffset = scan->rs_vistuples[mid];
if (curoffset == tupoffset)
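
The hunk is cut off mid-loop, so for reference here is the whole binary-search shape it reflows, written as a generic sketch over a sorted offset array rather than as the patch's function:

/* Return 1 if tupoffset appears in the sorted vistuples[] array. */
static int
offset_is_visible(const unsigned short *vistuples, int ntuples,
                  unsigned short tupoffset)
{
    int start = 0;
    int end = ntuples - 1;

    while (start <= end)
    {
        int mid = start + (end - start) / 2;   /* overflow-safe midpoint */

        if (vistuples[mid] == tupoffset)
            return 1;
        else if (vistuples[mid] < tupoffset)
            start = mid + 1;
        else
            end = mid - 1;
    }
    return 0;
}
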
Snapshot snapshot = scan->rs_snapshot;
Buffer buffer = scan->rs_cbuf;
- bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
+ bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
CheckForSerializableConflictOut(visible, scan->rs_rd, tuple, buffer,
snapshot);
*/
typedef struct CommitTimestampEntry
{
- TimestampTz time;
- RepOriginId nodeid;
+ TimestampTz time;
+ RepOriginId nodeid;
} CommitTimestampEntry;
#define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \
#define COMMIT_TS_XACTS_PER_PAGE \
(BLCKSZ / SizeOfCommitTimestampEntry)
-#define TransactionIdToCTsPage(xid) \
+#define TransactionIdToCTsPage(xid) \
((xid) / (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
#define TransactionIdToCTsEntry(xid) \
((xid) % (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
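
To make the macro arithmetic concrete, assume an 8192-byte block and a 10-byte entry (8-byte timestamp plus 2-byte origin id, ignoring any padding); both figures are assumptions for this example only:

#include <stdio.h>

#define BLCKSZ_ASSUMED 8192
#define ENTRY_SIZE_ASSUMED 10
#define XACTS_PER_PAGE (BLCKSZ_ASSUMED / ENTRY_SIZE_ASSUMED)

int
main(void)
{
    unsigned xid = 100000;

    /* prints: xid 100000 -> page 122, entry 82 */
    printf("xid %u -> page %u, entry %u\n",
           xid, xid / XACTS_PER_PAGE, xid % XACTS_PER_PAGE);
    return 0;
}
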
*/
typedef struct CommitTimestampShared
{
- TransactionId xidLastCommit;
+ TransactionId xidLastCommit;
CommitTimestampEntry dataLastCommit;
} CommitTimestampShared;
-CommitTimestampShared *commitTsShared;
+CommitTimestampShared *commitTsShared;
/* GUC variable */
-bool track_commit_timestamp;
+bool track_commit_timestamp;
static void SetXidCommitTsInPage(TransactionId xid, int nsubxids,
TransactionId *subxids, TimestampTz ts,
RepOriginId nodeid, int pageno);
static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts,
- RepOriginId nodeid, int slotno);
+ RepOriginId nodeid, int slotno);
static int ZeroCommitTsPage(int pageno, bool writeXlog);
static bool CommitTsPagePrecedes(int page1, int page2);
static void WriteZeroPageXlogRec(int pageno);
return;
/*
- * Comply with the WAL-before-data rule: if caller specified it wants
- * this value to be recorded in WAL, do so before touching the data.
+ * Comply with the WAL-before-data rule: if caller specified it wants this
+ * value to be recorded in WAL, do so before touching the data.
*/
if (do_xlog)
WriteSetTimestampXlogRec(xid, nsubxids, subxids, timestamp, nodeid);
/*
* We split the xids to set the timestamp to in groups belonging to the
* same SLRU page; the first element in each such set is its head. The
- * first group has the main XID as the head; subsequent sets use the
- * first subxid not on the previous page as head. This way, we only have
- * to lock/modify each SLRU page once.
+ * first group has the main XID as the head; subsequent sets use the first
+ * subxid not on the previous page as head. This way, we only have to
+ * lock/modify each SLRU page once.
*/
for (i = 0, headxid = xid;;)
{
break;
/*
- * Set the new head and skip over it, as well as over the subxids
- * we just wrote.
+ * Set the new head and skip over it, as well as over the subxids we
+ * just wrote.
*/
headxid = subxids[j];
i += j - i + 1;
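
The grouping strategy described in that comment is a general one: walk a page-ordered xid array and emit one batch per page, so each SLRU page is locked and modified exactly once. A sketch with an assumed entries-per-page constant:

#define ENTRIES_PER_PAGE_ASSUMED 819   /* assumption for illustration */

/* Invoke the callback once per page's worth of ascending xids. */
static void
for_each_page_group(const unsigned *xids, int n,
                    void (*apply)(const unsigned *, int))
{
    int i = 0;

    while (i < n)
    {
        unsigned page = xids[i] / ENTRIES_PER_PAGE_ASSUMED;
        int j = i + 1;

        while (j < n && xids[j] / ENTRIES_PER_PAGE_ASSUMED == page)
            j++;
        apply(&xids[i], j - i);
        i = j;
    }
}
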
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
- errhint("Make sure the configuration parameter \"%s\" is set.",
- "track_commit_timestamp")));
+ errhint("Make sure the configuration parameter \"%s\" is set.",
+ "track_commit_timestamp")));
/* error if the given Xid doesn't normally commit */
if (!TransactionIdIsNormal(xid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
+ errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
/*
* Return empty if the requested value is outside our valid range.
TransactionId
GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
{
- TransactionId xid;
+ TransactionId xid;
/* Error if module not enabled */
if (!track_commit_timestamp)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
- errhint("Make sure the configuration parameter \"%s\" is set.",
- "track_commit_timestamp")));
+ errhint("Make sure the configuration parameter \"%s\" is set.",
+ "track_commit_timestamp")));
LWLockAcquire(CommitTsLock, LW_SHARED);
xid = commitTsShared->xidLastCommit;
Datum
pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
{
- TransactionId xid = PG_GETARG_UINT32(0);
- TimestampTz ts;
- bool found;
+ TransactionId xid = PG_GETARG_UINT32(0);
+ TimestampTz ts;
+ bool found;
found = TransactionIdGetCommitTsData(xid, &ts, NULL);
Datum
pg_last_committed_xact(PG_FUNCTION_ARGS)
{
- TransactionId xid;
- TimestampTz ts;
- Datum values[2];
- bool nulls[2];
- TupleDesc tupdesc;
+ TransactionId xid;
+ TimestampTz ts;
+ Datum values[2];
+ bool nulls[2];
+ TupleDesc tupdesc;
HeapTuple htup;
/* and construct a tuple with our data */
void
CommitTsShmemInit(void)
{
- bool found;
+ bool found;
CommitTsCtl->PagePrecedes = CommitTsPagePrecedes;
SimpleLruInit(CommitTsCtl, "CommitTs Ctl", CommitTsShmemBuffers(), 0,
{
/*
* Nothing to do here at present, unlike most other SLRU modules; segments
- * are created when the server is started with this module enabled.
- * See StartupCommitTs.
+ * are created when the server is started with this module enabled. See
+ * StartupCommitTs.
*/
}
/*
* Activate this module whenever necessary.
- * This must happen during postmaster or standalong-backend startup,
- * or during WAL replay anytime the track_commit_timestamp setting is
- * changed in the master.
+ * This must happen during postmaster or standalone-backend startup,
+ * or during WAL replay anytime the track_commit_timestamp setting is
+ * changed in the master.
*
* The reason why this SLRU needs separate activation/deactivation functions is
* that it can be enabled/disabled during start and the activation/deactivation
/* Finally, create the current segment file, if necessary */
if (!SimpleLruDoesPhysicalPageExist(CommitTsCtl, pageno))
{
- int slotno;
+ int slotno;
LWLockAcquire(CommitTsControlLock, LW_EXCLUSIVE);
slotno = ZeroCommitTsPage(pageno, false);
TransactionId *subxids, TimestampTz timestamp,
RepOriginId nodeid)
{
- xl_commit_ts_set record;
+ xl_commit_ts_set record;
record.timestamp = timestamp;
record.nodeid = nodeid;
subxids = NULL;
TransactionTreeSetCommitTsData(setts->mainxid, nsubxids, subxids,
- setts->timestamp, setts->nodeid, false);
+ setts->timestamp, setts->nodeid, false);
if (subxids)
pfree(subxids);
}
*/
if (!MultiXactIdPrecedes(result, MultiXactState->multiVacLimit) ||
(MultiXactState->nextOffset - MultiXactState->oldestOffset
- > MULTIXACT_MEMBER_SAFE_THRESHOLD))
+ > MULTIXACT_MEMBER_SAFE_THRESHOLD))
{
/*
* For safety's sake, we release MultiXactGenLock while sending
MultiXactIdSetOldestVisible();
/*
- * If we know the multi is used only for locking and not for updates,
- * then we can skip checking if the value is older than our oldest
- * visible multi. It cannot possibly still be running.
+ * If we know the multi is used only for locking and not for updates, then
+ * we can skip checking if the value is older than our oldest visible
+ * multi. It cannot possibly still be running.
*/
if (onlyLock &&
MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
*
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
* useful; it has already been removed, or will be removed shortly, by
- * truncation. Returning the wrong values could lead
- * to an incorrect visibility result. However, to support pg_upgrade we
- * need to allow an empty set to be returned regardless, if the caller is
- * willing to accept it; the caller is expected to check that it's an
- * allowed condition (such as ensuring that the infomask bits set on the
- * tuple are consistent with the pg_upgrade scenario). If the caller is
- * expecting this to be called only on recently created multis, then we
- * raise an error.
+ * truncation. Returning the wrong values could lead to an incorrect
+ * visibility result. However, to support pg_upgrade we need to allow an
+ * empty set to be returned regardless, if the caller is willing to accept
+ * it; the caller is expected to check that it's an allowed condition
+ * (such as ensuring that the infomask bits set on the tuple are
+ * consistent with the pg_upgrade scenario). If the caller is expecting
+ * this to be called only on recently created multis, then we raise an
+ * error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
* seen, it implies undetected ID wraparound has occurred. This raises a
* enough to contain the next value that would be created.
*
* We need to do this pretty early during the first startup in binary
- * upgrade mode: before StartupMultiXact() in fact, because this routine is
- * called even before that by StartupXLOG(). And we can't do it earlier
- * than at this point, because during that first call of this routine we
- * determine the MultiXactState->nextMXact value that MaybeExtendOffsetSlru
- * needs.
+ * upgrade mode: before StartupMultiXact() in fact, because this routine
+ * is called even before that by StartupXLOG(). And we can't do it
+ * earlier than at this point, because during that first call of this
+ * routine we determine the MultiXactState->nextMXact value that
+ * MaybeExtendOffsetSlru needs.
*/
if (IsBinaryUpgrade)
MaybeExtendOffsetSlru();
/*
* Determine the offset of the oldest multixact that might still be
- * referenced. Normally, we can read the offset from the multixact itself,
- * but there's an important special case: if there are no multixacts in
- * existence at all, oldest_datminmxid obviously can't point to one. It
- * will instead point to the multixact ID that will be assigned the next
- * time one is needed.
+ * referenced. Normally, we can read the offset from the multixact
+ * itself, but there's an important special case: if there are no
+ * multixacts in existence at all, oldest_datminmxid obviously can't point
+ * to one. It will instead point to the multixact ID that will be
+ * assigned the next time one is needed.
*
 * NB: oldest_datminmxid is the oldest multixact that might still be
* referenced from a table, unlike in DetermineSafeOldestOffset, where we
* obviously can't point to one. It will instead point to the multixact
* ID that will be assigned the next time one is needed.
*
- * NB: oldestMXact should be the oldest multixact that still exists in
- * the SLRU, unlike in SetMultiXactIdLimit, where we do this same
- * computation based on the oldest value that might be referenced in a
- * table.
+ * NB: oldestMXact should be the oldest multixact that still exists in the
+ * SLRU, unlike in SetMultiXactIdLimit, where we do this same computation
+ * based on the oldest value that might be referenced in a table.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
if (MultiXactState->nextMXact == oldestMXact)
MultiXactMemberFreezeThreshold(void)
{
MultiXactOffset members;
- uint32 multixacts;
- uint32 victim_multixacts;
- double fraction;
+ uint32 multixacts;
+ uint32 victim_multixacts;
+ double fraction;
ReadMultiXactCounts(&multixacts, &members);
void
TruncateMultiXact(void)
{
- MultiXactId oldestMXact;
+ MultiXactId oldestMXact;
MultiXactOffset oldestOffset;
MultiXactOffset nextOffset;
mxtruncinfo trunc;
* without blocking. That way, a worker that errors out can write the whole
* message into the queue and terminate without waiting for the user backend.
*/
-#define PARALLEL_ERROR_QUEUE_SIZE 16384
+#define PARALLEL_ERROR_QUEUE_SIZE 16384
/* Magic number for parallel context TOC. */
#define PARALLEL_MAGIC 0x50477c7c
BackendId parallel_master_backend_id;
/* Entrypoint for parallel workers. */
- parallel_worker_main_type entrypoint;
+ parallel_worker_main_type entrypoint;
/* Mutex protects remaining fields. */
slock_t mutex;
* and < the number of workers before any user code is invoked; each parallel
* worker will get a different parallel worker number.
*/
-int ParallelWorkerNumber = -1;
+int ParallelWorkerNumber = -1;
/* Is there a parallel message pending which we need to receive? */
-bool ParallelMessagePending = false;
+bool ParallelMessagePending = false;
/* Pointer to our fixed parallel state. */
static FixedParallelState *MyFixedParallelState;
ParallelContext *
CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
{
- MemoryContext oldcontext;
- ParallelContext *pcxt;
+ MemoryContext oldcontext;
+ ParallelContext *pcxt;
/* It is unsafe to create a parallel context if not in parallel mode. */
Assert(IsInParallelMode());
char *function_name,
int nworkers)
{
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
ParallelContext *pcxt;
/* We might be running in a very short-lived memory context. */
void
InitializeParallelDSM(ParallelContext *pcxt)
{
- MemoryContext oldcontext;
- Size library_len = 0;
- Size guc_len = 0;
- Size combocidlen = 0;
- Size tsnaplen = 0;
- Size asnaplen = 0;
- Size tstatelen = 0;
- Size segsize = 0;
- int i;
+ MemoryContext oldcontext;
+ Size library_len = 0;
+ Size guc_len = 0;
+ Size combocidlen = 0;
+ Size tsnaplen = 0;
+ Size asnaplen = 0;
+ Size tstatelen = 0;
+ Size segsize = 0;
+ int i;
FixedParallelState *fps;
Snapshot transaction_snapshot = GetTransactionSnapshot();
Snapshot active_snapshot = GetActiveSnapshot();
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
- * Normally, the user will have requested at least one worker process,
- * but if by chance they have not, we can skip a bunch of things here.
+ * Normally, the user will have requested at least one worker process, but
+ * if by chance they have not, we can skip a bunch of things here.
*/
if (pcxt->nworkers > 0)
{
/* Estimate space need for error queues. */
StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
- PARALLEL_ERROR_QUEUE_SIZE,
- "parallel error queue size not buffer-aligned");
+ PARALLEL_ERROR_QUEUE_SIZE,
+ "parallel error queue size not buffer-aligned");
shm_toc_estimate_chunk(&pcxt->estimator,
PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
shm_toc_estimate_keys(&pcxt->estimator, 1);
* memory segment; instead, just use backend-private memory.
*
* Also, if we can't create a dynamic shared memory segment because the
- * maximum number of segments have already been created, then fall back
- * to backend-private memory, and plan not to use any workers. We hope
- * this won't happen very often, but it's better to abandon the use of
+ * maximum number of segments has already been created, then fall back to
+ * backend-private memory, and plan not to use any workers. We hope this
+ * won't happen very often, but it's better to abandon the use of
* parallelism than to fail outright.
*/
segsize = shm_toc_estimate(&pcxt->estimator);
/* We can skip the rest of this if we're not budgeting for any workers. */
if (pcxt->nworkers > 0)
{
- char *libraryspace;
- char *gucspace;
- char *combocidspace;
- char *tsnapspace;
- char *asnapspace;
- char *tstatespace;
- char *error_queue_space;
+ char *libraryspace;
+ char *gucspace;
+ char *combocidspace;
+ char *tsnapspace;
+ char *asnapspace;
+ char *tstatespace;
+ char *error_queue_space;
/* Serialize shared libraries we have loaded. */
libraryspace = shm_toc_allocate(pcxt->toc, library_len);
* should be transmitted via separate (possibly larger?) queues.
*/
error_queue_space =
- shm_toc_allocate(pcxt->toc,
- PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+ shm_toc_allocate(pcxt->toc,
+ PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
for (i = 0; i < pcxt->nworkers; ++i)
{
- char *start;
- shm_mq *mq;
+ char *start;
+ shm_mq *mq;
start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
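
The queue setup above follows a flat-buffer carving pattern: one contiguous allocation sliced into fixed-size per-worker queues. Reduced to a sketch, with queue_init standing in for shm_mq_create:

#include <stddef.h>

/* Slice one contiguous allocation into nworkers fixed-size queues. */
static void
carve_queues(char *space, int nworkers, size_t queue_size,
             void (*queue_init)(char *, size_t))
{
    int i;

    for (i = 0; i < nworkers; ++i)
        queue_init(space + (size_t) i * queue_size, queue_size);
}
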
/* Serialize extension entrypoint information. */
if (pcxt->library_name != NULL)
{
- Size lnamelen = strlen(pcxt->library_name);
- char *extensionstate;
+ Size lnamelen = strlen(pcxt->library_name);
+ char *extensionstate;
extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
+ strlen(pcxt->function_name) + 2);
void
LaunchParallelWorkers(ParallelContext *pcxt)
{
- MemoryContext oldcontext;
- BackgroundWorker worker;
- int i;
- bool any_registrations_failed = false;
+ MemoryContext oldcontext;
+ BackgroundWorker worker;
+ int i;
+ bool any_registrations_failed = false;
/* Skip this if we have no workers. */
if (pcxt->nworkers == 0)
*
* The caller must be able to tolerate ending up with fewer workers than
* expected, so there is no need to throw an error here if registration
- * fails. It wouldn't help much anyway, because registering the worker
- * in no way guarantees that it will start up and initialize successfully.
+ * fails. It wouldn't help much anyway, because registering the worker in
+ * no way guarantees that it will start up and initialize successfully.
*/
for (i = 0; i < pcxt->nworkers; ++i)
{
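The body of this loop (elided between hunks) is, in simplified form, a register-or-remember step; the sketch below is not the patch verbatim, but it shows the tolerance for registration failure that the comment above promises:

    if (!any_registrations_failed &&
        RegisterDynamicBackgroundWorker(&worker, &pcxt->worker[i].bgwhandle))
        shm_mq_set_handle(pcxt->worker[i].error_mqh,
                          pcxt->worker[i].bgwhandle);
    else
    {
        /* Hit max_worker_processes: stop trying, and forget this
         * worker's budgeted error queue so nobody waits on it. */
        any_registrations_failed = true;
        pcxt->worker[i].bgwhandle = NULL;
        pcxt->worker[i].error_mqh = NULL;
    }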
else
{
/*
- * If we weren't able to register the worker, then we've bumped
- * up against the max_worker_processes limit, and future
+ * If we weren't able to register the worker, then we've bumped up
+ * against the max_worker_processes limit, and future
* registrations will probably fail too, so arrange to skip them.
* But we still have to execute this code for the remaining slots
* to make sure that we forget about the error queues we budgeted
{
for (;;)
{
- bool anyone_alive = false;
- int i;
+ bool anyone_alive = false;
+ int i;
/*
- * This will process any parallel messages that are pending, which
- * may change the outcome of the loop that follows. It may also
- * throw an error propagated from a worker.
+ * This will process any parallel messages that are pending, which may
+ * change the outcome of the loop that follows. It may also throw an
+ * error propagated from a worker.
*/
CHECK_FOR_INTERRUPTS();
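After the interrupt check, the remainder of the wait loop scans the worker array and sleeps on the process latch until a worker either sends a message or goes away. Roughly, as a sketch assuming the usual latch idiom:

    /* A worker counts as alive while its error queue handle survives. */
    for (i = 0; i < pcxt->nworkers; ++i)
    {
        if (pcxt->worker[i].error_mqh != NULL)
        {
            anyone_alive = true;
            break;
        }
    }
    if (!anyone_alive)
        break;                  /* every worker has shut down cleanly */

    WaitLatch(&MyProc->procLatch, WL_LATCH_SET, -1);
    ResetLatch(&MyProc->procLatch);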
void
DestroyParallelContext(ParallelContext *pcxt)
{
- int i;
+ int i;
/*
* Be careful about order of operations here! We remove the parallel
/* Wait until the workers actually die. */
for (i = 0; i < pcxt->nworkers; ++i)
{
- BgwHandleStatus status;
+ BgwHandleStatus status;
if (pcxt->worker[i].bgwhandle == NULL)
continue;
dlist_foreach(iter, &pcxt_list)
{
ParallelContext *pcxt;
- int i;
- Size nbytes;
- void *data;
+ int i;
+ Size nbytes;
+ void *data;
pcxt = dlist_container(ParallelContext, node, iter.cur);
if (pcxt->worker == NULL)
for (i = 0; i < pcxt->nworkers; ++i)
{
/*
- * Read as many messages as we can from each worker, but stop
- * when either (1) the error queue goes away, which can happen if
- * we receive a Terminate message from the worker; or (2) no more
+ * Read as many messages as we can from each worker, but stop when
+ * either (1) the error queue goes away, which can happen if we
+ * receive a Terminate message from the worker; or (2) no more
* messages can be read from the worker without blocking.
*/
while (pcxt->worker[i].error_mqh != NULL)
{
- shm_mq_result res;
+ shm_mq_result res;
res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
&data, true);
break;
else if (res == SHM_MQ_SUCCESS)
{
- StringInfoData msg;
+ StringInfoData msg;
initStringInfo(&msg);
appendBinaryStringInfo(&msg, data, nbytes);
}
else
ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
+ (errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
errmsg("lost connection to parallel worker")));
/* This might make the error queue go away. */
static void
HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
{
- char msgtype;
+ char msgtype;
msgtype = pq_getmsgbyte(msg);
switch (msgtype)
{
- case 'K': /* BackendKeyData */
+ case 'K': /* BackendKeyData */
{
- int32 pid = pq_getmsgint(msg, 4);
+ int32 pid = pq_getmsgint(msg, 4);
+
(void) pq_getmsgint(msg, 4); /* discard cancel key */
(void) pq_getmsgend(msg);
pcxt->worker[i].pid = pid;
break;
}
- case 'E': /* ErrorResponse */
- case 'N': /* NoticeResponse */
+ case 'E': /* ErrorResponse */
+ case 'N': /* NoticeResponse */
{
ErrorData edata;
ErrorContextCallback errctx;
break;
}
- case 'A': /* NotifyResponse */
+ case 'A': /* NotifyResponse */
{
/* Propagate NotifyResponse. */
pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
break;
}
- case 'X': /* Terminate, indicating clean exit */
+ case 'X': /* Terminate, indicating clean exit */
{
pfree(pcxt->worker[i].bgwhandle);
pfree(pcxt->worker[i].error_mqh);
ParallelWorkerMain(Datum main_arg)
{
dsm_segment *seg;
- shm_toc *toc;
+ shm_toc *toc;
FixedParallelState *fps;
- char *error_queue_space;
- shm_mq *mq;
+ char *error_queue_space;
+ shm_mq *mq;
shm_mq_handle *mqh;
- char *libraryspace;
- char *gucspace;
- char *combocidspace;
- char *tsnapspace;
- char *asnapspace;
- char *tstatespace;
- StringInfoData msgbuf;
+ char *libraryspace;
+ char *gucspace;
+ char *combocidspace;
+ char *tsnapspace;
+ char *asnapspace;
+ char *tstatespace;
+ StringInfoData msgbuf;
/* Establish signal handlers. */
pqsignal(SIGTERM, die);
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Now that we have a resource owner, we can attach to the dynamic
- * shared memory segment and read the table of contents.
+ * Now that we have a resource owner, we can attach to the dynamic shared
+ * memory segment and read the table of contents.
*/
seg = dsm_attach(DatumGetUInt32(main_arg));
if (seg == NULL)
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("bad magic number in dynamic shared memory segment")));
+ errmsg("bad magic number in dynamic shared memory segment")));
/* Determine and set our worker number. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
*/
error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
mq = (shm_mq *) (error_queue_space +
- ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
+ ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
shm_mq_set_sender(mq, MyProc);
mqh = shm_mq_attach(mq, seg, NULL);
pq_redirect_to_shm_mq(mq, mqh);
/*
* Send a BackendKeyData message to the process that initiated parallelism
* so that it has access to our PID before it receives any other messages
- * from us. Our cancel key is sent, too, since that's the way the protocol
- * message is defined, but it won't actually be used for anything in this
- * case.
+ * from us. Our cancel key is sent, too, since that's the way the
+ * protocol message is defined, but it won't actually be used for anything
+ * in this case.
*/
pq_beginmessage(&msgbuf, 'K');
pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
pq_endmessage(&msgbuf);
/*
- * Hooray! Primary initialization is complete. Now, we need to set up
- * our backend-local state to match the original backend.
+ * Hooray! Primary initialization is complete. Now, we need to set up our
+ * backend-local state to match the original backend.
*/
/*
- * Load libraries that were loaded by original backend. We want to do this
- * before restoring GUCs, because the libraries might define custom
+ * Load libraries that were loaded by original backend. We want to do
+ * this before restoring GUCs, because the libraries might define custom
* variables.
*/
libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
/*
- * We've initialized all of our state now; nothing should change hereafter.
+ * We've initialized all of our state now; nothing should change
+ * hereafter.
*/
EnterParallelMode();
static void
ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
{
- char *extensionstate;
- char *library_name;
- char *function_name;
+ char *extensionstate;
+ char *library_name;
+ char *function_name;
parallel_worker_main_type entrypt;
extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
static void
ParallelErrorContext(void *arg)
{
- errcontext("parallel worker, pid %d", * (int32 *) arg);
+ errcontext("parallel worker, pid %d", *(int32 *) arg);
}
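This one-liner is the worker-attribution hook: when the leader rethrows an ErrorResponse received in the 'E' case above, it pushes an ErrorContextCallback so the rethrown report carries the worker's PID. A sketch of the push, assuming the standard error_context_stack discipline:

    ErrorContextCallback errctx;

    /* Prepend our callback for the duration of the rethrow. */
    errctx.callback = ParallelErrorContext;
    errctx.arg = &pcxt->worker[i].pid;
    errctx.previous = error_context_stack;
    error_context_stack = &errctx;

    ThrowErrorData(&edata);     /* "parallel worker, pid %d" gets appended */

    error_context_stack = errctx.previous;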
/*
TimestampTz prepared_at; /* time of preparation */
XLogRecPtr prepare_lsn; /* XLOG offset of prepare record */
Oid owner; /* ID of user that executed the xact */
- BackendId locking_backend; /* backend currently working on the xact */
+ BackendId locking_backend; /* backend currently working on the xact */
bool valid; /* TRUE if PGPROC entry is in proc array */
char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
} GlobalTransactionData;
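locking_backend is what the recovery logic below consults: a backend claims a prepared-xact entry by stamping its own BackendId into the field, and releases the claim by resetting it. A two-line sketch of that convention (illustrative, not a hunk from this patch):

    gxact->locking_backend = MyBackendId;       /* start working on it */
    /* ... prepare or finish the transaction ... */
    gxact->locking_backend = InvalidBackendId;  /* done; leave it prepared */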
return;
/*
- * What to do with the locked global transaction entry? If we were in
- * the process of preparing the transaction, but haven't written the WAL
+ * What to do with the locked global transaction entry? If we were in the
+ * process of preparing the transaction, but haven't written the WAL
* record and state file yet, the transaction must not be considered as
* prepared. Likewise, if we are in the process of finishing an
- * already-prepared transaction, and fail after having already written
- * the 2nd phase commit or rollback record to the WAL, the transaction
- * should not be considered as prepared anymore. In those cases, just
- * remove the entry from shared memory.
+ * already-prepared transaction, and fail after having already written the
+ * 2nd phase commit or rollback record to the WAL, the transaction should
+ * not be considered as prepared anymore. In those cases, just remove the
+ * entry from shared memory.
*
- * Otherwise, the entry must be left in place so that the transaction
- * can be finished later, so just unlock it.
+ * Otherwise, the entry must be left in place so that the transaction can
+ * be finished later, so just unlock it.
*