pg_file_sync(PG_FUNCTION_ARGS)
{
char *filename;
- struct stat fst;
+ struct stat fst;
filename = convert_and_check_filename(PG_GETARG_TEXT_PP(0));
static bool
checkcondition_bit(void *checkval, ITEM *item, void *siglen)
{
- return GETBIT(checkval, HASHVAL(item->val, (int)(intptr_t) siglen));
+ return GETBIT(checkval, HASHVAL(item->val, (int) (intptr_t) siglen));
}
/*
signconsistent(QUERYTYPE *query, BITVECP sign, int siglen, bool calcnot)
{
return execute(GETQUERY(query) + query->size - 1,
- (void *) sign, (void *)(intptr_t) siglen, calcnot,
+ (void *) sign, (void *) (intptr_t) siglen, calcnot,
checkcondition_bit);
}
typedef struct LtreeSignature
{
- BITVECP sign;
- int siglen;
+ BITVECP sign;
+ int siglen;
} LtreeSignature;
static bool
#define LTG_GETRNODE(x, siglen) ( LTG_ISONENODE(x) ? LTG_NODE(x) : LTG_RNODE(x, siglen) )
extern ltree_gist *ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen,
- ltree *left, ltree *right);
+ ltree *left, ltree *right);
/* GiST support for ltree[] */
ltree *left, ltree *right)
{
int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
- (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
+ (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
ltree_gist *result = palloc(size);
SET_VARSIZE(result, size);
typedef struct LtreeSignature
{
- BITVECP sign;
- int siglen;
+ BITVECP sign;
+ int siglen;
} LtreeSignature;
static bool
foreach(lc, rowMarks)
{
RowMarkClause *rowmark = lfirst_node(RowMarkClause, lc);
+
if (!rowmark->pushedDown)
{
APP_JUMB(rowmark->rti);
Oid relid = PG_GETARG_OID(0);
Relation rel;
ForkNumber fork;
- BlockNumber block;
+ BlockNumber block;
rel = relation_open(relid, AccessExclusiveLock);
/*
* Check that non-superuser has used password to establish connection;
* otherwise, he's piggybacking on the postgres server's user
- * identity. See also dblink_security_check() in contrib/dblink
- * and check_conn_params.
+ * identity. See also dblink_security_check() in contrib/dblink and
+ * check_conn_params.
*/
if (!superuser_arg(user->userid) && UserMappingPasswordRequired(user) &&
!PQconnectionUsedPassword(conn))
foreach(cell, user->options)
{
DefElem *def = (DefElem *) lfirst(cell);
+
if (strcmp(def->defname, "password_required") == 0)
return defGetBoolean(def);
}
}
else if (strcmp(def->defname, "password_required") == 0)
{
- bool pw_required = defGetBoolean(def);
+ bool pw_required = defGetBoolean(def);
/*
* Only the superuser may set this option on a user mapping, or
* alter a user mapping on which this option is set. We allow a
- * user to clear this option if it's set - in fact, we don't have a
- * choice since we can't see the old mapping when validating an
+ * user to clear this option if it's set - in fact, we don't have
+ * a choice since we can't see the old mapping when validating an
* alter.
*/
if (!superuser() && !pw_required)
{"fetch_size", ForeignServerRelationId, false},
{"fetch_size", ForeignTableRelationId, false},
{"password_required", UserMappingRelationId, false},
+
/*
* sslcert and sslkey are in fact libpq options, but we repeat them
- * here to allow them to appear in both foreign server context
- * (when we generate libpq options) and user mapping context
- * (from here).
+ * here to allow them to appear in both foreign server context (when
+ * we generate libpq options) and user mapping context (from here).
*/
{"sslcert", UserMappingRelationId, true},
{"sslkey", UserMappingRelationId, true},
print " <entry>$feature_id</entry>\n";
}
print " <entry>",
- defined($feature_packages{$feature_id}) ? $feature_packages{$feature_id} : "",
- "</entry>\n";
+ defined($feature_packages{$feature_id})
+ ? $feature_packages{$feature_id}
+ : "",
+ "</entry>\n";
if ($subfeature_id)
{
print " <entry>$subfeature_name</entry>\n";
*/
struct varlena *
detoast_attr_slice(struct varlena *attr,
- int32 sliceoffset, int32 slicelength)
+ int32 sliceoffset, int32 slicelength)
{
struct varlena *preslice;
struct varlena *result;
/*
* For compressed values, we need to fetch enough slices to decompress
- * at least the requested part (when a prefix is requested). Otherwise,
- * just fetch all slices.
+ * at least the requested part (when a prefix is requested).
+ * Otherwise, just fetch all slices.
*/
if (slicelength > 0 && sliceoffset >= 0)
{
- int32 max_size;
+ int32 max_size;
/*
* Determine maximum amount of compressed data needed for a prefix
Assert(!VARATT_IS_EXTERNAL_INDIRECT(redirect.pointer));
return detoast_attr_slice(redirect.pointer,
- sliceoffset, slicelength);
+ sliceoffset, slicelength);
}
else if (VARATT_IS_EXTERNAL_EXPANDED(attr))
{
SET_VARSIZE(result, attrsize + VARHDRSZ);
if (attrsize == 0)
- return result; /* Probably shouldn't happen, but just in case. */
+ return result; /* Probably shouldn't happen, but just in
+ * case. */
/*
* Open the toast relation and its indexes
VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);
/*
- * It's nonsense to fetch slices of a compressed datum unless when it's
- * a prefix -- this isn't lo_* we can't return a compressed datum which
- * is meaningful to toast later.
+ * It's nonsense to fetch slices of a compressed datum unless it's a
+ * prefix -- this isn't lo_*, so we can't return a compressed datum that
+ * is meaningful to toast later.
*/
Assert(!VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer) || 0 == sliceoffset);
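/*
 * Illustrative standalone sketch (not PostgreSQL's detoast code) of the rule
 * described above: only a prefix request lets us bound how much compressed
 * data to fetch, because decompression runs sequentially from the start of
 * the stream.  compressed_bytes_needed() and worst_case_ratio are
 * hypothetical; pglz's real bound is computed differently.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t
compressed_bytes_needed(bool is_compressed, size_t total_compressed,
						size_t sliceoffset, size_t slicelength,
						size_t worst_case_ratio)
{
	if (!is_compressed)
		return slicelength;		/* uncompressed: fetch just the slice */
	if (sliceoffset == 0)
	{
		/* prefix: enough compressed bytes to yield slicelength raw bytes */
		size_t		bound = slicelength * worst_case_ratio;

		return bound < total_compressed ? bound : total_compressed;
	}
	return total_compressed;	/* mid-datum slice: must fetch it all */
}

int
main(void)
{
	printf("%zu\n", compressed_bytes_needed(true, 1000, 0, 100, 2));	/* 200 */
	printf("%zu\n", compressed_bytes_needed(true, 1000, 50, 100, 2));	/* 1000 */
	return 0;
}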
left->buf, right->buf, false, false))
{
/*
- * If the parent page was split, the existing downlink might
- * have moved.
+ * If the parent page was split, the existing downlink might have
+ * moved.
*/
stack->downlinkoffnum = InvalidOffsetNumber;
}
tuples, 2,
stack->downlinkoffnum,
left->buf, right->buf,
- true, /* Unlock parent */
- unlockbuf /* Unlock stack->buffer if caller wants that */
- ))
+ true, /* Unlock parent */
+ unlockbuf /* Unlock stack->buffer if caller wants
+ * that */
+ ))
{
/*
* If the parent page was split, the downlink might have moved.
{
uint32 splitpoint_group;
uint32 splitpoint_phases;
+
splitpoint_group = pg_ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
argtype == XIDOID || argtype == CIDOID))
/* okay, allowed use of hashint4() */ ;
else if ((funcid == F_HASHINT8 || funcid == F_HASHINT8EXTENDED) &&
- (argtype == XID8OID))
+ (argtype == XID8OID))
/* okay, allowed use of hashint8() */ ;
else if ((funcid == F_TIMESTAMP_HASH ||
funcid == F_TIMESTAMP_HASH_EXTENDED) &&
RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
/*
- * Note that heap_multi_insert is not used for catalog tuples yet,
- * but this will cover the gap once that is the case.
+ * Note that heap_multi_insert is not used for catalog tuples yet, but
+ * this will cover the gap once that is the case.
*/
if (needwal && need_cids)
log_heap_new_cid(relation, heaptuples[ndone]);
/* fetch options support procedure if specified */
if (amoptsprocnum != 0)
- procid =index_getprocid(indrel, attnum, amoptsprocnum);
+ procid = index_getprocid(indrel, attnum, amoptsprocnum);
if (!OidIsValid(procid))
{
bool isnull;
if (!DatumGetPointer(attoptions))
- return NULL; /* ok, no options, no procedure */
+ return NULL; /* ok, no options, no procedure */
/*
* Report an error if the opclass's options-parsing procedure does not
BTScanInsert itup_key;
ItemId itemid;
IndexTuple targetkey;
- BlockNumber leftsib, leafblkno;
+ BlockNumber leftsib,
+ leafblkno;
Buffer sleafbuf;
itemid = PageGetItemId(page, P_HIKEY);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
#ifdef USE_ASSERT_CHECKING
+
/*
* This is just an assertion because _bt_lock_subtree_parent should have
 * guaranteed that the tuple has the expected contents
Buffer *subtreeparent, OffsetNumber *poffset,
BlockNumber *topparent, BlockNumber *topparentrightsib)
{
- BlockNumber parent, leftsibparent;
+ BlockNumber parent,
+ leftsibparent;
OffsetNumber parentoffset,
maxoff;
Buffer pbuf;
/*
* Now make sure that the parent deletion is itself safe by examining the
* child's grandparent page. Recurse, passing the parent page as the
- * child page (child's grandparent is the parent on the next level up).
- * If parent deletion is unsafe, then child deletion must also be unsafe
- * (in which case caller cannot delete any pages at all).
+ * child page (child's grandparent is the parent on the next level up). If
+ * parent deletion is unsafe, then child deletion must also be unsafe (in
+ * which case caller cannot delete any pages at all).
*/
*topparent = parent;
*topparentrightsib = opaque->btpo_next;
void *callback_state = vstate->callback_state;
Relation rel = info->index;
bool attempt_pagedel;
- BlockNumber blkno, backtrack_to;
+ BlockNumber blkno,
+ backtrack_to;
Buffer buf;
Page page;
BTPageOpaque opaque;
/*
* We need to save the location of the pivot tuple we chose in the
- * parent page on a stack. If we need to split a page, we'll use
- * the stack to work back up to its parent page. If caller ends up
- * splitting a page one level down, it usually ends up inserting a
- * new pivot tuple/downlink immediately after the location recorded
- * here.
+ * parent page on a stack. If we need to split a page, we'll use the
+ * stack to work back up to its parent page. If caller ends up
+ * splitting a page one level down, it usually ends up inserting a new
+ * pivot tuple/downlink immediately after the location recorded here.
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
static bool _bt_adjacenthtid(ItemPointer lowhtid, ItemPointer highhtid);
static OffsetNumber _bt_bestsplitloc(FindSplitData *state, int perfectpenalty,
bool *newitemonleft, FindSplitStrat strategy);
-static int _bt_defaultinterval(FindSplitData *state);
+static int _bt_defaultinterval(FindSplitData *state);
static int _bt_strategy(FindSplitData *state, SplitPoint *leftpage,
SplitPoint *rightpage, FindSplitStrat *strategy);
static void _bt_interval_edges(FindSplitData *state,
else if (info == XLOG_DBASE_DROP)
{
xl_dbase_drop_rec *xlrec = (xl_dbase_drop_rec *) rec;
- int i;
+ int i;
appendStringInfo(buf, "dir");
for (i = 0; i < xlrec->ntablespaces; i++)
xact_desc_relations(StringInfo buf, char *label, int nrels,
RelFileNode *xnodes)
{
- int i;
+ int i;
if (nrels > 0)
{
static void
xact_desc_subxacts(StringInfo buf, int nsubxacts, TransactionId *subxacts)
{
- int i;
+ int i;
if (nsubxacts > 0)
{
if ((thisgroup->functionset & (((uint64) 1) << i)) != 0)
continue; /* got it */
if (i == SPGIST_OPTIONS_PROC)
- continue; /* optional method */
+ continue; /* optional method */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("operator family \"%s\" of access method %s is missing support function %d for type %s",
if (chain)
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s can only be used in transaction blocks",
"COMMIT AND CHAIN")));
else
if (chain)
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s can only be used in transaction blocks",
"COMMIT AND CHAIN")));
else
if (chain)
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s can only be used in transaction blocks",
"ROLLBACK AND CHAIN")));
else
{
uint8 xact_info;
TimestampTz xtime;
- TimestampTz delayUntil;
+ TimestampTz delayUntil;
long secs;
int microsecs;
switch (ControlFile->state)
{
case DB_SHUTDOWNED:
- /* This is the expected case, so don't be chatty in standalone mode */
+
+ /*
+ * This is the expected case, so don't be chatty in standalone
+ * mode
+ */
ereport(IsPostmasterEnvironment ? LOG : NOTICE,
(errmsg("database system was shut down at %s",
str_time(ControlFile->time))));
datadirpathlen = strlen(DataDir);
/*
- * Report that we are now estimating the total backup size
- * if we're streaming base backup as requested by pg_basebackup
+ * Report that we are now estimating the total backup size if we're
+ * streaming base backup as requested by pg_basebackup
*/
if (tablespaces)
pgstat_progress_update_param(PROGRESS_BASEBACKUP_PHASE,
void
do_pg_abort_backup(int code, Datum arg)
{
- bool emit_warning = DatumGetBool(arg);
+ bool emit_warning = DatumGetBool(arg);
/*
* Quick exit if session is not keeping around a non-exclusive backup
*/
/*
- * We should be able to move to XLOG_FROM_STREAM
- * only in standby mode.
+ * We should be able to move to XLOG_FROM_STREAM only in
+ * standby mode.
*/
Assert(StandbyMode);
{
case XLOG_FROM_ARCHIVE:
case XLOG_FROM_PG_WAL:
+
/*
* WAL receiver must not be running when reading WAL from
* archive or pg_wal.
bool havedata;
/*
- * We should be able to move to XLOG_FROM_STREAM
- * only in standby mode.
+ * We should be able to move to XLOG_FROM_STREAM only in
+ * standby mode.
*/
Assert(StandbyMode);
TimeLineID restartTli;
/*
- * Ignore restore_command when not in archive recovery (meaning
- * we are in crash recovery).
+ * Ignore restore_command when not in archive recovery (meaning we are in
+ * crash recovery).
*/
if (!ArchiveRecoveryRequested)
goto not_available;
FullTransactionId
XLogRecGetFullXid(XLogReaderState *record)
{
- TransactionId xid,
- next_xid;
- uint32 epoch;
+ TransactionId xid,
+ next_xid;
+ uint32 epoch;
/*
* This function is only safe during replay, because it depends on the
epoch = EpochFromFullTransactionId(ShmemVariableCache->nextFullXid);
/*
- * If xid is numerically greater than next_xid, it has to be from the
- * last epoch.
+ * If xid is numerically greater than next_xid, it has to be from the last
+ * epoch.
*/
if (unlikely(xid > next_xid))
--epoch;
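/*
 * Standalone sketch (a hypothetical helper, not XLogRecGetFullXid itself) of
 * the epoch adjustment above: widening a 32-bit xid using the current epoch,
 * stepping the epoch back when the xid is numerically ahead of next_xid.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
widen_xid(uint32_t xid, uint32_t next_xid, uint32_t epoch)
{
	/* An xid "ahead of" next_xid cannot belong to the current epoch. */
	if (xid > next_xid)
		--epoch;
	return ((uint64_t) epoch << 32) | xid;
}

int
main(void)
{
	/* next_xid = 100 in epoch 3 */
	printf("%llu\n", (unsigned long long) widen_xid(90, 100, 3));			/* stays in epoch 3 */
	printf("%llu\n", (unsigned long long) widen_xid(4000000000u, 100, 3));	/* falls back to epoch 2 */
	return 0;
}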
}
else
{
- push @{ $catalog_data{pg_description}}, \%descr;
+ push @{ $catalog_data{pg_description} }, \%descr;
}
}
close $schemapg;
# Finally, rename the completed files into place.
-Catalog::RenameTempFile($bkifile, $tmpext);
-Catalog::RenameTempFile($schemafile, $tmpext);
+Catalog::RenameTempFile($bkifile, $tmpext);
+Catalog::RenameTempFile($schemafile, $tmpext);
exit 0;
*/
foreach(cell, parent_cons)
{
- Oid parent = lfirst_oid(cell);
+ Oid parent = lfirst_oid(cell);
ScanKeyInit(&key,
Anum_pg_constraint_oid,
*
* Because of this arrangement, we can correctly catch all
* relevant relations by adding to 'parent_cons' all rows with
- * valid conparentid, and to the 'oids' list all rows with a
- * zero conparentid. If any oids are added to 'oids', redo the
- * first loop above by setting 'restart'.
+ * valid conparentid, and to the 'oids' list all rows with a zero
+ * conparentid. If any oids are added to 'oids', redo the first
+ * loop above by setting 'restart'.
*/
if (OidIsValid(con->conparentid))
parent_cons = list_append_unique_oid(parent_cons,
CastCreate(Oid sourcetypeid, Oid targettypeid, Oid funcid, char castcontext,
char castmethod, DependencyType behavior)
{
- Relation relation;
- HeapTuple tuple;
- Oid castid;
- Datum values[Natts_pg_cast];
- bool nulls[Natts_pg_cast];
- ObjectAddress myself,
- referenced;
+ Relation relation;
+ HeapTuple tuple;
+ Oid castid;
+ Datum values[Natts_pg_cast];
+ bool nulls[Natts_pg_cast];
+ ObjectAddress myself,
+ referenced;
relation = table_open(CastRelationId, RowExclusiveLock);
{
List *result = NIL;
Relation depRel;
- ScanKeyData key[2];
- SysScanDesc scan;
+ ScanKeyData key[2];
+ SysScanDesc scan;
HeapTuple tup;
depRel = table_open(DependRelationId, AccessShareLock);
sdepForm->objid);
break;
case SHARED_DEPENDENCY_POLICY:
+
/*
* Try to remove role from policy; if unable to, remove
* policy.
obj.classId = sdepForm->classid;
obj.objectId = sdepForm->objid;
obj.objectSubId = sdepForm->objsubid;
+
/*
* Acquire lock on object, then verify this dependency
* is still relevant. If not, the object might have
bool vm;
bool need_fsm_vacuum = false;
ForkNumber forks[MAX_FORKNUM];
- BlockNumber blocks[MAX_FORKNUM];
- int nforks = 0;
+ BlockNumber blocks[MAX_FORKNUM];
+ int nforks = 0;
/* Open it at the smgr level if not already done */
RelationOpenSmgr(rel);
blocks[nforks] = nblocks;
nforks++;
- /* Prepare for truncation of the FSM if it exists */
+ /* Prepare for truncation of the FSM if it exists */
fsm = smgrexists(rel->rd_smgr, FSM_FORKNUM);
if (fsm)
{
smgrtruncate(rel->rd_smgr, forks, nforks, blocks);
/*
- * Update upper-level FSM pages to account for the truncation.
- * This is important because the just-truncated pages were likely
- * marked as all-free, and would be preferentially selected.
+ * Update upper-level FSM pages to account for the truncation. This is
+ * important because the just-truncated pages were likely marked as
+ * all-free, and would be preferentially selected.
*/
if (need_fsm_vacuum)
FreeSpaceMapVacuumRange(rel, nblocks, InvalidBlockNumber);
SMgrRelation reln;
Relation rel;
ForkNumber forks[MAX_FORKNUM];
- BlockNumber blocks[MAX_FORKNUM];
- int nforks = 0;
+ BlockNumber blocks[MAX_FORKNUM];
+ int nforks = 0;
bool need_fsm_vacuum = false;
reln = smgropen(xlrec->rnode, InvalidBackendId);
smgrtruncate(reln, forks, nforks, blocks);
/*
- * Update upper-level FSM pages to account for the truncation.
- * This is important because the just-truncated pages were likely
- * marked as all-free, and would be preferentially selected.
+ * Update upper-level FSM pages to account for the truncation. This is
+ * important because the just-truncated pages were likely marked as
+ * all-free, and would be preferentially selected.
*/
if (need_fsm_vacuum)
FreeSpaceMapVacuumRange(rel, xlrec->blkno,
}
else
{
- List *currexts;
+ List *currexts;
/* Avoid duplicates */
currexts = getAutoExtensionsOfObject(address.classId,
Relation rel;
TableScanDesc scan;
HeapTuple tuple;
- List *ltblspc = NIL;
- ListCell *cell;
- int ntblspc;
- int i;
- Oid *tablespace_ids;
+ List *ltblspc = NIL;
+ ListCell *cell;
+ int ntblspc;
+ int i;
+ Oid *tablespace_ids;
rel = table_open(TableSpaceRelationId, AccessShareLock);
scan = table_beginscan_catalog(rel, 0, NULL);
static EventTriggerQueryState *currentEventTriggerState = NULL;
-typedef struct
-{
- const char *obtypename;
- bool supported;
-} event_trigger_support_data;
-
/* Support for dropped objects */
typedef struct SQLDropObject
{
* we don't need to do anything if there were 0 full groups.
*
* We still have to continue after this block if there are no full groups,
- * though, since it's possible that we have workers that did real work even
- * if the leader didn't participate.
+ * though, since it's possible that we have workers that did real work
+ * even if the leader didn't participate.
*/
if (fullsortGroupInfo->groupCount > 0)
{
&incrsortstate->shared_info->sinfo[n];
/*
- * If a worker hasn't processed any sort groups at all, then exclude
- * it from output since it either didn't launch or didn't
+ * If a worker hasn't processed any sort groups at all, then
+ * exclude it from output since it either didn't launch or didn't
* contribute anything meaningful.
*/
fullsortGroupInfo = &incsort_info->fullsortGroupInfo;
/*
* Since we never have any prefix groups unless we've first sorted
 * a full group and transitioned modes (copying the tuples into a
- * prefix group), we don't need to do anything if there were 0 full
- * groups.
+ * prefix group), we don't need to do anything if there were 0
+ * full groups.
*/
if (fullsortGroupInfo->groupCount == 0)
continue;
static void
show_hashagg_info(AggState *aggstate, ExplainState *es)
{
- Agg *agg = (Agg *)aggstate->ss.ps.plan;
- int64 memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
+ Agg *agg = (Agg *) aggstate->ss.ps.plan;
+ int64 memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
Assert(IsA(aggstate, AggState));
* does what is needed, we try to find a sequence of update scripts that
* will get us there.
*/
- filename = get_extension_script_filename(pcontrol, NULL, versionName);
- if (stat(filename, &fst) == 0)
- {
- /* Easy, no extra scripts */
- updateVersions = NIL;
- }
- else
- {
- /* Look for best way to install this version */
- List *evi_list;
- ExtensionVersionInfo *evi_start;
- ExtensionVersionInfo *evi_target;
+ filename = get_extension_script_filename(pcontrol, NULL, versionName);
+ if (stat(filename, &fst) == 0)
+ {
+ /* Easy, no extra scripts */
+ updateVersions = NIL;
+ }
+ else
+ {
+ /* Look for best way to install this version */
+ List *evi_list;
+ ExtensionVersionInfo *evi_start;
+ ExtensionVersionInfo *evi_target;
- /* Extract the version update graph from the script directory */
- evi_list = get_ext_ver_list(pcontrol);
+ /* Extract the version update graph from the script directory */
+ evi_list = get_ext_ver_list(pcontrol);
- /* Identify the target version */
- evi_target = get_ext_ver_info(versionName, &evi_list);
+ /* Identify the target version */
+ evi_target = get_ext_ver_info(versionName, &evi_list);
- /* Identify best path to reach target */
- evi_start = find_install_path(evi_list, evi_target,
- &updateVersions);
+ /* Identify best path to reach target */
+ evi_start = find_install_path(evi_list, evi_target,
+ &updateVersions);
- /* Fail if no path ... */
- if (evi_start == NULL)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("extension \"%s\" has no installation script nor update path for version \"%s\"",
- pcontrol->name, versionName)));
+ /* Fail if no path ... */
+ if (evi_start == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("extension \"%s\" has no installation script nor update path for version \"%s\"",
+ pcontrol->name, versionName)));
- /* Otherwise, install best starting point and then upgrade */
- versionName = evi_start->name;
- }
+ /* Otherwise, install best starting point and then upgrade */
+ versionName = evi_start->name;
+ }
/*
* Fetch control parameters for installation target version
char castmethod;
HeapTuple tuple;
AclResult aclresult;
- ObjectAddress myself;
+ ObjectAddress myself;
sourcetypeid = typenameTypeId(NULL, stmt->sourcetype);
targettypeid = typenameTypeId(NULL, stmt->targettype);
opfamilyoid, /* oid of containing opfamily */
opclassoid; /* oid of opclass we create */
int maxOpNumber, /* amstrategies value */
- optsProcNumber, /* amoptsprocnum value */
+ optsProcNumber, /* amoptsprocnum value */
maxProcNumber; /* amsupport value */
bool amstorage; /* amstorage flag */
List *operators; /* OpFamilyMember list for operators */
Oid amoid, /* our AM's oid */
opfamilyoid; /* oid of opfamily */
int maxOpNumber, /* amstrategies value */
- optsProcNumber, /* amopclassopts value */
+ optsProcNumber, /* amopclassopts value */
maxProcNumber; /* amsupport value */
HeapTuple tup;
Form_pg_am amform;
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("btree equal image functions must return boolean")));
+
/*
* pg_amproc functions are indexed by (lefttype, righttype), but
* an equalimage function can only be called at CREATE INDEX time.
* invalidate all partitions contained in the respective partition
* trees, not just those explicitly mentioned in the publication.
*/
- List *relids = GetPublicationRelations(pubform->oid,
- PUBLICATION_PART_ALL);
+ List *relids = GetPublicationRelations(pubform->oid,
+ PUBLICATION_PART_ALL);
/*
* We don't want to send too many individual messages, at some point
PublicationDropTables(pubid, rels, false);
else /* DEFELEM_SET */
{
- List *oldrelids = GetPublicationRelations(pubid,
- PUBLICATION_PART_ROOT);
+ List *oldrelids = GetPublicationRelations(pubid,
+ PUBLICATION_PART_ROOT);
List *delrels = NIL;
ListCell *oldlc;
Datum repl_val[Natts_pg_statistic_ext];
bool repl_null[Natts_pg_statistic_ext];
bool repl_repl[Natts_pg_statistic_ext];
- ObjectAddress address;
+ ObjectAddress address;
int newtarget = stmt->stxstattarget;
/* Limit statistics target to a sane range */
stxoid = get_statistics_object_oid(stmt->defnames, stmt->missing_ok);
/*
- * If we got here and the OID is not valid, it means the statistics
- * does not exist, but the command specified IF EXISTS. So report
- * this as a simple NOTICE and we're done.
+ * If we got here and the OID is not valid, it means the statistics object
+ * does not exist, but the command specified IF EXISTS. So report this as a
+ * simple NOTICE and we're done.
*/
if (!OidIsValid(stxoid))
{
List *changedIndexOids; /* OIDs of indexes to rebuild */
List *changedIndexDefs; /* string definitions of same */
char *replicaIdentityIndex; /* index to reset as REPLICA IDENTITY */
- char *clusterOnIndex; /* index to use for CLUSTER */
+ char *clusterOnIndex; /* index to use for CLUSTER */
} AlteredTableInfo;
/* Struct describing one new constraint to check in Phase 3 scan */
if (drop->concurrent)
{
/*
- * Note that for temporary relations this lock may get upgraded
- * later on, but as no other session can access a temporary
- * relation, this is actually fine.
+ * Note that for temporary relations this lock may get upgraded later
+ * on, but as no other session can access a temporary relation, this
+ * is actually fine.
*/
lockmode = ShareUpdateExclusiveLock;
Assert(drop->removeType == OBJECT_INDEX);
}
/*
- * Inherited TRUNCATE commands perform access
- * permission checks on the parent table only.
- * So we skip checking the children's permissions
- * and don't call truncate_check_perms() here.
+ * Inherited TRUNCATE commands perform access permission
+ * checks on the parent table only. So we skip checking the
+ * children's permissions and don't call
+ * truncate_check_perms() here.
*/
truncate_check_rel(RelationGetRelid(rel), rel->rd_rel);
truncate_check_activity(rel);
errmsg("column \"%s\" inherits from generated column but specifies identity",
def->colname)));
}
+
/*
* If the parent column is not generated, then take whatever
* the child column definition says.
*/
foreach(lc, RelationGetIndexList(rel))
{
- Oid indexoid = lfirst_oid(lc);
- Relation indrel;
+ Oid indexoid = lfirst_oid(lc);
+ Relation indrel;
AttrNumber indattnum = 0;
indrel = index_open(indexoid, lockmode);
DropClonedTriggersFromPartition(Oid partitionId)
{
ScanKeyData skey;
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple trigtup;
Relation tgrel;
ObjectAddresses *objects;
/*
* After a tuple in a partition goes through a trigger, the user
- * could have changed the partition key enough that the tuple
- * no longer fits the partition. Verify that.
+ * could have changed the partition key enough that the tuple no
+ * longer fits the partition. Verify that.
*/
if (trigger->tgisclone &&
!ExecPartitionCheck(relinfo, slot, estate, false))
bool nullcheck)
{
ExprContext *aggcontext;
- int adjust_jumpnull = -1;
+ int adjust_jumpnull = -1;
if (ishash)
aggcontext = aggstate->hashcontext;
static Datum ExecJustAssignScanVarVirt(ExprState *state, ExprContext *econtext, bool *isnull);
/* execution helper functions */
-static pg_attribute_always_inline void
-ExecAggPlainTransByVal(AggState *aggstate, AggStatePerTrans pertrans,
- AggStatePerGroup pergroup,
- ExprContext *aggcontext, int setno);
-
-static pg_attribute_always_inline void
-ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans,
- AggStatePerGroup pergroup,
- ExprContext *aggcontext, int setno);
+static pg_attribute_always_inline void ExecAggPlainTransByVal(AggState *aggstate,
+ AggStatePerTrans pertrans,
+ AggStatePerGroup pergroup,
+ ExprContext *aggcontext,
+ int setno);
+static pg_attribute_always_inline void ExecAggPlainTransByRef(AggState *aggstate,
+ AggStatePerTrans pertrans,
+ AggStatePerGroup pergroup,
+ ExprContext *aggcontext,
+ int setno);
/*
* Prepare ExprState for interpreted execution.
EEO_CASE(EEOP_AGG_PLAIN_PERGROUP_NULLCHECK)
{
AggState *aggstate = castNode(AggState, state->parent);
- AggStatePerGroup pergroup_allaggs = aggstate->all_pergroups
- [op->d.agg_plain_pergroup_nullcheck.setoff];
+ AggStatePerGroup pergroup_allaggs =
+ aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL)
EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
- AggStatePerGroup pergroup = &aggstate->all_pergroups
- [op->d.agg_trans.setoff]
- [op->d.agg_trans.transno];
+ AggStatePerGroup pergroup =
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
newVal = FunctionCallInvoke(fcinfo);
/*
- * For pass-by-ref datatype, must copy the new value into
- * aggcontext and free the prior transValue. But if transfn
- * returned a pointer to its first input, we don't need to do
- * anything. Also, if transfn returned a pointer to a R/W
- * expanded object that is already a child of the aggcontext,
- * assume we can adopt that value without copying it.
+ * For pass-by-ref datatype, must copy the new value into aggcontext and
+ * free the prior transValue. But if transfn returned a pointer to its
+ * first input, we don't need to do anything. Also, if transfn returned a
+ * pointer to a R/W expanded object that is already a child of the
+ * aggcontext, assume we can adopt that value without copying it.
*
- * It's safe to compare newVal with pergroup->transValue without
- * regard for either being NULL, because ExecAggTransReparent()
- * takes care to set transValue to 0 when NULL. Otherwise we could
- * end up accidentally not reparenting, when the transValue has
- * the same numerical value as newValue, despite being NULL. This
- * is a somewhat hot path, making it undesirable to instead solve
- * this with another branch for the common case of the transition
- * function returning its (modified) input argument.
+ * It's safe to compare newVal with pergroup->transValue without regard
+ * for either being NULL, because ExecAggTransReparent() takes care to set
+ * transValue to 0 when NULL. Otherwise we could end up accidentally not
+ * reparenting, when the transValue has the same numerical value as
+ * newValue, despite being NULL. This is a somewhat hot path, making it
+ * undesirable to instead solve this with another branch for the common
+ * case of the transition function returning its (modified) input
+ * argument.
*/
if (DatumGetPointer(newVal) != DatumGetPointer(pergroup->transValue))
newVal = ExecAggTransReparent(aggstate, pertrans,
LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
bool *isnew)
{
- TupleHashEntry entry;
- MemoryContext oldContext;
- uint32 hash;
+ TupleHashEntry entry;
+ MemoryContext oldContext;
+ uint32 hash;
/* Need to run the hash functions in short-lived context */
oldContext = MemoryContextSwitchTo(hashtable->tempcxt);
uint32
TupleHashTableHash(TupleHashTable hashtable, TupleTableSlot *slot)
{
- MemoryContext oldContext;
- uint32 hash;
+ MemoryContext oldContext;
+ uint32 hash;
hashtable->inputslot = slot;
hashtable->in_hash_funcs = hashtable->tab_hash_funcs;
LookupTupleHashEntryHash(TupleHashTable hashtable, TupleTableSlot *slot,
bool *isnew, uint32 hash)
{
- TupleHashEntry entry;
- MemoryContext oldContext;
+ TupleHashEntry entry;
+ MemoryContext oldContext;
/* Need to run the hash functions in short-lived context */
oldContext = MemoryContextSwitchTo(hashtable->tempcxt);
if (first_time)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
if (tupdesc == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
/*
* This is the first non-NULL result from the
if (rsinfo.setResult == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
ExprContext *
CreateWorkExprContext(EState *estate)
{
- Size minContextSize = ALLOCSET_DEFAULT_MINSIZE;
- Size initBlockSize = ALLOCSET_DEFAULT_INITSIZE;
- Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
+ Size minContextSize = ALLOCSET_DEFAULT_MINSIZE;
+ Size initBlockSize = ALLOCSET_DEFAULT_INITSIZE;
+ Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
/* choose the maxBlockSize to be no larger than 1/16 of work_mem */
while (16 * maxBlockSize > work_mem * 1024L)
*/
typedef struct HashTapeInfo
{
- LogicalTapeSet *tapeset;
- int ntapes;
- int *freetapes;
- int nfreetapes;
- int freetapes_alloc;
+ LogicalTapeSet *tapeset;
+ int ntapes;
+ int *freetapes;
+ int nfreetapes;
+ int freetapes_alloc;
} HashTapeInfo;
/*
typedef struct HashAggSpill
{
LogicalTapeSet *tapeset; /* borrowed reference to tape set */
- int npartitions; /* number of partitions */
- int *partitions; /* spill partition tape numbers */
- int64 *ntuples; /* number of tuples in each partition */
- uint32 mask; /* mask to find partition from hash value */
- int shift; /* after masking, shift by this amount */
+ int npartitions; /* number of partitions */
+ int *partitions; /* spill partition tape numbers */
+ int64 *ntuples; /* number of tuples in each partition */
+ uint32 mask; /* mask to find partition from hash value */
+ int shift; /* after masking, shift by this amount */
} HashAggSpill;
/*
*/
typedef struct HashAggBatch
{
- int setno; /* grouping set */
- int used_bits; /* number of bits of hash already used */
- LogicalTapeSet *tapeset; /* borrowed reference to tape set */
- int input_tapenum; /* input partition tape */
- int64 input_tuples; /* number of tuples in this batch */
+ int setno; /* grouping set */
+ int used_bits; /* number of bits of hash already used */
+ LogicalTapeSet *tapeset; /* borrowed reference to tape set */
+ int input_tapenum; /* input partition tape */
+ int64 input_tuples; /* number of tuples in this batch */
} HashAggBatch;
static void select_current_set(AggState *aggstate, int setno, bool is_hash);
static long hash_choose_num_buckets(double hashentrysize,
long estimated_nbuckets,
Size memory);
-static int hash_choose_num_partitions(uint64 input_groups,
- double hashentrysize,
- int used_bits,
- int *log2_npartittions);
+static int hash_choose_num_partitions(uint64 input_groups,
+ double hashentrysize,
+ int used_bits,
+ int *log2_npartittions);
static AggStatePerGroup lookup_hash_entry(AggState *aggstate, uint32 hash,
bool *in_hash_table);
static void lookup_hash_entries(AggState *aggstate);
* pointer to a R/W expanded object that is already a child of the
* aggcontext, assume we can adopt that value without copying it.
*
- * It's safe to compare newVal with pergroup->transValue without
- * regard for either being NULL, because ExecAggTransReparent()
- * takes care to set transValue to 0 when NULL. Otherwise we could
- * end up accidentally not reparenting, when the transValue has
- * the same numerical value as newValue, despite being NULL. This
- * is a somewhat hot path, making it undesirable to instead solve
- * this with another branch for the common case of the transition
- * function returning its (modified) input argument.
+ * It's safe to compare newVal with pergroup->transValue without regard
+ * for either being NULL, because ExecAggTransReparent() takes care to set
+ * transValue to 0 when NULL. Otherwise we could end up accidentally not
+ * reparenting, when the transValue has the same numerical value as
+ * newValue, despite being NULL. This is a somewhat hot path, making it
+ * undesirable to instead solve this with another branch for the common
+ * case of the transition function returning its (modified) input
+ * argument.
*/
if (!pertrans->transtypeByVal &&
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
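/*
 * Standalone sketch (not the executor code) of the pointer-compare rule in
 * the comment above, using malloc'd strings as a stand-in for pass-by-ref
 * Datums: only when the "transition function" returns a different allocation
 * do we copy it into long-lived storage and free the prior value.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
adopt_trans_value(char *oldval, char *newval)
{
	if (newval == oldval)
		return oldval;			/* modified in place: nothing to do */
	newval = strdup(newval);	/* "copy into aggcontext" */
	free(oldval);				/* free the prior transition value */
	return newval;
}

int
main(void)
{
	char	   *trans = strdup("1");
	char		scratch[32];

	snprintf(scratch, sizeof(scratch), "%d", atoi(trans) + 41);
	trans = adopt_trans_value(trans, scratch);	/* different pointer: copied */
	printf("%s\n", trans);						/* prints 42 */
	trans = adopt_trans_value(trans, trans);	/* same pointer: kept as-is */
	free(trans);
	return 0;
}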
TupleTableSlot *inputslot = aggstate->tmpcontext->ecxt_outertuple;
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
- int i;
+ int i;
/* transfer just the needed columns into hashslot */
slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
static void
build_hash_tables(AggState *aggstate)
{
- int setno;
+ int setno;
for (setno = 0; setno < aggstate->num_hashes; ++setno)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- long nbuckets;
- Size memory;
+ long nbuckets;
+ Size memory;
if (perhash->hashtable != NULL)
{
memory = aggstate->hash_mem_limit / aggstate->num_hashes;
/* choose reasonable number of buckets per hashtable */
- nbuckets = hash_choose_num_buckets(
- aggstate->hashentrysize, perhash->aggnode->numGroups, memory);
+ nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
+ perhash->aggnode->numGroups,
+ memory);
build_hash_table(aggstate, setno, nbuckets);
}
build_hash_table(AggState *aggstate, int setno, long nbuckets)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- MemoryContext metacxt = aggstate->hash_metacxt;
- MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
- MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
- Size additionalsize;
+ MemoryContext metacxt = aggstate->hash_metacxt;
+ MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
+ MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
+ Size additionalsize;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
*/
additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
- perhash->hashtable = BuildTupleHashTableExt(
- &aggstate->ss.ps,
- perhash->hashslot->tts_tupleDescriptor,
- perhash->numCols,
- perhash->hashGrpColIdxHash,
- perhash->eqfuncoids,
- perhash->hashfunctions,
- perhash->aggnode->grpCollations,
- nbuckets,
- additionalsize,
- metacxt,
- hashcxt,
- tmpcxt,
- DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+ perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps,
+ perhash->hashslot->tts_tupleDescriptor,
+ perhash->numCols,
+ perhash->hashGrpColIdxHash,
+ perhash->eqfuncoids,
+ perhash->hashfunctions,
+ perhash->aggnode->grpCollations,
+ nbuckets,
+ additionalsize,
+ metacxt,
+ hashcxt,
+ tmpcxt,
+ DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
}
/*
Size
hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
{
- Size tupleChunkSize;
- Size pergroupChunkSize;
- Size transitionChunkSize;
- Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
- tupleWidth);
- Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
+ Size tupleChunkSize;
+ Size pergroupChunkSize;
+ Size transitionChunkSize;
+ Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
+ tupleWidth);
+ Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
tupleChunkSize = CHUNKHDRSZ + tupleSize;
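/*
 * Standalone sketch of the per-group estimate above: each allocation (tuple,
 * per-group states, transition space) pays a fixed chunk header on top of
 * its payload.  CHUNK_HDR, TUPLE_HDR and PERGROUP_SZ are hypothetical
 * stand-ins for CHUNKHDRSZ, MAXALIGN(SizeofMinimalTupleHeader) and
 * sizeof(AggStatePerGroupData).
 */
#include <stddef.h>
#include <stdio.h>

#define CHUNK_HDR	16
#define TUPLE_HDR	24
#define PERGROUP_SZ	16

static size_t
entry_size(int num_trans, size_t tuple_width, size_t transition_space)
{
	size_t		tuple_chunk = CHUNK_HDR + TUPLE_HDR + tuple_width;
	size_t		pergroup_chunk = CHUNK_HDR + num_trans * PERGROUP_SZ;
	size_t		transition_chunk =
		(transition_space > 0) ? CHUNK_HDR + transition_space : 0;

	return tuple_chunk + pergroup_chunk + transition_chunk;
}

int
main(void)
{
	printf("%zu bytes per group\n", entry_size(2, 32, 0));	/* 120 */
	return 0;
}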
static void
hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
{
- AggStatePerPhase phase;
- int i = minslot ? 1 : 0;
- int j = nullcheck ? 1 : 0;
+ AggStatePerPhase phase;
+ int i = minslot ? 1 : 0;
+ int j = nullcheck ? 1 : 0;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
if (aggstate->aggstrategy == AGG_HASHED)
phase = &aggstate->phases[0];
- else /* AGG_MIXED */
+ else /* AGG_MIXED */
phase = &aggstate->phases[1];
if (phase->evaltrans_cache[i][j] == NULL)
{
- const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
- bool outerfixed = aggstate->ss.ps.outeropsfixed;
- bool dohash = true;
- bool dosort;
+ const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
+ bool outerfixed = aggstate->ss.ps.outeropsfixed;
+ bool dohash = true;
+ bool dosort;
dosort = aggstate->aggstrategy == AGG_MIXED ? true : false;
aggstate->ss.ps.outeropsfixed = true;
}
- phase->evaltrans_cache[i][j] = ExecBuildAggTrans(
- aggstate, phase, dosort, dohash, nullcheck);
+ phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
+ dosort, dohash,
+ nullcheck);
/* change back */
aggstate->ss.ps.outerops = outerops;
Size *mem_limit, uint64 *ngroups_limit,
int *num_partitions)
{
- int npartitions;
- Size partition_mem;
+ int npartitions;
+ Size partition_mem;
/* if not expected to spill, use all of work_mem */
if (input_groups * hashentrysize < work_mem * 1024L)
/*
* Calculate expected memory requirements for spilling, which is the size
- * of the buffers needed for all the tapes that need to be open at
- * once. Then, subtract that from the memory available for holding hash
- * tables.
+ * of the buffers needed for all the tapes that need to be open at once.
+ * Then, subtract that from the memory available for holding hash tables.
*/
npartitions = hash_choose_num_partitions(input_groups,
hashentrysize,
static void
hash_agg_check_limits(AggState *aggstate)
{
- uint64 ngroups = aggstate->hash_ngroups_current;
- Size meta_mem = MemoryContextMemAllocated(
- aggstate->hash_metacxt, true);
- Size hash_mem = MemoryContextMemAllocated(
- aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ uint64 ngroups = aggstate->hash_ngroups_current;
+ Size meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
+ true);
+ Size hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
+ true);
/*
* Don't spill unless there's at least one group in the hash table so we
hashagg_tapeinfo_init(aggstate);
- aggstate->hash_spills = palloc(
- sizeof(HashAggSpill) * aggstate->num_hashes);
+ aggstate->hash_spills = palloc(sizeof(HashAggSpill) * aggstate->num_hashes);
for (int setno = 0; setno < aggstate->num_hashes; setno++)
{
- AggStatePerHash perhash = &aggstate->perhash[setno];
- HashAggSpill *spill = &aggstate->hash_spills[setno];
+ AggStatePerHash perhash = &aggstate->perhash[setno];
+ HashAggSpill *spill = &aggstate->hash_spills[setno];
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
perhash->aggnode->numGroups,
static void
hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
- Size meta_mem;
- Size hash_mem;
- Size buffer_mem;
- Size total_mem;
+ Size meta_mem;
+ Size hash_mem;
+ Size buffer_mem;
+ Size total_mem;
if (aggstate->aggstrategy != AGG_MIXED &&
aggstate->aggstrategy != AGG_HASHED)
meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
/* memory for the group keys and transition states */
- hash_mem = MemoryContextMemAllocated(
- aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
/* memory for read/write tape buffers, if spilled */
buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
/* update disk usage */
if (aggstate->hash_tapeinfo != NULL)
{
- uint64 disk_used = LogicalTapeSetBlocks(
- aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
+ uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
if (aggstate->hash_disk_used < disk_used)
aggstate->hash_disk_used = disk_used;
{
aggstate->hashentrysize =
sizeof(TupleHashEntryData) +
- (hash_mem / (double)aggstate->hash_ngroups_current);
+ (hash_mem / (double) aggstate->hash_ngroups_current);
}
}
static long
hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
{
- long max_nbuckets;
- long nbuckets = ngroups;
+ long max_nbuckets;
+ long nbuckets = ngroups;
max_nbuckets = memory / hashentrysize;
hash_choose_num_partitions(uint64 input_groups, double hashentrysize,
int used_bits, int *log2_npartitions)
{
- Size mem_wanted;
- int partition_limit;
- int npartitions;
- int partition_bits;
+ Size mem_wanted;
+ int partition_limit;
+ int npartitions;
+ int partition_bits;
/*
* Avoid creating so many partitions that the memory requirements of the
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
TupleHashEntryData *entry;
- bool isnew = false;
- bool *p_isnew;
+ bool isnew = false;
+ bool *p_isnew;
/* if hash table already spilled, don't create new entries */
p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
if (isnew)
{
- AggStatePerGroup pergroup;
- int transno;
+ AggStatePerGroup pergroup;
+ int transno;
aggstate->hash_ngroups_current++;
hash_agg_check_limits(aggstate);
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
- AggStatePerHash perhash = &aggstate->perhash[setno];
- uint32 hash;
- bool in_hash_table;
+ AggStatePerHash perhash = &aggstate->perhash[setno];
+ uint32 hash;
+ bool in_hash_table;
select_current_set(aggstate, setno, true);
prepare_hash_slot(aggstate);
/* check to see if we need to spill the tuple for this grouping set */
if (!in_hash_table)
{
- HashAggSpill *spill = &aggstate->hash_spills[setno];
- TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
+ HashAggSpill *spill = &aggstate->hash_spills[setno];
+ TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
if (spill->partitions == NULL)
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
static bool
agg_refill_hash_table(AggState *aggstate)
{
- HashAggBatch *batch;
- HashAggSpill spill;
- HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
- uint64 ngroups_estimate;
- bool spill_initialized = false;
+ HashAggBatch *batch;
+ HashAggSpill spill;
+ HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+ uint64 ngroups_estimate;
+ bool spill_initialized = false;
if (aggstate->hash_batches == NIL)
return false;
LogicalTapeRewindForRead(tapeinfo->tapeset, batch->input_tapenum,
HASHAGG_READ_BUFFER_SIZE);
- for (;;) {
- TupleTableSlot *slot = aggstate->hash_spill_slot;
- MinimalTuple tuple;
- uint32 hash;
- bool in_hash_table;
+ for (;;)
+ {
+ TupleTableSlot *slot = aggstate->hash_spill_slot;
+ MinimalTuple tuple;
+ uint32 hash;
+ bool in_hash_table;
CHECK_FOR_INTERRUPTS();
aggstate->tmpcontext->ecxt_outertuple = slot;
prepare_hash_slot(aggstate);
- aggstate->hash_pergroup[batch->setno] = lookup_hash_entry(
- aggstate, hash, &in_hash_table);
+ aggstate->hash_pergroup[batch->setno] =
+ lookup_hash_entry(aggstate, hash, &in_hash_table);
if (in_hash_table)
{
*/
spill_initialized = true;
hashagg_spill_init(&spill, tapeinfo, batch->used_bits,
- ngroups_estimate, aggstate->hashentrysize);
+ ngroups_estimate, aggstate->hashentrysize);
}
/* no memory for a new group, spill */
hashagg_spill_tuple(&spill, slot, hash);
static void
hashagg_tapeinfo_init(AggState *aggstate)
{
- HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
- int init_tapes = 16; /* expanded dynamically */
+ HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
+ int init_tapes = 16; /* expanded dynamically */
tapeinfo->tapeset = LogicalTapeSetCreate(init_tapes, NULL, NULL, -1);
tapeinfo->ntapes = init_tapes;
hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *partitions,
int npartitions)
{
- int partidx = 0;
+ int partidx = 0;
/* use free tapes if available */
while (partidx < npartitions && tapeinfo->nfreetapes > 0)
if (tapeinfo->freetapes_alloc == tapeinfo->nfreetapes)
{
tapeinfo->freetapes_alloc <<= 1;
- tapeinfo->freetapes = repalloc(
- tapeinfo->freetapes, tapeinfo->freetapes_alloc * sizeof(int));
+ tapeinfo->freetapes = repalloc(tapeinfo->freetapes,
+ tapeinfo->freetapes_alloc * sizeof(int));
}
tapeinfo->freetapes[tapeinfo->nfreetapes++] = tapenum;
}
hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
uint64 input_groups, double hashentrysize)
{
- int npartitions;
- int partition_bits;
+ int npartitions;
+ int partition_bits;
- npartitions = hash_choose_num_partitions(
- input_groups, hashentrysize, used_bits, &partition_bits);
+ npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
+ used_bits, &partition_bits);
spill->partitions = palloc0(sizeof(int) * npartitions);
spill->ntuples = palloc0(sizeof(int64) * npartitions);
static Size
hashagg_spill_tuple(HashAggSpill *spill, TupleTableSlot *slot, uint32 hash)
{
- LogicalTapeSet *tapeset = spill->tapeset;
- int partition;
- MinimalTuple tuple;
- int tapenum;
- int total_written = 0;
- bool shouldFree;
+ LogicalTapeSet *tapeset = spill->tapeset;
+ int partition;
+ MinimalTuple tuple;
+ int tapenum;
+ int total_written = 0;
+ bool shouldFree;
Assert(spill->partitions != NULL);
hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
{
LogicalTapeSet *tapeset = batch->tapeset;
- int tapenum = batch->input_tapenum;
- MinimalTuple tuple;
- uint32 t_len;
- size_t nread;
- uint32 hash;
+ int tapenum = batch->input_tapenum;
+ MinimalTuple tuple;
+ uint32 t_len;
+ size_t nread;
+ uint32 hash;
nread = LogicalTapeRead(tapeset, tapenum, &hash, sizeof(uint32));
if (nread == 0)
tuple->t_len = t_len;
nread = LogicalTapeRead(tapeset, tapenum,
- (void *)((char *)tuple + sizeof(uint32)),
+ (void *) ((char *) tuple + sizeof(uint32)),
t_len - sizeof(uint32));
if (nread != t_len - sizeof(uint32))
ereport(ERROR,
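/*
 * Standalone sketch of the spill-record framing being read above: each
 * spilled tuple is stored as a 32-bit hash, then a 32-bit total length, then
 * the remaining payload.  A plain FILE * stands in for the logical tape;
 * write_record()/read_record() are hypothetical helpers.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
write_record(FILE *f, uint32_t hash, const char *payload)
{
	uint32_t	len = (uint32_t) (sizeof(uint32_t) + strlen(payload));

	fwrite(&hash, sizeof(hash), 1, f);
	fwrite(&len, sizeof(len), 1, f);
	fwrite(payload, len - sizeof(uint32_t), 1, f);
}

static char *
read_record(FILE *f, uint32_t *hash)
{
	uint32_t	len;
	char	   *payload;

	if (fread(hash, sizeof(*hash), 1, f) != 1)
		return NULL;			/* end of tape */
	if (fread(&len, sizeof(len), 1, f) != 1)
		return NULL;
	payload = malloc(len - sizeof(uint32_t) + 1);
	fread(payload, len - sizeof(uint32_t), 1, f);
	payload[len - sizeof(uint32_t)] = '\0';
	return payload;
}

int
main(void)
{
	FILE	   *f = tmpfile();
	uint32_t	hash;
	char	   *p;

	write_record(f, 0xdeadbeef, "spilled tuple");
	rewind(f);
	while ((p = read_record(f, &hash)) != NULL)
	{
		printf("%08x: %s\n", hash, p);
		free(p);
	}
	fclose(f);
	return 0;
}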
static void
hashagg_finish_initial_spills(AggState *aggstate)
{
- int setno;
- int total_npartitions = 0;
+ int setno;
+ int total_npartitions = 0;
if (aggstate->hash_spills != NULL)
{
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
+
total_npartitions += spill->npartitions;
hashagg_spill_finish(aggstate, spill, setno);
}
static void
hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
{
- int i;
- int used_bits = 32 - spill->shift;
+ int i;
+ int used_bits = 32 - spill->shift;
if (spill->npartitions == 0)
- return; /* didn't spill */
+ return; /* didn't spill */
for (i = 0; i < spill->npartitions; i++)
{
- int tapenum = spill->partitions[i];
- HashAggBatch *new_batch;
+ int tapenum = spill->partitions[i];
+ HashAggBatch *new_batch;
/* if the partition is empty, don't create a new batch of work */
if (spill->ntuples[i] == 0)
static void
hashagg_reset_spill_state(AggState *aggstate)
{
- ListCell *lc;
+ ListCell *lc;
/* free spills from initial pass */
if (aggstate->hash_spills != NULL)
{
- int setno;
+ int setno;
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
+
pfree(spill->ntuples);
pfree(spill->partitions);
}
/* free batches */
foreach(lc, aggstate->hash_batches)
{
- HashAggBatch *batch = (HashAggBatch*) lfirst(lc);
+ HashAggBatch *batch = (HashAggBatch *) lfirst(lc);
+
pfree(batch);
}
list_free(aggstate->hash_batches);
/* close tape set */
if (aggstate->hash_tapeinfo != NULL)
{
- HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+ HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
LogicalTapeSetClose(tapeinfo->tapeset);
pfree(tapeinfo->freetapes);
*/
if (use_hashing)
{
- Plan *outerplan = outerPlan(node);
- uint64 totalGroups = 0;
- int i;
+ Plan *outerplan = outerPlan(node);
+ uint64 totalGroups = 0;
+ int i;
- aggstate->hash_metacxt = AllocSetContextCreate(
- aggstate->ss.ps.state->es_query_cxt,
- "HashAgg meta context",
- ALLOCSET_DEFAULT_SIZES);
- aggstate->hash_spill_slot = ExecInitExtraTupleSlot(
- estate, scanDesc, &TTSOpsMinimalTuple);
+ aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
+ "HashAgg meta context",
+ ALLOCSET_DEFAULT_SIZES);
+ aggstate->hash_spill_slot = ExecInitExtraTupleSlot(estate, scanDesc,
+ &TTSOpsMinimalTuple);
/* this is an array of pointers, not structures */
aggstate->hash_pergroup = pergroups;
- aggstate->hashentrysize = hash_agg_entry_size(
- aggstate->numtrans, outerplan->plan_width, node->transitionSpace);
+ aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
+ outerplan->plan_width,
+ node->transitionSpace);
/*
* Consider all of the grouping sets together when setting the limits
ExecInitQual(node->bitmapqualorig, (PlanState *) scanstate);
/*
- * Maximum number of prefetches for the tablespace if configured, otherwise
- * the current value of the effective_io_concurrency GUC.
+ * Maximum number of prefetches for the tablespace if configured,
+ * otherwise the current value of the effective_io_concurrency GUC.
*/
scanstate->prefetch_maximum =
get_tablespace_io_concurrency(currentRelation->rd_rel->reltablespace);
* - groupName: the token fullsort or prefixsort
*/
#define INSTRUMENT_SORT_GROUP(node, groupName) \
- if (node->ss.ps.instrument != NULL) \
- { \
- if (node->shared_info && node->am_worker) \
+ do { \
+ if ((node)->ss.ps.instrument != NULL) \
{ \
- Assert(IsParallelWorker()); \
- Assert(ParallelWorkerNumber <= node->shared_info->num_workers); \
- instrumentSortedGroup(&node->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, node->groupName##_state); \
- } else { \
- instrumentSortedGroup(&node->incsort_info.groupName##GroupInfo, node->groupName##_state); \
+ if ((node)->shared_info && (node)->am_worker) \
+ { \
+ Assert(IsParallelWorker()); \
+ Assert(ParallelWorkerNumber <= (node)->shared_info->num_workers); \
+ instrumentSortedGroup(&(node)->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, \
+ (node)->groupName##_state); \
+ } \
+ else \
+ { \
+ instrumentSortedGroup(&(node)->incsort_info.groupName##GroupInfo, \
+ (node)->groupName##_state); \
+ } \
} \
- }
+ } while (0)
+
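/*
 * Minimal sketch of why the multi-statement macro above is wrapped in
 * do { ... } while (0): the invocation plus its trailing semicolon becomes a
 * single statement, so it nests safely under if/else.  LOG_TWICE() is a
 * hypothetical stand-in for INSTRUMENT_SORT_GROUP().
 */
#include <stdio.h>

#define LOG_TWICE(msg) \
	do { \
		printf("%s\n", (msg)); \
		printf("%s\n", (msg)); \
	} while (0)

int
main(void)
{
	int			have_work = 0;

	if (have_work)
		LOG_TWICE("working");	/* expands to one statement... */
	else
		printf("idle\n");		/* ...so this else still binds to the if */
	return 0;
}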
/* ----------------------------------------------------------------
* instrumentSortedGroup
Tuplesortstate *sortState)
{
TuplesortInstrumentation sort_instr;
+
groupInfo->groupCount++;
tuplesort_get_stats(sortState, &sort_instr);
SO1_printf("Sorting presorted prefix tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
- INSTRUMENT_SORT_GROUP(node, prefixsort)
+ INSTRUMENT_SORT_GROUP(node, prefixsort);
if (node->bounded)
{
SO1_printf("Sorting fullsort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
- INSTRUMENT_SORT_GROUP(node, fullsort)
+ INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (final tuple)\n");
node->execution_status = INCSORT_READFULLSORT;
nTuples);
tuplesort_performsort(fullsort_state);
- INSTRUMENT_SORT_GROUP(node, fullsort)
+ INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (found end of group)\n");
node->execution_status = INCSORT_READFULLSORT;
}
/*
- * Unless we've already transitioned modes to reading from the full
- * sort state, then we assume that having read at least
+ * Unless we've already transitioned modes to reading from the
+ * full sort state, we assume that having read at least
* DEFAULT_MAX_FULL_SORT_GROUP_SIZE tuples means it's likely we're
* processing a large group of tuples all having equal prefix keys
* (but haven't yet found the final tuple in that prefix key
SO1_printf("Sorting fullsort tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
- INSTRUMENT_SORT_GROUP(node, fullsort)
+ INSTRUMENT_SORT_GROUP(node, fullsort);
/*
* If the full sort tuplesort happened to switch into top-n
/*
* We might have multiple prefix key groups in the full sort
- * state, so the mode transition function needs to know that it
- * needs to move from the fullsort to presorted prefix sort.
+ * state, so the mode transition function needs to know that
+ * it needs to move from the fullsort to presorted prefix
+ * sort.
*/
node->n_fullsort_remaining = nTuples;
SO1_printf("Sorting presorted prefix tuplesort with >= %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
- INSTRUMENT_SORT_GROUP(node, prefixsort)
+ INSTRUMENT_SORT_GROUP(node, prefixsort);
SO_printf("Setting execution_status to INCSORT_READPREFIXSORT (found end of group)\n");
node->execution_status = INCSORT_READPREFIXSORT;
SO_printf("ExecInitIncrementalSort: initializing sort node\n");
/*
- * Incremental sort can't be used with EXEC_FLAG_BACKWARD or EXEC_FLAG_MARK,
- * because the current sort state contains only one sort batch rather than
- * the full result set.
+ * Incremental sort can't be used with EXEC_FLAG_BACKWARD or
+ * EXEC_FLAG_MARK, because the current sort state contains only one sort
+ * batch rather than the full result set.
*/
Assert((eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)) == 0);
* Initialize child nodes.
*
* Incremental sort does not support backwards scans and mark/restore, so
- * we don't bother removing the flags from eflags here. We allow passing
- * a REWIND flag, because although incremental sort can't use it, the child
+ * we don't bother removing the flags from eflags here. We allow passing a
+ * REWIND flag, because although incremental sort can't use it, the child
* nodes may be able to do something more useful.
*/
outerPlanState(incrsortstate) = ExecInitNode(outerPlan(node), estate, eflags);
* re-execute the sort along with the child node. Incremental sort itself
* can't do anything smarter, but maybe the child nodes can.
*
- * In theory if we've only filled the full sort with one batch (and haven't
- * reset it for a new batch yet) then we could efficiently rewind, but
- * that seems a narrow enough case that it's not worth handling specially
- * at this time.
+ * In theory if we've only filled the full sort with one batch (and
+ * haven't reset it for a new batch yet) then we could efficiently rewind,
+ * but that seems a narrow enough case that it's not worth handling
+ * specially at this time.
*/
/* must drop pointer to sort result tuple */
/*
* If we've set up either of the sort states yet, we need to reset them.
* We could end them and null out the pointers, but there's no reason to
- * repay the setup cost, and because ExecIncrementalSort guards
- * presorted column functions by checking to see if the full sort state
- * has been initialized yet, setting the sort states to null here might
- * actually cause a leak.
+ * repay the setup cost, and because ExecIncrementalSort guards presorted
+ * column functions by checking to see if the full sort state has been
+ * initialized yet, setting the sort states to null here might actually
+ * cause a leak.
*/
if (node->fullsort_state != NULL)
{
if (tidstate->ss.ss_currentScanDesc == NULL)
tidstate->ss.ss_currentScanDesc =
table_beginscan_tid(tidstate->ss.ss_currentRelation,
- tidstate->ss.ps.state->es_snapshot);
+ tidstate->ss.ps.state->es_snapshot);
scan = tidstate->ss.ss_currentScanDesc;
/*
case EEOP_AGG_PLAIN_PERGROUP_NULLCHECK:
{
- int jumpnull;
- LLVMValueRef v_aggstatep;
- LLVMValueRef v_allpergroupsp;
- LLVMValueRef v_pergroup_allaggs;
- LLVMValueRef v_setoff;
+ int jumpnull;
+ LLVMValueRef v_aggstatep;
+ LLVMValueRef v_allpergroupsp;
+ LLVMValueRef v_pergroup_allaggs;
+ LLVMValueRef v_setoff;
jumpnull = op->d.agg_plain_pergroup_nullcheck.jumpnull;
* pergroup_allaggs = aggstate->all_pergroups
* [op->d.agg_plain_pergroup_nullcheck.setoff];
*/
- v_aggstatep = LLVMBuildBitCast(
- b, v_parent, l_ptr(StructAggState), "");
+ v_aggstatep = LLVMBuildBitCast(b, v_parent,
+ l_ptr(StructAggState), "");
- v_allpergroupsp = l_load_struct_gep(
- b, v_aggstatep,
- FIELDNO_AGGSTATE_ALL_PERGROUPS,
- "aggstate.all_pergroups");
+ v_allpergroupsp = l_load_struct_gep(b, v_aggstatep,
+ FIELDNO_AGGSTATE_ALL_PERGROUPS,
+ "aggstate.all_pergroups");
- v_setoff = l_int32_const(
- op->d.agg_plain_pergroup_nullcheck.setoff);
+ v_setoff = l_int32_const(op->d.agg_plain_pergroup_nullcheck.setoff);
- v_pergroup_allaggs = l_load_gep1(
- b, v_allpergroupsp, v_setoff, "");
+ v_pergroup_allaggs = l_load_gep1(b, v_allpergroupsp, v_setoff, "");
- LLVMBuildCondBr(
- b,
- LLVMBuildICmp(b, LLVMIntEQ,
- LLVMBuildPtrToInt(
- b, v_pergroup_allaggs, TypeSizeT, ""),
- l_sizet_const(0), ""),
- opblocks[jumpnull],
- opblocks[opno + 1]);
+ LLVMBuildCondBr(b,
+ LLVMBuildICmp(b, LLVMIntEQ,
+ LLVMBuildPtrToInt(b, v_pergroup_allaggs, TypeSizeT, ""),
+ l_sizet_const(0), ""),
+ opblocks[jumpnull],
+ opblocks[opno + 1]);
break;
}
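The restructured LLVMBuild calls still emit the same IR: index aggstate->all_pergroups with setoff and branch to the jumpnull opcode when the entry is NULL. A hedged sketch of the interpreted equivalent, using invented mini types rather than the real executor structs:

#include <stddef.h>
#include <stdio.h>

typedef struct MiniAggState
{
    void      **all_pergroups;      /* one entry per grouping set / batch */
} MiniAggState;

static int
pergroup_nullcheck(MiniAggState *aggstate, int setoff,
                   int jumpnull, int next_opno)
{
    void       *pergroup_allaggs = aggstate->all_pergroups[setoff];

    /* same decision the LLVMBuildICmp/LLVMBuildCondBr pair encodes */
    return (pergroup_allaggs == NULL) ? jumpnull : next_opno;
}

int
main(void)
{
    static int  dummy;
    void       *groups[2] = {NULL, &dummy};
    MiniAggState state = {groups};

    printf("%d\n", pergroup_nullcheck(&state, 0, 99, 1));   /* 99: jump */
    printf("%d\n", pergroup_nullcheck(&state, 1, 99, 1));   /* 1: fall through */
    return 0;
}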
static bool verify_client_proof(scram_state *state);
static bool verify_final_nonce(scram_state *state);
static void mock_scram_secret(const char *username, int *iterations,
- char **salt, uint8 *stored_key, uint8 *server_key);
+ char **salt, uint8 *stored_key, uint8 *server_key);
static bool is_scram_printable(char *p);
static char *sanitize_char(char c);
static char *sanitize_str(const char *s);
if (password_type == PASSWORD_TYPE_SCRAM_SHA_256)
{
if (parse_scram_secret(shadow_pass, &state->iterations, &state->salt,
- state->StoredKey, state->ServerKey))
+ state->StoredKey, state->ServerKey))
got_secret = true;
else
{
}
/*
- * If the user did not have a valid SCRAM secret, we still go through
- * the motions with a mock one, and fail as if the client supplied an
+ * If the user did not have a valid SCRAM secret, we still go through the
+ * motions with a mock one, and fail as if the client supplied an
* incorrect password. This is to avoid revealing information to an
* attacker.
*/
if (!got_secret)
{
mock_scram_secret(state->port->user_name, &state->iterations,
- &state->salt, state->StoredKey, state->ServerKey);
+ &state->salt, state->StoredKey, state->ServerKey);
state->doomed = true;
}
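What the mock-secret path buys is that the exchange looks the same whether or not the role has a valid SCRAM secret; only the final verdict differs. A conceptual sketch of that "doomed" pattern with invented types (this is not the real scram_state API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct MockScramState
{
    const char *secret;             /* real secret, or a mocked one */
    bool        doomed;             /* true => always fail at the end */
} MockScramState;

static void
begin_exchange(MockScramState *state, const char *stored_secret)
{
    if (stored_secret != NULL)
    {
        state->secret = stored_secret;
        state->doomed = false;
    }
    else
    {
        /* stand-in for mock_scram_secret(): same message flow, no real key */
        state->secret = "mock-secret";
        state->doomed = true;
    }
}

static bool
finish_exchange(const MockScramState *state, const char *client_proof)
{
    /* stand-in comparison; real SCRAM verifies a proof against StoredKey */
    bool        proof_ok = (strcmp(client_proof, state->secret) == 0);

    return proof_ok && !state->doomed;
}

int
main(void)
{
    MockScramState s;

    begin_exchange(&s, NULL);       /* role has no valid SCRAM secret */
    printf("%d\n", (int) finish_exchange(&s, "mock-secret"));   /* still 0 */
    return 0;
}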
errmsg("could not generate random salt")));
result = scram_build_secret(saltbuf, SCRAM_DEFAULT_SALT_LEN,
- SCRAM_DEFAULT_ITERATIONS, password);
+ SCRAM_DEFAULT_ITERATIONS, password);
if (prep_password)
pfree(prep_password);
pg_saslprep_rc rc;
if (!parse_scram_secret(secret, &iterations, &encoded_salt,
- stored_key, server_key))
+ stored_key, server_key))
{
/*
* The password looked like a SCRAM secret, but could not be parsed.
*/
bool
parse_scram_secret(const char *secret, int *iterations, char **salt,
- uint8 *stored_key, uint8 *server_key)
+ uint8 *stored_key, uint8 *server_key)
{
char *v;
char *p;
*/
static void
mock_scram_secret(const char *username, int *iterations, char **salt,
- uint8 *stored_key, uint8 *server_key)
+ uint8 *stored_key, uint8 *server_key)
{
char *raw_salt;
char *encoded_salt;
#include "utils/memutils.h"
/* default init hook can be overridden by a shared library */
-static void default_openssl_tls_init(SSL_CTX *context, bool isServerStart);
+static void default_openssl_tls_init(SSL_CTX *context, bool isServerStart);
openssl_tls_init_hook_typ openssl_tls_init_hook = default_openssl_tls_init;
static int my_sock_read(BIO *h, char *buf, int size);
/*
* Call init hook (usually to set password callback)
*/
- (* openssl_tls_init_hook)(context, isServerStart);
+ (*openssl_tls_init_hook) (context, isServerStart);
/* used by the callback */
ssl_is_server_start = isServerStart;
if (ssl_passphrase_command[0] && ssl_passphrase_command_supports_reload)
SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb);
else
+
/*
* If reloading and no external command is configured, override
* OpenSSL's default handling of passphrase-protected files,
strspn(shadow_pass + 3, MD5_PASSWD_CHARSET) == MD5_PASSWD_LEN - 3)
return PASSWORD_TYPE_MD5;
if (parse_scram_secret(shadow_pass, &iterations, &encoded_salt,
- stored_key, server_key))
+ stored_key, server_key))
return PASSWORD_TYPE_SCRAM_SHA_256;
return PASSWORD_TYPE_PLAINTEXT;
}
List *useful_pathkeys_list = NIL;
/*
- * Considering query_pathkeys is always worth it, because it might allow us
- * to avoid a total sort when we have a partially presorted path available.
+ * Considering query_pathkeys is always worth it, because it might allow
+ * us to avoid a total sort when we have a partially presorted path
+ * available.
*/
if (root->query_pathkeys)
{
ListCell *lc;
- int npathkeys = 0; /* useful pathkeys */
+ int npathkeys = 0; /* useful pathkeys */
foreach(lc, root->query_pathkeys)
{
EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
/*
- * We can only build an Incremental Sort for pathkeys which contain
- * an EC member in the current relation, so ignore any suffix of the
- * list as soon as we find a pathkey without an EC member the
- * relation.
+ * We can only build an Incremental Sort for pathkeys which
+ * contain an EC member in the current relation, so ignore any
+ * suffix of the list as soon as we find a pathkey without an EC
+ * member in the relation.
*
- * By still returning the prefix of the pathkeys list that does meet
- * criteria of EC membership in the current relation, we enable not
- * just an incremental sort on the entirety of query_pathkeys but
- * also incremental sort below a JOIN.
+ * By still returning the prefix of the pathkeys list that does
+ * meet criteria of EC membership in the current relation, we
+ * enable not just an incremental sort on the entirety of
+ * query_pathkeys but also incremental sort below a JOIN.
*/
if (!find_em_expr_for_rel(pathkey_ec, rel))
break;
}
/*
- * The whole query_pathkeys list matches, so append it directly, to allow
- * comparing pathkeys easily by comparing list pointer. If we have to truncate
- * the pathkeys, we gotta do a copy though.
+ * The whole query_pathkeys list matches, so append it directly, to
+ * allow comparing pathkeys easily by comparing list pointer. If we
+ * have to truncate the pathkeys, we need to make a copy, though.
*/
if (npathkeys == list_length(root->query_pathkeys))
useful_pathkeys_list = lappend(useful_pathkeys_list,
/*
* If the path has no ordering at all, then we can't use either
- * incremental sort or rely on implict sorting with a gather merge.
+ * incremental sort or rely on implicit sorting with a gather
+ * merge.
*/
if (subpath->pathkeys == NIL)
continue;
is_sorted = pathkeys_count_contained_in(useful_pathkeys,
- subpath->pathkeys,
- &presorted_keys);
+ subpath->pathkeys,
+ &presorted_keys);
/*
* We don't need to consider the case where a subpath is already
Path *tmp;
/*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
*/
Assert(list_length(useful_pathkeys) != 1);
/*
* Extract presorted keys as list of expressions.
*
- * We need to be careful about Vars containing "varno 0" which might
- * have been introduced by generate_append_tlist, which would confuse
+ * We need to be careful about Vars containing "varno 0" which might have
+ * been introduced by generate_append_tlist, which would confuse
* estimate_num_groups (in fact it'd fail for such expressions). See
* recurse_set_operations which has to deal with the same issue.
*
- * Unlike recurse_set_operations we can't access the original target
- * list here, and even if we could it's not very clear how useful would
- * that be for a set operation combining multiple tables. So we simply
- * detect if there are any expressions with "varno 0" and use the
- * default DEFAULT_NUM_DISTINCT in that case.
+ * Unlike recurse_set_operations we can't access the original target list
+ * here, and even if we could it's not very clear how useful that would be
+ * for a set operation combining multiple tables. So we simply detect if
+ * there are any expressions with "varno 0" and use the default
+ * DEFAULT_NUM_DISTINCT in that case.
*
- * We might also use either 1.0 (a single group) or input_tuples (each
- * row being a separate group), pretty much the worst and best case for
+ * We might also use either 1.0 (a single group) or input_tuples (each row
+ * being a separate group), pretty much the worst and best case for
* incremental sort. But those are extreme cases and using something in
* between seems reasonable. Furthermore, generate_append_tlist is used
* for set operations, which are likely to produce mostly unique output
/*
* Add the disk costs of hash aggregation that spills to disk.
*
- * Groups that go into the hash table stay in memory until finalized,
- * so spilling and reprocessing tuples doesn't incur additional
- * invocations of transCost or finalCost. Furthermore, the computed
- * hash value is stored with the spilled tuples, so we don't incur
- * extra invocations of the hash function.
+ * Groups that go into the hash table stay in memory until finalized, so
+ * spilling and reprocessing tuples doesn't incur additional invocations
+ * of transCost or finalCost. Furthermore, the computed hash value is
+ * stored with the spilled tuples, so we don't incur extra invocations of
+ * the hash function.
*
- * Hash Agg begins returning tuples after the first batch is
- * complete. Accrue writes (spilled tuples) to startup_cost and to
- * total_cost; accrue reads only to total_cost.
+ * Hash Agg begins returning tuples after the first batch is complete.
+ * Accrue writes (spilled tuples) to startup_cost and to total_cost;
+ * accrue reads only to total_cost.
*/
if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
{
- double pages;
- double pages_written = 0.0;
- double pages_read = 0.0;
- double hashentrysize;
- double nbatches;
- Size mem_limit;
- uint64 ngroups_limit;
- int num_partitions;
- int depth;
+ double pages;
+ double pages_written = 0.0;
+ double pages_read = 0.0;
+ double hashentrysize;
+ double nbatches;
+ Size mem_limit;
+ uint64 ngroups_limit;
+ int num_partitions;
+ int depth;
/*
* Estimate number of batches based on the computed limits. If less
* than or equal to one, all groups are expected to fit in memory;
* otherwise we expect to spill.
*/
- hashentrysize = hash_agg_entry_size(
- aggcosts->numAggs, input_width, aggcosts->transitionSpace);
+ hashentrysize = hash_agg_entry_size(aggcosts->numAggs, input_width,
+ aggcosts->transitionSpace);
hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
&ngroups_limit, &num_partitions);
- nbatches = Max( (numGroups * hashentrysize) / mem_limit,
- numGroups / ngroups_limit );
+ nbatches = Max((numGroups * hashentrysize) / mem_limit,
+ numGroups / ngroups_limit);
nbatches = Max(ceil(nbatches), 1.0);
num_partitions = Max(num_partitions, 2);
* recursion; but for the purposes of this calculation assume it stays
* constant.
*/
- depth = ceil( log(nbatches) / log(num_partitions) );
+ depth = ceil(log(nbatches) / log(num_partitions));
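A worked example of the two estimates above, with made-up inputs: roughly 1M groups of ~100 bytes against a 4MB memory limit and a 40,000-group limit gives max(23.8, 25) = 25 batches, and with 4 spill partitions per pass the recursion depth is ceil(log(25)/log(4)) = 3.

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double      numGroups = 1000000.0;
    double      hashentrysize = 100.0;          /* bytes per hash entry */
    double      mem_limit = 4.0 * 1024 * 1024;  /* 4MB */
    double      ngroups_limit = 40000.0;
    int         num_partitions = 4;
    double      nbatches;
    int         depth;

    nbatches = fmax((numGroups * hashentrysize) / mem_limit,   /* ~23.8 */
                    numGroups / ngroups_limit);                /* 25 */
    nbatches = fmax(ceil(nbatches), 1.0);                      /* 25 */
    depth = (int) ceil(log(nbatches) / log(num_partitions));   /* 3 passes */

    printf("nbatches=%.0f depth=%d\n", nbatches, depth);
    return 0;
}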
/*
* Estimate number of pages read and written. For each level of
Assert(joinrel->consider_partitionwise_join);
/*
- * We can not perform partitionwise join if either of the joining relations
- * is not partitioned.
+ * We cannot perform partitionwise join if either of the joining
+ * relations is not partitioned.
*/
if (!IS_PARTITIONED_REL(rel1) || !IS_PARTITIONED_REL(rel2))
return;
* partition bounds as inputs, and the partitions with the same
* cardinal positions form the pairs.
*
- * Note: even in cases where one or both inputs have merged bounds,
- * it would be possible for both the bounds to be exactly the same, but
+ * Note: even in cases where one or both inputs have merged bounds, it
+ * would be possible for both the bounds to be exactly the same, but
* it seems unlikely to be worth the cycles to check.
*/
if (!rel1->partbounds_merged &&
/*
* If the join rel's partbounds_merged flag is true, it means inputs
* are not guaranteed to have the same partition bounds, therefore we
- * can't assume that the partitions at the same cardinal positions form
- * the pairs; let get_matching_part_pairs() generate the pairs.
+ * can't assume that the partitions at the same cardinal positions
+ * form the pairs; let get_matching_part_pairs() generate the pairs.
* Otherwise, nothing to do since we can assume that.
*/
if (joinrel->partbounds_merged)
{
bool rel1_is_simple = IS_SIMPLE_REL(rel1);
bool rel2_is_simple = IS_SIMPLE_REL(rel2);
- int cnt_parts;
+ int cnt_parts;
*parts1 = NIL;
*parts2 = NIL;
* Get a child rel for rel1 with the relids. Note that we should have
* the child rel even if rel1 is a join rel, because in that case the
* partitions specified in the relids would have matching/overlapping
- * boundaries, so the specified partitions should be considered as ones
- * to be joined when planning partitionwise joins of rel1, meaning that
- * the child rel would have been built by the time we get here.
+ * boundaries, so the specified partitions should be considered as
+ * ones to be joined when planning partitionwise joins of rel1,
+ * meaning that the child rel would have been built by the time we get
+ * here.
*/
if (rel1_is_simple)
{
return 0; /* unordered path */
(void) pathkeys_count_contained_in(root->query_pathkeys, pathkeys,
- &n_common_pathkeys);
+ &n_common_pathkeys);
return n_common_pathkeys;
}
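Conceptually, the call above just measures the longest common prefix between the ordering the query wants and the ordering the path already provides. A toy sketch with integer keys (not the real PathKey machinery):

#include <stdio.h>

static int
count_common_prefix(const int *wanted, int nwanted,
                    const int *provided, int nprovided)
{
    int         n = 0;

    while (n < nwanted && n < nprovided && wanted[n] == provided[n])
        n++;
    return n;
}

int
main(void)
{
    int         query_keys[] = {1, 2, 3};   /* say, ORDER BY a, b, c */
    int         path_keys[] = {1, 2};       /* path already sorted by a, b */

    /* 2 of the 3 requested keys are already provided */
    printf("%d\n", count_common_prefix(query_keys, 3, path_keys, 2));
    return 0;
}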
allow_hash = false; /* policy-based decision not to hash */
else
{
- Size hashentrysize = hash_agg_entry_size(
- 0, cheapest_input_path->pathtarget->width, 0);
+ Size hashentrysize = hash_agg_entry_size(0, cheapest_input_path->pathtarget->width, 0);
allow_hash = enable_hashagg_disk ||
(hashentrysize * numDistinctRows <= work_mem * 1024L);
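A hedged arithmetic sketch of that test with made-up numbers (a 56-byte entry size and a 4MB work_mem): 100,000 distinct rows need about 5.6MB, so the hash path is only allowed when spilling to disk is enabled.

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    bool        enable_hashagg_disk = false;
    long        work_mem = 4096;            /* kB, as the GUC is stored */
    double      numDistinctRows = 100000.0;
    double      hashentrysize = 56.0;       /* hypothetical bytes per entry */
    bool        allow_hash;

    allow_hash = enable_hashagg_disk ||
        (hashentrysize * numDistinctRows <= work_mem * 1024L);

    printf("allow_hash = %d\n", (int) allow_hash);  /* 0: 5.6MB > 4MB */
    return 0;
}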
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
- input_path->pathkeys, &presorted_keys);
+ input_path->pathkeys, &presorted_keys);
if (is_sorted)
{
else
{
/*
- * Try adding an explicit sort, but only to the cheapest total path
- * since a full sort should generally add the same cost to all
- * paths.
+ * Try adding an explicit sort, but only to the cheapest total
+ * path since a full sort should generally add the same cost to
+ * all paths.
*/
if (input_path == cheapest_input_path)
{
}
/*
- * If incremental sort is enabled, then try it as well. Unlike with
- * regular sorts, we can't just look at the cheapest path, because
- * the cost of incremental sort depends on how well presorted the
- * path is. Additionally incremental sort may enable a cheaper
- * startup path to win out despite higher total cost.
+ * If incremental sort is enabled, then try it as well. Unlike
+ * with regular sorts, we can't just look at the cheapest path,
+ * because the cost of incremental sort depends on how well
+ * presorted the path is. Additionally incremental sort may enable
+ * a cheaper startup path to win out despite higher total cost.
*/
if (!enable_incrementalsort)
continue;
double total_groups;
/*
- * We don't care if this is the cheapest partial path - we can't
- * simply skip it, because it may be partially sorted in which
- * case we want to consider adding incremental sort (instead of
- * full sort, which is what happens above).
+ * We don't care if this is the cheapest partial path - we
+ * can't simply skip it, because it may be partially sorted in
+ * which case we want to consider adding incremental sort
+ * (instead of full sort, which is what happens above).
*/
is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
- input_path->pathkeys,
- &presorted_keys);
+ input_path->pathkeys,
+ &presorted_keys);
/* No point in adding incremental sort on fully sorted paths. */
if (is_sorted)
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
if (path == cheapest_path || is_sorted)
{
else if (parse->hasAggs)
{
/*
- * We have aggregation, possibly with plain GROUP BY. Make
- * an AggPath.
+ * We have aggregation, possibly with plain GROUP BY. Make an
+ * AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
else if (parse->groupClause)
{
/*
- * We have GROUP BY without aggregation or grouping sets.
- * Make a GroupPath.
+ * We have GROUP BY without aggregation or grouping sets. Make
+ * a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
/*
* Insert a Sort node, if required. But there's no point in
continue;
/*
- * We should have already excluded pathkeys of length 1 because
- * then presorted_keys > 0 would imply is_sorted was true.
+ * We should have already excluded pathkeys of length 1
+ * because then presorted_keys > 0 would imply is_sorted was
+ * true.
*/
Assert(list_length(root->group_pathkeys) != 1);
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
/* Ignore already sorted paths */
if (is_sorted)
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
if (path == cheapest_partial_path || is_sorted)
{
* Consider incremental sort on all partial paths, if enabled.
*
* We can also skip the entire loop when we only have a single-item
- * group_pathkeys because then we can't possibly have a presorted
- * prefix of the list without having the list be fully sorted.
+ * group_pathkeys because then we can't possibly have a presorted prefix
+ * of the list without having the list be fully sorted.
*/
if (!enable_incrementalsort || list_length(root->group_pathkeys) == 1)
return;
double total_groups;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
- path->pathkeys,
- &presorted_keys);
+ path->pathkeys,
+ &presorted_keys);
if (is_sorted)
continue;
* unadorned NULL that's not accepted back by the grammar.
*/
if (exprKind == EXPR_KIND_LIMIT && limitOption == LIMIT_OPTION_WITH_TIES &&
- IsA(clause, A_Const) && ((A_Const *) clause)->val.type == T_Null)
+ IsA(clause, A_Const) &&((A_Const *) clause)->val.type == T_Null)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ROW_COUNT_IN_LIMIT_CLAUSE),
errmsg("row count cannot be NULL in FETCH FIRST ... WITH TIES clause")));
/*
* We must fill the attmap now so that it can be used to process generated
* column default expressions in the per-column loop below.
- */
+ */
new_attno = 1;
for (parent_attno = 1; parent_attno <= tupleDesc->natts;
parent_attno++)
* mentioned above.
*/
Datum attoptions =
- get_attoptions(RelationGetRelid(index_rel), i + 1);
+ get_attoptions(RelationGetRelid(index_rel), i + 1);
defopclass = GetDefaultOpClass(attform->atttypid,
index_rel->rd_rel->relam);
typedef struct PartitionMap
{
int nparts; /* number of partitions */
- int *merged_indexes; /* indexes of merged partitions */
+ int *merged_indexes; /* indexes of merged partitions */
bool *merged; /* flags to indicate whether partitions are
* merged with non-dummy partitions */
bool did_remapping; /* did we re-map partitions? */
static void init_partition_map(RelOptInfo *rel, PartitionMap *map);
static void free_partition_map(PartitionMap *map);
static bool is_dummy_partition(RelOptInfo *rel, int part_index);
-static int merge_matching_partitions(PartitionMap *outer_map,
- PartitionMap *inner_map,
- int outer_part,
- int inner_part,
- int *next_index);
-static int process_outer_partition(PartitionMap *outer_map,
- PartitionMap *inner_map,
- bool outer_has_default,
- bool inner_has_default,
- int outer_index,
- int inner_default,
- JoinType jointype,
- int *next_index,
- int *default_index);
-static int process_inner_partition(PartitionMap *outer_map,
- PartitionMap *inner_map,
- bool outer_has_default,
- bool inner_has_default,
- int inner_index,
- int outer_default,
- JoinType jointype,
- int *next_index,
- int *default_index);
+static int merge_matching_partitions(PartitionMap *outer_map,
+ PartitionMap *inner_map,
+ int outer_part,
+ int inner_part,
+ int *next_index);
+static int process_outer_partition(PartitionMap *outer_map,
+ PartitionMap *inner_map,
+ bool outer_has_default,
+ bool inner_has_default,
+ int outer_index,
+ int inner_default,
+ JoinType jointype,
+ int *next_index,
+ int *default_index);
+static int process_inner_partition(PartitionMap *outer_map,
+ PartitionMap *inner_map,
+ bool outer_has_default,
+ bool inner_has_default,
+ int inner_index,
+ int outer_default,
+ JoinType jointype,
+ int *next_index,
+ int *default_index);
static void merge_null_partitions(PartitionMap *outer_map,
PartitionMap *inner_map,
bool outer_has_null,
JoinType jointype,
int *next_index,
int *default_index);
-static int merge_partition_with_dummy(PartitionMap *map, int index,
- int *next_index);
+static int merge_partition_with_dummy(PartitionMap *map, int index,
+ int *next_index);
static void fix_merged_indexes(PartitionMap *outer_map,
PartitionMap *inner_map,
int nmerged, List *merged_indexes);
List *merged_indexes,
int null_index,
int default_index);
-static int get_range_partition(RelOptInfo *rel,
- PartitionBoundInfo bi,
- int *lb_pos,
- PartitionRangeBound *lb,
- PartitionRangeBound *ub);
-static int get_range_partition_internal(PartitionBoundInfo bi,
- int *lb_pos,
- PartitionRangeBound *lb,
- PartitionRangeBound *ub);
+static int get_range_partition(RelOptInfo *rel,
+ PartitionBoundInfo bi,
+ int *lb_pos,
+ PartitionRangeBound *lb,
+ PartitionRangeBound *ub);
+static int get_range_partition_internal(PartitionBoundInfo bi,
+ int *lb_pos,
+ PartitionRangeBound *lb,
+ PartitionRangeBound *ub);
static bool compare_range_partitions(int partnatts, FmgrInfo *partsupfuncs,
Oid *partcollations,
PartitionRangeBound *outer_lb,
PartitionRangeBound *outer_ub,
PartitionRangeBound *inner_lb,
PartitionRangeBound *inner_ub,
- int lb_cmpval, int ub_cmpval,
+ int lb_cmpval, int ub_cmpval,
PartitionRangeBound *merged_lb,
PartitionRangeBound *merged_ub);
static void add_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
dest->kind = NULL;
/*
- * For hash partitioning, datums array will have two elements - modulus and
- * remainder.
+ * For hash partitioning, datums array will have two elements - modulus
+ * and remainder.
*/
hash_part = (key->strategy == PARTITION_STRATEGY_HASH);
natts = hash_part ? 2 : partnatts;
default:
elog(ERROR, "unexpected partition strategy: %d",
(int) outer_binfo->strategy);
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
}
}
/*
* Merge partitions from both sides. In each iteration we compare a pair
- * of list values, one from each side, and decide whether the corresponding
- * partitions match or not. If the two values match exactly, move to the
- * next pair of list values, otherwise move to the next list value on the
- * side with a smaller list value.
+ * of list values, one from each side, and decide whether the
+ * corresponding partitions match or not. If the two values match
+ * exactly, move to the next pair of list values, otherwise move to the
+ * next list value on the side with a smaller list value.
*/
outer_pos = inner_pos = 0;
while (outer_pos < outer_bi->ndatums || inner_pos < inner_bi->ndatums)
if (outer_pos < outer_bi->ndatums)
{
/*
- * If the partition on the outer side has been proven empty, ignore
- * it and move to the next datum on the outer side.
+ * If the partition on the outer side has been proven empty,
+ * ignore it and move to the next datum on the outer side.
*/
outer_index = outer_bi->indexes[outer_pos];
if (is_dummy_partition(outer_rel, outer_index))
if (inner_pos < inner_bi->ndatums)
{
/*
- * If the partition on the inner side has been proven empty, ignore
- * it and move to the next datum on the inner side.
+ * If the partition on the inner side has been proven empty,
+ * ignore it and move to the next datum on the inner side.
*/
inner_index = inner_bi->indexes[inner_pos];
if (is_dummy_partition(inner_rel, inner_index))
* We run this loop till both sides finish. This allows us to avoid
* duplicating code to handle the remaining values on the side which
* finishes later. For that we set the comparison parameter cmpval in
- * such a way that it appears as if the side which finishes earlier has
- * an extra value higher than any other value on the unfinished side.
- * That way we advance the values on the unfinished side till all of
- * its values are exhausted.
+ * such a way that it appears as if the side which finishes earlier
+ * has an extra value higher than any other value on the unfinished
+ * side. That way we advance the values on the unfinished side till
+ * all of its values are exhausted.
*/
if (outer_pos >= outer_bi->ndatums)
cmpval = 1;
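The "extra value higher than any other" trick described above is easiest to see on two plain sorted arrays. A standalone sketch (ints instead of partition bound datums): once one side runs out, cmpval is forced so the loop keeps draining the other side without duplicating the drain logic.

#include <stdio.h>

int
main(void)
{
    int         outer[] = {1, 3, 5};
    int         inner[] = {3, 4};
    int         outer_pos = 0,
                inner_pos = 0;

    while (outer_pos < 3 || inner_pos < 2)
    {
        int         cmpval;

        if (outer_pos >= 3)
            cmpval = 1;         /* pretend outer has one extra, larger value */
        else if (inner_pos >= 2)
            cmpval = -1;        /* pretend inner has one extra, larger value */
        else
            cmpval = (outer[outer_pos] > inner[inner_pos]) -
                (outer[outer_pos] < inner[inner_pos]);

        if (cmpval == 0)
        {
            printf("match %d\n", outer[outer_pos]);
            outer_pos++;
            inner_pos++;
        }
        else if (cmpval < 0)
            printf("outer only %d\n", outer[outer_pos++]);
        else
            printf("inner only %d\n", inner[inner_pos++]);
    }
    return 0;
}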
Assert(outer_pos < outer_bi->ndatums);
/*
- * If the inner side has the default partition, or this is an outer
- * join, try to assign a merged partition to the outer partition
- * (see process_outer_partition()). Otherwise, the outer partition
- * will not contribute to the result.
+ * If the inner side has the default partition, or this is an
+ * outer join, try to assign a merged partition to the outer
+ * partition (see process_outer_partition()). Otherwise, the
+ * outer partition will not contribute to the result.
*/
if (inner_has_default || IS_OUTER_JOIN(jointype))
{
/*
* If the outer side has the default partition, or this is a FULL
* join, try to assign a merged partition to the inner partition
- * (see process_inner_partition()). Otherwise, the inner partition
- * will not contribute to the result.
+ * (see process_inner_partition()). Otherwise, the inner
+ * partition will not contribute to the result.
*/
if (outer_has_default || jointype == JOIN_FULL)
{
* partitions match or not. If the two ranges overlap, move to the next
* pair of ranges, otherwise move to the next range on the side with a
* lower range. outer_lb_pos/inner_lb_pos keep track of the positions of
- * lower bounds in the datums arrays in the outer/inner PartitionBoundInfos
- * respectively.
+ * lower bounds in the datums arrays in the outer/inner
+ * PartitionBoundInfos respectively.
*/
outer_lb_pos = inner_lb_pos = 0;
outer_index = get_range_partition(outer_rel, outer_bi, &outer_lb_pos,
* We run this loop till both sides finish. This allows us to avoid
* duplicating code to handle the remaining ranges on the side which
* finishes later. For that we set the comparison parameter cmpval in
- * such a way that it appears as if the side which finishes earlier has
- * an extra range higher than any other range on the unfinished side.
- * That way we advance the ranges on the unfinished side till all of
- * its ranges are exhausted.
+ * such a way that it appears as if the side which finishes earlier
+ * has an extra range higher than any other range on the unfinished
+ * side. That way we advance the ranges on the unfinished side till
+ * all of its ranges are exhausted.
*/
if (outer_index == -1)
{
goto cleanup;
/*
- * A row from a non-overlapping portion (if any) of a partition
- * on one side might find its join partner in the default
- * partition (if any) on the other side, causing the same
- * situation as above; give up in that case.
+ * A row from a non-overlapping portion (if any) of a partition on
+ * one side might find its join partner in the default partition
+ * (if any) on the other side, causing the same situation as
+ * above; give up in that case.
*/
if ((outer_has_default && (lb_cmpval > 0 || ub_cmpval < 0)) ||
(inner_has_default && (lb_cmpval < 0 || ub_cmpval > 0)))
outer_map.merged[outer_index] == false);
/*
- * If the inner side has the default partition, or this is an outer
- * join, try to assign a merged partition to the outer partition
- * (see process_outer_partition()). Otherwise, the outer partition
- * will not contribute to the result.
+ * If the inner side has the default partition, or this is an
+ * outer join, try to assign a merged partition to the outer
+ * partition (see process_outer_partition()). Otherwise, the
+ * outer partition will not contribute to the result.
*/
if (inner_has_default || IS_OUTER_JOIN(jointype))
{
/*
* If the outer side has the default partition, or this is a FULL
* join, try to assign a merged partition to the inner partition
- * (see process_inner_partition()). Otherwise, the inner partition
- * will not contribute to the result.
+ * (see process_inner_partition()). Otherwise, the inner
+ * partition will not contribute to the result.
*/
if (outer_has_default || jointype == JOIN_FULL)
{
}
/*
- * If we assigned a merged partition, add the range bounds and index of
- * the merged partition if appropriate.
+ * If we assigned a merged partition, add the range bounds and index
+ * of the merged partition if appropriate.
*/
if (merged_index >= 0 && merged_index != default_index)
add_merged_range_bounds(partnatts, partsupfuncs, partcollations,
merge_matching_partitions(PartitionMap *outer_map, PartitionMap *inner_map,
int outer_index, int inner_index, int *next_index)
{
- int outer_merged_index;
- int inner_merged_index;
- bool outer_merged;
- bool inner_merged;
+ int outer_merged_index;
+ int inner_merged_index;
+ bool outer_merged;
+ bool inner_merged;
Assert(outer_index >= 0 && outer_index < outer_map->nparts);
outer_merged_index = outer_map->merged_indexes[outer_index];
*/
if (outer_merged_index == -1 && inner_merged_index == -1)
{
- int merged_index = *next_index;
+ int merged_index = *next_index;
Assert(!outer_merged);
Assert(!inner_merged);
int *next_index,
int *default_index)
{
- int merged_index = -1;
+ int merged_index = -1;
Assert(outer_index >= 0);
/*
* If the inner side has the default partition, a row from the outer
* partition might find its join partner in the default partition; try
- * merging the outer partition with the default partition. Otherwise, this
- * should be an outer join, in which case the outer partition has to be
- * scanned all the way anyway; merge the outer partition with a dummy
+ * merging the outer partition with the default partition. Otherwise,
+ * this should be an outer join, in which case the outer partition has to
+ * be scanned all the way anyway; merge the outer partition with a dummy
* partition on the other side.
*/
if (inner_has_default)
/*
* If the outer side has the default partition as well, the default
- * partition on the inner side will have two matching partitions on the
- * other side: the outer partition and the default partition on the
- * outer side. Partitionwise join doesn't handle this scenario yet.
+ * partition on the inner side will have two matching partitions on
+ * the other side: the outer partition and the default partition on
+ * the outer side. Partitionwise join doesn't handle this scenario
+ * yet.
*/
if (outer_has_default)
return -1;
return -1;
/*
- * If this is a FULL join, the default partition on the inner side
- * has to be scanned all the way anyway, so the resulting partition
- * will contain all key values from the default partition, which any
- * other partition of the join relation will not contain. Thus the
+ * If this is a FULL join, the default partition on the inner side has
+ * to be scanned all the way anyway, so the resulting partition will
+ * contain all key values from the default partition, which any other
+ * partition of the join relation will not contain. Thus the
* resulting partition will act as the default partition of the join
* relation; record the index in *default_index if not already done.
*/
int *next_index,
int *default_index)
{
- int merged_index = -1;
+ int merged_index = -1;
Assert(inner_index >= 0);
/*
* If the outer side has the default partition, a row from the inner
* partition might find its join partner in the default partition; try
- * merging the inner partition with the default partition. Otherwise, this
- * should be a FULL join, in which case the inner partition has to be
+ * merging the inner partition with the default partition. Otherwise,
+ * this should be a FULL join, in which case the inner partition has to be
* scanned all the way anyway; merge the inner partition with a dummy
* partition on the other side.
*/
/*
* If the inner side has the default partition as well, the default
- * partition on the outer side will have two matching partitions on the
- * other side: the inner partition and the default partition on the
- * inner side. Partitionwise join doesn't handle this scenario yet.
+ * partition on the outer side will have two matching partitions on
+ * the other side: the inner partition and the default partition on
+ * the inner side. Partitionwise join doesn't handle this scenario
+ * yet.
*/
if (inner_has_default)
return -1;
int *next_index,
int *null_index)
{
- bool consider_outer_null = false;
- bool consider_inner_null = false;
+ bool consider_outer_null = false;
+ bool consider_inner_null = false;
Assert(outer_has_null || inner_has_null);
Assert(*null_index == -1);
/*
* If this is an outer join, the NULL partition on the outer side has
* to be scanned all the way anyway; merge the NULL partition with a
- * dummy partition on the other side. In that case consider_outer_null
- * means that the NULL partition only contains NULL values as the key
- * values, so the merged partition will do so; treat it as the NULL
- * partition of the join relation.
+ * dummy partition on the other side. In that case
+ * consider_outer_null means that the NULL partition only contains
+ * NULL values as the key values, so the merged partition will do so;
+ * treat it as the NULL partition of the join relation.
*/
if (IS_OUTER_JOIN(jointype))
{
Assert(inner_has_null);
/*
- * If this is a FULL join, the NULL partition on the inner side has
- * to be scanned all the way anyway; merge the NULL partition with a
- * dummy partition on the other side. In that case consider_inner_null
- * means that the NULL partition only contains NULL values as the key
- * values, so the merged partition will do so; treat it as the NULL
- * partition of the join relation.
+ * If this is a FULL join, the NULL partition on the inner side has to
+ * be scanned all the way anyway; merge the NULL partition with a
+ * dummy partition on the other side. In that case
+ * consider_inner_null means that the NULL partition only contains
+ * NULL values as the key values, so the merged partition will do so;
+ * treat it as the NULL partition of the join relation.
*/
if (jointype == JOIN_FULL)
*null_index = merge_partition_with_dummy(inner_map, inner_null,
int *next_index,
int *default_index)
{
- int outer_merged_index = -1;
- int inner_merged_index = -1;
+ int outer_merged_index = -1;
+ int inner_merged_index = -1;
Assert(outer_has_default || inner_has_default);
/*
* If this is an outer join, the default partition on the outer side
* has to be scanned all the way anyway; if we have not yet assigned a
- * partition, merge the default partition with a dummy partition on the
- * other side. The merged partition will act as the default partition
- * of the join relation (see comments in process_inner_partition()).
+ * partition, merge the default partition with a dummy partition on
+ * the other side. The merged partition will act as the default
+ * partition of the join relation (see comments in
+ * process_inner_partition()).
*/
if (IS_OUTER_JOIN(jointype))
{
else if (!outer_has_default && inner_has_default)
{
/*
- * If this is a FULL join, the default partition on the inner side
- * has to be scanned all the way anyway; if we have not yet assigned a
- * partition, merge the default partition with a dummy partition on the
- * other side. The merged partition will act as the default partition
- * of the join relation (see comments in process_outer_partition()).
+ * If this is a FULL join, the default partition on the inner side has
+ * to be scanned all the way anyway; if we have not yet assigned a
+ * partition, merge the default partition with a dummy partition on
+ * the other side. The merged partition will act as the default
+ * partition of the join relation (see comments in
+ * process_outer_partition()).
*/
if (jointype == JOIN_FULL)
{
static int
merge_partition_with_dummy(PartitionMap *map, int index, int *next_index)
{
- int merged_index = *next_index;
+ int merged_index = *next_index;
Assert(index >= 0 && index < map->nparts);
Assert(map->merged_indexes[index] == -1);
int *outer_indexes;
int *inner_indexes;
int max_nparts;
- int i;
+ int i;
Assert(nmerged > 0);
Assert(*outer_parts == NIL);
{
if (i < outer_nparts)
{
- int merged_index = outer_map->merged_indexes[i];
+ int merged_index = outer_map->merged_indexes[i];
if (merged_index >= 0)
{
}
if (i < inner_nparts)
{
- int merged_index = inner_map->merged_indexes[i];
+ int merged_index = inner_map->merged_indexes[i];
if (merged_index >= 0)
{
int inner_index = inner_indexes[i];
/*
- * If both partitions are dummy, it means the merged partition that had
- * been assigned to the outer/inner partition was removed when
- * re-merging the outer/inner partition in merge_matching_partitions();
- * ignore the merged partition.
+ * If both partitions are dummy, it means the merged partition that
+ * had been assigned to the outer/inner partition was removed when
+ * re-merging the outer/inner partition in
+ * merge_matching_partitions(); ignore the merged partition.
*/
if (outer_index == -1 && inner_index == -1)
continue;
Assert(bi->strategy == PARTITION_STRATEGY_RANGE);
- do {
+ do
+ {
part_index = get_range_partition_internal(bi, lb_pos, lb, ub);
if (part_index == -1)
return -1;
PartitionRangeBound *outer_ub,
PartitionRangeBound *inner_lb,
PartitionRangeBound *inner_ub,
- int lb_cmpval, int ub_cmpval,
+ int lb_cmpval, int ub_cmpval,
PartitionRangeBound *merged_lb,
PartitionRangeBound *merged_ub)
{
/*
* A LEFT/ANTI join will have all the rows from the outer side, so
- * the bounds of the merged partition will be the same as the outer
- * bounds.
+ * the bounds of the merged partition will be the same as the
+ * outer bounds.
*/
*merged_lb = *outer_lb;
*merged_ub = *outer_ub;
case JOIN_FULL:
/*
- * A FULL join will have all the rows from both sides, so the lower
- * bound of the merged partition will be the lower of the two lower
- * bounds, and the upper bound of the merged partition will be the
- * higher of the two upper bounds.
+ * A FULL join will have all the rows from both sides, so the
+ * lower bound of the merged partition will be the lower of the
+ * two lower bounds, and the upper bound of the merged partition
+ * will be the higher of the two upper bounds.
*/
*merged_lb = (lb_cmpval < 0) ? *outer_lb : *inner_lb;
*merged_ub = (ub_cmpval > 0) ? *outer_ub : *inner_ub;
}
else
{
- PartitionRangeBound prev_ub;
+ PartitionRangeBound prev_ub;
Assert(*merged_datums);
Assert(*merged_kinds);
ListCell *lc;
/*
- * If this partitioned relation has a default partition and is itself
- * a partition (as evidenced by partition_qual being not NIL), we first
+ * If this partitioned relation has a default partition and is itself a
+ * partition (as evidenced by partition_qual being not NIL), we first
* check if the clauses contradict the partition constraint. If they do,
* there's no need to generate any steps as it'd already be proven that no
* partitions need to be scanned.
/* Determine if this table needs vacuum or analyze. */
*dovacuum = force_vacuum || (vactuples > vacthresh) ||
- (vac_ins_base_thresh >= 0 && instuples > vacinsthresh);
+ (vac_ins_base_thresh >= 0 && instuples > vacinsthresh);
*doanalyze = (anltuples > anlthresh);
}
else
ProcessConfigFile(PGC_SIGHUP);
/*
- * Checkpointer is the last process to shut down, so we ask it to
- * hold the keys for a range of other tasks required most of which
- * have nothing to do with checkpointing at all.
+ * Checkpointer is the last process to shut down, so we ask it to hold
+ * the keys for a range of other tasks required, most of which have
+ * nothing to do with checkpointing at all.
*
- * For various reasons, some config values can change dynamically
- * so the primary copy of them is held in shared memory to make
- * sure all backends see the same value. We make Checkpointer
- * responsible for updating the shared memory copy if the
- * parameter setting changes because of SIGHUP.
+ * For various reasons, some config values can change dynamically so
+ * the primary copy of them is held in shared memory to make sure all
+ * backends see the same value. We make Checkpointer responsible for
+ * updating the shared memory copy if the parameter setting changes
+ * because of SIGHUP.
*/
UpdateSharedMemoryConfig();
}
if (ShutdownRequestPending)
{
/*
- * From here on, elog(ERROR) should end with exit(1), not send
- * control back to the sigsetjmp block above
+ * From here on, elog(ERROR) should end with exit(1), not send control
+ * back to the sigsetjmp block above
*/
ExitOnAnyError = true;
/* Close down the database */
ShutdownXLOG(0, 0);
/* Normal exit from the checkpointer is here */
- proc_exit(0); /* done */
+ proc_exit(0); /* done */
}
}
pgstat_recv_resetslrucounter(PgStat_MsgResetslrucounter *msg, int len)
{
int i;
- TimestampTz ts = GetCurrentTimestamp();
+ TimestampTz ts = GetCurrentTimestamp();
for (i = 0; i < SLRU_NUM_ELEMENTS; i++)
{
/*
* It is quite possible that a non-aggressive VACUUM ended up skipping
* various pages, however, we'll zero the insert counter here regardless.
- * It's currently used only to track when we need to perform an
- * "insert" autovacuum, which are mainly intended to freeze newly inserted
- * tuples. Zeroing this may just mean we'll not try to vacuum the table
- * again until enough tuples have been inserted to trigger another insert
+ * It's currently used only to track when we need to perform an "insert"
+ * autovacuum, which is mainly intended to freeze newly inserted tuples.
+ * Zeroing this may just mean we'll not try to vacuum the table again
+ * until enough tuples have been inserted to trigger another insert
* autovacuum. An anti-wraparound autovacuum will catch any persistent
* stragglers.
*/
int
pgstat_slru_index(const char *name)
{
- int i;
+ int i;
for (i = 0; i < SLRU_NUM_ELEMENTS; i++)
{
if (SSLok == 'S' && secure_open_server(port) == -1)
return STATUS_ERROR;
#endif
+
/*
* regular startup packet, cancel, etc packet should follow, but not
* another SSL negotiation request, and a GSS request should only
if (GSSok == 'G' && secure_open_gssapi(port) == -1)
return STATUS_ERROR;
#endif
+
/*
* regular startup packet, cancel, etc packet should follow, but not
* another GSS negotiation request, and an SSL request should only
void
AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid,
const char *pathname, size_t size, pg_time_t mtime,
- pg_checksum_context * checksum_ctx)
+ pg_checksum_context *checksum_ctx)
{
char pathbuf[MAXPGPATH];
int pathlen;
}
/*
- * Each file's entry needs to be separated from any entry that follows by a
- * comma, but there's no comma before the first one or after the last one.
- * To make that work, adding a file to the manifest starts by terminating
- * the most recently added line, with a comma if appropriate, but does not
- * terminate the line inserted for this file.
+ * Each file's entry needs to be separated from any entry that follows by
+ * a comma, but there's no comma before the first one or after the last
+ * one. To make that work, adding a file to the manifest starts by
+ * terminating the most recently added line, with a comma if appropriate,
+ * but does not terminate the line inserted for this file.
*/
initStringInfo(&buf);
if (manifest->first_file)
{
Oid partoid; /* LogicalRepPartMap's key */
LogicalRepRelMapEntry relmapentry;
-} LogicalRepPartMapEntry;
+} LogicalRepPartMapEntry;
/*
* Relcache invalidation callback for our relation map cache.
if (!publish)
{
- bool ancestor_published = false;
+ bool ancestor_published = false;
/*
* For a partition, check if any of the ancestors are
*/
if (am_partition)
{
- List *ancestors = get_partition_ancestors(relid);
- ListCell *lc2;
+ List *ancestors = get_partition_ancestors(relid);
+ ListCell *lc2;
- /* Find the "topmost" ancestor that is in this publication. */
+ /*
+ * Find the "topmost" ancestor that is in this
+ * publication.
+ */
foreach(lc2, ancestors)
{
- Oid ancestor = lfirst_oid(lc2);
+ Oid ancestor = lfirst_oid(lc2);
if (list_member_oid(GetRelationPublications(ancestor),
pub->oid))
retlsn = moveto;
/*
- * Dirty the slot so as it is written out at the next checkpoint.
- * Note that the LSN position advanced may still be lost in the
- * event of a crash, but this makes the data consistent after a
- * clean shutdown.
+ * Dirty the slot so that it is written out at the next checkpoint. Note
+ * that the LSN position advanced may still be lost in the event of a
+ * crash, but this makes the data consistent after a clean shutdown.
*/
ReplicationSlotMarkDirty();
}
* keep track of their progress, so we should make more of an
* effort to save it for them.
*
- * Dirty the slot so it is written out at the next checkpoint.
- * The LSN position advanced to may still be lost on a crash
- * but this makes the data consistent after a clean shutdown.
+ * Dirty the slot so it is written out at the next checkpoint. The
+ * LSN position advanced to may still be lost on a crash but this
+ * makes the data consistent after a clean shutdown.
*/
ReplicationSlotMarkDirty();
}
walrcv->conninfo[0] = '\0';
/*
- * Use configured replication slot if present, and ignore the value
- * of create_temp_slot as the slot name should be persistent. Otherwise,
- * use create_temp_slot to determine whether this WAL receiver should
- * create a temporary slot by itself and use it, or not.
+ * Use configured replication slot if present, and ignore the value of
+ * create_temp_slot as the slot name should be persistent. Otherwise, use
+ * create_temp_slot to determine whether this WAL receiver should create a
+ * temporary slot by itself and use it, or not.
*/
if (slotname != NULL && slotname[0] != '\0')
{
void
WalSndResourceCleanup(bool isCommit)
{
- ResourceOwner resowner;
+ ResourceOwner resowner;
if (CurrentResourceOwner == NULL)
return;
/*
- * Deleting CurrentResourceOwner is not allowed, so we must save a
- * pointer in a local variable and clear it first.
+ * Deleting CurrentResourceOwner is not allowed, so we must save a pointer
+ * in a local variable and clear it first.
*/
resowner = CurrentResourceOwner;
CurrentResourceOwner = NULL;
else if (IsA(clause, ScalarArrayOpExpr))
{
/* If it's an scalar array operator, check for Var IN Const. */
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
/*
* Reject ALL() variant, we only care about ANY/IN.
/*
* If it's not an "=" operator, just ignore the clause, as it's not
* compatible with functional dependencies. The operator is identified
- * simply by looking at which function it uses to estimate selectivity.
- * That's a bit strange, but it's what other similar places do.
+ * simply by looking at which function it uses to estimate
+ * selectivity. That's a bit strange, but it's what other similar
+ * places do.
*/
if (get_oprrest(expr->opno) != F_EQSEL)
return false;
find_strongest_dependency(MVDependencies **dependencies, int ndependencies,
Bitmapset *attnums)
{
- int i, j;
+ int i,
+ j;
MVDependency *strongest = NULL;
/* number of attnums in clauses */
/*
* this dependency is stronger, but we must still check that it's
- * fully matched to these attnums. We perform this check last as it's
- * slightly more expensive than the previous checks.
+ * fully matched to these attnums. We perform this check last as
+ * it's slightly more expensive than the previous checks.
*/
if (dependency_is_fully_matched(dependency, attnums))
strongest = dependency; /* save new best match */
static void statext_store(Oid relid,
MVNDistinct *ndistinct, MVDependencies *dependencies,
MCVList *mcv, VacAttrStats **stats);
-static int statext_compute_stattarget(int stattarget,
- int natts, VacAttrStats **stats);
+static int statext_compute_stattarget(int stattarget,
+ int natts, VacAttrStats **stats);
/*
* Compute requested extended stats, using the rows sampled for the plain
stats);
/*
- * Don't rebuild statistics objects with statistics target set to 0 (we
- * just leave the existing values around, just like we do for regular
- * per-column statistics).
+ * Don't rebuild statistics objects with statistics target set to 0
+ * (we just leave the existing values around, just like we do for
+ * regular per-column statistics).
*/
if (stattarget == 0)
continue;
foreach(lc, lstats)
{
- StatExtEntry *stat = (StatExtEntry *) lfirst(lc);
- int stattarget = stat->stattarget;
- VacAttrStats **stats;
- int nattrs = bms_num_members(stat->columns);
+ StatExtEntry *stat = (StatExtEntry *) lfirst(lc);
+ int stattarget = stat->stattarget;
+ VacAttrStats **stats;
+ int nattrs = bms_num_members(stat->columns);
/*
* Check if we can build this statistics object based on the columns
static int
statext_compute_stattarget(int stattarget, int nattrs, VacAttrStats **stats)
{
- int i;
+ int i;
/*
- * If there's statistics target set for the statistics object, use it.
- * It may be set to 0 which disables building of that statistic.
+ * If there's statistics target set for the statistics object, use it. It
+ * may be set to 0 which disables building of that statistic.
*/
if (stattarget >= 0)
return stattarget;
/*
* The target for the statistics object is set to -1, in which case we
- * look at the maximum target set for any of the attributes the object
- * is defined on.
+ * look at the maximum target set for any of the attributes the object is
+ * defined on.
*/
for (i = 0; i < nattrs; i++)
{
/* Var IN Array */
if (IsA(clause, ScalarArrayOpExpr))
{
- RangeTblEntry *rte = root->simple_rte_array[relid];
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
+ RangeTblEntry *rte = root->simple_rte_array[relid];
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
Var *var;
/* Only expressions with two arguments are considered compatible. */
ListCell *l;
Bitmapset **list_attnums;
int listidx;
- Selectivity sel = 1.0;
+ Selectivity sel = 1.0;
/* check if there's any stats that might be useful for us. */
if (!has_stats_of_kind(rel->statlist, STATS_EXT_MCV))
stat = choose_best_statistics(rel->statlist, STATS_EXT_MCV,
list_attnums, list_length(clauses));
- /* if no (additional) matching stats could be found then we've nothing to do */
+ /*
+ * if no (additional) matching stats could be found then we've nothing
+ * to do
+ */
if (!stat)
break;
foreach(l, clauses)
{
/*
- * If the clause is compatible with the selected statistics, mark it
- * as estimated and add it to the list to estimate.
+ * If the clause is compatible with the selected statistics, mark
+ * it as estimated and add it to the list to estimate.
*/
if (list_attnums[listidx] != NULL &&
bms_is_subset(list_attnums[listidx], stat->keys))
/*
* First compute "simple" selectivity, i.e. without the extended
* statistics, and essentially assuming independence of the
- * columns/clauses. We'll then use the various selectivities computed from
- * MCV list to improve it.
+ * columns/clauses. We'll then use the various selectivities computed
+ * from MCV list to improve it.
*/
simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid,
- jointype, sjinfo, NULL);
+ jointype, sjinfo, NULL);
/*
- * Now compute the multi-column estimate from the MCV list, along with the
- * other selectivities (base & total selectivity).
+ * Now compute the multi-column estimate from the MCV list, along with
+ * the other selectivities (base & total selectivity).
*/
mcv_sel = mcv_clauselist_selectivity(root, stat, stat_clauses, varRelid,
jointype, sjinfo, rel,
if (other_sel > 1.0 - mcv_totalsel)
other_sel = 1.0 - mcv_totalsel;
- /* Overall selectivity is the combination of MCV and non-MCV estimates. */
+ /*
+ * Overall selectivity is the combination of MCV and non-MCV
+ * estimates.
+ */
stat_sel = mcv_sel + other_sel;
CLAMP_PROBABILITY(stat_sel);
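A worked example of that combination with made-up numbers; CLAMP_PROBABILITY is re-created here only to keep the sketch self-contained.

#include <stdio.h>

#define CLAMP_PROBABILITY(p) \
    do { \
        if ((p) < 0.0) \
            (p) = 0.0; \
        else if ((p) > 1.0) \
            (p) = 1.0; \
    } while (0)

int
main(void)
{
    double      mcv_sel = 0.12;         /* estimate from matching MCV items */
    double      mcv_totalsel = 0.80;    /* frequency covered by the MCV list */
    double      other_sel = 0.25;       /* estimate for the non-MCV remainder */
    double      stat_sel;

    /* the non-MCV part cannot exceed what the MCV list leaves uncovered */
    if (other_sel > 1.0 - mcv_totalsel)
        other_sel = 1.0 - mcv_totalsel; /* -> 0.20 */

    stat_sel = mcv_sel + other_sel;     /* -> 0.32 */
    CLAMP_PROBABILITY(stat_sel);

    printf("stat_sel = %.2f\n", stat_sel);
    return 0;
}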
bool
examine_clause_args(List *args, Var **varp, Const **cstp, bool *varonleftp)
{
- Var *var;
- Const *cst;
- bool varonleft;
- Node *leftop,
- *rightop;
+ Var *var;
+ Const *cst;
+ bool varonleft;
+ Node *leftop,
+ *rightop;
/* enforced by statext_is_compatible_clause_internal */
Assert(list_length(args) == 2);
if (IsA(rightop, RelabelType))
rightop = (Node *) ((RelabelType *) rightop)->arg;
- if (IsA(leftop, Var) && IsA(rightop, Const))
+ if (IsA(leftop, Var) &&IsA(rightop, Const))
{
var = (Var *) leftop;
cst = (Const *) rightop;
varonleft = true;
}
- else if (IsA(leftop, Const) && IsA(rightop, Var))
+ else if (IsA(leftop, Const) &&IsA(rightop, Var))
{
var = (Var *) rightop;
cst = (Const *) leftop;
groups = build_distinct_groups(nitems, items, mss, &ngroups);
/*
- * Maximum number of MCV items to store, based on the statistics target
- * we computed for the statistics object (from target set for the object
+ * Maximum number of MCV items to store, based on the statistics target we
+ * computed for the statistics object (from target set for the object
* itself, attributes and the system default). In any case, we can't keep
* more groups than we have available.
*/
{
int j;
SortItem key;
- MultiSortSupport tmp;
+ MultiSortSupport tmp;
/* frequencies for values in each attribute */
SortItem **freqs;
static int
sort_item_compare(const void *a, const void *b, void *arg)
{
- SortSupport ssup = (SortSupport) arg;
+ SortSupport ssup = (SortSupport) arg;
SortItem *ia = (SortItem *) a;
SortItem *ib = (SortItem *) b;
/* allocate arrays for all columns as a single chunk */
ptr = palloc(MAXALIGN(sizeof(SortItem *) * mss->ndims) +
- mss->ndims * MAXALIGN(sizeof(SortItem) * ngroups));
+ mss->ndims * MAXALIGN(sizeof(SortItem) * ngroups));
/* initial array of pointers */
result = (SortItem **) ptr;
for (dim = 0; dim < mss->ndims; dim++)
{
- SortSupport ssup = &mss->ssup[dim];
+ SortSupport ssup = &mss->ssup[dim];
/* array of values for a single column */
result[dim] = (SortItem *) ptr;
/*
* Identify distinct values, compute frequency (there might be
- * multiple MCV items containing this value, so we need to sum
- * counts from all of them.
+ * multiple MCV items containing this value, so we need to sum counts
+ * from all of them).
*/
ncounts[dim] = 1;
for (i = 1; i < ngroups; i++)
{
- if (sort_item_compare(&result[dim][i-1], &result[dim][i], ssup) == 0)
+ if (sort_item_compare(&result[dim][i - 1], &result[dim][i], ssup) == 0)
{
- result[dim][ncounts[dim]-1].count += result[dim][i].count;
+ result[dim][ncounts[dim] - 1].count += result[dim][i].count;
continue;
}
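The rewrapped comment above describes collapsing duplicate values in a sorted per-column array by summing their counts. A self-contained sketch of that dedupe-and-sum step follows, using plain ints instead of SortItem/Datum; the names are invented for illustration and this is not the actual build_column_frequencies code.

#include <stdio.h>

typedef struct
{
    int         value;          /* stands in for the per-column Datum */
    int         count;          /* how many rows this value represents */
} Item;

/*
 * Collapse adjacent equal values in a sorted array, summing their counts.
 * Returns the number of distinct values; results are packed at the front of
 * the array, mirroring the in-place style used in the hunk above.
 */
static int
sum_duplicate_counts(Item *items, int n)
{
    int         ndistinct = 1;

    for (int i = 1; i < n; i++)
    {
        if (items[i].value == items[i - 1].value)
        {
            /* same value as the previous group: accumulate its count */
            items[ndistinct - 1].count += items[i].count;
            continue;
        }
        items[ndistinct++] = items[i];
    }
    return ndistinct;
}

int
main(void)
{
    Item        items[] = {{1, 2}, {1, 3}, {4, 1}, {4, 4}, {9, 1}};
    int         n = sum_duplicate_counts(items, 5);

    for (int i = 0; i < n; i++)
        printf("value=%d count=%d\n", items[i].value, items[i].count);
    return 0;
}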
*/
info[dim].nvalues = ndistinct;
- if (info[dim].typbyval) /* by-value data types */
+ if (info[dim].typbyval) /* by-value data types */
{
info[dim].nbytes = info[dim].nvalues * info[dim].typlen;
/*
* We copy the data into the MCV item during deserialization, so
* we don't need to allocate any extra space.
- */
+ */
info[dim].nbytes_aligned = 0;
}
- else if (info[dim].typlen > 0) /* fixed-length by-ref */
+ else if (info[dim].typlen > 0) /* fixed-length by-ref */
{
/*
* We don't care about alignment in the serialized data, so we
* pack the data as much as possible. But we also track how much
- * data will be needed after deserialization, and in that case
- * we need to account for alignment of each item.
+ * data will be needed after deserialization, and in that case we
+ * need to account for alignment of each item.
*
* Note: As the items are fixed-length, we could easily compute
* this during deserialization, but we do it here anyway.
/* serialized length (uint32 length + data) */
len = VARSIZE_ANY_EXHDR(values[dim][i]);
- info[dim].nbytes += sizeof(uint32); /* length */
- info[dim].nbytes += len; /* value (no header) */
+ info[dim].nbytes += sizeof(uint32); /* length */
+ info[dim].nbytes += len; /* value (no header) */
/*
* During deserialization we'll build regular varlena values
/* c-strings include terminator, so +1 byte */
len = strlen(DatumGetCString(values[dim][i])) + 1;
- info[dim].nbytes += sizeof(uint32); /* length */
- info[dim].nbytes += len; /* value */
+ info[dim].nbytes += sizeof(uint32); /* length */
+ info[dim].nbytes += len; /* value */
/* space needed for properly aligned deserialized copies */
info[dim].nbytes_aligned += MAXALIGN(len);
* whole serialized MCV list (varlena header, MCV header, dimension info
* for each attribute, deduplicated values and items).
*/
- total_length = (3 * sizeof(uint32)) /* magic + type + nitems */
- + sizeof(AttrNumber) /* ndimensions */
- + (ndims * sizeof(Oid)); /* attribute types */
+ total_length = (3 * sizeof(uint32)) /* magic + type + nitems */
+ + sizeof(AttrNumber) /* ndimensions */
+ + (ndims * sizeof(Oid)); /* attribute types */
/* dimension info */
total_length += ndims * sizeof(DimensionInfo);
info[dim].nvalues, sizeof(Datum),
compare_scalars_simple, &ssup[dim]);
- Assert(value != NULL); /* serialization or deduplication error */
+ Assert(value != NULL); /* serialization or deduplication
+ * error */
/* compute index within the deduplicated array */
index = (uint16) (value - values[dim]);
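The rewrapped Assert above follows a lookup of each value in the per-dimension deduplicated array; the serialized item then stores the position of the match as a uint16. A minimal sketch of that lookup-and-index step, using plain ints and the C library bsearch rather than the PostgreSQL helpers:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>

static int
compare_int(const void *a, const void *b)
{
    int         ia = *(const int *) a;
    int         ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

int
main(void)
{
    /* deduplicated, sorted dictionary of values for one dimension */
    int         values[] = {3, 7, 19, 42};
    int         nvalues = 4;
    int         needle = 19;
    int        *match;
    uint16_t    index;

    match = bsearch(&needle, values, nvalues, sizeof(int), compare_int);
    assert(match != NULL);      /* would indicate a deduplication error */

    /* index within the deduplicated array, computed as a pointer difference */
    index = (uint16_t) (match - values);
    printf("index = %u\n", (unsigned) index);
    return 0;
}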
* serialized data - it's not aligned properly, and it may disappear while
* we're still using the MCV list, e.g. due to catcache release.
*
- * We do care about alignment here, because we will allocate all the pieces
- * at once, but then use pointers to different parts.
+ * We do care about alignment here, because we will allocate all the
+ * pieces at once, but then use pointers to different parts.
*/
mcvlen = MAXALIGN(offsetof(MCVList, items) + (sizeof(MCVItem) * nitems));
/* finally translate the indexes (for non-NULL only) */
for (dim = 0; dim < ndims; dim++)
{
- uint16 index;
+ uint16 index;
memcpy(&index, ptr, sizeof(uint16));
ptr += sizeof(uint16);
/* stuff done on every call of the function */
funcctx = SRF_PERCALL_SETUP();
- if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more left to send */
+ if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more
+ * left to send */
{
Datum values[5];
bool nulls[5];
{
astate_nulls = accumArrayResult(astate_nulls,
- BoolGetDatum(item->isnull[i]),
- false,
- BOOLOID,
- CurrentMemoryContext);
+ BoolGetDatum(item->isnull[i]),
+ false,
+ BOOLOID,
+ CurrentMemoryContext);
if (!item->isnull[i])
{
txt = cstring_to_text(DatumGetPointer(val));
astate_values = accumArrayResult(astate_values,
- PointerGetDatum(txt),
- false,
- TEXTOID,
- CurrentMemoryContext);
+ PointerGetDatum(txt),
+ false,
+ TEXTOID,
+ CurrentMemoryContext);
}
else
astate_values = accumArrayResult(astate_values,
- (Datum) 0,
- true,
- TEXTOID,
- CurrentMemoryContext);
+ (Datum) 0,
+ true,
+ TEXTOID,
+ CurrentMemoryContext);
}
values[0] = Int32GetDatum(funcctx->call_cntr);
MCVItem *item = &mcvlist->items[i];
/*
- * When the MCV item or the Const value is NULL we can treat
- * this as a mismatch. We must not call the operator because
- * of strictness.
+ * When the MCV item or the Const value is NULL we can
+ * treat this as a mismatch. We must not call the operator
+ * because of strictness.
*/
if (item->isnull[idx] || cst->constisnull)
{
*
* We don't store collations used to build the statistics,
* but we can use the collation for the attribute itself,
- * as stored in varcollid. We do reset the statistics after
- * a type change (including collation change), so this is
- * OK. We may need to relax this after allowing extended
- * statistics on expressions.
+ * as stored in varcollid. We do reset the statistics
+ * after a type change (including collation change), so
+ * this is OK. We may need to relax this after allowing
+ * extended statistics on expressions.
*/
if (varonleft)
match = DatumGetBool(FunctionCall2Coll(&opproc,
}
else if (IsA(clause, ScalarArrayOpExpr))
{
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
FmgrInfo opproc;
/* valid only after examine_clause_args returns true */
MCVItem *item = &mcvlist->items[i];
/*
- * When the MCV item or the Const value is NULL we can treat
- * this as a mismatch. We must not call the operator because
- * of strictness.
+ * When the MCV item or the Const value is NULL we can
+ * treat this as a mismatch. We must not call the operator
+ * because of strictness.
*/
if (item->isnull[idx] || cst->constisnull)
{
for (j = 0; j < num_elems; j++)
{
- Datum elem_value = elem_values[j];
- bool elem_isnull = elem_nulls[j];
- bool elem_match;
+ Datum elem_value = elem_values[j];
+ bool elem_isnull = elem_nulls[j];
+ bool elem_match;
/* NULL values always evaluate as not matching. */
if (elem_isnull)
bufHdr->tag.forkNum == forkNum[j] &&
bufHdr->tag.blockNum >= firstDelBlock[j])
{
- InvalidateBuffer(bufHdr); /* releases spinlock */
+ InvalidateBuffer(bufHdr); /* releases spinlock */
break;
}
}
{
buf = fsm_readbuf(rel, first_removed_address, false);
if (!BufferIsValid(buf))
- return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
+ return InvalidBlockNumber; /* nothing to do; the FSM was already
+ * smaller */
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/* NO EREPORT(ERROR) from here till changes are logged */
{
new_nfsmblocks = fsm_logical_to_physical(first_removed_address);
if (smgrnblocks(rel->rd_smgr, FSM_FORKNUM) <= new_nfsmblocks)
- return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
+ return InvalidBlockNumber; /* nothing to do; the FSM was already
+ * smaller */
}
return new_nfsmblocks;
!PostmasterIsAlive())
{
/*
- * The extra PostmasterIsAliveInternal() check prevents false alarms on
- * systems that give a different value for getppid() while being traced
- * by a debugger.
+ * The extra PostmasterIsAliveInternal() check prevents false alarms
+ * on systems that give a different value for getppid() while being
+ * traced by a debugger.
*/
set->report_postmaster_not_running = true;
}
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- proc->delayChkpt = false; /* be sure this is cleared in abort */
+ proc->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
Assert(pgxact->nxids == 0);
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- proc->delayChkpt = false; /* be sure this is cleared in abort */
+ proc->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
/* Clear the subtransaction-XID cache too while holding the lock */
{
pid_t pss_pid;
sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
- pg_atomic_uint64 pss_barrierGeneration;
- pg_atomic_uint32 pss_barrierCheckMask;
+ pg_atomic_uint64 pss_barrierGeneration;
+ pg_atomic_uint32 pss_barrierCheckMask;
} ProcSignalSlot;
/*
*/
typedef struct
{
- pg_atomic_uint64 psh_barrierGeneration;
- ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
+ pg_atomic_uint64 psh_barrierGeneration;
+ ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
} ProcSignalHeader;
/*
Size
ProcSignalShmemSize(void)
{
- Size size;
+ Size size;
size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
/* If we're first, initialize. */
if (!found)
{
- int i;
+ int i;
pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
/*
* Initialize barrier state. Since we're a brand-new process, there
* shouldn't be any leftover backend-private state that needs to be
- * updated. Therefore, we can broadcast the latest barrier generation
- * and disregard any previously-set check bits.
+ * updated. Therefore, we can broadcast the latest barrier generation and
+ * disregard any previously-set check bits.
*
* NB: This only works if this initialization happens early enough in the
* startup sequence that we haven't yet cached any state that might need
- * to be invalidated. That's also why we have a memory barrier here, to
- * be sure that any later reads of memory happen strictly after this.
+ * to be invalidated. That's also why we have a memory barrier here, to be
+ * sure that any later reads of memory happen strictly after this.
*/
pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0);
barrier_generation =
uint64
EmitProcSignalBarrier(ProcSignalBarrierType type)
{
- uint64 flagbit = UINT64CONST(1) << (uint64) type;
- uint64 generation;
+ uint64 flagbit = UINT64CONST(1) << (uint64) type;
+ uint64 generation;
/*
* Set all the flags.
*
- * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this
- * is totally ordered with respect to anything the caller did before, and
- * anything that we do afterwards. (This is also true of the later call
- * to pg_atomic_add_fetch_u64.)
+ * Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
+ * totally ordered with respect to anything the caller did before, and
+ * anything that we do afterwards. (This is also true of the later call to
+ * pg_atomic_add_fetch_u64.)
*/
for (int i = 0; i < NumProcSignalSlots; i++)
{
* generation.
*
* Concurrency is not a problem here. Backends that have exited don't
- * matter, and new backends that have joined since we entered this function
- * must already have current state, since the caller is responsible for
- * making sure that the relevant state is entirely visible before calling
- * this function in the first place. We still have to wake them up -
- * because we can't distinguish between such backends and older backends
- * that need to update state - but they won't actually need to change
- * any state.
+ * matter, and new backends that have joined since we entered this
+ * function must already have current state, since the caller is
+ * responsible for making sure that the relevant state is entirely visible
+ * before calling this function in the first place. We still have to wake
+ * them up - because we can't distinguish between such backends and older
+ * backends that need to update state - but they won't actually need to
+ * change any state.
*/
for (int i = NumProcSignalSlots - 1; i >= 0; i--)
{
volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
- pid_t pid = slot->pss_pid;
+ pid_t pid = slot->pss_pid;
if (pid != 0)
kill(pid, SIGUSR1);
void
WaitForProcSignalBarrier(uint64 generation)
{
- long timeout = 125L;
+ long timeout = 125L;
for (int i = NumProcSignalSlots - 1; i >= 0; i--)
{
volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
- uint64 oldval;
+ uint64 oldval;
oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
while (oldval < generation)
{
- int events;
+ int events;
CHECK_FOR_INTERRUPTS();
}
/*
- * The caller is probably calling this function because it wants to
- * read the shared state or perform further writes to shared state once
- * all backends are known to have absorbed the barrier. However, the
- * read of pss_barrierGeneration was performed unlocked; insert a memory
- * barrier to separate it from whatever follows.
+ * The caller is probably calling this function because it wants to read
+ * the shared state or perform further writes to shared state once all
+ * backends are known to have absorbed the barrier. However, the read of
+ * pss_barrierGeneration was performed unlocked; insert a memory barrier
+ * to separate it from whatever follows.
*/
pg_memory_barrier();
}
void
ProcessProcSignalBarrier(void)
{
- uint64 generation;
- uint32 flags;
+ uint64 generation;
+ uint32 flags;
/* Exit quickly if there's no work to do. */
if (!ProcSignalBarrierPending)
ProcSignalBarrierPending = false;
/*
- * Read the current barrier generation, and then get the flags that
- * are set for this backend. Note that pg_atomic_exchange_u32 is a full
+ * Read the current barrier generation, and then get the flags that are
+ * set for this backend. Note that pg_atomic_exchange_u32 is a full
* barrier, so we're guaranteed that the read of the barrier generation
* happens before we atomically extract the flags, and that any subsequent
* state changes happen afterward.
* machinery gets committed. Rename PROCSIGNAL_BARRIER_PLACEHOLDER to
* PROCSIGNAL_BARRIER_SOMETHING_ELSE where SOMETHING_ELSE is something
* appropriately descriptive. Get rid of this function and instead have
- * ProcessBarrierSomethingElse. Most likely, that function should live
- * in the file pertaining to that subsystem, rather than here.
+ * ProcessBarrierSomethingElse. Most likely, that function should live in
+ * the file pertaining to that subsystem, rather than here.
*/
}
if (slot != NULL)
{
- uint64 mygen;
- uint64 curgen;
+ uint64 mygen;
+ uint64 curgen;
mygen = pg_atomic_read_u64(&slot->pss_barrierGeneration);
curgen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
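The procsignal hunks above all revolve around one pattern: an emitter sets a per-slot flag bit and bumps a shared generation counter, each backend absorbs its flags and advertises the generation it has reached, and a waiter checks that every slot has caught up. Below is a minimal single-process sketch of that handshake using C11 atomics; the names and layout are illustrative only, not the actual ProcSignal structures or functions.

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

#define NSLOTS 4

/* one slot per participating process; loosely mirrors ProcSignalSlot */
typedef struct
{
    atomic_uint_fast64_t barrier_generation;    /* last generation absorbed */
    atomic_uint_fast32_t barrier_check_mask;    /* pending barrier-type bits */
} Slot;

static Slot slots[NSLOTS];
static atomic_uint_fast64_t shared_generation;  /* latest generation emitted */

/* emitter: set the flag bit in every slot, then advance the generation */
static uint64_t
emit_barrier(int type)
{
    uint32_t    flagbit = UINT32_C(1) << type;

    for (int i = 0; i < NSLOTS; i++)
        atomic_fetch_or(&slots[i].barrier_check_mask, flagbit);

    /* seq_cst RMW: the new generation is published after all flag bits */
    return atomic_fetch_add(&shared_generation, 1) + 1;
}

/* participant: absorb pending flags, then advertise the new generation */
static void
process_barrier(int slotno)
{
    uint64_t    generation = atomic_load(&shared_generation);
    uint32_t    flags = (uint32_t) atomic_exchange(&slots[slotno].barrier_check_mask, 0);

    if (flags != 0)
        printf("slot %d absorbing flags 0x%x\n", slotno, (unsigned) flags);

    atomic_store(&slots[slotno].barrier_generation, generation);
}

/* waiter: all slots must have reached the requested generation */
static int
barrier_absorbed_everywhere(uint64_t generation)
{
    for (int i = 0; i < NSLOTS; i++)
        if (atomic_load(&slots[i].barrier_generation) < generation)
            return 0;
    return 1;
}

int
main(void)
{
    uint64_t    gen = emit_barrier(0);

    printf("absorbed before processing: %d\n", barrier_absorbed_everywhere(gen));
    for (int i = 0; i < NSLOTS; i++)
        process_barrier(i);
    printf("absorbed after processing: %d\n", barrier_absorbed_everywhere(gen));
    return 0;
}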
}
else
{
- Size allocated_size;
+ Size allocated_size;
/* It isn't in the table yet. allocate and initialize it */
structPtr = ShmemAllocRaw(size, &allocated_size);
MemoryContext oldcontext;
HASH_SEQ_STATUS hstat;
ShmemIndexEnt *ent;
- Size named_allocated = 0;
+ Size named_allocated = 0;
Datum values[PG_GET_SHMEM_SIZES_COLS];
bool nulls[PG_GET_SHMEM_SIZES_COLS];
found_conflict = true;
else
found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
- lock, proclock);
+ lock, proclock);
if (!found_conflict)
{
void
smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nblocks)
{
- int i;
+ int i;
/*
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
/*
* We might as well update the local smgr_fsm_nblocks and
- * smgr_vm_nblocks settings. The smgr cache inval message that
- * this function sent will cause other backends to invalidate
- * their copies of smgr_fsm_nblocks and smgr_vm_nblocks,
- * and these ones too at the next command boundary.
- * But these ensure they aren't outright wrong until then.
+ * smgr_vm_nblocks settings. The smgr cache inval message that this
+ * function sent will cause other backends to invalidate their copies
+ * of smgr_fsm_nblocks and smgr_vm_nblocks, and these ones too at the
+ * next command boundary. But these ensure they aren't outright wrong
+ * until then.
*/
if (forknum[i] == FSM_FORKNUM)
reln->smgr_fsm_nblocks = nblocks[i];
/*
* Surprisingly, ALTER SYSTEM meets all our definitions of
* read-only: it changes nothing that affects the output of
- * pg_dump, it doesn't write WAL or imperil the application
- * of future WAL, and it doesn't depend on any state that needs
+ * pg_dump, it doesn't write WAL or imperil the application of
+ * future WAL, and it doesn't depend on any state that needs
* to be synchronized with parallel workers.
*
* So, despite the fact that it writes to a file, it's read
case T_VariableSetStmt:
{
/*
- * These modify only backend-local state, so they're OK to
- * run in a read-only transaction or on a standby. However,
- * they are disallowed in parallel mode, because they either
- * rely upon or modify backend-local state that might not be
+ * These modify only backend-local state, so they're OK to run
+ * in a read-only transaction or on a standby. However, they
+ * are disallowed in parallel mode, because they either rely
+ * upon or modify backend-local state that might not be
* synchronized among cooperating backends.
*/
return COMMAND_OK_IN_RECOVERY | COMMAND_OK_IN_READ_ONLY_TXN;
case T_VacuumStmt:
{
/*
- * These commands write WAL, so they're not strictly read-only,
- * and running them in parallel workers isn't supported.
+ * These commands write WAL, so they're not strictly
+ * read-only, and running them in parallel workers isn't
+ * supported.
*
* However, they don't change the database state in a way that
* would affect pg_dump output, so it's fine to run them in a
case T_CopyStmt:
{
- CopyStmt *stmt = (CopyStmt *) parsetree;
+ CopyStmt *stmt = (CopyStmt *) parsetree;
/*
- * You might think that COPY FROM is not at all read only,
- * but it's OK to copy into a temporary table, because that
+ * You might think that COPY FROM is not at all read only, but
+ * it's OK to copy into a temporary table, because that
* wouldn't change the output of pg_dump. If the target table
* turns out to be non-temporary, DoCopy itself will call
* PreventCommandIfReadOnly.
case T_VariableShowStmt:
{
/*
- * These commands don't modify any data and are safe to run
- * in a parallel worker.
+ * These commands don't modify any data and are safe to run in
+ * a parallel worker.
*/
return COMMAND_IS_STRICTLY_READ_ONLY;
}
{
/*
* NOTIFY requires an XID assignment, so it can't be permitted
- * on a standby. Perhaps LISTEN could, since without NOTIFY
- * it would be OK to just do nothing, at least until promotion,
+ * on a standby. Perhaps LISTEN could, since without NOTIFY it
+ * would be OK to just do nothing, at least until promotion,
* but we currently prohibit it lest the user get the wrong
* idea.
*
case T_LockStmt:
{
- LockStmt *stmt = (LockStmt *) parsetree;
+ LockStmt *stmt = (LockStmt *) parsetree;
/*
* Only weaker locker modes are allowed during recovery. The
- * restrictions here must match those in LockAcquireExtended().
+ * restrictions here must match those in
+ * LockAcquireExtended().
*/
if (stmt->mode > RowExclusiveLock)
return COMMAND_OK_IN_READ_ONLY_TXN;
TransactionStmt *stmt = (TransactionStmt *) parsetree;
/*
- * PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all
- * write WAL, so they're not read-only in the strict sense;
- * but the first and third do not change pg_dump output, so
- * they're OK in a read-only transactions.
+ * PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all write
+ * WAL, so they're not read-only in the strict sense; but the
+ * first and third do not change pg_dump output, so they're OK
+ * in a read-only transaction.
*
* We also consider COMMIT PREPARED to be OK in a read-only
* transaction environment, by way of exception.
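The reflowed comments above belong to the utility-command classifier, which returns a bitmask describing where a command may run. A rough sketch of that flag-bit idea follows; the flag values, names, and command kinds here are invented for illustration and do not match the actual PostgreSQL definitions.

#include <stdio.h>

/* illustrative capability bits, loosely modeled on the classifier above */
#define CMD_OK_IN_READ_ONLY_TXN   0x1
#define CMD_OK_IN_RECOVERY        0x2
#define CMD_OK_IN_PARALLEL_MODE   0x4
#define CMD_STRICTLY_READ_ONLY \
    (CMD_OK_IN_READ_ONLY_TXN | CMD_OK_IN_RECOVERY | CMD_OK_IN_PARALLEL_MODE)

typedef enum
{
    CMD_SHOW,                   /* changes nothing at all */
    CMD_SET,                    /* backend-local state only */
    CMD_NOTIFY                  /* needs an XID, so not allowed on a standby */
} CommandKind;

static int
classify_command(CommandKind kind)
{
    switch (kind)
    {
        case CMD_SHOW:
            return CMD_STRICTLY_READ_ONLY;
        case CMD_SET:
            /* fine in recovery and read-only txns, but not in parallel mode */
            return CMD_OK_IN_RECOVERY | CMD_OK_IN_READ_ONLY_TXN;
        case CMD_NOTIFY:
            return CMD_OK_IN_READ_ONLY_TXN;
    }
    return 0;
}

int
main(void)
{
    int         flags = classify_command(CMD_SET);

    if (!(flags & CMD_OK_IN_PARALLEL_MODE))
        printf("command must not run in a parallel worker\n");
    return 0;
}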
case USE_XSD_DATES:
/* compatible with ISO date formats */
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = '-';
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '-';
}
*str++ = '/';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
break;
case USE_GERMAN_DATES:
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '.';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
break;
case USE_POSTGRES_DATES:
}
*str++ = '-';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
break;
}
case USE_XSD_DATES:
/* Compatible with ISO-8601 date formats */
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = '-';
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '-';
}
*str++ = '/';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = ' ';
str = pg_ultostr_zeropad(str, tm->tm_hour, 2);
*str++ = ':';
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '.';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = ' ';
str = pg_ultostr_zeropad(str, tm->tm_hour, 2);
*str++ = ':';
str = AppendTimestampSeconds(str, tm, fsec);
*str++ = ' ';
str = pg_ultostr_zeropad(str,
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
if (print_tz)
{
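A brief aside on the expression repeated throughout the datetime hunks above: (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1) maps the internal year numbering, where years BC are zero or negative, onto the positive value that gets zero-padded into the output. A small self-contained illustration; display_year is an invented name and %04d stands in for pg_ultostr_zeropad.

#include <stdio.h>

/* map the internal year to the value printed before an optional "BC" suffix */
static int
display_year(int tm_year)
{
    /* internally, 1 = 1 AD, 0 = 1 BC, -1 = 2 BC, and so on */
    return (tm_year > 0) ? tm_year : -(tm_year - 1);
}

int
main(void)
{
    int         years[] = {2020, 1, 0, -1};

    for (int i = 0; i < 4; i++)
    {
        int         y = years[i];

        printf("internal %5d -> %04d%s\n", y, display_year(y),
               (y > 0) ? "" : " BC");
    }
    return 0;
}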
static int32
int4gcd_internal(int32 arg1, int32 arg2)
{
- int32 swap;
- int32 a1, a2;
+ int32 swap;
+ int32 a1,
+ a2;
/*
* Put the greater absolute value in arg1.
Datum
int4gcd(PG_FUNCTION_ARGS)
{
- int32 arg1 = PG_GETARG_INT32(0);
- int32 arg2 = PG_GETARG_INT32(1);
- int32 result;
+ int32 arg1 = PG_GETARG_INT32(0);
+ int32 arg2 = PG_GETARG_INT32(1);
+ int32 result;
result = int4gcd_internal(arg1, arg2);
Datum
int4lcm(PG_FUNCTION_ARGS)
{
- int32 arg1 = PG_GETARG_INT32(0);
- int32 arg2 = PG_GETARG_INT32(1);
- int32 gcd;
- int32 result;
+ int32 arg1 = PG_GETARG_INT32(0);
+ int32 arg2 = PG_GETARG_INT32(1);
+ int32 gcd;
+ int32 result;
/*
* Handle lcm(x, 0) = lcm(0, x) = 0 as a special case. This prevents a
static int64
int8gcd_internal(int64 arg1, int64 arg2)
{
- int64 swap;
- int64 a1, a2;
+ int64 swap;
+ int64 a1,
+ a2;
/*
* Put the greater absolute value in arg1.
Datum
int8gcd(PG_FUNCTION_ARGS)
{
- int64 arg1 = PG_GETARG_INT64(0);
- int64 arg2 = PG_GETARG_INT64(1);
- int64 result;
+ int64 arg1 = PG_GETARG_INT64(0);
+ int64 arg2 = PG_GETARG_INT64(1);
+ int64 result;
result = int8gcd_internal(arg1, arg2);
Datum
int8lcm(PG_FUNCTION_ARGS)
{
- int64 arg1 = PG_GETARG_INT64(0);
- int64 arg2 = PG_GETARG_INT64(1);
- int64 gcd;
- int64 result;
+ int64 arg1 = PG_GETARG_INT64(0);
+ int64 arg2 = PG_GETARG_INT64(1);
+ int64 gcd;
+ int64 result;
/*
* Handle lcm(x, 0) = lcm(0, x) = 0 as a special case. This prevents a
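The gcd/lcm hunks above only reindent declarations, but the surrounding logic is worth a quick illustration: Euclid's algorithm on absolute values, with lcm(x, 0) treated as 0 so the division never sees a zero gcd. The sketch below is a simplified standalone version; unlike the real int4gcd/int4lcm it ignores INT32_MIN and omits the overflow checks.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* greatest common divisor of the absolute values, gcd(0, 0) defined as 0 */
static int32_t
gcd32(int32_t a, int32_t b)
{
    /* work on non-negative copies; INT32_MIN is ignored in this sketch */
    int32_t     a1 = (int32_t) labs(a);
    int32_t     a2 = (int32_t) labs(b);

    while (a2 != 0)
    {
        int32_t     tmp = a1 % a2;

        a1 = a2;
        a2 = tmp;
    }
    return a1;
}

static int32_t
lcm32(int32_t a, int32_t b)
{
    /* lcm(x, 0) = lcm(0, x) = 0; also avoids dividing by gcd(0, 0) = 0 */
    if (a == 0 || b == 0)
        return 0;

    a = (int32_t) labs(a);
    b = (int32_t) labs(b);

    /* divide first to delay overflow; the real code checks it explicitly */
    return (a / gcd32(a, b)) * b;
}

int
main(void)
{
    printf("gcd(12, 18) = %d\n", gcd32(12, 18));
    printf("lcm(4, 6)   = %d\n", lcm32(4, 6));
    printf("lcm(7, 0)   = %d\n", lcm32(7, 0));
    return 0;
}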
JsonLexContext *lex;
JsonTokenType tok;
char *type;
- JsonParseErrorType result;
+ JsonParseErrorType result;
json = PG_GETARG_TEXT_PP(0);
lex = makeJsonLexContext(json, false);
void
pg_parse_json_or_ereport(JsonLexContext *lex, JsonSemAction *sem)
{
- JsonParseErrorType result;
+ JsonParseErrorType result;
result = pg_parse_json(lex, sem);
if (result != JSON_SUCCESS)
/* ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); */
/* Jsonb *newval = PG_GETARG_JSONB_P(2); */
/* bool create = PG_GETARG_BOOL(3); */
- text *handle_null;
- char *handle_val;
+ text *handle_null;
+ char *handle_val;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(3))
PG_RETURN_NULL();
errmsg("null_value_treatment must be \"delete_key\", \"return_target\", \"use_json_null\", or \"raise_exception\"")));
/* if the new value isn't an SQL NULL just call jsonb_set */
- if (! PG_ARGISNULL(2))
+ if (!PG_ARGISNULL(2))
return jsonb_set(fcinfo);
handle_null = PG_GETARG_TEXT_P(4);
handle_val = text_to_cstring(handle_null);
- if (strcmp(handle_val,"raise_exception") == 0)
+ if (strcmp(handle_val, "raise_exception") == 0)
{
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
}
else if (strcmp(handle_val, "use_json_null") == 0)
{
- Datum newval;
+ Datum newval;
newval = DirectFunctionCall1(jsonb_in, CStringGetDatum("null"));
else if (strcmp(handle_val, "return_target") == 0)
{
Jsonb *in = PG_GETARG_JSONB_P(0);
+
PG_RETURN_JSONB_P(in);
}
else
decimalLength32(const uint32 v)
{
int t;
- static uint32 PowersOfTen[] =
- {1, 10, 100,
- 1000, 10000, 100000,
- 1000000, 10000000, 100000000,
- 1000000000};
+ static const uint32 PowersOfTen[] = {
+ 1, 10, 100,
+ 1000, 10000, 100000,
+ 1000000, 10000000, 100000000,
+ 1000000000
+ };
+
/*
* Compute base-10 logarithm by dividing the base-2 logarithm by a
* good-enough approximation of the base-2 logarithm of 10
decimalLength64(const uint64 v)
{
int t;
- static uint64 PowersOfTen[] = {
- UINT64CONST(1), UINT64CONST(10),
- UINT64CONST(100), UINT64CONST(1000),
- UINT64CONST(10000), UINT64CONST(100000),
- UINT64CONST(1000000), UINT64CONST(10000000),
- UINT64CONST(100000000), UINT64CONST(1000000000),
- UINT64CONST(10000000000), UINT64CONST(100000000000),
- UINT64CONST(1000000000000), UINT64CONST(10000000000000),
- UINT64CONST(100000000000000), UINT64CONST(1000000000000000),
- UINT64CONST(10000000000000000), UINT64CONST(100000000000000000),
+ static const uint64 PowersOfTen[] = {
+ UINT64CONST(1), UINT64CONST(10),
+ UINT64CONST(100), UINT64CONST(1000),
+ UINT64CONST(10000), UINT64CONST(100000),
+ UINT64CONST(1000000), UINT64CONST(10000000),
+ UINT64CONST(100000000), UINT64CONST(1000000000),
+ UINT64CONST(10000000000), UINT64CONST(100000000000),
+ UINT64CONST(1000000000000), UINT64CONST(10000000000000),
+ UINT64CONST(100000000000000), UINT64CONST(1000000000000000),
+ UINT64CONST(10000000000000000), UINT64CONST(100000000000000000),
UINT64CONST(1000000000000000000), UINT64CONST(10000000000000000000)
};
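The now-const PowersOfTen tables above back the decimalLength helpers, which derive the decimal digit count from the position of the highest set bit plus one table lookup. Here is a self-contained sketch of the 32-bit variant; it uses __builtin_clz in place of pg_leftmost_one_pos32, so it assumes a GCC/Clang-style compiler and adds its own zero guard.

#include <stdio.h>
#include <stdint.h>

static int
decimal_length32(uint32_t v)
{
    static const uint32_t powers_of_ten[] = {
        1, 10, 100,
        1000, 10000, 100000,
        1000000, 10000000, 100000000,
        1000000000
    };
    int         t;

    if (v == 0)
        return 1;               /* "0" still needs one digit */

    /*
     * Approximate log10 from the bit length: multiplying by 1233/4096 is a
     * good-enough stand-in for dividing by log2(10).
     */
    t = (32 - __builtin_clz(v)) * 1233 / 4096;

    /* t is either the digit count or one less; comparing to 10^t settles it */
    return t + (v >= powers_of_ten[t]);
}

int
main(void)
{
    uint32_t    samples[] = {1, 9, 10, 999, 1000, 4294967295u};

    for (int i = 0; i < 6; i++)
        printf("%u -> %d digits\n", samples[i], decimal_length32(samples[i]));
    return 0;
}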
pg_stat_get_slru(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_SLRU_COLS 9
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- int i;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ int i;
PgStat_SLRUStats *stats;
/* check to see if caller supports us returning a tuplestore */
/* request SLRU stats from the stat collector */
stats = pgstat_fetch_slru();
- for (i = 0; ; i++)
+ for (i = 0;; i++)
{
/* for each row */
Datum values[PG_STAT_GET_SLRU_COLS];
bool nulls[PG_STAT_GET_SLRU_COLS];
- PgStat_SLRUStats stat = stats[i];
+ PgStat_SLRUStats stat = stats[i];
const char *name;
name = pgstat_slru_name(i);
const RangeType *tst);
static int bound_cmp(const void *a, const void *b, void *arg);
-static int adjacent_inner_consistent(TypeCacheEntry *typcache,
- const RangeBound *arg, const RangeBound *centroid,
- const RangeBound *prev);
-static int adjacent_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *arg,
- const RangeBound *centroid);
+static int adjacent_inner_consistent(TypeCacheEntry *typcache,
+ const RangeBound *arg, const RangeBound *centroid,
+ const RangeBound *prev);
+static int adjacent_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *arg,
+ const RangeBound *centroid);
/*
* SP-GiST 'config' interface function.
char *nspname;
/*
- * Would this collation be found by regcollationin? If not, qualify it.
+ * Would this collation be found by regcollationin? If not,
+ * qualify it.
*/
if (CollationIsVisible(collationid))
nspname = NULL;
initStringInfo(&buf);
get_opclass_name(opclass, InvalidOid, &buf);
- return &buf.data[1]; /* get_opclass_name() prepends space */
+ return &buf.data[1]; /* get_opclass_name() prepends space */
}
/*
char *value;
/*
- * Each array element should have the form name=value. If the "="
- * is missing for some reason, treat it like an empty value.
+ * Each array element should have the form name=value. If the "=" is
+ * missing for some reason, treat it like an empty value.
*/
name = option;
separator = strchr(option, '=');
/*
* In general we need to quote the value; but to avoid unnecessary
- * clutter, do not quote if it is an identifier that would not
- * need quoting. (We could also allow numbers, but that is a bit
- * trickier than it looks --- for example, are leading zeroes
- * significant? We don't want to assume very much here about what
- * custom reloptions might mean.)
+ * clutter, do not quote if it is an identifier that would not need
+ * quoting. (We could also allow numbers, but that is a bit trickier
+ * than it looks --- for example, are leading zeroes significant? We
+ * don't want to assume very much here about what custom reloptions
+ * might mean.)
*/
if (quote_identifier(value) == value)
appendStringInfoString(buf, value);
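The reflowed comment above explains why flatten_reloptions quotes option values unless they already look like safe identifiers. A rough standalone sketch of such a "does this need quoting" check; it is deliberately simplified and, unlike the real quote_identifier, ignores keywords and multibyte characters.

#include <stdio.h>
#include <ctype.h>

/*
 * Return 1 if the value can be emitted as-is, 0 if it should be quoted.
 * Simplified rule: lowercase letters, digits and underscores only, and it
 * must not start with a digit.
 */
static int
is_safe_unquoted(const char *value)
{
    if (*value == '\0' || isdigit((unsigned char) *value))
        return 0;

    for (const char *p = value; *p; p++)
    {
        unsigned char c = (unsigned char) *p;

        if (!(islower(c) || isdigit(c) || c == '_'))
            return 0;
    }
    return 1;
}

int
main(void)
{
    const char *samples[] = {"on", "100", "some value", "fillfactor_90"};

    for (int i = 0; i < 4; i++)
        printf("%-15s -> %s\n", samples[i],
               is_safe_unquoted(samples[i]) ? "as-is" : "quote it");
    return 0;
}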
static bool
checkcondition_bit(void *checkval, QueryOperand *val, ExecPhraseData *data)
{
- void *key = (SignTSVector *) checkval;
+ void *key = (SignTSVector *) checkval;
/*
* we are not able to find a prefix in signature tree
static int
hemdist(SignTSVector *a, SignTSVector *b)
{
- int siglena = GETSIGLEN(a);
- int siglenb = GETSIGLEN(b);
+ int siglena = GETSIGLEN(a);
+ int siglenb = GETSIGLEN(b);
if (ISALLTRUE(a))
{
else
size_alpha = SIGLENBIT(siglen) -
sizebitvec((cache[j].allistrue) ?
- GETSIGN(datum_l) :
- GETSIGN(cache[j].sign),
- siglen);
+ GETSIGN(datum_l) :
+ GETSIGN(cache[j].sign),
+ siglen);
}
else
size_alpha = hemdistsign(cache[j].sign, GETSIGN(datum_l), siglen);
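The hunk above reindents the GiST signature code, where bit counts and Hamming distances between signatures drive the page split. A minimal sketch of those two measures over byte arrays follows; it uses __builtin_popcount instead of the lookup-table approach in the real code, so it again assumes a GCC/Clang-style compiler, and the names simply echo the originals.

#include <stdio.h>
#include <stdint.h>

#define SIGLEN 8                /* signature length in bytes, for illustration */

/* number of set bits in a signature */
static int
sizebitvec(const uint8_t *sign, int siglen)
{
    int         count = 0;

    for (int i = 0; i < siglen; i++)
        count += __builtin_popcount(sign[i]);
    return count;
}

/* Hamming distance: number of bit positions where the signatures differ */
static int
hemdistsign(const uint8_t *a, const uint8_t *b, int siglen)
{
    int         dist = 0;

    for (int i = 0; i < siglen; i++)
        dist += __builtin_popcount(a[i] ^ b[i]);
    return dist;
}

int
main(void)
{
    uint8_t     a[SIGLEN] = {0xFF, 0x00, 0x0F, 0, 0, 0, 0, 0};
    uint8_t     b[SIGLEN] = {0xF0, 0x00, 0x0F, 0, 0, 0, 0, 1};

    printf("bits(a) = %d\n", sizebitvec(a, SIGLEN));
    printf("hemdist(a, b) = %d\n", hemdistsign(a, b, SIGLEN));
    return 0;
}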
if (isnull)
result = (Datum) 0;
else
- result = datumCopy(attopts, false, -1); /* text[] */
+ result = datumCopy(attopts, false, -1); /* text[] */
ReleaseSysCache(tuple);
bool
get_index_isreplident(Oid index_oid)
{
- HeapTuple tuple;
- Form_pg_index rd_index;
- bool result;
+ HeapTuple tuple;
+ Form_pg_index rd_index;
+ bool result;
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid));
if (!HeapTupleIsValid(tuple))
if (relation->rd_rel->relispartition)
{
/* Add publications that the ancestors are in too. */
- List *ancestors = get_partition_ancestors(RelationGetRelid(relation));
- ListCell *lc;
+ List *ancestors = get_partition_ancestors(RelationGetRelid(relation));
+ ListCell *lc;
foreach(lc, ancestors)
{
- Oid ancestor = lfirst_oid(lc);
+ Oid ancestor = lfirst_oid(lc);
puboids = list_concat_unique_oid(puboids,
GetRelationPublications(ancestor));
* RelationGetIndexAttOptions
* get AM/opclass-specific options for an index parsed into a binary form
*/
-bytea **
+bytea **
RelationGetIndexAttOptions(Relation relation, bool copy)
{
MemoryContext oldcxt;
bytea **opts = relation->rd_opcoptions;
Oid relid = RelationGetRelid(relation);
- int natts = RelationGetNumberOfAttributes(relation); /* XXX IndexRelationGetNumberOfKeyAttributes */
+ int natts = RelationGetNumberOfAttributes(relation); /* XXX
+ * IndexRelationGetNumberOfKeyAttributes */
int i;
/* Try to copy cached options. */
p = backtrace_symbol_list;
for (;;)
{
- if (*p == '\0') /* end of backtrace_symbol_list */
+ if (*p == '\0') /* end of backtrace_symbol_list */
break;
if (strcmp(funcname, p) == 0)
int
errbacktrace(void)
{
- ErrorData *edata = &errordata[errordata_stack_depth];
+ ErrorData *edata = &errordata[errordata_stack_depth];
MemoryContext oldcontext;
recursion_depth++;
int
my_log2(long num)
{
- /* guard against too-large input, which would be invalid for pg_ceil_log2_*() */
+ /*
+ * guard against too-large input, which would be invalid for
+ * pg_ceil_log2_*()
+ */
if (num > LONG_MAX / 2)
num = LONG_MAX / 2;
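The rewrapped comment above guards my_log2 against inputs so large that the ceiling-log2 helper would overflow. A small standalone sketch of the same clamp-then-ceil-log2 pattern; it uses __builtin_clzl rather than pg_ceil_log2_*, purely as an illustration.

#include <stdio.h>
#include <limits.h>

/* smallest k such that 2^k >= num, for num >= 1 */
static int
my_log2_sketch(long num)
{
    /*
     * Guard against too-large input: above LONG_MAX / 2 the next power of
     * two would not fit in a long, so clamp first.
     */
    if (num > LONG_MAX / 2)
        num = LONG_MAX / 2;

    if (num <= 1)
        return 0;

    /* ceil(log2(num)) equals the bit length of (num - 1) for num >= 2 */
    return (int) (sizeof(long) * CHAR_BIT - __builtin_clzl((unsigned long) (num - 1)));
}

int
main(void)
{
    long        samples[] = {1, 2, 3, 1024, 1025, LONG_MAX};

    for (int i = 0; i < 6; i++)
        printf("my_log2(%ld) = %d\n", samples[i], my_log2_sketch(samples[i]));
    return 0;
}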
ProcessingMode Mode = InitProcessing;
-BackendType MyBackendType;
+BackendType MyBackendType;
/* List of lock files to be removed at proc exit */
static List *lock_files = NIL;
else if ((*newval)[i] == ' ' ||
(*newval)[i] == '\n' ||
(*newval)[i] == '\t')
- ; /* ignore these */
+ ; /* ignore these */
else
someval[j++] = (*newval)[i]; /* copy anything else */
}
AllocSet set = (AllocSet) context;
AllocBlock block;
Size keepersize PG_USED_FOR_ASSERTS_ONLY
- = set->keeper->endptr - ((char *) set);
+ = set->keeper->endptr - ((char *) set);
AssertArg(AllocSetIsValid(set));
else
{
/* Normal case, release the block */
- context->mem_allocated -= block->endptr - ((char*) block);
+ context->mem_allocated -= block->endptr - ((char *) block);
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, block->freeptr - ((char *) block));
AllocSet set = (AllocSet) context;
AllocBlock block = set->blocks;
Size keepersize PG_USED_FOR_ASSERTS_ONLY
- = set->keeper->endptr - ((char *) set);
+ = set->keeper->endptr - ((char *) set);
AssertArg(AllocSetIsValid(set));
if (block->next)
block->next->prev = block->prev;
- context->mem_allocated -= block->endptr - ((char*) block);
+ context->mem_allocated -= block->endptr - ((char *) block);
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, block->freeptr - ((char *) block));
/* Do the realloc */
blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
- oldblksize = block->endptr - ((char *)block);
+ oldblksize = block->endptr - ((char *) block);
block = (AllocBlock) realloc(block, blksize);
if (block == NULL)
Size
MemoryContextMemAllocated(MemoryContext context, bool recurse)
{
- Size total = context->mem_allocated;
+ Size total = context->mem_allocated;
AssertArg(MemoryContextIsValid(context));
headerSize = offsetof(SlabContext, freelist) + freelistSize;
#ifdef MEMORY_CONTEXT_CHECKING
+
/*
- * With memory checking, we need to allocate extra space for the bitmap
- * of free chunks. The bitmap is an array of bools, so we don't need to
- * worry about alignment.
+ * With memory checking, we need to allocate extra space for the bitmap of
+ * free chunks. The bitmap is an array of bools, so we don't need to worry
+ * about alignment.
*/
headerSize += chunksPerBlock * sizeof(bool);
#endif
Size freeBlocksLen; /* current allocated length of freeBlocks[] */
/* The array of logical tapes. */
- int nTapes; /* # of logical tapes in set */
- LogicalTape *tapes; /* has nTapes nentri