Diffstat (limited to 'src')
-rw-r--r--  src/backend/backup/basebackup_target.c       12
-rw-r--r--  src/backend/parser/parse_jsontable.c          6
-rw-r--r--  src/backend/replication/logical/tablesync.c  23
-rw-r--r--  src/backend/utils/adt/jsonpath_exec.c         6
-rw-r--r--  src/bin/pg_dump/pg_dump.c                    38
5 files changed, 39 insertions, 46 deletions
diff --git a/src/backend/backup/basebackup_target.c b/src/backend/backup/basebackup_target.c
index 83928e32055..f280660a03f 100644
--- a/src/backend/backup/basebackup_target.c
+++ b/src/backend/backup/basebackup_target.c
@@ -62,7 +62,7 @@ BaseBackupAddTarget(char *name,
void *(*check_detail) (char *, char *),
bbsink *(*get_sink) (bbsink *, void *))
{
- BaseBackupTargetType *ttype;
+ BaseBackupTargetType *newtype;
MemoryContext oldcontext;
ListCell *lc;
@@ -96,11 +96,11 @@ BaseBackupAddTarget(char *name,
* name into a newly-allocated chunk of memory.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
- ttype = palloc(sizeof(BaseBackupTargetType));
- ttype->name = pstrdup(name);
- ttype->check_detail = check_detail;
- ttype->get_sink = get_sink;
- BaseBackupTargetTypeList = lappend(BaseBackupTargetTypeList, ttype);
+ newtype = palloc(sizeof(BaseBackupTargetType));
+ newtype->name = pstrdup(name);
+ newtype->check_detail = check_detail;
+ newtype->get_sink = get_sink;
+ BaseBackupTargetTypeList = lappend(BaseBackupTargetTypeList, newtype);
MemoryContextSwitchTo(oldcontext);
}
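
BaseBackupAddTarget() is a public registration hook: the renamed newtype entry is copied into TopMemoryContext so that targets registered by extensions outlive the calling context. A minimal sketch of how a loadable module might call it from _PG_init(); the callback names, their trivial bodies, and the header path are assumptions for illustration, not part of this patch.

#include "postgres.h"

#include "backup/basebackup_target.h"   /* assumed header location */
#include "fmgr.h"

PG_MODULE_MAGIC;

void        _PG_init(void);

/* Hypothetical TARGET_DETAIL validator: accept the detail string unchanged. */
static void *
demo_check_detail(char *target, char *target_detail)
{
    return target_detail;
}

/* Hypothetical sink factory: just hand back the next sink in the chain. */
static bbsink *
demo_get_sink(bbsink *next_sink, void *detail_arg)
{
    return next_sink;
}

void
_PG_init(void)
{
    /* The name "demo" is pstrdup'd into TopMemoryContext by BaseBackupAddTarget(). */
    BaseBackupAddTarget("demo", demo_check_detail, demo_get_sink);
}
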
diff --git a/src/backend/parser/parse_jsontable.c b/src/backend/parser/parse_jsontable.c
index bc3272017ef..3e94071248e 100644
--- a/src/backend/parser/parse_jsontable.c
+++ b/src/backend/parser/parse_jsontable.c
@@ -341,13 +341,13 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan,
/* transform all nested columns into cross/union join */
foreach(lc, columns)
{
- JsonTableColumn *jtc = castNode(JsonTableColumn, lfirst(lc));
+ JsonTableColumn *col = castNode(JsonTableColumn, lfirst(lc));
Node *node;
- if (jtc->coltype != JTC_NESTED)
+ if (col->coltype != JTC_NESTED)
continue;
- node = transformNestedJsonTableColumn(cxt, jtc, plan);
+ node = transformNestedJsonTableColumn(cxt, col, plan);
/* join transformed node with previous sibling nodes */
res = res ? makeJsonTableSiblingJoin(cross, res, node) : node;
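
The rename above leaves the usual list-walking idiom intact: foreach() plus castNode(), which checks the node tag in assert-enabled builds before casting. A small illustrative sketch of that idiom outside JSON_TABLE; the TargetEntry list and the function name are assumptions, not from this patch.

#include "postgres.h"

#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"

/* Walk a query's target list, skipping resjunk entries. */
static void
walk_target_list(List *targetList)
{
    ListCell   *lc;

    foreach(lc, targetList)
    {
        TargetEntry *tle = castNode(TargetEntry, lfirst(lc));

        if (tle->resjunk)
            continue;
        /* ... process one real output column here ... */
    }
}
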
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index bfcb80b4955..d37d8a0d74a 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -707,7 +707,6 @@ fetch_remote_table_info(char *nspname, char *relname,
bool isnull;
int natt;
ListCell *lc;
- bool first;
Bitmapset *included_cols = NULL;
lrel->nspname = nspname;
@@ -759,18 +758,15 @@ fetch_remote_table_info(char *nspname, char *relname,
if (walrcv_server_version(LogRepWorkerWalRcvConn) >= 150000)
{
WalRcvExecResult *pubres;
- TupleTableSlot *slot;
+ TupleTableSlot *tslot;
Oid attrsRow[] = {INT2VECTOROID};
StringInfoData pub_names;
- bool first = true;
-
initStringInfo(&pub_names);
foreach(lc, MySubscription->publications)
{
- if (!first)
+ if (foreach_current_index(lc) > 0)
appendStringInfo(&pub_names, ", ");
appendStringInfoString(&pub_names, quote_literal_cstr(strVal(lfirst(lc))));
- first = false;
}
/*
@@ -819,10 +815,10 @@ fetch_remote_table_info(char *nspname, char *relname,
* If we find a NULL value, it means all the columns should be
* replicated.
*/
- slot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
- if (tuplestore_gettupleslot(pubres->tuplestore, true, false, slot))
+ tslot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
+ if (tuplestore_gettupleslot(pubres->tuplestore, true, false, tslot))
{
- Datum cfval = slot_getattr(slot, 1, &isnull);
+ Datum cfval = slot_getattr(tslot, 1, &isnull);
if (!isnull)
{
@@ -838,9 +834,9 @@ fetch_remote_table_info(char *nspname, char *relname,
included_cols = bms_add_member(included_cols, elems[natt]);
}
- ExecClearTuple(slot);
+ ExecClearTuple(tslot);
}
- ExecDropSingleTupleTableSlot(slot);
+ ExecDropSingleTupleTableSlot(tslot);
walrcv_clear_result(pubres);
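
The hunks above read the single-row, single-column result through the executor's slot API: make a slot for the result's tuple descriptor, pull tuples from the tuplestore into it, fetch attribute 1 with its null flag, then clear and drop the slot. A condensed sketch of that access pattern, assuming a plain Tuplestorestate and TupleDesc rather than the walreceiver result used here; the function name is illustrative only.

#include "postgres.h"

#include "executor/tuptable.h"
#include "utils/tuplestore.h"

/* Read the first column of every row in a tuplestore. */
static void
scan_first_column(Tuplestorestate *store, TupleDesc tupdesc)
{
    TupleTableSlot *slot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsMinimalTuple);

    while (tuplestore_gettupleslot(store, true, false, slot))
    {
        bool        isnull;
        Datum       val = slot_getattr(slot, 1, &isnull);

        if (!isnull)
        {
            (void) val;         /* real code would decode the datum here */
        }
        ExecClearTuple(slot);
    }
    ExecDropSingleTupleTableSlot(slot);
}
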
@@ -950,14 +946,11 @@ fetch_remote_table_info(char *nspname, char *relname,
/* Build the pubname list. */
initStringInfo(&pub_names);
- first = true;
foreach(lc, MySubscription->publications)
{
char *pubname = strVal(lfirst(lc));
- if (first)
- first = false;
- else
+ if (foreach_current_index(lc) > 0)
appendStringInfoString(&pub_names, ", ");
appendStringInfoString(&pub_names, quote_literal_cstr(pubname));
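
Both pub_names loops in this file drop a hand-rolled first flag in favor of foreach_current_index(), which evaluates to the zero-based position of the current foreach() cell. A minimal sketch of the resulting idiom, assuming a List of String publication names as in the surrounding code; the function name is illustrative only.

#include "postgres.h"

#include "lib/stringinfo.h"
#include "nodes/pg_list.h"
#include "nodes/value.h"
#include "utils/builtins.h"     /* quote_literal_cstr() */

static void
append_quoted_pub_names(StringInfo buf, List *publications)
{
    ListCell   *lc;

    foreach(lc, publications)
    {
        /* Separator before every element except the first; no "first" flag needed. */
        if (foreach_current_index(lc) > 0)
            appendStringInfoString(buf, ", ");
        appendStringInfoString(buf, quote_literal_cstr(strVal(lfirst(lc))));
    }
}
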
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index 5b6a4805721..9c381ae7271 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -3109,10 +3109,10 @@ JsonItemFromDatum(Datum val, Oid typid, int32 typmod, JsonbValue *res)
if (JsonContainerIsScalar(&jb->root))
{
- bool res PG_USED_FOR_ASSERTS_ONLY;
+ bool result PG_USED_FOR_ASSERTS_ONLY;
- res = JsonbExtractScalar(&jb->root, jbv);
- Assert(res);
+ result = JsonbExtractScalar(&jb->root, jbv);
+ Assert(result);
}
else
JsonbInitBinary(jbv, jb);
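
PG_USED_FOR_ASSERTS_ONLY marks a variable that is only read inside Assert(), so builds without assertions enabled do not warn about a variable that is set but never used. A tiny illustrative sketch of the idiom; the function and its logic are assumptions, not from this patch.

#include "postgres.h"

/* Verify that a counter held the expected value before resetting it. */
static void
reset_counter(int *counter, int expected)
{
    bool        matched PG_USED_FOR_ASSERTS_ONLY;

    matched = (*counter == expected);
    *counter = 0;
    Assert(matched);            /* compiled out without --enable-cassert */
}
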
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index da6605175a0..2c689157329 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -3142,10 +3142,10 @@ dumpDatabase(Archive *fout)
PQExpBuffer loFrozenQry = createPQExpBuffer();
PQExpBuffer loOutQry = createPQExpBuffer();
PQExpBuffer loHorizonQry = createPQExpBuffer();
- int i_relfrozenxid,
- i_relfilenode,
- i_oid,
- i_relminmxid;
+ int ii_relfrozenxid,
+ ii_relfilenode,
+ ii_oid,
+ ii_relminmxid;
/*
* pg_largeobject
@@ -3163,10 +3163,10 @@ dumpDatabase(Archive *fout)
lo_res = ExecuteSqlQuery(fout, loFrozenQry->data, PGRES_TUPLES_OK);
- i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
- i_relminmxid = PQfnumber(lo_res, "relminmxid");
- i_relfilenode = PQfnumber(lo_res, "relfilenode");
- i_oid = PQfnumber(lo_res, "oid");
+ ii_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
+ ii_relminmxid = PQfnumber(lo_res, "relminmxid");
+ ii_relfilenode = PQfnumber(lo_res, "relfilenode");
+ ii_oid = PQfnumber(lo_res, "oid");
appendPQExpBufferStr(loHorizonQry, "\n-- For binary upgrade, set pg_largeobject relfrozenxid and relminmxid\n");
appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
@@ -3178,12 +3178,12 @@ dumpDatabase(Archive *fout)
appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n"
"SET relfrozenxid = '%u', relminmxid = '%u'\n"
"WHERE oid = %u;\n",
- atooid(PQgetvalue(lo_res, i, i_relfrozenxid)),
- atooid(PQgetvalue(lo_res, i, i_relminmxid)),
- atooid(PQgetvalue(lo_res, i, i_oid)));
+ atooid(PQgetvalue(lo_res, i, ii_relfrozenxid)),
+ atooid(PQgetvalue(lo_res, i, ii_relminmxid)),
+ atooid(PQgetvalue(lo_res, i, ii_oid)));
- oid = atooid(PQgetvalue(lo_res, i, i_oid));
- relfilenumber = atooid(PQgetvalue(lo_res, i, i_relfilenode));
+ oid = atooid(PQgetvalue(lo_res, i, ii_oid));
+ relfilenumber = atooid(PQgetvalue(lo_res, i, ii_relfilenode));
if (oid == LargeObjectRelationId)
appendPQExpBuffer(loOutQry,
@@ -7081,21 +7081,21 @@ getConstraints(Archive *fout, TableInfo tblinfo[], int numTables)
appendPQExpBufferChar(tbloids, '{');
for (int i = 0; i < numTables; i++)
{
- TableInfo *tbinfo = &tblinfo[i];
+ TableInfo *tinfo = &tblinfo[i];
/*
* For partitioned tables, foreign keys have no triggers so they must
* be included anyway in case some foreign keys are defined.
*/
- if ((!tbinfo->hastriggers &&
- tbinfo->relkind != RELKIND_PARTITIONED_TABLE) ||
- !(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
+ if ((!tinfo->hastriggers &&
+ tinfo->relkind != RELKIND_PARTITIONED_TABLE) ||
+ !(tinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
continue;
/* OK, we need info for this table */
if (tbloids->len > 1) /* do we have more than the '{'? */
appendPQExpBufferChar(tbloids, ',');
- appendPQExpBuffer(tbloids, "%u", tbinfo->dobj.catId.oid);
+ appendPQExpBuffer(tbloids, "%u", tinfo->dobj.catId.oid);
}
appendPQExpBufferChar(tbloids, '}');
@@ -16800,7 +16800,7 @@ dumpSequence(Archive *fout, const TableInfo *tbinfo)
*/
if (OidIsValid(tbinfo->owning_tab) && !tbinfo->is_identity_sequence)
{
- TableInfo *owning_tab = findTableByOid(tbinfo->owning_tab);
+ owning_tab = findTableByOid(tbinfo->owning_tab);
if (owning_tab == NULL)
pg_fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",