pgindent run for 9.5
author	Bruce Momjian <bruce@momjian.us>
Sun, 24 May 2015 01:35:49 +0000 (21:35 -0400)
committer	Bruce Momjian <bruce@momjian.us>
Sun, 24 May 2015 01:35:49 +0000 (21:35 -0400)
414 files changed:
contrib/btree_gin/btree_gin.c
contrib/btree_gist/btree_utils_num.c
contrib/btree_gist/btree_utils_var.c
contrib/fuzzystrmatch/dmetaphone.c
contrib/hstore/hstore_gist.c
contrib/hstore_plperl/hstore_plperl.c
contrib/hstore_plpython/hstore_plpython.c
contrib/ltree/crc32.c
contrib/ltree_plpython/ltree_plpython.c
contrib/pageinspect/brinfuncs.c
contrib/pageinspect/ginfuncs.c
contrib/pg_audit/pg_audit.c
contrib/pg_buffercache/pg_buffercache_pages.c
contrib/pg_stat_statements/pg_stat_statements.c
contrib/pgcrypto/pgp-armor.c
contrib/pgcrypto/pgp-pgsql.c
contrib/pgcrypto/pgp.h
contrib/pgstattuple/pgstatapprox.c
contrib/postgres_fdw/postgres_fdw.c
contrib/test_decoding/test_decoding.c
contrib/tsm_system_rows/tsm_system_rows.c
contrib/tsm_system_time/tsm_system_time.c
src/backend/access/brin/brin.c
src/backend/access/brin/brin_inclusion.c
src/backend/access/brin/brin_minmax.c
src/backend/access/brin/brin_revmap.c
src/backend/access/brin/brin_tuple.c
src/backend/access/gin/ginget.c
src/backend/access/gin/ginutil.c
src/backend/access/gist/gist.c
src/backend/access/gist/gistscan.c
src/backend/access/gist/gistutil.c
src/backend/access/heap/heapam.c
src/backend/access/heap/hio.c
src/backend/access/index/genam.c
src/backend/access/nbtree/nbtinsert.c
src/backend/access/nbtree/nbtpage.c
src/backend/access/nbtree/nbtree.c
src/backend/access/nbtree/nbtsearch.c
src/backend/access/nbtree/nbtsort.c
src/backend/access/nbtree/nbtutils.c
src/backend/access/rmgrdesc/committsdesc.c
src/backend/access/rmgrdesc/replorigindesc.c
src/backend/access/rmgrdesc/xactdesc.c
src/backend/access/spgist/spgscan.c
src/backend/access/tablesample/bernoulli.c
src/backend/access/tablesample/system.c
src/backend/access/tablesample/tablesample.c
src/backend/access/transam/commit_ts.c
src/backend/access/transam/multixact.c
src/backend/access/transam/parallel.c
src/backend/access/transam/twophase.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xloginsert.c
src/backend/access/transam/xlogreader.c
src/backend/bootstrap/bootstrap.c
src/backend/catalog/Catalog.pm
src/backend/catalog/aclchk.c
src/backend/catalog/dependency.c
src/backend/catalog/genbki.pl
src/backend/catalog/index.c
src/backend/catalog/objectaddress.c
src/backend/catalog/pg_aggregate.c
src/backend/catalog/pg_enum.c
src/backend/catalog/pg_proc.c
src/backend/catalog/pg_type.c
src/backend/catalog/toasting.c
src/backend/commands/analyze.c
src/backend/commands/copy.c
src/backend/commands/createas.c
src/backend/commands/dbcommands.c
src/backend/commands/dropcmds.c
src/backend/commands/event_trigger.c
src/backend/commands/explain.c
src/backend/commands/functioncmds.c
src/backend/commands/matview.c
src/backend/commands/policy.c
src/backend/commands/schemacmds.c
src/backend/commands/sequence.c
src/backend/commands/tablecmds.c
src/backend/commands/trigger.c
src/backend/commands/typecmds.c
src/backend/commands/user.c
src/backend/commands/vacuum.c
src/backend/commands/vacuumlazy.c
src/backend/executor/execAmi.c
src/backend/executor/execIndexing.c
src/backend/executor/execMain.c
src/backend/executor/execQual.c
src/backend/executor/execUtils.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeBitmapHeapscan.c
src/backend/executor/nodeGroup.c
src/backend/executor/nodeHash.c
src/backend/executor/nodeIndexonlyscan.c
src/backend/executor/nodeIndexscan.c
src/backend/executor/nodeLockRows.c
src/backend/executor/nodeMaterial.c
src/backend/executor/nodeMergeAppend.c
src/backend/executor/nodeMergejoin.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeSamplescan.c
src/backend/executor/nodeSort.c
src/backend/executor/nodeWindowAgg.c
src/backend/executor/spi.c
src/backend/lib/bipartite_match.c
src/backend/lib/hyperloglog.c
src/backend/lib/pairingheap.c
src/backend/libpq/auth.c
src/backend/libpq/be-secure-openssl.c
src/backend/libpq/be-secure.c
src/backend/libpq/hba.c
src/backend/libpq/pqcomm.c
src/backend/libpq/pqmq.c
src/backend/nodes/copyfuncs.c
src/backend/nodes/makefuncs.c
src/backend/nodes/nodeFuncs.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/plan/analyzejoins.c
src/backend/optimizer/plan/createplan.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/plan/setrefs.c
src/backend/optimizer/util/clauses.c
src/backend/optimizer/util/pathnode.c
src/backend/optimizer/util/plancat.c
src/backend/optimizer/util/var.c
src/backend/parser/analyze.c
src/backend/parser/parse_agg.c
src/backend/parser/parse_clause.c
src/backend/parser/parse_func.c
src/backend/parser/parse_relation.c
src/backend/parser/parse_type.c
src/backend/parser/parse_utilcmd.c
src/backend/port/atomics.c
src/backend/port/sysv_shmem.c
src/backend/port/win32_latch.c
src/backend/port/win32_sema.c
src/backend/postmaster/autovacuum.c
src/backend/postmaster/bgworker.c
src/backend/postmaster/pgstat.c
src/backend/postmaster/postmaster.c
src/backend/replication/basebackup.c
src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
src/backend/replication/logical/decode.c
src/backend/replication/logical/logical.c
src/backend/replication/logical/logicalfuncs.c
src/backend/replication/logical/origin.c
src/backend/replication/logical/reorderbuffer.c
src/backend/replication/logical/snapbuild.c
src/backend/replication/slot.c
src/backend/replication/slotfuncs.c
src/backend/replication/walreceiverfuncs.c
src/backend/replication/walsender.c
src/backend/rewrite/rewriteHandler.c
src/backend/rewrite/rewriteManip.c
src/backend/rewrite/rowsecurity.c
src/backend/storage/buffer/buf_init.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/buffer/freelist.c
src/backend/storage/file/fd.c
src/backend/storage/file/reinit.c
src/backend/storage/ipc/dsm_impl.c
src/backend/storage/ipc/procarray.c
src/backend/storage/ipc/shm_mq.c
src/backend/storage/ipc/sinval.c
src/backend/storage/lmgr/lwlock.c
src/backend/storage/lmgr/proc.c
src/backend/storage/page/bufpage.c
src/backend/storage/smgr/md.c
src/backend/tcop/postgres.c
src/backend/tcop/utility.c
src/backend/tsearch/spell.c
src/backend/utils/adt/acl.c
src/backend/utils/adt/array_userfuncs.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/json.c
src/backend/utils/adt/jsonb.c
src/backend/utils/adt/jsonb_util.c
src/backend/utils/adt/jsonfuncs.c
src/backend/utils/adt/levenshtein.c
src/backend/utils/adt/lockfuncs.c
src/backend/utils/adt/misc.c
src/backend/utils/adt/network_gist.c
src/backend/utils/adt/numeric.c
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/pg_upgrade_support.c
src/backend/utils/adt/pgstatfuncs.c
src/backend/utils/adt/rangetypes_spgist.c
src/backend/utils/adt/regexp.c
src/backend/utils/adt/regproc.c
src/backend/utils/adt/ri_triggers.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/tsquery_op.c
src/backend/utils/adt/txid.c
src/backend/utils/adt/varlena.c
src/backend/utils/adt/xml.c
src/backend/utils/cache/inval.c
src/backend/utils/cache/lsyscache.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/syscache.c
src/backend/utils/error/elog.c
src/backend/utils/fmgr/dfmgr.c
src/backend/utils/fmgr/funcapi.c
src/backend/utils/init/miscinit.c
src/backend/utils/init/postinit.c
src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
src/backend/utils/misc/guc.c
src/backend/utils/misc/rls.c
src/backend/utils/misc/sampling.c
src/backend/utils/sort/sortsupport.c
src/backend/utils/sort/tuplesort.c
src/backend/utils/time/combocid.c
src/backend/utils/time/snapmgr.c
src/backend/utils/time/tqual.c
src/bin/pg_basebackup/pg_basebackup.c
src/bin/pg_basebackup/pg_receivexlog.c
src/bin/pg_basebackup/receivelog.c
src/bin/pg_basebackup/receivelog.h
src/bin/pg_basebackup/streamutil.c
src/bin/pg_basebackup/streamutil.h
src/bin/pg_basebackup/t/010_pg_basebackup.pl
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_ctl/t/001_start_stop.pl
src/bin/pg_ctl/t/002_status.pl
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dump.h
src/bin/pg_dump/pg_dumpall.c
src/bin/pg_resetxlog/pg_resetxlog.c
src/bin/pg_rewind/RewindTest.pm
src/bin/pg_rewind/filemap.c
src/bin/pg_rewind/filemap.h
src/bin/pg_rewind/parsexlog.c
src/bin/pg_rewind/pg_rewind.c
src/bin/pg_rewind/t/001_basic.pl
src/bin/pg_rewind/t/002_databases.pl
src/bin/pg_rewind/t/003_extrafiles.pl
src/bin/pg_upgrade/check.c
src/bin/pg_upgrade/dump.c
src/bin/pg_upgrade/info.c
src/bin/pg_upgrade/option.c
src/bin/pg_upgrade/pg_upgrade.c
src/bin/pg_upgrade/pg_upgrade.h
src/bin/pg_upgrade/relfilenode.c
src/bin/pg_upgrade/server.c
src/bin/pg_upgrade/version.c
src/bin/pg_xlogdump/pg_xlogdump.c
src/bin/pgbench/pgbench.c
src/bin/pgbench/pgbench.h
src/bin/psql/command.c
src/bin/psql/common.c
src/bin/psql/common.h
src/bin/psql/copy.c
src/bin/psql/describe.c
src/bin/psql/help.c
src/bin/psql/print.c
src/bin/psql/print.h
src/bin/psql/startup.c
src/bin/psql/tab-complete.c
src/bin/scripts/common.c
src/bin/scripts/reindexdb.c
src/bin/scripts/t/102_vacuumdb_stages.pl
src/bin/scripts/vacuumdb.c
src/common/restricted_token.c
src/include/access/brin_page.h
src/include/access/commit_ts.h
src/include/access/gin.h
src/include/access/gist_private.h
src/include/access/hash.h
src/include/access/heapam.h
src/include/access/htup_details.h
src/include/access/multixact.h
src/include/access/parallel.h
src/include/access/relscan.h
src/include/access/stratnum.h
src/include/access/tablesample.h
src/include/access/xact.h
src/include/access/xlog.h
src/include/access/xloginsert.h
src/include/access/xlogreader.h
src/include/access/xlogrecord.h
src/include/access/xlogutils.h
src/include/bootstrap/bootstrap.h
src/include/catalog/binary_upgrade.h
src/include/catalog/index.h
src/include/catalog/indexing.h
src/include/catalog/objectaddress.h
src/include/catalog/opfam_internal.h
src/include/catalog/pg_aggregate.h
src/include/catalog/pg_amop.h
src/include/catalog/pg_amproc.h
src/include/catalog/pg_attribute.h
src/include/catalog/pg_cast.h
src/include/catalog/pg_class.h
src/include/catalog/pg_control.h
src/include/catalog/pg_description.h
src/include/catalog/pg_extension.h
src/include/catalog/pg_largeobject.h
src/include/catalog/pg_opclass.h
src/include/catalog/pg_operator.h
src/include/catalog/pg_pltemplate.h
src/include/catalog/pg_policy.h
src/include/catalog/pg_proc.h
src/include/catalog/pg_replication_origin.h
src/include/catalog/pg_seclabel.h
src/include/catalog/pg_shdescription.h
src/include/catalog/pg_shseclabel.h
src/include/catalog/pg_tablesample_method.h
src/include/catalog/pg_transform.h
src/include/catalog/pg_trigger.h
src/include/catalog/pg_type.h
src/include/commands/defrem.h
src/include/commands/event_trigger.h
src/include/commands/explain.h
src/include/commands/vacuum.h
src/include/common/fe_memutils.h
src/include/common/pg_lzcompress.h
src/include/common/restricted_token.h
src/include/common/string.h
src/include/executor/executor.h
src/include/executor/hashjoin.h
src/include/fmgr.h
src/include/funcapi.h
src/include/lib/bipartite_match.h
src/include/lib/hyperloglog.h
src/include/lib/pairingheap.h
src/include/libpq/libpq-be.h
src/include/libpq/libpq.h
src/include/libpq/pqmq.h
src/include/nodes/execnodes.h
src/include/nodes/nodes.h
src/include/nodes/parsenodes.h
src/include/nodes/plannodes.h
src/include/nodes/primnodes.h
src/include/optimizer/pathnode.h
src/include/optimizer/prep.h
src/include/optimizer/tlist.h
src/include/parser/parse_clause.h
src/include/parser/parse_func.h
src/include/parser/parse_relation.h
src/include/pgstat.h
src/include/port/atomics.h
src/include/port/atomics/arch-ia64.h
src/include/port/atomics/arch-x86.h
src/include/port/atomics/fallback.h
src/include/port/atomics/generic-acc.h
src/include/port/atomics/generic-gcc.h
src/include/port/atomics/generic-msvc.h
src/include/port/atomics/generic-sunpro.h
src/include/port/atomics/generic-xlc.h
src/include/port/atomics/generic.h
src/include/port/pg_crc32c.h
src/include/postmaster/bgworker.h
src/include/replication/origin.h
src/include/replication/output_plugin.h
src/include/replication/reorderbuffer.h
src/include/replication/walsender.h
src/include/rewrite/rowsecurity.h
src/include/storage/lmgr.h
src/include/storage/lock.h
src/include/storage/shm_mq.h
src/include/tcop/deparse_utility.h
src/include/tcop/fastpath.h
src/include/utils/acl.h
src/include/utils/aclchk_internal.h
src/include/utils/builtins.h
src/include/utils/guc.h
src/include/utils/guc_tables.h
src/include/utils/jsonapi.h
src/include/utils/jsonb.h
src/include/utils/lsyscache.h
src/include/utils/palloc.h
src/include/utils/pg_crc.h
src/include/utils/plancache.h
src/include/utils/rls.h
src/include/utils/ruleutils.h
src/include/utils/sampling.h
src/include/utils/selfuncs.h
src/include/utils/snapshot.h
src/include/utils/sortsupport.h
src/interfaces/ecpg/ecpglib/data.c
src/interfaces/ecpg/ecpglib/execute.c
src/interfaces/ecpg/ecpglib/memory.c
src/interfaces/ecpg/preproc/parse.pl
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/fe-misc.c
src/interfaces/libpq/fe-secure-openssl.c
src/interfaces/libpq/fe-secure.c
src/pl/plperl/plperl.c
src/pl/plpython/plpy_procedure.c
src/pl/plpython/plpy_typeio.c
src/port/gettimeofday.c
src/port/pg_crc32c_choose.c
src/port/pg_crc32c_sse42.c
src/port/win32setlocale.c
src/test/modules/test_ddl_deparse/test_ddl_deparse.c
src/test/modules/test_rls_hooks/test_rls_hooks.c
src/test/perl/TestLib.pm
src/test/regress/pg_regress.c
src/test/regress/regress.c
src/test/ssl/ServerSetup.pm
src/test/ssl/t/001_ssltests.pl
src/tools/msvc/Install.pm
src/tools/msvc/Mkvcbuild.pm
src/tools/msvc/Project.pm
src/tools/msvc/Solution.pm
src/tools/msvc/VCBuildProject.pm
src/tools/msvc/VSObjectFactory.pm
src/tools/msvc/config_default.pl
src/tools/msvc/vcregress.pl
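
The hunks that follow are formatting-only changes produced by pgindent: declaration columns are re-aligned, the "*" is attached to the variable name rather than the type, block comments are re-wrapped, and blank lines are inserted after function bodies. As a minimal sketch of that layout (illustrative only, not part of the commit; the function and variable names below are invented), a declaration block formatted the pgindent way looks like this:

#include <stdio.h>
#include <string.h>

/* Return the combined length of a name and an invented suffix. */
static int
example_width(const char *name)
{
	int			total;			/* type padded so names start in one column */
	const char *suffix = "_id"; /* "*" cuddles the variable, not the type */

	total = (int) strlen(name) + (int) strlen(suffix);
	return total;
}

int
main(void)
{
	printf("%d\n", example_width("relname"));
	return 0;
}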

diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c
index 6e3bf172e5021c1450a8195eee25a2eae19b63f4..f74e912ed7474ece0a3d2b6e650816ca326c0c3e 100644 (file)
@@ -113,12 +113,12 @@ gin_btree_compare_prefix(FunctionCallInfo fcinfo)
                                cmp;
 
        cmp = DatumGetInt32(DirectFunctionCall2Coll(
-                               data->typecmp,
-                               PG_GET_COLLATION(),
-                               (data->strategy == BTLessStrategyNumber ||
-                                data->strategy == BTLessEqualStrategyNumber)
-                                ? data->datum : a,
-                               b));
+                                                                                               data->typecmp,
+                                                                                               PG_GET_COLLATION(),
+                                                                  (data->strategy == BTLessStrategyNumber ||
+                                                                data->strategy == BTLessEqualStrategyNumber)
+                                                                                               ? data->datum : a,
+                                                                                               b));
 
        switch (data->strategy)
        {
@@ -186,14 +186,14 @@ Datum                                                                                                                                             \
 gin_extract_value_##type(PG_FUNCTION_ARGS)                                                                     \
 {                                                                                                                                                      \
        return gin_btree_extract_value(fcinfo, is_varlena);                                             \
-}                                                                                                                                                      \
+}      \
 PG_FUNCTION_INFO_V1(gin_extract_query_##type);                                                         \
 Datum                                                                                                                                          \
 gin_extract_query_##type(PG_FUNCTION_ARGS)                                                                     \
 {                                                                                                                                                      \
        return gin_btree_extract_query(fcinfo,                                                                  \
                                                                   is_varlena, leftmostvalue, typecmp);         \
-}                                                                                                                                                      \
+}      \
 PG_FUNCTION_INFO_V1(gin_compare_prefix_##type);                                                                \
 Datum                                                                                                                                          \
 gin_compare_prefix_##type(PG_FUNCTION_ARGS)                                                                    \
@@ -209,6 +209,7 @@ leftmostvalue_int2(void)
 {
        return Int16GetDatum(SHRT_MIN);
 }
+
 GIN_SUPPORT(int2, false, leftmostvalue_int2, btint2cmp)
 
 static Datum
@@ -216,6 +217,7 @@ leftmostvalue_int4(void)
 {
        return Int32GetDatum(INT_MIN);
 }
+
 GIN_SUPPORT(int4, false, leftmostvalue_int4, btint4cmp)
 
 static Datum
@@ -226,6 +228,7 @@ leftmostvalue_int8(void)
         */
        return Int64GetDatum(SEQ_MINVALUE);
 }
+
 GIN_SUPPORT(int8, false, leftmostvalue_int8, btint8cmp)
 
 static Datum
@@ -233,6 +236,7 @@ leftmostvalue_float4(void)
 {
        return Float4GetDatum(-get_float4_infinity());
 }
+
 GIN_SUPPORT(float4, false, leftmostvalue_float4, btfloat4cmp)
 
 static Datum
@@ -240,6 +244,7 @@ leftmostvalue_float8(void)
 {
        return Float8GetDatum(-get_float8_infinity());
 }
+
 GIN_SUPPORT(float8, false, leftmostvalue_float8, btfloat8cmp)
 
 static Datum
@@ -250,6 +255,7 @@ leftmostvalue_money(void)
         */
        return Int64GetDatum(SEQ_MINVALUE);
 }
+
 GIN_SUPPORT(money, false, leftmostvalue_money, cash_cmp)
 
 static Datum
@@ -257,6 +263,7 @@ leftmostvalue_oid(void)
 {
        return ObjectIdGetDatum(0);
 }
+
 GIN_SUPPORT(oid, false, leftmostvalue_oid, btoidcmp)
 
 static Datum
@@ -264,6 +271,7 @@ leftmostvalue_timestamp(void)
 {
        return TimestampGetDatum(DT_NOBEGIN);
 }
+
 GIN_SUPPORT(timestamp, false, leftmostvalue_timestamp, timestamp_cmp)
 
 GIN_SUPPORT(timestamptz, false, leftmostvalue_timestamp, timestamp_cmp)
@@ -273,6 +281,7 @@ leftmostvalue_time(void)
 {
        return TimeADTGetDatum(0);
 }
+
 GIN_SUPPORT(time, false, leftmostvalue_time, time_cmp)
 
 static Datum
@@ -285,6 +294,7 @@ leftmostvalue_timetz(void)
 
        return TimeTzADTPGetDatum(v);
 }
+
 GIN_SUPPORT(timetz, false, leftmostvalue_timetz, timetz_cmp)
 
 static Datum
@@ -292,6 +302,7 @@ leftmostvalue_date(void)
 {
        return DateADTGetDatum(DATEVAL_NOBEGIN);
 }
+
 GIN_SUPPORT(date, false, leftmostvalue_date, date_cmp)
 
 static Datum
@@ -304,6 +315,7 @@ leftmostvalue_interval(void)
        v->month = 0;
        return IntervalPGetDatum(v);
 }
+
 GIN_SUPPORT(interval, false, leftmostvalue_interval, interval_cmp)
 
 static Datum
@@ -313,6 +325,7 @@ leftmostvalue_macaddr(void)
 
        return MacaddrPGetDatum(v);
 }
+
 GIN_SUPPORT(macaddr, false, leftmostvalue_macaddr, macaddr_cmp)
 
 static Datum
@@ -320,6 +333,7 @@ leftmostvalue_inet(void)
 {
        return DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0/0"));
 }
+
 GIN_SUPPORT(inet, true, leftmostvalue_inet, network_cmp)
 
 GIN_SUPPORT(cidr, true, leftmostvalue_inet, network_cmp)
@@ -329,6 +343,7 @@ leftmostvalue_text(void)
 {
        return PointerGetDatum(cstring_to_text_with_len("", 0));
 }
+
 GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp)
 
 static Datum
@@ -336,6 +351,7 @@ leftmostvalue_char(void)
 {
        return CharGetDatum(SCHAR_MIN);
 }
+
 GIN_SUPPORT(char, false, leftmostvalue_char, btcharcmp)
 
 GIN_SUPPORT(bytea, true, leftmostvalue_text, byteacmp)
@@ -348,6 +364,7 @@ leftmostvalue_bit(void)
                                                           ObjectIdGetDatum(0),
                                                           Int32GetDatum(-1));
 }
+
 GIN_SUPPORT(bit, true, leftmostvalue_bit, bitcmp)
 
 static Datum
@@ -358,6 +375,7 @@ leftmostvalue_varbit(void)
                                                           ObjectIdGetDatum(0),
                                                           Int32GetDatum(-1));
 }
+
 GIN_SUPPORT(varbit, true, leftmostvalue_varbit, bitcmp)
 
 /*
@@ -402,4 +420,5 @@ leftmostvalue_numeric(void)
 {
        return PointerGetDatum(NULL);
 }
+
 GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c
index 5bfe659f91774ef6c4544e305f35e304cec5cdd9..99cb41f5f57aeabc8c3e1977823d176bf487a05a 100644 (file)
@@ -13,7 +13,7 @@
 GISTENTRY *
 gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo)
 {
-       GISTENTRY *retval;
+       GISTENTRY  *retval;
 
        if (entry->leafkey)
        {
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index 78e8662adddafdfc310097fff078b36408f3f642..8105a3b03507da77ea989991acfaaff970dddbad 100644 (file)
@@ -71,7 +71,7 @@ gbt_var_key_readable(const GBT_VARKEY *k)
  * Create a leaf-entry to store in the index, from a single Datum.
  */
 static GBT_VARKEY *
-gbt_var_key_from_datum(const struct varlena *u)
+gbt_var_key_from_datum(const struct varlena * u)
 {
        int32           lowersize = VARSIZE(u);
        GBT_VARKEY *r;
diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c
index 7c8457e734427ed801f6e5441a2b0d5b9f4eb2e0..147c8501ee89a86fa3ad064f1aaccaf29b8bfc25 100644 (file)
@@ -195,7 +195,7 @@ dmetaphone_alt(PG_FUNCTION_ARGS)
  * in a case like this.
  */
 
-#define META_FREE(x) ((void)true) /* pfree((x)) */
+#define META_FREE(x) ((void)true)              /* pfree((x)) */
 #else                                                  /* not defined DMETAPHONE_MAIN */
 
 /* use the standard malloc library when not running in PostgreSQL */
diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c
index dde37fb6e6029d98b8f6b258faddd6b935c9eea1..0fb769de7da38c2e84dab89204170a0e5ebb1398 100644 (file)
@@ -72,7 +72,7 @@ typedef struct
 static pg_crc32
 crc32_sz(char *buf, int size)
 {
-       pg_crc32 crc;
+       pg_crc32        crc;
 
        INIT_TRADITIONAL_CRC32(crc);
        COMP_TRADITIONAL_CRC32(crc, buf, size);
diff --git a/contrib/hstore_plperl/hstore_plperl.c b/contrib/hstore_plperl/hstore_plperl.c
index cdc224c30e095b94523f80647452d703cc4dc492..dcc74b12e835a6edb4db2a3aa730403a065d8c04 100644 (file)
@@ -9,7 +9,7 @@ PG_MODULE_MAGIC;
 
 
 PG_FUNCTION_INFO_V1(hstore_to_plperl);
-Datum hstore_to_plperl(PG_FUNCTION_ARGS);
+Datum          hstore_to_plperl(PG_FUNCTION_ARGS);
 
 Datum
 hstore_to_plperl(PG_FUNCTION_ARGS)
@@ -26,10 +26,10 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
        for (i = 0; i < count; i++)
        {
                const char *key;
-               SV         *value;
+               SV                 *value;
 
                key = pnstrdup(HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
-               value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base,i), HS_VALLEN(entries, i)));
+               value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base, i), HS_VALLEN(entries, i)));
 
                (void) hv_store(hv, key, strlen(key), value, 0);
        }
@@ -39,7 +39,7 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
 
 
 PG_FUNCTION_INFO_V1(plperl_to_hstore);
-Datum plperl_to_hstore(PG_FUNCTION_ARGS);
+Datum          plperl_to_hstore(PG_FUNCTION_ARGS);
 
 Datum
 plperl_to_hstore(PG_FUNCTION_ARGS)
@@ -61,8 +61,8 @@ plperl_to_hstore(PG_FUNCTION_ARGS)
        i = 0;
        while ((he = hv_iternext(hv)))
        {
-               char     *key = sv2cstr(HeSVKEY_force(he));
-               SV               *value = HeVAL(he);
+               char       *key = sv2cstr(HeSVKEY_force(he));
+               SV                 *value = HeVAL(he);
 
                pairs[i].key = pstrdup(key);
                pairs[i].keylen = hstoreCheckKeyLen(strlen(pairs[i].key));
diff --git a/contrib/hstore_plpython/hstore_plpython.c b/contrib/hstore_plpython/hstore_plpython.c
index 92cd4f800f6d0a88a42896219223fd3e2475eb8e..94404a506177c4dcf7ded264c79d4d3bb06846ac 100644 (file)
@@ -8,7 +8,7 @@ PG_MODULE_MAGIC;
 
 
 PG_FUNCTION_INFO_V1(hstore_to_plpython);
-Datum hstore_to_plpython(PG_FUNCTION_ARGS);
+Datum          hstore_to_plpython(PG_FUNCTION_ARGS);
 
 Datum
 hstore_to_plpython(PG_FUNCTION_ARGS)
@@ -31,9 +31,9 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
                        PyDict_SetItem(dict, key, Py_None);
                else
                {
-                       PyObject *value;
+                       PyObject   *value;
 
-                       value = PyString_FromStringAndSize(HS_VAL(entries, base,i), HS_VALLEN(entries, i));
+                       value = PyString_FromStringAndSize(HS_VAL(entries, base, i), HS_VALLEN(entries, i));
                        PyDict_SetItem(dict, key, value);
                        Py_XDECREF(value);
                }
@@ -45,7 +45,7 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
 
 
 PG_FUNCTION_INFO_V1(plpython_to_hstore);
-Datum plpython_to_hstore(PG_FUNCTION_ARGS);
+Datum          plpython_to_hstore(PG_FUNCTION_ARGS);
 
 Datum
 plpython_to_hstore(PG_FUNCTION_ARGS)
@@ -75,9 +75,9 @@ plpython_to_hstore(PG_FUNCTION_ARGS)
 
                for (i = 0; i < pcount; i++)
                {
-                       PyObject *tuple;
-                       PyObject *key;
-                       PyObject *value;
+                       PyObject   *tuple;
+                       PyObject   *key;
+                       PyObject   *value;
 
                        tuple = PyList_GetItem(items, i);
                        key = PyTuple_GetItem(tuple, 0);
diff --git a/contrib/ltree/crc32.c b/contrib/ltree/crc32.c
index 1c08d264f720992142503a47a2365fcf8b249fae..403dae0d7d464bbf0bb686ac3454d447fd260fc3 100644 (file)
 unsigned int
 ltree_crc32_sz(char *buf, int size)
 {
-       pg_crc32 crc;
+       pg_crc32        crc;
        char       *p = buf;
 
        INIT_TRADITIONAL_CRC32(crc);
        while (size > 0)
        {
-               char c = (char) TOLOWER(*p);
+               char            c = (char) TOLOWER(*p);
+
                COMP_TRADITIONAL_CRC32(crc, &c, 1);
                size--;
                p++;
diff --git a/contrib/ltree_plpython/ltree_plpython.c b/contrib/ltree_plpython/ltree_plpython.c
index 111e3e356e55a90b41f11ba9d2b66e23b3f917cc..af166a720f08d625cd94ee916258b4523e14d763 100644 (file)
@@ -7,7 +7,7 @@ PG_MODULE_MAGIC;
 
 
 PG_FUNCTION_INFO_V1(ltree_to_plpython);
-Datum ltree_to_plpython(PG_FUNCTION_ARGS);
+Datum          ltree_to_plpython(PG_FUNCTION_ARGS);
 
 Datum
 ltree_to_plpython(PG_FUNCTION_ARGS)
diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c
index bd3191d5d28d50b923c7087a22b8e2673dd9f669..7adcfa89370dc227428921a5c6d3e7237e8ebe06 100644 (file)
@@ -58,7 +58,7 @@ brin_page_type(PG_FUNCTION_ARGS)
 {
        bytea      *raw_page = PG_GETARG_BYTEA_P(0);
        Page            page = VARDATA(raw_page);
-       char *type;
+       char       *type;
 
        switch (BrinPageType(page))
        {
@@ -86,8 +86,8 @@ brin_page_type(PG_FUNCTION_ARGS)
 static Page
 verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
 {
-       Page    page;
-       int             raw_page_size;
+       Page            page;
+       int                     raw_page_size;
 
        raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
 
@@ -95,7 +95,7 @@ verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("input page too small"),
-                                errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
+                         errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
 
        page = VARDATA(raw_page);
 
@@ -153,7 +153,7 @@ brin_page_items(PG_FUNCTION_ARGS)
                indexRel = index_open(indexRelid, AccessShareLock);
 
                state = palloc(offsetof(brin_page_state, columns) +
-                                          sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
+                         sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
 
                state->bdesc = brin_build_desc(indexRel);
                state->page = page;
@@ -168,10 +168,10 @@ brin_page_items(PG_FUNCTION_ARGS)
                 */
                for (attno = 1; attno <= state->bdesc->bd_tupdesc->natts; attno++)
                {
-                       Oid             output;
-                       bool    isVarlena;
+                       Oid                     output;
+                       bool            isVarlena;
                        BrinOpcInfo *opcinfo;
-                       int             i;
+                       int                     i;
                        brin_column_state *column;
 
                        opcinfo = state->bdesc->bd_info[attno - 1];
@@ -213,7 +213,7 @@ brin_page_items(PG_FUNCTION_ARGS)
                 */
                if (state->dtup == NULL)
                {
-                       BrinTuple          *tup;
+                       BrinTuple  *tup;
                        MemoryContext mctx;
                        ItemId          itemId;
 
@@ -225,8 +225,8 @@ brin_page_items(PG_FUNCTION_ARGS)
                        if (ItemIdIsUsed(itemId))
                        {
                                tup = (BrinTuple *) PageGetItem(state->page,
-                                                                                         PageGetItemId(state->page,
-                                                                                                                       state->offset));
+                                                                                               PageGetItemId(state->page,
+                                                                                                                         state->offset));
                                state->dtup = brin_deform_tuple(state->bdesc, tup);
                                state->attno = 1;
                                state->unusedItem = false;
@@ -253,7 +253,7 @@ brin_page_items(PG_FUNCTION_ARGS)
                }
                else
                {
-                       int             att = state->attno - 1;
+                       int                     att = state->attno - 1;
 
                        values[0] = UInt16GetDatum(state->offset);
                        values[1] = UInt32GetDatum(state->dtup->bt_blkno);
@@ -263,8 +263,8 @@ brin_page_items(PG_FUNCTION_ARGS)
                        values[5] = BoolGetDatum(state->dtup->bt_placeholder);
                        if (!state->dtup->bt_columns[att].bv_allnulls)
                        {
-                               BrinValues   *bvalues = &state->dtup->bt_columns[att];
-                               StringInfoData  s;
+                               BrinValues *bvalues = &state->dtup->bt_columns[att];
+                               StringInfoData s;
                                bool            first;
                                int                     i;
 
@@ -274,7 +274,7 @@ brin_page_items(PG_FUNCTION_ARGS)
                                first = true;
                                for (i = 0; i < state->columns[att]->nstored; i++)
                                {
-                                       char   *val;
+                                       char       *val;
 
                                        if (!first)
                                                appendStringInfoString(&s, " .. ");
@@ -312,8 +312,8 @@ brin_page_items(PG_FUNCTION_ARGS)
                }
 
                /*
-                * If we're beyond the end of the page, set flag to end the function in
-                * the following iteration.
+                * If we're beyond the end of the page, set flag to end the function
+                * in the following iteration.
                 */
                if (state->offset > PageGetMaxOffsetNumber(state->page))
                        state->done = true;
@@ -366,8 +366,8 @@ brin_revmap_data(PG_FUNCTION_ARGS)
        struct
        {
                ItemPointerData *tids;
-               int             idx;
-       } *state;
+               int                     idx;
+       }                  *state;
        FuncCallContext *fctx;
 
        if (!superuser())
diff --git a/contrib/pageinspect/ginfuncs.c b/contrib/pageinspect/ginfuncs.c
index 701b2ca763c94507372534b931db9d12c9164b8c..c0de3be8df89b08ab52f3e384fb03813b3c6e312 100644 (file)
@@ -167,7 +167,7 @@ typedef struct gin_leafpage_items_state
        TupleDesc       tupd;
        GinPostingList *seg;
        GinPostingList *lastseg;
-}      gin_leafpage_items_state;
+} gin_leafpage_items_state;
 
 Datum
 gin_leafpage_items(PG_FUNCTION_ARGS)
diff --git a/contrib/pg_audit/pg_audit.c b/contrib/pg_audit/pg_audit.c
index 4b75fefc34ae65bba9d44b9b4b1399ff69bd6043..a4b05a6df11709fce41b1156705c22c6e138ec17 100644 (file)
 
 PG_MODULE_MAGIC;
 
-void _PG_init(void);
+void           _PG_init(void);
 
 /* Prototypes for functions used with event triggers */
-Datum pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
-Datum pg_audit_sql_drop(PG_FUNCTION_ARGS);
+Datum          pg_audit_ddl_command_end(PG_FUNCTION_ARGS);
+Datum          pg_audit_sql_drop(PG_FUNCTION_ARGS);
 
 PG_FUNCTION_INFO_V1(pg_audit_ddl_command_end);
 PG_FUNCTION_INFO_V1(pg_audit_sql_drop);
@@ -67,14 +67,14 @@ PG_FUNCTION_INFO_V1(pg_audit_sql_drop);
 #define LOG_ROLE               (1 << 4)        /* GRANT/REVOKE, CREATE/ALTER/DROP ROLE */
 #define LOG_WRITE              (1 << 5)        /* INSERT, UPDATE, DELETE, TRUNCATE */
 
-#define LOG_NONE               0                       /* nothing */
+#define LOG_NONE               0               /* nothing */
 #define LOG_ALL                        (0xFFFFFFFF)    /* All */
 
 /* GUC variable for pg_audit.log, which defines the classes to log. */
-char *auditLog = NULL;
+char      *auditLog = NULL;
 
 /* Bitmap of classes selected */
-static int auditLogBitmap = LOG_NONE;
+static int     auditLogBitmap = LOG_NONE;
 
 /*
  * String constants for log classes - used when processing tokens in the
@@ -97,7 +97,7 @@ static int auditLogBitmap = LOG_NONE;
  * the query are in pg_catalog.  Interactive sessions (eg: psql) can cause
  * a lot of noise in the logs which might be uninteresting.
  */
-bool auditLogCatalog = true;
+bool           auditLogCatalog = true;
 
 /*
  * GUC variable for pg_audit.log_level
@@ -106,8 +106,8 @@ bool auditLogCatalog = true;
  * at.  The default level is LOG, which goes into the server log but does
  * not go to the client.  Set to NOTICE in the regression tests.
  */
-char *auditLogLevelString = NULL;
-int auditLogLevel = LOG;
+char      *auditLogLevelString = NULL;
+int                    auditLogLevel = LOG;
 
 /*
  * GUC variable for pg_audit.log_parameter
@@ -115,7 +115,7 @@ int auditLogLevel = LOG;
  * Administrators can choose if parameters passed into a statement are
  * included in the audit log.
  */
-bool auditLogParameter = false;
+bool           auditLogParameter = false;
 
 /*
  * GUC variable for pg_audit.log_relation
@@ -124,7 +124,7 @@ bool auditLogParameter = false;
  * in READ/WRITE class queries.  By default, SESSION logs include the query but
  * do not have a log entry for each relation.
  */
-bool auditLogRelation = false;
+bool           auditLogRelation = false;
 
 /*
  * GUC variable for pg_audit.log_statement_once
@@ -134,7 +134,7 @@ bool auditLogRelation = false;
  * the audit log to facilitate searching, but this can cause the log to be
  * unnecessairly bloated in some environments.
  */
-bool auditLogStatementOnce = false;
+bool           auditLogStatementOnce = false;
 
 /*
  * GUC variable for pg_audit.role
@@ -143,7 +143,7 @@ bool auditLogStatementOnce = false;
  * Object-level auditing uses the privileges which are granted to this role to
  * determine if a statement should be logged.
  */
-char *auditRole = NULL;
+char      *auditRole = NULL;
 
 /*
  * String constants for the audit log fields.
@@ -213,23 +213,23 @@ char *auditRole = NULL;
  */
 typedef struct
 {
-       int64 statementId;                      /* Simple counter */
-       int64 substatementId;           /* Simple counter */
+       int64           statementId;    /* Simple counter */
+       int64           substatementId; /* Simple counter */
 
        LogStmtLevel logStmtLevel;      /* From GetCommandLogLevel when possible, */
-                                                               /* generated when not. */
-       NodeTag commandTag;                     /* same here */
+       /* generated when not. */
+       NodeTag         commandTag;             /* same here */
        const char *command;            /* same here */
        const char *objectType;         /* From event trigger when possible */
-                                                               /* generated when not. */
-       char *objectName;                       /* Fully qualified object identification */
+       /* generated when not. */
+       char       *objectName;         /* Fully qualified object identification */
        const char *commandText;        /* sourceText / queryString */
        ParamListInfo paramList;        /* QueryDesc/ProcessUtility parameters */
 
-       bool granted;                           /* Audit role has object permissions? */
-       bool logged;                            /* Track if we have logged this event, used */
-                                                               /* post-ProcessUtility to make sure we log */
-       bool statementLogged;           /* Track if we have logged the statement */
+       bool            granted;                /* Audit role has object permissions? */
+       bool            logged;                 /* Track if we have logged this event, used */
+       /* post-ProcessUtility to make sure we log */
+       bool            statementLogged;        /* Track if we have logged the statement */
 } AuditEvent;
 
 /*
@@ -239,9 +239,9 @@ typedef struct AuditEventStackItem
 {
        struct AuditEventStackItem *next;
 
-       AuditEvent auditEvent;
+       AuditEvent      auditEvent;
 
-       int64 stackId;
+       int64           stackId;
 
        MemoryContext contextAudit;
        MemoryContextCallback contextCallback;
@@ -288,7 +288,7 @@ stack_free(void *stackFree)
        while (nextItem != NULL)
        {
                /* Check if this item matches the item to be freed */
-               if (nextItem == (AuditEventStackItem *)stackFree)
+               if (nextItem == (AuditEventStackItem *) stackFree)
                {
                        /* Move top of stack to the item after the freed item */
                        auditEventStack = nextItem->next;
@@ -309,7 +309,8 @@ stack_free(void *stackFree)
                                substatementTotal = 0;
 
                                /*
-                                * Reset statement logged so that next statement will be logged.
+                                * Reset statement logged so that next statement will be
+                                * logged.
                                 */
                                statementLogged = false;
                        }
@@ -356,7 +357,7 @@ stack_push()
         * the stack at this item.
         */
        stackItem->contextCallback.func = stack_free;
-       stackItem->contextCallback.arg = (void *)stackItem;
+       stackItem->contextCallback.arg = (void *) stackItem;
        MemoryContextRegisterResetCallback(contextAudit,
                                                                           &stackItem->contextCallback);
 
@@ -431,7 +432,7 @@ append_valid_csv(StringInfoData *buffer, const char *appendStr)
 
                for (pChar = appendStr; *pChar; pChar++)
                {
-                       if (*pChar == '"') /* double single quotes */
+                       if (*pChar == '"')      /* double single quotes */
                                appendStringInfoCharMacro(buffer, *pChar);
 
                        appendStringInfoCharMacro(buffer, *pChar);
@@ -461,23 +462,23 @@ static void
 log_audit_event(AuditEventStackItem *stackItem)
 {
        /* By default, put everything in the MISC class. */
-       int                             class = LOG_MISC;
-       const char         *className = CLASS_MISC;
-       MemoryContext   contextOld;
-       StringInfoData  auditStr;
+       int                     class = LOG_MISC;
+       const char *className = CLASS_MISC;
+       MemoryContext contextOld;
+       StringInfoData auditStr;
 
 
        /* Classify the statement using log stmt level and the command tag */
        switch (stackItem->auditEvent.logStmtLevel)
        {
-               /* All mods go in WRITE class, execpt EXECUTE */
+                       /* All mods go in WRITE class, execpt EXECUTE */
                case LOGSTMT_MOD:
                        className = CLASS_WRITE;
                        class = LOG_WRITE;
 
                        switch (stackItem->auditEvent.commandTag)
                        {
-                               /* Currently, only EXECUTE is different */
+                                       /* Currently, only EXECUTE is different */
                                case T_ExecuteStmt:
                                        className = CLASS_MISC;
                                        class = LOG_MISC;
@@ -487,7 +488,7 @@ log_audit_event(AuditEventStackItem *stackItem)
                        }
                        break;
 
-               /* These are DDL, unless they are ROLE */
+                       /* These are DDL, unless they are ROLE */
                case LOGSTMT_DDL:
                        className = CLASS_DDL;
                        class = LOG_DDL;
@@ -495,7 +496,7 @@ log_audit_event(AuditEventStackItem *stackItem)
                        /* Identify role statements */
                        switch (stackItem->auditEvent.commandTag)
                        {
-                               /* We know these are all role statements */
+                                       /* We know these are all role statements */
                                case T_GrantStmt:
                                case T_GrantRoleStmt:
                                case T_CreateRoleStmt:
@@ -505,11 +506,12 @@ log_audit_event(AuditEventStackItem *stackItem)
                                        className = CLASS_ROLE;
                                        class = LOG_ROLE;
                                        break;
-                               /*
-                                * Rename and Drop are general and therefore we have to do an
-                                * additional check against the command string to see if they
-                                * are role or regular DDL.
-                                */
+
+                                       /*
+                                        * Rename and Drop are general and therefore we have to do
+                                        * an additional check against the command string to see
+                                        * if they are role or regular DDL.
+                                        */
                                case T_RenameStmt:
                                case T_DropStmt:
                                        if (pg_strcasecmp(stackItem->auditEvent.command,
@@ -527,11 +529,11 @@ log_audit_event(AuditEventStackItem *stackItem)
                        }
                        break;
 
-               /* Classify the rest */
+                       /* Classify the rest */
                case LOGSTMT_ALL:
                        switch (stackItem->auditEvent.commandTag)
                        {
-                               /* READ statements */
+                                       /* READ statements */
                                case T_CopyStmt:
                                case T_SelectStmt:
                                case T_PrepareStmt:
@@ -540,7 +542,7 @@ log_audit_event(AuditEventStackItem *stackItem)
                                        class = LOG_READ;
                                        break;
 
-                               /* FUNCTION statements */
+                                       /* FUNCTION statements */
                                case T_DoStmt:
                                        className = CLASS_FUNCTION;
                                        class = LOG_FUNCTION;
@@ -558,8 +560,8 @@ log_audit_event(AuditEventStackItem *stackItem)
        /*
         * Only log the statement if:
         *
-        * 1. If object was selected for audit logging (granted)
-        * 2. The statement belongs to a class that is being logged
+        * 1. If object was selected for audit logging (granted) 2. The statement
+        * belongs to a class that is being logged
         *
         * If neither of these is true, return.
         */
@@ -615,10 +617,10 @@ log_audit_event(AuditEventStackItem *stackItem)
                /* Handle parameter logging, if enabled. */
                if (auditLogParameter)
                {
-                       int                             paramIdx;
-                       int                             numParams;
-                       StringInfoData  paramStrResult;
-                       ParamListInfo   paramList = stackItem->auditEvent.paramList;
+                       int                     paramIdx;
+                       int                     numParams;
+                       StringInfoData paramStrResult;
+                       ParamListInfo paramList = stackItem->auditEvent.paramList;
 
                        numParams = paramList == NULL ? 0 : paramList->numParams;
 
@@ -630,9 +632,9 @@ log_audit_event(AuditEventStackItem *stackItem)
                                 paramIdx++)
                        {
                                ParamExternData *prm = &paramList->params[paramIdx];
-                               Oid                      typeOutput;
-                               bool                     typeIsVarLena;
-                               char                    *paramStr;
+                               Oid                     typeOutput;
+                               bool            typeIsVarLena;
+                               char       *paramStr;
 
                                /* Add a comma for each param */
                                if (paramIdx != 0)
@@ -663,7 +665,7 @@ log_audit_event(AuditEventStackItem *stackItem)
        else
                /* we were asked to not log it */
                appendStringInfoString(&auditStr,
-                               "<previously logged>,<previously logged>");
+                                                          "<previously logged>,<previously logged>");
 
        /*
         * Log the audit entry.  Note: use of INT64_FORMAT here is bad for
@@ -696,7 +698,7 @@ audit_on_acl(Datum aclDatum,
 {
        bool            result = false;
        Acl                *acl;
-       AclItem    *aclItemData;
+       AclItem    *aclItemData;
        int                     aclIndex;
        int                     aclTotal;
 
@@ -710,7 +712,7 @@ audit_on_acl(Datum aclDatum,
        /* Check privileges granted directly to auditOid */
        for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
        {
-               AclItem *aclItem = &aclItemData[aclIndex];
+               AclItem    *aclItem = &aclItemData[aclIndex];
 
                if (aclItem->ai_grantee == auditOid &&
                        aclItem->ai_privs & mask)
@@ -731,7 +733,7 @@ audit_on_acl(Datum aclDatum,
        {
                for (aclIndex = 0; aclIndex < aclTotal; aclIndex++)
                {
-                       AclItem *aclItem = &aclItemData[aclIndex];
+                       AclItem    *aclItem = &aclItemData[aclIndex];
 
                        /* Don't test public or auditOid (it has been tested already) */
                        if (aclItem->ai_grantee == ACL_ID_PUBLIC ||
@@ -838,9 +840,9 @@ audit_on_any_attribute(Oid relOid,
                                           Bitmapset *attributeSet,
                                           AclMode mode)
 {
-       bool result = false;
-       AttrNumber col;
-       Bitmapset *tmpSet;
+       bool            result = false;
+       AttrNumber      col;
+       Bitmapset  *tmpSet;
 
        /* If bms is empty then check for any column match */
        if (bms_is_empty(attributeSet))
@@ -891,9 +893,9 @@ audit_on_any_attribute(Oid relOid,
 static void
 log_select_dml(Oid auditOid, List *rangeTabls)
 {
-       ListCell *lr;
-       bool first = true;
-       bool found = false;
+       ListCell   *lr;
+       bool            first = true;
+       bool            found = false;
 
        /* Do not log if this is an internal statement */
        if (internalStatement)
@@ -901,8 +903,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 
        foreach(lr, rangeTabls)
        {
-               Oid relOid;
-               Relation rel;
+               Oid                     relOid;
+               Relation        rel;
                RangeTblEntry *rte = lfirst(lr);
 
                /* We only care about tables, and can ignore subqueries etc. */
@@ -912,8 +914,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
                found = true;
 
                /*
-                * If we are not logging all-catalog queries (auditLogCatalog is false)
-                * then filter out any system relations here.
+                * If we are not logging all-catalog queries (auditLogCatalog is
+                * false) then filter out any system relations here.
                 */
                relOid = rte->relid;
                rel = relation_open(relOid, NoLock);
@@ -982,63 +984,72 @@ log_select_dml(Oid auditOid, List *rangeTabls)
                {
                        case RELKIND_RELATION:
                                auditEventStack->auditEvent.objectType =
-                                       OBJECT_TYPE_TABLE;
+                               OBJECT_TYPE_TABLE;
+
                                break;
 
                        case RELKIND_INDEX:
                                auditEventStack->auditEvent.objectType =
-                                       OBJECT_TYPE_INDEX;
+                               OBJECT_TYPE_INDEX;
+
                                break;
 
                        case RELKIND_SEQUENCE:
                                auditEventStack->auditEvent.objectType =
-                                       OBJECT_TYPE_SEQUENCE;
+                               OBJECT_TYPE_SEQUENCE;
+
                                break;
 
                        case RELKIND_TOASTVALUE:
                                auditEventStack->auditEvent.objectType =
-                                       OBJECT_TYPE_TOASTVALUE;
+                               OBJECT_TYPE_TOASTVALUE;
+
                                break;
 
                        case RELKIND_VIEW:
                                auditEventStack->auditEvent.objectType =
-                                       OBJECT_TYPE_VIEW;
+                               OBJECT_TYPE_VIEW;
+
                                break;
 
                        case RELKIND_COMPOSITE_TYPE:
                                auditEventStack->auditEvent.objectType =
-                                       OBJECT_TYPE_COMPOSITE_TYPE;
+                               OBJECT_TYPE_COMPOSITE_TYPE;
+
                                break;
 
                        case RELKIND_FOREIGN_TABLE:
                                auditEventStack->auditEvent.objectType =
-                                       OBJECT_TYPE_FOREIGN_TABLE;
+                               OBJECT_TYPE_FOREIGN_TABLE;
+
                                break;
 
                        case RELKIND_MATVIEW:
                                auditEventStack->auditEvent.objectType =
-                                       OBJECT_TYPE_MATVIEW;
+                               OBJECT_TYPE_MATVIEW;
+
                                break;
 
                        default:
                                auditEventStack->auditEvent.objectType =
-                                       OBJECT_TYPE_UNKNOWN;
+                               OBJECT_TYPE_UNKNOWN;
+
                                break;
                }
 
                /* Get a copy of the relation name and assign it to object name */
                auditEventStack->auditEvent.objectName =
                        quote_qualified_identifier(get_namespace_name(
-                                                                          RelationGetNamespace(rel)),
+                                                                                                 RelationGetNamespace(rel)),
                                                                           RelationGetRelationName(rel));
                relation_close(rel, NoLock);
 
                /* Perform object auditing only if the audit role is valid */
                if (auditOid != InvalidOid)
                {
-                       AclMode auditPerms =
-                               (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
-                               rte->requiredPerms;
+                       AclMode         auditPerms =
+                       (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
+                       rte->requiredPerms;
 
                        /*
                         * If any of the required permissions for the relation are granted
@@ -1104,8 +1115,8 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 
        /*
         * If no tables were found that means that RangeTbls was empty or all
-        * relations were in the system schema.  In that case still log a
-        * session record.
+        * relations were in the system schema.  In that case still log a session
+        * record.
         */
        if (!found)
        {
@@ -1123,7 +1134,7 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 static void
 log_function_execute(Oid objectId)
 {
-       HeapTuple proctup;
+       HeapTuple       proctup;
        Form_pg_proc proc;
        AuditEventStackItem *stackItem;
 
@@ -1159,6 +1170,7 @@ log_function_execute(Oid objectId)
        stackItem->auditEvent.commandTag = T_DoStmt;
        stackItem->auditEvent.command = COMMAND_EXECUTE;
        stackItem->auditEvent.objectType = OBJECT_TYPE_FUNCTION;
+
        stackItem->auditEvent.commandText = stackItem->next->auditEvent.commandText;
 
        log_audit_event(stackItem);
@@ -1236,9 +1248,9 @@ pg_audit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
                standard_ExecutorStart(queryDesc, eflags);
 
        /*
-        * Move the stack memory context to the query memory context.  This needs to
-        * be done here because the query context does not exist before the call
-        * to standard_ExecutorStart() but the stack item is required by
+        * Move the stack memory context to the query memory context.  This needs
+        * to be done here because the query context does not exist before the
+        * call to standard_ExecutorStart() but the stack item is required by
         * pg_audit_ExecutorCheckPerms_hook() which is called during
         * standard_ExecutorStart().
         */
@@ -1253,7 +1265,7 @@ pg_audit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
 static bool
 pg_audit_ExecutorCheckPerms_hook(List *rangeTabls, bool abort)
 {
-       Oid auditOid;
+       Oid                     auditOid;
 
        /* Get the audit oid if the role exists */
        auditOid = get_role_oid(auditRole, true);
@@ -1283,7 +1295,7 @@ pg_audit_ProcessUtility_hook(Node *parsetree,
                                                         char *completionTag)
 {
        AuditEventStackItem *stackItem = NULL;
-       int64 stackId = 0;
+       int64           stackId = 0;
 
        /*
         * Don't audit substatements.  All the substatements we care about should
@@ -1328,19 +1340,22 @@ pg_audit_ProcessUtility_hook(Node *parsetree,
                                                                params, dest, completionTag);
 
        /*
-        * Process the audit event if there is one.  Also check that this event was
-        * not popped off the stack by a memory context being free'd elsewhere.
+        * Process the audit event if there is one.  Also check that this event
+        * was not popped off the stack by a memory context being free'd
+        * elsewhere.
         */
        if (stackItem && !IsAbortedTransactionBlockState())
        {
                /*
-                * Make sure the item we want to log is still on the stack - if not then
-                * something has gone wrong and an error will be raised.
+                * Make sure the item we want to log is still on the stack - if not
+                * then something has gone wrong and an error will be raised.
                 */
                stack_valid(stackId);
 
-               /* Log the utility command if logging is on, the command has not already
-                * been logged by another hook, and the transaction is not aborted.
+               /*
+                * Log the utility command if logging is on, the command has not
+                * already been logged by another hook, and the transaction is not
+                * aborted.
                 */
                if (auditLogBitmap != 0 && !stackItem->auditEvent.logged)
                        log_audit_event(stackItem);
@@ -1380,11 +1395,12 @@ Datum
 pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 {
        EventTriggerData *eventData;
-       int                               result, row;
-       TupleDesc                 spiTupDesc;
-       const char               *query;
-       MemoryContext     contextQuery;
-       MemoryContext     contextOld;
+       int                     result,
+                               row;
+       TupleDesc       spiTupDesc;
+       const char *query;
+       MemoryContext contextQuery;
+       MemoryContext contextOld;
 
        /* Continue only if session DDL logging is enabled */
        if (~auditLogBitmap & LOG_DDL)
@@ -1393,7 +1409,7 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
        /* Be sure the module was loaded */
        if (!auditEventStack)
                elog(ERROR, "pg_audit not loaded before call to "
-                                       "pg_audit_ddl_command_end()");
+                        "pg_audit_ddl_command_end()");
 
        /* This is an internal statement - do not log it */
        internalStatement = true;
@@ -1404,11 +1420,11 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 
        /* Switch memory context for query */
        contextQuery = AllocSetContextCreate(
-                                       CurrentMemoryContext,
-                                       "pg_audit_func_ddl_command_end temporary context",
-                                       ALLOCSET_DEFAULT_MINSIZE,
-                                       ALLOCSET_DEFAULT_INITSIZE,
-                                       ALLOCSET_DEFAULT_MAXSIZE);
+                                                                                CurrentMemoryContext,
+                                                  "pg_audit_func_ddl_command_end temporary context",
+                                                                                ALLOCSET_DEFAULT_MINSIZE,
+                                                                                ALLOCSET_DEFAULT_INITSIZE,
+                                                                                ALLOCSET_DEFAULT_MAXSIZE);
        contextOld = MemoryContextSwitchTo(contextQuery);
 
        /* Get information about triggered events */
@@ -1423,31 +1439,32 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 
        /* Return objects affected by the (non drop) DDL statement */
        query = "SELECT UPPER(object_type), object_identity\n"
-                       "  FROM pg_event_trigger_ddl_commands()";
+               "  FROM pg_event_trigger_ddl_commands()";
 
        /* Attempt to connect */
        result = SPI_connect();
        if (result < 0)
                elog(ERROR, "pg_audit_ddl_command_end: SPI_connect returned %d",
-                                       result);
+                        result);
 
        /* Execute the query */
        result = SPI_execute(query, true, 0);
        if (result != SPI_OK_SELECT)
                elog(ERROR, "pg_audit_ddl_command_end: SPI_execute returned %d",
-                                       result);
+                        result);
 
        /* Iterate returned rows */
        spiTupDesc = SPI_tuptable->tupdesc;
        for (row = 0; row < SPI_processed; row++)
        {
-               HeapTuple  spiTuple;
+               HeapTuple       spiTuple;
 
                spiTuple = SPI_tuptable->vals[row];
 
                /* Supply object name and type for audit event */
                auditEventStack->auditEvent.objectType =
-                       SPI_getvalue(spiTuple, spiTupDesc, 1);
+               SPI_getvalue(spiTuple, spiTupDesc, 1);
+
                auditEventStack->auditEvent.objectName =
                        SPI_getvalue(spiTuple, spiTupDesc, 2);
 
@@ -1473,11 +1490,12 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 Datum
 pg_audit_sql_drop(PG_FUNCTION_ARGS)
 {
-       int                               result, row;
-       TupleDesc                 spiTupDesc;
-       const char               *query;
-       MemoryContext     contextQuery;
-       MemoryContext     contextOld;
+       int                     result,
+                               row;
+       TupleDesc       spiTupDesc;
+       const char *query;
+       MemoryContext contextQuery;
+       MemoryContext contextOld;
 
        if (~auditLogBitmap & LOG_DDL)
                PG_RETURN_NULL();
@@ -1485,7 +1503,7 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
        /* Be sure the module was loaded */
        if (!auditEventStack)
                elog(ERROR, "pg_audit not loaded before call to "
-                                       "pg_audit_sql_drop()");
+                        "pg_audit_sql_drop()");
 
        /* This is an internal statement - do not log it */
        internalStatement = true;
@@ -1496,44 +1514,45 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
 
        /* Switch memory context for the query */
        contextQuery = AllocSetContextCreate(
-                                       CurrentMemoryContext,
-                                       "pg_audit_func_ddl_command_end temporary context",
-                                       ALLOCSET_DEFAULT_MINSIZE,
-                                       ALLOCSET_DEFAULT_INITSIZE,
-                                       ALLOCSET_DEFAULT_MAXSIZE);
+                                                                                CurrentMemoryContext,
+                                                  "pg_audit_func_ddl_command_end temporary context",
+                                                                                ALLOCSET_DEFAULT_MINSIZE,
+                                                                                ALLOCSET_DEFAULT_INITSIZE,
+                                                                                ALLOCSET_DEFAULT_MAXSIZE);
        contextOld = MemoryContextSwitchTo(contextQuery);
 
        /* Return objects affected by the drop statement */
        query = "SELECT UPPER(object_type),\n"
-                       "       object_identity\n"
-                       "  FROM pg_event_trigger_dropped_objects()\n"
-                       " WHERE lower(object_type) <> 'type'\n"
-                       "   AND schema_name <> 'pg_toast'";
+               "       object_identity\n"
+               "  FROM pg_event_trigger_dropped_objects()\n"
+               " WHERE lower(object_type) <> 'type'\n"
+               "   AND schema_name <> 'pg_toast'";
 
        /* Attempt to connect */
        result = SPI_connect();
        if (result < 0)
                elog(ERROR, "pg_audit_ddl_drop: SPI_connect returned %d",
-                                       result);
+                        result);
 
        /* Execute the query */
        result = SPI_execute(query, true, 0);
        if (result != SPI_OK_SELECT)
                elog(ERROR, "pg_audit_ddl_drop: SPI_execute returned %d",
-                                       result);
+                        result);
 
        /* Iterate returned rows */
        spiTupDesc = SPI_tuptable->tupdesc;
        for (row = 0; row < SPI_processed; row++)
        {
-               HeapTuple  spiTuple;
+               HeapTuple       spiTuple;
 
                spiTuple = SPI_tuptable->vals[row];
 
                auditEventStack->auditEvent.objectType =
-                       SPI_getvalue(spiTuple, spiTupDesc, 1);
+               SPI_getvalue(spiTuple, spiTupDesc, 1);
+
                auditEventStack->auditEvent.objectName =
-                               SPI_getvalue(spiTuple, spiTupDesc, 2);
+                       SPI_getvalue(spiTuple, spiTupDesc, 2);
 
                log_audit_event(auditEventStack);
        }
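Both event-trigger functions above (pg_audit_ddl_command_end and pg_audit_sql_drop) follow the same SPI call sequence, which the hunks show only in fragments: connect, run a read-only query against a pg_event_trigger_* function, walk the result set, and disconnect. Gathered in one place, the pattern looks roughly like the sketch below, simplified and without the audit-stack bookkeeping; the wrapper name log_event_trigger_objects and the local variable names are only for the sketch, and the query text is the one from the ddl_command_end hunk.

/* Simplified sketch of the SPI sequence used by the event triggers above. */
static void
log_event_trigger_objects(void)
{
    int         result;
    int         row;
    TupleDesc   spiTupDesc;
    const char *query = "SELECT UPPER(object_type), object_identity\n"
                        "  FROM pg_event_trigger_ddl_commands()";

    if (SPI_connect() < 0)
        elog(ERROR, "SPI_connect failed");

    result = SPI_execute(query, true, 0);   /* read-only, no row limit */
    if (result != SPI_OK_SELECT)
        elog(ERROR, "SPI_execute returned %d", result);

    spiTupDesc = SPI_tuptable->tupdesc;
    for (row = 0; row < SPI_processed; row++)
    {
        HeapTuple   spiTuple = SPI_tuptable->vals[row];
        char       *objectType = SPI_getvalue(spiTuple, spiTupDesc, 1);
        char       *objectName = SPI_getvalue(spiTuple, spiTupDesc, 2);

        /* ... attach objectType/objectName to the audit event and log it ... */
    }

    SPI_finish();
}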
@@ -1562,10 +1581,10 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
 static bool
 check_pg_audit_log(char **newVal, void **extra, GucSource source)
 {
-       List *flagRawList;
-       char *rawVal;
-       ListCell *lt;
-       int *flags;
+       List       *flagRawList;
+       char       *rawVal;
+       ListCell   *lt;
+       int                *flags;
 
        /* Make sure newval is a comma-separated list of tokens. */
        rawVal = pstrdup(*newVal);
@@ -1581,18 +1600,18 @@ check_pg_audit_log(char **newVal, void **extra, GucSource source)
         * Check that we recognise each token, and add it to the bitmap we're
         * building up in a newly-allocated int *f.
         */
-       if (!(flags = (int *)malloc(sizeof(int))))
+       if (!(flags = (int *) malloc(sizeof(int))))
                return false;
 
        *flags = 0;
 
        foreach(lt, flagRawList)
        {
-               bool subtract = false;
-               int class;
+               bool            subtract = false;
+               int                     class;
 
                /* Retrieve a token */
-               char *token = (char *)lfirst(lt);
+               char       *token = (char *) lfirst(lt);
 
                /* If token is preceded by -, then the token is subtractive */
                if (strstr(token, "-") == token)
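The check hook being reindented above follows the standard GUC hook contract: the check hook validates the new string and, on success, stores a malloc()'d parsed representation through *extra; the assign hook later copies that parsed value into a static variable. A minimal sketch of the same pattern with a two-class bitmap follows; the class names "read"/"write" and the example_* function names are illustrative only, not pg_audit's class table.

static int  exampleBitmap = 0;      /* illustrative target variable */

static bool
check_example_classes(char **newVal, void **extra, GucSource source)
{
    char       *rawVal = pstrdup(*newVal);
    List       *elems;
    ListCell   *lc;
    int        *flags;

    /* SplitIdentifierString modifies its input and fills 'elems' */
    if (!SplitIdentifierString(rawVal, ',', &elems))
    {
        pfree(rawVal);
        list_free(elems);
        return false;
    }

    /* GUC 'extra' data must be malloc'd, not palloc'd */
    if (!(flags = (int *) malloc(sizeof(int))))
        return false;
    *flags = 0;

    foreach(lc, elems)
    {
        char       *token = (char *) lfirst(lc);

        if (pg_strcasecmp(token, "read") == 0)
            *flags |= 1;
        else if (pg_strcasecmp(token, "write") == 0)
            *flags |= 2;
        else
        {
            free(flags);
            return false;       /* unrecognized token */
        }
    }

    pfree(rawVal);
    list_free(elems);

    *extra = flags;
    return true;
}

static void
assign_example_classes(const char *newVal, void *extra)
{
    if (extra)
        exampleBitmap = *(int *) extra;
}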
@@ -1651,7 +1670,7 @@ static void
 assign_pg_audit_log(const char *newVal, void *extra)
 {
        if (extra)
-               auditLogBitmap = *(int *)extra;
+               auditLogBitmap = *(int *) extra;
 }
 
 /*
@@ -1662,10 +1681,10 @@ assign_pg_audit_log(const char *newVal, void *extra)
 static bool
 check_pg_audit_log_level(char **newVal, void **extra, GucSource source)
 {
-       int *logLevel;
+       int                *logLevel;
 
        /* Allocate memory to store the log level */
-       if (!(logLevel = (int *)malloc(sizeof(int))))
+       if (!(logLevel = (int *) malloc(sizeof(int))))
                return false;
 
        /* Find the log level enum */
@@ -1718,7 +1737,7 @@ static void
 assign_pg_audit_log_level(const char *newVal, void *extra)
 {
        if (extra)
-               auditLogLevel = *(int *)extra;
+               auditLogLevel = *(int *) extra;
 }
 
 /*
@@ -1729,126 +1748,126 @@ _PG_init(void)
 {
        /* Define pg_audit.log */
        DefineCustomStringVariable(
-               "pg_audit.log",
-
-               "Specifies which classes of statements will be logged by session audit "
-               "logging. Multiple classes can be provided using a comma-separated "
-               "list and classes can be subtracted by prefacing the class with a "
-               "- sign.",
-
-               NULL,
-               &auditLog,
-               "none",
-               PGC_SUSET,
-               GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
-               check_pg_audit_log,
-               assign_pg_audit_log,
-               NULL);
+                                                          "pg_audit.log",
+
+        "Specifies which classes of statements will be logged by session audit "
+                "logging. Multiple classes can be provided using a comma-separated "
+                 "list and classes can be subtracted by prefacing the class with a "
+                                                          "- sign.",
+
+                                                          NULL,
+                                                          &auditLog,
+                                                          "none",
+                                                          PGC_SUSET,
+                                                          GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
+                                                          check_pg_audit_log,
+                                                          assign_pg_audit_log,
+                                                          NULL);
 
        /* Define pg_audit.log_catalog */
        DefineCustomBoolVariable(
-               "pg_audit.log_catalog",
+                                                        "pg_audit.log_catalog",
 
                "Specifies that session logging should be enabled in the case where "
-               "all relations in a statement are in pg_catalog.  Disabling this "
-               "setting will reduce noise in the log from tools like psql and PgAdmin "
-               "that query the catalog heavily.",
+                  "all relations in a statement are in pg_catalog.  Disabling this "
+        "setting will reduce noise in the log from tools like psql and PgAdmin "
+                                                        "that query the catalog heavily.",
 
-               NULL,
-               &auditLogCatalog,
-               true,
-               PGC_SUSET,
-               GUC_NOT_IN_SAMPLE,
-               NULL, NULL, NULL);
+                                                        NULL,
+                                                        &auditLogCatalog,
+                                                        true,
+                                                        PGC_SUSET,
+                                                        GUC_NOT_IN_SAMPLE,
+                                                        NULL, NULL, NULL);
 
        /* Define pg_audit.log_level */
        DefineCustomStringVariable(
-               "pg_audit.log_level",
-
-               "Specifies the log level that will be used for log entries. This "
-               "setting is used for regression testing and may also be useful to end "
-               "users for testing or other purposes.  It is not intended to be used "
-               "in a production environment as it may leak which statements are being "
-               "logged to the user.",
-
-               NULL,
-               &auditLogLevelString,
-               "log",
-               PGC_SUSET,
-               GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
-               check_pg_audit_log_level,
-               assign_pg_audit_log_level,
-               NULL);
+                                                          "pg_audit.log_level",
+
+                  "Specifies the log level that will be used for log entries. This "
+         "setting is used for regression testing and may also be useful to end "
+          "users for testing or other purposes.  It is not intended to be used "
+        "in a production environment as it may leak which statements are being "
+                                                          "logged to the user.",
+
+                                                          NULL,
+                                                          &auditLogLevelString,
+                                                          "log",
+                                                          PGC_SUSET,
+                                                          GUC_LIST_INPUT | GUC_NOT_IN_SAMPLE,
+                                                          check_pg_audit_log_level,
+                                                          assign_pg_audit_log_level,
+                                                          NULL);
 
        /* Define pg_audit.log_parameter */
        DefineCustomBoolVariable(
-               "pg_audit.log_parameter",
+                                                        "pg_audit.log_parameter",
 
-               "Specifies that audit logging should include the parameters that were "
-               "passed with the statement. When parameters are present they will be "
-               "be included in CSV format after the statement text.",
+         "Specifies that audit logging should include the parameters that were "
+          "passed with the statement. When parameters are present they will be "
+                                          "included in CSV format after the statement text.",
 
-               NULL,
-               &auditLogParameter,
-               false,
-               PGC_SUSET,
-               GUC_NOT_IN_SAMPLE,
-               NULL, NULL, NULL);
+                                                        NULL,
+                                                        &auditLogParameter,
+                                                        false,
+                                                        PGC_SUSET,
+                                                        GUC_NOT_IN_SAMPLE,
+                                                        NULL, NULL, NULL);
 
        /* Define pg_audit.log_relation */
        DefineCustomBoolVariable(
-               "pg_audit.log_relation",
+                                                        "pg_audit.log_relation",
 
-               "Specifies whether session audit logging should create a separate log "
-               "entry for each relation referenced in a SELECT or DML statement. "
-               "This is a useful shortcut for exhaustive logging without using object "
-               "audit logging.",
+         "Specifies whether session audit logging should create a separate log "
+                 "entry for each relation referenced in a SELECT or DML statement. "
+        "This is a useful shortcut for exhaustive logging without using object "
+                                                        "audit logging.",
 
-               NULL,
-               &auditLogRelation,
-               false,
-               PGC_SUSET,
-               GUC_NOT_IN_SAMPLE,
-               NULL, NULL, NULL);
+                                                        NULL,
+                                                        &auditLogRelation,
+                                                        false,
+                                                        PGC_SUSET,
+                                                        GUC_NOT_IN_SAMPLE,
+                                                        NULL, NULL, NULL);
 
        /* Define pg_audit.log_statement_once */
        DefineCustomBoolVariable(
-               "pg_audit.log_statement_once",
-
-               "Specifies whether logging will include the statement text and "
-               "parameters with the first log entry for a statement/substatement "
-               "combination or with every entry.  Disabling this setting will result "
-               "in less verbose logging but may make it more difficult to determine "
-               "the statement that generated a log entry, though the "
-               "statement/substatement pair along with the process id should suffice "
-               "to identify the statement text logged with a previous entry.",
-
-               NULL,
-               &auditLogStatementOnce,
-               false,
-               PGC_SUSET,
-               GUC_NOT_IN_SAMPLE,
-               NULL, NULL, NULL);
+                                                        "pg_audit.log_statement_once",
+
+                        "Specifies whether logging will include the statement text and "
+                 "parameters with the first log entry for a statement/substatement "
+         "combination or with every entry.  Disabling this setting will result "
+          "in less verbose logging but may make it more difficult to determine "
+                                         "the statement that generated a log entry, though the "
+         "statement/substatement pair along with the process id should suffice "
+                         "to identify the statement text logged with a previous entry.",
+
+                                                        NULL,
+                                                        &auditLogStatementOnce,
+                                                        false,
+                                                        PGC_SUSET,
+                                                        GUC_NOT_IN_SAMPLE,
+                                                        NULL, NULL, NULL);
 
        /* Define pg_audit.role */
        DefineCustomStringVariable(
-               "pg_audit.role",
+                                                          "pg_audit.role",
 
-               "Specifies the master role to use for object audit logging.  Muliple "
-               "audit roles can be defined by granting them to the master role. This "
-               "allows multiple groups to be in charge of different aspects of audit "
-               "logging.",
+          "Specifies the master role to use for object audit logging.  Multiple "
+         "audit roles can be defined by granting them to the master role. This "
+         "allows multiple groups to be in charge of different aspects of audit "
+                                                          "logging.",
 
-               NULL,
-               &auditRole,
-               "",
-               PGC_SUSET,
-               GUC_NOT_IN_SAMPLE,
-               NULL, NULL, NULL);
+                                                          NULL,
+                                                          &auditRole,
+                                                          "",
+                                                          PGC_SUSET,
+                                                          GUC_NOT_IN_SAMPLE,
+                                                          NULL, NULL, NULL);
 
        /*
-        * Install our hook functions after saving the existing pointers to preserve
-        * the chains.
+        * Install our hook functions after saving the existing pointers to
+        * preserve the chains.
         */
        next_ExecutorStart_hook = ExecutorStart_hook;
        ExecutorStart_hook = pg_audit_ExecutorStart_hook;
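The rewrapped comment above describes the conventional way for a loadable module to stack executor and utility hooks: save whatever pointer is already installed, install your own function, and have it delegate to the saved pointer, falling back to the standard_* routine when no previous hook exists. A minimal sketch of that pattern, independent of pg_audit's own bookkeeping (the my_/prev_ names are illustrative):

static ExecutorStart_hook_type prev_ExecutorStart_hook = NULL;

static void
my_ExecutorStart_hook(QueryDesc *queryDesc, int eflags)
{
    /* per-query setup would go here, then continue the chain */
    if (prev_ExecutorStart_hook)
        prev_ExecutorStart_hook(queryDesc, eflags);
    else
        standard_ExecutorStart(queryDesc, eflags);
}

void
_PG_init(void)
{
    /* save the existing pointer so any hook installed earlier keeps working */
    prev_ExecutorStart_hook = ExecutorStart_hook;
    ExecutorStart_hook = my_ExecutorStart_hook;
}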
index 761c277c63be169f907aea827965b7c63f7ef297..6622d22f5f8d3a951664d35482bde453e4942d28 100644 (file)
@@ -34,6 +34,7 @@ typedef struct
        bool            isvalid;
        bool            isdirty;
        uint16          usagecount;
+
        /*
         * An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
         * being pinned by too many backends and each backend will only pin once
index 3cc687bdb70eb3607793569274745183057ce0a4..0eb991cdf0e86f50e182ef07b7bd2a22f7a5b0b6 100644 (file)
@@ -138,10 +138,10 @@ typedef struct Counters
 {
        int64           calls;                  /* # of times executed */
        double          total_time;             /* total execution time, in msec */
-       double      min_time;       /* minimim execution time in msec */
-       double      max_time;       /* maximum execution time in msec */
-       double      mean_time;      /* mean execution time in msec */
-       double      sum_var_time;   /* sum of variances in execution time in msec */
+       double          min_time;               /* minimum execution time in msec */
+       double          max_time;               /* maximum execution time in msec */
+       double          mean_time;              /* mean execution time in msec */
+       double          sum_var_time;   /* sum of variances in execution time in msec */
        int64           rows;                   /* total # of retrieved or affected rows */
        int64           shared_blks_hit;        /* # of shared buffer hits */
        int64           shared_blks_read;               /* # of shared disk blocks read */
@@ -1231,10 +1231,10 @@ pgss_store(const char *query, uint32 queryId,
                else
                {
                        /*
-                        * Welford's method for accurately computing variance.
-                        * See <http://www.johndcook.com/blog/standard_deviation/>
+                        * Welford's method for accurately computing variance. See
+                        * <http://www.johndcook.com/blog/standard_deviation/>
                         */
-                       double old_mean = e->counters.mean_time;
+                       double          old_mean = e->counters.mean_time;
 
                        e->counters.mean_time +=
                                (total_time - old_mean) / e->counters.calls;
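The comment rewrapped above refers to Welford's online algorithm: keep a running mean and a running sum of squared deviations (mean_time and sum_var_time in the Counters struct shown earlier) and fold each new timing into both, so no individual measurements have to be stored. The update step, with illustrative local names, is:

/* Welford update for one new observation x (illustrative names only). */
static void
welford_update(int64 *n, double *mean, double *m2, double x)
{
    double      old_mean = *mean;

    (*n)++;
    *mean += (x - old_mean) / (double) *n;
    *m2 += (x - old_mean) * (x - *mean);
}

The variance is then *m2 divided by *n (or by *n - 1 for the sample variance); the next hunk's comment explains why this code divides by the number of calls rather than calls - 1.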
@@ -1572,10 +1572,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
                        values[i++] = Float8GetDatumFast(tmp.min_time);
                        values[i++] = Float8GetDatumFast(tmp.max_time);
                        values[i++] = Float8GetDatumFast(tmp.mean_time);
+
                        /*
                         * Note we are calculating the population variance here, not the
-                        * sample variance, as we have data for the whole population,
-                        * so Bessel's correction is not used, and we don't divide by
+                        * sample variance, as we have data for the whole population, so
+                        * Bessel's correction is not used, and we don't divide by
                         * tmp.calls - 1.
                         */
                        if (tmp.calls > 1)
@@ -2687,16 +2688,16 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                        break;
                case T_OnConflictExpr:
                        {
-                               OnConflictExpr   *conf = (OnConflictExpr *) node;
+                               OnConflictExpr *conf = (OnConflictExpr *) node;
 
                                APP_JUMB(conf->action);
                                JumbleExpr(jstate, (Node *) conf->arbiterElems);
                                JumbleExpr(jstate, conf->arbiterWhere);
-                               JumbleExpr(jstate, (Node  *) conf->onConflictSet);
+                               JumbleExpr(jstate, (Node *) conf->onConflictSet);
                                JumbleExpr(jstate, conf->onConflictWhere);
                                APP_JUMB(conf->constraint);
                                APP_JUMB(conf->exclRelIndex);
-                               JumbleExpr(jstate, (Node  *) conf->exclRelTlist);
+                               JumbleExpr(jstate, (Node *) conf->exclRelTlist);
                        }
                        break;
                case T_List:
index 24eb42fa891df32a4a00f2aa8a0b403c6fb18f90..5c8355808a979093ba984d7a83ecaa176ea65dc4 100644 (file)
@@ -399,7 +399,7 @@ pgp_extract_armor_headers(const uint8 *src, unsigned len,
        char       *line;
        char       *nextline;
        char       *eol,
-                               *colon;
+                          *colon;
        int                     hlen;
        char       *buf;
        int                     hdrlines;
index d0da05cd13a24f380d9183c41d4edd28575070bc..1842985e53db053a05701597d3a935ac6d959652 100644 (file)
@@ -259,6 +259,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
                res = pgp_set_convert_crlf(ctx, atoi(val));
        else if (strcmp(key, "unicode-mode") == 0)
                res = pgp_set_unicode_mode(ctx, atoi(val));
+
        /*
         * The remaining options are for debugging/testing and are therefore not
         * documented in the user-facing docs.
@@ -834,22 +835,22 @@ static int
 parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
                                           char ***p_keys, char ***p_values)
 {
-       int             nkdims = ARR_NDIM(key_array);
-       int             nvdims = ARR_NDIM(val_array);
-       char   **keys,
-                  **values;
-       Datum  *key_datums,
-                  *val_datums;
-       bool   *key_nulls,
-                  *val_nulls;
-       int             key_count,
-                       val_count;
-       int             i;
+       int                     nkdims = ARR_NDIM(key_array);
+       int                     nvdims = ARR_NDIM(val_array);
+       char      **keys,
+                         **values;
+       Datum      *key_datums,
+                          *val_datums;
+       bool       *key_nulls,
+                          *val_nulls;
+       int                     key_count,
+                               val_count;
+       int                     i;
 
        if (nkdims > 1 || nkdims != nvdims)
                ereport(ERROR,
                                (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
-                               errmsg("wrong number of array subscripts")));
+                                errmsg("wrong number of array subscripts")));
        if (nkdims == 0)
                return 0;
 
@@ -871,7 +872,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
 
        for (i = 0; i < key_count; i++)
        {
-               char *v;
+               char       *v;
 
                /* Check that the key doesn't contain anything funny */
                if (key_nulls[i])
@@ -884,7 +885,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
                if (!string_is_ascii(v))
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("header key must not contain non-ASCII characters")));
+                               errmsg("header key must not contain non-ASCII characters")));
                if (strstr(v, ": "))
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -906,7 +907,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
                if (!string_is_ascii(v))
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("header value must not contain non-ASCII characters")));
+                         errmsg("header value must not contain non-ASCII characters")));
                if (strchr(v, '\n'))
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -1045,7 +1046,7 @@ pgp_armor_headers(PG_FUNCTION_ARGS)
                SRF_RETURN_DONE(funcctx);
        else
        {
-               char      *values[2];
+               char       *values[2];
 
                /* we assume that the keys (and values) are in UTF-8. */
                utf8key = state->keys[funcctx->call_cntr];
index 2ce429d1b201d762cf2b7319ee54d3cdd4355a08..62b8517c27c682e50c0993d9b9d74c3d97934bbc 100644 (file)
@@ -278,11 +278,11 @@ void              pgp_cfb_free(PGP_CFB *ctx);
 int                    pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
 int                    pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
 
-void           pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
-                                                        int num_headers, char **keys, char **values);
+void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
+                                int num_headers, char **keys, char **values);
 int                    pgp_armor_decode(const uint8 *src, int len, StringInfo dst);
-int                    pgp_extract_armor_headers(const uint8 *src, unsigned len,
-                                                                         int *nheaders, char ***keys, char ***values);
+int pgp_extract_armor_headers(const uint8 *src, unsigned len,
+                                                 int *nheaders, char ***keys, char ***values);
 
 int                    pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst);
 int                    pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src);
index ae5ed56f9869cc695269f85c68ad77bcbe0514ef..22c5f7a9eefeff992eae47dc539fa2b6cff5909c 100644 (file)
@@ -84,8 +84,8 @@ statapprox_heap(Relation rel, output_type *stat)
                CHECK_FOR_INTERRUPTS();
 
                /*
-                * If the page has only visible tuples, then we can find out the
-                * free space from the FSM and move on.
+                * If the page has only visible tuples, then we can find out the free
+                * space from the FSM and move on.
                 */
                if (visibilitymap_test(rel, blkno, &vmbuffer))
                {
@@ -103,8 +103,8 @@ statapprox_heap(Relation rel, output_type *stat)
                page = BufferGetPage(buf);
 
                /*
-                * It's not safe to call PageGetHeapFreeSpace() on new pages, so
-                * we treat them as being free space for our purposes.
+                * It's not safe to call PageGetHeapFreeSpace() on new pages, so we
+                * treat them as being free space for our purposes.
                 */
                if (!PageIsNew(page))
                        stat->free_space += PageGetHeapFreeSpace(page);
@@ -120,9 +120,9 @@ statapprox_heap(Relation rel, output_type *stat)
                scanned++;
 
                /*
-                * Look at each tuple on the page and decide whether it's live
-                * or dead, then count it and its size. Unlike lazy_scan_heap,
-                * we can afford to ignore problems and special cases.
+                * Look at each tuple on the page and decide whether it's live or
+                * dead, then count it and its size. Unlike lazy_scan_heap, we can
+                * afford to ignore problems and special cases.
                 */
                maxoff = PageGetMaxOffsetNumber(page);
 
@@ -179,9 +179,10 @@ statapprox_heap(Relation rel, output_type *stat)
                UnlockReleaseBuffer(buf);
        }
 
-       stat->table_len = (uint64) nblocks * BLCKSZ;
+       stat->table_len = (uint64) nblocks *BLCKSZ;
+
        stat->tuple_count = vac_estimate_reltuples(rel, false, nblocks, scanned,
-                                                                                          stat->tuple_count+misc_count);
+                                                                                        stat->tuple_count + misc_count);
 
        /*
         * Calculate percentages if the relation has one or more pages.
@@ -240,9 +241,9 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
                                 errmsg("cannot access temporary tables of other sessions")));
 
        /*
-        * We support only ordinary relations and materialised views,
-        * because we depend on the visibility map and free space map
-        * for our estimates about unscanned pages.
+        * We support only ordinary relations and materialised views, because we
+        * depend on the visibility map and free space map for our estimates about
+        * unscanned pages.
         */
        if (!(rel->rd_rel->relkind == RELKIND_RELATION ||
                  rel->rd_rel->relkind == RELKIND_MATVIEW))
@@ -268,6 +269,6 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
        values[i++] = Int64GetDatum(stat.free_space);
        values[i++] = Float8GetDatum(stat.free_percent);
 
-       ret =  heap_form_tuple(tupdesc, values, nulls);
+       ret = heap_form_tuple(tupdesc, values, nulls);
        return HeapTupleGetDatum(ret);
 }
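As the reflowed comments above describe, the function reads only pages that are not all-visible, takes the free space of skipped pages from the free space map, and then scales the tuple counts from the scanned pages up to the whole table; the real code delegates that last step to vac_estimate_reltuples. A rough sketch of the scaling idea follows; it is an assumption about the general shape of the estimate, not vac_estimate_reltuples' exact formula, and the helper name is hypothetical.

/* Hypothetical illustration of scaling a partial count to the whole table. */
static double
extrapolate_tuple_count(BlockNumber nblocks,    /* total pages in relation */
                        BlockNumber scanned,    /* pages actually read */
                        double counted_tuples)  /* tuples seen on those pages */
{
    double      tuples_per_page;

    if (scanned == 0)
        return 0.0;

    tuples_per_page = counted_tuples / (double) scanned;
    return tuples_per_page * (double) nblocks;
}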
index d420cb2d0c00e45cf820fa1e8ef39f08f6fd09fc..6da01e1d6f35fcb28bbd80ea290faf5a4c7592a2 100644 (file)
@@ -203,7 +203,7 @@ typedef struct PgFdwAnalyzeState
        /* for random sampling */
        double          samplerows;             /* # of rows fetched */
        double          rowstoskip;             /* # of rows to skip before next sample */
-       ReservoirStateData rstate;              /* state for reservoir sampling*/
+       ReservoirStateData rstate;      /* state for reservoir sampling */
 
        /* working memory contexts */
        MemoryContext anl_cxt;          /* context for per-analyze lifespan data */
index ae2aca8a8db35c78fb4b4735a2fa70a45d8c2579..32d57430185229db4662c45184d61538b61fa7b0 100644 (file)
@@ -53,16 +53,16 @@ static void pg_decode_shutdown(LogicalDecodingContext *ctx);
 static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
                                        ReorderBufferTXN *txn);
 static void pg_output_begin(LogicalDecodingContext *ctx,
-                                                       TestDecodingData *data,
-                                                       ReorderBufferTXN *txn,
-                                                       bool last_write);
+                               TestDecodingData *data,
+                               ReorderBufferTXN *txn,
+                               bool last_write);
 static void pg_decode_commit_txn(LogicalDecodingContext *ctx,
                                         ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
 static void pg_decode_change(LogicalDecodingContext *ctx,
                                 ReorderBufferTXN *txn, Relation rel,
                                 ReorderBufferChange *change);
 static bool pg_decode_filter(LogicalDecodingContext *ctx,
-                                                        RepOriginId origin_id);
+                                RepOriginId origin_id);
 
 void
 _PG_init(void)
index 14efb27f0db0cfd7f581dfe21056a0632d4140b9..e325eaff498972b46595b3be60b2f5ce92c8ed6d 100644 (file)
@@ -33,14 +33,14 @@ PG_MODULE_MAGIC;
 typedef struct
 {
        SamplerRandomState randstate;
-       uint32                  seed;                   /* random seed */
-       BlockNumber             nblocks;                /* number of block in relation */
-       int32                   ntuples;                /* number of tuples to return */
-       int32                   donetuples;             /* tuples already returned */
-       OffsetNumber    lt;                             /* last tuple returned from current block */
-       BlockNumber             step;                   /* step size */
-       BlockNumber             lb;                             /* last block visited */
-       BlockNumber             doneblocks;             /* number of already returned blocks */
+       uint32          seed;                   /* random seed */
+       BlockNumber nblocks;            /* number of blocks in relation */
+       int32           ntuples;                /* number of tuples to return */
+       int32           donetuples;             /* tuples already returned */
+       OffsetNumber lt;                        /* last tuple returned from current block */
+       BlockNumber step;                       /* step size */
+       BlockNumber lb;                         /* last block visited */
+       BlockNumber doneblocks;         /* number of already returned blocks */
 } SystemSamplerData;
 
 
@@ -60,11 +60,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
 Datum
 tsm_system_rows_init(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       uint32                          seed = PG_GETARG_UINT32(1);
-       int32                           ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
-       HeapScanDesc            scan = tsdesc->heapScan;
-       SystemSamplerData  *sampler;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       uint32          seed = PG_GETARG_UINT32(1);
+       int32           ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+       HeapScanDesc scan = tsdesc->heapScan;
+       SystemSamplerData *sampler;
 
        if (ntuples < 1)
                ereport(ERROR,
@@ -86,6 +86,7 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
 
        /* Find relative prime as step size for linear probing. */
        sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
        /*
         * Randomize start position so that blocks close to step size don't have
         * higher probability of being chosen on very short scan.
@@ -106,8 +107,8 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
        sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
        sampler->doneblocks++;
@@ -127,10 +128,10 @@ tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       OffsetNumber            maxoffset = PG_GETARG_UINT16(2);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
-       OffsetNumber            tupoffset = sampler->lt;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       OffsetNumber tupoffset = sampler->lt;
 
        if (tupoffset == InvalidOffsetNumber)
                tupoffset = FirstOffsetNumber;
@@ -152,9 +153,9 @@ tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_examinetuple(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       bool                            visible = PG_GETARG_BOOL(3);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       bool            visible = PG_GETARG_BOOL(3);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
        if (!visible)
                PG_RETURN_BOOL(false);
@@ -183,8 +184,8 @@ tsm_system_rows_end(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_reset(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
        sampler->lt = InvalidOffsetNumber;
        sampler->donetuples = 0;
@@ -203,14 +204,14 @@ tsm_system_rows_reset(PG_FUNCTION_ARGS)
 Datum
 tsm_system_rows_cost(PG_FUNCTION_ARGS)
 {
-       PlannerInfo        *root = (PlannerInfo *) PG_GETARG_POINTER(0);
-       Path               *path = (Path *) PG_GETARG_POINTER(1);
-       RelOptInfo         *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
-       List               *args = (List *) PG_GETARG_POINTER(3);
-       BlockNumber        *pages = (BlockNumber *) PG_GETARG_POINTER(4);
-       double             *tuples = (double *) PG_GETARG_POINTER(5);
-       Node               *limitnode;
-       int32                   ntuples;
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+       Path       *path = (Path *) PG_GETARG_POINTER(1);
+       RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+       List       *args = (List *) PG_GETARG_POINTER(3);
+       BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+       double     *tuples = (double *) PG_GETARG_POINTER(5);
+       Node       *limitnode;
+       int32           ntuples;
 
        limitnode = linitial(args);
        limitnode = estimate_expression_value(root, limitnode);
@@ -235,9 +236,9 @@ tsm_system_rows_cost(PG_FUNCTION_ARGS)
 
 
 static uint32
-gcd (uint32 a, uint32 b)
+gcd(uint32 a, uint32 b)
 {
-       uint32 c;
+       uint32          c;
 
        while (a != 0)
        {
@@ -253,8 +254,8 @@ static uint32
 random_relative_prime(uint32 n, SamplerRandomState randstate)
 {
        /* Pick random starting number, with some limits on what it can be. */
-       uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
-                  t;
+       uint32          r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+                               t;
 
        /*
         * This should only take 2 or 3 iterations as the probability of 2 numbers
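The gcd()/random_relative_prime() pair being reindented above exists to pick a step size that is coprime to the number of blocks: stepping through block numbers modulo nblocks by such a step visits every block exactly once before the sequence repeats, which is what the "linear probing" comments earlier in this file rely on. A small standalone illustration of that property in plain C, outside the server (names and numbers are for the example only):

#include <stdint.h>
#include <stdio.h>

/* Visit all nblocks block numbers exactly once using a step coprime to nblocks. */
static void
visit_all_blocks(uint32_t nblocks, uint32_t step, uint32_t start)
{
    uint32_t    block = start % nblocks;
    uint32_t    i;

    for (i = 0; i < nblocks; i++)
    {
        printf("visit block %u\n", (unsigned) block);
        block = (block + step) % nblocks;
    }
}

int
main(void)
{
    /* 3 is coprime to 10, so blocks 0..9 are each visited exactly once. */
    visit_all_blocks(10, 3, 7);
    return 0;
}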
index 9af9e749216a7c710b56ea1019ae0535ce09a249..7708fc07617488e9a57128a72eba9707004dc9f3 100644 (file)
@@ -35,16 +35,17 @@ PG_MODULE_MAGIC;
 typedef struct
 {
        SamplerRandomState randstate;
-       uint32                  seed;                   /* random seed */
-       BlockNumber             nblocks;                /* number of block in relation */
-       int32                   time;                   /* time limit for sampling */
-       TimestampTz             start_time;             /* start time of sampling */
-       TimestampTz             end_time;               /* end time of sampling */
-       OffsetNumber    lt;                             /* last tuple returned from current block */
-       BlockNumber             step;                   /* step size */
-       BlockNumber             lb;                             /* last block visited */
-       BlockNumber             estblocks;              /* estimated number of returned blocks (moving) */
-       BlockNumber             doneblocks;             /* number of already returned blocks */
+       uint32          seed;                   /* random seed */
+       BlockNumber nblocks;            /* number of blocks in relation */
+       int32           time;                   /* time limit for sampling */
+       TimestampTz start_time;         /* start time of sampling */
+       TimestampTz end_time;           /* end time of sampling */
+       OffsetNumber lt;                        /* last tuple returned from current block */
+       BlockNumber step;                       /* step size */
+       BlockNumber lb;                         /* last block visited */
+       BlockNumber estblocks;          /* estimated number of returned blocks
+                                                                * (moving) */
+       BlockNumber doneblocks;         /* number of already returned blocks */
 } SystemSamplerData;
 
 
@@ -63,11 +64,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
 Datum
 tsm_system_time_init(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       uint32                          seed = PG_GETARG_UINT32(1);
-       int32                           time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
-       HeapScanDesc            scan = tsdesc->heapScan;
-       SystemSamplerData  *sampler;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       uint32          seed = PG_GETARG_UINT32(1);
+       int32           time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
+       HeapScanDesc scan = tsdesc->heapScan;
+       SystemSamplerData *sampler;
 
        if (time < 1)
                ereport(ERROR,
@@ -92,6 +93,7 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
 
        /* Find relative prime as step size for linear probing. */
        sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
+
        /*
         * Randomize start position so that blocks close to step size don't have
         * higher probability of being chosen on very short scan.
@@ -111,8 +113,8 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
 Datum
 tsm_system_time_nextblock(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
        sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
        sampler->doneblocks++;
@@ -125,16 +127,16 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
         * Update the estimations for time limit at least 10 times per estimated
         * number of returned blocks to handle variations in block read speed.
         */
-       if (sampler->doneblocks % Max(sampler->estblocks/10, 1) == 0)
+       if (sampler->doneblocks % Max(sampler->estblocks / 10, 1) == 0)
        {
-               TimestampTz     now = GetCurrentTimestamp();
-               long        secs;
-               int         usecs;
+               TimestampTz now = GetCurrentTimestamp();
+               long            secs;
+               int                     usecs;
                int                     usecs_remaining;
                int                     time_per_block;
 
                TimestampDifference(sampler->start_time, now, &secs, &usecs);
-               usecs += (int) secs * 1000000;
+               usecs += (int) secs *1000000;
 
                time_per_block = usecs / sampler->doneblocks;
 
@@ -144,7 +146,7 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
                        PG_RETURN_UINT32(InvalidBlockNumber);
 
                /* Remaining microseconds */
-               usecs_remaining = usecs + (int) secs * 1000000;
+               usecs_remaining = usecs + (int) secs *1000000;
 
                /* Recalculate estimated returned number of blocks */
                if (time_per_block < usecs_remaining && time_per_block > 0)
@@ -161,10 +163,10 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
 Datum
 tsm_system_time_nexttuple(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       OffsetNumber            maxoffset = PG_GETARG_UINT16(2);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
-       OffsetNumber            tupoffset = sampler->lt;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       OffsetNumber tupoffset = sampler->lt;
 
        if (tupoffset == InvalidOffsetNumber)
                tupoffset = FirstOffsetNumber;
@@ -198,8 +200,8 @@ tsm_system_time_end(PG_FUNCTION_ARGS)
 Datum
 tsm_system_time_reset(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
        sampler->lt = InvalidOffsetNumber;
        sampler->start_time = GetCurrentTimestamp();
@@ -221,18 +223,18 @@ tsm_system_time_reset(PG_FUNCTION_ARGS)
 Datum
 tsm_system_time_cost(PG_FUNCTION_ARGS)
 {
-       PlannerInfo        *root = (PlannerInfo *) PG_GETARG_POINTER(0);
-       Path               *path = (Path *) PG_GETARG_POINTER(1);
-       RelOptInfo         *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
-       List               *args = (List *) PG_GETARG_POINTER(3);
-       BlockNumber        *pages = (BlockNumber *) PG_GETARG_POINTER(4);
-       double             *tuples = (double *) PG_GETARG_POINTER(5);
-       Node               *limitnode;
-       int32                   time;
-       BlockNumber             relpages;
-       double                  reltuples;
-       double                  density;
-       double                  spc_random_page_cost;
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+       Path       *path = (Path *) PG_GETARG_POINTER(1);
+       RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+       List       *args = (List *) PG_GETARG_POINTER(3);
+       BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+       double     *tuples = (double *) PG_GETARG_POINTER(5);
+       Node       *limitnode;
+       int32           time;
+       BlockNumber relpages;
+       double          reltuples;
+       double          density;
+       double          spc_random_page_cost;
 
        limitnode = linitial(args);
        limitnode = estimate_expression_value(root, limitnode);
@@ -269,10 +271,10 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
        /*
         * Assumption here is that we'll never read less than 1% of table pages,
         * this is here mainly because it is much less bad to overestimate than
-        * underestimate and using just spc_random_page_cost will probably lead
-        * to underestimations in general.
+        * underestimate and using just spc_random_page_cost will probably lead to
+        * underestimations in general.
         */
-       *pages = Min(baserel->pages, Max(time/spc_random_page_cost, baserel->pages/100));
+       *pages = Min(baserel->pages, Max(time / spc_random_page_cost, baserel->pages / 100));
        *tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
        path->rows = *tuples;
 
@@ -280,9 +282,9 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
 }
 
 static uint32
-gcd (uint32 a, uint32 b)
+gcd(uint32 a, uint32 b)
 {
-       uint32 c;
+       uint32          c;
 
        while (a != 0)
        {
@@ -298,8 +300,8 @@ static uint32
 random_relative_prime(uint32 n, SamplerRandomState randstate)
 {
        /* Pick random starting number, with some limits on what it can be. */
-       uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
-                  t;
+       uint32          r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
+                               t;
 
        /*
         * This should only take 2 or 3 iterations as the probability of 2 numbers
index 199512551e5d381765d9daa349b3c207e8e21086..ff18b220c2b8f9605a0ca07c1e9e82d5d95e9892 100644 (file)
@@ -387,7 +387,7 @@ bringetbitmap(PG_FUNCTION_ARGS)
                                         */
                                        Assert((key->sk_flags & SK_ISNULL) ||
                                                   (key->sk_collation ==
-                                          bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
+                                         bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
 
                                        /* First time this column? look up consistent function */
                                        if (consistentFn[keyattno - 1].fn_oid == InvalidOid)
@@ -523,10 +523,10 @@ brinbuildCallback(Relation index,
        thisblock = ItemPointerGetBlockNumber(&htup->t_self);
 
        /*
-        * If we're in a block that belongs to a future range, summarize what we've
-        * got and start afresh.  Note the scan might have skipped many pages,
-        * if they were devoid of live tuples; make sure to insert index tuples
-        * for those too.
+        * If we're in a block that belongs to a future range, summarize what
+        * we've got and start afresh.  Note the scan might have skipped many
+        * pages, if they were devoid of live tuples; make sure to insert index
+        * tuples for those too.
         */
        while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
        {
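The rewrapped comment above describes the build callback's catch-up behaviour: whenever the heap scan has moved beyond the end of the block range currently being summarized, that range, and any ranges the scan skipped entirely, gets its index tuple emitted before the new heap tuple is folded in. The control flow amounts to the loop below, where summarize_current_range() is a hypothetical stand-in for the real form-and-insert step:

/* Sketch of the catch-up loop; summarize_current_range() is hypothetical. */
while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
{
    /* emit an index tuple for the range that just ended (possibly empty) */
    summarize_current_range(state);

    /* advance to the next block range */
    state->bs_currRangeStart += state->bs_pagesPerRange;
}
/* now fold the current heap tuple's values into the open range */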
@@ -660,7 +660,6 @@ brinbuild(PG_FUNCTION_ARGS)
 Datum
 brinbuildempty(PG_FUNCTION_ARGS)
 {
-
        Relation        index = (Relation) PG_GETARG_POINTER(0);
        Buffer          metabuf;
 
@@ -696,7 +695,7 @@ brinbulkdelete(PG_FUNCTION_ARGS)
 {
        /* other arguments are not currently used */
        IndexBulkDeleteResult *stats =
-               (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+       (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
 
        /* allocate stats if first time through, else re-use existing struct */
        if (stats == NULL)
@@ -714,7 +713,7 @@ brinvacuumcleanup(PG_FUNCTION_ARGS)
 {
        IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
        IndexBulkDeleteResult *stats =
-               (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+       (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
        Relation        heapRel;
 
        /* No-op in ANALYZE ONLY mode */
@@ -900,7 +899,7 @@ terminate_brin_buildstate(BrinBuildState *state)
 
                page = BufferGetPage(state->bs_currentInsertBuf);
                RecordPageWithFreeSpace(state->bs_irel,
-                                                               BufferGetBlockNumber(state->bs_currentInsertBuf),
+                                                       BufferGetBlockNumber(state->bs_currentInsertBuf),
                                                                PageGetFreeSpace(page));
                ReleaseBuffer(state->bs_currentInsertBuf);
        }
index 1f0bc7fdb1f26fae7d0ed7e35f415fe26e46a86f..803b07f10a91351d49dd4720a4afc9be17ef6396 100644 (file)
  * 0 - the union of the values in the block range
  * 1 - whether an empty value is present in any tuple in the block range
  * 2 - whether the values in the block range cannot be merged (e.g. an IPv6
- *     address amidst IPv4 addresses).
+ *        address amidst IPv4 addresses).
  */
-#define        INCLUSION_UNION                         0
-#define        INCLUSION_UNMERGEABLE           1
-#define        INCLUSION_CONTAINS_EMPTY        2
+#define INCLUSION_UNION                                0
+#define INCLUSION_UNMERGEABLE          1
+#define INCLUSION_CONTAINS_EMPTY       2
 
 
 typedef struct InclusionOpaque
@@ -294,22 +294,22 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
        unionval = column->bv_values[INCLUSION_UNION];
        switch (key->sk_strategy)
        {
-               /*
-                * Placement strategies
-                *
-                * These are implemented by logically negating the result of the
-                * converse placement operator; for this to work, the converse operator
-                * must be part of the opclass.  An error will be thrown by
-                * inclusion_get_strategy_procinfo() if the required strategy is not
-                * part of the opclass.
-                *
-                * These all return false if either argument is empty, so there is
-                * no need to check for empty elements.
-                */
+                       /*
+                        * Placement strategies
+                        *
+                        * These are implemented by logically negating the result of the
+                        * converse placement operator; for this to work, the converse
+                        * operator must be part of the opclass.  An error will be thrown
+                        * by inclusion_get_strategy_procinfo() if the required strategy
+                        * is not part of the opclass.
+                        *
+                        * These all return false if either argument is empty, so there is
+                        * no need to check for empty elements.
+                        */
 
                case RTLeftStrategyNumber:
                        finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
-                                                                                                       RTOverRightStrategyNumber);
+                                                                                                 RTOverRightStrategyNumber);
                        result = FunctionCall2Coll(finfo, colloid, unionval, query);
                        PG_RETURN_BOOL(!DatumGetBool(result));
 
@@ -333,7 +333,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
 
                case RTBelowStrategyNumber:
                        finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
-                                                                                                       RTOverAboveStrategyNumber);
+                                                                                                 RTOverAboveStrategyNumber);
                        result = FunctionCall2Coll(finfo, colloid, unionval, query);
                        PG_RETURN_BOOL(!DatumGetBool(result));
 
@@ -351,7 +351,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
 
                case RTAboveStrategyNumber:
                        finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
-                                                                                                       RTOverBelowStrategyNumber);
+                                                                                                 RTOverBelowStrategyNumber);
                        result = FunctionCall2Coll(finfo, colloid, unionval, query);
                        PG_RETURN_BOOL(!DatumGetBool(result));
 
@@ -381,8 +381,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
                         * strategies because some elements can be contained even though
                         * the union is not; instead we use the overlap operator.
                         *
-                        * We check for empty elements separately as they are not merged to
-                        * the union but contained by everything.
+                        * We check for empty elements separately as they are not merged
+                        * to the union but contained by everything.
                         */
 
                case RTContainedByStrategyNumber:
@@ -400,8 +400,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
                        /*
                         * Adjacent strategy
                         *
-                        * We test for overlap first but to be safe we need to call
-                        * the actual adjacent operator also.
+                        * We test for overlap first but to be safe we need to call the
+                        * actual adjacent operator also.
                         *
                         * An empty element cannot be adjacent to any other, so there is
                         * no need to check for it.
@@ -426,8 +426,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
                         * the contains operator.  Generally, inequality strategies do not
                         * make much sense for the types which will be used with the
                         * inclusion BRIN family of opclasses, but it is possible to
-                        * implement them with logical negation of the left-of and right-of
-                        * operators.
+                        * implement them with logical negation of the left-of and
+                        * right-of operators.
                         *
                         * NB: These strategies cannot be used with geometric datatypes
                         * that use comparison of areas!  The only exception is the "same"
index b105f980eca50388cab299ef71253cb4fcb57261..7cd98887c0ffe2985e85ebd97a810a681db21b5f 100644 (file)
@@ -33,7 +33,7 @@ Datum         brin_minmax_add_value(PG_FUNCTION_ARGS);
 Datum          brin_minmax_consistent(PG_FUNCTION_ARGS);
 Datum          brin_minmax_union(PG_FUNCTION_ARGS);
 static FmgrInfo *minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno,
-                                       Oid subtype, uint16 strategynum);
+                                                        Oid subtype, uint16 strategynum);
 
 
 Datum
@@ -209,7 +209,7 @@ brin_minmax_consistent(PG_FUNCTION_ARGS)
                                break;
                        /* max() >= scankey */
                        finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
-                                                                                                BTGreaterEqualStrategyNumber);
+                                                                                          BTGreaterEqualStrategyNumber);
                        matches = FunctionCall2Coll(finfo, colloid, column->bv_values[1],
                                                                                value);
                        break;
@@ -260,10 +260,10 @@ brin_minmax_union(PG_FUNCTION_ARGS)
        attr = bdesc->bd_tupdesc->attrs[attno - 1];
 
        /*
-        * Adjust "allnulls".  If A doesn't have values, just copy the values
-        * from B into A, and we're done.  We cannot run the operators in this
-        * case, because values in A might contain garbage.  Note we already
-        * established that B contains values.
+        * Adjust "allnulls".  If A doesn't have values, just copy the values from
+        * B into A, and we're done.  We cannot run the operators in this case,
+        * because values in A might contain garbage.  Note we already established
+        * that B contains values.
         */
        if (col_a->bv_allnulls)
        {
@@ -355,7 +355,7 @@ minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno, Oid subtype,
                                 strategynum, attr->atttypid, subtype, opfamily);
 
                oprid = DatumGetObjectId(SysCacheGetAttr(AMOPSTRATEGY, tuple,
-                                                                                                Anum_pg_amop_amopopr, &isNull));
+                                                                                        Anum_pg_amop_amopopr, &isNull));
                ReleaseSysCache(tuple);
                Assert(!isNull && RegProcedureIsValid(oprid));
 
index 80795eca65089f24d2ae96c5845271cb2cd51ec8..62d440f76b8497fee1abfa3de1079ff534ab588f 100644 (file)
@@ -48,7 +48,7 @@ struct BrinRevmap
 {
        Relation        rm_irel;
        BlockNumber rm_pagesPerRange;
-       BlockNumber rm_lastRevmapPage; /* cached from the metapage */
+       BlockNumber rm_lastRevmapPage;          /* cached from the metapage */
        Buffer          rm_metaBuf;
        Buffer          rm_currBuf;
 };
@@ -57,7 +57,7 @@ struct BrinRevmap
 
 
 static BlockNumber revmap_get_blkno(BrinRevmap *revmap,
-                                 BlockNumber heapBlk);
+                                BlockNumber heapBlk);
 static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
 static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap,
                                                        BlockNumber heapBlk);
@@ -110,7 +110,7 @@ brinRevmapTerminate(BrinRevmap *revmap)
 void
 brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
 {
-       BlockNumber     mapBlk PG_USED_FOR_ASSERTS_ONLY;
+       BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
 
        mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);
 
@@ -245,7 +245,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
                if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
                        ereport(ERROR,
                                        (errcode(ERRCODE_INDEX_CORRUPTED),
-                                        errmsg_internal("corrupted BRIN index: inconsistent range map")));
+                       errmsg_internal("corrupted BRIN index: inconsistent range map")));
                previptr = *iptr;
 
                blk = ItemPointerGetBlockNumber(iptr);
@@ -356,7 +356,7 @@ revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)
 static BlockNumber
 revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
 {
-       BlockNumber     targetblk;
+       BlockNumber targetblk;
 
        /* obtain revmap block number, skip 1 for metapage block */
        targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
@@ -445,10 +445,10 @@ revmap_physical_extend(BrinRevmap *revmap)
        if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
                ereport(ERROR,
                                (errcode(ERRCODE_INDEX_CORRUPTED),
-                                errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
-                                               BrinPageType(page),
-                                               RelationGetRelationName(irel),
-                                               BufferGetBlockNumber(buf))));
+                 errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
+                                BrinPageType(page),
+                                RelationGetRelationName(irel),
+                                BufferGetBlockNumber(buf))));
 
        /* If the page is in use, evacuate it and restart */
        if (brin_start_evacuating_page(irel, buf))
index 22ce74a4f43f7af5f587ada7afb1bc9c3ba210c5..72356c066c72faacda2bce4b97b7c2027a72cab8 100644 (file)
@@ -68,7 +68,7 @@ brtuple_disk_tupdesc(BrinDesc *brdesc)
                {
                        for (j = 0; j < brdesc->bd_info[i]->oi_nstored; j++)
                                TupleDescInitEntry(tupdesc, attno++, NULL,
-                                                                  brdesc->bd_info[i]->oi_typcache[j]->type_id,
+                                                                brdesc->bd_info[i]->oi_typcache[j]->type_id,
                                                                   -1, 0);
                }
 
index 3e2b8b5fedf05a68be4368d78a3d87ddc5cf679d..54b2db88a688c81b4da26faf89b89765119b9ffc 100644 (file)
@@ -1785,7 +1785,8 @@ gingetbitmap(PG_FUNCTION_ARGS)
        /*
         * Set up the scan keys, and check for unsatisfiable query.
         */
-       ginFreeScanKeys(so); /* there should be no keys yet, but just to be sure */
+       ginFreeScanKeys(so);            /* there should be no keys yet, but just to be
+                                                                * sure */
        ginNewScanKey(scan);
 
        if (GinIsVoidRes(scan))
index 445466b4477a5a2213a8a959b504de51efe27a5f..cb4e32fe66b07a7e34efec3e8a1b9bb29b8a02bd 100644 (file)
@@ -527,7 +527,7 @@ ginoptions(PG_FUNCTION_ARGS)
        static const relopt_parse_elt tab[] = {
                {"fastupdate", RELOPT_TYPE_BOOL, offsetof(GinOptions, useFastUpdate)},
                {"gin_pending_list_limit", RELOPT_TYPE_INT, offsetof(GinOptions,
-                                                                                                                               pendingListCleanupSize)}
+                                                                                                        pendingListCleanupSize)}
        };
 
        options = parseRelOptions(reloptions, validate, RELOPT_KIND_GIN,
index 96b7701633f2013d36884765b137b2a16be56bc6..0e499598a428c8859168f8665662049797109f10 100644 (file)
@@ -1407,7 +1407,7 @@ initGISTstate(Relation index)
                /* opclasses are not required to provide a Fetch method */
                if (OidIsValid(index_getprocid(index, i + 1, GIST_FETCH_PROC)))
                        fmgr_info_copy(&(giststate->fetchFn[i]),
-                                                index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
+                                                  index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
                                                   scanCxt);
                else
                        giststate->fetchFn[i].fn_oid = InvalidOid;
index beb402357c0924889c09794435b55181c5de0f70..ad392948756e6df3e158d26588650b1331a820b5 100644 (file)
@@ -154,8 +154,8 @@ gistrescan(PG_FUNCTION_ARGS)
        }
 
        /*
-        * If we're doing an index-only scan, on the first call, also initialize
-        * tuple descriptor to represent the returned index tuples and create a
+        * If we're doing an index-only scan, on the first call, also initialize a
+        * tuple descriptor to represent the returned index tuples and create a
         * memory context to hold them during the scan.
         */
        if (scan->xs_want_itup && !scan->xs_itupdesc)
@@ -169,7 +169,7 @@ gistrescan(PG_FUNCTION_ARGS)
                 * descriptor. Instead, construct a descriptor with the original data
                 * types.
                 */
-               natts =  RelationGetNumberOfAttributes(scan->indexRelation);
+               natts = RelationGetNumberOfAttributes(scan->indexRelation);
                so->giststate->fetchTupdesc = CreateTemplateTupleDesc(natts, false);
                for (attno = 1; attno <= natts; attno++)
                {
@@ -288,9 +288,9 @@ gistrescan(PG_FUNCTION_ARGS)
                        fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
 
                        /*
-                        * Look up the datatype returned by the original ordering operator.
-                        * GiST always uses a float8 for the distance function, but the
-                        * ordering operator could be anything else.
+                        * Look up the datatype returned by the original ordering
+                        * operator. GiST always uses a float8 for the distance function,
+                        * but the ordering operator could be anything else.
                         *
                         * XXX: The distance function is only allowed to be lossy if the
                         * ordering operator's result type is float4 or float8.  Otherwise
index bf9fbf30a8b0f30d970acefbf79b722ba15a8c31..7d596a3e2e68c2b0be7b7f4e89ea5940579803b1 100644 (file)
@@ -583,7 +583,7 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
                                                  isleaf);
                        cep = (GISTENTRY *)
                                DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[i],
-                                                                                  giststate->supportCollation[i],
+                                                                                         giststate->supportCollation[i],
                                                                                                  PointerGetDatum(&centry)));
                        compatt[i] = cep->key;
                }
index cb86a4fa3e6a73ef5cbdb164a7b5b82307fce47e..caacc105d25c92118a3d6143f2b98914a3644e7e 100644 (file)
@@ -80,7 +80,7 @@ bool          synchronize_seqscans = true;
 static HeapScanDesc heap_beginscan_internal(Relation relation,
                                                Snapshot snapshot,
                                                int nkeys, ScanKey key,
-                                               bool allow_strat, bool allow_sync, bool allow_pagemode,
+                                         bool allow_strat, bool allow_sync, bool allow_pagemode,
                                                bool is_bitmapscan, bool is_samplescan,
                                                bool temp_snap);
 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
@@ -1366,8 +1366,8 @@ heap_beginscan_sampling(Relation relation, Snapshot snapshot,
 static HeapScanDesc
 heap_beginscan_internal(Relation relation, Snapshot snapshot,
                                                int nkeys, ScanKey key,
-                                               bool allow_strat, bool allow_sync, bool allow_pagemode,
-                                               bool is_bitmapscan, bool is_samplescan, bool temp_snap)
+                                         bool allow_strat, bool allow_sync, bool allow_pagemode,
+                                         bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 {
        HeapScanDesc scan;
 
@@ -2284,9 +2284,9 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
 {
        /*
         * For now, parallel operations are required to be strictly read-only.
-        * Unlike heap_update() and heap_delete(), an insert should never create
-        * a combo CID, so it might be possible to relax this restriction, but
-        * not without more thought and testing.
+        * Unlike heap_update() and heap_delete(), an insert should never create a
+        * combo CID, so it might be possible to relax this restriction, but not
+        * without more thought and testing.
         */
        if (IsInParallelMode())
                ereport(ERROR,
@@ -2768,8 +2768,8 @@ l1:
                infomask = tp.t_data->t_infomask;
 
                /*
-                * Sleep until concurrent transaction ends -- except when there's a single
-                * locker and it's our own transaction.  Note we don't care
+                * Sleep until concurrent transaction ends -- except when there's a
+                * single locker and it's our own transaction.  Note we don't care
                 * which lock mode the locker has, because we need the strongest one.
                 *
                 * Before sleeping, we need to acquire tuple lock to establish our
@@ -2822,8 +2822,8 @@ l1:
                else if (!TransactionIdIsCurrentTransactionId(xwait))
                {
                        /*
-                        * Wait for regular transaction to end; but first, acquire
-                        * tuple lock.
+                        * Wait for regular transaction to end; but first, acquire tuple
+                        * lock.
                         */
                        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
                        heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
@@ -3336,8 +3336,8 @@ l2:
                 *
                 * Before sleeping, we need to acquire tuple lock to establish our
                 * priority for the tuple (see heap_lock_tuple).  LockTuple will
-                * release us when we are next-in-line for the tuple.  Note we must not
-                * acquire the tuple lock until we're sure we're going to sleep;
+                * release us when we are next-in-line for the tuple.  Note we must
+                * not acquire the tuple lock until we're sure we're going to sleep;
                 * otherwise we're open for race conditions with other transactions
                 * holding the tuple lock which sleep on us.
                 *
@@ -3374,8 +3374,8 @@ l2:
                                 */
                                if (xmax_infomask_changed(oldtup.t_data->t_infomask,
                                                                                  infomask) ||
-                                       !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
-                                                                                xwait))
+                               !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+                                                                        xwait))
                                        goto l2;
                        }
 
@@ -3425,9 +3425,9 @@ l2:
                else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
                {
                        /*
-                        * If it's just a key-share locker, and we're not changing the
-                        * key columns, we don't need to wait for it to end; but we
-                        * need to preserve it as locker.
+                        * If it's just a key-share locker, and we're not changing the key
+                        * columns, we don't need to wait for it to end; but we need to
+                        * preserve it as locker.
                         */
                        checked_lockers = true;
                        locker_remains = true;
@@ -3436,8 +3436,8 @@ l2:
                else
                {
                        /*
-                        * Wait for regular transaction to end; but first, acquire
-                        * tuple lock.
+                        * Wait for regular transaction to end; but first, acquire tuple
+                        * lock.
                         */
                        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
                        heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
@@ -3454,7 +3454,7 @@ l2:
                         */
                        if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
                                !TransactionIdEquals(xwait,
-                                                                        HeapTupleHeaderGetRawXmax(oldtup.t_data)))
+                                                                  HeapTupleHeaderGetRawXmax(oldtup.t_data)))
                                goto l2;
 
                        /* Otherwise check if it committed or aborted */
@@ -3779,7 +3779,7 @@ l2:
                HeapTupleClearHeapOnly(newtup);
        }
 
-       RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
+       RelationPutHeapTuple(relation, newbuf, heaptup, false);         /* insert new tuple */
 
        if (!already_marked)
        {
@@ -4477,7 +4477,7 @@ l3:
                if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
                        TransactionIdIsCurrentTransactionId(xwait))
                {
-                        /* ... but if the xmax changed in the meantime, start over */
+                       /* ... but if the xmax changed in the meantime, start over */
                        LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
                        if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
                                !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4501,8 +4501,8 @@ l3:
                         * for the tuple.  We must do this even if we are share-locking.
                         *
                         * If we are forced to "start over" below, we keep the tuple lock;
-                        * this arranges that we stay at the head of the line while rechecking
-                        * tuple state.
+                        * this arranges that we stay at the head of the line while
+                        * rechecking tuple state.
                         */
                        if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
                                                                          &have_tuple_lock))
@@ -4530,11 +4530,11 @@ l3:
                                {
                                        case LockWaitBlock:
                                                MultiXactIdWait((MultiXactId) xwait, status, infomask,
-                                                                               relation, &tuple->t_self, XLTW_Lock, NULL);
+                                                                 relation, &tuple->t_self, XLTW_Lock, NULL);
                                                break;
                                        case LockWaitSkip:
                                                if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
-                                                                                                               status, infomask, relation,
+                                                                                                 status, infomask, relation,
                                                                                                                NULL))
                                                {
                                                        result = HeapTupleWouldBlock;
@@ -4545,12 +4545,12 @@ l3:
                                                break;
                                        case LockWaitError:
                                                if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
-                                                                                                               status, infomask, relation,
+                                                                                                 status, infomask, relation,
                                                                                                                NULL))
                                                        ereport(ERROR,
                                                                        (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                                                                         errmsg("could not obtain lock on row in relation \"%s\"",
-                                                                                       RelationGetRelationName(relation))));
+                                                                               RelationGetRelationName(relation))));
 
                                                break;
                                }
@@ -4588,7 +4588,7 @@ l3:
                                                        ereport(ERROR,
                                                                        (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                                                                         errmsg("could not obtain lock on row in relation \"%s\"",
-                                                                                       RelationGetRelationName(relation))));
+                                                                               RelationGetRelationName(relation))));
                                                break;
                                }
                        }
@@ -4613,9 +4613,9 @@ l3:
                        LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
 
                        /*
-                        * xwait is done, but if xwait had just locked the tuple then
-                        * some other xact could update this tuple before we get to
-                        * this point.  Check for xmax change, and start over if so.
+                        * xwait is done, but if xwait had just locked the tuple then some
+                        * other xact could update this tuple before we get to this point.
+                        * Check for xmax change, and start over if so.
                         */
                        if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
                                !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
@@ -4628,9 +4628,9 @@ l3:
                                 * Otherwise check if it committed or aborted.  Note we cannot
                                 * be here if the tuple was only locked by somebody who didn't
                                 * conflict with us; that would have been handled above.  So
-                                * that transaction must necessarily be gone by now.  But don't
-                                * check for this in the multixact case, because some locker
-                                * transactions might still be running.
+                                * that transaction must necessarily be gone by now.  But
+                                * don't check for this in the multixact case, because some
+                                * locker transactions might still be running.
                                 */
                                UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
                        }
@@ -4810,8 +4810,8 @@ heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
                        if (!ConditionalLockTupleTuplock(relation, tid, mode))
                                ereport(ERROR,
                                                (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-                                                errmsg("could not obtain lock on row in relation \"%s\"",
-                                                               RelationGetRelationName(relation))));
+                                       errmsg("could not obtain lock on row in relation \"%s\"",
+                                                  RelationGetRelationName(relation))));
                        break;
        }
        *have_tuple_lock = true;
@@ -5513,8 +5513,8 @@ heap_finish_speculative(Relation relation, HeapTuple tuple)
        MarkBufferDirty(buffer);
 
        /*
-        * Replace the speculative insertion token with a real t_ctid,
-        * pointing to itself like it does on regular tuples.
+        * Replace the speculative insertion token with a real t_ctid, pointing to
+        * itself like it does on regular tuples.
         */
        htup->t_ctid = tuple->t_self;
 
@@ -6447,23 +6447,23 @@ static bool
 DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
                                                LockTupleMode lockmode)
 {
-       bool    allow_old;
-       int             nmembers;
+       bool            allow_old;
+       int                     nmembers;
        MultiXactMember *members;
-       bool    result = false;
-       LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
+       bool            result = false;
+       LOCKMODE        wanted = tupleLockExtraInfo[lockmode].hwlock;
 
        allow_old = !(infomask & HEAP_LOCK_MASK) && HEAP_XMAX_IS_LOCKED_ONLY(infomask);
        nmembers = GetMultiXactIdMembers(multi, &members, allow_old,
                                                                         HEAP_XMAX_IS_LOCKED_ONLY(infomask));
        if (nmembers >= 0)
        {
-               int             i;
+               int                     i;
 
                for (i = 0; i < nmembers; i++)
                {
-                       TransactionId           memxid;
-                       LOCKMODE                        memlockmode;
+                       TransactionId memxid;
+                       LOCKMODE        memlockmode;
 
                        memlockmode = LOCKMODE_from_mxstatus(members[i].status);
 
@@ -7093,7 +7093,7 @@ log_heap_update(Relation reln, Buffer oldbuf,
        {
                XLogRegisterBufData(0,
                                                        ((char *) newtup->t_data) + SizeofHeapTupleHeader,
-                                                       newtup->t_len - SizeofHeapTupleHeader - suffixlen);
+                                                 newtup->t_len - SizeofHeapTupleHeader - suffixlen);
        }
        else
        {
@@ -7105,8 +7105,8 @@ log_heap_update(Relation reln, Buffer oldbuf,
                if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
                {
                        XLogRegisterBufData(0,
-                                                               ((char *) newtup->t_data) + SizeofHeapTupleHeader,
-                                                               newtup->t_data->t_hoff - SizeofHeapTupleHeader);
+                                                  ((char *) newtup->t_data) + SizeofHeapTupleHeader,
+                                                        newtup->t_data->t_hoff - SizeofHeapTupleHeader);
                }
 
                /* data after common prefix */
@@ -7289,8 +7289,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool *
                {
                        /*
                         * The OID column can appear in an index definition, but that's
-                        * OK, because we always copy the OID if present (see below). Other
-                        * system columns may not.
+                        * OK, because we always copy the OID if present (see below).
+                        * Other system columns may not.
                         */
                        if (attno == ObjectIdAttributeNumber)
                                continue;
index a9f0ca35e49e27bf8481d9b7bde682a238e2874d..6db73bf9d00774c909970a72992fc4262aa49ed7 100644 (file)
@@ -60,9 +60,9 @@ RelationPutHeapTuple(Relation relation,
        ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
 
        /*
-        * Insert the correct position into CTID of the stored tuple, too
-        * (unless this is a speculative insertion, in which case the token is
-        * held in CTID field instead)
+        * Insert the correct position into CTID of the stored tuple, too (unless
+        * this is a speculative insertion, in which case the token is held in
+        * CTID field instead)
         */
        if (!token)
        {
index e6e4d28b74fbd7eaf520dba7bc76f43d5bf1dec4..1043362f914e2dd5bab62c3c39cfa5a18b532d6e 100644 (file)
@@ -185,11 +185,11 @@ BuildIndexValueDescription(Relation indexRelation,
         * Check permissions- if the user does not have access to view all of the
         * key columns then return NULL to avoid leaking data.
         *
-        * First check if RLS is enabled for the relation.  If so, return NULL
-        * to avoid leaking data.
+        * First check if RLS is enabled for the relation.  If so, return NULL to
+        * avoid leaking data.
         *
-        * Next we need to check table-level SELECT access and then, if
-        * there is no access there, check column-level permissions.
+        * Next we need to check table-level SELECT access and then, if there is
+        * no access there, check column-level permissions.
         */
 
        /*
@@ -215,18 +215,18 @@ BuildIndexValueDescription(Relation indexRelation,
        if (aclresult != ACLCHECK_OK)
        {
                /*
-                * No table-level access, so step through the columns in the
-                * index and make sure the user has SELECT rights on all of them.
+                * No table-level access, so step through the columns in the index and
+                * make sure the user has SELECT rights on all of them.
                 */
                for (keyno = 0; keyno < idxrec->indnatts; keyno++)
                {
                        AttrNumber      attnum = idxrec->indkey.values[keyno];
 
                        /*
-                        * Note that if attnum == InvalidAttrNumber, then this is an
-                        * index based on an expression and we return no detail rather
-                        * than try to figure out what column(s) the expression includes
-                        * and if the user has SELECT rights on them.
+                        * Note that if attnum == InvalidAttrNumber, then this is an index
+                        * based on an expression and we return no detail rather than try
+                        * to figure out what column(s) the expression includes and if the
+                        * user has SELECT rights on them.
                         */
                        if (attnum == InvalidAttrNumber ||
                                pg_attribute_aclcheck(indrelid, attnum, GetUserId(),
index 4a60c5fa2c8d9bd1dcec7ff67e32c4dcda8a0864..77c2fdf90b43b6198a8ab541264092ef402ca47d 100644 (file)
@@ -160,8 +160,8 @@ top:
         */
        if (checkUnique != UNIQUE_CHECK_NO)
        {
-               TransactionId   xwait;
-               uint32                  speculativeToken;
+               TransactionId xwait;
+               uint32          speculativeToken;
 
                offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
                xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey,
@@ -171,9 +171,10 @@ top:
                {
                        /* Have to wait for the other guy ... */
                        _bt_relbuf(rel, buf);
+
                        /*
-                        * If it's a speculative insertion, wait for it to finish (ie.
-                        * to go ahead with the insertion, or kill the tuple).  Otherwise
+                        * If it's a speculative insertion, wait for it to finish (ie. to
+                        * go ahead with the insertion, or kill the tuple).  Otherwise
                         * wait for the transaction to finish as usual.
                         */
                        if (speculativeToken)
@@ -417,8 +418,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
                                                                (errcode(ERRCODE_UNIQUE_VIOLATION),
                                                                 errmsg("duplicate key value violates unique constraint \"%s\"",
                                                                                RelationGetRelationName(rel)),
-                                                                key_desc ? errdetail("Key %s already exists.",
-                                                                                                         key_desc) : 0,
+                                                          key_desc ? errdetail("Key %s already exists.",
+                                                                                                       key_desc) : 0,
                                                                 errtableconstraint(heapRel,
                                                                                         RelationGetRelationName(rel))));
                                        }
index 0f4128253f4b578081de5da21dd513b1d949fee2..6e65db91eb564b4404c72ce83b44d03007afa706 100644 (file)
@@ -1233,6 +1233,7 @@ _bt_pagedel(Relation rel, Buffer buf)
                                        lbuf = _bt_getbuf(rel, leftsib, BT_READ);
                                        lpage = BufferGetPage(lbuf);
                                        lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
+
                                        /*
                                         * If the left sibling is split again by another backend,
                                         * after we released the lock, we know that the first
@@ -1345,11 +1346,11 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
        leafrightsib = opaque->btpo_next;
 
        /*
-        * Before attempting to lock the parent page, check that the right
-        * sibling is not in half-dead state.  A half-dead right sibling would
-        * have no downlink in the parent, which would be highly confusing later
-        * when we delete the downlink that follows the current page's downlink.
-        * (I believe the deletion would work correctly, but it would fail the
+        * Before attempting to lock the parent page, check that the right sibling
+        * is not in half-dead state.  A half-dead right sibling would have no
+        * downlink in the parent, which would be highly confusing later when we
+        * delete the downlink that follows the current page's downlink. (I
+        * believe the deletion would work correctly, but it would fail the
         * cross-check we make that the following downlink points to the right
         * sibling of the delete page.)
         */
index c2d52faa9602e30c49fb52b469e6a02a78f550b2..9431ab5d042811a45fd53df2918c34abac3249f1 100644 (file)
@@ -40,9 +40,8 @@ typedef struct
        BTSpool    *spool;
 
        /*
-        * spool2 is needed only when the index is a unique index. Dead tuples
-        * are put into spool2 instead of spool in order to avoid uniqueness
-        * check.
+        * spool2 is needed only when the index is a unique index. Dead tuples are
+        * put into spool2 instead of spool in order to avoid uniqueness check.
         */
        BTSpool    *spool2;
        double          indtuples;
index cfb1d64f86aed34a0a3103df15d886d080612c2f..d69a0577a87cdd3ed9797f3bf47f0a22272c1de9 100644 (file)
@@ -1027,10 +1027,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
                offnum = OffsetNumberPrev(offnum);
 
        /*
-        * By here the scan position is now set for the first key.  If all
-        * further tuples are expected to match we set the SK_BT_MATCHED flag
-        * to avoid re-checking the scan key later.  This is a big win for
-        * slow key matches though it is still significant even for fast datatypes.
+        * By here the scan position is now set for the first key.  If all further
+        * tuples are expected to match we set the SK_BT_MATCHED flag to avoid
+        * re-checking the scan key later.  This is a big win for slow key matches
+        * though it is still significant even for fast datatypes.
         */
        switch (startKeys[0]->sk_strategy)
        {
index 625f490af80367a18253e9d6255066ae0944ca6b..f95f67ad4b5ceb7f9af4fb0e12ebd115b0a6e336 100644 (file)
@@ -742,7 +742,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
                        {
                                for (i = 1; i <= keysz; i++)
                                {
-                                       SortSupport     entry;
+                                       SortSupport entry;
                                        Datum           attrDatum1,
                                                                attrDatum2;
                                        bool            isNull1,
index d1589f05eff95a6e8bb28e1eec0e47d36a84badb..91331bad651506d91b20ccf94d5fc4015b88eabf 100644 (file)
@@ -1430,8 +1430,8 @@ _bt_checkkeys(IndexScanDesc scan,
                Datum           test;
 
                /*
-                * If the scan key has already matched we can skip this key, as
-                * long as the index tuple does not contain NULL values.
+                * If the scan key has already matched we can skip this key, as long
+                * as the index tuple does not contain NULL values.
                 */
                if (key->sk_flags & SK_BT_MATCHED && !IndexTupleHasNulls(tuple))
                        continue;
@@ -1740,7 +1740,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
  * any items from the page, and so there is no need to search left from the
  * recorded offset.  (This observation also guarantees that the item is still
  * the right one to delete, which might otherwise be questionable since heap
- * TIDs can get recycled.)  This holds true even if the page has been modified
+ * TIDs can get recycled.)     This holds true even if the page has been modified
  * by inserts and page splits, so there is no need to consult the LSN.
  *
  * If the pin was released after reading the page, then we re-read it.  If it
index 088fd1bc8b684fc36e08a74f056d1951dce65b18..59975eae9a670875a035cdf7995b4df22448966a 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * committsdesc.c
- *    rmgr descriptor routines for access/transam/commit_ts.c
+ *       rmgr descriptor routines for access/transam/commit_ts.c
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/committsdesc.c
+ *       src/backend/access/rmgrdesc/committsdesc.c
  *
  *-------------------------------------------------------------------------
  */
@@ -41,7 +41,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
        else if (info == COMMIT_TS_SETTS)
        {
                xl_commit_ts_set *xlrec = (xl_commit_ts_set *) rec;
-               int             nsubxids;
+               int                     nsubxids;
 
                appendStringInfo(buf, "set %s/%d for: %u",
                                                 timestamptz_to_str(xlrec->timestamp),
@@ -51,7 +51,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
                                        sizeof(TransactionId));
                if (nsubxids > 0)
                {
-                       int             i;
+                       int                     i;
                        TransactionId *subxids;
 
                        subxids = palloc(sizeof(TransactionId) * nsubxids);
index 19bae9a0f84f1bed0e0520bf0a0f25e67113dbab..60cf0f679db0b99f4de8b6db45910dc14e0c0c80 100644 (file)
@@ -1,13 +1,13 @@
 /*-------------------------------------------------------------------------
  *
  * replorigindesc.c
- *    rmgr descriptor routines for replication/logical/replication_origin.c
+ *       rmgr descriptor routines for replication/logical/replication_origin.c
  *
  * Portions Copyright (c) 2015, PostgreSQL Global Development Group
  *
  *
  * IDENTIFICATION
- *    src/backend/access/rmgrdesc/replorigindesc.c
+ *       src/backend/access/rmgrdesc/replorigindesc.c
  *
  *-------------------------------------------------------------------------
  */
@@ -26,6 +26,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
                case XLOG_REPLORIGIN_SET:
                        {
                                xl_replorigin_set *xlrec;
+
                                xlrec = (xl_replorigin_set *) rec;
 
                                appendStringInfo(buf, "set %u; lsn %X/%X; force: %d",
@@ -38,6 +39,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
                case XLOG_REPLORIGIN_DROP:
                        {
                                xl_replorigin_drop *xlrec;
+
                                xlrec = (xl_replorigin_drop *) rec;
 
                                appendStringInfo(buf, "drop %u", xlrec->node_id);
index 793f9bb51fa82bcc3933eee13412e2a0d3dde2fe..7b5f98305070c9db43367a301983cef749ba04f1 100644 (file)
@@ -37,7 +37,8 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
 
        memset(parsed, 0, sizeof(*parsed));
 
-       parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+       parsed->xinfo = 0;                      /* default, if no XLOG_XACT_HAS_INFO is
+                                                                * present */
 
        parsed->xact_time = xlrec->xact_time;
 
@@ -62,7 +63,7 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
 
        if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
        {
-               xl_xact_subxacts   *xl_subxacts = (xl_xact_subxacts *) data;
+               xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
 
                parsed->nsubxacts = xl_subxacts->nsubxacts;
                parsed->subxacts = xl_subxacts->subxacts;
@@ -123,7 +124,8 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
 
        memset(parsed, 0, sizeof(*parsed));
 
-       parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
+       parsed->xinfo = 0;                      /* default, if no XLOG_XACT_HAS_INFO is
+                                                                * present */
 
        parsed->xact_time = xlrec->xact_time;
 
@@ -138,7 +140,7 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
 
        if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
        {
-               xl_xact_subxacts   *xl_subxacts = (xl_xact_subxacts *) data;
+               xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
 
                parsed->nsubxacts = xl_subxacts->nsubxacts;
                parsed->subxacts = xl_subxacts->subxacts;
@@ -236,8 +238,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
        {
                appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
                                                 origin_id,
-                                                (uint32)(parsed.origin_lsn >> 32),
-                                                (uint32)parsed.origin_lsn,
+                                                (uint32) (parsed.origin_lsn >> 32),
+                                                (uint32) parsed.origin_lsn,
                                                 timestamptz_to_str(parsed.origin_timestamp));
        }
 }
index 06c6944fc72c67326514b92efff5efacbc46070c..8a0d9098c5edfadc3c530586592359c6a24372f4 100644 (file)
@@ -658,6 +658,7 @@ Datum
 spgcanreturn(PG_FUNCTION_ARGS)
 {
        Relation        index = (Relation) PG_GETARG_POINTER(0);
+
        /* int                  i = PG_GETARG_INT32(1); */
        SpGistCache *cache;
 
index c91f3f593e569d5730e70fa1291b48ae19efa97c..563a9168f0f06a49f228bcc4b95d970b20e9a456 100644 (file)
 /* tsdesc */
 typedef struct
 {
-       uint32 seed;                            /* random seed */
-       BlockNumber startblock;         /* starting block, we use this for syncscan support */
+       uint32          seed;                   /* random seed */
+       BlockNumber startblock;         /* starting block, we use this for syncscan
+                                                                * support */
        BlockNumber nblocks;            /* number of blocks */
        BlockNumber blockno;            /* current block */
-       float4 probability;                     /* probability that tuple will be returned (0.0-1.0) */
+       float4          probability;    /* probability that tuple will be returned
+                                                                * (0.0-1.0) */
        OffsetNumber lt;                        /* last tuple returned from current block */
-       SamplerRandomState randstate; /* random generator tsdesc */
+       SamplerRandomState randstate;           /* random generator tsdesc */
 } BernoulliSamplerData;
 
 /*
@@ -42,10 +44,10 @@ typedef struct
 Datum
 tsm_bernoulli_init(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       uint32                          seed = PG_GETARG_UINT32(1);
-       float4                          percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
-       HeapScanDesc            scan = tsdesc->heapScan;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       uint32          seed = PG_GETARG_UINT32(1);
+       float4          percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+       HeapScanDesc scan = tsdesc->heapScan;
        BernoulliSamplerData *sampler;
 
        if (percent < 0 || percent > 100)
@@ -77,14 +79,13 @@ tsm_bernoulli_init(PG_FUNCTION_ARGS)
 Datum
 tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc            *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       BernoulliSamplerData   *sampler =
-               (BernoulliSamplerData *) tsdesc->tsmdata;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       BernoulliSamplerData *sampler =
+       (BernoulliSamplerData *) tsdesc->tsmdata;
 
        /*
-        * Bernoulli sampling scans all blocks on the table and supports
-        * syncscan so loop from startblock to startblock instead of
-        * from 0 to nblocks.
+        * Bernoulli sampling scans all blocks on the table and supports syncscan
+        * so loop from startblock to startblock instead of from 0 to nblocks.
         */
        if (sampler->blockno == InvalidBlockNumber)
                sampler->blockno = sampler->startblock;
@@ -116,7 +117,7 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
  * tuples have same probability of being returned the visible and invisible
  * tuples will be returned in same ratio as they have in the actual table.
  * This means that there is no skew towards either visible or invisible tuples
- * and the  number of visible tuples returned from the executor node is the
+ * and the     number of visible tuples returned from the executor node is the
  * fraction of visible tuples which was specified in input.
  *
  * This is faster than doing the coinflip in the examinetuple because we don't
@@ -128,12 +129,12 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
 Datum
 tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc            *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       OffsetNumber                    maxoffset = PG_GETARG_UINT16(2);
-       BernoulliSamplerData   *sampler =
-               (BernoulliSamplerData *) tsdesc->tsmdata;
-       OffsetNumber                    tupoffset = sampler->lt;
-       float4                                  probability = sampler->probability;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+       BernoulliSamplerData *sampler =
+       (BernoulliSamplerData *) tsdesc->tsmdata;
+       OffsetNumber tupoffset = sampler->lt;
+       float4          probability = sampler->probability;
 
        if (tupoffset == InvalidOffsetNumber)
                tupoffset = FirstOffsetNumber;
@@ -142,8 +143,8 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
 
        /*
         * Loop over tuple offsets until the random generator returns a value that
-        * is within the probability of returning the tuple or until we reach
-        * end of the block.
+        * is within the probability of returning the tuple or until we reach end
+        * of the block.
         *
         * (This is our implementation of a Bernoulli trial)
         */
@@ -183,9 +184,9 @@ tsm_bernoulli_end(PG_FUNCTION_ARGS)
 Datum
 tsm_bernoulli_reset(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc            *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       BernoulliSamplerData   *sampler =
-               (BernoulliSamplerData *) tsdesc->tsmdata;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       BernoulliSamplerData *sampler =
+       (BernoulliSamplerData *) tsdesc->tsmdata;
 
        sampler->blockno = InvalidBlockNumber;
        sampler->lt = InvalidOffsetNumber;
@@ -200,14 +201,14 @@ tsm_bernoulli_reset(PG_FUNCTION_ARGS)
 Datum
 tsm_bernoulli_cost(PG_FUNCTION_ARGS)
 {
-       PlannerInfo        *root = (PlannerInfo *) PG_GETARG_POINTER(0);
-       Path               *path = (Path *) PG_GETARG_POINTER(1);
-       RelOptInfo         *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
-       List               *args = (List *) PG_GETARG_POINTER(3);
-       BlockNumber        *pages = (BlockNumber *) PG_GETARG_POINTER(4);
-       double             *tuples = (double *) PG_GETARG_POINTER(5);
-       Node               *pctnode;
-       float4                  samplesize;
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+       Path       *path = (Path *) PG_GETARG_POINTER(1);
+       RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+       List       *args = (List *) PG_GETARG_POINTER(3);
+       BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+       double     *tuples = (double *) PG_GETARG_POINTER(5);
+       Node       *pctnode;
+       float4          samplesize;
 
        *pages = baserel->pages;
 
index 1412e511fafa4e7997b6113d54b2c91251b891f8..1d834369a4bd11fbf6127d9d8c8d7e3e4859ca01 100644 (file)
@@ -31,9 +31,9 @@
 typedef struct
 {
        BlockSamplerData bs;
-       uint32 seed;                            /* random seed */
+       uint32          seed;                   /* random seed */
        BlockNumber nblocks;            /* number of block in relation */
-       int samplesize;                         /* number of blocks to return */
+       int                     samplesize;             /* number of blocks to return */
        OffsetNumber lt;                        /* last tuple returned from current block */
 } SystemSamplerData;
 
@@ -44,11 +44,11 @@ typedef struct
 Datum
 tsm_system_init(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       uint32                          seed = PG_GETARG_UINT32(1);
-       float4                          percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
-       HeapScanDesc            scan = tsdesc->heapScan;
-       SystemSamplerData  *sampler;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       uint32          seed = PG_GETARG_UINT32(1);
+       float4          percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
+       HeapScanDesc scan = tsdesc->heapScan;
+       SystemSamplerData *sampler;
 
        if (percent < 0 || percent > 100)
                ereport(ERROR,
@@ -80,9 +80,9 @@ tsm_system_init(PG_FUNCTION_ARGS)
 Datum
 tsm_system_nextblock(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
-       BlockNumber                     blockno;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       BlockNumber blockno;
 
        if (!BlockSampler_HasMore(&sampler->bs))
                PG_RETURN_UINT32(InvalidBlockNumber);
@@ -99,10 +99,10 @@ tsm_system_nextblock(PG_FUNCTION_ARGS)
 Datum
 tsm_system_nexttuple(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       OffsetNumber            maxoffset = PG_GETARG_UINT16(2);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
-       OffsetNumber            tupoffset = sampler->lt;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       OffsetNumber maxoffset = PG_GETARG_UINT16(2);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       OffsetNumber tupoffset = sampler->lt;
 
        if (tupoffset == InvalidOffsetNumber)
                tupoffset = FirstOffsetNumber;
@@ -136,8 +136,8 @@ tsm_system_end(PG_FUNCTION_ARGS)
 Datum
 tsm_system_reset(PG_FUNCTION_ARGS)
 {
-       TableSampleDesc    *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-       SystemSamplerData  *sampler = (SystemSamplerData *) tsdesc->tsmdata;
+       TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
+       SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
 
        sampler->lt = InvalidOffsetNumber;
        BlockSampler_Init(&sampler->bs, sampler->nblocks, sampler->samplesize,
@@ -152,14 +152,14 @@ tsm_system_reset(PG_FUNCTION_ARGS)
 Datum
 tsm_system_cost(PG_FUNCTION_ARGS)
 {
-       PlannerInfo        *root = (PlannerInfo *) PG_GETARG_POINTER(0);
-       Path               *path = (Path *) PG_GETARG_POINTER(1);
-       RelOptInfo         *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
-       List               *args = (List *) PG_GETARG_POINTER(3);
-       BlockNumber        *pages = (BlockNumber *) PG_GETARG_POINTER(4);
-       double             *tuples = (double *) PG_GETARG_POINTER(5);
-       Node               *pctnode;
-       float4                  samplesize;
+       PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
+       Path       *path = (Path *) PG_GETARG_POINTER(1);
+       RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
+       List       *args = (List *) PG_GETARG_POINTER(3);
+       BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
+       double     *tuples = (double *) PG_GETARG_POINTER(5);
+       Node       *pctnode;
+       float4          samplesize;
 
        pctnode = linitial(args);
        pctnode = estimate_expression_value(root, pctnode);
index ef55d062e75400e910f40355a429f0f1d6e9b2e3..3398d02f854bfb48be7611028811fb4e6bc4ca44 100644 (file)
@@ -1,14 +1,14 @@
 /*-------------------------------------------------------------------------
  *
  * tablesample.c
- *        TABLESAMPLE internal API
+ *               TABLESAMPLE internal API
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  *
  * IDENTIFICATION
- *        src/backend/access/tablesample/tablesample.c
+ *               src/backend/access/tablesample/tablesample.c
  *
  * TABLESAMPLE is the SQL standard clause for sampling the relations.
  *
@@ -53,7 +53,7 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
        List       *args = tablesample->args;
        ListCell   *arg;
        ExprContext *econtext = scanstate->ss.ps.ps_ExprContext;
-       TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
+       TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
 
        /* Load functions */
        fmgr_info(tablesample->tsminit, &(tsdesc->tsminit));
@@ -78,21 +78,21 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
        fcinfo.argnull[0] = false;
 
        /*
-        * Second arg for init function is always REPEATABLE
-        * When tablesample->repeatable is NULL then REPEATABLE clause was not
-        * specified.
-        * When specified, the expression cannot evaluate to NULL.
+        * Second arg for init function is always REPEATABLE.  When
+        * tablesample->repeatable is NULL, the REPEATABLE clause was not
+        * specified.  When specified, the expression cannot evaluate to NULL.
         */
        if (tablesample->repeatable)
        {
                ExprState  *argstate = ExecInitExpr((Expr *) tablesample->repeatable,
                                                                                        (PlanState *) scanstate);
+
                fcinfo.arg[1] = ExecEvalExpr(argstate, econtext,
                                                                         &fcinfo.argnull[1], NULL);
                if (fcinfo.argnull[1])
                        ereport(ERROR,
                                        (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
-                                        errmsg("REPEATABLE clause must be NOT NULL numeric value")));
+                               errmsg("REPEATABLE clause must be NOT NULL numeric value")));
        }
        else
        {
@@ -130,15 +130,15 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
 HeapTuple
 tablesample_getnext(TableSampleDesc *desc)
 {
-       HeapScanDesc    scan = desc->heapScan;
-       HeapTuple               tuple = &(scan->rs_ctup);
-       bool                    pagemode = scan->rs_pageatatime;
-       BlockNumber             blockno;
-       Page                    page;
-       bool                    page_all_visible;
-       ItemId                  itemid;
-       OffsetNumber    tupoffset,
-                                       maxoffset;
+       HeapScanDesc scan = desc->heapScan;
+       HeapTuple       tuple = &(scan->rs_ctup);
+       bool            pagemode = scan->rs_pageatatime;
+       BlockNumber blockno;
+       Page            page;
+       bool            page_all_visible;
+       ItemId          itemid;
+       OffsetNumber tupoffset,
+                               maxoffset;
 
        if (!scan->rs_inited)
        {
@@ -152,7 +152,7 @@ tablesample_getnext(TableSampleDesc *desc)
                        return NULL;
                }
                blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
-                                                                                       PointerGetDatum(desc)));
+                                                                                         PointerGetDatum(desc)));
                if (!BlockNumberIsValid(blockno))
                {
                        tuple->t_data = NULL;
@@ -184,14 +184,14 @@ tablesample_getnext(TableSampleDesc *desc)
                CHECK_FOR_INTERRUPTS();
 
                tupoffset = DatumGetUInt16(FunctionCall3(&desc->tsmnexttuple,
-                                                                                        PointerGetDatum(desc),
-                                                                                        UInt32GetDatum(blockno),
-                                                                                        UInt16GetDatum(maxoffset)));
+                                                                                                PointerGetDatum(desc),
+                                                                                                UInt32GetDatum(blockno),
+                                                                                                UInt16GetDatum(maxoffset)));
 
                if (OffsetNumberIsValid(tupoffset))
                {
-                       bool    visible;
-                       bool    found;
+                       bool            visible;
+                       bool            found;
 
                        /* Skip invalid tuple pointers. */
                        itemid = PageGetItemId(page, tupoffset);
@@ -208,8 +208,8 @@ tablesample_getnext(TableSampleDesc *desc)
                                visible = SampleTupleVisible(tuple, tupoffset, scan);
 
                        /*
-                        * Let the sampling method examine the actual tuple and decide if we
-                        * should return it.
+                        * Let the sampling method examine the actual tuple and decide if
+                        * we should return it.
                         *
                         * Note that we let it examine even invisible tuples for
                         * statistical purposes, but not return them since user should
@@ -218,10 +218,10 @@ tablesample_getnext(TableSampleDesc *desc)
                        if (OidIsValid(desc->tsmexaminetuple.fn_oid))
                        {
                                found = DatumGetBool(FunctionCall4(&desc->tsmexaminetuple,
-                                                                                          PointerGetDatum(desc),
-                                                                                          UInt32GetDatum(blockno),
-                                                                                          PointerGetDatum(tuple),
-                                                                                          BoolGetDatum(visible)));
+                                                                                                  PointerGetDatum(desc),
+                                                                                                  UInt32GetDatum(blockno),
+                                                                                                  PointerGetDatum(tuple),
+                                                                                                  BoolGetDatum(visible)));
                                /* Should not happen if sampling method is well written. */
                                if (found && !visible)
                                        elog(ERROR, "Sampling method wanted to return invisible tuple");
@@ -248,19 +248,19 @@ tablesample_getnext(TableSampleDesc *desc)
                        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 
                blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
-                                                                                 PointerGetDatum(desc)));
+                                                                                         PointerGetDatum(desc)));
 
                /*
-                * Report our new scan position for synchronization purposes. We
-                * don't do that when moving backwards, however. That would just
-                * mess up any other forward-moving scanners.
+                * Report our new scan position for synchronization purposes. We don't
+                * do that when moving backwards, however. That would just mess up any
+                * other forward-moving scanners.
                 *
-                * Note: we do this before checking for end of scan so that the
-                * final state of the position hint is back at the start of the
-                * rel.  That's not strictly necessary, but otherwise when you run
-                * the same query multiple times the starting position would shift
-                * a little bit backwards on every invocation, which is confusing.
-                * We don't guarantee any specific ordering in general, though.
+                * Note: we do this before checking for end of scan so that the final
+                * state of the position hint is back at the start of the rel.  That's
+                * not strictly necessary, but otherwise when you run the same query
+                * multiple times the starting position would shift a little bit
+                * backwards on every invocation, which is confusing. We don't
+                * guarantee any specific ordering in general, though.
                 */
                if (scan->rs_syncscan)
                        ss_report_location(scan->rs_rd, BlockNumberIsValid(blockno) ?
@@ -321,25 +321,25 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
 {
        /*
         * If this scan is reading whole pages at a time, there is already
-        * visibility info present in rs_vistuples so we can just search it
-        * for the tupoffset.
+        * visibility info present in rs_vistuples so we can just search it for
+        * the tupoffset.
         */
        if (scan->rs_pageatatime)
        {
-               int start = 0,
-                       end = scan->rs_ntuples - 1;
+               int                     start = 0,
+                                       end = scan->rs_ntuples - 1;
 
                /*
                 * Do the binary search over rs_vistuples, it's already sorted by
                 * OffsetNumber so we don't need to do any sorting ourselves here.
                 *
-                * We could use bsearch() here but it's slower for integers because
-                * of the function call overhead and because it needs boiler plate code
+                * We could use bsearch() here, but it's slower for integers because of
+                * the function call overhead, and since it needs boilerplate code it
+                * would not save us anything code-wise anyway.
                 */
                while (start <= end)
                {
-                       int mid = start + (end - start) / 2;
+                       int                     mid = start + (end - start) / 2;
                        OffsetNumber curoffset = scan->rs_vistuples[mid];
 
                        if (curoffset == tupoffset)
@@ -358,7 +358,7 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
                Snapshot        snapshot = scan->rs_snapshot;
                Buffer          buffer = scan->rs_cbuf;
 
-               bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
+               bool            visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
 
                CheckForSerializableConflictOut(visible, scan->rs_rd, tuple, buffer,
                                                                                snapshot);
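
The page-at-a-time branch above is an ordinary binary search over a sorted offset array; outside the backend it reduces to the following sketch, with the array contents standing in for rs_vistuples:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Binary search over a sorted array of line-pointer offsets, mirroring
     * the page-at-a-time branch above.
     */
    static bool
    offset_is_visible(const uint16_t *vistuples, int ntuples, uint16_t tupoffset)
    {
        int         start = 0,
                    end = ntuples - 1;

        while (start <= end)
        {
            int         mid = start + (end - start) / 2;
            uint16_t    curoffset = vistuples[mid];

            if (curoffset == tupoffset)
                return true;
            else if (curoffset > tupoffset)
                end = mid - 1;
            else
                start = mid + 1;
        }
        return false;
    }

    int
    main(void)
    {
        uint16_t    vis[] = {2, 5, 9, 14};

        printf("9 visible: %d, 10 visible: %d\n",
               offset_is_visible(vis, 4, 9),
               offset_is_visible(vis, 4, 10));
        return 0;
    }
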
index 63344327e3d1dfb0edbd609b10c39f562996e06f..5ad35c0d7f8956ff8048f6258828d3cb40d2aa9f 100644 (file)
@@ -55,8 +55,8 @@
  */
 typedef struct CommitTimestampEntry
 {
-       TimestampTz             time;
-       RepOriginId             nodeid;
+       TimestampTz time;
+       RepOriginId nodeid;
 } CommitTimestampEntry;
 
 #define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \
@@ -65,7 +65,7 @@ typedef struct CommitTimestampEntry
 #define COMMIT_TS_XACTS_PER_PAGE \
        (BLCKSZ / SizeOfCommitTimestampEntry)
 
-#define TransactionIdToCTsPage(xid)    \
+#define TransactionIdToCTsPage(xid) \
        ((xid) / (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
 #define TransactionIdToCTsEntry(xid)   \
        ((xid) % (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
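
Assuming the usual 8 kB BLCKSZ, an 8-byte TimestampTz and a 2-byte RepOriginId, each SLRU page holds 8192 / 10 = 819 of these entries, and the two macros above are just a division and a modulo. A standalone sketch of that arithmetic (the typedefs and the BLCKSZ value are assumptions for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the backend's typedefs and block size. */
    typedef int64_t TimestampTz;    /* 8 bytes */
    typedef uint16_t RepOriginId;   /* 2 bytes */
    typedef uint32_t TransactionId;

    #define BLCKSZ 8192

    typedef struct CommitTimestampEntry
    {
        TimestampTz time;
        RepOriginId nodeid;
    } CommitTimestampEntry;

    #define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \
                                        sizeof(RepOriginId))

    #define COMMIT_TS_XACTS_PER_PAGE (BLCKSZ / SizeOfCommitTimestampEntry)

    int
    main(void)
    {
        TransactionId xid = 1000000;

        printf("entry size %zu, entries per page %zu\n",
               (size_t) SizeOfCommitTimestampEntry,
               (size_t) COMMIT_TS_XACTS_PER_PAGE);
        printf("xid %u -> page %u, entry %u\n",
               (unsigned) xid,
               (unsigned) (xid / COMMIT_TS_XACTS_PER_PAGE),
               (unsigned) (xid % COMMIT_TS_XACTS_PER_PAGE));
        return 0;
    }
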
@@ -83,21 +83,21 @@ static SlruCtlData CommitTsCtlData;
  */
 typedef struct CommitTimestampShared
 {
-       TransactionId   xidLastCommit;
+       TransactionId xidLastCommit;
        CommitTimestampEntry dataLastCommit;
 } CommitTimestampShared;
 
-CommitTimestampShared  *commitTsShared;
+CommitTimestampShared *commitTsShared;
 
 
 /* GUC variable */
-bool   track_commit_timestamp;
+bool           track_commit_timestamp;
 
 static void SetXidCommitTsInPage(TransactionId xid, int nsubxids,
                                         TransactionId *subxids, TimestampTz ts,
                                         RepOriginId nodeid, int pageno);
 static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts,
-                                                 RepOriginId nodeid, int slotno);
+                                                RepOriginId nodeid, int slotno);
 static int     ZeroCommitTsPage(int pageno, bool writeXlog);
 static bool CommitTsPagePrecedes(int page1, int page2);
 static void WriteZeroPageXlogRec(int pageno);
@@ -141,8 +141,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
                return;
 
        /*
-        * Comply with the WAL-before-data rule: if caller specified it wants
-        * this value to be recorded in WAL, do so before touching the data.
+        * Comply with the WAL-before-data rule: if caller specified it wants this
+        * value to be recorded in WAL, do so before touching the data.
         */
        if (do_xlog)
                WriteSetTimestampXlogRec(xid, nsubxids, subxids, timestamp, nodeid);
@@ -159,9 +159,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
        /*
         * We split the xids to set the timestamp to in groups belonging to the
         * same SLRU page; the first element in each such set is its head.  The
-        * first group has the main XID as the head; subsequent sets use the
-        * first subxid not on the previous page as head.  This way, we only have
-        * to lock/modify each SLRU page once.
+        * first group has the main XID as the head; subsequent sets use the first
+        * subxid not on the previous page as head.  This way, we only have to
+        * lock/modify each SLRU page once.
         */
        for (i = 0, headxid = xid;;)
        {
@@ -183,8 +183,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
                        break;
 
                /*
-                * Set the new head and skip over it, as well as over the subxids
-                * we just wrote.
+                * Set the new head and skip over it, as well as over the subxids we
+                * just wrote.
                 */
                headxid = subxids[j];
                i += j - i + 1;
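
The grouping the comment describes is easier to follow stripped down: walk the sorted subxid array once and start a new group, headed by its first xid, whenever an xid lands on a new SLRU page. A sketch under the assumption of a fixed entries-per-page constant; all names here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;

    #define XACTS_PER_PAGE 819      /* illustrative; see COMMIT_TS_XACTS_PER_PAGE */

    static int
    xid_to_page(TransactionId xid)
    {
        return (int) (xid / XACTS_PER_PAGE);
    }

    /*
     * Walk a sorted subxid array once, emitting one per-page group whose
     * first member (the "head") is either the main xid or the first subxid
     * that landed on a new page.
     */
    static void
    set_commit_ts_by_page(TransactionId xid, int nsubxids,
                          const TransactionId *subxids)
    {
        int         i = 0;
        TransactionId headxid = xid;

        for (;;)
        {
            int         pageno = xid_to_page(headxid);
            int         j = i;

            /* collect the subxids sharing headxid's SLRU page */
            while (j < nsubxids && xid_to_page(subxids[j]) == pageno)
                j++;

            printf("page %d: head %u plus %d subxids\n",
                   pageno, (unsigned) headxid, j - i);

            if (j >= nsubxids)
                break;

            /* the first subxid on the next page becomes the new head */
            headxid = subxids[j];
            i = j + 1;
        }
    }

    int
    main(void)
    {
        TransactionId subs[] = {1000, 1200, 2000, 2500};

        set_commit_ts_by_page(900, 4, subs);
        return 0;
    }
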
@@ -271,14 +271,14 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("could not get commit timestamp data"),
-                                errhint("Make sure the configuration parameter \"%s\" is set.",
-                                                "track_commit_timestamp")));
+                         errhint("Make sure the configuration parameter \"%s\" is set.",
+                                         "track_commit_timestamp")));
 
        /* error if the given Xid doesn't normally commit */
        if (!TransactionIdIsNormal(xid))
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
+               errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
 
        /*
         * Return empty if the requested value is outside our valid range.
@@ -350,15 +350,15 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
 TransactionId
 GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
 {
-       TransactionId   xid;
+       TransactionId xid;
 
        /* Error if module not enabled */
        if (!track_commit_timestamp)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("could not get commit timestamp data"),
-                                errhint("Make sure the configuration parameter \"%s\" is set.",
-                                                "track_commit_timestamp")));
+                         errhint("Make sure the configuration parameter \"%s\" is set.",
+                                         "track_commit_timestamp")));
 
        LWLockAcquire(CommitTsLock, LW_SHARED);
        xid = commitTsShared->xidLastCommit;
@@ -377,9 +377,9 @@ GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
 Datum
 pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
 {
-       TransactionId   xid = PG_GETARG_UINT32(0);
-       TimestampTz             ts;
-       bool                    found;
+       TransactionId xid = PG_GETARG_UINT32(0);
+       TimestampTz ts;
+       bool            found;
 
        found = TransactionIdGetCommitTsData(xid, &ts, NULL);
 
@@ -393,11 +393,11 @@ pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
 Datum
 pg_last_committed_xact(PG_FUNCTION_ARGS)
 {
-       TransactionId   xid;
-       TimestampTz             ts;
-       Datum       values[2];
-       bool        nulls[2];
-       TupleDesc   tupdesc;
+       TransactionId xid;
+       TimestampTz ts;
+       Datum           values[2];
+       bool            nulls[2];
+       TupleDesc       tupdesc;
        HeapTuple       htup;
 
        /* and construct a tuple with our data */
@@ -462,7 +462,7 @@ CommitTsShmemSize(void)
 void
 CommitTsShmemInit(void)
 {
-       bool    found;
+       bool            found;
 
        CommitTsCtl->PagePrecedes = CommitTsPagePrecedes;
        SimpleLruInit(CommitTsCtl, "CommitTs Ctl", CommitTsShmemBuffers(), 0,
@@ -495,8 +495,8 @@ BootStrapCommitTs(void)
 {
        /*
         * Nothing to do here at present, unlike most other SLRU modules; segments
-        * are created when the server is started with this module enabled.
-        * See StartupCommitTs.
+        * are created when the server is started with this module enabled. See
+        * StartupCommitTs.
         */
 }
 
@@ -561,9 +561,9 @@ CompleteCommitTsInitialization(void)
 
 /*
  * Activate this module whenever necessary.
- *             This must happen during postmaster or standalong-backend startup,
- *             or during WAL replay anytime the track_commit_timestamp setting is
- *             changed in the master.
+ *             This must happen during postmaster or standalone-backend startup,
+ *             or during WAL replay anytime the track_commit_timestamp setting is
+ *             changed in the master.
  *
  * The reason why this SLRU needs separate activation/deactivation functions is
  * that it can be enabled/disabled during start and the activation/deactivation
@@ -612,7 +612,7 @@ ActivateCommitTs(void)
        /* Finally, create the current segment file, if necessary */
        if (!SimpleLruDoesPhysicalPageExist(CommitTsCtl, pageno))
        {
-               int             slotno;
+               int                     slotno;
 
                LWLockAcquire(CommitTsControlLock, LW_EXCLUSIVE);
                slotno = ZeroCommitTsPage(pageno, false);
@@ -834,7 +834,7 @@ WriteSetTimestampXlogRec(TransactionId mainxid, int nsubxids,
                                                 TransactionId *subxids, TimestampTz timestamp,
                                                 RepOriginId nodeid)
 {
-       xl_commit_ts_set        record;
+       xl_commit_ts_set record;
 
        record.timestamp = timestamp;
        record.nodeid = nodeid;
@@ -907,7 +907,7 @@ commit_ts_redo(XLogReaderState *record)
                        subxids = NULL;
 
                TransactionTreeSetCommitTsData(setts->mainxid, nsubxids, subxids,
-                                                                          setts->timestamp, setts->nodeid, false);
+                                                                        setts->timestamp, setts->nodeid, false);
                if (subxids)
                        pfree(subxids);
        }
index 0218378ccb51dd62bd79ca4d43c3377b42750d8d..9568ff1ddb7a01c54f6b20076e701fa928efadd4 100644 (file)
@@ -965,7 +965,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
         */
        if (!MultiXactIdPrecedes(result, MultiXactState->multiVacLimit) ||
                (MultiXactState->nextOffset - MultiXactState->oldestOffset
-                       > MULTIXACT_MEMBER_SAFE_THRESHOLD))
+                > MULTIXACT_MEMBER_SAFE_THRESHOLD))
        {
                /*
                 * For safety's sake, we release MultiXactGenLock while sending
@@ -1190,9 +1190,9 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
        MultiXactIdSetOldestVisible();
 
        /*
-        * If we know the multi is used only for locking and not for updates,
-        * then we can skip checking if the value is older than our oldest
-        * visible multi.  It cannot possibly still be running.
+        * If we know the multi is used only for locking and not for updates, then
+        * we can skip checking if the value is older than our oldest visible
+        * multi.  It cannot possibly still be running.
         */
        if (onlyLock &&
                MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
@@ -1207,14 +1207,14 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
         *
         * An ID older than MultiXactState->oldestMultiXactId cannot possibly be
         * useful; it has already been removed, or will be removed shortly, by
-        * truncation.  Returning the wrong values could lead
-        * to an incorrect visibility result.  However, to support pg_upgrade we
-        * need to allow an empty set to be returned regardless, if the caller is
-        * willing to accept it; the caller is expected to check that it's an
-        * allowed condition (such as ensuring that the infomask bits set on the
-        * tuple are consistent with the pg_upgrade scenario).  If the caller is
-        * expecting this to be called only on recently created multis, then we
-        * raise an error.
+        * truncation.  Returning the wrong values could lead to an incorrect
+        * visibility result.  However, to support pg_upgrade we need to allow an
+        * empty set to be returned regardless, if the caller is willing to accept
+        * it; the caller is expected to check that it's an allowed condition
+        * (such as ensuring that the infomask bits set on the tuple are
+        * consistent with the pg_upgrade scenario).  If the caller is expecting
+        * this to be called only on recently created multis, then we raise an
+        * error.
         *
         * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
         * seen, it implies undetected ID wraparound has occurred.  This raises a
@@ -2123,11 +2123,11 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
         * enough to contain the next value that would be created.
         *
         * We need to do this pretty early during the first startup in binary
-        * upgrade mode: before StartupMultiXact() in fact, because this routine is
-        * called even before that by StartupXLOG().  And we can't do it earlier
-        * than at this point, because during that first call of this routine we
-        * determine the MultiXactState->nextMXact value that MaybeExtendOffsetSlru
-        * needs.
+        * upgrade mode: before StartupMultiXact() in fact, because this routine
+        * is called even before that by StartupXLOG().  And we can't do it
+        * earlier than at this point, because during that first call of this
+        * routine we determine the MultiXactState->nextMXact value that
+        * MaybeExtendOffsetSlru needs.
         */
        if (IsBinaryUpgrade)
                MaybeExtendOffsetSlru();
@@ -2202,11 +2202,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
 
        /*
         * Determine the offset of the oldest multixact that might still be
-        * referenced.  Normally, we can read the offset from the multixact itself,
-        * but there's an important special case: if there are no multixacts in
-        * existence at all, oldest_datminmxid obviously can't point to one.  It
-        * will instead point to the multixact ID that will be assigned the next
-        * time one is needed.
+        * referenced.  Normally, we can read the offset from the multixact
+        * itself, but there's an important special case: if there are no
+        * multixacts in existence at all, oldest_datminmxid obviously can't point
+        * to one.  It will instead point to the multixact ID that will be
+        * assigned the next time one is needed.
         *
         * NB: oldest_dataminmxid is the oldest multixact that might still be
         * referenced from a table, unlike in DetermineSafeOldestOffset, where we
@@ -2520,10 +2520,9 @@ DetermineSafeOldestOffset(MultiXactId oldestMXact)
         * obviously can't point to one.  It will instead point to the multixact
         * ID that will be assigned the next time one is needed.
         *
-        * NB: oldestMXact should be the oldest multixact that still exists in
-        * the SLRU, unlike in SetMultiXactIdLimit, where we do this same
-        * computation based on the oldest value that might be referenced in a
-        * table.
+        * NB: oldestMXact should be the oldest multixact that still exists in the
+        * SLRU, unlike in SetMultiXactIdLimit, where we do this same computation
+        * based on the oldest value that might be referenced in a table.
         */
        LWLockAcquire(MultiXactGenLock, LW_SHARED);
        if (MultiXactState->nextMXact == oldestMXact)
@@ -2679,9 +2678,9 @@ int
 MultiXactMemberFreezeThreshold(void)
 {
        MultiXactOffset members;
-       uint32 multixacts;
-       uint32 victim_multixacts;
-       double fraction;
+       uint32          multixacts;
+       uint32          victim_multixacts;
+       double          fraction;
 
        ReadMultiXactCounts(&multixacts, &members);
 
@@ -2800,7 +2799,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
 void
 TruncateMultiXact(void)
 {
-       MultiXactId             oldestMXact;
+       MultiXactId oldestMXact;
        MultiXactOffset oldestOffset;
        MultiXactOffset nextOffset;
        mxtruncinfo trunc;
index 8d6a3606794425b080c97842a5b15301b65c7afb..f4ba8518b1215d218dc1b8561f6ad83fe9f9bdc5 100644 (file)
@@ -39,7 +39,7 @@
  * without blocking.  That way, a worker that errors out can write the whole
  * message into the queue and terminate without waiting for the user backend.
  */
-#define        PARALLEL_ERROR_QUEUE_SIZE                       16384
+#define PARALLEL_ERROR_QUEUE_SIZE                      16384
 
 /* Magic number for parallel context TOC. */
 #define PARALLEL_MAGIC                                         0x50477c7c
@@ -71,7 +71,7 @@ typedef struct FixedParallelState
        BackendId       parallel_master_backend_id;
 
        /* Entrypoint for parallel workers. */
-       parallel_worker_main_type       entrypoint;
+       parallel_worker_main_type entrypoint;
 
        /* Mutex protects remaining fields. */
        slock_t         mutex;
@@ -90,10 +90,10 @@ typedef struct FixedParallelState
  * and < the number of workers before any user code is invoked; each parallel
  * worker will get a different parallel worker number.
  */
-int ParallelWorkerNumber = -1;
+int                    ParallelWorkerNumber = -1;
 
 /* Is there a parallel message pending which we need to receive? */
-bool ParallelMessagePending = false;
+bool           ParallelMessagePending = false;
 
 /* Pointer to our fixed parallel state. */
 static FixedParallelState *MyFixedParallelState;
@@ -115,8 +115,8 @@ static void ParallelWorkerMain(Datum main_arg);
 ParallelContext *
 CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
 {
-       MemoryContext   oldcontext;
-       ParallelContext *pcxt;
+       MemoryContext oldcontext;
+       ParallelContext *pcxt;
 
        /* It is unsafe to create a parallel context if not in parallel mode. */
        Assert(IsInParallelMode());
@@ -159,7 +159,7 @@ CreateParallelContextForExternalFunction(char *library_name,
                                                                                 char *function_name,
                                                                                 int nworkers)
 {
-       MemoryContext   oldcontext;
+       MemoryContext oldcontext;
        ParallelContext *pcxt;
 
        /* We might be running in a very short-lived memory context. */
@@ -184,15 +184,15 @@ CreateParallelContextForExternalFunction(char *library_name,
 void
 InitializeParallelDSM(ParallelContext *pcxt)
 {
-       MemoryContext   oldcontext;
-       Size    library_len = 0;
-       Size    guc_len = 0;
-       Size    combocidlen = 0;
-       Size    tsnaplen = 0;
-       Size    asnaplen = 0;
-       Size    tstatelen = 0;
-       Size    segsize = 0;
-       int             i;
+       MemoryContext oldcontext;
+       Size            library_len = 0;
+       Size            guc_len = 0;
+       Size            combocidlen = 0;
+       Size            tsnaplen = 0;
+       Size            asnaplen = 0;
+       Size            tstatelen = 0;
+       Size            segsize = 0;
+       int                     i;
        FixedParallelState *fps;
        Snapshot        transaction_snapshot = GetTransactionSnapshot();
        Snapshot        active_snapshot = GetActiveSnapshot();
@@ -205,8 +205,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
        shm_toc_estimate_keys(&pcxt->estimator, 1);
 
        /*
-        * Normally, the user will have requested at least one worker process,
-        * but if by chance they have not, we can skip a bunch of things here.
+        * Normally, the user will have requested at least one worker process, but
+        * if by chance they have not, we can skip a bunch of things here.
         */
        if (pcxt->nworkers > 0)
        {
@@ -228,8 +228,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
 
                /* Estimate space need for error queues. */
                StaticAssertStmt(BUFFERALIGN(PARALLEL_ERROR_QUEUE_SIZE) ==
-                       PARALLEL_ERROR_QUEUE_SIZE,
-                       "parallel error queue size not buffer-aligned");
+                                                PARALLEL_ERROR_QUEUE_SIZE,
+                                                "parallel error queue size not buffer-aligned");
                shm_toc_estimate_chunk(&pcxt->estimator,
                                                           PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
                shm_toc_estimate_keys(&pcxt->estimator, 1);
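
The static assertion being reindented here checks a compile-time property: the per-worker error queue size must already be a multiple of the buffer alignment, so slicing one allocation into per-worker queues keeps each queue aligned. A generic version of that check, with a made-up 64-byte alignment standing in for BUFFERALIGN:

    #include <stdio.h>

    /*
     * TYPEALIGN-style rounding: bump LEN up to the next multiple of ALIGNVAL,
     * which must be a power of two.  The static assertion then guarantees
     * that the queue size is already a multiple of the alignment.
     */
    #define MY_TYPEALIGN(ALIGNVAL, LEN) \
        (((LEN) + ((ALIGNVAL) - 1)) & ~((ALIGNVAL) - 1))
    #define MY_BUFFERALIGN(LEN) MY_TYPEALIGN(64, (LEN))   /* 64 is illustrative */
    #define QUEUE_SIZE 16384

    _Static_assert(MY_BUFFERALIGN(QUEUE_SIZE) == QUEUE_SIZE,
                   "queue size not buffer-aligned");

    int
    main(void)
    {
        printf("%d stays %d after alignment\n",
               QUEUE_SIZE, MY_BUFFERALIGN(QUEUE_SIZE));
        return 0;
    }
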
@@ -251,9 +251,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
         * memory segment; instead, just use backend-private memory.
         *
         * Also, if we can't create a dynamic shared memory segment because the
-        * maximum number of segments have already been created, then fall back
-        * to backend-private memory, and plan not to use any workers.  We hope
-        * this won't happen very often, but it's better to abandon the use of
+        * maximum number of segments have already been created, then fall back to
+        * backend-private memory, and plan not to use any workers.  We hope this
+        * won't happen very often, but it's better to abandon the use of
         * parallelism than to fail outright.
         */
        segsize = shm_toc_estimate(&pcxt->estimator);
@@ -290,13 +290,13 @@ InitializeParallelDSM(ParallelContext *pcxt)
        /* We can skip the rest of this if we're not budgeting for any workers. */
        if (pcxt->nworkers > 0)
        {
-               char   *libraryspace;
-               char   *gucspace;
-               char   *combocidspace;
-               char   *tsnapspace;
-               char   *asnapspace;
-               char   *tstatespace;
-               char   *error_queue_space;
+               char       *libraryspace;
+               char       *gucspace;
+               char       *combocidspace;
+               char       *tsnapspace;
+               char       *asnapspace;
+               char       *tstatespace;
+               char       *error_queue_space;
 
                /* Serialize shared libraries we have loaded. */
                libraryspace = shm_toc_allocate(pcxt->toc, library_len);
@@ -338,12 +338,12 @@ InitializeParallelDSM(ParallelContext *pcxt)
                 * should be transmitted via separate (possibly larger?) queues.
                 */
                error_queue_space =
-                  shm_toc_allocate(pcxt->toc,
-                                                       PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+                       shm_toc_allocate(pcxt->toc,
+                                                        PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
                for (i = 0; i < pcxt->nworkers; ++i)
                {
-                       char *start;
-                       shm_mq *mq;
+                       char       *start;
+                       shm_mq     *mq;
 
                        start = error_queue_space + i * PARALLEL_ERROR_QUEUE_SIZE;
                        mq = shm_mq_create(start, PARALLEL_ERROR_QUEUE_SIZE);
@@ -355,8 +355,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
                /* Serialize extension entrypoint information. */
                if (pcxt->library_name != NULL)
                {
-                       Size    lnamelen = strlen(pcxt->library_name);
-                       char *extensionstate;
+                       Size            lnamelen = strlen(pcxt->library_name);
+                       char       *extensionstate;
 
                        extensionstate = shm_toc_allocate(pcxt->toc, lnamelen
                                                                                  + strlen(pcxt->function_name) + 2);
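
Both the leader here and each worker in ParallelWorkerMain below find a given worker's queue as error_queue_space plus the worker number times the fixed queue size, so neither side has to store per-worker pointers. A minimal sketch of that fixed-stride partitioning, with plain malloc standing in for the shared-memory allocation:

    #include <stdio.h>
    #include <stdlib.h>

    #define QUEUE_SIZE 16384        /* mirrors PARALLEL_ERROR_QUEUE_SIZE */

    /*
     * Carve one contiguous allocation into fixed-size per-worker regions;
     * the leader indexes by loop counter, a worker by its own worker number,
     * and both arrive at the same address.
     */
    int
    main(void)
    {
        int         nworkers = 4;
        char       *error_queue_space = malloc((size_t) QUEUE_SIZE * nworkers);

        if (error_queue_space == NULL)
            return 1;

        for (int i = 0; i < nworkers; i++)
        {
            char       *start = error_queue_space + (size_t) i * QUEUE_SIZE;

            printf("worker %d: queue at offset %zu\n",
                   i, (size_t) (start - error_queue_space));
        }

        free(error_queue_space);
        return 0;
    }
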
@@ -377,10 +377,10 @@ InitializeParallelDSM(ParallelContext *pcxt)
 void
 LaunchParallelWorkers(ParallelContext *pcxt)
 {
-       MemoryContext   oldcontext;
-       BackgroundWorker        worker;
-       int             i;
-       bool    any_registrations_failed = false;
+       MemoryContext oldcontext;
+       BackgroundWorker worker;
+       int                     i;
+       bool            any_registrations_failed = false;
 
        /* Skip this if we have no workers. */
        if (pcxt->nworkers == 0)
@@ -408,8 +408,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
         *
         * The caller must be able to tolerate ending up with fewer workers than
         * expected, so there is no need to throw an error here if registration
-        * fails.  It wouldn't help much anyway, because registering the worker
-        * in no way guarantees that it will start up and initialize successfully.
+        * fails.  It wouldn't help much anyway, because registering the worker in
+        * no way guarantees that it will start up and initialize successfully.
         */
        for (i = 0; i < pcxt->nworkers; ++i)
        {
@@ -421,8 +421,8 @@ LaunchParallelWorkers(ParallelContext *pcxt)
                else
                {
                        /*
-                        * If we weren't able to register the worker, then we've bumped
-                        * up against the max_worker_processes limit, and future
+                        * If we weren't able to register the worker, then we've bumped up
+                        * against the max_worker_processes limit, and future
                         * registrations will probably fail too, so arrange to skip them.
                         * But we still have to execute this code for the remaining slots
                         * to make sure that we forget about the error queues we budgeted
@@ -455,13 +455,13 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
 {
        for (;;)
        {
-               bool    anyone_alive = false;
-               int             i;
+               bool            anyone_alive = false;
+               int                     i;
 
                /*
-                * This will process any parallel messages that are pending, which
-                * may change the outcome of the loop that follows.  It may also
-                * throw an error propagated from a worker.
+                * This will process any parallel messages that are pending, which may
+                * change the outcome of the loop that follows.  It may also throw an
+                * error propagated from a worker.
                 */
                CHECK_FOR_INTERRUPTS();
 
@@ -502,7 +502,7 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
 void
 DestroyParallelContext(ParallelContext *pcxt)
 {
-       int             i;
+       int                     i;
 
        /*
         * Be careful about order of operations here!  We remove the parallel
@@ -548,7 +548,7 @@ DestroyParallelContext(ParallelContext *pcxt)
        /* Wait until the workers actually die. */
        for (i = 0; i < pcxt->nworkers; ++i)
        {
-               BgwHandleStatus status;
+               BgwHandleStatus status;
 
                if (pcxt->worker[i].bgwhandle == NULL)
                        continue;
@@ -626,9 +626,9 @@ HandleParallelMessages(void)
        dlist_foreach(iter, &pcxt_list)
        {
                ParallelContext *pcxt;
-               int             i;
-               Size    nbytes;
-               void   *data;
+               int                     i;
+               Size            nbytes;
+               void       *data;
 
                pcxt = dlist_container(ParallelContext, node, iter.cur);
                if (pcxt->worker == NULL)
@@ -637,14 +637,14 @@ HandleParallelMessages(void)
                for (i = 0; i < pcxt->nworkers; ++i)
                {
                        /*
-                        * Read as many messages as we can from each worker, but stop
-                        * when either (1) the error queue goes away, which can happen if
-                        * we receive a Terminate message from the worker; or (2) no more
+                        * Read as many messages as we can from each worker, but stop when
+                        * either (1) the error queue goes away, which can happen if we
+                        * receive a Terminate message from the worker; or (2) no more
                         * messages can be read from the worker without blocking.
                         */
                        while (pcxt->worker[i].error_mqh != NULL)
                        {
-                               shm_mq_result   res;
+                               shm_mq_result res;
 
                                res = shm_mq_receive(pcxt->worker[i].error_mqh, &nbytes,
                                                                         &data, true);
@@ -652,7 +652,7 @@ HandleParallelMessages(void)
                                        break;
                                else if (res == SHM_MQ_SUCCESS)
                                {
-                                       StringInfoData  msg;
+                                       StringInfoData msg;
 
                                        initStringInfo(&msg);
                                        appendBinaryStringInfo(&msg, data, nbytes);
@@ -661,7 +661,7 @@ HandleParallelMessages(void)
                                }
                                else
                                        ereport(ERROR,
-                                                       (errcode(ERRCODE_INTERNAL_ERROR), /* XXX: wrong errcode? */
+                                                       (errcode(ERRCODE_INTERNAL_ERROR),       /* XXX: wrong errcode? */
                                                         errmsg("lost connection to parallel worker")));
 
                                /* This might make the error queue go away. */
@@ -677,23 +677,24 @@ HandleParallelMessages(void)
 static void
 HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
 {
-       char    msgtype;
+       char            msgtype;
 
        msgtype = pq_getmsgbyte(msg);
 
        switch (msgtype)
        {
-               case 'K':                       /* BackendKeyData */
+               case 'K':                               /* BackendKeyData */
                        {
-                               int32   pid = pq_getmsgint(msg, 4);
+                               int32           pid = pq_getmsgint(msg, 4);
+
                                (void) pq_getmsgint(msg, 4);    /* discard cancel key */
                                (void) pq_getmsgend(msg);
                                pcxt->worker[i].pid = pid;
                                break;
                        }
 
-               case 'E':                       /* ErrorResponse */
-               case 'N':                       /* NoticeResponse */
+               case 'E':                               /* ErrorResponse */
+               case 'N':                               /* NoticeResponse */
                        {
                                ErrorData       edata;
                                ErrorContextCallback errctx;
@@ -725,14 +726,14 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg)
                                break;
                        }
 
-               case 'A':               /* NotifyResponse */
+               case 'A':                               /* NotifyResponse */
                        {
                                /* Propagate NotifyResponse. */
                                pq_putmessage(msg->data[0], &msg->data[1], msg->len - 1);
                                break;
                        }
 
-               case 'X':               /* Terminate, indicating clean exit */
+               case 'X':                               /* Terminate, indicating clean exit */
                        {
                                pfree(pcxt->worker[i].bgwhandle);
                                pfree(pcxt->worker[i].error_mqh);
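
The switch above relays a handful of frontend/backend-style protocol messages keyed on a one-byte message type. A stripped-down sketch of that dispatch shape, with printf placeholders instead of the real handling:

    #include <stdio.h>

    /*
     * Dispatch on the one-byte message type of a frontend/backend-style
     * protocol message; the payload handling here is only a placeholder.
     */
    static void
    handle_message(const char *buf, size_t len)
    {
        char        msgtype = buf[0];

        switch (msgtype)
        {
            case 'K':               /* BackendKeyData: note the sender's PID */
                printf("pid message, %zu payload bytes\n", len - 1);
                break;
            case 'E':               /* ErrorResponse */
            case 'N':               /* NoticeResponse */
                printf("relay error/notice, %zu payload bytes\n", len - 1);
                break;
            case 'X':               /* Terminate: sender finished cleanly */
                printf("clean exit\n");
                break;
            default:
                printf("unexpected message type '%c'\n", msgtype);
                break;
        }
    }

    int
    main(void)
    {
        handle_message("K\1\2\3\4", 5);
        handle_message("X", 1);
        return 0;
    }
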
@@ -797,18 +798,18 @@ static void
 ParallelWorkerMain(Datum main_arg)
 {
        dsm_segment *seg;
-       shm_toc *toc;
+       shm_toc    *toc;
        FixedParallelState *fps;
-       char   *error_queue_space;
-       shm_mq *mq;
+       char       *error_queue_space;
+       shm_mq     *mq;
        shm_mq_handle *mqh;
-       char   *libraryspace;
-       char   *gucspace;
-       char   *combocidspace;
-       char   *tsnapspace;
-       char   *asnapspace;
-       char   *tstatespace;
-       StringInfoData  msgbuf;
+       char       *libraryspace;
+       char       *gucspace;
+       char       *combocidspace;
+       char       *tsnapspace;
+       char       *asnapspace;
+       char       *tstatespace;
+       StringInfoData msgbuf;
 
        /* Establish signal handlers. */
        pqsignal(SIGTERM, die);
@@ -824,8 +825,8 @@ ParallelWorkerMain(Datum main_arg)
                                                                                                 ALLOCSET_DEFAULT_MAXSIZE);
 
        /*
-        * Now that we have a resource owner, we can attach to the dynamic
-        * shared memory segment and read the table of contents.
+        * Now that we have a resource owner, we can attach to the dynamic shared
+        * memory segment and read the table of contents.
         */
        seg = dsm_attach(DatumGetUInt32(main_arg));
        if (seg == NULL)
@@ -836,7 +837,7 @@ ParallelWorkerMain(Datum main_arg)
        if (toc == NULL)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                errmsg("bad magic number in dynamic shared memory segment")));
+                          errmsg("bad magic number in dynamic shared memory segment")));
 
        /* Determine and set our worker number. */
        fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
@@ -860,7 +861,7 @@ ParallelWorkerMain(Datum main_arg)
         */
        error_queue_space = shm_toc_lookup(toc, PARALLEL_KEY_ERROR_QUEUE);
        mq = (shm_mq *) (error_queue_space +
-               ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
+                                        ParallelWorkerNumber * PARALLEL_ERROR_QUEUE_SIZE);
        shm_mq_set_sender(mq, MyProc);
        mqh = shm_mq_attach(mq, seg, NULL);
        pq_redirect_to_shm_mq(mq, mqh);
@@ -870,9 +871,9 @@ ParallelWorkerMain(Datum main_arg)
        /*
         * Send a BackendKeyData message to the process that initiated parallelism
         * so that it has access to our PID before it receives any other messages
-        * from us.  Our cancel key is sent, too, since that's the way the protocol
-        * message is defined, but it won't actually be used for anything in this
-        * case.
+        * from us.  Our cancel key is sent, too, since that's the way the
+        * protocol message is defined, but it won't actually be used for anything
+        * in this case.
         */
        pq_beginmessage(&msgbuf, 'K');
        pq_sendint(&msgbuf, (int32) MyProcPid, sizeof(int32));
@@ -880,13 +881,13 @@ ParallelWorkerMain(Datum main_arg)
        pq_endmessage(&msgbuf);
 
        /*
-        * Hooray! Primary initialization is complete.  Now, we need to set up
-        * our backend-local state to match the original backend.
+        * Hooray! Primary initialization is complete.  Now, we need to set up our
+        * backend-local state to match the original backend.
         */
 
        /*
-        * Load libraries that were loaded by original backend.  We want to do this
-        * before restoring GUCs, because the libraries might define custom
+        * Load libraries that were loaded by original backend.  We want to do
+        * this before restoring GUCs, because the libraries might define custom
         * variables.
         */
        libraryspace = shm_toc_lookup(toc, PARALLEL_KEY_LIBRARY);
@@ -928,7 +929,8 @@ ParallelWorkerMain(Datum main_arg)
        SetUserIdAndSecContext(fps->current_user_id, fps->sec_context);
 
        /*
-        * We've initialized all of our state now; nothing should change hereafter.
+        * We've initialized all of our state now; nothing should change
+        * hereafter.
         */
        EnterParallelMode();
 
@@ -965,9 +967,9 @@ ParallelWorkerMain(Datum main_arg)
 static void
 ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
 {
-       char   *extensionstate;
-       char   *library_name;
-       char   *function_name;
+       char       *extensionstate;
+       char       *library_name;
+       char       *function_name;
        parallel_worker_main_type entrypt;
 
        extensionstate = shm_toc_lookup(toc, PARALLEL_KEY_EXTENSION_TRAMPOLINE);
@@ -988,7 +990,7 @@ ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc)
 static void
 ParallelErrorContext(void *arg)
 {
-       errcontext("parallel worker, pid %d", * (int32 *) arg);
+       errcontext("parallel worker, pid %d", *(int32 *) arg);
 }
 
 /*
index 4743cacefe67325e5d2cc25695d5c711210dacd0..177d1e1432e386b7448243d52c9117cfec64f02c 100644 (file)
@@ -117,7 +117,7 @@ typedef struct GlobalTransactionData
        TimestampTz prepared_at;        /* time of preparation */
        XLogRecPtr      prepare_lsn;    /* XLOG offset of prepare record */
        Oid                     owner;                  /* ID of user that executed the xact */
-       BackendId       locking_backend; /* backend currently working on the xact */
+       BackendId       locking_backend;        /* backend currently working on the xact */
        bool            valid;                  /* TRUE if PGPROC entry is in proc array */
        char            gid[GIDSIZE];   /* The GID assigned to the prepared xact */
 }      GlobalTransactionData;
@@ -256,24 +256,24 @@ AtAbort_Twophase(void)
                return;
 
        /*
-        * What to do with the locked global transaction entry?  If we were in
-        * the process of preparing the transaction, but haven't written the WAL
+        * What to do with the locked global transaction entry?  If we were in the
+        * process of preparing the transaction, but haven't written the WAL
         * record and state file yet, the transaction must not be considered as
         * prepared.  Likewise, if we are in the process of finishing an
-        * already-prepared transaction, and fail after having already written
-        * the 2nd phase commit or rollback record to the WAL, the transaction
-        * should not be considered as prepared anymore.  In those cases, just
-        * remove the entry from shared memory.
+        * already-prepared transaction, and fail after having already written the
+        * 2nd phase commit or rollback record to the WAL, the transaction should
+        * not be considered as prepared anymore.  In those cases, just remove the
+        * entry from shared memory.
         *
-        * Otherwise, the entry must be left in place so that the transaction
-        * can be finished later, so just unlock it.
+        * Otherwise, the entry must be left in place so that the transaction can
+        * be finished later, so just unlock it.
         *
         * If we abort during prepare, after having written the WAL record, we
         * might not have transferred all locks and other state to the prepared
         * transaction yet.  Likewise, if we abort during commit or rollback,
-        * after having written the WAL record, we might not have released
-        * all the resources held by the transaction yet.  In those cases, the
-        * in-memory state can be wrong, but it's too late to back out.
+        * after having written the WAL record, we might not have released all the
+        * resources held by the transaction yet.  In those cases, the in-memory
+        * state can be wrong, but it's too late to back out.
         */
        if (!MyLockedGxact->valid)
        {
@@ -408,8 +408,8 @@ MarkAsPreparing(TransactionId xid, const char *gid,
        TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact;
 
        /*
-        * Remember that we have this GlobalTransaction entry locked for us.
-        * If we abort after this, we must release it.
+        * Remember that we have this GlobalTransaction entry locked for us. If we
+        * abort after this, we must release it.
         */
        MyLockedGxact = gxact;
 
@@ -499,8 +499,8 @@ LockGXact(const char *gid, Oid user)
                if (gxact->locking_backend != InvalidBackendId)
                        ereport(ERROR,
                                        (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                        errmsg("prepared transaction with identifier \"%s\" is busy",
-                                                       gid)));
+                               errmsg("prepared transaction with identifier \"%s\" is busy",
+                                          gid)));
 
                if (user != gxact->owner && !superuser_arg(user))
                        ereport(ERROR,
@@ -1423,8 +1423,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
 
        /*
         * In case we fail while running the callbacks, mark the gxact invalid so
-        * no one else will try to commit/rollback, and so it will be recycled
-        * if we fail after this point.  It is still locked by our backend so it
+        * no one else will try to commit/rollback, and so it will be recycled if
+        * we fail after this point.  It is still locked by our backend so it
         * won't go away yet.
         *
         * (We assume it's safe to do this without taking TwoPhaseStateLock.)
@@ -2055,8 +2055,9 @@ RecoverPreparedTransactions(void)
                                StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
 
                        /*
-                        * We're done with recovering this transaction. Clear MyLockedGxact,
-                        * like we do in PrepareTransaction() during normal operation.
+                        * We're done with recovering this transaction. Clear
+                        * MyLockedGxact, like we do in PrepareTransaction() during normal
+                        * operation.
                         */
                        PostPrepare_Twophase();
 
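
The comment blocks reflowed above, in AtAbort_Twophase() and MarkAsPreparing(), describe a simple pattern: remember which shared prepared-transaction entry this backend has locked, and on abort either discard it (if the prepare never became valid) or merely unlock it so another backend can finish the transaction later. The following is a minimal, self-contained C sketch of that pattern only; it is not PostgreSQL code, and every name in it (DemoGxact, LockedEntry, at_abort_cleanup) is invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a shared prepared-transaction slot. */
typedef struct DemoGxact
{
    bool locked;   /* is some backend currently working on it? */
    bool valid;    /* has the prepare step been fully written out? */
} DemoGxact;

/* Analogous in spirit to MyLockedGxact: the entry this backend holds. */
static DemoGxact *LockedEntry = NULL;

static void
lock_entry(DemoGxact *gxact)
{
    gxact->locked = true;
    LockedEntry = gxact;
}

/*
 * Abort-time cleanup, mirroring the comment above: if the entry never
 * became valid (prepare not fully written), drop it; otherwise just
 * unlock it so the transaction can be finished later.
 */
static void
at_abort_cleanup(void)
{
    if (LockedEntry == NULL)
        return;
    if (!LockedEntry->valid)
        printf("discarding half-prepared entry\n");
    else
    {
        LockedEntry->locked = false;
        printf("unlocking still-valid entry\n");
    }
    LockedEntry = NULL;
}

int
main(void)
{
    DemoGxact gx = {false, false};

    lock_entry(&gx);
    /* pretend we failed before writing the WAL record and state file */
    at_abort_cleanup();
    return 0;
}
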
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 23401057e2c0531cd3fd082cd3aaca7e1b7e5653..b53d95faf8608ecd9cd9d733029985fef2f3ab37 100644
@@ -102,9 +102,9 @@ int                 synchronous_commit = SYNCHRONOUS_COMMIT_ON;
  * The XIDs are stored sorted in numerical order (not logical order) to make
  * lookups as fast as possible.
  */
-TransactionId  XactTopTransactionId = InvalidTransactionId;
-int                            nParallelCurrentXids = 0;
-TransactionId  *ParallelCurrentXids;
+TransactionId XactTopTransactionId = InvalidTransactionId;
+int                    nParallelCurrentXids = 0;
+TransactionId *ParallelCurrentXids;
 
 /*
  * MyXactAccessedTempRel is set when a temporary relation is accessed.
@@ -142,7 +142,7 @@ typedef enum TBlockState
        /* transaction block states */
        TBLOCK_BEGIN,                           /* starting transaction block */
        TBLOCK_INPROGRESS,                      /* live transaction */
-       TBLOCK_PARALLEL_INPROGRESS,     /* live transaction inside parallel worker */
+       TBLOCK_PARALLEL_INPROGRESS, /* live transaction inside parallel worker */
        TBLOCK_END,                                     /* COMMIT received */
        TBLOCK_ABORT,                           /* failed xact, awaiting ROLLBACK */
        TBLOCK_ABORT_END,                       /* failed xact, ROLLBACK received */
@@ -184,7 +184,7 @@ typedef struct TransactionStateData
        bool            prevXactReadOnly;               /* entry-time xact r/o state */
        bool            startedInRecovery;              /* did we start in recovery? */
        bool            didLogXid;              /* has xid been included in WAL record? */
-       int                     parallelModeLevel;      /* Enter/ExitParallelMode counter */
+       int                     parallelModeLevel;              /* Enter/ExitParallelMode counter */
        struct TransactionStateData *parent;            /* back link to parent */
 } TransactionStateData;
 
@@ -494,8 +494,8 @@ AssignTransactionId(TransactionState s)
        Assert(s->state == TRANS_INPROGRESS);
 
        /*
-        * Workers synchronize transaction state at the beginning of each
-        * parallel operation, so we can't account for new XIDs at this point.
+        * Workers synchronize transaction state at the beginning of each parallel
+        * operation, so we can't account for new XIDs at this point.
         */
        if (IsInParallelMode())
                elog(ERROR, "cannot assign XIDs during a parallel operation");
@@ -788,10 +788,10 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
                return false;
 
        /*
-        * In parallel workers, the XIDs we must consider as current are stored
-        * in ParallelCurrentXids rather than the transaction-state stack.  Note
-        * that the XIDs in this array are sorted numerically rather than
-        * according to transactionIdPrecedes order.
+        * In parallel workers, the XIDs we must consider as current are stored in
+        * ParallelCurrentXids rather than the transaction-state stack.  Note that
+        * the XIDs in this array are sorted numerically rather than according to
+        * transactionIdPrecedes order.
         */
        if (nParallelCurrentXids > 0)
        {
@@ -1204,7 +1204,7 @@ RecordTransactionCommit(void)
                                                        nchildren, children, nrels, rels,
                                                        nmsgs, invalMessages,
                                                        RelcacheInitFileInval, forceSyncCommit,
-                                                       InvalidTransactionId /* plain commit */);
+                                                       InvalidTransactionId /* plain commit */ );
 
                /*
                 * Record plain commit ts if not replaying remote actions, or if no
@@ -1505,7 +1505,7 @@ RecordTransactionAbort(bool isSubXact)
        RelFileNode *rels;
        int                     nchildren;
        TransactionId *children;
-       TimestampTz     xact_time;
+       TimestampTz xact_time;
 
        /*
         * If we haven't been assigned an XID, nobody will care whether we aborted
@@ -2316,8 +2316,8 @@ PrepareTransaction(void)
 
        /*
         * In normal commit-processing, this is all non-critical post-transaction
-        * cleanup.  When the transaction is prepared, however, it's important that
-        * the locks and other per-backend resources are transferred to the
+        * cleanup.  When the transaction is prepared, however, it's important
+        * that the locks and other per-backend resources are transferred to the
         * prepared transaction's PGPROC entry.  Note that if an error is raised
         * here, it's too late to abort the transaction. XXX: This probably should
         * be in a critical section, to force a PANIC if any of this fails, but
@@ -2358,9 +2358,8 @@ PrepareTransaction(void)
 
        /*
         * Allow another backend to finish the transaction.  After
-        * PostPrepare_Twophase(), the transaction is completely detached from
-        * our backend.  The rest is just non-critical cleanup of backend-local
-        * state.
+        * PostPrepare_Twophase(), the transaction is completely detached from our
+        * backend.  The rest is just non-critical cleanup of backend-local state.
         */
        PostPrepare_Twophase();
 
@@ -2417,7 +2416,7 @@ AbortTransaction(void)
 {
        TransactionState s = CurrentTransactionState;
        TransactionId latestXid;
-       bool    is_parallel_worker;
+       bool            is_parallel_worker;
 
        /* Prevent cancel/die interrupt while cleaning up */
        HOLD_INTERRUPTS();
@@ -2520,9 +2519,9 @@ AbortTransaction(void)
                latestXid = InvalidTransactionId;
 
                /*
-                * Since the parallel master won't get our value of XactLastRecEnd in this
-                * case, we nudge WAL-writer ourselves in this case.  See related comments in
-                * RecordTransactionAbort for why this matters.
+                * Since the parallel master won't get our value of XactLastRecEnd in
+                * this case, we nudge WAL-writer ourselves in this case.  See related
+                * comments in RecordTransactionAbort for why this matters.
                 */
                XLogSetAsyncXactLSN(XactLastRecEnd);
        }
@@ -3720,7 +3719,7 @@ DefineSavepoint(char *name)
        if (IsInParallelMode())
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-                                errmsg("cannot define savepoints during a parallel operation")));
+                       errmsg("cannot define savepoints during a parallel operation")));
 
        switch (s->blockState)
        {
@@ -3787,7 +3786,7 @@ ReleaseSavepoint(List *options)
        if (IsInParallelMode())
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-                                errmsg("cannot release savepoints during a parallel operation")));
+                  errmsg("cannot release savepoints during a parallel operation")));
 
        switch (s->blockState)
        {
@@ -3900,7 +3899,7 @@ RollbackToSavepoint(List *options)
        if (IsInParallelMode())
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-                                errmsg("cannot rollback to savepoints during a parallel operation")));
+               errmsg("cannot rollback to savepoints during a parallel operation")));
 
        switch (s->blockState)
        {
@@ -4017,17 +4016,18 @@ BeginInternalSubTransaction(char *name)
 
        /*
         * Workers synchronize transaction state at the beginning of each parallel
-        * operation, so we can't account for new subtransactions after that point.
-        * We might be able to make an exception for the type of subtransaction
-        * established by this function, which is typically used in contexts where
-        * we're going to release or roll back the subtransaction before proceeding
-        * further, so that no enduring change to the transaction state occurs.
-        * For now, however, we prohibit this case along with all the others.
+        * operation, so we can't account for new subtransactions after that
+        * point. We might be able to make an exception for the type of
+        * subtransaction established by this function, which is typically used in
+        * contexts where we're going to release or roll back the subtransaction
+        * before proceeding further, so that no enduring change to the
+        * transaction state occurs. For now, however, we prohibit this case along
+        * with all the others.
         */
        if (IsInParallelMode())
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-                                errmsg("cannot start subtransactions during a parallel operation")));
+               errmsg("cannot start subtransactions during a parallel operation")));
 
        switch (s->blockState)
        {
@@ -4094,7 +4094,7 @@ ReleaseCurrentSubTransaction(void)
        if (IsInParallelMode())
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
-                                errmsg("cannot commit subtransactions during a parallel operation")));
+               errmsg("cannot commit subtransactions during a parallel operation")));
 
        if (s->blockState != TBLOCK_SUBINPROGRESS)
                elog(ERROR, "ReleaseCurrentSubTransaction: unexpected state %s",
@@ -4773,7 +4773,8 @@ Size
 EstimateTransactionStateSpace(void)
 {
        TransactionState s;
-       Size    nxids = 5; /* iso level, deferrable, top & current XID, XID count */
+       Size            nxids = 5;              /* iso level, deferrable, top & current XID,
+                                                                * XID count */
 
        for (s = CurrentTransactionState; s != NULL; s = s->parent)
        {
@@ -4804,8 +4805,8 @@ void
 SerializeTransactionState(Size maxsize, char *start_address)
 {
        TransactionState s;
-       Size    nxids = 0;
-       Size    i = 0;
+       Size            nxids = 0;
+       Size            i = 0;
        TransactionId *workspace;
        TransactionId *result = (TransactionId *) start_address;
 
@@ -4830,8 +4831,8 @@ SerializeTransactionState(Size maxsize, char *start_address)
        }
 
        /*
-        * OK, we need to generate a sorted list of XIDs that our workers
-        * should view as current.  First, figure out how many there are.
+        * OK, we need to generate a sorted list of XIDs that our workers should
+        * view as current.  First, figure out how many there are.
         */
        for (s = CurrentTransactionState; s != NULL; s = s->parent)
        {
@@ -5060,22 +5061,22 @@ xactGetCommittedChildren(TransactionId **ptr)
  */
 XLogRecPtr
 XactLogCommitRecord(TimestampTz commit_time,
-                                        int nsubxacts, TransactionId *subxacts,
-                                        int nrels, RelFileNode *rels,
-                                        int nmsgs, SharedInvalidationMessage *msgs,
-                                        bool relcacheInval, bool forceSync,
-                                        TransactionId twophase_xid)
+                                       int nsubxacts, TransactionId *subxacts,
+                                       int nrels, RelFileNode *rels,
+                                       int nmsgs, SharedInvalidationMessage *msgs,
+                                       bool relcacheInval, bool forceSync,
+                                       TransactionId twophase_xid)
 {
-       xl_xact_commit          xlrec;
-       xl_xact_xinfo           xl_xinfo;
-       xl_xact_dbinfo          xl_dbinfo;
-       xl_xact_subxacts        xl_subxacts;
+       xl_xact_commit xlrec;
+       xl_xact_xinfo xl_xinfo;
+       xl_xact_dbinfo xl_dbinfo;
+       xl_xact_subxacts xl_subxacts;
        xl_xact_relfilenodes xl_relfilenodes;
-       xl_xact_invals          xl_invals;
-       xl_xact_twophase        xl_twophase;
-       xl_xact_origin          xl_origin;
+       xl_xact_invals xl_invals;
+       xl_xact_twophase xl_twophase;
+       xl_xact_origin xl_origin;
 
-       uint8                           info;
+       uint8           info;
 
        Assert(CritSectionCount > 0);
 
@@ -5198,17 +5199,17 @@ XactLogCommitRecord(TimestampTz commit_time,
  */
 XLogRecPtr
 XactLogAbortRecord(TimestampTz abort_time,
-                                       int nsubxacts, TransactionId *subxacts,
-                                       int nrels, RelFileNode *rels,
-                                       TransactionId twophase_xid)
+                                  int nsubxacts, TransactionId *subxacts,
+                                  int nrels, RelFileNode *rels,
+                                  TransactionId twophase_xid)
 {
-       xl_xact_abort           xlrec;
-       xl_xact_xinfo           xl_xinfo;
-       xl_xact_subxacts        xl_subxacts;
+       xl_xact_abort xlrec;
+       xl_xact_xinfo xl_xinfo;
+       xl_xact_subxacts xl_subxacts;
        xl_xact_relfilenodes xl_relfilenodes;
-       xl_xact_twophase        xl_twophase;
+       xl_xact_twophase xl_twophase;
 
-       uint8                           info;
+       uint8           info;
 
        Assert(CritSectionCount > 0);
 
@@ -5289,7 +5290,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
 {
        TransactionId max_xid;
        int                     i;
-       TimestampTz     commit_time;
+       TimestampTz commit_time;
 
        max_xid = TransactionIdLatest(xid, parsed->nsubxacts, parsed->subxacts);
 
@@ -5351,13 +5352,13 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
                 * recovered. It's unlikely but it's good to be safe.
                 */
                TransactionIdAsyncCommitTree(
-                       xid, parsed->nsubxacts, parsed->subxacts, lsn);
+                                                         xid, parsed->nsubxacts, parsed->subxacts, lsn);
 
                /*
                 * We must mark clog before we update the ProcArray.
                 */
                ExpireTreeKnownAssignedTransactionIds(
-                       xid, parsed->nsubxacts, parsed->subxacts, max_xid);
+                                                 xid, parsed->nsubxacts, parsed->subxacts, max_xid);
 
                /*
                 * Send any cache invalidations attached to the commit. We must
@@ -5365,9 +5366,9 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
                 * occurs in CommitTransaction().
                 */
                ProcessCommittedInvalidationMessages(
-                       parsed->msgs, parsed->nmsgs,
-                       XactCompletionRelcacheInitFileInval(parsed->xinfo),
-                       parsed->dbId, parsed->tsId);
+                                                                                        parsed->msgs, parsed->nmsgs,
+                                                 XactCompletionRelcacheInitFileInval(parsed->xinfo),
+                                                                                        parsed->dbId, parsed->tsId);
 
                /*
                 * Release locks, if any. We do this for both two phase and normal one
@@ -5383,7 +5384,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
        {
                /* recover apply progress */
                replorigin_advance(origin_id, parsed->origin_lsn, lsn,
-                                                  false /* backward */, false /* WAL */);
+                                                  false /* backward */ , false /* WAL */ );
        }
 
        /* Make sure files supposed to be dropped are dropped */
@@ -5447,8 +5448,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
 static void
 xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
 {
-       int                             i;
-       TransactionId   max_xid;
+       int                     i;
+       TransactionId max_xid;
 
        /*
         * Make sure nextXid is beyond any XID mentioned in the record.
@@ -5495,7 +5496,7 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid)
                 * We must update the ProcArray after we have marked clog.
                 */
                ExpireTreeKnownAssignedTransactionIds(
-                       xid, parsed->nsubxacts, parsed->subxacts, max_xid);
+                                                 xid, parsed->nsubxacts, parsed->subxacts, max_xid);
 
                /*
                 * There are no flat files that need updating, nor invalidation
@@ -5557,7 +5558,7 @@ xact_redo(XLogReaderState *record)
                xl_xact_parsed_abort parsed;
 
                ParseAbortRecord(XLogRecGetInfo(record), xlrec,
-                                                 &parsed);
+                                                &parsed);
 
                if (info == XLOG_XACT_ABORT)
                {
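
One of the comments reindented above, in TransactionIdIsCurrentTransactionId(), notes that parallel workers keep their set of current XIDs in ParallelCurrentXids, sorted numerically so membership tests can use a plain binary search. The standalone sketch below shows only that lookup shape; it assumes simple uint32 XIDs, ignores wraparound ordering, and its names (DemoXid, xid_in_sorted_array) are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t DemoXid;

/*
 * Binary search over a numerically sorted XID array, the lookup shape the
 * reflowed comment describes for parallel workers.  Wraparound-aware
 * ordering is deliberately ignored in this sketch.
 */
static bool
xid_in_sorted_array(DemoXid xid, const DemoXid *xids, int nxids)
{
    int low = 0;
    int high = nxids - 1;

    while (low <= high)
    {
        int mid = low + (high - low) / 2;

        if (xids[mid] == xid)
            return true;
        if (xids[mid] < xid)
            low = mid + 1;
        else
            high = mid - 1;
    }
    return false;
}

int
main(void)
{
    DemoXid current[] = {101, 105, 230, 231, 500};
    int n = (int) (sizeof(current) / sizeof(current[0]));

    printf("230 current? %d\n", xid_in_sorted_array(230, current, n)); /* 1 */
    printf("300 current? %d\n", xid_in_sorted_array(300, current, n)); /* 0 */
    return 0;
}
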
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b913bf3ebcbbd0da80fd539aaf788140cf0f9ab8..087b6be084d6eb9c1bdf60f5e59dfc01dc636579 100644
@@ -81,8 +81,8 @@ extern uint32 bootstrap_data_checksum_version;
 
 
 /* User-settable parameters */
-int                    max_wal_size = 64;              /* 1 GB */
-int                    min_wal_size = 5;               /* 80 MB */
+int                    max_wal_size = 64;      /* 1 GB */
+int                    min_wal_size = 5;       /* 80 MB */
 int                    wal_keep_segments = 0;
 int                    XLOGbuffers = -1;
 int                    XLogArchiveTimeout = 0;
@@ -951,14 +951,14 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
        /*
         * Check to see if my copy of RedoRecPtr or doPageWrites is out of date.
         * If so, may have to go back and have the caller recompute everything.
-        * This can only happen just after a checkpoint, so it's better to be
-        * slow in this case and fast otherwise.
+        * This can only happen just after a checkpoint, so it's better to be slow
+        * in this case and fast otherwise.
         *
         * If we aren't doing full-page writes then RedoRecPtr doesn't actually
         * affect the contents of the XLOG record, so we'll update our local copy
         * but not force a recomputation.  (If doPageWrites was just turned off,
-        * we could recompute the record without full pages, but we choose not
-        * to bother.)
+        * we could recompute the record without full pages, but we choose not to
+        * bother.)
         */
        if (RedoRecPtr != Insert->RedoRecPtr)
        {
@@ -970,8 +970,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
        if (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr && doPageWrites)
        {
                /*
-                * Oops, some buffer now needs to be backed up that the caller
-                * didn't back up.  Start over.
+                * Oops, some buffer now needs to be backed up that the caller didn't
+                * back up.  Start over.
                 */
                WALInsertLockRelease();
                END_CRIT_SECTION();
@@ -1100,8 +1100,8 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
                {
                        appendStringInfo(&buf, "error decoding record: out of memory");
                }
-               else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
-                                                         &errormsg))
+               else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
+                                                                  &errormsg))
                {
                        appendStringInfo(&buf, "error decoding record: %s",
                                                         errormsg ? errormsg : "no error message");
@@ -1932,11 +1932,11 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
                /*
                 * Fill the new page's header
                 */
-               NewPage   ->xlp_magic = XLOG_PAGE_MAGIC;
+               NewPage->xlp_magic = XLOG_PAGE_MAGIC;
 
                /* NewPage->xlp_info = 0; */    /* done by memset */
-               NewPage   ->xlp_tli = ThisTimeLineID;
-               NewPage   ->xlp_pageaddr = NewPageBeginPtr;
+               NewPage->xlp_tli = ThisTimeLineID;
+               NewPage->xlp_pageaddr = NewPageBeginPtr;
 
                /* NewPage->xlp_rem_len = 0; */ /* done by memset */
 
@@ -1954,7 +1954,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
                 * compress a few records.
                 */
                if (!Insert->forcePageWrites)
-                       NewPage   ->xlp_info |= XLP_BKP_REMOVABLE;
+                       NewPage->xlp_info |= XLP_BKP_REMOVABLE;
 
                /*
                 * If first page of an XLOG segment file, make it a long header.
@@ -1966,7 +1966,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
                        NewLongPage->xlp_sysid = ControlFile->system_identifier;
                        NewLongPage->xlp_seg_size = XLogSegSize;
                        NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
-                       NewPage   ->xlp_info |= XLP_LONG_HEADER;
+                       NewPage->xlp_info |= XLP_LONG_HEADER;
                }
 
                /*
@@ -2008,10 +2008,10 @@ CalculateCheckpointSegments(void)
         *
         * a) we keep WAL for two checkpoint cycles, back to the "prev" checkpoint.
         * b) during checkpoint, we consume checkpoint_completion_target *
-        *    number of segments consumed between checkpoints.
+        *        number of segments consumed between checkpoints.
         *-------
         */
-       target = (double ) max_wal_size / (2.0 + CheckPointCompletionTarget);
+       target = (double) max_wal_size / (2.0 + CheckPointCompletionTarget);
 
        /* round down */
        CheckPointSegments = (int) target;
@@ -2052,15 +2052,15 @@ XLOGfileslop(XLogRecPtr PriorRedoPtr)
         * remove enough segments to stay below the maximum.
         */
        minSegNo = PriorRedoPtr / XLOG_SEG_SIZE + min_wal_size - 1;
-       maxSegNo =  PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
+       maxSegNo = PriorRedoPtr / XLOG_SEG_SIZE + max_wal_size - 1;
 
        /*
         * Between those limits, recycle enough segments to get us through to the
         * estimated end of next checkpoint.
         *
         * To estimate where the next checkpoint will finish, assume that the
-        * system runs steadily consuming CheckPointDistanceEstimate
-        * bytes between every checkpoint.
+        * system runs steadily consuming CheckPointDistanceEstimate bytes between
+        * every checkpoint.
         *
         * The reason this calculation is done from the prior checkpoint, not the
         * one that just finished, is that this behaves better if some checkpoint
@@ -3005,11 +3005,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
        /*
         * XXX: What should we use as max_segno? We used to use XLOGfileslop when
         * that was a constant, but that was always a bit dubious: normally, at a
-        * checkpoint, XLOGfileslop was the offset from the checkpoint record,
-        * but here, it was the offset from the insert location. We can't do the
+        * checkpoint, XLOGfileslop was the offset from the checkpoint record, but
+        * here, it was the offset from the insert location. We can't do the
         * normal XLOGfileslop calculation here because we don't have access to
-        * the prior checkpoint's redo location. So somewhat arbitrarily, just
-        * use CheckPointSegments.
+        * the prior checkpoint's redo location. So somewhat arbitrarily, just use
+        * CheckPointSegments.
         */
        max_segno = logsegno + CheckPointSegments;
        if (!InstallXLogFileSegment(&installed_segno, tmppath,
@@ -3098,7 +3098,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
                nread = upto - nbytes;
 
                /*
-                * The part that is not read from the source file is filled with zeros.
+                * The part that is not read from the source file is filled with
+                * zeros.
                 */
                if (nread < sizeof(buffer))
                        memset(buffer, 0, sizeof(buffer));
@@ -3153,8 +3154,8 @@ XLogFileCopy(char *dstfname, char *srcfname, int upto)
 
        /*
         * Now move the segment into place with its final name.  (Or just return
-        * the path to the file we created, if the caller wants to handle the
-        * rest on its own.)
+        * the path to the file we created, if the caller wants to handle the rest
+        * on its own.)
         */
        if (dstfname)
        {
@@ -3690,8 +3691,8 @@ RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI)
 
                /*
                 * Remove files that are on a timeline older than the new one we're
-                * switching to, but with a segment number >= the first segment on
-                * the new timeline.
+                * switching to, but with a segment number >= the first segment on the
+                * new timeline.
                 */
                if (strncmp(xlde->d_name, switchseg, 8) < 0 &&
                        strcmp(xlde->d_name + 8, switchseg + 8) > 0)
@@ -3768,12 +3769,13 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
                                                segname)));
 
 #ifdef WIN32
+
                /*
                 * On Windows, if another process (e.g another backend) holds the file
                 * open in FILE_SHARE_DELETE mode, unlink will succeed, but the file
                 * will still show up in directory listing until the last handle is
-                * closed. To avoid confusing the lingering deleted file for a live WAL
-                * file that needs to be archived, rename it before deleting it.
+                * closed. To avoid confusing the lingering deleted file for a live
+                * WAL file that needs to be archived, rename it before deleting it.
                 *
                 * If another process holds the file open without FILE_SHARE_DELETE
                 * flag, rename will fail. We'll try again at the next checkpoint.
@@ -3783,8 +3785,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
                {
                        ereport(LOG,
                                        (errcode_for_file_access(),
-                                errmsg("could not rename old transaction log file \"%s\": %m",
-                                                       path)));
+                          errmsg("could not rename old transaction log file \"%s\": %m",
+                                         path)));
                        return;
                }
                rc = unlink(newpath);
@@ -3795,8 +3797,8 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
                {
                        ereport(LOG,
                                        (errcode_for_file_access(),
-                                errmsg("could not remove old transaction log file \"%s\": %m",
-                                                       path)));
+                          errmsg("could not remove old transaction log file \"%s\": %m",
+                                         path)));
                        return;
                }
                CheckpointStats.ckpt_segs_removed++;
@@ -4609,11 +4611,11 @@ XLOGShmemInit(void)
        int                     i;
 
 #ifdef WAL_DEBUG
+
        /*
-        * Create a memory context for WAL debugging that's exempt from the
-        * normal "no pallocs in critical section" rule. Yes, that can lead to a
-        * PANIC if an allocation fails, but wal_debug is not for production use
-        * anyway.
+        * Create a memory context for WAL debugging that's exempt from the normal
+        * "no pallocs in critical section" rule. Yes, that can lead to a PANIC if
+        * an allocation fails, but wal_debug is not for production use anyway.
         */
        if (walDebugCxt == NULL)
        {
@@ -5044,7 +5046,7 @@ readRecoveryCommandFile(void)
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                                 errmsg("invalid value for recovery parameter \"recovery_target\""),
-                                                errhint("The only allowed value is \"immediate\".")));
+                                          errhint("The only allowed value is \"immediate\".")));
                        ereport(DEBUG2,
                                        (errmsg_internal("recovery_target = '%s'",
                                                                         item->value)));
@@ -5135,9 +5137,9 @@ readRecoveryCommandFile(void)
        }
 
        /*
-        * Override any inconsistent requests. Note that this is a change
-        * of behaviour in 9.5; prior to this we simply ignored a request
-        * to pause if hot_standby = off, which was surprising behaviour.
+        * Override any inconsistent requests. Note that this is a change of
+        * behaviour in 9.5; prior to this we simply ignored a request to pause if
+        * hot_standby = off, which was surprising behaviour.
         */
        if (recoveryTargetAction == RECOVERY_TARGET_ACTION_PAUSE &&
                recoveryTargetActionSet &&
@@ -6043,7 +6045,7 @@ StartupXLOG(void)
        if (read_backup_label(&checkPointLoc, &backupEndRequired,
                                                  &backupFromStandby))
        {
-               List    *tablespaces = NIL;
+               List       *tablespaces = NIL;
 
                /*
                 * Archive recovery was requested, and thanks to the backup label
@@ -6099,7 +6101,7 @@ StartupXLOG(void)
                        foreach(lc, tablespaces)
                        {
                                tablespaceinfo *ti = lfirst(lc);
-                               char    *linkloc;
+                               char       *linkloc;
 
                                linkloc = psprintf("pg_tblspc/%s", ti->oid);
 
@@ -6112,26 +6114,26 @@ StartupXLOG(void)
                                 */
                                if (lstat(linkloc, &st) == 0 && S_ISDIR(st.st_mode))
                                {
-                                       if (!rmtree(linkloc,true))
+                                       if (!rmtree(linkloc, true))
                                                ereport(ERROR,
                                                                (errcode_for_file_access(),
-                                                                errmsg("could not remove directory \"%s\": %m",
-                                                                               linkloc)));
+                                                         errmsg("could not remove directory \"%s\": %m",
+                                                                        linkloc)));
                                }
                                else
                                {
                                        if (unlink(linkloc) < 0 && errno != ENOENT)
                                                ereport(ERROR,
                                                                (errcode_for_file_access(),
-                                                                errmsg("could not remove symbolic link \"%s\": %m",
-                                                                               linkloc)));
+                                                 errmsg("could not remove symbolic link \"%s\": %m",
+                                                                linkloc)));
                                }
 
                                if (symlink(ti->path, linkloc) < 0)
                                        ereport(ERROR,
                                                        (errcode_for_file_access(),
-                                                        errmsg("could not create symbolic link \"%s\": %m",
-                                                                       linkloc)));
+                                                 errmsg("could not create symbolic link \"%s\": %m",
+                                                                linkloc)));
 
                                pfree(ti->oid);
                                pfree(ti->path);
@@ -6222,9 +6224,9 @@ StartupXLOG(void)
         * in place if the database had been cleanly shut down, but it seems
         * safest to just remove them always and let them be rebuilt during the
         * first backend startup.  These files need to be removed from all
-        * directories including pg_tblspc, however the symlinks are created
-        * only after reading tablespace_map file in case of archive recovery
-        * from backup, so needs to clear old relcache files here after creating
+        * directories including pg_tblspc, however the symlinks are created only
+        * after reading tablespace_map file in case of archive recovery from
+        * backup, so needs to clear old relcache files here after creating
         * symlinks.
         */
        RelationCacheInitFileRemove();
@@ -6442,9 +6444,9 @@ StartupXLOG(void)
                 * Also set backupEndPoint and use minRecoveryPoint as the backup end
                 * location if we're starting recovery from a base backup which was
                 * taken from a standby. In this case, the database system status in
-                * pg_control must indicate that the database was already in
-                * recovery. Usually that will be DB_IN_ARCHIVE_RECOVERY but also can
-                * be DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
+                * pg_control must indicate that the database was already in recovery.
+                * Usually that will be DB_IN_ARCHIVE_RECOVERY but also can be
+                * DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
                 * before reaching this point; e.g. because restore_command or
                 * primary_conninfo were faulty.
                 *
@@ -6500,10 +6502,10 @@ StartupXLOG(void)
 
                /*
                 * If there was a tablespace_map file, it's done its job and the
-                * symlinks have been created.  We must get rid of the map file
-                * so that if we crash during recovery, we don't create symlinks
-                * again.  It seems prudent though to just rename the file out of
-                * the way rather than delete it completely.
+                * symlinks have been created.  We must get rid of the map file so
+                * that if we crash during recovery, we don't create symlinks again.
+                * It seems prudent though to just rename the file out of the way
+                * rather than delete it completely.
                 */
                if (haveTblspcMap)
                {
@@ -6859,7 +6861,8 @@ StartupXLOG(void)
                                {
                                        /*
                                         * Before we continue on the new timeline, clean up any
-                                        * (possibly bogus) future WAL segments on the old timeline.
+                                        * (possibly bogus) future WAL segments on the old
+                                        * timeline.
                                         */
                                        RemoveNonParentXlogFiles(EndRecPtr, ThisTimeLineID);
 
@@ -6890,32 +6893,33 @@ StartupXLOG(void)
                        {
                                if (!reachedConsistency)
                                        ereport(FATAL,
-                                               (errmsg("requested recovery stop point is before consistent recovery point")));
+                                                       (errmsg("requested recovery stop point is before consistent recovery point")));
 
                                /*
                                 * This is the last point where we can restart recovery with a
                                 * new recovery target, if we shutdown and begin again. After
-                                * this, Resource Managers may choose to do permanent corrective
-                                * actions at end of recovery.
+                                * this, Resource Managers may choose to do permanent
+                                * corrective actions at end of recovery.
                                 */
                                switch (recoveryTargetAction)
                                {
                                        case RECOVERY_TARGET_ACTION_SHUTDOWN:
-                                                       /*
-                                                        * exit with special return code to request shutdown
-                                                        * of postmaster.  Log messages issued from
-                                                        * postmaster.
-                                                        */
-                                                       proc_exit(3);
+
+                                               /*
+                                                * exit with special return code to request shutdown
+                                                * of postmaster.  Log messages issued from
+                                                * postmaster.
+                                                */
+                                               proc_exit(3);
 
                                        case RECOVERY_TARGET_ACTION_PAUSE:
-                                                       SetRecoveryPause(true);
-                                                       recoveryPausesHere();
+                                               SetRecoveryPause(true);
+                                               recoveryPausesHere();
 
-                                                       /* drop into promote */
+                                               /* drop into promote */
 
                                        case RECOVERY_TARGET_ACTION_PROMOTE:
-                                                       break;
+                                               break;
                                }
                        }
 
@@ -7259,8 +7263,8 @@ StartupXLOG(void)
                 * too.
                 *
                 * If a .done or .ready file already exists for the old timeline,
-                * however, we had already determined that the segment is complete,
-                * so we can let it be archived normally. (In particular, if it was
+                * however, we had already determined that the segment is complete, so
+                * we can let it be archived normally. (In particular, if it was
                 * restored from the archive to begin with, it's expected to have a
                 * .done file).
                 */
@@ -7291,8 +7295,8 @@ StartupXLOG(void)
                                if (rename(origpath, partialpath) != 0)
                                        ereport(ERROR,
                                                        (errcode_for_file_access(),
-                                                        errmsg("could not rename file \"%s\" to \"%s\": %m",
-                                                                       origpath, partialpath)));
+                                                errmsg("could not rename file \"%s\" to \"%s\": %m",
+                                                               origpath, partialpath)));
                                XLogArchiveNotify(partialfname);
                        }
                }
@@ -7366,8 +7370,8 @@ StartupXLOG(void)
        XLogReportParameters();
 
        /*
-        * Local WAL inserts enabled, so it's time to finish initialization
-        * of commit timestamp.
+        * Local WAL inserts enabled, so it's time to finish initialization of
+        * commit timestamp.
         */
        CompleteCommitTsInitialization();
 
@@ -7961,7 +7965,7 @@ LogCheckpointStart(int flags, bool restartpoint)
                 (flags & CHECKPOINT_WAIT) ? " wait" : "",
                 (flags & CHECKPOINT_CAUSE_XLOG) ? " xlog" : "",
                 (flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
-                (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" :"");
+                (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "");
 }
 
 /*
@@ -8056,8 +8060,8 @@ static void
 UpdateCheckPointDistanceEstimate(uint64 nbytes)
 {
        /*
-        * To estimate the number of segments consumed between checkpoints, keep
-        * moving average of the amount of WAL generated in previous checkpoint
+        * To estimate the number of segments consumed between checkpoints, keep a
+        * moving average of the amount of WAL generated in previous checkpoint
         * cycles. However, if the load is bursty, with quiet periods and busy
         * periods, we want to cater for the peak load. So instead of a plain
         * moving average, let the average decline slowly if the previous cycle
@@ -9473,8 +9477,8 @@ xlog_redo(XLogReaderState *record)
                }
 
                /*
-                * Update the commit timestamp tracking. If there was a change
-                * it needs to be activated or deactivated accordingly.
+                * Update the commit timestamp tracking. If there was a change it
+                * needs to be activated or deactivated accordingly.
                 */
                if (track_commit_timestamp != xlrec.track_commit_timestamp)
                {
@@ -9483,6 +9487,7 @@ xlog_redo(XLogReaderState *record)
                        if (track_commit_timestamp)
                                ActivateCommitTs();
                        else
+
                                /*
                                 * We can't create a new WAL record here, but that's OK as
                                 * master did the WAL logging already and we will replay the
@@ -9996,7 +10001,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
                        char       *relpath = NULL;
                        int                     rllen;
                        StringInfoData buflinkpath;
-                       char    *s = linkpath;
+                       char       *s = linkpath;
 
                        /* Skip special stuff */
                        if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
@@ -10023,10 +10028,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
                        linkpath[rllen] = '\0';
 
                        /*
-                        * Add the escape character '\\' before newline in a string
-                        * to ensure that we can distinguish between the newline in
-                        * the tablespace path and end of line while reading
-                        * tablespace_map file during archive recovery.
+                        * Add the escape character '\\' before newline in a string to
+                        * ensure that we can distinguish between the newline in the
+                        * tablespace path and end of line while reading tablespace_map
+                        * file during archive recovery.
                         */
                        initStringInfo(&buflinkpath);
 
@@ -10054,8 +10059,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
                        ti->rpath = relpath ? pstrdup(relpath) : NULL;
                        ti->size = infotbssize ? sendTablespace(fullpath, true) : -1;
 
-                       if(tablespaces)
-                          *tablespaces = lappend(*tablespaces, ti);
+                       if (tablespaces)
+                               *tablespaces = lappend(*tablespaces, ti);
 
                        appendStringInfo(&tblspc_mapfbuf, "%s %s\n", ti->oid, ti->path);
 
@@ -10150,10 +10155,10 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
                                }
                                else
                                        ereport(ERROR,
-                                                       (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                                        errmsg("a backup is already in progress"),
-                                                        errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
-                                                                        TABLESPACE_MAP)));
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("a backup is already in progress"),
+                                                  errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
+                                                                  TABLESPACE_MAP)));
 
                                fp = AllocateFile(TABLESPACE_MAP, "w");
 
@@ -10353,8 +10358,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
                                                        BACKUP_LABEL_FILE)));
 
                /*
-                * Remove tablespace_map file if present, it is created
-                * only if there are tablespaces.
+                * Remove tablespace_map file if present, it is created only if there
+                * are tablespaces.
                 */
                unlink(TABLESPACE_MAP);
        }
@@ -10773,10 +10778,12 @@ read_tablespace_map(List **tablespaces)
        tablespaceinfo *ti;
        FILE       *lfp;
        char            tbsoid[MAXPGPATH];
-       char            *tbslinkpath;
+       char       *tbslinkpath;
        char            str[MAXPGPATH];
-       int                     ch, prev_ch = -1,
-                               i = 0, n;
+       int                     ch,
+                               prev_ch = -1,
+                               i = 0,
+                               n;
 
        /*
         * See if tablespace_map file is present
@@ -10794,9 +10801,9 @@ read_tablespace_map(List **tablespaces)
 
        /*
         * Read and parse the link name and path lines from tablespace_map file
-        * (this code is pretty crude, but we are not expecting any variability
-        * in the file format).  While taking backup we embed escape character
-        * '\\' before newline in tablespace path, so that during reading of
+        * (this code is pretty crude, but we are not expecting any variability in
+        * the file format).  While taking backup we embed escape character '\\'
+        * before newline in tablespace path, so that during reading of
         * tablespace_map file, we could distinguish newline in tablespace path
         * and end of line.  Now while reading tablespace_map file, remove the
         * escape character that has been added in tablespace path during backup.
@@ -10808,8 +10815,8 @@ read_tablespace_map(List **tablespaces)
                        str[i] = '\0';
                        if (sscanf(str, "%s %n", tbsoid, &n) != 1)
                                ereport(FATAL,
-                                       (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                               errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
+                                               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                        errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
                        tbslinkpath = str + n;
                        i = 0;
 
@@ -10821,7 +10828,7 @@ read_tablespace_map(List **tablespaces)
                        continue;
                }
                else if ((ch == '\n' || ch == '\r') && prev_ch == '\\')
-                       str[i-1] = ch;
+                       str[i - 1] = ch;
                else
                        str[i++] = ch;
                prev_ch = ch;
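
The comments touched in the hunks above describe the tablespace_map convention: when the file is written, a backslash is placed before any newline embedded in a tablespace path; when it is read back, a newline preceded by a backslash is folded back into the path, while a bare newline ends the line. The toy C sketch below mirrors just that escape/unescape round trip; the buffer handling and names (escape_newlines, unescape_newlines) are invented for illustration, and the real parser additionally splits each line into an OID and a path.

#include <stdio.h>
#include <string.h>

/* Put a backslash before any newline or carriage return in the input. */
static void
escape_newlines(const char *in, char *out, size_t outlen)
{
    size_t j = 0;

    for (const char *p = in; *p && j + 2 < outlen; p++)
    {
        if (*p == '\n' || *p == '\r')
            out[j++] = '\\';
        out[j++] = *p;
    }
    out[j] = '\0';
}

/* Fold a backslash-escaped newline back into the string. */
static void
unescape_newlines(const char *in, char *out, size_t outlen)
{
    size_t j = 0;
    char prev = '\0';

    for (const char *p = in; *p && j + 1 < outlen; p++)
    {
        if ((*p == '\n' || *p == '\r') && prev == '\\')
            out[j - 1] = *p;    /* overwrite the escape character */
        else
            out[j++] = *p;
        prev = *p;
    }
    out[j] = '\0';
}

int
main(void)
{
    char escaped[256];
    char restored[256];

    escape_newlines("odd\npath", escaped, sizeof(escaped));
    unescape_newlines(escaped, restored, sizeof(restored));
    printf("round-trip ok? %d\n", strcmp(restored, "odd\npath") == 0);
    return 0;
}
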
@@ -10868,7 +10875,7 @@ BackupInProgress(void)
 
 /*
  * CancelBackup: rename the "backup_label" and "tablespace_map"
- *               files to cancel backup mode
+ *                              files to cancel backup mode
  *
  * If the "backup_label" file exists, it will be renamed to "backup_label.old".
  * Similarly, if the "tablespace_map" file exists, it will be renamed to
@@ -11115,8 +11122,8 @@ static bool
 WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                                        bool fetching_ckpt, XLogRecPtr tliRecPtr)
 {
-       static TimestampTz      last_fail_time = 0;
-       TimestampTz     now;
+       static TimestampTz last_fail_time = 0;
+       TimestampTz now;
 
        /*-------
         * Standby mode is implemented by a state machine:
@@ -11270,9 +11277,10 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                         */
                                        now = GetCurrentTimestamp();
                                        if (!TimestampDifferenceExceeds(last_fail_time, now,
-                                                                                                       wal_retrieve_retry_interval))
+                                                                                               wal_retrieve_retry_interval))
                                        {
-                                               long            secs, wait_time;
+                                               long            secs,
+                                                                       wait_time;
                                                int                     usecs;
 
                                                TimestampDifference(last_fail_time, now, &secs, &usecs);
@@ -11280,7 +11288,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                                                        (secs * 1000 + usecs / 1000);
 
                                                WaitLatch(&XLogCtl->recoveryWakeupLatch,
-                                                                 WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+                                                        WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
                                                                  wait_time);
                                                ResetLatch(&XLogCtl->recoveryWakeupLatch);
                                                now = GetCurrentTimestamp();
@@ -11605,8 +11613,8 @@ fsync_pgdata(char *datadir)
                return;
 
        /*
-        * If possible, hint to the kernel that we're soon going to fsync
-        * the data directory and its contents.
+        * If possible, hint to the kernel that we're soon going to fsync the data
+        * directory and its contents.
         */
 #if defined(HAVE_SYNC_FILE_RANGE) || \
        (defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED))
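
The hunk in CalculateCheckpointSegments() above reflows the comment around the formula target = max_wal_size / (2.0 + checkpoint_completion_target): WAL is kept for roughly two checkpoint cycles plus the fraction consumed while a checkpoint is in progress. As a worked example, the default max_wal_size of 64 segments shown above, together with an assumed completion target of 0.5 (not something taken from this diff), yields CheckPointSegments = 25; a minimal sketch:

#include <stdio.h>

int
main(void)
{
    int max_wal_size = 64;                     /* in 16 MB segments, i.e. 1 GB */
    double checkpoint_completion_target = 0.5; /* assumed GUC value */
    double target;
    int checkpoint_segments;

    target = (double) max_wal_size / (2.0 + checkpoint_completion_target);
    checkpoint_segments = (int) target;        /* round down, as in the hunk */

    printf("CheckPointSegments = %d (target %.1f)\n",
           checkpoint_segments, target);       /* prints 25 (target 25.6) */
    return 0;
}
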
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 419736da3103ddc65aaf09ed614a90a7ed1fd307..b96c39ac657dd92649254b8d2501e080c0c3a543 100644
@@ -33,7 +33,7 @@
 #include "pg_trace.h"
 
 /* Buffer size required to store a compressed version of backup block image */
-#define PGLZ_MAX_BLCKSZ        PGLZ_MAX_OUTPUT(BLCKSZ)
+#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
 
 /*
  * For each block reference registered with XLogRegisterBuffer, we fill in
@@ -58,7 +58,7 @@ typedef struct
 
        /* buffer to store a compressed version of backup block image */
        char            compressed_page[PGLZ_MAX_BLCKSZ];
-}      registered_buffer;
+} registered_buffer;
 
 static registered_buffer *registered_buffers;
 static int     max_registered_buffers;         /* allocated size */
@@ -110,7 +110,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
                                   XLogRecPtr RedoRecPtr, bool doPageWrites,
                                   XLogRecPtr *fpw_lsn);
 static bool XLogCompressBackupBlock(char *page, uint16 hole_offset,
-                                                                       uint16 hole_length, char *dest, uint16 *dlen);
+                                               uint16 hole_length, char *dest, uint16 *dlen);
 
 /*
  * Begin constructing a WAL record. This must be called before the
@@ -602,7 +602,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
                                                                                        &compressed_len);
                        }
 
-                       /* Fill in the remaining fields in the XLogRecordBlockHeader struct */
+                       /*
+                        * Fill in the remaining fields in the XLogRecordBlockHeader
+                        * struct
+                        */
                        bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
 
                        /*
@@ -762,7 +765,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
  * the length of compressed block image.
  */
 static bool
-XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
+XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length,
                                                char *dest, uint16 *dlen)
 {
        int32           orig_len = BLCKSZ - hole_length;
@@ -790,16 +793,15 @@ XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
                source = page;
 
        /*
-        * We recheck the actual size even if pglz_compress() reports success
-        * and see if the number of bytes saved by compression is larger than
-        * the length of extra data needed for the compressed version of block
-        * image.
+        * We recheck the actual size even if pglz_compress() reports success and
+        * see if the number of bytes saved by compression is larger than the
+        * length of extra data needed for the compressed version of block image.
         */
        len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
        if (len >= 0 &&
                len + extra_bytes < orig_len)
        {
-               *dlen = (uint16) len;           /* successful compression */
+               *dlen = (uint16) len;   /* successful compression */
                return true;
        }
        return false;
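
The final hunk above reflows the comment in XLogCompressBackupBlock() explaining when a compressed block image is actually kept: only if compression succeeded and the bytes saved exceed the extra metadata the compressed format carries. A tiny standalone C sketch of that acceptance test follows; worth_keeping_compressed and the literal sizes in main() are invented for illustration and are not PostgreSQL code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* compress_len < 0 stands for "compression failed". */
static bool
worth_keeping_compressed(int32_t compress_len, uint16_t extra_bytes,
                         uint16_t orig_len)
{
    return compress_len >= 0 &&
        (uint32_t) compress_len + extra_bytes < orig_len;
}

int
main(void)
{
    /* 8192-byte page squeezed to 5000 bytes, 2 bytes of extra header: keep */
    printf("%d\n", worth_keeping_compressed(5000, 2, 8192));
    /* barely smaller than the original once the header is counted: discard */
    printf("%d\n", worth_keeping_compressed(8191, 2, 8192));
    return 0;
}
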
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 3661e7229aabb8e2597de0a18f76fdd6bd277013..a9e926c5a283d81d23ea00d330a15f5e0fd0b144 100644
@@ -1086,50 +1086,53 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg)
                                         blk->bimg_len == BLCKSZ))
                                {
                                        report_invalid_record(state,
-                                         "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
+                                                                                 "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
                                                                                  (unsigned int) blk->hole_offset,
                                                                                  (unsigned int) blk->hole_length,
                                                                                  (unsigned int) blk->bimg_len,
                                                                                  (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
                                        goto err;
                                }
+
                                /*
-                                * cross-check that hole_offset == 0 and hole_length == 0
-                                * if the HAS_HOLE flag is not set.
+                                * cross-check that hole_offset == 0 and hole_length == 0 if
+                                * the HAS_HOLE flag is not set.
                                 */
                                if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
                                        (blk->hole_offset != 0 || blk->hole_length != 0))
                                {
                                        report_invalid_record(state,
-                                         "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
+                                                                                 "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
                                                                                  (unsigned int) blk->hole_offset,
                                                                                  (unsigned int) blk->hole_length,
                                                                                  (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
                                        goto err;
                                }
+
                                /*
-                                * cross-check that bimg_len < BLCKSZ
-                                * if the IS_COMPRESSED flag is set.
+                                * cross-check that bimg_len < BLCKSZ if the IS_COMPRESSED
+                                * flag is set.
                                 */
                                if ((blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
                                        blk->bimg_len == BLCKSZ)
                                {
                                        report_invalid_record(state,
-                                         "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
+                                                                                 "BKPIMAGE_IS_COMPRESSED set, but block image length %u at %X/%X",
                                                                                  (unsigned int) blk->bimg_len,
                                                                                  (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
                                        goto err;
                                }
+
                                /*
-                                * cross-check that bimg_len = BLCKSZ if neither
-                                * HAS_HOLE nor IS_COMPRESSED flag is set.
+                                * cross-check that bimg_len = BLCKSZ if neither HAS_HOLE nor
+                                * IS_COMPRESSED flag is set.
                                 */
                                if (!(blk->bimg_info & BKPIMAGE_HAS_HOLE) &&
                                        !(blk->bimg_info & BKPIMAGE_IS_COMPRESSED) &&
                                        blk->bimg_len != BLCKSZ)
                                {
                                        report_invalid_record(state,
-                                         "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
+                                                                                 "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_IS_COMPRESSED set, but block image length is %u at %X/%X",
                                                                                  (unsigned int) blk->data_len,
                                                                                  (uint32) (state->ReadRecPtr >> 32), (uint32) state->ReadRecPtr);
                                        goto err;
@@ -1294,8 +1297,8 @@ bool
 RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
 {
        DecodedBkpBlock *bkpb;
-       char   *ptr;
-       char    tmp[BLCKSZ];
+       char       *ptr;
+       char            tmp[BLCKSZ];
 
        if (!record->blocks[block_id].in_use)
                return false;
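
The reflowed comments in this hunk spell out the consistency rules DecodeXLogRecord() enforces on a block image header: BKPIMAGE_HAS_HOLE implies a non-empty hole and an image shorter than BLCKSZ, its absence implies a zero-length hole, BKPIMAGE_IS_COMPRESSED implies an image shorter than BLCKSZ, and with neither flag set the image must be exactly BLCKSZ long. The helper below is only an illustrative condensation of those four cross-checks, using the real DecodedBkpBlock fields and flag names; the backend code reports a specific message and jumps to its err label rather than returning a boolean.

#include "postgres.h"
#include "access/xlogreader.h"
#include "access/xlogrecord.h"

/*
 * Condensed restatement of the four cross-checks shown above.  Returns true
 * when the flags and lengths recorded for a backup block image are mutually
 * consistent.
 */
static bool
block_image_header_is_consistent(const DecodedBkpBlock *blk)
{
    bool        has_hole = (blk->bimg_info & BKPIMAGE_HAS_HOLE) != 0;
    bool        compressed = (blk->bimg_info & BKPIMAGE_IS_COMPRESSED) != 0;

    /* a hole was advertised, so it must really be there */
    if (has_hole &&
        (blk->hole_offset == 0 || blk->hole_length == 0 ||
         blk->bimg_len == BLCKSZ))
        return false;

    /* no hole advertised, so the hole fields must be zero */
    if (!has_hole && (blk->hole_offset != 0 || blk->hole_length != 0))
        return false;

    /* compression must actually have shortened the image */
    if (compressed && blk->bimg_len == BLCKSZ)
        return false;

    /* with neither flag, the image is a verbatim full page */
    if (!has_hole && !compressed && blk->bimg_len != BLCKSZ)
        return false;

    return true;
}
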
index e42187a7d5da76babf397d8030c73c1f8150fad8..95d6c146fa51e1f56399d7e8b27c7adfb41fd7dd 100644 (file)
@@ -401,6 +401,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
                        proc_exit(1);           /* should never return */
 
                case BootstrapProcess:
+
                        /*
                         * There was a brief instant during which mode was Normal; this is
                         * okay.  We need to be in bootstrap mode during BootStrapXLOG for
index ac1dafb0411990c565dd023445367140adf49822..5e704181eccf922a91041410c9c7ca9e8806c5ed 100644 (file)
@@ -189,7 +189,8 @@ sub Catalogs
                                                }
                                                else
                                                {
-                                                       die "unknown column option $attopt on column $attname"
+                                                       die
+"unknown column option $attopt on column $attname";
                                                }
                                        }
                                        push @{ $catalog{columns} }, \%row;
index 943909c82254657dcd8894f2fb58561573154f7a..50a00cf8c8a38808fcc3546e988cd869b0877a2d 100644 (file)
@@ -397,14 +397,14 @@ ExecuteGrantStmt(GrantStmt *stmt)
        istmt.behavior = stmt->behavior;
 
        /*
-        * Convert the RoleSpec list into an Oid list.  Note that at this point
-        * we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
+        * Convert the RoleSpec list into an Oid list.  Note that at this point we
+        * insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
         * there shouldn't be any additional work needed to support this case.
         */
        foreach(cell, stmt->grantees)
        {
-               RoleSpec *grantee = (RoleSpec *) lfirst(cell);
-               Oid grantee_uid;
+               RoleSpec   *grantee = (RoleSpec *) lfirst(cell);
+               Oid                     grantee_uid;
 
                switch (grantee->roletype)
                {
@@ -892,14 +892,14 @@ ExecAlterDefaultPrivilegesStmt(AlterDefaultPrivilegesStmt *stmt)
        iacls.behavior = action->behavior;
 
        /*
-        * Convert the RoleSpec list into an Oid list.  Note that at this point
-        * we insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
+        * Convert the RoleSpec list into an Oid list.  Note that at this point we
+        * insert an ACL_ID_PUBLIC into the list if appropriate, so downstream
         * there shouldn't be any additional work needed to support this case.
         */
        foreach(cell, action->grantees)
        {
-               RoleSpec *grantee = (RoleSpec *) lfirst(cell);
-               Oid grantee_uid;
+               RoleSpec   *grantee = (RoleSpec *) lfirst(cell);
+               Oid                     grantee_uid;
 
                switch (grantee->roletype)
                {
diff --git