Pre-beta mechanical code beautification.
author     Tom Lane <tgl@sss.pgh.pa.us>
Fri, 19 May 2023 21:24:48 +0000 (17:24 -0400)
committer  Tom Lane <tgl@sss.pgh.pa.us>
Fri, 19 May 2023 21:24:48 +0000 (17:24 -0400)
Run pgindent, pgperltidy, and reformat-dat-files.

This set of diffs is a bit larger than typical.  We've updated to
pg_bsd_indent 2.1.2, which properly indents variable declarations that
have multi-line initialization expressions (the continuation lines are
now indented one tab stop).  We've also updated to perltidy version
20230309 and changed some of its settings, which reduces its desire to
add whitespace to lines to make assignments etc. line up.  Going
forward, that should make for fewer random-seeming changes to existing
code.

Discussion: https://postgr.es/m/20230428092545.qfb3y5wcu4cm75ur@alvherre.pgsql
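
An illustrative sketch of the pg_bsd_indent change described above (not part of the commit; the flag names and values are invented for the example): a declaration whose initialization expression wraps onto a following line now has that continuation line indented one additional tab stop, rather than starting at the declaration's own indentation. The same pattern appears in the real hunks below, for instance the xmax_infomask_changed() change in heapam.c.

#include <stdio.h>

#define FLAG_A 0x01				/* invented flag values, for illustration only */
#define FLAG_B 0x02
#define FLAG_C 0x04

int
main(void)
{
	/*
	 * The previous pg_bsd_indent release would have laid this out as:
	 *
	 *	const unsigned int interesting =
	 *	FLAG_A | FLAG_B | FLAG_C;
	 *
	 * pg_bsd_indent 2.1.2 indents the continuation line one tab stop:
	 */
	const unsigned int interesting =
		FLAG_A | FLAG_B | FLAG_C;

	printf("%u\n", interesting);
	return 0;
}

The perltidy side of the change is visible directly in the Perl hunks below: vertical-alignment padding such as "my $rel    = ..." collapses to a single space ("my $rel = ...").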

402 files changed:
contrib/amcheck/t/001_verify_heapam.pl
contrib/amcheck/t/003_cic_2pc.pl
contrib/amcheck/verify_heapam.c
contrib/auto_explain/t/001_auto_explain.pl
contrib/basebackup_to_shell/t/001_basic.pl
contrib/basic_archive/basic_archive.c
contrib/dblink/dblink.c
contrib/intarray/bench/bench.pl
contrib/intarray/bench/create_test.pl
contrib/ltree/ltree_gist.c
contrib/ltree/ltree_io.c
contrib/ltree/ltxtquery_io.c
contrib/pg_prewarm/t/001_basic.pl
contrib/pg_walinspect/pg_walinspect.c
contrib/postgres_fdw/connection.c
contrib/postgres_fdw/postgres_fdw.c
contrib/postgres_fdw/shippable.c
contrib/seg/seg-validate.pl
contrib/test_decoding/t/001_repl_stats.pl
contrib/test_decoding/test_decoding.c
doc/src/sgml/mk_feature_tables.pl
src/backend/access/brin/brin.c
src/backend/access/common/reloptions.c
src/backend/access/gist/gist.c
src/backend/access/gist/gistbuildbuffers.c
src/backend/access/gist/gistget.c
src/backend/access/gist/gistxlog.c
src/backend/access/hash/hashfunc.c
src/backend/access/heap/heapam.c
src/backend/access/heap/heapam_handler.c
src/backend/access/heap/hio.c
src/backend/access/heap/pruneheap.c
src/backend/access/heap/vacuumlazy.c
src/backend/access/heap/visibilitymap.c
src/backend/access/nbtree/nbtpage.c
src/backend/access/rmgrdesc/dbasedesc.c
src/backend/access/rmgrdesc/gindesc.c
src/backend/access/spgist/spgscan.c
src/backend/access/table/tableam.c
src/backend/access/transam/multixact.c
src/backend/access/transam/parallel.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xloginsert.c
src/backend/access/transam/xlogprefetcher.c
src/backend/access/transam/xlogreader.c
src/backend/access/transam/xlogrecovery.c
src/backend/backup/basebackup.c
src/backend/backup/basebackup_copy.c
src/backend/catalog/Catalog.pm
src/backend/catalog/aclchk.c
src/backend/catalog/genbki.pl
src/backend/catalog/indexing.c
src/backend/catalog/namespace.c
src/backend/catalog/pg_operator.c
src/backend/catalog/pg_shdepend.c
src/backend/commands/alter.c
src/backend/commands/collationcmds.c
src/backend/commands/dbcommands.c
src/backend/commands/dropcmds.c
src/backend/commands/explain.c
src/backend/commands/functioncmds.c
src/backend/commands/indexcmds.c
src/backend/commands/schemacmds.c
src/backend/commands/subscriptioncmds.c
src/backend/commands/tablecmds.c
src/backend/commands/tablespace.c
src/backend/commands/typecmds.c
src/backend/commands/user.c
src/backend/commands/view.c
src/backend/executor/execExpr.c
src/backend/executor/execExprInterp.c
src/backend/executor/execIndexing.c
src/backend/executor/execSRF.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeHash.c
src/backend/executor/nodeHashjoin.c
src/backend/executor/nodeIncrementalSort.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeTableFuncscan.c
src/backend/executor/nodeWindowAgg.c
src/backend/executor/spi.c
src/backend/jit/llvm/llvmjit.c
src/backend/jit/llvm/llvmjit_deform.c
src/backend/jit/llvm/llvmjit_expr.c
src/backend/libpq/be-secure-gssapi.c
src/backend/libpq/be-secure-openssl.c
src/backend/libpq/hba.c
src/backend/nodes/gen_node_support.pl
src/backend/optimizer/path/costsize.c
src/backend/optimizer/util/appendinfo.c
src/backend/optimizer/util/relnode.c
src/backend/parser/check_keywords.pl
src/backend/parser/parse_expr.c
src/backend/parser/parse_merge.c
src/backend/parser/parse_utilcmd.c
src/backend/partitioning/partbounds.c
src/backend/postmaster/fork_process.c
src/backend/regex/regc_lex.c
src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
src/backend/replication/logical/decode.c
src/backend/replication/logical/logical.c
src/backend/replication/logical/origin.c
src/backend/replication/logical/reorderbuffer.c
src/backend/replication/logical/snapbuild.c
src/backend/replication/logical/tablesync.c
src/backend/replication/logical/worker.c
src/backend/replication/pgoutput/pgoutput.c
src/backend/replication/syncrep.c
src/backend/rewrite/rewriteHandler.c
src/backend/rewrite/rowsecurity.c
src/backend/snowball/snowball_create.pl
src/backend/statistics/extended_stats.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/file/buffile.c
src/backend/storage/ipc/dsm_impl.c
src/backend/storage/lmgr/generate-lwlocknames.pl
src/backend/storage/lmgr/lock.c
src/backend/storage/lmgr/lwlock.c
src/backend/storage/lmgr/predicate.c
src/backend/storage/lmgr/proc.c
src/backend/storage/smgr/md.c
src/backend/tsearch/spell.c
src/backend/utils/Gen_dummy_probes.pl
src/backend/utils/Gen_fmgrtab.pl
src/backend/utils/activity/pgstat.c
src/backend/utils/activity/pgstat_shmem.c
src/backend/utils/activity/pgstat_xact.c
src/backend/utils/adt/datetime.c
src/backend/utils/adt/float.c
src/backend/utils/adt/jsonfuncs.c
src/backend/utils/adt/jsonpath.c
src/backend/utils/adt/jsonpath_exec.c
src/backend/utils/adt/jsonpath_internal.h
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/tsquery_op.c
src/backend/utils/adt/tsvector_op.c
src/backend/utils/adt/varchar.c
src/backend/utils/adt/varlena.c
src/backend/utils/adt/xid8funcs.c
src/backend/utils/adt/xml.c
src/backend/utils/cache/lsyscache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/relmapper.c
src/backend/utils/fmgr/fmgr.c
src/backend/utils/generate-errcodes.pl
src/backend/utils/init/postinit.c
src/backend/utils/init/usercontext.c
src/backend/utils/mb/Unicode/UCS_to_BIG5.pl
src/backend/utils/mb/Unicode/UCS_to_EUC_CN.pl
src/backend/utils/mb/Unicode/UCS_to_EUC_JIS_2004.pl
src/backend/utils/mb/Unicode/UCS_to_EUC_JP.pl
src/backend/utils/mb/Unicode/UCS_to_EUC_KR.pl
src/backend/utils/mb/Unicode/UCS_to_EUC_TW.pl
src/backend/utils/mb/Unicode/UCS_to_GB18030.pl
src/backend/utils/mb/Unicode/UCS_to_JOHAB.pl
src/backend/utils/mb/Unicode/UCS_to_SHIFT_JIS_2004.pl
src/backend/utils/mb/Unicode/UCS_to_SJIS.pl
src/backend/utils/mb/Unicode/UCS_to_UHC.pl
src/backend/utils/mb/Unicode/UCS_to_most.pl
src/backend/utils/mb/Unicode/convutils.pm
src/backend/utils/misc/guc.c
src/backend/utils/misc/guc_tables.c
src/backend/utils/mmgr/dsa.c
src/backend/utils/mmgr/freepage.c
src/backend/utils/mmgr/mcxt.c
src/backend/utils/resowner/resowner.c
src/backend/utils/sort/tuplesort.c
src/backend/utils/time/snapmgr.c
src/bin/initdb/initdb.c
src/bin/initdb/t/001_initdb.pl
src/bin/pg_amcheck/t/002_nonesuch.pl
src/bin/pg_amcheck/t/003_check.pl
src/bin/pg_amcheck/t/004_verify_heapam.pl
src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl
src/bin/pg_basebackup/pg_basebackup.c
src/bin/pg_basebackup/pg_receivewal.c
src/bin/pg_basebackup/t/010_pg_basebackup.pl
src/bin/pg_basebackup/t/020_pg_receivewal.pl
src/bin/pg_basebackup/t/030_pg_recvlogical.pl
src/bin/pg_basebackup/walmethods.c
src/bin/pg_basebackup/walmethods.h
src/bin/pg_checksums/t/002_actions.pl
src/bin/pg_controldata/t/001_pg_controldata.pl
src/bin/pg_ctl/t/001_start_stop.pl
src/bin/pg_ctl/t/004_logrotate.pl
src/bin/pg_dump/compress_io.c
src/bin/pg_dump/compress_lz4.c
src/bin/pg_dump/compress_zstd.c
src/bin/pg_dump/compress_zstd.h
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_tar.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dumpall.c
src/bin/pg_dump/t/002_pg_dump.pl
src/bin/pg_dump/t/004_pg_dump_parallel.pl
src/bin/pg_dump/t/010_dump_connstr.pl
src/bin/pg_resetwal/t/002_corrupted.pl
src/bin/pg_rewind/t/001_basic.pl
src/bin/pg_rewind/t/006_options.pl
src/bin/pg_rewind/t/007_standby_source.pl
src/bin/pg_rewind/t/008_min_recovery_point.pl
src/bin/pg_rewind/t/009_growing_files.pl
src/bin/pg_rewind/t/RewindTest.pm
src/bin/pg_test_fsync/pg_test_fsync.c
src/bin/pg_upgrade/check.c
src/bin/pg_upgrade/info.c
src/bin/pg_upgrade/pg_upgrade.c
src/bin/pg_upgrade/t/002_pg_upgrade.pl
src/bin/pg_verifybackup/t/002_algorithm.pl
src/bin/pg_verifybackup/t/003_corruption.pl
src/bin/pg_verifybackup/t/004_options.pl
src/bin/pg_verifybackup/t/006_encoding.pl
src/bin/pg_verifybackup/t/007_wal.pl
src/bin/pg_verifybackup/t/008_untar.pl
src/bin/pg_verifybackup/t/009_extract.pl
src/bin/pg_verifybackup/t/010_client_untar.pl
src/bin/pg_waldump/t/002_save_fullpage.pl
src/bin/pgbench/pgbench.c
src/bin/pgbench/t/001_pgbench_with_server.pl
src/bin/pgbench/t/002_pgbench_no_server.pl
src/bin/psql/command.c
src/bin/psql/common.c
src/bin/psql/create_help.pl
src/bin/psql/crosstabview.c
src/bin/psql/describe.c
src/bin/psql/settings.h
src/bin/psql/t/001_basic.pl
src/bin/psql/t/010_tab_completion.pl
src/bin/psql/t/020_cancel.pl
src/bin/scripts/t/020_createdb.pl
src/bin/scripts/t/040_createuser.pl
src/bin/scripts/t/090_reindexdb.pl
src/bin/scripts/t/100_vacuumdb.pl
src/bin/scripts/t/200_connstr.pl
src/bin/scripts/vacuumdb.c
src/common/unicode/generate-norm_test_table.pl
src/common/unicode/generate-unicode_norm_table.pl
src/common/unicode/generate-unicode_normprops_table.pl
src/fe_utils/print.c
src/include/access/amapi.h
src/include/access/brin_tuple.h
src/include/access/gist_private.h
src/include/access/tableam.h
src/include/access/xlogreader.h
src/include/catalog/pg_aggregate.dat
src/include/catalog/pg_auth_members.h
src/include/catalog/pg_database.dat
src/include/catalog/pg_proc.dat
src/include/catalog/pg_subscription.h
src/include/catalog/reformat_dat_file.pl
src/include/catalog/renumber_oids.pl
src/include/executor/hashjoin.h
src/include/executor/tuptable.h
src/include/fe_utils/print.h
src/include/funcapi.h
src/include/nodes/primnodes.h
src/include/port/win32ntdll.h
src/include/replication/reorderbuffer.h
src/include/storage/bufmgr.h
src/include/storage/lock.h
src/include/storage/lwlock.h
src/include/storage/predicate_internals.h
src/include/storage/proc.h
src/include/utils/backend_status.h
src/include/utils/pg_locale.h
src/include/utils/rel.h
src/include/utils/varlena.h
src/interfaces/ecpg/ecpglib/data.c
src/interfaces/ecpg/ecpglib/descriptor.c
src/interfaces/ecpg/ecpglib/execute.c
src/interfaces/ecpg/include/pgtypes_interval.h
src/interfaces/ecpg/pgtypeslib/dt.h
src/interfaces/ecpg/pgtypeslib/interval.c
src/interfaces/ecpg/pgtypeslib/timestamp.c
src/interfaces/ecpg/preproc/check_rules.pl
src/interfaces/ecpg/preproc/parse.pl
src/interfaces/ecpg/preproc/type.c
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/fe-exec.c
src/interfaces/libpq/fe-lobj.c
src/interfaces/libpq/fe-misc.c
src/interfaces/libpq/fe-print.c
src/interfaces/libpq/fe-protocol3.c
src/interfaces/libpq/fe-secure-common.c
src/interfaces/libpq/fe-secure-gssapi.c
src/interfaces/libpq/fe-secure-openssl.c
src/interfaces/libpq/fe-secure.c
src/interfaces/libpq/libpq-int.h
src/interfaces/libpq/t/001_uri.pl
src/interfaces/libpq/t/003_load_balance_host_list.pl
src/interfaces/libpq/t/004_load_balance_dns.pl
src/pl/plperl/plc_perlboot.pl
src/pl/plperl/text2macro.pl
src/port/dirmod.c
src/test/authentication/t/001_password.pl
src/test/authentication/t/002_saslprep.pl
src/test/authentication/t/003_peer.pl
src/test/authentication/t/004_file_inclusion.pl
src/test/icu/t/010_database.pl
src/test/kerberos/t/001_auth.pl
src/test/ldap/LdapServer.pm
src/test/ldap/t/001_auth.pl
src/test/modules/commit_ts/t/002_standby.pl
src/test/modules/commit_ts/t/003_standby_2.pl
src/test/modules/commit_ts/t/004_restart.pl
src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
src/test/modules/libpq_pipeline/libpq_pipeline.c
src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
src/test/modules/test_custom_rmgrs/t/001_basic.pl
src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c
src/test/modules/test_ddl_deparse/test_ddl_deparse.c
src/test/modules/test_misc/t/001_constraint_validation.pl
src/test/modules/test_misc/t/002_tablespace.pl
src/test/modules/test_misc/t/003_check_guc.pl
src/test/modules/test_pg_dump/t/001_base.pl
src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
src/test/perl/PostgreSQL/Test/Cluster.pm
src/test/perl/PostgreSQL/Test/RecursiveCopy.pm
src/test/perl/PostgreSQL/Test/SimpleTee.pm
src/test/perl/PostgreSQL/Test/Utils.pm
src/test/perl/PostgreSQL/Version.pm
src/test/recovery/t/001_stream_rep.pl
src/test/recovery/t/002_archiving.pl
src/test/recovery/t/003_recovery_targets.pl
src/test/recovery/t/005_replay_delay.pl
src/test/recovery/t/006_logical_decoding.pl
src/test/recovery/t/009_twophase.pl
src/test/recovery/t/010_logical_decoding_timelines.pl
src/test/recovery/t/012_subtransactions.pl
src/test/recovery/t/013_crash_restart.pl
src/test/recovery/t/014_unlogged_reinit.pl
src/test/recovery/t/016_min_consistency.pl
src/test/recovery/t/017_shm.pl
src/test/recovery/t/018_wal_optimize.pl
src/test/recovery/t/019_replslot_limit.pl
src/test/recovery/t/020_archive_status.pl
src/test/recovery/t/022_crash_temp_files.pl
src/test/recovery/t/023_pitr_prepared_xact.pl
src/test/recovery/t/024_archive_recovery.pl
src/test/recovery/t/025_stuck_on_old_timeline.pl
src/test/recovery/t/027_stream_regress.pl
src/test/recovery/t/028_pitr_timelines.pl
src/test/recovery/t/029_stats_restart.pl
src/test/recovery/t/031_recovery_conflict.pl
src/test/recovery/t/032_relfilenode_reuse.pl
src/test/recovery/t/033_replay_tsp_drops.pl
src/test/recovery/t/034_create_database.pl
src/test/recovery/t/035_standby_logical_decoding.pl
src/test/regress/pg_regress.c
src/test/ssl/t/001_ssltests.pl
src/test/ssl/t/002_scram.pl
src/test/ssl/t/003_sslinfo.pl
src/test/ssl/t/SSL/Backend/OpenSSL.pm
src/test/ssl/t/SSL/Server.pm
src/test/subscription/t/001_rep_changes.pl
src/test/subscription/t/005_encoding.pl
src/test/subscription/t/012_collation.pl
src/test/subscription/t/014_binary.pl
src/test/subscription/t/015_stream.pl
src/test/subscription/t/018_stream_subxact_abort.pl
src/test/subscription/t/023_twophase_stream.pl
src/test/subscription/t/025_rep_changes_for_schema.pl
src/test/subscription/t/026_stats.pl
src/test/subscription/t/027_nosuperuser.pl
src/test/subscription/t/028_row_filter.pl
src/test/subscription/t/030_origin.pl
src/test/subscription/t/031_column_list.pl
src/test/subscription/t/032_subscribe_use_index.pl
src/test/subscription/t/033_run_as_table_owner.pl
src/test/subscription/t/100_bugs.pl
src/timezone/zic.c
src/tools/PerfectHash.pm
src/tools/check_bison_recursion.pl
src/tools/ci/windows_build_config.pl
src/tools/copyright.pl
src/tools/gen_export.pl
src/tools/gen_keywordlist.pl
src/tools/git_changelog
src/tools/mark_pgdllimport.pl
src/tools/msvc/Install.pm
src/tools/msvc/MSBuildProject.pm
src/tools/msvc/Mkvcbuild.pm
src/tools/msvc/Project.pm
src/tools/msvc/Solution.pm
src/tools/msvc/VSObjectFactory.pm
src/tools/msvc/build.pl
src/tools/msvc/config_default.pl
src/tools/msvc/dummylib/Win32/Registry.pm
src/tools/msvc/dummylib/Win32API/File.pm
src/tools/msvc/gendef.pl
src/tools/msvc/pgbison.pl
src/tools/msvc/vcregress.pl
src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl
src/tools/pginclude/pgcheckdefines
src/tools/pgindent/pgindent
src/tools/pgindent/typedefs.list
src/tools/win32tzlist.pl
src/tutorial/funcs.c

index 1aedebe4307bc6449fa94aaf0dcd3d8877fc9d43..46d5b53181e8899968da1f5a75b19a607399f1d1 100644 (file)
@@ -81,7 +81,7 @@ sub relation_filepath
        my ($relname) = @_;
 
        my $pgdata = $node->data_dir;
-       my $rel    = $node->safe_psql('postgres',
+       my $rel = $node->safe_psql('postgres',
                qq(SELECT pg_relation_filepath('$relname')));
        die "path not found for relation $relname" unless defined $rel;
        return "$pgdata/$rel";
@@ -267,7 +267,7 @@ sub check_all_options_uncorrupted
                                        for my $endblock (qw(NULL 0))
                                        {
                                                my $opts =
-                                                   "on_error_stop := $stop, "
+                                                       "on_error_stop := $stop, "
                                                  . "check_toast := $check_toast, "
                                                  . "skip := $skip, "
                                                  . "startblock := $startblock, "
index 5323ed11ae94523a5aca929d347dcd3463e4f031..3279a2505a38dddb853188c86a994442d661a082 100644 (file)
@@ -38,30 +38,35 @@ $node->safe_psql('postgres', q(CREATE TABLE tbl(i int)));
 
 my $main_h = $node->background_psql('postgres');
 
-$main_h->query_safe(q(
+$main_h->query_safe(
+       q(
 BEGIN;
 INSERT INTO tbl VALUES(0);
 ));
 
 my $cic_h = $node->background_psql('postgres');
 
-$cic_h->query_until(qr/start/, q(
+$cic_h->query_until(
+       qr/start/, q(
 \echo start
 CREATE INDEX CONCURRENTLY idx ON tbl(i);
 ));
 
-$main_h->query_safe(q(
+$main_h->query_safe(
+       q(
 PREPARE TRANSACTION 'a';
 ));
 
-$main_h->query_safe(q(
+$main_h->query_safe(
+       q(
 BEGIN;
 INSERT INTO tbl VALUES(0);
 ));
 
 $node->safe_psql('postgres', q(COMMIT PREPARED 'a';));
 
-$main_h->query_safe(q(
+$main_h->query_safe(
+       q(
 PREPARE TRANSACTION 'b';
 BEGIN;
 INSERT INTO tbl VALUES(0);
@@ -69,7 +74,8 @@ INSERT INTO tbl VALUES(0);
 
 $node->safe_psql('postgres', q(COMMIT PREPARED 'b';));
 
-$main_h->query_safe(q(
+$main_h->query_safe(
+       q(
 PREPARE TRANSACTION 'c';
 COMMIT PREPARED 'c';
 ));
@@ -97,7 +103,8 @@ PREPARE TRANSACTION 'persists_forever';
 $node->restart;
 
 my $reindex_h = $node->background_psql('postgres');
-$reindex_h->query_until(qr/start/, q(
+$reindex_h->query_until(
+       qr/start/, q(
 \echo start
 DROP INDEX CONCURRENTLY idx;
 CREATE INDEX CONCURRENTLY idx ON tbl(i);
index 34d73ad442fb75405a84c6c845634b64971c4c37..97f3253522b1dcffc6c9e52caa27fa1de4a146ff 100644 (file)
@@ -407,7 +407,7 @@ verify_heapam(PG_FUNCTION_ARGS)
                OffsetNumber successor[MaxOffsetNumber];
                bool            lp_valid[MaxOffsetNumber];
                bool            xmin_commit_status_ok[MaxOffsetNumber];
-               XidCommitStatus xmin_commit_status[MaxOffsetNumber];
+               XidCommitStatus xmin_commit_status[MaxOffsetNumber];
 
                CHECK_FOR_INTERRUPTS();
 
@@ -444,7 +444,7 @@ verify_heapam(PG_FUNCTION_ARGS)
                for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff;
                         ctx.offnum = OffsetNumberNext(ctx.offnum))
                {
-                       BlockNumber     nextblkno;
+                       BlockNumber nextblkno;
                        OffsetNumber nextoffnum;
 
                        successor[ctx.offnum] = InvalidOffsetNumber;
@@ -484,9 +484,9 @@ verify_heapam(PG_FUNCTION_ARGS)
 
                                /*
                                 * Since we've checked that this redirect points to a line
-                                * pointer between FirstOffsetNumber and maxoff, it should
-                                * now be safe to fetch the referenced line pointer. We expect
-                                * it to be LP_NORMAL; if not, that's corruption.
+                                * pointer between FirstOffsetNumber and maxoff, it should now
+                                * be safe to fetch the referenced line pointer. We expect it
+                                * to be LP_NORMAL; if not, that's corruption.
                                 */
                                rditem = PageGetItemId(ctx.page, rdoffnum);
                                if (!ItemIdIsUsed(rditem))
@@ -610,8 +610,8 @@ verify_heapam(PG_FUNCTION_ARGS)
                        {
                                /*
                                 * We should not have set successor[ctx.offnum] to a value
-                                * other than InvalidOffsetNumber unless that line pointer
-                                * is LP_NORMAL.
+                                * other than InvalidOffsetNumber unless that line pointer is
+                                * LP_NORMAL.
                                 */
                                Assert(ItemIdIsNormal(next_lp));
 
@@ -642,8 +642,8 @@ verify_heapam(PG_FUNCTION_ARGS)
                        }
 
                        /*
-                        * If the next line pointer is a redirect, or if it's a tuple
-                        * but the XMAX of this tuple doesn't match the XMIN of the next
+                        * If the next line pointer is a redirect, or if it's a tuple but
+                        * the XMAX of this tuple doesn't match the XMIN of the next
                         * tuple, then the two aren't part of the same update chain and
                         * there is nothing more to do.
                         */
@@ -667,8 +667,8 @@ verify_heapam(PG_FUNCTION_ARGS)
                        }
 
                        /*
-                        * This tuple and the tuple to which it points seem to be part
-                        * of an update chain.
+                        * This tuple and the tuple to which it points seem to be part of
+                        * an update chain.
                         */
                        predecessor[nextoffnum] = ctx.offnum;
 
@@ -721,8 +721,8 @@ verify_heapam(PG_FUNCTION_ARGS)
                        }
 
                        /*
-                        * If the current tuple's xmin is aborted but the successor tuple's
-                        * xmin is in-progress or committed, that's corruption.
+                        * If the current tuple's xmin is aborted but the successor
+                        * tuple's xmin is in-progress or committed, that's corruption.
                         */
                        if (xmin_commit_status_ok[ctx.offnum] &&
                                xmin_commit_status[ctx.offnum] == XID_ABORTED &&
@@ -1025,7 +1025,7 @@ check_tuple_visibility(HeapCheckContext *ctx, bool *xmin_commit_status_ok,
        HeapTupleHeader tuphdr = ctx->tuphdr;
 
        ctx->tuple_could_be_pruned = true;      /* have not yet proven otherwise */
-       *xmin_commit_status_ok = false;         /* have not yet proven otherwise */
+       *xmin_commit_status_ok = false; /* have not yet proven otherwise */
 
        /* If xmin is normal, it should be within valid range */
        xmin = HeapTupleHeaderGetXmin(tuphdr);
@@ -1837,7 +1837,7 @@ check_tuple(HeapCheckContext *ctx, bool *xmin_commit_status_ok,
         * therefore cannot check it.
         */
        if (!check_tuple_visibility(ctx, xmin_commit_status_ok,
-                                                       xmin_commit_status))
+                                                               xmin_commit_status))
                return;
 
        /*
@@ -1897,8 +1897,8 @@ FullTransactionIdFromXidAndCtx(TransactionId xid, const HeapCheckContext *ctx)
        diff = (int32) (ctx->next_xid - xid);
 
        /*
-        * In cases of corruption we might see a 32bit xid that is before epoch
-        * 0. We can't represent that as a 64bit xid, due to 64bit xids being
+        * In cases of corruption we might see a 32bit xid that is before epoch 0.
+        * We can't represent that as a 64bit xid, due to 64bit xids being
         * unsigned integers, without the modulo arithmetic of 32bit xid. There's
         * no really nice way to deal with that, but it works ok enough to use
         * FirstNormalFullTransactionId in that case, as a freshly initdb'd
index 7873feb04475489937a7d6807d72e3c8ac87ab6f..abb422f8de6910760a542ad5b1d53caac60bff6a 100644 (file)
@@ -19,7 +19,7 @@ sub query_log
        local $ENV{PGOPTIONS} = join " ",
          map { "-c $_=$params->{$_}" } keys %$params;
 
-       my $log    = $node->logfile();
+       my $log = $node->logfile();
        my $offset = -s $log;
 
        $node->safe_psql("postgres", $sql);
@@ -113,7 +113,7 @@ $log_contents = query_log(
        "SELECT * FROM pg_class;",
        {
                "auto_explain.log_verbose" => "on",
-               "compute_query_id"         => "on"
+               "compute_query_id" => "on"
        });
 
 like(
@@ -127,7 +127,7 @@ $log_contents = query_log(
        "SELECT * FROM pg_class;",
        {
                "auto_explain.log_verbose" => "on",
-               "compute_query_id"         => "regress"
+               "compute_query_id" => "regress"
        });
 
 unlike(
index 84ad93f6140d1c8fcae49206e6ca9ee7e97c4c9b..e2cdd2ecb0cc698fd6142e6eaeb93b9296d7797e 100644 (file)
@@ -25,7 +25,7 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
 # This is only needed on Windows machines that don't use UNIX sockets.
 $node->init(
        'allows_streaming' => 1,
-       'auth_extra'       => [ '--create-role', 'backupuser' ]);
+       'auth_extra' => [ '--create-role', 'backupuser' ]);
 
 $node->append_conf('postgresql.conf',
        "shared_preload_libraries = 'basebackup_to_shell'");
@@ -50,7 +50,7 @@ $node->command_fails_like(
        'fails if basebackup_to_shell.command is not set');
 
 # Configure basebackup_to_shell.command and reload the configuration file.
-my $backup_path         = PostgreSQL::Test::Utils::tempdir;
+my $backup_path = PostgreSQL::Test::Utils::tempdir;
 my $escaped_backup_path = $backup_path;
 $escaped_backup_path =~ s{\\}{\\\\}g
   if ($PostgreSQL::Test::Utils::windows_os);
index cd852888ce6b214a392e264b510ae11d630e7bf0..4d78c31859fa7d3d6c4078cac5e48698c0b75322 100644 (file)
@@ -407,8 +407,8 @@ basic_archive_shutdown(ArchiveModuleState *state)
        MemoryContext basic_archive_context;
 
        /*
-        * If we didn't get to storing the pointer to our allocated state, we don't
-        * have anything to clean up.
+        * If we didn't get to storing the pointer to our allocated state, we
+        * don't have anything to clean up.
         */
        if (data == NULL)
                return;
index 55f75eff36181c3642ab4ff5185858e7ba7dbce6..3a3e916f9ef7fc1c0466be7d7440d4ff1a13611b 100644 (file)
@@ -1287,7 +1287,7 @@ dblink_get_connections(PG_FUNCTION_ARGS)
 
        if (astate)
                PG_RETURN_DATUM(makeArrayResult(astate,
-                                                                                         CurrentMemoryContext));
+                                                                               CurrentMemoryContext));
        else
                PG_RETURN_NULL();
 }
index bd6dd83c93935a380e269d5b65326d69faa65b66..067654986e2fe992391108ba7f96afbc13cc006f 100755 (executable)
@@ -83,7 +83,7 @@ else
        $outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
 }
 my $sql =
-    "select $outf from "
+       "select $outf from "
   . join(', ', keys %table)
   . " where "
   . join(' AND ', @where) . ';';
@@ -100,9 +100,9 @@ if ($opt{e})
        print @plan;
 }
 
-my $t0    = [gettimeofday];
+my $t0 = [gettimeofday];
 my $count = 0;
-my $b     = $opt{b};
+my $b = $opt{b};
 $b ||= 1;
 my @a;
 foreach (1 .. $b)
index 5bdcebddbe2236f37ce99022c74617aea8edc458..6efe9151ca5557ac96b4af5abd358e171109b9a5 100755 (executable)
@@ -19,7 +19,7 @@ create table message_section_map (
 
 EOT
 
-open(my $msg, '>', "message.tmp")             || die;
+open(my $msg, '>', "message.tmp") || die;
 open(my $map, '>', "message_section_map.tmp") || die;
 
 srand(1);
index 21b7d020286143e26c315387b7000f9c38d6a97c..932f69bff2d180a58239db80023c68ebeec6429f 100644 (file)
@@ -43,7 +43,7 @@ ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen,
                                 ltree *left, ltree *right)
 {
        int32           size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
-       (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
+               (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
        ltree_gist *result = palloc(size);
 
        SET_VARSIZE(result, size);
index 5dce70bd1a6a45bcb9b5249927d8e09b42b69143..0a12c77a621f8e44f5a28310f70fc145a2c01b3f 100644 (file)
@@ -175,7 +175,7 @@ Datum
 ltree_in(PG_FUNCTION_ARGS)
 {
        char       *buf = (char *) PG_GETARG_POINTER(0);
-       ltree      *res;
+       ltree      *res;
 
        if ((res = parse_ltree(buf, fcinfo->context)) == NULL)
                PG_RETURN_NULL();
@@ -584,7 +584,7 @@ parse_lquery(const char *buf, struct Node *escontext)
  */
 static bool
 finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos,
-       struct Node *escontext)
+                               struct Node *escontext)
 {
        if (is_lquery)
        {
@@ -745,7 +745,7 @@ Datum
 lquery_in(PG_FUNCTION_ARGS)
 {
        char       *buf = (char *) PG_GETARG_POINTER(0);
-       lquery     *res;
+       lquery     *res;
 
        if ((res = parse_lquery(buf, fcinfo->context)) == NULL)
                PG_RETURN_NULL();
index 0d29e156303be5e223c69bb6dfb5f9c26bf358cf..121fc55e469ea2280b9f1c2a75b74ecefe6080c2 100644 (file)
@@ -186,8 +186,8 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("word is too long")));
 
-       if (! pushquery(state, type, ltree_crc32_sz(strval, lenval),
-                                       state->curop - state->op, lenval, flag))
+       if (!pushquery(state, type, ltree_crc32_sz(strval, lenval),
+                                  state->curop - state->op, lenval, flag))
                return false;
 
        while (state->curop - state->op + lenval + 1 >= state->lenop)
@@ -408,7 +408,7 @@ PG_FUNCTION_INFO_V1(ltxtq_in);
 Datum
 ltxtq_in(PG_FUNCTION_ARGS)
 {
-       ltxtquery *res;
+       ltxtquery  *res;
 
        if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL)
                PG_RETURN_NULL();
index 9811c51cee5e4104da676b54dadaff6d731901a2..6b7c869afc0575f0876fe2505f904ffd832929c9 100644 (file)
@@ -21,7 +21,7 @@ $node->start;
 
 # setup
 $node->safe_psql("postgres",
-           "CREATE EXTENSION pg_prewarm;\n"
+               "CREATE EXTENSION pg_prewarm;\n"
          . "CREATE TABLE test(c1 int);\n"
          . "INSERT INTO test SELECT generate_series(1, 100);");
 
index 1cd3744d5dfa7f084821d938b8441bb4308a1eb3..796a74f322bde084cf86f21bfb53efe3538ad831 100644 (file)
@@ -252,8 +252,8 @@ GetWALBlockInfo(FunctionCallInfo fcinfo, XLogReaderState *record,
        int                     block_id;
        ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
        RmgrData        desc;
-       const char      *record_type;
-       StringInfoData  rec_desc;
+       const char *record_type;
+       StringInfoData rec_desc;
 
        Assert(XLogRecHasAnyBlockRefs(record));
 
index da32d503bc5a31f63c15216f53ad0d92be04c7dc..d918ba89e19c6f09e4f42016245fb2d95d707c47 100644 (file)
@@ -61,7 +61,7 @@ typedef struct ConnCacheEntry
        bool            have_error;             /* have any subxacts aborted in this xact? */
        bool            changing_xact_state;    /* xact state change in process */
        bool            parallel_commit;        /* do we commit (sub)xacts in parallel? */
-       bool            parallel_abort; /* do we abort (sub)xacts in parallel? */
+       bool            parallel_abort; /* do we abort (sub)xacts in parallel? */
        bool            invalidated;    /* true if reconnect is pending */
        bool            keep_connections;       /* setting value of keep_connections
                                                                         * server option */
index 95dbe8b06cc558349f2ce14929f0c9a868998a9b..428ea3810fe31bc98c1fc59138053345e532f624 100644 (file)
@@ -2024,9 +2024,8 @@ postgresGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
 
        /*
         * Should never get called when the insert is being performed on a table
-        * that is also among the target relations of an UPDATE operation,
-        * because postgresBeginForeignInsert() currently rejects such insert
-        * attempts.
+        * that is also among the target relations of an UPDATE operation, because
+        * postgresBeginForeignInsert() currently rejects such insert attempts.
         */
        Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
 
@@ -5167,15 +5166,15 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
         */
        if (method != ANALYZE_SAMPLE_OFF)
        {
-               bool    can_tablesample;
+               bool            can_tablesample;
 
                reltuples = postgresGetAnalyzeInfoForForeignTable(relation,
                                                                                                                  &can_tablesample);
 
                /*
-                * Make sure we're not choosing TABLESAMPLE when the remote relation does
-                * not support that. But only do this for "auto" - if the user explicitly
-                * requested BERNOULLI/SYSTEM, it's better to fail.
+                * Make sure we're not choosing TABLESAMPLE when the remote relation
+                * does not support that. But only do this for "auto" - if the user
+                * explicitly requested BERNOULLI/SYSTEM, it's better to fail.
                 */
                if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO))
                        method = ANALYZE_SAMPLE_RANDOM;
@@ -5189,35 +5188,35 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
                else
                {
                        /*
-                        * All supported sampling methods require sampling rate,
-                        * not target rows directly, so we calculate that using
-                        * the remote reltuples value. That's imperfect, because
-                        * it might be off a good deal, but that's not something
-                        * we can (or should) address here.
+                        * All supported sampling methods require sampling rate, not
+                        * target rows directly, so we calculate that using the remote
+                        * reltuples value. That's imperfect, because it might be off a
+                        * good deal, but that's not something we can (or should) address
+                        * here.
                         *
-                        * If reltuples is too low (i.e. when table grew), we'll
-                        * end up sampling more rows - but then we'll apply the
-                        * local sampling, so we get the expected sample size.
-                        * This is the same outcome as without remote sampling.
+                        * If reltuples is too low (i.e. when table grew), we'll end up
+                        * sampling more rows - but then we'll apply the local sampling,
+                        * so we get the expected sample size. This is the same outcome as
+                        * without remote sampling.
                         *
-                        * If reltuples is too high (e.g. after bulk DELETE), we
-                        * will end up sampling too few rows.
+                        * If reltuples is too high (e.g. after bulk DELETE), we will end
+                        * up sampling too few rows.
                         *
-                        * We can't really do much better here - we could try
-                        * sampling a bit more rows, but we don't know how off
-                        * the reltuples value is so how much is "a bit more"?
+                        * We can't really do much better here - we could try sampling a
+                        * bit more rows, but we don't know how off the reltuples value is
+                        * so how much is "a bit more"?
                         *
-                        * Furthermore, the targrows value for partitions is
-                        * determined based on table size (relpages), which can
-                        * be off in different ways too. Adjusting the sampling
-                        * rate here might make the issue worse.
+                        * Furthermore, the targrows value for partitions is determined
+                        * based on table size (relpages), which can be off in different
+                        * ways too. Adjusting the sampling rate here might make the issue
+                        * worse.
                         */
                        sample_frac = targrows / reltuples;
 
                        /*
                         * We should never get sampling rate outside the valid range
-                        * (between 0.0 and 1.0), because those cases should be covered
-                        * by the previous branch that sets ANALYZE_SAMPLE_OFF.
+                        * (between 0.0 and 1.0), because those cases should be covered by
+                        * the previous branch that sets ANALYZE_SAMPLE_OFF.
                         */
                        Assert(sample_frac >= 0.0 && sample_frac <= 1.0);
                }
index eb33d2a9932e30db33f8d72da7c2ecda6238aeb6..07c11b75e92808322bb1c32295821deab9551166 100644 (file)
@@ -183,7 +183,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
 
        /* See if we already cached the result. */
        entry = (ShippableCacheEntry *)
-               hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
+               hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
 
        if (!entry)
        {
@@ -196,7 +196,7 @@ is_shippable(Oid objectId, Oid classId, PgFdwRelationInfo *fpinfo)
                 * cache invalidation.
                 */
                entry = (ShippableCacheEntry *)
-                       hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
+                       hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
 
                entry->shippable = shippable;
        }
index 00bc23aa955398d52595c14cb314811df45a1920..67c0015e6bea20509fe667ad98d5ebb7a0eb4b53 100755 (executable)
@@ -6,14 +6,14 @@ use strict;
 use warnings;
 
 my $integer = '[+-]?[0-9]+';
-my $real    = '[+-]?[0-9]+\.[0-9]+';
+my $real = '[+-]?[0-9]+\.[0-9]+';
 
-my $RANGE     = '(\.\.)(\.)?';
-my $PLUMIN    = q(\'\+\-\');
-my $FLOAT     = "(($integer)|($real))([eE]($integer))?";
+my $RANGE = '(\.\.)(\.)?';
+my $PLUMIN = q(\'\+\-\');
+my $FLOAT = "(($integer)|($real))([eE]($integer))?";
 my $EXTENSION = '<|>|~';
 
-my $boundary  = "($EXTENSION)?$FLOAT";
+my $boundary = "($EXTENSION)?$FLOAT";
 my $deviation = $FLOAT;
 
 my $rule_1 = $boundary . $PLUMIN . $deviation;
index dede36ff160d7f62ee3d3e9049e39a04af7b0f07..7c2d87561ce170a6357165fb85b8ffe276dfeeff 100644 (file)
@@ -92,7 +92,7 @@ regression_slot3|t|t),
 # replication statistics data is fine after restart.
 
 $node->stop;
-my $datadir           = $node->data_dir;
+my $datadir = $node->data_dir;
 my $slot3_replslotdir = "$datadir/pg_replslot/regression_slot3";
 
 rmtree($slot3_replslotdir);
index 628c6a2595786f91cc09627e10e2930b46106f3d..12d1d0505d77b43c347aa9940016f0c0e5ffafe2 100644 (file)
@@ -288,7 +288,7 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
 {
        TestDecodingData *data = ctx->output_plugin_private;
        TestDecodingTxnData *txndata =
-       MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+               MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
 
        txndata->xact_wrote_changes = false;
        txn->output_plugin_private = txndata;
@@ -348,7 +348,7 @@ pg_decode_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
 {
        TestDecodingData *data = ctx->output_plugin_private;
        TestDecodingTxnData *txndata =
-       MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+               MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
 
        txndata->xact_wrote_changes = false;
        txn->output_plugin_private = txndata;
index 5a16da0d060d8de737726ac2a35a459bb746aa6c..824be729a0384dc0bc4b0fefc252307f13e78335 100644 (file)
@@ -34,7 +34,7 @@ print "<tbody>\n";
 while (<$feat>)
 {
        chomp;
-       my ($feature_id,      $feature_name, $subfeature_id,
+       my ($feature_id, $feature_name, $subfeature_id,
                $subfeature_name, $is_supported, $comments) = split /\t/;
 
        $is_supported eq $yesno || next;
index e91fd7e2bdb21dca4291c763c7f6a66031f2e191..3c6a956eaa3f2c9dfb016e23a98b0667259bddb5 100644 (file)
@@ -700,8 +700,8 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
                                        }
 
                                        /*
-                                        * If we found a scan key eliminating the range, no need to
-                                        * check additional ones.
+                                        * If we found a scan key eliminating the range, no need
+                                        * to check additional ones.
                                         */
                                        if (!addrange)
                                                break;
@@ -1223,7 +1223,7 @@ brin_build_desc(Relation rel)
         * Obtain BrinOpcInfo for each indexed column.  While at it, accumulate
         * the number of columns stored, since the number is opclass-defined.
         */
-       opcinfo = palloc_array(BrinOpcInfo*, tupdesc->natts);
+       opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts);
        for (keyno = 0; keyno < tupdesc->natts; keyno++)
        {
                FmgrInfo   *opcInfoFn;
@@ -1801,8 +1801,8 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
                bval = &dtup->bt_columns[keyno];
 
                /*
-                * Does the range have actual NULL values? Either of the flags can
-                * be set, but we ignore the state before adding first row.
+                * Does the range have actual NULL values? Either of the flags can be
+                * set, but we ignore the state before adding first row.
                 *
                 * We have to remember this, because we'll modify the flags and we
                 * need to know if the range started as empty.
@@ -1842,12 +1842,12 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
 
                /*
                 * If the range was had actual NULL values (i.e. did not start empty),
-                * make sure we don't forget about the NULL values. Either the allnulls
-                * flag is still set to true, or (if the opclass cleared it) we need to
-                * set hasnulls=true.
+                * make sure we don't forget about the NULL values. Either the
+                * allnulls flag is still set to true, or (if the opclass cleared it)
+                * we need to set hasnulls=true.
                 *
-                * XXX This can only happen when the opclass modified the tuple, so the
-                * modified flag should be set.
+                * XXX This can only happen when the opclass modified the tuple, so
+                * the modified flag should be set.
                 */
                if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls))
                {
@@ -1859,9 +1859,9 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
        /*
         * After updating summaries for all the keys, mark it as not empty.
         *
-        * If we're actually changing the flag value (i.e. tuple started as empty),
-        * we should have modified the tuple. So we should not see empty range that
-        * was not modified.
+        * If we're actually changing the flag value (i.e. tuple started as
+        * empty), we should have modified the tuple. So we should not see empty
+        * range that was not modified.
         */
        Assert(!dtup->bt_empty_range || modified);
        dtup->bt_empty_range = false;
index 90cb3951fca14b03f336aac0da901666cc84fe23..11cc43167769b751ebc35a1f01d1ccda1b0a1d82 100644 (file)
@@ -1717,7 +1717,7 @@ allocateReloptStruct(Size base, relopt_value *options, int numoptions)
                        if (optstr->fill_cb)
                        {
                                const char *val = optval->isset ? optval->values.string_val :
-                               optstr->default_isnull ? NULL : optstr->default_val;
+                                       optstr->default_isnull ? NULL : optstr->default_val;
 
                                size += optstr->fill_cb(val, NULL);
                        }
@@ -1796,8 +1796,8 @@ fillRelOptions(void *rdopts, Size basesize,
                                                if (optstring->fill_cb)
                                                {
                                                        Size            size =
-                                                       optstring->fill_cb(string_val,
-                                                                                          (char *) rdopts + offset);
+                                                               optstring->fill_cb(string_val,
+                                                                                                  (char *) rdopts + offset);
 
                                                        if (size)
                                                        {
index b5c1754e788c2f1b177456f135ea7760c29337f9..516465f8b7dbfef5f6d008ed3e89da29da139a86 100644 (file)
@@ -1117,7 +1117,7 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
        for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
        {
                IndexTuple      ituple = (IndexTuple)
-               PageGetItem(page, PageGetItemId(page, offset));
+                       PageGetItem(page, PageGetItemId(page, offset));
 
                if (downlink == NULL)
                        downlink = CopyIndexTuple(ituple);
index 95cbed43371abdc0a3fd5147223b72bb1abcf4a7..1423b4b047cf28960c016ed903824bf0f6cd69f3 100644 (file)
@@ -598,7 +598,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
        {
                GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc);
                GISTNodeBuffer *newNodeBuffer;
-               int                             i = foreach_current_index(lc);
+               int                     i = foreach_current_index(lc);
 
                /* Decompress parent index tuple of node buffer page. */
                gistDeCompressAtt(giststate, r,
index 7382b0921d91193de6494d2ed50e4e6170796a26..e2c9b5f069c6b4139b531b524d3d1b724c20dd99 100644 (file)
@@ -657,7 +657,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
                                        if (so->killedItems == NULL)
                                        {
                                                MemoryContext oldCxt =
-                                               MemoryContextSwitchTo(so->giststate->scanCxt);
+                                                       MemoryContextSwitchTo(so->giststate->scanCxt);
 
                                                so->killedItems =
                                                        (OffsetNumber *) palloc(MaxIndexTuplesPerPage
@@ -694,7 +694,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
                                if (so->killedItems == NULL)
                                {
                                        MemoryContext oldCxt =
-                                       MemoryContextSwitchTo(so->giststate->scanCxt);
+                                               MemoryContextSwitchTo(so->giststate->scanCxt);
 
                                        so->killedItems =
                                                (OffsetNumber *) palloc(MaxIndexTuplesPerPage
index a2ddfd5e694058ce10ab02ffdd81e7a3505cd558..15249aa9212c470c006c8e8d2619e8e46d3a255b 100644 (file)
@@ -125,7 +125,7 @@ gistRedoPageUpdateRecord(XLogReaderState *record)
                if (data - begin < datalen)
                {
                        OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
-                       OffsetNumberNext(PageGetMaxOffsetNumber(page));
+                               OffsetNumberNext(PageGetMaxOffsetNumber(page));
 
                        while (data - begin < datalen)
                        {
index d850edd1d51c99ee9f3b4a05cc3c4d5426fbc2ff..37646cc9a109a862c4dcb53072c83aff76a3d8f7 100644 (file)
@@ -289,7 +289,8 @@ hashtext(PG_FUNCTION_ARGS)
        }
        else
        {
-               Size            bsize, rsize;
+               Size            bsize,
+                                       rsize;
                char       *buf;
                const char *keydata = VARDATA_ANY(key);
                size_t          keylen = VARSIZE_ANY_EXHDR(key);
@@ -304,8 +305,8 @@ hashtext(PG_FUNCTION_ARGS)
 
                /*
                 * In principle, there's no reason to include the terminating NUL
-                * character in the hash, but it was done before and the behavior
-                * must be preserved.
+                * character in the hash, but it was done before and the behavior must
+                * be preserved.
                 */
                result = hash_any((uint8_t *) buf, bsize + 1);
 
@@ -343,7 +344,8 @@ hashtextextended(PG_FUNCTION_ARGS)
        }
        else
        {
-               Size            bsize, rsize;
+               Size            bsize,
+                                       rsize;
                char       *buf;
                const char *keydata = VARDATA_ANY(key);
                size_t          keylen = VARSIZE_ANY_EXHDR(key);
@@ -357,8 +359,8 @@ hashtextextended(PG_FUNCTION_ARGS)
 
                /*
                 * In principle, there's no reason to include the terminating NUL
-                * character in the hash, but it was done before and the behavior
-                * must be preserved.
+                * character in the hash, but it was done before and the behavior must
+                * be preserved.
                 */
                result = hash_any_extended((uint8_t *) buf, bsize + 1,
                                                                   PG_GETARG_INT64(1));
index 0124f37911f823f5be3ac9f866ed7f0e429cf915..7ed72abe597a47a031ee56c06d0743ec464aa279 100644 (file)
@@ -2491,7 +2491,7 @@ static inline bool
 xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
 {
        const uint16 interesting =
-       HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
+               HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
 
        if ((new_infomask & interesting) != (old_infomask & interesting))
                return true;
index cbb35aa73d6024c8e9bc5a140492f81edf0a9cda..646135cc21c5a22603b654f087b89e3b052c4203 100644 (file)
@@ -334,8 +334,8 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
         * Note: heap_update returns the tid (location) of the new tuple in the
         * t_self field.
         *
-        * If the update is not HOT, we must update all indexes. If the update
-        * is HOT, it could be that we updated summarized columns, so we either
+        * If the update is not HOT, we must update all indexes. If the update is
+        * HOT, it could be that we updated summarized columns, so we either
         * update only summarized indexes, or none at all.
         */
        if (result != TM_Ok)
index fb95c19e90fab90db655394ba71c9555c33ac7f9..c275b08494d02c87fa00df36d28db67d8e2525bd 100644 (file)
@@ -376,7 +376,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate,
                if (use_fsm && i >= not_in_fsm_pages)
                {
                        Size            freespace = BufferGetPageSize(victim_buffers[i]) -
-                       SizeOfPageHeaderData;
+                               SizeOfPageHeaderData;
 
                        RecordPageWithFreeSpace(relation, curBlock, freespace);
                }
index 20df39c14972960d79750f67c52719a4ce53498d..47b9e209154d5c74e168b4c0cafdfa9816b768e5 100644 (file)
@@ -532,7 +532,7 @@ heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
                if (!TransactionIdIsValid(prstate->old_snap_xmin))
                {
                        TransactionId horizon =
-                       GlobalVisTestNonRemovableHorizon(prstate->vistest);
+                               GlobalVisTestNonRemovableHorizon(prstate->vistest);
 
                        TransactionIdLimitedForOldSnapshots(horizon, prstate->rel,
                                                                                                &prstate->old_snap_xmin,
index cda8889f5ea83037b31f88067d16e64ce4d7e9ee..4eb953f904747472d849c71a7111ef8206fe09b3 100644 (file)
@@ -389,6 +389,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
        Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
        Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
                   params->truncate != VACOPTVALUE_AUTO);
+
        /*
         * While VacuumFailSafeActive is reset to false before calling this, we
         * still need to reset it here due to recursive calls.
@@ -1813,12 +1814,12 @@ retry:
                {
                        /*
                         * We have no freeze plans to execute, so there's no added cost
-                        * from following the freeze path.  That's why it was chosen.
-                        * This is important in the case where the page only contains
-                        * totally frozen tuples at this point (perhaps only following
-                        * pruning).  Such pages can be marked all-frozen in the VM by our
-                        * caller, even though none of its tuples were newly frozen here
-                        * (note that the "no freeze" path never sets pages all-frozen).
+                        * from following the freeze path.  That's why it was chosen. This
+                        * is important in the case where the page only contains totally
+                        * frozen tuples at this point (perhaps only following pruning).
+                        * Such pages can be marked all-frozen in the VM by our caller,
+                        * even though none of its tuples were newly frozen here (note
+                        * that the "no freeze" path never sets pages all-frozen).
                         *
                         * We never increment the frozen_pages instrumentation counter
                         * here, since it only counts pages with newly frozen tuples
@@ -3117,8 +3118,8 @@ dead_items_max_items(LVRelState *vacrel)
 {
        int64           max_items;
        int                     vac_work_mem = IsAutoVacuumWorkerProcess() &&
-       autovacuum_work_mem != -1 ?
-       autovacuum_work_mem : maintenance_work_mem;
+               autovacuum_work_mem != -1 ?
+               autovacuum_work_mem : maintenance_work_mem;
 
        if (vacrel->nindexes > 0)
        {
index ac91d1a14da02e7e85c9adb25d4e10634c78502e..7d54ec9c0f71c8dc165587dd471a89529560dab8 100644 (file)
@@ -626,7 +626,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
 static Buffer
 vm_extend(Relation rel, BlockNumber vm_nblocks)
 {
-       Buffer buf;
+       Buffer          buf;
 
        buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
                                                          EB_CREATE_FORK_IF_NEEDED |
index 41aa1c4ccd1c1e8bbcccb647ef19302c6c8af47f..6be8915229e7185b3231b262ba01ff8838621a94 100644 (file)
@@ -2947,7 +2947,7 @@ void
 _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
 {
        IndexBulkDeleteResult *stats = vstate->stats;
-       Relation    heaprel = vstate->info->heaprel;
+       Relation        heaprel = vstate->info->heaprel;
 
        Assert(stats->pages_newly_deleted >= vstate->npendingpages);
 
@@ -3027,7 +3027,7 @@ _bt_pendingfsm_add(BTVacState *vstate,
        if (vstate->npendingpages > 0)
        {
                FullTransactionId lastsafexid =
-               vstate->pendingpages[vstate->npendingpages - 1].safexid;
+                       vstate->pendingpages[vstate->npendingpages - 1].safexid;
 
                Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid));
        }
index 7d12e0ef9126029fc5463295518c568b3c175889..3922120d647c0799dad88e057909354435eef8ab 100644 (file)
@@ -27,7 +27,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record)
        if (info == XLOG_DBASE_CREATE_FILE_COPY)
        {
                xl_dbase_create_file_copy_rec *xlrec =
-               (xl_dbase_create_file_copy_rec *) rec;
+                       (xl_dbase_create_file_copy_rec *) rec;
 
                appendStringInfo(buf, "copy dir %u/%u to %u/%u",
                                                 xlrec->src_tablespace_id, xlrec->src_db_id,
@@ -36,7 +36,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record)
        else if (info == XLOG_DBASE_CREATE_WAL_LOG)
        {
                xl_dbase_create_wal_log_rec *xlrec =
-               (xl_dbase_create_wal_log_rec *) rec;
+                       (xl_dbase_create_wal_log_rec *) rec;
 
                appendStringInfo(buf, "create dir %u/%u",
                                                 xlrec->tablespace_id, xlrec->db_id);
index 9ef4981ad143ae521690848f786ae142179b4cb5..246a6a6b857d9da03f78c3bbafdd8848447b2cdb 100644 (file)
@@ -120,7 +120,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
                                        else
                                        {
                                                ginxlogInsertDataInternal *insertData =
-                                               (ginxlogInsertDataInternal *) payload;
+                                                       (ginxlogInsertDataInternal *) payload;
 
                                                appendStringInfo(buf, " pitem: %u-%u/%u",
                                                                                 PostingItemGetBlockNumber(&insertData->newitem),
@@ -156,7 +156,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
                                else
                                {
                                        ginxlogVacuumDataLeafPage *xlrec =
-                                       (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
+                                               (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
 
                                        desc_recompress_leaf(buf, &xlrec->data);
                                }
index f3236991655491aef0c5b6049d1794845cb76225..cbfaf0c00ac1c133083fefc898bc47c4a9cd1cce 100644 (file)
@@ -115,7 +115,7 @@ spgAllocSearchItem(SpGistScanOpaque so, bool isnull, double *distances)
 {
        /* allocate distance array only for non-NULL items */
        SpGistSearchItem *item =
-       palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
+               palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
 
        item->isNull = isnull;
 
@@ -130,7 +130,7 @@ static void
 spgAddStartItem(SpGistScanOpaque so, bool isnull)
 {
        SpGistSearchItem *startEntry =
-       spgAllocSearchItem(so, isnull, so->zeroDistances);
+               spgAllocSearchItem(so, isnull, so->zeroDistances);
 
        ItemPointerSet(&startEntry->heapPtr,
                                   isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO,
@@ -768,7 +768,7 @@ spgTestLeafTuple(SpGistScanOpaque so,
                                 storeRes_func storeRes)
 {
        SpGistLeafTuple leafTuple = (SpGistLeafTuple)
-       PageGetItem(page, PageGetItemId(page, offset));
+               PageGetItem(page, PageGetItemId(page, offset));
 
        if (leafTuple->tupstate != SPGIST_LIVE)
        {
@@ -896,7 +896,7 @@ redirect:
                        else                            /* page is inner */
                        {
                                SpGistInnerTuple innerTuple = (SpGistInnerTuple)
-                               PageGetItem(page, PageGetItemId(page, offset));
+                                       PageGetItem(page, PageGetItemId(page, offset));
 
                                if (innerTuple->tupstate != SPGIST_LIVE)
                                {
@@ -974,7 +974,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
                else
                {
                        IndexOrderByDistance *distances =
-                       palloc(sizeof(distances[0]) * so->numberOfOrderBys);
+                               palloc(sizeof(distances[0]) * so->numberOfOrderBys);
                        int                     i;
 
                        for (i = 0; i < so->numberOfOrderBys; i++)
index a5e6c92f35eec75a743263b5b3904feb547c8f10..771438c8cecb9c0f9160b54fc8011ef4740fd6d8 100644 (file)
@@ -112,7 +112,7 @@ TableScanDesc
 table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
 {
        uint32          flags = SO_TYPE_SEQSCAN |
-       SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+               SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
        Oid                     relid = RelationGetRelid(relation);
        Snapshot        snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
 
@@ -176,7 +176,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
 {
        Snapshot        snapshot;
        uint32          flags = SO_TYPE_SEQSCAN |
-       SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+               SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
 
        Assert(RelationGetRelid(relation) == pscan->phs_relid);
 
index fe6698d5ffa4b7b55e9fe756fcfd264f2ea532a7..abb022e0670fa71cb7e9528c79c78a37b5c9eea0 100644 (file)
@@ -3270,7 +3270,7 @@ multixact_redo(XLogReaderState *record)
        else if (info == XLOG_MULTIXACT_CREATE_ID)
        {
                xl_multixact_create *xlrec =
-               (xl_multixact_create *) XLogRecGetData(record);
+                       (xl_multixact_create *) XLogRecGetData(record);
                TransactionId max_xid;
                int                     i;
 
index 7133ec0b22c98ba27760c1629878ab03ee3fd221..2b8bc2f58dd8e152f6085c92e51e415aa9cf800d 100644 (file)
@@ -375,8 +375,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
                shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
 
                /*
-                * Serialize the transaction snapshot if the transaction
-                * isolation level uses a transaction snapshot.
+                * Serialize the transaction snapshot if the transaction isolation
+                * level uses a transaction snapshot.
                 */
                if (IsolationUsesXactSnapshot())
                {
@@ -1497,8 +1497,8 @@ ParallelWorkerMain(Datum main_arg)
        RestoreClientConnectionInfo(clientconninfospace);
 
        /*
-        * Initialize SystemUser now that MyClientConnectionInfo is restored.
-        * Also ensure that auth_method is actually valid, aka authn_id is not NULL.
+        * Initialize SystemUser now that MyClientConnectionInfo is restored. Also
+        * ensure that auth_method is actually valid, aka authn_id is not NULL.
         */
        if (MyClientConnectionInfo.authn_id)
                InitializeSystemUser(MyClientConnectionInfo.authn_id,
index 6a837e1539d79cd5ceb338539a3bdf956dc754bc..8daaa535edf80225b8e4cbf0fa5cf0b9c6459006 100644 (file)
@@ -3152,10 +3152,9 @@ CommitTransactionCommand(void)
                        break;
 
                        /*
-                        * The user issued a SAVEPOINT inside a transaction block.
-                        * Start a subtransaction.  (DefineSavepoint already did
-                        * PushTransaction, so as to have someplace to put the SUBBEGIN
-                        * state.)
+                        * The user issued a SAVEPOINT inside a transaction block. Start a
+                        * subtransaction.  (DefineSavepoint already did PushTransaction,
+                        * so as to have someplace to put the SUBBEGIN state.)
                         */
                case TBLOCK_SUBBEGIN:
                        StartSubTransaction();
@@ -4696,9 +4695,9 @@ RollbackAndReleaseCurrentSubTransaction(void)
 
        s = CurrentTransactionState;    /* changed by pop */
        Assert(s->blockState == TBLOCK_SUBINPROGRESS ||
-                               s->blockState == TBLOCK_INPROGRESS ||
-                               s->blockState == TBLOCK_IMPLICIT_INPROGRESS ||
-                               s->blockState == TBLOCK_STARTED);
+                  s->blockState == TBLOCK_INPROGRESS ||
+                  s->blockState == TBLOCK_IMPLICIT_INPROGRESS ||
+                  s->blockState == TBLOCK_STARTED);
 }
 
 /*
index bc5a8e05697ca035bb078034295110fb6c0cff65..b2430f617c079311df6734156021cdea0fad9679 100644 (file)
@@ -5460,8 +5460,8 @@ StartupXLOG(void)
        missingContrecPtr = endOfRecoveryInfo->missingContrecPtr;
 
        /*
-        * Reset ps status display, so as no information related to recovery
-        * shows up.
+        * Reset ps status display, so as no information related to recovery shows
+        * up.
         */
        set_ps_display("");
 
@@ -5596,9 +5596,9 @@ StartupXLOG(void)
        if (!XLogRecPtrIsInvalid(missingContrecPtr))
        {
                /*
-                * We should only have a missingContrecPtr if we're not switching to
-                * a new timeline. When a timeline switch occurs, WAL is copied from
-                * the old timeline to the new only up to the end of the last complete
+                * We should only have a missingContrecPtr if we're not switching to a
+                * new timeline. When a timeline switch occurs, WAL is copied from the
+                * old timeline to the new only up to the end of the last complete
                 * record, so there can't be an incomplete WAL record that we need to
                 * disregard.
                 */
@@ -8494,7 +8494,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
                                 */
                                if (rllen > datadirpathlen &&
                                        strncmp(linkpath, DataDir, datadirpathlen) == 0 &&
-                                               IS_DIR_SEP(linkpath[datadirpathlen]))
+                                       IS_DIR_SEP(linkpath[datadirpathlen]))
                                        relpath = pstrdup(linkpath + datadirpathlen + 1);
 
                                /*
index ea7e2f67af604e625f98b6ed1594951dafef69a7..54247e1d81bfc1ea4b9e07b5e5f9403093847a08 100644 (file)
@@ -897,8 +897,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
         *
         * XLogReader machinery is only able to handle records up to a certain
         * size (ignoring machine resource limitations), so make sure that we will
-        * not emit records larger than the sizes advertised to be supported.
-        * This cap is based on DecodeXLogRecordRequiredSpace().
+        * not emit records larger than the sizes advertised to be supported. This
+        * cap is based on DecodeXLogRecordRequiredSpace().
         */
        if (total_len >= XLogRecordMaxSize)
                ereport(ERROR,
index 906e3d94690ffc784c7ce7088a71a601807d9baa..539928cb854a3af3a813740c7c7b110516041165 100644 (file)
@@ -569,7 +569,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
                                if (record_type == XLOG_DBASE_CREATE_FILE_COPY)
                                {
                                        xl_dbase_create_file_copy_rec *xlrec =
-                                       (xl_dbase_create_file_copy_rec *) record->main_data;
+                                               (xl_dbase_create_file_copy_rec *) record->main_data;
                                        RelFileLocator rlocator =
                                        {InvalidOid, xlrec->db_id, InvalidRelFileNumber};
 
@@ -596,7 +596,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
                                if (record_type == XLOG_SMGR_CREATE)
                                {
                                        xl_smgr_create *xlrec = (xl_smgr_create *)
-                                       record->main_data;
+                                               record->main_data;
 
                                        if (xlrec->forkNum == MAIN_FORKNUM)
                                        {
@@ -624,7 +624,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
                                else if (record_type == XLOG_SMGR_TRUNCATE)
                                {
                                        xl_smgr_truncate *xlrec = (xl_smgr_truncate *)
-                                       record->main_data;
+                                               record->main_data;
 
                                        /*
                                         * Don't consider prefetching anything in the truncated
index 631f260f7918974dec61ca7dcfec46b1caaed804..2e7b1ba8e184fe50b88a03443a687f9d17bc9bb0 100644 (file)
@@ -282,7 +282,7 @@ XLogRecPtr
 XLogReleasePreviousRecord(XLogReaderState *state)
 {
        DecodedXLogRecord *record;
-       XLogRecPtr              next_lsn;
+       XLogRecPtr      next_lsn;
 
        if (!state->record)
                return InvalidXLogRecPtr;
index 188f6d6f85afc2a1b229ef15194d6d5c00a38c37..4883fcb512be2706b42427ce9a9a72ff3ac0d551 100644 (file)
@@ -3215,7 +3215,7 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen,
                         XLogRecPtr targetRecPtr, char *readBuf)
 {
        XLogPageReadPrivate *private =
-       (XLogPageReadPrivate *) xlogreader->private_data;
+               (XLogPageReadPrivate *) xlogreader->private_data;
        int                     emode = private->emode;
        uint32          targetPageOff;
        XLogSegNo       targetSegNo PG_USED_FOR_ASSERTS_ONLY;
index 5baea7535b7865deba8bfc62a611a467461de3f0..45be21131c5cb80c40c88fe8e208bad3550234f9 100644 (file)
@@ -1609,10 +1609,10 @@ sendFile(bbsink *sink, const char *readfilename, const char *tarfilename,
                                                 *
                                                 * There's no guarantee that this will actually
                                                 * happen, though: the torn write could take an
-                                                * arbitrarily long time to complete. Retrying multiple
-                                                * times wouldn't fix this problem, either, though
-                                                * it would reduce the chances of it happening in
-                                                * practice. The only real fix here seems to be to
+                                                * arbitrarily long time to complete. Retrying
+                                                * multiple times wouldn't fix this problem, either,
+                                                * though it would reduce the chances of it happening
+                                                * in practice. The only real fix here seems to be to
                                                 * have some kind of interlock that allows us to wait
                                                 * until we can be certain that no write to the block
                                                 * is in progress. Since we don't have any such thing
index 73a3f4a97093b856d2bf721f9947e83fc9c35a9d..1db80cde1b2cf60bb2d64a83b7c702f8a0d41404 100644 (file)
@@ -350,6 +350,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
 
        tupdesc = CreateTemplateTupleDesc(2);
        TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0);
+
        /*
         * int8 may seem like a surprising data type for this, but in theory int4
         * would not be wide enough for this, as TimeLineID is unsigned.
@@ -360,7 +361,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
        tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
 
        /* Data row */
-       values[0]= CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
+       values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
        values[1] = Int64GetDatum(tli);
        do_tup_output(tstate, values, nulls);
 
index 656b57934e665c18f35f7c20cf58cb2bf3f21e60..84aaeb002a60f939ab254bac8d6f9b266038533a 100644 (file)
@@ -28,25 +28,25 @@ sub ParseHeader
        # There are a few types which are given one name in the C source, but a
        # different name at the SQL level.  These are enumerated here.
        my %RENAME_ATTTYPE = (
-               'int16'         => 'int2',
-               'int32'         => 'int4',
-               'int64'         => 'int8',
-               'Oid'           => 'oid',
-               'NameData'      => 'name',
+               'int16' => 'int2',
+               'int32' => 'int4',
+               'int64' => 'int8',
+               'Oid' => 'oid',
+               'NameData' => 'name',
                'TransactionId' => 'xid',
-               'XLogRecPtr'    => 'pg_lsn');
+               'XLogRecPtr' => 'pg_lsn');
 
        my %catalog;
        my $declaring_attributes = 0;
-       my $is_varlen            = 0;
-       my $is_client_code       = 0;
+       my $is_varlen = 0;
+       my $is_client_code = 0;
 
-       $catalog{columns}      = [];
-       $catalog{toasting}     = [];
-       $catalog{indexing}     = [];
-       $catalog{other_oids}   = [];
+       $catalog{columns} = [];
+       $catalog{toasting} = [];
+       $catalog{indexing} = [];
+       $catalog{other_oids} = [];
        $catalog{foreign_keys} = [];
-       $catalog{client_code}  = [];
+       $catalog{client_code} = [];
 
        open(my $ifh, '<', $input_file) || die "$input_file: $!";
 
@@ -102,10 +102,10 @@ sub ParseHeader
                {
                        push @{ $catalog{toasting} },
                          {
-                               parent_table          => $1,
-                               toast_oid             => $2,
-                               toast_index_oid       => $3,
-                               toast_oid_macro       => $4,
+                               parent_table => $1,
+                               toast_oid => $2,
+                               toast_index_oid => $3,
+                               toast_oid_macro => $4,
                                toast_index_oid_macro => $5
                          };
                }
@@ -116,11 +116,11 @@ sub ParseHeader
                        push @{ $catalog{indexing} },
                          {
                                is_unique => $1 ? 1 : 0,
-                               is_pkey   => $2 ? 1 : 0,
-                               index_name      => $3,
-                               index_oid       => $4,
+                               is_pkey => $2 ? 1 : 0,
+                               index_name => $3,
+                               index_oid => $4,
                                index_oid_macro => $5,
-                               index_decl      => $6
+                               index_decl => $6
                          };
                }
                elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/)
@@ -128,7 +128,7 @@ sub ParseHeader
                        push @{ $catalog{other_oids} },
                          {
                                other_name => $1,
-                               other_oid  => $2
+                               other_oid => $2
                          };
                }
                elsif (
@@ -138,16 +138,16 @@ sub ParseHeader
                        push @{ $catalog{foreign_keys} },
                          {
                                is_array => $1 ? 1 : 0,
-                               is_opt   => $2 ? 1 : 0,
-                               fk_cols  => $3,
+                               is_opt => $2 ? 1 : 0,
+                               fk_cols => $3,
                                pk_table => $4,
-                               pk_cols  => $5
+                               pk_cols => $5
                          };
                }
                elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/)
                {
-                       $catalog{catname}            = $1;
-                       $catalog{relation_oid}       = $2;
+                       $catalog{catname} = $1;
+                       $catalog{relation_oid} = $2;
                        $catalog{relation_oid_macro} = $3;
 
                        $catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : '';
@@ -155,15 +155,15 @@ sub ParseHeader
                          /BKI_SHARED_RELATION/ ? ' shared_relation' : '';
                        if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/)
                        {
-                               $catalog{rowtype_oid}        = $1;
+                               $catalog{rowtype_oid} = $1;
                                $catalog{rowtype_oid_clause} = " rowtype_oid $1";
-                               $catalog{rowtype_oid_macro}  = $2;
+                               $catalog{rowtype_oid_macro} = $2;
                        }
                        else
                        {
-                               $catalog{rowtype_oid}        = '';
+                               $catalog{rowtype_oid} = '';
                                $catalog{rowtype_oid_clause} = '';
-                               $catalog{rowtype_oid_macro}  = '';
+                               $catalog{rowtype_oid_macro} = '';
                        }
                        $catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0;
                        $declaring_attributes = 1;
@@ -209,8 +209,8 @@ sub ParseHeader
                                        $atttype = '_' . $atttype;
                                }
 
-                               $column{type}      = $atttype;
-                               $column{name}      = $attname;
+                               $column{type} = $atttype;
+                               $column{name} = $attname;
                                $column{is_varlen} = 1 if $is_varlen;
 
                                foreach my $attopt (@attopts)
@@ -243,14 +243,14 @@ sub ParseHeader
                                                # BKI_LOOKUP implicitly makes an FK reference
                                                push @{ $catalog{foreign_keys} },
                                                  {
-                                                       is_array =>
-                                                         ($atttype eq 'oidvector' || $atttype eq '_oid')
+                                                       is_array => (
+                                                               $atttype eq 'oidvector' || $atttype eq '_oid')
                                                        ? 1
                                                        : 0,
-                                                       is_opt   => $column{lookup_opt},
-                                                       fk_cols  => $attname,
+                                                       is_opt => $column{lookup_opt},
+                                                       fk_cols => $attname,
                                                        pk_table => $column{lookup},
-                                                       pk_cols  => 'oid'
+                                                       pk_cols => 'oid'
                                                  };
                                        }
                                        else
@@ -285,7 +285,7 @@ sub ParseData
        $input_file =~ /(\w+)\.dat$/
          or die "Input file $input_file needs to be a .dat file.\n";
        my $catname = $1;
-       my $data    = [];
+       my $data = [];
 
        if ($preserve_formatting)
        {
@@ -433,7 +433,7 @@ sub AddDefaultValues
 sub GenerateArrayTypes
 {
        my $pgtype_schema = shift;
-       my $types         = shift;
+       my $types = shift;
        my @array_types;
 
        foreach my $elem_type (@$types)
@@ -444,9 +444,9 @@ sub GenerateArrayTypes
                my %array_type;
 
                # Set up metadata fields for array type.
-               $array_type{oid}           = $elem_type->{array_type_oid};
+               $array_type{oid} = $elem_type->{array_type_oid};
                $array_type{autogenerated} = 1;
-               $array_type{line_number}   = $elem_type->{line_number};
+               $array_type{line_number} = $elem_type->{line_number};
 
                # Set up column values derived from the element type.
                $array_type{typname} = '_' . $elem_type->{typname};
@@ -499,8 +499,8 @@ sub GenerateArrayTypes
 sub RenameTempFile
 {
        my $final_name = shift;
-       my $extension  = shift;
-       my $temp_name  = $final_name . $extension;
+       my $extension = shift;
+       my $temp_name = $final_name . $extension;
 
        if (-f $final_name
                && compare($temp_name, $final_name) == 0)
index 45cdcd3dc6c89fcde13ecdc46ccc29059743287a..bc2ad773c90b5e04f4d8094ff87aa30946cf10d5 100644 (file)
@@ -3389,8 +3389,8 @@ pg_class_aclmask_ext(Oid table_oid, Oid roleid, AclMode mask,
                result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));
 
        /*
-        * Check if ACL_MAINTAIN is being checked and, if so, and not already set as
-        * part of the result, then check if the user is a member of the
+        * Check if ACL_MAINTAIN is being checked and, if so, and not already set
+        * as part of the result, then check if the user is a member of the
         * pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH
         * MATERIALIZED VIEW, and REINDEX on all relations.
         */
index 2c5bfe23a1f318d45b3e879ffeafe1edf3510b51..4a7205472c165fd7dfa915c23bd0ca2924dee2ba 100644 (file)
@@ -29,12 +29,12 @@ my $include_path;
 my $num_errors = 0;
 
 GetOptions(
-       'output:s'       => \$output_path,
-       'set-version:s'  => \$major_version,
+       'output:s' => \$output_path,
+       'set-version:s' => \$major_version,
        'include-path:s' => \$include_path) || usage();
 
 # Sanity check arguments.
-die "No input files.\n"                  unless @ARGV;
+die "No input files.\n" unless @ARGV;
 die "--set-version must be specified.\n" unless $major_version;
 die "Invalid version string: $major_version\n"
   unless $major_version =~ /^\d+$/;
@@ -67,7 +67,7 @@ foreach my $header (@ARGV)
 
        my $catalog = Catalog::ParseHeader($header);
        my $catname = $catalog->{catname};
-       my $schema  = $catalog->{columns};
+       my $schema = $catalog->{columns};
 
        if (defined $catname)
        {
@@ -100,9 +100,9 @@ foreach my $header (@ARGV)
                        if (defined $row->{descr})
                        {
                                my %descr = (
-                                       objoid      => $row->{oid},
-                                       classoid    => $catalog->{relation_oid},
-                                       objsubid    => 0,
+                                       objoid => $row->{oid},
+                                       classoid => $catalog->{relation_oid},
+                                       objsubid => 0,
                                        description => $row->{descr});
 
                                if ($catalog->{shared_relation})
@@ -364,7 +364,7 @@ open(my $ef, '<', $encfile) || die "$encfile: $!";
 
 # We're parsing an enum, so start with 0 and increment
 # every time we find an enum member.
-my $encid             = 0;
+my $encid = 0;
 my $collect_encodings = 0;
 while (<$ef>)
 {
@@ -387,27 +387,27 @@ close $ef;
 
 # Map lookup name to the corresponding hash table.
 my %lookup_kind = (
-       pg_am          => \%amoids,
-       pg_authid      => \%authidoids,
-       pg_class       => \%classoids,
-       pg_collation   => \%collationoids,
-       pg_language    => \%langoids,
-       pg_namespace   => \%namespaceoids,
-       pg_opclass     => \%opcoids,
-       pg_operator    => \%operoids,
-       pg_opfamily    => \%opfoids,
-       pg_proc        => \%procoids,
-       pg_tablespace  => \%tablespaceoids,
-       pg_ts_config   => \%tsconfigoids,
-       pg_ts_dict     => \%tsdictoids,
-       pg_ts_parser   => \%tsparseroids,
+       pg_am => \%amoids,
+       pg_authid => \%authidoids,
+       pg_class => \%classoids,
+       pg_collation => \%collationoids,
+       pg_language => \%langoids,
+       pg_namespace => \%namespaceoids,
+       pg_opclass => \%opcoids,
+       pg_operator => \%operoids,
+       pg_opfamily => \%opfoids,
+       pg_proc => \%procoids,
+       pg_tablespace => \%tablespaceoids,
+       pg_ts_config => \%tsconfigoids,
+       pg_ts_dict => \%tsdictoids,
+       pg_ts_parser => \%tsparseroids,
        pg_ts_template => \%tstemplateoids,
-       pg_type        => \%typeoids,
-       encoding       => \%encids);
+       pg_type => \%typeoids,
+       encoding => \%encids);
 
 
 # Open temp files
-my $tmpext  = ".tmp$$";
+my $tmpext = ".tmp$$";
 my $bkifile = $output_path . 'postgres.bki';
 open my $bki, '>', $bkifile . $tmpext
   or die "can't open $bkifile$tmpext: $!";
@@ -600,7 +600,7 @@ EOM
                        # each element of the array as per the lookup rule.
                        if ($column->{lookup})
                        {
-                               my $lookup     = $lookup_kind{ $column->{lookup} };
+                               my $lookup = $lookup_kind{ $column->{lookup} };
                                my $lookup_opt = $column->{lookup_opt};
                                my @lookupnames;
                                my @lookupoids;
@@ -790,7 +790,7 @@ foreach my $catname (@catnames)
 
                printf $fk_info
                  "\t{ /* %s */ %s, /* %s */ %s, \"{%s}\", \"{%s}\", %s, %s},\n",
-                 $catname,   $catalog->{relation_oid},
+                 $catname, $catalog->{relation_oid},
                  $pktabname, $catalogs{$pktabname}->{relation_oid},
                  $fkinfo->{fk_cols},
                  $fkinfo->{pk_cols},
@@ -809,9 +809,9 @@ close $fk_info;
 close $constraints;
 
 # Finally, rename the completed files into place.
-Catalog::RenameTempFile($bkifile,          $tmpext);
-Catalog::RenameTempFile($schemafile,       $tmpext);
-Catalog::RenameTempFile($fk_info_file,     $tmpext);
+Catalog::RenameTempFile($bkifile, $tmpext);
+Catalog::RenameTempFile($schemafile, $tmpext);
+Catalog::RenameTempFile($fk_info_file, $tmpext);
 Catalog::RenameTempFile($constraints_file, $tmpext);
 
 exit($num_errors != 0 ? 1 : 0);
@@ -845,13 +845,13 @@ sub gen_pg_attribute
                push @tables_needing_macros, $table_name;
 
                # Generate entries for user attributes.
-               my $attnum          = 0;
+               my $attnum = 0;
                my $priorfixedwidth = 1;
                foreach my $attr (@{ $table->{columns} })
                {
                        $attnum++;
                        my %row;
-                       $row{attnum}   = $attnum;
+                       $row{attnum} = $attnum;
                        $row{attrelid} = $table->{relation_oid};
 
                        morph_row_for_pgattr(\%row, $schema, $attr, $priorfixedwidth);
@@ -877,18 +877,18 @@ sub gen_pg_attribute
                {
                        $attnum = 0;
                        my @SYS_ATTRS = (
-                               { name => 'ctid',     type => 'tid' },
-                               { name => 'xmin',     type => 'xid' },
-                               { name => 'cmin',     type => 'cid' },
-                               { name => 'xmax',     type => 'xid' },
-                               { name => 'cmax',     type => 'cid' },
+                               { name => 'ctid', type => 'tid' },
+                               { name => 'xmin', type => 'xid' },
+                               { name => 'cmin', type => 'cid' },
+                               { name => 'xmax', type => 'xid' },
+                               { name => 'cmax', type => 'cid' },
                                { name => 'tableoid', type => 'oid' });
                        foreach my $attr (@SYS_ATTRS)
                        {
                                $attnum--;
                                my %row;
-                               $row{attnum}        = $attnum;
-                               $row{attrelid}      = $table->{relation_oid};
+                               $row{attnum} = $attnum;
+                               $row{attrelid} = $table->{relation_oid};
                                $row{attstattarget} = '0';
 
                                morph_row_for_pgattr(\%row, $schema, $attr, 1);
@@ -916,10 +916,10 @@ sub morph_row_for_pgattr
        # Copy the type data from pg_type, and add some type-dependent items
        my $type = $types{$atttype};
 
-       $row->{atttypid}   = $type->{oid};
-       $row->{attlen}     = $type->{typlen};
-       $row->{attbyval}   = $type->{typbyval};
-       $row->{attalign}   = $type->{typalign};
+       $row->{atttypid} = $type->{oid};
+       $row->{attlen} = $type->{typlen};
+       $row->{attbyval} = $type->{typbyval};
+       $row->{attalign} = $type->{typalign};
        $row->{attstorage} = $type->{typstorage};
 
        # set attndims if it's an array type
@@ -946,7 +946,7 @@ sub morph_row_for_pgattr
                # At this point the width of type name is still symbolic,
                # so we need a special test.
                $row->{attnotnull} =
-                   $row->{attlen} eq 'NAMEDATALEN' ? 't'
+                       $row->{attlen} eq 'NAMEDATALEN' ? 't'
                  : $row->{attlen} > 0              ? 't'
                  :                                   'f';
        }
@@ -962,15 +962,15 @@ sub morph_row_for_pgattr
 # Write an entry to postgres.bki.
 sub print_bki_insert
 {
-       my $row    = shift;
+       my $row = shift;
        my $schema = shift;
 
        my @bki_values;
 
        foreach my $column (@$schema)
        {
-               my $attname   = $column->{name};
-               my $atttype   = $column->{type};
+               my $attname = $column->{name};
+               my $atttype = $column->{type};
                my $bki_value = $row->{$attname};
 
                # Fold backslash-zero to empty string if it's the entire string,
@@ -1002,7 +1002,7 @@ sub print_bki_insert
 # quite identical, to the corresponding values in postgres.bki.
 sub morph_row_for_schemapg
 {
-       my $row           = shift;
+       my $row = shift;
        my $pgattr_schema = shift;
 
        foreach my $column (@$pgattr_schema)
@@ -1027,7 +1027,7 @@ sub morph_row_for_schemapg
                # don't change.
                elsif ($atttype eq 'bool')
                {
-                       $row->{$attname} = 'true'  if $row->{$attname} eq 't';
+                       $row->{$attname} = 'true' if $row->{$attname} eq 't';
                        $row->{$attname} = 'false' if $row->{$attname} eq 'f';
                }
 
@@ -1089,7 +1089,7 @@ sub form_pg_type_symbol
        # Skip for rowtypes of bootstrap catalogs, since they have their
        # own naming convention defined elsewhere.
        return
-            if $typename eq 'pg_type'
+                if $typename eq 'pg_type'
          or $typename eq 'pg_proc'
          or $typename eq 'pg_attribute'
          or $typename eq 'pg_class';
index feddff654e6678476eeaee35cf6bc90b2d638250..522da0ac8556f7971ad907cbc57ee667e0cef2b3 100644 (file)
@@ -148,8 +148,8 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple,
 #endif                                                 /* USE_ASSERT_CHECKING */
 
                /*
-                * Skip insertions into non-summarizing indexes if we only need
-                * to update summarizing indexes.
+                * Skip insertions into non-summarizing indexes if we only need to
+                * update summarizing indexes.
                 */
                if (onlySummarized && !indexInfo->ii_Summarizing)
                        continue;
index 73ddb67882f8c36e7c6f0a56c7db797f82ca14e8..69ab1b8e4b898e3cf8e912826e256eed1d96965e 100644 (file)
@@ -3842,7 +3842,7 @@ recomputeNamespacePath(void)
                                if (OidIsValid(namespaceId) &&
                                        !list_member_oid(oidlist, namespaceId) &&
                                        object_aclcheck(NamespaceRelationId, namespaceId, roleid,
-                                                                                 ACL_USAGE) == ACLCHECK_OK &&
+                                                                       ACL_USAGE) == ACLCHECK_OK &&
                                        InvokeNamespaceSearchHook(namespaceId, false))
                                        oidlist = lappend_oid(oidlist, namespaceId);
                        }
@@ -3870,7 +3870,7 @@ recomputeNamespacePath(void)
                        if (OidIsValid(namespaceId) &&
                                !list_member_oid(oidlist, namespaceId) &&
                                object_aclcheck(NamespaceRelationId, namespaceId, roleid,
-                                                                         ACL_USAGE) == ACLCHECK_OK &&
+                                                               ACL_USAGE) == ACLCHECK_OK &&
                                InvokeNamespaceSearchHook(namespaceId, false))
                                oidlist = lappend_oid(oidlist, namespaceId);
                }
@@ -4006,7 +4006,7 @@ InitTempTableNamespace(void)
         * temp table creation request is made by someone with appropriate rights.
         */
        if (object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
-                                                        ACL_CREATE_TEMP) != ACLCHECK_OK)
+                                               ACL_CREATE_TEMP) != ACLCHECK_OK)
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 errmsg("permission denied to create temporary tables in database \"%s\"",
index 792b0ef4140bd6e45ea4edeb8b837bdd6d88cca4..95918a77a153ffc8e62955d7eee4dcc7c66c9cff 100644 (file)
@@ -625,7 +625,7 @@ get_other_operator(List *otherOp, Oid otherLeftTypeId, Oid otherRightTypeId,
        /* not in catalogs, different from operator, so make shell */
 
        aclresult = object_aclcheck(NamespaceRelationId, otherNamespace, GetUserId(),
-                                                                         ACL_CREATE);
+                                                               ACL_CREATE);
        if (aclresult != ACLCHECK_OK)
                aclcheck_error(aclresult, OBJECT_SCHEMA,
                                           get_namespace_name(otherNamespace));
index 64d326f073c32801903f2a68fe40dedcaaafcd51..91c7f3426f95cb19ea1187e0f6b995faf4a58f82 100644 (file)
@@ -1414,6 +1414,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
                                        /* FALLTHROUGH */
 
                                case SHARED_DEPENDENCY_OWNER:
+
                                        /*
                                         * Save it for deletion below, if it's a local object or a
                                         * role grant. Other shared objects, such as databases,
index 10f28f94bcadd2855f01bc3f8d06527faf673f6e..e95dc31bde3582c10f20ffdadcf162ecb224e2bf 100644 (file)
@@ -231,7 +231,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
                if (OidIsValid(namespaceId))
                {
                        aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
-                                                                                         ACL_CREATE);
+                                                                               ACL_CREATE);
                        if (aclresult != ACLCHECK_OK)
                                aclcheck_error(aclresult, OBJECT_SCHEMA,
                                                           get_namespace_name(namespaceId));
@@ -1035,7 +1035,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
                                AclResult       aclresult;
 
                                aclresult = object_aclcheck(NamespaceRelationId, namespaceId, new_ownerId,
-                                                                                                 ACL_CREATE);
+                                                                                       ACL_CREATE);
                                if (aclresult != ACLCHECK_OK)
                                        aclcheck_error(aclresult, OBJECT_SCHEMA,
                                                                   get_namespace_name(namespaceId));
index c91fe66d9b20b8d0ba4bbddc0a9baa1ea242d460..2969a2bb212778b6792555ba8f09d0e7b3f3b5a7 100644 (file)
@@ -270,8 +270,8 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e
                         */
                        if (!IsBinaryUpgrade)
                        {
-                               char *langtag = icu_language_tag(colliculocale,
-                                                                                                icu_validation_level);
+                               char       *langtag = icu_language_tag(colliculocale,
+                                                                                                          icu_validation_level);
 
                                if (langtag && strcmp(colliculocale, langtag) != 0)
                                {
@@ -476,17 +476,18 @@ AlterCollation(AlterCollationStmt *stmt)
 Datum
 pg_collation_actual_version(PG_FUNCTION_ARGS)
 {
-       Oid              collid = PG_GETARG_OID(0);
-       char     provider;
-       char    *locale;
-       char    *version;
-       Datum    datum;
+       Oid                     collid = PG_GETARG_OID(0);
+       char            provider;
+       char       *locale;
+       char       *version;
+       Datum           datum;
 
        if (collid == DEFAULT_COLLATION_OID)
        {
                /* retrieve from pg_database */
 
                HeapTuple       dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId));
+
                if (!HeapTupleIsValid(dbtup))
                        ereport(ERROR,
                                        (errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -506,7 +507,8 @@ pg_collation_actual_version(PG_FUNCTION_ARGS)
        {
                /* retrieve from pg_collation */
 
-               HeapTuple       colltp          = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
+               HeapTuple       colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
+
                if (!HeapTupleIsValid(colltp))
                        ereport(ERROR,
                                        (errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -657,11 +659,10 @@ create_collation_from_locale(const char *locale, int nspid,
        Oid                     collid;
 
        /*
-        * Some systems have locale names that don't consist entirely of
-        * ASCII letters (such as "bokmål" or "français").
-        * This is pretty silly, since we need the locale itself to
-        * interpret the non-ASCII characters. We can't do much with
-        * those, so we filter them out.
+        * Some systems have locale names that don't consist entirely of ASCII
+        * letters (such as "bokmål" or "français"). This is pretty
+        * silly, since we need the locale itself to interpret the non-ASCII
+        * characters. We can't do much with those, so we filter them out.
         */
        if (!pg_is_ascii(locale))
        {
@@ -681,19 +682,18 @@ create_collation_from_locale(const char *locale, int nspid,
                return -1;
        }
        if (enc == PG_SQL_ASCII)
-               return -1;              /* C/POSIX are already in the catalog */
+               return -1;                              /* C/POSIX are already in the catalog */
 
        /* count valid locales found in operating system */
        (*nvalidp)++;
 
        /*
-        * Create a collation named the same as the locale, but quietly
-        * doing nothing if it already exists.  This is the behavior we
-        * need even at initdb time, because some versions of "locale -a"
-        * can report the same locale name more than once.  And it's
-        * convenient for later import runs, too, since you just about
-        * always want to add on new locales without a lot of chatter
-        * about existing ones.
+        * Create a collation named the same as the locale, but quietly doing
+        * nothing if it already exists.  This is the behavior we need even at
+        * initdb time, because some versions of "locale -a" can report the same
+        * locale name more than once.  And it's convenient for later import runs,
+        * too, since you just about always want to add on new locales without a
+        * lot of chatter about existing ones.
         */
        collid = CollationCreate(locale, nspid, GetUserId(),
                                                         COLLPROVIDER_LIBC, true, enc,
@@ -995,8 +995,8 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
                param.nvalidp = &nvalid;
 
                /*
-                * Enumerate the locales that are either installed on or supported
-                * by the OS.
+                * Enumerate the locales that are either installed on or supported by
+                * the OS.
                 */
                if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL,
                                                                 (LPARAM) &param, NULL))
index 2e242eeff242bd13008f041ca685faa3aad4b967..99d4080ea959bfa11f336477bcdf1e71e9e118df 100644 (file)
@@ -259,7 +259,7 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
        List       *rlocatorlist = NIL;
        LockRelId       relid;
        Snapshot        snapshot;
-       SMgrRelation    smgr;
+       SMgrRelation smgr;
        BufferAccessStrategy bstrategy;
 
        /* Get pg_class relfilenumber. */
@@ -1065,8 +1065,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
                 */
                if (!IsBinaryUpgrade && dbiculocale != src_iculocale)
                {
-                       char *langtag = icu_language_tag(dbiculocale,
-                                                                                        icu_validation_level);
+                       char       *langtag = icu_language_tag(dbiculocale,
+                                                                                                  icu_validation_level);
 
                        if (langtag && strcmp(dbiculocale, langtag) != 0)
                        {
@@ -1219,7 +1219,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
                dst_deftablespace = get_tablespace_oid(tablespacename, false);
                /* check permissions */
                aclresult = object_aclcheck(TableSpaceRelationId, dst_deftablespace, GetUserId(),
-                                                                                  ACL_CREATE);
+                                                                       ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_TABLESPACE,
                                                   tablespacename);
@@ -1406,8 +1406,8 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
         * If we're going to be reading data for the to-be-created database into
         * shared_buffers, take a lock on it. Nobody should know that this
         * database exists yet, but it's good to maintain the invariant that an
-        * AccessExclusiveLock on the database is sufficient to drop all
-        * of its buffers without worrying about more being read later.
+        * AccessExclusiveLock on the database is sufficient to drop all of its
+        * buffers without worrying about more being read later.
         *
         * Note that we need to do this before entering the
         * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback
@@ -1933,7 +1933,7 @@ movedb(const char *dbname, const char *tblspcname)
         * Permission checks
         */
        aclresult = object_aclcheck(TableSpaceRelationId, dst_tblspcoid, GetUserId(),
-                                                                          ACL_CREATE);
+                                                               ACL_CREATE);
        if (aclresult != ACLCHECK_OK)
                aclcheck_error(aclresult, OBJECT_TABLESPACE,
                                           tblspcname);
@@ -3110,7 +3110,7 @@ dbase_redo(XLogReaderState *record)
        if (info == XLOG_DBASE_CREATE_FILE_COPY)
        {
                xl_dbase_create_file_copy_rec *xlrec =
-               (xl_dbase_create_file_copy_rec *) XLogRecGetData(record);
+                       (xl_dbase_create_file_copy_rec *) XLogRecGetData(record);
                char       *src_path;
                char       *dst_path;
                char       *parent_path;
@@ -3182,7 +3182,7 @@ dbase_redo(XLogReaderState *record)
        else if (info == XLOG_DBASE_CREATE_WAL_LOG)
        {
                xl_dbase_create_wal_log_rec *xlrec =
-               (xl_dbase_create_wal_log_rec *) XLogRecGetData(record);
+                       (xl_dbase_create_wal_log_rec *) XLogRecGetData(record);
                char       *dbpath;
                char       *parent_path;
 
index 82bda158895eaf0495d52c8644b86292c5e460b3..469a6c2ee968bf5abaa42a98cd1598fd0fc0f2d1 100644 (file)
@@ -493,6 +493,7 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
                case OBJECT_TABLE:
                case OBJECT_TABLESPACE:
                case OBJECT_VIEW:
+
                        /*
                         * These are handled elsewhere, so if someone gets here the code
                         * is probably wrong or should be revisited.
index 5334c503e1236f49264f801660d5b509224b814d..15f9bddcdf3fa751973183b7ece5c766ae32e249 100644 (file)
@@ -1523,7 +1523,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
                        {
                                BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan;
                                const char *indexname =
-                               explain_get_index_name(bitmapindexscan->indexid);
+                                       explain_get_index_name(bitmapindexscan->indexid);
 
                                if (es->format == EXPLAIN_FORMAT_TEXT)
                                        appendStringInfo(es->str, " on %s",
@@ -3008,7 +3008,7 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
                for (n = 0; n < incrsortstate->shared_info->num_workers; n++)
                {
                        IncrementalSortInfo *incsort_info =
-                       &incrsortstate->shared_info->sinfo[n];
+                               &incrsortstate->shared_info->sinfo[n];
 
                        /*
                         * If a worker hasn't processed any sort groups at all, then
@@ -4212,7 +4212,7 @@ ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es)
 {
        ListCell   *cell;
        const char *label =
-       (list_length(css->custom_ps) != 1 ? "children" : "child");
+               (list_length(css->custom_ps) != 1 ? "children" : "child");
 
        foreach(cell, css->custom_ps)
                ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es);
index 71caa3b9f308b94a3bf79d51d3c600ac69a00163..49c7864c7cfaa1e734f609404d1f78b84c4757e4 100644 (file)
@@ -151,7 +151,7 @@ compute_return_type(TypeName *returnType, Oid languageOid,
                namespaceId = QualifiedNameGetCreationNamespace(returnType->names,
                                                                                                                &typname);
                aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
-                                                                                 ACL_CREATE);
+                                                                       ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_SCHEMA,
                                                   get_namespace_name(namespaceId));
@@ -2117,7 +2117,7 @@ ExecuteDoStmt(ParseState *pstate, DoStmt *stmt, bool atomic)
                AclResult       aclresult;
 
                aclresult = object_aclcheck(LanguageRelationId, codeblock->langOid, GetUserId(),
-                                                                                ACL_USAGE);
+                                                                       ACL_USAGE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_LANGUAGE,
                                                   NameStr(languageStruct->lanname));
index e6ee99e51f6d203992024d36fd73e2994589f3bd..a5168c9f097756ae74a889abdda354bb8b1a1208 100644 (file)
@@ -748,7 +748,7 @@ DefineIndex(Oid relationId,
                AclResult       aclresult;
 
                aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid,
-                                                                                 ACL_CREATE);
+                                                                       ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_SCHEMA,
                                                   get_namespace_name(namespaceId));
@@ -780,7 +780,7 @@ DefineIndex(Oid relationId,
                AclResult       aclresult;
 
                aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid,
-                                                                                  ACL_CREATE);
+                                                                       ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_TABLESPACE,
                                                   get_tablespace_name(tablespaceId));
@@ -2708,7 +2708,7 @@ ExecReindex(ParseState *pstate, ReindexStmt *stmt, bool isTopLevel)
                        AclResult       aclresult;
 
                        aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid,
-                                                                                          GetUserId(), ACL_CREATE);
+                                                                               GetUserId(), ACL_CREATE);
                        if (aclresult != ACLCHECK_OK)
                                aclcheck_error(aclresult, OBJECT_TABLESPACE,
                                                           get_tablespace_name(params.tablespaceOid));
@@ -3066,11 +3066,12 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind,
                /*
                 * The table can be reindexed if the user has been granted MAINTAIN on
                 * the table or one of its partition ancestors or the user is a
-                * superuser, the table owner, or the database/schema owner (but in the
-                * latter case, only if it's not a shared relation).  pg_class_aclcheck
-                * includes the superuser case, and depending on objectKind we already
-                * know that the user has permission to run REINDEX on this database or
-                * schema per the permission checks at the beginning of this routine.
+                * superuser, the table owner, or the database/schema owner (but in
+                * the latter case, only if it's not a shared relation).
+                * pg_class_aclcheck includes the superuser case, and depending on
+                * objectKind we already know that the user has permission to run
+                * REINDEX on this database or schema per the permission checks at the
+                * beginning of this routine.
                 */
                if (classtuple->relisshared &&
                        pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK &&
@@ -3312,7 +3313,7 @@ ReindexMultipleInternal(List *relids, ReindexParams *params)
                        AclResult       aclresult;
 
                        aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid,
-                                                                                          GetUserId(), ACL_CREATE);
+                                                                               GetUserId(), ACL_CREATE);
                        if (aclresult != ACLCHECK_OK)
                                aclcheck_error(aclresult, OBJECT_TABLESPACE,
                                                           get_tablespace_name(params->tablespaceOid));
src/backend/commands/schemacmds.c
index b6a71154a8784404d0988e30bd8e25189c1f6dcf..6eb3dc6bab60feaa17cfa23f72b094ecbfbb7524 100644 (file)
@@ -400,7 +400,7 @@ AlterSchemaOwner_internal(HeapTuple tup, Relation rel, Oid newOwnerId)
                 * no special case for them.
                 */
                aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
-                                                                                ACL_CREATE);
+                                                                       ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_DATABASE,
                                                   get_database_name(MyDatabaseId));
src/backend/commands/subscriptioncmds.c
index e8b288d01cbdb13f40bcd8ede6a11b3b14f6fb9f..1c88c2bccbf8d10d65315ed863055883f236cdad 100644 (file)
@@ -604,9 +604,9 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
                PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)");
 
        /*
-        * We don't want to allow unprivileged users to be able to trigger attempts
-        * to access arbitrary network destinations, so require the user to have
-        * been specifically authorized to create subscriptions.
+        * We don't want to allow unprivileged users to be able to trigger
+        * attempts to access arbitrary network destinations, so require the user
+        * to have been specifically authorized to create subscriptions.
         */
        if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION))
                ereport(ERROR,
@@ -631,10 +631,10 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
         * exempt a subscription from this requirement.
         */
        if (!opts.passwordrequired && !superuser_arg(owner))
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                        errmsg("password_required=false is superuser-only"),
-                                        errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+               ereport(ERROR,
+                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                                errmsg("password_required=false is superuser-only"),
+                                errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
 
        /*
         * If built with appropriate switch, whine when regression-testing
@@ -1113,8 +1113,8 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
        if (!sub->passwordrequired && !superuser())
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                                errmsg("password_required=false is superuser-only"),
-                                                errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+                                errmsg("password_required=false is superuser-only"),
+                                errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
 
        /* Lock the subscription so nobody else can do anything with it. */
        LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
@@ -1827,8 +1827,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
        if (!form->subpasswordrequired && !superuser())
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                                errmsg("password_required=false is superuser-only"),
-                                                errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+                                errmsg("password_required=false is superuser-only"),
+                                errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
 
        /* Must be able to become new owner */
        check_can_set_role(GetUserId(), newOwnerId);
@@ -1837,8 +1837,8 @@ AlterSubscriptionOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
         * current owner must have CREATE on database
         *
         * This is consistent with how ALTER SCHEMA ... OWNER TO works, but some
-        * other object types behave differently (e.g. you can't give a table to
-        * user who lacks CREATE privileges on a schema).
+        * other object types behave differently (e.g. you can't give a table to a
+        * user who lacks CREATE privileges on a schema).
         */
        aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId,
                                                                GetUserId(), ACL_CREATE);
src/backend/commands/tablecmds.c
index c7a8a689b7c851dbc3b5a0943a0f87dbf2709265..4d49d70c339b63b909911964671a4f03fcafe2f6 100644 (file)
@@ -806,7 +806,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
                AclResult       aclresult;
 
                aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, GetUserId(),
-                                                                                  ACL_CREATE);
+                                                                       ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_TABLESPACE,
                                                   get_tablespace_name(tablespaceId));
@@ -1931,7 +1931,7 @@ ExecuteTruncateGuts(List *explicit_rels,
        resultRelInfo = resultRelInfos;
        foreach(cell, rels)
        {
-               UserContext     ucxt;
+               UserContext ucxt;
 
                if (run_as_table_owner)
                        SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
@@ -2143,7 +2143,7 @@ ExecuteTruncateGuts(List *explicit_rels,
        resultRelInfo = resultRelInfos;
        foreach(cell, rels)
        {
-               UserContext     ucxt;
+               UserContext ucxt;
 
                if (run_as_table_owner)
                        SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
@@ -2635,7 +2635,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
                                if (CompressionMethodIsValid(attribute->attcompression))
                                {
                                        const char *compression =
-                                       GetCompressionMethodName(attribute->attcompression);
+                                               GetCompressionMethodName(attribute->attcompression);
 
                                        if (def->compression == NULL)
                                                def->compression = pstrdup(compression);
@@ -13947,7 +13947,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock
 
                                /* New owner must have CREATE privilege on namespace */
                                aclresult = object_aclcheck(NamespaceRelationId, namespaceOid, newOwnerId,
-                                                                                                 ACL_CREATE);
+                                                                                       ACL_CREATE);
                                if (aclresult != ACLCHECK_OK)
                                        aclcheck_error(aclresult, OBJECT_SCHEMA,
                                                                   get_namespace_name(namespaceOid));
@@ -14377,7 +14377,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
                if (check_option)
                {
                        const char *view_updatable_error =
-                       view_query_is_auto_updatable(view_query, true);
+                               view_query_is_auto_updatable(view_query, true);
 
                        if (view_updatable_error)
                                ereport(ERROR,
@@ -14656,7 +14656,7 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt)
                AclResult       aclresult;
 
                aclresult = object_aclcheck(TableSpaceRelationId, new_tablespaceoid, GetUserId(),
-                                                                                  ACL_CREATE);
+                                                                       ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_TABLESPACE,
                                                   get_tablespace_name(new_tablespaceoid));
@@ -17134,7 +17134,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
        if (IsA(stmt, RenameStmt))
        {
                aclresult = object_aclcheck(NamespaceRelationId, classform->relnamespace,
-                                                                                 GetUserId(), ACL_CREATE);
+                                                                       GetUserId(), ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_SCHEMA,
                                                   get_namespace_name(classform->relnamespace));
src/backend/commands/tablespace.c
index 3dfbf6a917887f7b78e0bf388a4eb7a9333afb18..13b0dee1468b1899597aee5bc08115afc24769bd 100644 (file)
@@ -1278,7 +1278,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
 
                        /* Check permissions, similarly complaining only if interactive */
                        aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
-                                                                                          ACL_CREATE);
+                                                                               ACL_CREATE);
                        if (aclresult != ACLCHECK_OK)
                        {
                                if (source >= PGC_S_INTERACTIVE)
@@ -1408,7 +1408,7 @@ PrepareTempTablespaces(void)
 
                /* Check permissions similarly */
                aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
-                                                                                  ACL_CREATE);
+                                                                       ACL_CREATE);
                if (aclresult != ACLCHECK_OK)
                        continue;
 
src/backend/commands/typecmds.c
index 3440dbc4405b8cf2da3ac7e295ae922abea2d3d2..216482095d2b58c5c4769cf896e7381151cf207d 100644 (file)
@@ -734,7 +734,7 @@ DefineDomain(CreateDomainStmt *stmt)
 
        /* Check we have creation rights in target namespace */
        aclresult = object_aclcheck(NamespaceRelationId, domainNamespace, GetUserId(),
-                                                                         ACL_CREATE);
+                                                               ACL_CREATE);
        if (aclresult != ACLCHECK_OK)
                aclcheck_error(aclresult, OBJECT_SCHEMA,
                                           get_namespace_name(domainNamespace));
@@ -3743,8 +3743,8 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype)
 
                        /* New owner must have CREATE privilege on namespace */
                        aclresult = object_aclcheck(NamespaceRelationId, typTup->typnamespace,
-                                                                                         newOwnerId,
-                                                                                         ACL_CREATE);
+                                                                               newOwnerId,
+                                                                               ACL_CREATE);
                        if (aclresult != ACLCHECK_OK)
                                aclcheck_error(aclresult, OBJECT_SCHEMA,
                                                           get_namespace_name(typTup->typnamespace));
index 707114bdd06739b36f77444c923259e52d2d49cc..d63d3c58ca8db9976bb204123c6d595270e541fa 100644 (file)
@@ -86,7 +86,7 @@ typedef struct
 int                    Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256;
 char      *createrole_self_grant = "";
 bool           createrole_self_grant_enabled = false;
-GrantRoleOptions       createrole_self_grant_options;
+GrantRoleOptions createrole_self_grant_options;
 
 /* Hook to check passwords in CreateRole() and AlterRole() */
 check_password_hook_type check_password_hook = NULL;
@@ -169,7 +169,7 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
        DefElem    *dadminmembers = NULL;
        DefElem    *dvalidUntil = NULL;
        DefElem    *dbypassRLS = NULL;
-       GrantRoleOptions        popt;
+       GrantRoleOptions popt;
 
        /* The defaults can vary depending on the original statement type */
        switch (stmt->stmt_type)
@@ -535,8 +535,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
         *
         * The grantor of record for this implicit grant is the bootstrap
         * superuser, which means that the CREATEROLE user cannot revoke the
-        * grant. They can however grant the created role back to themselves
-        * with different options, since they enjoy ADMIN OPTION on it.
+        * grant. They can however grant the created role back to themselves with
+        * different options, since they enjoy ADMIN OPTION on it.
         */
        if (!superuser())
        {
@@ -561,8 +561,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
                                        BOOTSTRAP_SUPERUSERID, &poptself);
 
                /*
-                * We must make the implicit grant visible to the code below, else
-                * the additional grants will fail.
+                * We must make the implicit grant visible to the code below, else the
+                * additional grants will fail.
                 */
                CommandCounterIncrement();
 
@@ -585,8 +585,8 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt)
         * Add the specified members to this new role. adminmembers get the admin
         * option, rolemembers don't.
         *
-        * NB: No permissions check is required here. If you have enough rights
-        * to create a role, you can add any members you like.
+        * NB: No permissions check is required here. If you have enough rights to
+        * create a role, you can add any members you like.
         */
        AddRoleMems(currentUserId, stmt->role, roleid,
                                rolemembers, roleSpecsToIds(rolemembers),
@@ -647,7 +647,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt)
        DefElem    *dbypassRLS = NULL;
        Oid                     roleid;
        Oid                     currentUserId = GetUserId();
-       GrantRoleOptions        popt;
+       GrantRoleOptions popt;
 
        check_rolespec_name(stmt->role,
                                                _("Cannot alter reserved roles."));
@@ -862,7 +862,7 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt)
         */
        if (dissuper)
        {
-               bool    should_be_super = boolVal(dissuper->arg);
+               bool            should_be_super = boolVal(dissuper->arg);
 
                if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID)
                        ereport(ERROR,
@@ -1021,9 +1021,9 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
                shdepLockAndCheckObject(AuthIdRelationId, roleid);
 
                /*
-                * To mess with a superuser you gotta be superuser; otherwise you
-                * need CREATEROLE plus admin option on the target role; unless you're
-                * just trying to change your own settings
+                * To mess with a superuser you gotta be superuser; otherwise you need
+                * CREATEROLE plus admin option on the target role; unless you're just
+                * trying to change your own settings
                 */
                if (roleform->rolsuper)
                {
@@ -1037,7 +1037,7 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
                else
                {
                        if ((!have_createrole_privilege() ||
-                               !is_admin_of_role(GetUserId(), roleid))
+                                !is_admin_of_role(GetUserId(), roleid))
                                && roleid != GetUserId())
                                ereport(ERROR,
                                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -1490,14 +1490,14 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt)
        Oid                     grantor;
        List       *grantee_ids;
        ListCell   *item;
-       GrantRoleOptions        popt;
+       GrantRoleOptions popt;
        Oid                     currentUserId = GetUserId();
 
        /* Parse options list. */
        InitGrantRoleOptions(&popt);
        foreach(item, stmt->opt)
        {
-               DefElem    *opt = (DefElem *) lfirst(item);
+               DefElem    *opt = (DefElem *) lfirst(item);
                char       *optval = defGetString(opt);
 
                if (strcmp(opt->defname, "admin") == 0)
@@ -1546,8 +1546,8 @@ GrantRole(ParseState *pstate, GrantRoleStmt *stmt)
        /*
         * Step through all of the granted roles and add, update, or remove
         * entries in pg_auth_members as appropriate. If stmt->is_grant is true,
-        * we are adding new grants or, if they already exist, updating options
-        * on those grants. If stmt->is_grant is false, we are revoking grants or
+        * we are adding new grants or, if they already exist, updating options on
+        * those grants. If stmt->is_grant is false, we are revoking grants or
         * removing options from them.
         */
        foreach(item, stmt->granted_roles)
@@ -1848,8 +1848,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
                                                                                ObjectIdGetDatum(grantorId));
 
                /*
-                * If we found a tuple, update it with new option values, unless
-                * there are no changes, in which case issue a WARNING.
+                * If we found a tuple, update it with new option values, unless there
+                * are no changes, in which case issue a WARNING.
                 *
                 * If we didn't find a tuple, just insert one.
                 */
@@ -1932,8 +1932,8 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
                                        popt->inherit;
                        else
                        {
-                               HeapTuple               mrtup;
-                               Form_pg_authid  mrform;
+                               HeapTuple       mrtup;
+                               Form_pg_authid mrform;
 
                                mrtup = SearchSysCache1(AUTHOID, memberid);
                                if (!HeapTupleIsValid(mrtup))
@@ -2332,8 +2332,8 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions,
        /*
         * If popt.specified == 0, we're revoking the grant entirely; otherwise,
         * we expect just one bit to be set, and we're revoking the corresponding
-        * option. As of this writing, there's no syntax that would allow for
-        * an attempt to revoke multiple options at once, and the logic below
+        * option. As of this writing, there's no syntax that would allow for an
+        * attempt to revoke multiple options at once, and the logic below
         * wouldn't work properly if such syntax were added, so assert that our
         * caller isn't trying to do that.
         */
@@ -2365,7 +2365,7 @@ plan_single_revoke(CatCList *memlist, RevokeRoleGrantAction *actions,
                        }
                        else
                        {
-                               bool    revoke_admin_option_only;
+                               bool            revoke_admin_option_only;
 
                                /*
                                 * Revoking the grant entirely, or ADMIN option on a grant,
@@ -2572,7 +2572,7 @@ check_createrole_self_grant(char **newval, void **extra, GucSource source)
 void
 assign_createrole_self_grant(const char *newval, void *extra)
 {
-       unsigned        options = * (unsigned *) extra;
+       unsigned        options = *(unsigned *) extra;
 
        createrole_self_grant_enabled = (options != 0);
        createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN
src/backend/commands/view.c
index ff98c773f551a61828d5d6b4e9bf61359861d8f8..9bd77546b9dc28fa0aae3fa2e886ca00e1c9d8d0 100644 (file)
@@ -437,7 +437,7 @@ DefineView(ViewStmt *stmt, const char *queryString,
        if (check_option)
        {
                const char *view_updatable_error =
-               view_query_is_auto_updatable(viewParse, true);
+                       view_query_is_auto_updatable(viewParse, true);
 
                if (view_updatable_error)
                        ereport(ERROR,
src/backend/executor/execExpr.c
index bf257a41c8559c38aaa035c85a2a66883736d5f1..e6e616865c23153defa9bc0be70644e1985c2900 100644 (file)
@@ -1214,8 +1214,8 @@ ExecInitExprRec(Expr *node, ExprState *state,
 
                                /* Check permission to call function */
                                aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid,
-                                                                                        GetUserId(),
-                                                                                        ACL_EXECUTE);
+                                                                                       GetUserId(),
+                                                                                       ACL_EXECUTE);
                                if (aclresult != ACLCHECK_OK)
                                        aclcheck_error(aclresult, OBJECT_FUNCTION,
                                                                   get_func_name(cmpfuncid));
@@ -1224,8 +1224,8 @@ ExecInitExprRec(Expr *node, ExprState *state,
                                if (OidIsValid(opexpr->hashfuncid))
                                {
                                        aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid,
-                                                                                                GetUserId(),
-                                                                                                ACL_EXECUTE);
+                                                                                               GetUserId(),
+                                                                                               ACL_EXECUTE);
                                        if (aclresult != ACLCHECK_OK)
                                                aclcheck_error(aclresult, OBJECT_FUNCTION,
                                                                           get_func_name(opexpr->hashfuncid));
@@ -3613,7 +3613,7 @@ ExecBuildAggTrans(AggState *aggstate, AggStatePerPhase phase,
                         * column sorted on.
                         */
                        TargetEntry *source_tle =
-                       (TargetEntry *) linitial(pertrans->aggref->args);
+                               (TargetEntry *) linitial(pertrans->aggref->args);
 
                        Assert(list_length(pertrans->aggref->args) == 1);
 
src/backend/executor/execExprInterp.c
index 7cc443ec520f5664ffbf62bdf055096203499c85..7a4d7a4eeec60bed516c003f5160a61608e411ba 100644 (file)
@@ -1659,7 +1659,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
                {
                        AggState   *aggstate = castNode(AggState, state->parent);
                        AggStatePerGroup pergroup_allaggs =
-                       aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
+                               aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
 
                        if (pergroup_allaggs == NULL)
                                EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
@@ -1684,7 +1684,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
                        AggState   *aggstate = castNode(AggState, state->parent);
                        AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
                        AggStatePerGroup pergroup =
-                       &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+                               &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
 
                        Assert(pertrans->transtypeByVal);
 
@@ -1712,7 +1712,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
                        AggState   *aggstate = castNode(AggState, state->parent);
                        AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
                        AggStatePerGroup pergroup =
-                       &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+                               &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
 
                        Assert(pertrans->transtypeByVal);
 
@@ -1730,7 +1730,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
                        AggState   *aggstate = castNode(AggState, state->parent);
                        AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
                        AggStatePerGroup pergroup =
-                       &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+                               &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
 
                        Assert(pertrans->transtypeByVal);
 
@@ -1747,7 +1747,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
                        AggState   *aggstate = castNode(AggState, state->parent);
                        AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
                        AggStatePerGroup pergroup =
-                       &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+                               &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
 
                        Assert(!pertrans->transtypeByVal);
 
@@ -1768,7 +1768,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
                        AggState   *aggstate = castNode(AggState, state->parent);
                        AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
                        AggStatePerGroup pergroup =
-                       &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+                               &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
 
                        Assert(!pertrans->transtypeByVal);
 
@@ -1785,7 +1785,7 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
                        AggState   *aggstate = castNode(AggState, state->parent);
                        AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
                        AggStatePerGroup pergroup =
-                       &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+                               &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
 
                        Assert(!pertrans->transtypeByVal);
 
src/backend/executor/execIndexing.c
index da28e5e40ca47afa71dd3687d7e565a2c7c87656..1d82b64b89784c38802fa2036d7380693800eb8e 100644 (file)
@@ -354,8 +354,8 @@ ExecInsertIndexTuples(ResultRelInfo *resultRelInfo,
                        continue;
 
                /*
-                * Skip processing of non-summarizing indexes if we only
-                * update summarizing indexes
+                * Skip processing of non-summarizing indexes if we only update
+                * summarizing indexes
                 */
                if (onlySummarizing && !indexInfo->ii_Summarizing)
                        continue;
src/backend/executor/execSRF.c
index d09a7758dcc76adb6d6da1480b2fd3debb4c6b05..73bf9152a4b87a30d423b0fb80662b99bfbc3c60 100644 (file)
@@ -260,7 +260,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
                        if (first_time)
                        {
                                MemoryContext oldcontext =
-                               MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+                                       MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
 
                                tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
                                rsinfo.setResult = tupstore;
@@ -290,7 +290,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
                                        if (tupdesc == NULL)
                                        {
                                                MemoryContext oldcontext =
-                                               MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+                                                       MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
 
                                                /*
                                                 * This is the first non-NULL result from the
@@ -395,7 +395,7 @@ no_function_result:
        if (rsinfo.setResult == NULL)
        {
                MemoryContext oldcontext =
-               MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+                       MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
 
                tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
                rsinfo.setResult = tupstore;
src/backend/executor/nodeAgg.c
index ad81a675aa3d2cb0bc0dde6452c69cddd6082669..468db94fe5ba07a19842aeb3a841ca956e5953af 100644 (file)
@@ -3690,7 +3690,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 
                /* Check permission to call aggregate function */
                aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(),
-                                                                        ACL_EXECUTE);
+                                                                       ACL_EXECUTE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_AGGREGATE,
                                                   get_func_name(aggref->aggfnoid));
@@ -3757,7 +3757,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
                        if (OidIsValid(finalfn_oid))
                        {
                                aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
-                                                                                        ACL_EXECUTE);
+                                                                                       ACL_EXECUTE);
                                if (aclresult != ACLCHECK_OK)
                                        aclcheck_error(aclresult, OBJECT_FUNCTION,
                                                                   get_func_name(finalfn_oid));
@@ -3766,7 +3766,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
                        if (OidIsValid(serialfn_oid))
                        {
                                aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner,
-                                                                                        ACL_EXECUTE);
+                                                                                       ACL_EXECUTE);
                                if (aclresult != ACLCHECK_OK)
                                        aclcheck_error(aclresult, OBJECT_FUNCTION,
                                                                   get_func_name(serialfn_oid));
@@ -3775,7 +3775,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
                        if (OidIsValid(deserialfn_oid))
                        {
                                aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner,
-                                                                                        ACL_EXECUTE);
+                                                                                       ACL_EXECUTE);
                                if (aclresult != ACLCHECK_OK)
                                        aclcheck_error(aclresult, OBJECT_FUNCTION,
                                                                   get_func_name(deserialfn_oid));
src/backend/executor/nodeHash.c
index 301e4acba3c26b05ee62d684352c0b3318f861e6..8b5c35b82b88461c2f61987d2265c1249961d87d 100644 (file)
@@ -1339,7 +1339,7 @@ ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
                        else
                        {
                                size_t          tuple_size =
-                               MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
+                                       MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
 
                                /* It belongs in a later batch. */
                                hashtable->batches[batchno].estimated_size += tuple_size;
@@ -1381,7 +1381,7 @@ ExecParallelHashRepartitionRest(HashJoinTable hashtable)
        for (i = 1; i < old_nbatch; ++i)
        {
                ParallelHashJoinBatch *shared =
-               NthParallelHashJoinBatch(old_batches, i);
+                       NthParallelHashJoinBatch(old_batches, i);
 
                old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
                                                                                 ParallelWorkerNumber + 1,
@@ -3337,7 +3337,7 @@ ExecHashTableDetachBatch(HashJoinTable hashtable)
                        while (DsaPointerIsValid(batch->chunks))
                        {
                                HashMemoryChunk chunk =
-                               dsa_get_address(hashtable->area, batch->chunks);
+                                       dsa_get_address(hashtable->area, batch->chunks);
                                dsa_pointer next = chunk->next.shared;
 
                                dsa_free(hashtable->area, batch->chunks);
src/backend/executor/nodeHashjoin.c
index e40436db38ebbb8e21c1fc135bb533120e71aa4e..980746128bcbe7c917f14bdf936426b86de9f6fe 100644 (file)
@@ -1216,7 +1216,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
                {
                        SharedTuplestoreAccessor *inner_tuples;
                        Barrier    *batch_barrier =
-                       &hashtable->batches[batchno].shared->batch_barrier;
+                               &hashtable->batches[batchno].shared->batch_barrier;
 
                        switch (BarrierAttach(batch_barrier))
                        {
@@ -1330,22 +1330,22 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
        BufFile    *file = *fileptr;
 
        /*
-        * The batch file is lazily created. If this is the first tuple
-        * written to this batch, the batch file is created and its buffer is
-        * allocated in the spillCxt context, NOT in the batchCxt.
+        * The batch file is lazily created. If this is the first tuple written to
+        * this batch, the batch file is created and its buffer is allocated in
+        * the spillCxt context, NOT in the batchCxt.
         *
-        * During the build phase, buffered files are created for inner
-        * batches. Each batch's buffered file is closed (and its buffer freed)
-        * after the batch is loaded into memory during the outer side scan.
-        * Therefore, it is necessary to allocate the batch file buffer in a
-        * memory context which outlives the batch itself.
+        * During the build phase, buffered files are created for inner batches.
+        * Each batch's buffered file is closed (and its buffer freed) after the
+        * batch is loaded into memory during the outer side scan. Therefore, it
+        * is necessary to allocate the batch file buffer in a memory context
+        * which outlives the batch itself.
         *
-        * Also, we use spillCxt instead of hashCxt for a better accounting of
-        * the spilling memory consumption.
+        * Also, we use spillCxt instead of hashCxt for a better accounting of the
+        * spilling memory consumption.
         */
        if (file == NULL)
        {
-               MemoryContext   oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
+               MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
 
                file = BufFileCreateTemp(false);
                *fileptr = file;
@@ -1622,7 +1622,7 @@ ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt)
 {
        int                     plan_node_id = state->js.ps.plan->plan_node_id;
        ParallelHashJoinState *pstate =
-       shm_toc_lookup(pcxt->toc, plan_node_id, false);
+               shm_toc_lookup(pcxt->toc, plan_node_id, false);
 
        /*
         * It would be possible to reuse the shared hash table in single-batch
@@ -1657,7 +1657,7 @@ ExecHashJoinInitializeWorker(HashJoinState *state,
        HashState  *hashNode;
        int                     plan_node_id = state->js.ps.plan->plan_node_id;
        ParallelHashJoinState *pstate =
-       shm_toc_lookup(pwcxt->toc, plan_node_id, false);
+               shm_toc_lookup(pwcxt->toc, plan_node_id, false);
 
        /* Attach to the space for shared temporary files. */
        SharedFileSetAttach(&pstate->fileset, pwcxt->seg);
src/backend/executor/nodeIncrementalSort.c
index 26ceafec5f825c1b15b9bb6f6e3f6f42e1da7aac..34257ce34be9d693aa5a6312c399d250bfb550fc 100644 (file)
@@ -1007,9 +1007,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
        if (incrsortstate->ss.ps.instrument != NULL)
        {
                IncrementalSortGroupInfo *fullsortGroupInfo =
-               &incrsortstate->incsort_info.fullsortGroupInfo;
+                       &incrsortstate->incsort_info.fullsortGroupInfo;
                IncrementalSortGroupInfo *prefixsortGroupInfo =
-               &incrsortstate->incsort_info.prefixsortGroupInfo;
+                       &incrsortstate->incsort_info.prefixsortGroupInfo;
 
                fullsortGroupInfo->groupCount = 0;
                fullsortGroupInfo->maxDiskSpaceUsed = 0;
src/backend/executor/nodeModifyTable.c
index 7f5002527f0e0599b51b3066b785fbdf68b28911..2a5fec8d017e5fe38dc4651c3cceb6743add5345 100644 (file)
@@ -111,7 +111,7 @@ typedef struct UpdateContext
 {
        bool            updated;                /* did UPDATE actually occur? */
        bool            crossPartUpdate;        /* was it a cross-partition update? */
-       TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
+       TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
 
        /*
         * Lock mode to acquire on the latest tuple version before performing
@@ -881,7 +881,7 @@ ExecInsert(ModifyTableContext *context,
                        {
                                TupleDesc       tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
                                TupleDesc       plan_tdesc =
-                               CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
+                                       CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
 
                                resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
                                        MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
src/backend/executor/nodeTableFuncscan.c
index 0c6c912778993b270c2c70991dcb1775b9521a04..791cbd2372400a3bafd5ba7ef5e80b8ab60e9bcf 100644 (file)
@@ -352,7 +352,7 @@ tfuncInitialize(TableFuncScanState *tstate, ExprContext *econtext, Datum doc)
        int                     colno;
        Datum           value;
        int                     ordinalitycol =
-       ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
+               ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
 
        /*
         * Install the document as a possibly-toasted Datum into the tablefunc
src/backend/executor/nodeWindowAgg.c
index 4f0618f27ab51b3fc2610e555b18f88e53340ba8..310ac23e3a134a5133dbf406c8ec4710862a6a6d 100644 (file)
@@ -2582,7 +2582,7 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
 
                /* Check permission to call window function */
                aclresult = object_aclcheck(ProcedureRelationId, wfunc->winfnoid, GetUserId(),
-                                                                        ACL_EXECUTE);
+                                                                       ACL_EXECUTE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_FUNCTION,
                                                   get_func_name(wfunc->winfnoid));
@@ -2821,7 +2821,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
        if (!OidIsValid(aggform->aggminvtransfn))
                use_ma_code = false;    /* sine qua non */
        else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY &&
-               aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
+                        aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
                use_ma_code = true;             /* decision forced by safety */
        else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)
                use_ma_code = false;    /* non-moving frame head */
@@ -2871,7 +2871,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
                ReleaseSysCache(procTuple);
 
                aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner,
-                                                                        ACL_EXECUTE);
+                                                                       ACL_EXECUTE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_FUNCTION,
                                                   get_func_name(transfn_oid));
@@ -2880,7 +2880,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
                if (OidIsValid(invtransfn_oid))
                {
                        aclresult = object_aclcheck(ProcedureRelationId, invtransfn_oid, aggOwner,
-                                                                                ACL_EXECUTE);
+                                                                               ACL_EXECUTE);
                        if (aclresult != ACLCHECK_OK)
                                aclcheck_error(aclresult, OBJECT_FUNCTION,
                                                           get_func_name(invtransfn_oid));
@@ -2890,7 +2890,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
                if (OidIsValid(finalfn_oid))
                {
                        aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
-                                                                                ACL_EXECUTE);
+                                                                               ACL_EXECUTE);
                        if (aclresult != ACLCHECK_OK)
                                aclcheck_error(aclresult, OBJECT_FUNCTION,
                                                           get_func_name(finalfn_oid));
src/backend/executor/spi.c
index 256632c98587c52fbe1db3f994b95e2e369a0dc2..33975687b3828a752a8f8006ca05338e6b58ae2a 100644 (file)
@@ -3345,7 +3345,7 @@ SPI_register_trigger_data(TriggerData *tdata)
        if (tdata->tg_newtable)
        {
                EphemeralNamedRelation enr =
-               palloc(sizeof(EphemeralNamedRelationData));
+                       palloc(sizeof(EphemeralNamedRelationData));
                int                     rc;
 
                enr->md.name = tdata->tg_trigger->tgnewtable;
@@ -3362,7 +3362,7 @@ SPI_register_trigger_data(TriggerData *tdata)
        if (tdata->tg_oldtable)
        {
                EphemeralNamedRelation enr =
-               palloc(sizeof(EphemeralNamedRelationData));
+                       palloc(sizeof(EphemeralNamedRelationData));
                int                     rc;
 
                enr->md.name = tdata->tg_trigger->tgoldtable;
src/backend/jit/llvm/llvmjit.c
index a8b73a9cf14b5900cdbbc0073e3514069a87a5d3..04ae3052a8261ec11f2c091b5e0bb0b5e54738a0 100644 (file)
@@ -799,9 +799,9 @@ llvm_session_initialize(void)
        LLVMInitializeNativeAsmParser();
 
        /*
-        * When targeting an LLVM version with opaque pointers enabled by
-        * default, turn them off for the context we build our code in.  We don't
-        * need to do so for other contexts (e.g. llvm_ts_context).  Once the IR is
+        * When targeting an LLVM version with opaque pointers enabled by default,
+        * turn them off for the context we build our code in.  We don't need to
+        * do so for other contexts (e.g. llvm_ts_context).  Once the IR is
         * generated, it carries the necessary information.
         */
 #if LLVM_VERSION_MAJOR > 14
@@ -1118,7 +1118,7 @@ llvm_resolve_symbol(const char *symname, void *ctx)
 
 static LLVMErrorRef
 llvm_resolve_symbols(LLVMOrcDefinitionGeneratorRef GeneratorObj, void *Ctx,
-                                        LLVMOrcLookupStateRef * LookupState, LLVMOrcLookupKind Kind,
+                                        LLVMOrcLookupStateRef *LookupState, LLVMOrcLookupKind Kind,
                                         LLVMOrcJITDylibRef JD, LLVMOrcJITDylibLookupFlags JDLookupFlags,
                                         LLVMOrcCLookupSet LookupSet, size_t LookupSetSize)
 {
@@ -1175,7 +1175,7 @@ static LLVMOrcObjectLayerRef
 llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple)
 {
        LLVMOrcObjectLayerRef objlayer =
-       LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
+               LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
 
 #if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER
        if (jit_debugging_support)
src/backend/jit/llvm/llvmjit_deform.c
index 6b15588da6dc2cd67f8eec6996d1be9cb8d9f08f..15d4a7b431a250f4c6e0388919bca866e5990c69 100644 (file)
@@ -650,7 +650,7 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc,
                {
                        LLVMValueRef v_tmp_loaddata;
                        LLVMTypeRef vartypep =
-                       LLVMPointerType(LLVMIntType(att->attlen * 8), 0);
+                               LLVMPointerType(LLVMIntType(att->attlen * 8), 0);
 
                        v_tmp_loaddata =
                                LLVMBuildPointerCast(b, v_attdatap, vartypep, "");
src/backend/jit/llvm/llvmjit_expr.c
index 774db57ae2e16c5fb664e67b2966041a448d50a7..00d7b8110b9ebe281edaf9cb5bd5eb4ddfdd8e7d 100644 (file)
@@ -1047,7 +1047,7 @@ llvm_compile_expr(ExprState *state)
                                        else
                                        {
                                                LLVMValueRef v_value =
-                                               LLVMBuildLoad(b, v_resvaluep, "");
+                                                       LLVMBuildLoad(b, v_resvaluep, "");
 
                                                v_value = LLVMBuildZExt(b,
                                                                                                LLVMBuildICmp(b, LLVMIntEQ,
@@ -2127,8 +2127,7 @@ llvm_compile_expr(ExprState *state)
 
                                        /*
                                         * pergroup = &aggstate->all_pergroups
-                                        * [op->d.agg_trans.setoff]
-                                        * [op->d.agg_trans.transno];
+                                        * [op->d.agg_trans.setoff] [op->d.agg_trans.transno];
                                         */
                                        v_allpergroupsp =
                                                l_load_struct_gep(b, v_aggstatep,
src/backend/libpq/be-secure-gssapi.c
index 7f52e1ee23d1a050fdf325937e99fa62b493c360..43d45810cd1c903ae1c2da8c37300334836b3abb 100644 (file)
@@ -527,8 +527,8 @@ secure_open_gssapi(Port *port)
 
        /*
         * Use the configured keytab, if there is one.  As we now require MIT
-        * Kerberos, we might consider using the credential store extensions in the
-        * future instead of the environment variable.
+        * Kerberos, we might consider using the credential store extensions in
+        * the future instead of the environment variable.
         */
        if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0')
        {
src/backend/libpq/be-secure-openssl.c
index dc4153a2f2e5cdabad9be629e0a18a2b6aee5002..05276ab95cee07f5a284a4358cae41005970ac86 100644 (file)
@@ -1104,8 +1104,8 @@ prepare_cert_name(char *name)
        if (namelen > MAXLEN)
        {
                /*
-                * Keep the end of the name, not the beginning, since the most specific
-                * field is likely to give users the most information.
+                * Keep the end of the name, not the beginning, since the most
+                * specific field is likely to give users the most information.
                 */
                truncated = name + namelen - MAXLEN;
                truncated[0] = truncated[1] = truncated[2] = '.';
@@ -1165,8 +1165,8 @@ verify_cb(int ok, X509_STORE_CTX *ctx)
 
                /*
                 * Get the Subject and Issuer for logging, but don't let maliciously
-                * huge certs flood the logs, and don't reflect non-ASCII bytes into it
-                * either.
+                * huge certs flood the logs, and don't reflect non-ASCII bytes into
+                * it either.
                 */
                subject = X509_NAME_to_cstring(X509_get_subject_name(cert));
                sub_prepared = prepare_cert_name(subject);
src/backend/libpq/hba.c
index d786a0183525211bbda2f1b157ee1d00e2251e2c..1ef113649fb7e97cd20bb0a82cbab131f5945eaf 100644 (file)
@@ -2693,8 +2693,9 @@ load_hba(void)
        if (!ok)
        {
                /*
-                * File contained one or more errors, so bail out.  MemoryContextDelete
-                * is enough to clean up everything, including regexes.
+                * File contained one or more errors, so bail out.
+                * MemoryContextDelete is enough to clean up everything, including
+                * regexes.
                 */
                MemoryContextDelete(hbacxt);
                return false;
@@ -3056,8 +3057,9 @@ load_ident(void)
        if (!ok)
        {
                /*
-                * File contained one or more errors, so bail out.  MemoryContextDelete
-                * is enough to clean up everything, including regexes.
+                * File contained one or more errors, so bail out.
+                * MemoryContextDelete is enough to clean up everything, including
+                * regexes.
                 */
                MemoryContextDelete(ident_context);
                return false;
src/backend/nodes/gen_node_support.pl
index ecbcadb8bf57e827161ba15bb32f165d42402307..b89b491d35006ae1fd1106cea4720aec7e3bbc70 100644 (file)
@@ -106,7 +106,7 @@ my @nodetag_only_files = qw(
 # In HEAD, these variables should be left undef, since we don't promise
 # ABI stability during development.
 
-my $last_nodetag    = undef;
+my $last_nodetag = undef;
 my $last_nodetag_no = undef;
 
 # output file names
@@ -161,9 +161,9 @@ push @node_types, qw(List);
 # (Ideally we'd mark List as "special copy/equal" not "no copy/equal".
 # But until there's other use-cases for that, just hot-wire the tests
 # that would need to distinguish.)
-push @no_copy,            qw(List);
-push @no_equal,           qw(List);
-push @no_query_jumble,    qw(List);
+push @no_copy, qw(List);
+push @no_equal, qw(List);
+push @no_query_jumble, qw(List);
 push @special_read_write, qw(List);
 
 # Nodes with custom copy/equal implementations are skipped from
@@ -230,7 +230,7 @@ foreach my $infile (@ARGV)
        }
        $file_content .= $raw_file_content;
 
-       my $lineno   = 0;
+       my $lineno = 0;
        my $prevline = '';
        foreach my $line (split /\n/, $file_content)
        {
@@ -247,7 +247,7 @@ foreach my $infile (@ARGV)
                        if ($line =~ /;$/)
                        {
                                # found the end, re-attach any previous line(s)
-                               $line     = $prevline . $line;
+                               $line = $prevline . $line;
                                $prevline = '';
                        }
                        elsif ($prevline eq ''
@@ -272,7 +272,7 @@ foreach my $infile (@ARGV)
                        if ($subline == 1)
                        {
                                $is_node_struct = 0;
-                               $supertype      = undef;
+                               $supertype = undef;
                                next if $line eq '{';
                                die "$infile:$lineno: expected opening brace\n";
                        }
@@ -280,7 +280,7 @@ foreach my $infile (@ARGV)
                        elsif ($subline == 2
                                && $line =~ /^\s*pg_node_attr\(([\w(), ]*)\)$/)
                        {
-                               $node_attrs        = $1;
+                               $node_attrs = $1;
                                $node_attrs_lineno = $lineno;
                                # hack: don't count the line
                                $subline--;
@@ -296,8 +296,8 @@ foreach my $infile (@ARGV)
                                }
                                elsif ($line =~ /\s*(\w+)\s+(\w+);/ and elem $1, @node_types)
                                {
-                                       $is_node_struct  = 1;
-                                       $supertype       = $1;
+                                       $is_node_struct = 1;
+                                       $supertype = $1;
                                        $supertype_field = $2;
                                        next;
                                }
@@ -339,7 +339,7 @@ foreach my $infile (@ARGV)
                                                }
                                                elsif ($attr eq 'no_copy_equal')
                                                {
-                                                       push @no_copy,  $in_struct;
+                                                       push @no_copy, $in_struct;
                                                        push @no_equal, $in_struct;
                                                }
                                                elsif ($attr eq 'no_query_jumble')
@@ -373,7 +373,7 @@ foreach my $infile (@ARGV)
                                        push @node_types, $in_struct;
 
                                        # field names, types, attributes
-                                       my @f  = @my_fields;
+                                       my @f = @my_fields;
                                        my %ft = %my_field_types;
                                        my %fa = %my_field_attrs;
 
@@ -405,7 +405,7 @@ foreach my $infile (@ARGV)
                                                unshift @f, @superfields;
                                        }
                                        # save in global info structure
-                                       $node_type_info{$in_struct}->{fields}      = \@f;
+                                       $node_type_info{$in_struct}->{fields} = \@f;
                                        $node_type_info{$in_struct}->{field_types} = \%ft;
                                        $node_type_info{$in_struct}->{field_attrs} = \%fa;
 
@@ -428,9 +428,9 @@ foreach my $infile (@ARGV)
                                }
 
                                # start new cycle
-                               $in_struct      = undef;
-                               $node_attrs     = '';
-                               @my_fields      = ();
+                               $in_struct = undef;
+                               $node_attrs = '';
+                               @my_fields = ();
                                %my_field_types = ();
                                %my_field_attrs = ();
                        }
@@ -441,10 +441,10 @@ foreach my $infile (@ARGV)
                        {
                                if ($is_node_struct)
                                {
-                                       my $type       = $1;
-                                       my $name       = $2;
+                                       my $type = $1;
+                                       my $name = $2;
                                        my $array_size = $3;
-                                       my $attrs      = $4;
+                                       my $attrs = $4;
 
                                        # strip "const"
                                        $type =~ s/^const\s*//;
@@ -499,9 +499,9 @@ foreach my $infile (@ARGV)
                        {
                                if ($is_node_struct)
                                {
-                                       my $type  = $1;
-                                       my $name  = $2;
-                                       my $args  = $3;
+                                       my $type = $1;
+                                       my $name = $2;
+                                       my $args = $3;
                                        my $attrs = $4;
 
                                        my @attrs;
@@ -540,20 +540,20 @@ foreach my $infile (@ARGV)
                        if ($line =~ /^(?:typedef )?struct (\w+)$/ && $1 ne 'Node')
                        {
                                $in_struct = $1;
-                               $subline   = 0;
+                               $subline = 0;
                        }
                        # one node type typedef'ed directly from another
                        elsif ($line =~ /^typedef (\w+) (\w+);$/ and elem $1, @node_types)
                        {
                                my $alias_of = $1;
-                               my $n        = $2;
+                               my $n = $2;
 
                                # copy everything over
                                push @node_types, $n;
-                               my @f  = @{ $node_type_info{$alias_of}->{fields} };
+                               my @f = @{ $node_type_info{$alias_of}->{fields} };
                                my %ft = %{ $node_type_info{$alias_of}->{field_types} };
                                my %fa = %{ $node_type_info{$alias_of}->{field_attrs} };
-                               $node_type_info{$n}->{fields}      = \@f;
+                               $node_type_info{$n}->{fields} = \@f;
                                $node_type_info{$n}->{field_types} = \%ft;
                                $node_type_info{$n}->{field_attrs} = \%fa;
                        }
@@ -608,7 +608,7 @@ open my $nt, '>', "$output_path/nodetags.h$tmpext"
 
 printf $nt $header_comment, 'nodetags.h';
 
-my $tagno    = 0;
+my $tagno = 0;
 my $last_tag = undef;
 foreach my $n (@node_types, @extra_tags)
 {
@@ -669,7 +669,7 @@ foreach my $n (@node_types)
 {
        next if elem $n, @abstract_types;
        next if elem $n, @nodetag_only;
-       my $struct_no_copy  = (elem $n, @no_copy);
+       my $struct_no_copy = (elem $n, @no_copy);
        my $struct_no_equal = (elem $n, @no_equal);
        next if $struct_no_copy && $struct_no_equal;
 
@@ -705,15 +705,15 @@ _equal${n}(const $n *a, const $n *b)
        # print instructions for each field
        foreach my $f (@{ $node_type_info{$n}->{fields} })
        {
-               my $t            = $node_type_info{$n}->{field_types}{$f};
-               my @a            = @{ $node_type_info{$n}->{field_attrs}{$f} };
-               my $copy_ignore  = $struct_no_copy;
+               my $t = $node_type_info{$n}->{field_types}{$f};
+               my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
+               my $copy_ignore = $struct_no_copy;
                my $equal_ignore = $struct_no_equal;
 
                # extract per-field attributes
                my $array_size_field;
                my $copy_as_field;
-               my $copy_as_scalar  = 0;
+               my $copy_as_scalar = 0;
                my $equal_as_scalar = 0;
                foreach my $a (@a)
                {
@@ -768,7 +768,7 @@ _equal${n}(const $n *a, const $n *b)
                # select instructions by field type
                if ($t eq 'char*')
                {
-                       print $cff "\tCOPY_STRING_FIELD($f);\n"    unless $copy_ignore;
+                       print $cff "\tCOPY_STRING_FIELD($f);\n" unless $copy_ignore;
                        print $eff "\tCOMPARE_STRING_FIELD($f);\n" unless $equal_ignore;
                }
                elsif ($t eq 'Bitmapset*' || $t eq 'Relids')
@@ -779,7 +779,7 @@ _equal${n}(const $n *a, const $n *b)
                }
                elsif ($t eq 'int' && $f =~ 'location$')
                {
-                       print $cff "\tCOPY_LOCATION_FIELD($f);\n"    unless $copy_ignore;
+                       print $cff "\tCOPY_LOCATION_FIELD($f);\n" unless $copy_ignore;
                        print $eff "\tCOMPARE_LOCATION_FIELD($f);\n" unless $equal_ignore;
                }
                elsif (elem $t, @scalar_types or elem $t, @enum_types)
@@ -828,7 +828,7 @@ _equal${n}(const $n *a, const $n *b)
                elsif ($t eq 'function pointer')
                {
                        # we can copy and compare as a scalar
-                       print $cff "\tCOPY_SCALAR_FIELD($f);\n"    unless $copy_ignore;
+                       print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
                        print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore;
                }
                # node type
@@ -846,13 +846,13 @@ _equal${n}(const $n *a, const $n *b)
                          and $1 ne 'List'
                          and !$equal_ignore;
 
-                       print $cff "\tCOPY_NODE_FIELD($f);\n"    unless $copy_ignore;
+                       print $cff "\tCOPY_NODE_FIELD($f);\n" unless $copy_ignore;
                        print $eff "\tCOMPARE_NODE_FIELD($f);\n" unless $equal_ignore;
                }
                # array (inline)
                elsif ($t =~ /^\w+\[\w+\]$/)
                {
-                       print $cff "\tCOPY_ARRAY_FIELD($f);\n"    unless $copy_ignore;
+                       print $cff "\tCOPY_ARRAY_FIELD($f);\n" unless $copy_ignore;
                        print $eff "\tCOMPARE_ARRAY_FIELD($f);\n" unless $equal_ignore;
                }
                elsif ($t eq 'struct CustomPathMethods*'
@@ -861,7 +861,7 @@ _equal${n}(const $n *a, const $n *b)
                        # Fields of these types are required to be a pointer to a
                        # static table of callback functions.  So we don't copy
                        # the table itself, just reference the original one.
-                       print $cff "\tCOPY_SCALAR_FIELD($f);\n"    unless $copy_ignore;
+                       print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
                        print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore;
                }
                else
@@ -1073,7 +1073,7 @@ _read${n}(void)
                {
                        print $off "\tWRITE_FLOAT_FIELD($f.startup);\n";
                        print $off "\tWRITE_FLOAT_FIELD($f.per_tuple);\n";
-                       print $rff "\tREAD_FLOAT_FIELD($f.startup);\n"   unless $no_read;
+                       print $rff "\tREAD_FLOAT_FIELD($f.startup);\n" unless $no_read;
                        print $rff "\tREAD_FLOAT_FIELD($f.per_tuple);\n" unless $no_read;
                }
                elsif ($t eq 'Selectivity')
@@ -1278,8 +1278,8 @@ _jumble${n}(JumbleState *jstate, Node *node)
        # print instructions for each field
        foreach my $f (@{ $node_type_info{$n}->{fields} })
        {
-               my $t                   = $node_type_info{$n}->{field_types}{$f};
-               my @a                   = @{ $node_type_info{$n}->{field_attrs}{$f} };
+               my $t = $node_type_info{$n}->{field_types}{$f};
+               my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
                my $query_jumble_ignore = $struct_no_query_jumble;
                my $query_jumble_location = 0;
 
index 0b271dae84f69e671d32291e4e8372a146e55008..ef475d95a18c80ed0e4c7882bb8af7bba9fd9396 100644
@@ -2011,7 +2011,7 @@ cost_incremental_sort(Path *path,
        {
                PathKey    *key = (PathKey *) lfirst(l);
                EquivalenceMember *member = (EquivalenceMember *)
-               linitial(key->pk_eclass->ec_members);
+                       linitial(key->pk_eclass->ec_members);
 
                /*
                 * Check if the expression contains Var with "varno 0" so that we
index c1b1557570f4b3e8a21958e16488f0a0baa727cb..f456b3b0a44885bcb2b453883ff81d7e59d0eb2e 100644
@@ -370,7 +370,7 @@ adjust_appendrel_attrs_mutator(Node *node,
                        if (leaf_relid)
                        {
                                RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
-                               list_nth(context->root->row_identity_vars, var->varattno - 1);
+                                       list_nth(context->root->row_identity_vars, var->varattno - 1);
 
                                if (bms_is_member(leaf_relid, ridinfo->rowidrels))
                                {
index 04ea04b5b64134490515220698b144b1a65f07a6..32a407f54b555e9865b198947d7338b50a176460 100644
@@ -1158,7 +1158,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
                {
                        /* UPDATE/DELETE/MERGE row identity vars are always needed */
                        RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
-                       list_nth(root->row_identity_vars, var->varattno - 1);
+                               list_nth(root->row_identity_vars, var->varattno - 1);
 
                        /* Update reltarget width estimate from RowIdentityVarInfo */
                        joinrel->reltarget->width += ridinfo->rowidwidth;
index ddfdf20d337b78712318127c2fc617aaaee72ca5..e9b6f40eaa61b486eb365b7fec2ce8baa0023af9 100644
@@ -9,7 +9,7 @@
 use strict;
 use warnings;
 
-my $gram_filename   = $ARGV[0];
+my $gram_filename = $ARGV[0];
 my $kwlist_filename = $ARGV[1];
 
 my $errors = 0;
@@ -47,10 +47,10 @@ $, = ' ';     # set output field separator
 $\ = "\n";    # set output record separator
 
 my %keyword_categories;
-$keyword_categories{'unreserved_keyword'}     = 'UNRESERVED_KEYWORD';
-$keyword_categories{'col_name_keyword'}       = 'COL_NAME_KEYWORD';
+$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD';
+$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD';
 $keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD';
-$keyword_categories{'reserved_keyword'}       = 'RESERVED_KEYWORD';
+$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD';
 
 open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename");
 
@@ -183,7 +183,7 @@ kwlist_line: while (<$kwlist>)
        if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*), (.*)\)/)
        {
                my ($kwstring) = $1;
-               my ($kwname)   = $2;
+               my ($kwname) = $2;
                my ($kwcat_id) = $3;
                my ($collabel) = $4;
 
index 0b3632735bfbe6e28e94042eabc5887fcf7b7bed..346fd272b6d17ad3eda86b3ff0a2041ca59763b9 100644
@@ -3357,7 +3357,7 @@ checkJsonOutputFormat(ParseState *pstate, const JsonFormat *format,
        if (format->format_type == JS_FORMAT_JSON)
        {
                JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ?
-               format->encoding : JS_ENC_UTF8;
+                       format->encoding : JS_ENC_UTF8;
 
                if (targettype != BYTEAOID &&
                        format->encoding != JS_ENC_DEFAULT)
index d8866373b8f6764d04772bc4cba0cc6dc7635347..91b1156d9918dab49b4bd666524340c0553bb088 100644
@@ -165,8 +165,8 @@ transformMergeStmt(ParseState *pstate, MergeStmt *stmt)
 
        /*
         * Set up the MERGE target table.  The target table is added to the
-        * namespace below and to joinlist in transform_MERGE_to_join, so don't
-        * do it here.
+        * namespace below and to joinlist in transform_MERGE_to_join, so don't do
+        * it here.
         */
        qry->resultRelation = setTargetTable(pstate, stmt->relation,
                                                                                 stmt->relation->inh,
index b1255e3b709f0d8affed36522b6d82dd986e0132..d67580fc77a772742806336d6abf1bf6ec01ce7a 100644
@@ -993,7 +993,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
        if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
        {
                aclresult = object_aclcheck(TypeRelationId, relation->rd_rel->reltype, GetUserId(),
-                                                                        ACL_USAGE);
+                                                                       ACL_USAGE);
                if (aclresult != ACLCHECK_OK)
                        aclcheck_error(aclresult, OBJECT_TYPE,
                                                   RelationGetRelationName(relation));
@@ -2355,7 +2355,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
                                 * mentioned above.
                                 */
                                Datum           attoptions =
-                               get_attoptions(RelationGetRelid(index_rel), i + 1);
+                                       get_attoptions(RelationGetRelid(index_rel), i + 1);
 
                                defopclass = GetDefaultOpClass(attform->atttypid,
                                                                                           index_rel->rd_rel->relam);
index c685621416e9694d2f57eb108c3b50b1a4342dfb..7c5d9110fb09d857d946db1ca142c1752d40aa48 100644
@@ -2340,9 +2340,9 @@ merge_default_partitions(PartitionMap *outer_map,
                /*
                 * The default partitions have to be joined with each other, so merge
                 * them.  Note that each of the default partitions isn't merged yet
-                * (see, process_outer_partition()/process_inner_partition()), so
-                * they should be merged successfully.  The merged partition will act
-                * as the default partition of the join relation.
+                * (see, process_outer_partition()/process_inner_partition()), so they
+                * should be merged successfully.  The merged partition will act as
+                * the default partition of the join relation.
                 */
                Assert(outer_merged_index == -1);
                Assert(inner_merged_index == -1);
@@ -3193,7 +3193,7 @@ check_new_partition_bound(char *relname, Relation parent,
                                                                 * datums list.
                                                                 */
                                                                PartitionRangeDatum *datum =
-                                                               list_nth(spec->upperdatums, abs(cmpval) - 1);
+                                                                       list_nth(spec->upperdatums, abs(cmpval) - 1);
 
                                                                /*
                                                                 * The new partition overlaps with the
index 509587636e27293f40703894e416e6d3acb29ae3..6f9c2765d6894de4def4a0dfcaefa5567a60c5b8 100644
@@ -58,8 +58,8 @@ fork_process(void)
        /*
         * We start postmaster children with signals blocked.  This allows them to
         * install their own handlers before unblocking, to avoid races where they
-        * might run the postmaster's handler and miss an important control signal.
-        * With more analysis this could potentially be relaxed.
+        * might run the postmaster's handler and miss an important control
+        * signal. With more analysis this could potentially be relaxed.
         */
        sigprocmask(SIG_SETMASK, &BlockSig, &save_mask);
        result = fork();
index 38c09b112321d25037ec1e638240bd5a79bf8355..9087ef95af3e9a5442ad8ac9d5610dd762fb182d 100644
@@ -759,6 +759,7 @@ lexescape(struct vars *v)
                        RETV(PLAIN, c);
                        break;
                default:
+
                        /*
                         * Throw an error for unrecognized ASCII alpha escape sequences,
                         * which reserves them for future use if needed.
index 052505e46f8b1fed227d0a4c5ade4b6d36c7da82..dc9c5c82d940190f376790a5eaa0f17deaf27d82 100644
@@ -259,7 +259,7 @@ libpqrcv_check_conninfo(const char *conninfo, bool must_use_password)
 
        if (must_use_password)
        {
-               bool    uses_password = false;
+               bool            uses_password = false;
 
                for (opt = opts; opt->keyword != NULL; ++opt)
                {
index beef399b429df72537c1b0b051e05c1ba816f857..d91055a44091ec57e4c05aa58cbcb728c7884f03 100644
@@ -155,7 +155,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
                case XLOG_PARAMETER_CHANGE:
                        {
                                xl_parameter_change *xlrec =
-                               (xl_parameter_change *) XLogRecGetData(buf->record);
+                                       (xl_parameter_change *) XLogRecGetData(buf->record);
 
                                /*
                                 * If wal_level on the primary is reduced to less than
@@ -164,8 +164,8 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
                                 * invalidated when this WAL record is replayed; and further,
                                 * slot creation fails when wal_level is not sufficient; but
                                 * all these operations are not synchronized, so a logical
-                                * slot may creep in while the wal_level is being
-                                * reduced. Hence this extra check.
+                                * slot may creep in while the wal_level is being reduced.
+                                * Hence this extra check.
                                 */
                                if (xlrec->wal_level < WAL_LEVEL_LOGICAL)
                                {
@@ -752,7 +752,7 @@ DecodePrepare(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
        SnapBuild  *builder = ctx->snapshot_builder;
        XLogRecPtr      origin_lsn = parsed->origin_lsn;
        TimestampTz prepare_time = parsed->xact_time;
-       RepOriginId     origin_id = XLogRecGetOrigin(buf->record);
+       RepOriginId origin_id = XLogRecGetOrigin(buf->record);
        int                     i;
        TransactionId xid = parsed->twophase_xid;
 
@@ -828,7 +828,7 @@ DecodeAbort(LogicalDecodingContext *ctx, XLogRecordBuffer *buf,
        int                     i;
        XLogRecPtr      origin_lsn = InvalidXLogRecPtr;
        TimestampTz abort_time = parsed->xact_time;
-       RepOriginId     origin_id = XLogRecGetOrigin(buf->record);
+       RepOriginId origin_id = XLogRecGetOrigin(buf->record);
        bool            skip_xact;
 
        if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN)
index 7e1f677f7a05016d5e05955347c395e6c3fc0383..41243d0187aac8553f393cf5224cd2d72fd88ec0 100644
@@ -341,8 +341,8 @@ CreateInitDecodingContext(const char *plugin,
        MemoryContext old_context;
 
        /*
-        * On a standby, this check is also required while creating the
-        * slot. Check the comments in the function.
+        * On a standby, this check is also required while creating the slot.
+        * Check the comments in the function.
         */
        CheckLogicalDecodingRequirements();
 
index 2c04c8707dc29d8a8be0d756f483cae55d454519..b0255ffd25a77a8cfdc7869e011836115e9efc8d 100644
@@ -833,7 +833,7 @@ replorigin_redo(XLogReaderState *record)
                case XLOG_REPLORIGIN_SET:
                        {
                                xl_replorigin_set *xlrec =
-                               (xl_replorigin_set *) XLogRecGetData(record);
+                                       (xl_replorigin_set *) XLogRecGetData(record);
 
                                replorigin_advance(xlrec->node_id,
                                                                   xlrec->remote_lsn, record->EndRecPtr,
index b85b890010e6c37536960ff21627501a0765def0..26d252bd87571c616f2b34dee666c95649086def 100644
@@ -1408,7 +1408,7 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
        {
                dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node);
                ReorderBufferChange *next_change =
-               dlist_container(ReorderBufferChange, node, next);
+                       dlist_container(ReorderBufferChange, node, next);
 
                /* txn stays the same */
                state->entries[off].lsn = next_change->lsn;
@@ -1439,8 +1439,8 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
                {
                        /* successfully restored changes from disk */
                        ReorderBufferChange *next_change =
-                       dlist_head_element(ReorderBufferChange, node,
-                                                          &entry->txn->changes);
+                               dlist_head_element(ReorderBufferChange, node,
+                                                                  &entry->txn->changes);
 
                        elog(DEBUG2, "restored %u/%u changes from disk",
                                 (uint32) entry->txn->nentries_mem,
@@ -1582,7 +1582,7 @@ ReorderBufferCleanupTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
                dclist_delete_from(&rb->catchange_txns, &txn->catchange_node);
 
        /* now remove reference from buffer */
-       hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
+       hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
        Assert(found);
 
        /* remove entries spilled to disk */
@@ -3580,8 +3580,8 @@ ReorderBufferCheckMemoryLimit(ReorderBuffer *rb)
        ReorderBufferTXN *txn;
 
        /*
-        * Bail out if logical_replication_mode is buffered and we haven't exceeded
-        * the memory limit.
+        * Bail out if logical_replication_mode is buffered and we haven't
+        * exceeded the memory limit.
         */
        if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED &&
                rb->size < logical_decoding_work_mem * 1024L)
@@ -3841,7 +3841,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
                        {
                                char       *data;
                                Size            inval_size = sizeof(SharedInvalidationMessage) *
-                               change->data.inval.ninvalidations;
+                                       change->data.inval.ninvalidations;
 
                                sz += inval_size;
 
@@ -4010,10 +4010,10 @@ ReorderBufferStreamTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
         * After that we need to reuse the snapshot from the previous run.
         *
         * Unlike DecodeCommit which adds xids of all the subtransactions in
-        * snapshot's xip array via SnapBuildCommitTxn, we can't do that here
-        * but we do add them to subxip array instead via ReorderBufferCopySnap.
-        * This allows the catalog changes made in subtransactions decoded till
-        * now to be visible.
+        * snapshot's xip array via SnapBuildCommitTxn, we can't do that here but
+        * we do add them to subxip array instead via ReorderBufferCopySnap. This
+        * allows the catalog changes made in subtransactions decoded till now to
+        * be visible.
         */
        if (txn->snapshot_now == NULL)
        {
@@ -4206,7 +4206,7 @@ ReorderBufferRestoreChanges(ReorderBuffer *rb, ReorderBufferTXN *txn,
        dlist_foreach_modify(cleanup_iter, &txn->changes)
        {
                ReorderBufferChange *cleanup =
-               dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
+                       dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
 
                dlist_delete(&cleanup->node);
                ReorderBufferReturnChange(rb, cleanup, true);
@@ -4431,7 +4431,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
                case REORDER_BUFFER_CHANGE_INVALIDATION:
                        {
                                Size            inval_size = sizeof(SharedInvalidationMessage) *
-                               change->data.inval.ninvalidations;
+                                       change->data.inval.ninvalidations;
 
                                change->data.inval.invalidations =
                                        MemoryContextAlloc(rb->context, inval_size);
@@ -4936,7 +4936,7 @@ ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
                dlist_foreach_modify(it, &ent->chunks)
                {
                        ReorderBufferChange *change =
-                       dlist_container(ReorderBufferChange, node, it.cur);
+                               dlist_container(ReorderBufferChange, node, it.cur);
 
                        dlist_delete(&change->node);
                        ReorderBufferReturnChange(rb, change, true);
index 62542827e4b81a96fd61ba7f1e9ca963011f2fff..0786bb0ab712288eae2473b4e4b439d35b3cc425 100644
@@ -574,7 +574,7 @@ SnapBuildInitialSnapshot(SnapBuild *builder)
        Assert(builder->building_full_snapshot);
 
        /* don't allow older snapshots */
-       InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
+       InvalidateCatalogSnapshot();    /* about to overwrite MyProc->xmin */
        if (HaveRegisteredOrActiveSnapshot())
                elog(ERROR, "cannot build an initial slot snapshot when snapshots exist");
        Assert(!HistoricSnapshotActive());
@@ -1338,8 +1338,8 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
         */
 
        /*
-        * xl_running_xacts record is older than what we can use, we might not have
-        * all necessary catalog rows anymore.
+        * xl_running_xacts record is older than what we can use, we might not
+        * have all necessary catalog rows anymore.
         */
        if (TransactionIdIsNormal(builder->initial_xmin_horizon) &&
                NormalTransactionIdPrecedes(running->oldestRunningXid,
index 0c71ae9ba74788e4ba16634281758f2997637ca8..c56d42dcd2c2c3aec1f8a74e5c33c70fa955d104 100644
@@ -563,7 +563,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
                                 * the lock.
                                 */
                                int                     nsyncworkers =
-                               logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
+                                       logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
 
                                /* Now safe to release the LWLock */
                                LWLockRelease(LogicalRepWorkerLock);
index 4b6709881442de7594d1abcc89cad0e7f301403f..78926f8647bfaabec15bc496a6c8a18e1e67d3fc 100644
@@ -2399,7 +2399,7 @@ apply_handle_insert(StringInfo s)
        LogicalRepRelMapEntry *rel;
        LogicalRepTupleData newtup;
        LogicalRepRelId relid;
-       UserContext             ucxt;
+       UserContext ucxt;
        ApplyExecutionData *edata;
        EState     *estate;
        TupleTableSlot *remoteslot;
@@ -2547,7 +2547,7 @@ apply_handle_update(StringInfo s)
 {
        LogicalRepRelMapEntry *rel;
        LogicalRepRelId relid;
-       UserContext             ucxt;
+       UserContext ucxt;
        ApplyExecutionData *edata;
        EState     *estate;
        LogicalRepTupleData oldtup;
@@ -2732,7 +2732,7 @@ apply_handle_delete(StringInfo s)
        LogicalRepRelMapEntry *rel;
        LogicalRepTupleData oldtup;
        LogicalRepRelId relid;
-       UserContext             ucxt;
+       UserContext ucxt;
        ApplyExecutionData *edata;
        EState     *estate;
        TupleTableSlot *remoteslot;
@@ -3079,8 +3079,8 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
                                        if (map)
                                        {
                                                TupleConversionMap *PartitionToRootMap =
-                                               convert_tuples_by_name(RelationGetDescr(partrel),
-                                                                                          RelationGetDescr(parentrel));
+                                                       convert_tuples_by_name(RelationGetDescr(partrel),
+                                                                                                  RelationGetDescr(parentrel));
 
                                                remoteslot =
                                                        execute_attr_map_slot(PartitionToRootMap->attrMap,
@@ -3414,7 +3414,7 @@ get_flush_position(XLogRecPtr *write, XLogRecPtr *flush,
        dlist_foreach_modify(iter, &lsn_mapping)
        {
                FlushPosition *pos =
-               dlist_container(FlushPosition, node, iter.cur);
+                       dlist_container(FlushPosition, node, iter.cur);
 
                *write = pos->remote_end;
 
@@ -4702,11 +4702,11 @@ ApplyWorkerMain(Datum main_arg)
 
                ereport(DEBUG1,
                                (errmsg_internal("logical replication apply worker for subscription \"%s\" two_phase is %s",
-                                               MySubscription->name,
-                                               MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
-                                               MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
-                                               MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
-                                               "?")));
+                                                                MySubscription->name,
+                                                                MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
+                                                                MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
+                                                                MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
+                                                                "?")));
        }
        else
        {
@@ -5080,10 +5080,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
        }
 
        /*
-        * If we are processing this transaction using a parallel apply worker then
-        * either we send the changes to the parallel worker or if the worker is busy
-        * then serialize the changes to the file which will later be processed by
-        * the parallel worker.
+        * If we are processing this transaction using a parallel apply worker
+        * then either we send the changes to the parallel worker or if the worker
+        * is busy then serialize the changes to the file which will later be
+        * processed by the parallel worker.
         */
        *winfo = pa_find_worker(xid);
 
@@ -5097,9 +5097,10 @@ get_transaction_apply_action(TransactionId xid, ParallelApplyWorkerInfo **winfo)
        }
 
        /*
-        * If there is no parallel worker involved to process this transaction then
-        * we either directly apply the change or serialize it to a file which will
-        * later be applied when the transaction finish message is processed.
+        * If there is no parallel worker involved to process this transaction
+        * then we either directly apply the change or serialize it to a file
+        * which will later be applied when the transaction finish message is
+        * processed.
         */
        else if (in_streamed_transaction)
        {
index f88389de84730184a9dcae9aa5ad6c1d9a40b3aa..b08ca55041750e356441d1ceb4979b7d82e84a7c 100644
@@ -887,8 +887,8 @@ pgoutput_row_filter_init(PGOutputData *data, List *publications,
         * are multiple lists (one for each operation) to which row filters will
         * be appended.
         *
-        * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row
-        * filter expression" so it takes precedence.
+        * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter
+        * expression" so it takes precedence.
         */
        foreach(lc, publications)
        {
index c263a596901894586f525aac528cba256e15db00..0ea71b5c4348e423d256b8a44d640a40467c488f 100644
@@ -330,7 +330,7 @@ static void
 SyncRepQueueInsert(int mode)
 {
        dlist_head *queue;
-       dlist_iter iter;
+       dlist_iter      iter;
 
        Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE);
        queue = &WalSndCtl->SyncRepQueue[mode];
@@ -879,7 +879,7 @@ SyncRepWakeQueue(bool all, int mode)
 
        dlist_foreach_modify(iter, &WalSndCtl->SyncRepQueue[mode])
        {
-               PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
+               PGPROC     *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
 
                /*
                 * Assume the queue is ordered by LSN
index 980dc1816ff20ab54d88aa05c34f3a7b56241ed9..0e4f76efa827716e24102fcc7201263431e677c6 100644
@@ -3548,7 +3548,7 @@ rewriteTargetView(Query *parsetree, Relation view)
                if (parsetree->withCheckOptions != NIL)
                {
                        WithCheckOption *parent_wco =
-                       (WithCheckOption *) linitial(parsetree->withCheckOptions);
+                               (WithCheckOption *) linitial(parsetree->withCheckOptions);
 
                        if (parent_wco->cascaded)
                        {
index 569c1c94679cc561ed6f5fa7fbce66f751bde5cf..5c3fe4eda285f7c746d9188a1c8d646df6b6a1cd 100644
@@ -581,7 +581,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id,
        if (row_security_policy_hook_restrictive)
        {
                List       *hook_policies =
-               (*row_security_policy_hook_restrictive) (cmd, relation);
+                       (*row_security_policy_hook_restrictive) (cmd, relation);
 
                /*
                 * As with built-in restrictive policies, we sort any hook-provided
@@ -603,7 +603,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id,
        if (row_security_policy_hook_permissive)
        {
                List       *hook_policies =
-               (*row_security_policy_hook_permissive) (cmd, relation);
+                       (*row_security_policy_hook_permissive) (cmd, relation);
 
                foreach(item, hook_policies)
                {
index f4b58ada1cb14898ca7a4358598cb214e5938925..35d1cd9621b0d7308d97d7a34ae1975b48a99b6c 100644
@@ -10,34 +10,34 @@ my $input_path = '';
 my $depfile;
 
 our @languages = qw(
-       arabic
-       armenian
-       basque
-       catalan
-       danish
-       dutch
-       english
-       finnish
-       french
-       german
-       greek
-       hindi
-       hungarian
-       indonesian
-       irish
-       italian
-       lithuanian
-       nepali
-       norwegian
-       portuguese
-       romanian
-       russian
-       serbian
-       spanish
-       swedish
-       tamil
-       turkish
-       yiddish
+  arabic
+  armenian
+  basque
+  catalan
+  danish
+  dutch
+  english
+  finnish
+  french
+  german
+  greek
+  hindi
+  hungarian
+  indonesian
+  irish
+  italian
+  lithuanian
+  nepali
+  norwegian
+  portuguese
+  romanian
+  russian
+  serbian
+  spanish
+  swedish
+  tamil
+  turkish
+  yiddish
 );
 
 # Names of alternative dictionaries for all-ASCII words.  If not
@@ -48,13 +48,12 @@ our @languages = qw(
 
 our %ascii_languages = (
        'hindi' => 'english',
-       'russian' => 'english',
-);
+       'russian' => 'english',);
 
 GetOptions(
-       'depfile'    => \$depfile,
-       'outdir:s'   => \$outdir_path,
-       'input:s'    => \$input_path) || usage();
+       'depfile' => \$depfile,
+       'outdir:s' => \$outdir_path,
+       'input:s' => \$input_path) || usage();
 
 # Make sure input_path ends in a slash if needed.
 if ($input_path ne '' && substr($input_path, -1) ne '/')
@@ -110,8 +109,8 @@ sub GenerateTsearchFiles
        foreach my $lang (@languages)
        {
                my $asclang = $ascii_languages{$lang} || $lang;
-               my $txt     = $tmpl;
-               my $stop    = '';
+               my $txt = $tmpl;
+               my $stop = '';
                my $stopword_path = "$input_path/stopwords/$lang.stop";
 
                if (-s "$stopword_path")
index 54e3bb4aa2746874ec4bada491ccb5619968ce15..28b52d8aa1f88f9087cbe3617e7a57f74bf30cb2 100644
@@ -2237,8 +2237,8 @@ compute_expr_stats(Relation onerel, double totalrows,
                if (tcnt > 0)
                {
                        AttributeOpts *aopt =
-                       get_attribute_options(stats->attr->attrelid,
-                                                                 stats->attr->attnum);
+                               get_attribute_options(stats->attr->attrelid,
+                                                                         stats->attr->attnum);
 
                        stats->exprvals = exprvals;
                        stats->exprnulls = exprnulls;
index 0bbf09564acc23109526939d0f137480bcbbfd1f..aafec4a09d5851fda155d568766b10f4d37c4240 100644
@@ -2667,7 +2667,7 @@ BufferSync(int flags)
        {
                BufferDesc *bufHdr = NULL;
                CkptTsStatus *ts_stat = (CkptTsStatus *)
-               DatumGetPointer(binaryheap_first(ts_heap));
+                       DatumGetPointer(binaryheap_first(ts_heap));
 
                buf_id = CkptBufferIds[ts_stat->index].buf_id;
                Assert(buf_id != -1);
index 84ead85942abdfa3563148113f078411a85798ab..41ab64100e3b273319725121d04d14e52b018513 100644
@@ -98,8 +98,7 @@ struct BufFile
 
        /*
         * XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid
-        * wasting per-file alignment padding when some users create many
-        * files.
+        * wasting per-file alignment padding when some users create many files.
         */
        PGAlignedBlock buffer;
 };
index f0965c3481612d5bdf7156f7ec89721466c05e5b..6399fa2ad514218f9c429bc5f71108b086f76ffb 100644
@@ -357,14 +357,15 @@ dsm_impl_posix_resize(int fd, off_t size)
        /*
         * Block all blockable signals, except SIGQUIT.  posix_fallocate() can run
         * for quite a long time, and is an all-or-nothing operation.  If we
-        * allowed SIGUSR1 to interrupt us repeatedly (for example, due to recovery
-        * conflicts), the retry loop might never succeed.
+        * allowed SIGUSR1 to interrupt us repeatedly (for example, due to
+        * recovery conflicts), the retry loop might never succeed.
         */
        if (IsUnderPostmaster)
                sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask);
 
        pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE);
 #if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__)
+
        /*
         * On Linux, a shm_open fd is backed by a tmpfs file.  If we were to use
         * ftruncate, the file would contain a hole.  Accessing memory backed by a
@@ -374,8 +375,8 @@ dsm_impl_posix_resize(int fd, off_t size)
         * SIGBUS later.
         *
         * We still use a traditional EINTR retry loop to handle SIGCONT.
-        * posix_fallocate() doesn't restart automatically, and we don't want
-        * this to fail if you attach a debugger.
+        * posix_fallocate() doesn't restart automatically, and we don't want this
+        * to fail if you attach a debugger.
         */
        do
        {
@@ -383,9 +384,9 @@ dsm_impl_posix_resize(int fd, off_t size)
        } while (rc == EINTR);
 
        /*
-        * The caller expects errno to be set, but posix_fallocate() doesn't
-        * set it.  Instead it returns error numbers directly.  So set errno,
-        * even though we'll also return rc to indicate success or failure.
+        * The caller expects errno to be set, but posix_fallocate() doesn't set
+        * it.  Instead it returns error numbers directly.  So set errno, even
+        * though we'll also return rc to indicate success or failure.
         */
        errno = rc;
 #else
index c124f49d800173b8ca09729da00239c3803f8481..863c88252b2ce224ebff50e38617b8142eabadd4 100644
@@ -10,10 +10,9 @@ use Getopt::Long;
 my $output_path = '.';
 
 my $lastlockidx = -1;
-my $continue    = "\n";
+my $continue = "\n";
 
-GetOptions(
-       'outdir:s'       => \$output_path);
+GetOptions('outdir:s' => \$output_path);
 
 open my $lwlocknames, '<', $ARGV[0] or die;
 
@@ -48,7 +47,7 @@ while (<$lwlocknames>)
        $trimmedlockname =~ s/Lock$//;
        die "lock names must end with 'Lock'" if $trimmedlockname eq $lockname;
 
-       die "lwlocknames.txt not in order"   if $lockidx < $lastlockidx;
+       die "lwlocknames.txt not in order" if $lockidx < $lastlockidx;
        die "lwlocknames.txt has duplicates" if $lockidx == $lastlockidx;
 
        while ($lastlockidx < $lockidx - 1)
@@ -59,7 +58,7 @@ while (<$lwlocknames>)
        }
        printf $c "%s   \"%s\"", $continue, $trimmedlockname;
        $lastlockidx = $lockidx;
-       $continue    = ",\n";
+       $continue = ",\n";
 
        print $h "#define $lockname (&MainLWLockArray[$lockidx].lock)\n";
 }
@@ -71,7 +70,8 @@ printf $h "#define NUM_INDIVIDUAL_LWLOCKS             %s\n", $lastlockidx + 1;
 close $h;
 close $c;
 
-rename($htmp, "$output_path/lwlocknames.h") || die "rename: $htmp to $output_path/lwlocknames.h: $!";
+rename($htmp, "$output_path/lwlocknames.h")
+  || die "rename: $htmp to $output_path/lwlocknames.h: $!";
 rename($ctmp, "$output_path/lwlocknames.c") || die "rename: $ctmp: $!";
 
 close $lwlocknames;
index 42595b38b2c0086d4014e463794c73d889679dc9..193f50fc0f41f1b56909a556571babed3a6bc322 100644
@@ -3936,6 +3936,7 @@ GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
        dclist_foreach(proc_iter, waitQueue)
        {
                PGPROC     *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
+
                if (queued_proc == blocked_proc)
                        break;
                data->waiter_pids[data->npids++] = queued_proc->pid;
index 59347ab9518080ec6acc87f5b975d88497c58b52..01d738f306bb8d62c0aa58ba279392426d3f1e79 100644
@@ -1118,9 +1118,9 @@ LWLockDequeueSelf(LWLock *lock)
        LWLockWaitListLock(lock);
 
        /*
-        * Remove ourselves from the waitlist, unless we've already been
-        * removed. The removal happens with the wait list lock held, so there's
-        * no race in this check.
+        * Remove ourselves from the waitlist, unless we've already been removed.
+        * The removal happens with the wait list lock held, so there's no race in
+        * this check.
         */
        on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
        if (on_waitlist)
index 203b189559d48df1c299efbacc2e5a5814d53ad2..533f6165412ae0c975d2841c57951fa88e27bf13 100644
@@ -625,7 +625,7 @@ RWConflictExists(const SERIALIZABLEXACT *reader, const SERIALIZABLEXACT *writer)
        dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->outConflicts)
        {
                RWConflict      conflict =
-               dlist_container(RWConflictData, outLink, iter.cur);
+                       dlist_container(RWConflictData, outLink, iter.cur);
 
                if (conflict->sxactIn == writer)
                        return true;
@@ -708,7 +708,7 @@ FlagSxactUnsafe(SERIALIZABLEXACT *sxact)
        dlist_foreach_modify(iter, &sxact->possibleUnsafeConflicts)
        {
                RWConflict      conflict =
-               dlist_container(RWConflictData, inLink, iter.cur);
+                       dlist_container(RWConflictData, inLink, iter.cur);
 
                Assert(!SxactIsReadOnly(conflict->sxactOut));
                Assert(sxact == conflict->sxactIn);
@@ -1587,7 +1587,7 @@ GetSafeSnapshotBlockingPids(int blocked_pid, int *output, int output_size)
                dlist_foreach(iter, &blocking_sxact->possibleUnsafeConflicts)
                {
                        RWConflict      possibleUnsafeConflict =
-                       dlist_container(RWConflictData, inLink, iter.cur);
+                               dlist_container(RWConflictData, inLink, iter.cur);
 
                        output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
 
@@ -1825,8 +1825,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
                /*
                 * If we didn't find any possibly unsafe conflicts because every
                 * uncommitted writable transaction turned out to be doomed, then we
-                * can "opt out" immediately.  See comments above the earlier check for
-                * PredXact->WritableSxactCount == 0.
+                * can "opt out" immediately.  See comments above the earlier check
+                * for PredXact->WritableSxactCount == 0.
                 */
                if (dlist_is_empty(&sxact->possibleUnsafeConflicts))
                {
@@ -2613,7 +2613,7 @@ DeleteLockTarget(PREDICATELOCKTARGET *target, uint32 targettaghash)
        dlist_foreach_modify(iter, &target->predicateLocks)
        {
                PREDICATELOCK *predlock =
-               dlist_container(PREDICATELOCK, targetLink, iter.cur);
+                       dlist_container(PREDICATELOCK, targetLink, iter.cur);
                bool            found;
 
                dlist_delete(&(predlock->xactLink));
@@ -2754,7 +2754,7 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
                dlist_foreach_modify(iter, &oldtarget->predicateLocks)
                {
                        PREDICATELOCK *oldpredlock =
-                       dlist_container(PREDICATELOCK, targetLink, iter.cur);
+                               dlist_container(PREDICATELOCK, targetLink, iter.cur);
                        PREDICATELOCK *newpredlock;
                        SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
 
@@ -2976,7 +2976,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
                dlist_foreach_modify(iter, &oldtarget->predicateLocks)
                {
                        PREDICATELOCK *oldpredlock =
-                       dlist_container(PREDICATELOCK, targetLink, iter.cur);
+                               dlist_container(PREDICATELOCK, targetLink, iter.cur);
                        PREDICATELOCK *newpredlock;
                        SerCommitSeqNo oldCommitSeqNo;
                        SERIALIZABLEXACT *oldXact;
@@ -3194,7 +3194,7 @@ SetNewSxactGlobalXmin(void)
        dlist_foreach(iter, &PredXact->activeList)
        {
                SERIALIZABLEXACT *sxact =
-               dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
+                       dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
 
                if (!SxactIsRolledBack(sxact)
                        && !SxactIsCommitted(sxact)
@@ -3440,7 +3440,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
                dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts)
                {
                        RWConflict      possibleUnsafeConflict =
-                       dlist_container(RWConflictData, inLink, iter.cur);
+                               dlist_container(RWConflictData, inLink, iter.cur);
 
                        Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
                        Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);
@@ -3471,7 +3471,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
        dlist_foreach_modify(iter, &MySerializableXact->outConflicts)
        {
                RWConflict      conflict =
-               dlist_container(RWConflictData, outLink, iter.cur);
+                       dlist_container(RWConflictData, outLink, iter.cur);
 
                if (isCommit
                        && !SxactIsReadOnly(MySerializableXact)
@@ -3496,7 +3496,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
        dlist_foreach_modify(iter, &MySerializableXact->inConflicts)
        {
                RWConflict      conflict =
-               dlist_container(RWConflictData, inLink, iter.cur);
+                       dlist_container(RWConflictData, inLink, iter.cur);
 
                if (!isCommit
                        || SxactIsCommitted(conflict->sxactOut)
@@ -3515,7 +3515,7 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
                dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts)
                {
                        RWConflict      possibleUnsafeConflict =
-                       dlist_container(RWConflictData, outLink, iter.cur);
+                               dlist_container(RWConflictData, outLink, iter.cur);
 
                        roXact = possibleUnsafeConflict->sxactIn;
                        Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
@@ -3564,8 +3564,8 @@ ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
         * xmin and purge any transactions which finished before this transaction
         * was launched.
         *
-        * For parallel queries in read-only transactions, it might run twice.
-        * We only release the reference on the first call.
+        * For parallel queries in read-only transactions, it might run twice. We
+        * only release the reference on the first call.
         */
        needToClear = false;
        if ((partiallyReleasing ||
@@ -3641,7 +3641,7 @@ ClearOldPredicateLocks(void)
        dlist_foreach_modify(iter, FinishedSerializableTransactions)
        {
                SERIALIZABLEXACT *finishedSxact =
-               dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur);
+                       dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur);
 
                if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
                        || TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
@@ -3700,7 +3700,7 @@ ClearOldPredicateLocks(void)
        dlist_foreach_modify(iter, &OldCommittedSxact->predicateLocks)
        {
                PREDICATELOCK *predlock =
-               dlist_container(PREDICATELOCK, xactLink, iter.cur);
+                       dlist_container(PREDICATELOCK, xactLink, iter.cur);
                bool            canDoPartialCleanup;
 
                LWLockAcquire(SerializableXactHashLock, LW_SHARED);
@@ -3787,7 +3787,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
        dlist_foreach_modify(iter, &sxact->predicateLocks)
        {
                PREDICATELOCK *predlock =
-               dlist_container(PREDICATELOCK, xactLink, iter.cur);
+                       dlist_container(PREDICATELOCK, xactLink, iter.cur);
                PREDICATELOCKTAG tag;
                PREDICATELOCKTARGET *target;
                PREDICATELOCKTARGETTAG targettag;
@@ -3864,7 +3864,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
                dlist_foreach_modify(iter, &sxact->outConflicts)
                {
                        RWConflict      conflict =
-                       dlist_container(RWConflictData, outLink, iter.cur);
+                               dlist_container(RWConflictData, outLink, iter.cur);
 
                        if (summarize)
                                conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
@@ -3876,7 +3876,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
        dlist_foreach_modify(iter, &sxact->inConflicts)
        {
                RWConflict      conflict =
-               dlist_container(RWConflictData, inLink, iter.cur);
+                       dlist_container(RWConflictData, inLink, iter.cur);
 
                if (summarize)
                        conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
@@ -4134,7 +4134,7 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
        dlist_foreach_modify(iter, &target->predicateLocks)
        {
                PREDICATELOCK *predlock =
-               dlist_container(PREDICATELOCK, targetLink, iter.cur);
+                       dlist_container(PREDICATELOCK, targetLink, iter.cur);
                SERIALIZABLEXACT *sxact = predlock->tag.myXact;
 
                if (sxact == MySerializableXact)
@@ -4407,7 +4407,7 @@ CheckTableForSerializableConflictIn(Relation relation)
                dlist_foreach_modify(iter, &target->predicateLocks)
                {
                        PREDICATELOCK *predlock =
-                       dlist_container(PREDICATELOCK, targetLink, iter.cur);
+                               dlist_container(PREDICATELOCK, targetLink, iter.cur);
 
                        if (predlock->tag.myXact != MySerializableXact
                                && !RWConflictExists(predlock->tag.myXact, MySerializableXact))
@@ -4519,7 +4519,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
                dlist_foreach(iter, &writer->outConflicts)
                {
                        RWConflict      conflict =
-                       dlist_container(RWConflictData, outLink, iter.cur);
+                               dlist_container(RWConflictData, outLink, iter.cur);
                        SERIALIZABLEXACT *t2 = conflict->sxactIn;
 
                        if (SxactIsPrepared(t2)
@@ -4566,7 +4566,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
                        dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->inConflicts)
                        {
                                const RWConflict conflict =
-                               dlist_container(RWConflictData, inLink, iter.cur);
+                                       dlist_container(RWConflictData, inLink, iter.cur);
                                const SERIALIZABLEXACT *t0 = conflict->sxactOut;
 
                                if (!SxactIsDoomed(t0)
@@ -4664,7 +4664,7 @@ PreCommit_CheckForSerializationFailure(void)
        dlist_foreach(near_iter, &MySerializableXact->inConflicts)
        {
                RWConflict      nearConflict =
-               dlist_container(RWConflictData, inLink, near_iter.cur);
+                       dlist_container(RWConflictData, inLink, near_iter.cur);
 
                if (!SxactIsCommitted(nearConflict->sxactOut)
                        && !SxactIsDoomed(nearConflict->sxactOut))
@@ -4674,7 +4674,7 @@ PreCommit_CheckForSerializationFailure(void)
                        dlist_foreach(far_iter, &nearConflict->sxactOut->inConflicts)
                        {
                                RWConflict      farConflict =
-                               dlist_container(RWConflictData, inLink, far_iter.cur);
+                                       dlist_container(RWConflictData, inLink, far_iter.cur);
 
                                if (farConflict->sxactOut == MySerializableXact
                                        || (!SxactIsCommitted(farConflict->sxactOut)
@@ -4770,7 +4770,7 @@ AtPrepare_PredicateLocks(void)
        dlist_foreach(iter, &sxact->predicateLocks)
        {
                PREDICATELOCK *predlock =
-               dlist_container(PREDICATELOCK, xactLink, iter.cur);
+                       dlist_container(PREDICATELOCK, xactLink, iter.cur);
 
                record.type = TWOPHASEPREDICATERECORD_LOCK;
                lockRecord->target = predlock->tag.myTarget->tag;
index 22b4278610c03dfbbb2053f3b18582356a3482f2..dac921219fae2a5eae166578720661d89a81c506 100644 (file)
@@ -101,7 +101,7 @@ ProcGlobalShmemSize(void)
 {
        Size            size = 0;
        Size            TotalProcs =
-       add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
+               add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
 
        /* ProcGlobal */
        size = add_size(size, sizeof(PROC_HDR));
@@ -331,7 +331,7 @@ InitProcess(void)
 
        if (!dlist_is_empty(procgloballist))
        {
-               MyProc = (PGPROC*) dlist_pop_head_node(procgloballist);
+               MyProc = (PGPROC *) dlist_pop_head_node(procgloballist);
                SpinLockRelease(ProcStructLock);
        }
        else
@@ -1009,7 +1009,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
        uint32          hashcode = locallock->hashcode;
        LWLock     *partitionLock = LockHashPartitionLock(hashcode);
        dclist_head *waitQueue = &lock->waitProcs;
-       PGPROC     *insert_before = NULL;
+       PGPROC     *insert_before = NULL;
        LOCKMASK        myHeldLocks = MyProc->heldLocks;
        TimestampTz standbyWaitStart = 0;
        bool            early_deadlock = false;
@@ -1244,7 +1244,7 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
                if (InHotStandby)
                {
                        bool            maybe_log_conflict =
-                       (standbyWaitStart != 0 && !logged_recovery_conflict);
+                               (standbyWaitStart != 0 && !logged_recovery_conflict);
 
                        /* Set a timer and wait for that or for the lock to be granted */
                        ResolveRecoveryConflictWithLock(locallock->tag.lock,
index 42e350125525a4e813b654d110e459bc151b9de1..65bb22541c6676167e5efed8ec884db8416ee3c7 100644 (file)
@@ -549,7 +549,7 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum,
 
        while (remblocks > 0)
        {
-               BlockNumber     segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
+               BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
                off_t           seekpos = (off_t) BLCKSZ * segstartblock;
                int                     numblocks;
 
@@ -597,9 +597,9 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum,
                        /*
                         * Even if we don't want to use fallocate, we can still extend a
                         * bit more efficiently than writing each 8kB block individually.
-                        * pg_pwrite_zeros() (via FileZero()) uses
-                        * pg_pwritev_with_retry() to avoid multiple writes or needing a
-                        * zeroed buffer for the whole length of the extension.
+                        * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry()
+                        * to avoid multiple writes or needing a zeroed buffer for the
+                        * whole length of the extension.
                         */
                        ret = FileZero(v->mdfd_vfd,
                                                   seekpos, (off_t) BLCKSZ * numblocks,
index fe4fd3a929b16432f004e15ddff141e8bffaa6bf..8a2cb55876921611aed2e535e8e8b67bb4364c30 100644 (file)
@@ -2256,7 +2256,7 @@ NormalizeSubWord(IspellDict *Conf, char *word, int flag)
                                                {
                                                        /* prefix success */
                                                        char       *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ?
-                                                       VoidString : prefix->aff[j]->flag;
+                                                               VoidString : prefix->aff[j]->flag;
 
                                                        if (FindWord(Conf, pnewword, ff, flag))
                                                                cur += addToResult(forms, cur, pnewword);
index 668b915a4d57016891cf85a2af3befe2656f3d82..f289b19344beec0417890682dffacada0e0e7ed4 100644 (file)
@@ -44,7 +44,7 @@ sub Run()
 
        # Initialize.
        openARGV();
-       $Hold    = '';
+       $Hold = '';
        $CondReg = 0;
        $doPrint = $doAutoPrint;
   CYCLE:
index 2792373fedacbb7dfdbb8f910e055eb3dd4c51d8..764216c56dd392735ad4397f78ae5f814a2092e7 100644 (file)
@@ -24,7 +24,7 @@ my $output_path = '';
 my $include_path;
 
 GetOptions(
-       'output:s'       => \$output_path,
+       'output:s' => \$output_path,
        'include-path:s' => \$include_path) || usage();
 
 # Make sure output_path ends in a slash.
@@ -34,7 +34,7 @@ if ($output_path ne '' && substr($output_path, -1) ne '/')
 }
 
 # Sanity check arguments.
-die "No input files.\n"                   unless @ARGV;
+die "No input files.\n" unless @ARGV;
 die "--include-path must be specified.\n" unless $include_path;
 
 # Read all the input files into internal data structures.
@@ -56,7 +56,7 @@ foreach my $datfile (@ARGV)
 
        my $catalog = Catalog::ParseHeader($header);
        my $catname = $catalog->{catname};
-       my $schema  = $catalog->{columns};
+       my $schema = $catalog->{columns};
 
        $catalogs{$catname} = $catalog;
        $catalog_data{$catname} = Catalog::ParseData($datfile, $schema, 0);
@@ -72,14 +72,14 @@ foreach my $row (@{ $catalog_data{pg_proc} })
 
        push @fmgr,
          {
-               oid    => $bki_values{oid},
-               name   => $bki_values{proname},
-               lang   => $bki_values{prolang},
-               kind   => $bki_values{prokind},
+               oid => $bki_values{oid},
+               name => $bki_values{proname},
+               lang => $bki_values{prolang},
+               kind => $bki_values{prokind},
                strict => $bki_values{proisstrict},
                retset => $bki_values{proretset},
-               nargs  => $bki_values{pronargs},
-               args   => $bki_values{proargtypes},
+               nargs => $bki_values{pronargs},
+               args => $bki_values{proargtypes},
                prosrc => $bki_values{prosrc},
          };
 
@@ -88,10 +88,10 @@ foreach my $row (@{ $catalog_data{pg_proc} })
 }
 
 # Emit headers for both files
-my $tmpext     = ".tmp$$";
-my $oidsfile   = $output_path . 'fmgroids.h';
+my $tmpext = ".tmp$$";
+my $oidsfile = $output_path . 'fmgroids.h';
 my $protosfile = $output_path . 'fmgrprotos.h';
-my $tabfile    = $output_path . 'fmgrtab.c';
+my $tabfile = $output_path . 'fmgrtab.c';
 
 open my $ofh, '>', $oidsfile . $tmpext
   or die "Could not open $oidsfile$tmpext: $!";
@@ -213,7 +213,8 @@ $bmap{'t'} = 'true';
 $bmap{'f'} = 'false';
 my @fmgr_builtin_oid_index;
 my $last_builtin_oid = 0;
-my $fmgr_count       = 0;
+my $fmgr_count = 0;
+
 foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
 {
        next if $s->{lang} ne 'internal';
@@ -273,9 +274,9 @@ close($pfh);
 close($tfh);
 
 # Finally, rename the completed files into place.
-Catalog::RenameTempFile($oidsfile,   $tmpext);
+Catalog::RenameTempFile($oidsfile, $tmpext);
 Catalog::RenameTempFile($protosfile, $tmpext);
-Catalog::RenameTempFile($tabfile,    $tmpext);
+Catalog::RenameTempFile($tabfile, $tmpext);
 
 sub usage
 {
index f6edfc76ac4be9d8246dcc0c83d0ee282ed69d25..0cdb552631ee2dc51b60c6a31a6cd4f80d52320f 100644 (file)
@@ -1186,7 +1186,7 @@ pgstat_flush_pending_entries(bool nowait)
        while (cur)
        {
                PgStat_EntryRef *entry_ref =
-               dlist_container(PgStat_EntryRef, pending_node, cur);
+                       dlist_container(PgStat_EntryRef, pending_node, cur);
                PgStat_HashKey key = entry_ref->shared_entry->key;
                PgStat_Kind kind = key.kind;
                const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
index 09fffd0e82ad7e7c24e1f7744bfa7c546413978d..d1149adf70272766e8ce1d14b69af0c2a0be931e 100644 (file)
@@ -865,7 +865,7 @@ pgstat_drop_entry(PgStat_Kind kind, Oid dboid, Oid objoid)
        if (pgStatEntryRefHash)
        {
                PgStat_EntryRefHashEntry *lohashent =
-               pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);
+                       pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);
 
                if (lohashent)
                        pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref,
index 91cdd9222e31c22d4e25f7ce5a354642601e7828..369239d5014c5c8205ce4b1875d16131f5ab7b7a 100644 (file)
@@ -76,7 +76,7 @@ AtEOXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state, bool isCommit)
        dclist_foreach_modify(iter, &xact_state->pending_drops)
        {
                PgStat_PendingDroppedStatsItem *pending =
-               dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+                       dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
                xl_xact_stats_item *it = &pending->item;
 
                if (isCommit && !pending->is_create)
@@ -148,7 +148,7 @@ AtEOSubXact_PgStat_DroppedStats(PgStat_SubXactStatus *xact_state,
        dclist_foreach_modify(iter, &xact_state->pending_drops)
        {
                PgStat_PendingDroppedStatsItem *pending =
-               dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+                       dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
                xl_xact_stats_item *it = &pending->item;
 
                dclist_delete_from(&xact_state->pending_drops, &pending->node);
@@ -290,7 +290,7 @@ pgstat_get_transactional_drops(bool isCommit, xl_xact_stats_item **items)
        dclist_foreach(iter, &xact_state->pending_drops)
        {
                PgStat_PendingDroppedStatsItem *pending =
-               dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+                       dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
 
                if (isCommit && pending->is_create)
                        continue;
@@ -335,7 +335,7 @@ create_drop_transactional_internal(PgStat_Kind kind, Oid dboid, Oid objoid, bool
        int                     nest_level = GetCurrentTransactionNestLevel();
        PgStat_SubXactStatus *xact_state;
        PgStat_PendingDroppedStatsItem *drop = (PgStat_PendingDroppedStatsItem *)
-       MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem));
+               MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem));
 
        xact_state = pgstat_get_xact_stack_level(nest_level);
 
index be2e55bb29fa21e8081d022300487564f8b5124a..5d8d583ddcb7b98d319dd9c698316fcf03d2e5fa 100644 (file)
@@ -4482,17 +4482,17 @@ EncodeInterval(struct pg_itm *itm, int style, char *str)
                case INTSTYLE_SQL_STANDARD:
                        {
                                bool            has_negative = year < 0 || mon < 0 ||
-                               mday < 0 || hour < 0 ||
-                               min < 0 || sec < 0 || fsec < 0;
+                                       mday < 0 || hour < 0 ||
+                                       min < 0 || sec < 0 || fsec < 0;
                                bool            has_positive = year > 0 || mon > 0 ||
-                               mday > 0 || hour > 0 ||
-                               min > 0 || sec > 0 || fsec > 0;
+                                       mday > 0 || hour > 0 ||
+                                       min > 0 || sec > 0 || fsec > 0;
                                bool            has_year_month = year != 0 || mon != 0;
                                bool            has_day_time = mday != 0 || hour != 0 ||
-                               min != 0 || sec != 0 || fsec != 0;
+                                       min != 0 || sec != 0 || fsec != 0;
                                bool            has_day = mday != 0;
                                bool            sql_standard_value = !(has_negative && has_positive) &&
-                               !(has_year_month && has_day_time);
+                                       !(has_year_month && has_day_time);
 
                                /*
                                 * SQL Standard wants only 1 "<sign>" preceding the whole
index 9b51da2382d4ef520af028e191c620edb6a974a7..dfa90a04fb2b154ad1c1ea397c05ef250626c27d 100644 (file)
@@ -189,8 +189,7 @@ float4in_internal(char *num, char **endptr_p,
        /*
         * endptr points to the first character _after_ the sequence we recognized
         * as a valid floating point number. orig_string points to the original
-        * input
-        * string.
+        * input string.
         */
 
        /* skip leading whitespace */
index 4c5abaff257e2537bff0fcf43d77b52832298ab6..70cb922e6b7aa45629483bb64d3380d07a1e81a0 100644 (file)
@@ -3219,9 +3219,9 @@ static RecordIOData *
 allocate_record_info(MemoryContext mcxt, int ncolumns)
 {
        RecordIOData *data = (RecordIOData *)
-       MemoryContextAlloc(mcxt,
-                                          offsetof(RecordIOData, columns) +
-                                          ncolumns * sizeof(ColumnIOData));
+               MemoryContextAlloc(mcxt,
+                                                  offsetof(RecordIOData, columns) +
+                                                  ncolumns * sizeof(ColumnIOData));
 
        data->record_type = InvalidOid;
        data->record_typmod = 0;
index 0021b0183067ce17c077265413f1c5a1a0e49b50..7891fde3105a56ec9d14950329830b5038a9ccb3 100644 (file)
@@ -76,7 +76,7 @@
 static Datum jsonPathFromCstring(char *in, int len, struct Node *escontext);
 static char *jsonPathToCstring(StringInfo out, JsonPath *in,
                                                           int estimated_len);
-static bool    flattenJsonPathParseItem(StringInfo buf, int *result,
+static bool flattenJsonPathParseItem(StringInfo buf, int *result,
                                                                         struct Node *escontext,
                                                                         JsonPathParseItem *item,
                                                                         int nestingLevel, bool insideArraySubscript);
@@ -234,7 +234,7 @@ jsonPathToCstring(StringInfo out, JsonPath *in, int estimated_len)
  * children into a binary representation.
  */
 static bool
-flattenJsonPathParseItem(StringInfo buf,  int *result, struct Node *escontext,
+flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
                                                 JsonPathParseItem *item, int nestingLevel,
                                                 bool insideArraySubscript)
 {
@@ -306,19 +306,19 @@ flattenJsonPathParseItem(StringInfo buf,  int *result, struct Node *escontext,
 
                                if (!item->value.args.left)
                                        chld = pos;
-                               else if (! flattenJsonPathParseItem(buf, &chld, escontext,
-                                                                                                       item->value.args.left,
-                                                                                                       nestingLevel + argNestingLevel,
-                                                                                                       insideArraySubscript))
+                               else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+                                                                                                  item->value.args.left,
+                                                                                                  nestingLevel + argNestingLevel,
+                                                                                                  insideArraySubscript))
                                        return false;
                                *(int32 *) (buf->data + left) = chld - pos;
 
                                if (!item->value.args.right)
                                        chld = pos;
-                               else if (! flattenJsonPathParseItem(buf, &chld, escontext,
-                                                                                                       item->value.args.right,
-                                                                                                       nestingLevel + argNestingLevel,
-                                                                                                       insideArraySubscript))
+                               else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+                                                                                                  item->value.args.right,
+                                                                                                  nestingLevel + argNestingLevel,
+                                                                                                  insideArraySubscript))
                                        return false;
                                *(int32 *) (buf->data + right) = chld - pos;
                        }
@@ -338,10 +338,10 @@ flattenJsonPathParseItem(StringInfo buf,  int *result, struct Node *escontext,
                                                                           item->value.like_regex.patternlen);
                                appendStringInfoChar(buf, '\0');
 
-                               if (! flattenJsonPathParseItem(buf, &chld, escontext,
-                                                                                          item->value.like_regex.expr,
-                                                                                          nestingLevel,
-                                                                                          insideArraySubscript))
+                               if (!flattenJsonPathParseItem(buf, &chld, escontext,
+                                                                                         item->value.like_regex.expr,
+                                                                                         nestingLevel,
+                                                                                         insideArraySubscript))
                                        return false;
                                *(int32 *) (buf->data + offs) = chld - pos;
                        }
@@ -360,10 +360,10 @@ flattenJsonPathParseItem(StringInfo buf,  int *result, struct Node *escontext,
 
                                if (!item->value.arg)
                                        chld = pos;
-                               else if (! flattenJsonPathParseItem(buf, &chld, escontext,
-                                                                                                       item->value.arg,
-                                                                                                       nestingLevel + argNestingLevel,
-                                                                                                       insideArraySubscript))
+                               else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+                                                                                                  item->value.arg,
+                                                                                                  nestingLevel + argNestingLevel,
+                                                                                                  insideArraySubscript))
                                        return false;
                                *(int32 *) (buf->data + arg) = chld - pos;
                        }
@@ -405,17 +405,17 @@ flattenJsonPathParseItem(StringInfo buf,  int *result, struct Node *escontext,
                                        int32           topos;
                                        int32           frompos;
 
-                                       if (! flattenJsonPathParseItem(buf, &frompos, escontext,
-                                                                                                  item->value.array.elems[i].from,
-                                                                                                  nestingLevel, true))
+                                       if (!flattenJsonPathParseItem(buf, &frompos, escontext,
+                                                                                                 item->value.array.elems[i].from,
+                                                                                                 nestingLevel, true))
                                                return false;
                                        frompos -= pos;
 
                                        if (item->value.array.elems[i].to)
                                        {
-                                               if (! flattenJsonPathParseItem(buf, &topos, escontext,
-                                                                                                          item->value.array.elems[i].to,
-                                                                                                          nestingLevel, true))
+                                               if (!flattenJsonPathParseItem(buf, &topos, escontext,
+                                                                                                         item->value.array.elems[i].to,
+                                                                                                         nestingLevel, true))
                                                        return false;
                                                topos -= pos;
                                        }
@@ -451,9 +451,9 @@ flattenJsonPathParseItem(StringInfo buf,  int *result, struct Node *escontext,
 
        if (item->next)
        {
-               if (! flattenJsonPathParseItem(buf, &chld, escontext,
-                                                                          item->next, nestingLevel,
-                                                                          insideArraySubscript))
+               if (!flattenJsonPathParseItem(buf, &chld, escontext,
+                                                                         item->next, nestingLevel,
+                                                                         insideArraySubscript))
                        return false;
                chld -= pos;
                *(int32 *) (buf->data + next) = chld;
index b561f0e7e803f0e5a546ad118a47f625225b9708..41430bab7ed22fd7ec72161ea2f7c4659d8b895a 100644 (file)
@@ -1326,8 +1326,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp,
                                 */
                                JsonValueList vals = {0};
                                JsonPathExecResult res =
-                               executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
-                                                                                                 false, &vals);
+                                       executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
+                                                                                                         false, &vals);
 
                                if (jperIsError(res))
                                        return jpbUnknown;
@@ -1337,8 +1337,8 @@ executeBoolItem(JsonPathExecContext *cxt, JsonPathItem *jsp,
                        else
                        {
                                JsonPathExecResult res =
-                               executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
-                                                                                                 false, NULL);
+                                       executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
+                                                                                                         false, NULL);
 
                                if (jperIsError(res))
                                        return jpbUnknown;
@@ -1869,7 +1869,7 @@ executeDateTimeMethod(JsonPathExecContext *cxt, JsonPathItem *jsp,
                        if (!fmt_txt[i])
                        {
                                MemoryContext oldcxt =
-                               MemoryContextSwitchTo(TopMemoryContext);
+                                       MemoryContextSwitchTo(TopMemoryContext);
 
                                fmt_txt[i] = cstring_to_text(fmt_str[i]);
                                MemoryContextSwitchTo(oldcxt);
index 2e12de038c3d335304980d5ccf4b89205c416f7a..90eea6e96162bdd106456a8689d338e93ad78242 100644 (file)
@@ -20,7 +20,7 @@ typedef struct JsonPathString
        char       *val;
        int                     len;
        int                     total;
-}                      JsonPathString;
+} JsonPathString;
 
 #include "utils/jsonpath.h"
 #include "jsonpath_gram.h"
@@ -29,8 +29,8 @@ typedef struct JsonPathString
                                                          JsonPathParseResult **result, \
                                                          struct Node *escontext)
 YY_DECL;
-extern int     jsonpath_yyparse(JsonPathParseResult **result,
-                                                               struct Node *escontext);
+extern int     jsonpath_yyparse(JsonPathParseResult **result,
+                                                        struct Node *escontext);
 extern void jsonpath_yyerror(JsonPathParseResult **result,
                                                         struct Node *escontext,
                                                         const char *message);
index eea1d1ae0ff8ad94db67c1087e7f9004cc63af15..31e3b16ae00e2c99d0f2b499cc8ad47443e7ed18 100644 (file)
@@ -1794,8 +1794,7 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2,
        else
 #endif
                result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p);
-       if (result == 2147483647)       /* _NLSCMPERROR; missing from mingw
-                                                                * headers */
+       if (result == 2147483647)       /* _NLSCMPERROR; missing from mingw headers */
                ereport(ERROR,
                                (errmsg("could not compare Unicode strings: %m")));
 
@@ -1818,14 +1817,15 @@ pg_strncoll_libc_win32_utf8(const char *arg1, size_t len1, const char *arg2,
 static int
 pg_strcoll_libc(const char *arg1, const char *arg2, pg_locale_t locale)
 {
-       int result;
+       int                     result;
 
        Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
 #ifdef WIN32
        if (GetDatabaseEncoding() == PG_UTF8)
        {
-               size_t len1 = strlen(arg1);
-               size_t len2 = strlen(arg2);
+               size_t          len1 = strlen(arg1);
+               size_t          len2 = strlen(arg2);
+
                result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale);
        }
        else
@@ -1854,13 +1854,13 @@ static int
 pg_strncoll_libc(const char *arg1, size_t len1, const char *arg2, size_t len2,
                                 pg_locale_t locale)
 {
-       char     sbuf[TEXTBUFLEN];
-       char    *buf      = sbuf;
-       size_t   bufsize1 = len1 + 1;
-       size_t   bufsize2 = len2 + 1;
-       char    *arg1n;
-       char    *arg2n;
-       int              result;
+       char            sbuf[TEXTBUFLEN];
+       char       *buf = sbuf;
+       size_t          bufsize1 = len1 + 1;
+       size_t          bufsize2 = len2 + 1;
+       char       *arg1n;
+       char       *arg2n;
+       int                     result;
 
        Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
 
@@ -1906,15 +1906,15 @@ static int
 pg_strncoll_icu_no_utf8(const char *arg1, int32_t len1,
                                                const char *arg2, int32_t len2, pg_locale_t locale)
 {
-       char     sbuf[TEXTBUFLEN];
-       char    *buf = sbuf;
-       int32_t  ulen1;
-       int32_t  ulen2;
-       size_t   bufsize1;
-       size_t   bufsize2;
-       UChar   *uchar1,
-                       *uchar2;
-       int              result;
+       char            sbuf[TEXTBUFLEN];
+       char       *buf = sbuf;
+       int32_t         ulen1;
+       int32_t         ulen2;
+       size_t          bufsize1;
+       size_t          bufsize2;
+       UChar      *uchar1,
+                          *uchar2;
+       int                     result;
 
        Assert(locale->provider == COLLPROVIDER_ICU);
 #ifdef HAVE_UCOL_STRCOLLUTF8
@@ -1961,7 +1961,7 @@ static int
 pg_strncoll_icu(const char *arg1, int32_t len1, const char *arg2, int32_t len2,
                                pg_locale_t locale)
 {
-       int result;
+       int                     result;
 
        Assert(locale->provider == COLLPROVIDER_ICU);
 
@@ -2042,7 +2042,7 @@ int
 pg_strncoll(const char *arg1, size_t len1, const char *arg2, size_t len2,
                        pg_locale_t locale)
 {
-       int              result;
+       int                     result;
 
        if (!locale || locale->provider == COLLPROVIDER_LIBC)
                result = pg_strncoll_libc(arg1, len1, arg2, len2, locale);
@@ -2074,7 +2074,7 @@ pg_strxfrm_libc(char *dest, const char *src, size_t destsize,
 #else
        /* shouldn't happen */
        elog(ERROR, "unsupported collprovider: %c", locale->provider);
-       return 0; /* keep compiler quiet */
+       return 0;                                       /* keep compiler quiet */
 #endif
 }
 
@@ -2082,10 +2082,10 @@ static size_t
 pg_strnxfrm_libc(char *dest, const char *src, size_t srclen, size_t destsize,
                                 pg_locale_t locale)
 {
-       char     sbuf[TEXTBUFLEN];
-       char    *buf     = sbuf;
-       size_t   bufsize = srclen + 1;
-       size_t   result;
+       char            sbuf[TEXTBUFLEN];
+       char       *buf = sbuf;
+       size_t          bufsize = srclen + 1;
+       size_t          result;
 
        Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
 
@@ -2114,12 +2114,12 @@ static size_t
 pg_strnxfrm_icu(char *dest, const char *src, int32_t srclen, int32_t destsize,
                                pg_locale_t locale)
 {
-       char     sbuf[TEXTBUFLEN];
-       char    *buf    = sbuf;
-       UChar   *uchar;
-       int32_t  ulen;
-       size_t   uchar_bsize;
-       Size     result_bsize;
+       char            sbuf[TEXTBUFLEN];
+       char       *buf = sbuf;
+       UChar      *uchar;
+       int32_t         ulen;
+       size_t          uchar_bsize;
+       Size            result_bsize;
 
        Assert(locale->provider == COLLPROVIDER_ICU);
 
@@ -2161,15 +2161,15 @@ static size_t
 pg_strnxfrm_prefix_icu_no_utf8(char *dest, const char *src, int32_t srclen,
                                                           int32_t destsize, pg_locale_t locale)
 {
-       char                     sbuf[TEXTBUFLEN];
-       char                    *buf   = sbuf;
-       UCharIterator    iter;
-       uint32_t                 state[2];
-       UErrorCode               status;
-       int32_t                  ulen  = -1;
-       UChar                   *uchar = NULL;
-       size_t                   uchar_bsize;
-       Size                     result_bsize;
+       char            sbuf[TEXTBUFLEN];
+       char       *buf = sbuf;
+       UCharIterator iter;
+       uint32_t        state[2];
+       UErrorCode      status;
+       int32_t         ulen = -1;
+       UChar      *uchar = NULL;
+       size_t          uchar_bsize;
+       Size            result_bsize;
 
        Assert(locale->provider == COLLPROVIDER_ICU);
        Assert(GetDatabaseEncoding() != PG_UTF8);
@@ -2209,7 +2209,7 @@ static size_t
 pg_strnxfrm_prefix_icu(char *dest, const char *src, int32_t srclen,
                                           int32_t destsize, pg_locale_t locale)
 {
-       size_t result;
+       size_t          result;
 
        Assert(locale->provider == COLLPROVIDER_ICU);
 
@@ -2271,7 +2271,7 @@ pg_strxfrm_enabled(pg_locale_t locale)
                /* shouldn't happen */
                elog(ERROR, "unsupported collprovider: %c", locale->provider);
 
-       return false; /* keep compiler quiet */
+       return false;                           /* keep compiler quiet */
 }
 
 /*
@@ -2291,7 +2291,7 @@ pg_strxfrm_enabled(pg_locale_t locale)
 size_t
 pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale)
 {
-       size_t result = 0; /* keep compiler quiet */
+       size_t          result = 0;             /* keep compiler quiet */
 
        if (!locale || locale->provider == COLLPROVIDER_LIBC)
                result = pg_strxfrm_libc(dest, src, destsize, locale);
@@ -2328,7 +2328,7 @@ size_t
 pg_strnxfrm(char *dest, size_t destsize, const char *src, size_t srclen,
                        pg_locale_t locale)
 {
-       size_t result = 0; /* keep compiler quiet */
+       size_t          result = 0;             /* keep compiler quiet */
 
        if (!locale || locale->provider == COLLPROVIDER_LIBC)
                result = pg_strnxfrm_libc(dest, src, srclen, destsize, locale);
@@ -2358,7 +2358,7 @@ pg_strxfrm_prefix_enabled(pg_locale_t locale)
                /* shouldn't happen */
                elog(ERROR, "unsupported collprovider: %c", locale->provider);
 
-       return false; /* keep compiler quiet */
+       return false;                           /* keep compiler quiet */
 }
 
 /*
@@ -2378,7 +2378,7 @@ size_t
 pg_strxfrm_prefix(char *dest, const char *src, size_t destsize,
                                  pg_locale_t locale)
 {
-       size_t result = 0; /* keep compiler quiet */
+       size_t          result = 0;             /* keep compiler quiet */
 
        if (!locale || locale->provider == COLLPROVIDER_LIBC)
                elog(ERROR, "collprovider '%c' does not support pg_strxfrm_prefix()",
@@ -2415,7 +2415,7 @@ size_t
 pg_strnxfrm_prefix(char *dest, size_t destsize, const char *src,
                                   size_t srclen, pg_locale_t locale)
 {
-       size_t result = 0; /* keep compiler quiet */
+       size_t          result = 0;             /* keep compiler quiet */
 
        if (!locale || locale->provider == COLLPROVIDER_LIBC)
                elog(ERROR, "collprovider '%c' does not support pg_strnxfrm_prefix()",
@@ -2491,7 +2491,7 @@ pg_ucol_open(const char *loc_str)
        collator = ucol_open(loc_str, &status);
        if (U_FAILURE(status))
                ereport(ERROR,
-                               /* use original string for error report */
+               /* use original string for error report */
                                (errmsg("could not open collator for locale \"%s\": %s",
                                                orig_str, u_errorName(status))));
 
@@ -2554,6 +2554,7 @@ uchar_length(UConverter *converter, const char *str, int32_t len)
 {
        UErrorCode      status = U_ZERO_ERROR;
        int32_t         ulen;
+
        ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status);
        if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR)
                ereport(ERROR,
@@ -2571,6 +2572,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen,
 {
        UErrorCode      status = U_ZERO_ERROR;
        int32_t         ulen;
+
        status = U_ZERO_ERROR;
        ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status);
        if (U_FAILURE(status))
@@ -2594,7 +2596,7 @@ uchar_convert(UConverter *converter, UChar *dest, int32_t destlen,
 int32_t
 icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes)
 {
-       int32_t len_uchar;
+       int32_t         len_uchar;
 
        init_icu_converter();
 
@@ -2781,11 +2783,11 @@ char *
 icu_language_tag(const char *loc_str, int elevel)
 {
 #ifdef USE_ICU
-       UErrorCode       status;
-       char             lang[ULOC_LANG_CAPACITY];
-       char            *langtag;
-       size_t           buflen = 32;   /* arbitrary starting buffer size */
-       const bool       strict = true;
+       UErrorCode      status;
+       char            lang[ULOC_LANG_CAPACITY];
+       char       *langtag;
+       size_t          buflen = 32;    /* arbitrary starting buffer size */
+       const bool      strict = true;
 
        status = U_ZERO_ERROR;
        uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status);
@@ -2803,8 +2805,8 @@ icu_language_tag(const char *loc_str, int elevel)
                return pstrdup("en-US-u-va-posix");
 
        /*
-        * A BCP47 language tag doesn't have a clearly-defined upper limit
-        * (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
+        * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
+        * RFC5646 section 4.4). Additionally, in older ICU versions,
         * uloc_toLanguageTag() doesn't always return the ultimate length on the
         * first call, necessitating a loop.
         */
@@ -2843,7 +2845,7 @@ icu_language_tag(const char *loc_str, int elevel)
        ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("ICU is not supported in this build")));
-       return NULL;            /* keep compiler quiet */
+       return NULL;                            /* keep compiler quiet */
 #endif                                                 /* not USE_ICU */
 }
 
@@ -2854,11 +2856,11 @@ void
 icu_validate_locale(const char *loc_str)
 {
 #ifdef USE_ICU
-       UCollator       *collator;
-       UErrorCode       status;
-       char             lang[ULOC_LANG_CAPACITY];
-       bool             found   = false;
-       int                      elevel = icu_validation_level;
+       UCollator  *collator;
+       UErrorCode      status;
+       char            lang[ULOC_LANG_CAPACITY];
+       bool            found = false;
+       int                     elevel = icu_validation_level;
 
        /* no validation */
        if (elevel < 0)
@@ -2889,8 +2891,8 @@ icu_validate_locale(const char *loc_str)
        /* search for matching language within ICU */
        for (int32_t i = 0; !found && i < uloc_countAvailable(); i++)
        {
-               const char      *otherloc = uloc_getAvailable(i);
-               char             otherlang[ULOC_LANG_CAPACITY];
+               const char *otherloc = uloc_getAvailable(i);
+               char            otherlang[ULOC_LANG_CAPACITY];
 
                status = U_ZERO_ERROR;
                uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status);
index 6d673493cbc9dc266efa768d0015fb1d8a129e64..d3a973d86b761db4f0a1d478396a7340b581a091 100644 (file)
@@ -12587,7 +12587,7 @@ get_range_partbound_string(List *bound_datums)
        foreach(cell, bound_datums)
        {
                PartitionRangeDatum *datum =
-               lfirst_node(PartitionRangeDatum, cell);
+                       lfirst_node(PartitionRangeDatum, cell);
 
                appendStringInfoString(buf, sep);
                if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE)
index 7e3bd51c1f85b09a1cac505ac85c13847a1d3025..2bc4ec904fe482a337c1860cfda7b3a18b6c2e0e 100644 (file)
@@ -150,9 +150,9 @@ Datum
 tsquery_phrase(PG_FUNCTION_ARGS)
 {
        PG_RETURN_DATUM(DirectFunctionCall3(tsquery_phrase_distance,
-                                                                                 PG_GETARG_DATUM(0),
-                                                                                 PG_GETARG_DATUM(1),
-                                                                                 Int32GetDatum(1)));
+                                                                               PG_GETARG_DATUM(0),
+                                                                               PG_GETARG_DATUM(1),
+                                                                               Int32GetDatum(1)));
 }
 
 Datum
index a38db4697d31ae7395e6953c80e460e31fa71946..4457c5d4f9fafb5f5aea56e985a4d42f103800f6 100644 (file)
@@ -525,7 +525,7 @@ tsvector_delete_by_indices(TSVector tsv, int *indices_to_delete,
                if (arrin[i].haspos)
                {
                        int                     len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos)
-                       + sizeof(uint16);
+                               + sizeof(uint16);
 
                        curoff = SHORTALIGN(curoff);
                        memcpy(dataout + curoff,
index 592afc18ecc758357dedd92bd637b123a0e91530..b92ff4d266e261b902294a198910cf3415e497cb 100644 (file)
@@ -1021,7 +1021,8 @@ hashbpchar(PG_FUNCTION_ARGS)
        }
        else
        {
-               Size            bsize, rsize;
+               Size            bsize,
+                                       rsize;
                char       *buf;
 
                bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
@@ -1033,8 +1034,8 @@ hashbpchar(PG_FUNCTION_ARGS)
 
                /*
                 * In principle, there's no reason to include the terminating NUL
-                * character in the hash, but it was done before and the behavior
-                * must be preserved.
+                * character in the hash, but it was done before and the behavior must
+                * be preserved.
                 */
                result = hash_any((uint8_t *) buf, bsize + 1);
 
@@ -1076,7 +1077,8 @@ hashbpcharextended(PG_FUNCTION_ARGS)
        }
        else
        {
-               Size            bsize, rsize;
+               Size            bsize,
+                                       rsize;
                char       *buf;
 
                bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
@@ -1088,8 +1090,8 @@ hashbpcharextended(PG_FUNCTION_ARGS)
 
                /*
                 * In principle, there's no reason to include the terminating NUL
-                * character in the hash, but it was done before and the behavior
-                * must be preserved.
+                * character in the hash, but it was done before and the behavior must
+                * be preserved.
                 */
                result = hash_any_extended((uint8_t *) buf, bsize + 1,
                                                                   PG_GETARG_INT64(1));
index b5718764684879ba079be8e65c41647008356a66..884bfbc8ceb6f9a4dfcd3412d5840f40174fba94 100644 (file)
@@ -2312,8 +2312,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup)
                memcpy(sss->buf1, authoritative_data, len);
 
                /*
-                * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated
-                * strings.
+                * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated strings.
                 */
                sss->buf1[len] = '\0';
                sss->last_len1 = len;
@@ -4523,7 +4522,7 @@ text_to_array(PG_FUNCTION_ARGS)
                PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID));
 
        PG_RETURN_DATUM(makeArrayResult(tstate.astate,
-                                                                                 CurrentMemoryContext));
+                                                                       CurrentMemoryContext));
 }
 
 /*
index 24271dfff738ae532374a37d1fb0fc8628d4fda1..06ae940df6fe4890646c68e62244c21718e91077 100644 (file)
@@ -519,7 +519,7 @@ pg_snapshot_recv(PG_FUNCTION_ARGS)
        for (i = 0; i < nxip; i++)
        {
                FullTransactionId cur =
-               FullTransactionIdFromU64((uint64) pq_getmsgint64(buf));
+                       FullTransactionIdFromU64((uint64) pq_getmsgint64(buf));
 
                if (FullTransactionIdPrecedes(cur, last) ||
                        FullTransactionIdPrecedes(cur, xmin) ||
index 15adbd6a016260f8b48e2b167603c7f6bd31e555..866d0d649a4a2da294bdd5b8bbe9ef0617fe6364 100644 (file)
@@ -630,7 +630,7 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent)
        XmlOptionType parsed_xmloptiontype;
        xmlNodePtr      content_nodes;
        volatile xmlBufferPtr buf = NULL;
-       volatile        xmlSaveCtxtPtr ctxt = NULL;
+       volatile xmlSaveCtxtPtr ctxt = NULL;
        ErrorSaveContext escontext = {T_ErrorSaveContext};
        PgXmlErrorContext *xmlerrcxt;
 #endif
index c7607895cddccaf33c833022a8f02d464f84cd63..60978f9415b33d1dd125ee76336ca57c619a09e2 100644 (file)
@@ -3603,7 +3603,7 @@ char *
 get_publication_name(Oid pubid, bool missing_ok)
 {
        HeapTuple       tup;
-       char    *pubname;
+       char       *pubname;
        Form_pg_publication pubform;
 
        tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
@@ -3630,16 +3630,16 @@ get_publication_name(Oid pubid, bool missing_ok)
  * return InvalidOid.
  */
 Oid
-get_subscription_oid(const char *subname, bool missing_ok)
+get_subscription_oid(const char *subname, bool missing_ok)
 {
        Oid                     oid;
 
        oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid,
-                                                  MyDatabaseId, CStringGetDatum(subname));
+                                                 MyDatabaseId, CStringGetDatum(subname));
        if (!OidIsValid(oid) && !missing_ok)
                ereport(ERROR,
-                       (errcode(ERRCODE_UNDEFINED_OBJECT),
-                        errmsg("subscription \"%s\" does not exist", subname)));
+                               (errcode(ERRCODE_UNDEFINED_OBJECT),
+                                errmsg("subscription \"%s\" does not exist", subname)));
        return oid;
 }
 
@@ -3653,7 +3653,7 @@ char *
 get_subscription_name(Oid subid, bool missing_ok)
 {
        HeapTuple       tup;
-       char    *subname;
+       char       *subname;
        Form_pg_subscription subform;
 
        tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
index 40140de958985af4ebfb1db5747be12522a39211..8a08463c2b7c0ffc728431b1e2287dd3322104a4 100644 (file)
@@ -3084,10 +3084,10 @@ static void
 AssertPendingSyncConsistency(Relation relation)
 {
        bool            relcache_verdict =
-       RelationIsPermanent(relation) &&
-       ((relation->rd_createSubid != InvalidSubTransactionId &&
-         RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
-        relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
+               RelationIsPermanent(relation) &&
+               ((relation->rd_createSubid != InvalidSubTransactionId &&
+                 RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
+                relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
 
        Assert(relcache_verdict == RelFileLocatorSkippingWAL(relation->rd_locator));
 
@@ -3765,12 +3765,12 @@ RelationSetNewRelfilenumber(Relation relation, char persistence)
         */
        if (IsBinaryUpgrade)
        {
-               SMgrRelation    srel;
+               SMgrRelation srel;
 
                /*
                 * During a binary upgrade, we use this code path to ensure that
-                * pg_largeobject and its index have the same relfilenumbers as in
-                * the old cluster. This is necessary because pg_upgrade treats
+                * pg_largeobject and its index have the same relfilenumbers as in the
+                * old cluster. This is necessary because pg_upgrade treats
                 * pg_largeobject like a user table, not a system table. It is however
                 * possible that a table or index may need to end up with the same
                 * relfilenumber in the new cluster as what it had in the old cluster.
@@ -5171,8 +5171,8 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
        Bitmapset  *uindexattrs;        /* columns in unique indexes */
        Bitmapset  *pkindexattrs;       /* columns in the primary index */
        Bitmapset  *idindexattrs;       /* columns in the replica identity */
-       Bitmapset  *hotblockingattrs;   /* columns with HOT blocking indexes */
-       Bitmapset  *summarizedattrs;   /* columns with summarizing indexes */
+       Bitmapset  *hotblockingattrs;   /* columns with HOT blocking indexes */
+       Bitmapset  *summarizedattrs;    /* columns with summarizing indexes */
        List       *indexoidlist;
        List       *newindexoidlist;
        Oid                     relpkindex;
@@ -5314,8 +5314,8 @@ restart:
                         * when the column value changes, thus require a separate
                         * attribute bitmapset.
                         *
-                        * Obviously, non-key columns couldn't be referenced by
-                        * foreign key or identity key. Hence we do not include them into
+                        * Obviously, non-key columns couldn't be referenced by foreign
+                        * key or identity key. Hence we do not include them into
                         * uindexattrs, pkindexattrs and idindexattrs bitmaps.
                         */
                        if (attrnum != 0)
index 4c21129707ca0ccb82506223e895b75e211213d8..26575cae6c9b36fdb107507b0d3841b61d322d53 100644 (file)
@@ -801,11 +801,11 @@ read_relmap_file(RelMapFile *map, char *dbpath, bool lock_held, int elevel)
        /*
         * Open the target file.
         *
-        * Because Windows isn't happy about the idea of renaming over a file
-        * that someone has open, we only open this file after acquiring the lock,
-        * and for the same reason, we close it before releasing the lock. That
-        * way, by the time write_relmap_file() acquires an exclusive lock, no
-        * one else will have it open.
+        * Because Windows isn't happy about the idea of renaming over a file that
+        * someone has open, we only open this file after acquiring the lock, and
+        * for the same reason, we close it before releasing the lock. That way,
+        * by the time write_relmap_file() acquires an exclusive lock, no one else
+        * will have it open.
         */
        snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath,
                         RELMAPPER_FILENAME);
index 7458ef5c90fb5fed179620278a91bb2458f456e0..9208c31fe06ad7ee5e1cadfa7eceec32a4f965cf 100644 (file)
@@ -2150,7 +2150,7 @@ CheckFunctionValidatorAccess(Oid validatorOid, Oid functionOid)
 
        /* first validate that we have permissions to use the language */
        aclresult = object_aclcheck(LanguageRelationId, procStruct->prolang, GetUserId(),
-                                                                        ACL_USAGE);
+                                                               ACL_USAGE);
        if (aclresult != ACLCHECK_OK)
                aclcheck_error(aclresult, OBJECT_LANGUAGE,
                                           NameStr(langStruct->lanname));
index dd8ac6d56d856df4071870b15d2739f6cfba5f44..34d0f25c2335d95d4962cc15b065ce3775135764 100644 (file)
@@ -7,10 +7,9 @@ use strict;
 use warnings;
 use Getopt::Long;
 
-my $outfile        = '';
+my $outfile = '';
 
-GetOptions(
-       'outfile=s'   => \$outfile) or die "$0: wrong arguments";
+GetOptions('outfile=s' => \$outfile) or die "$0: wrong arguments";
 
 open my $errcodes, '<', $ARGV[0]
   or die "$0: could not open input file '$ARGV[0]': $!\n";
index 53420f4974fc1047e6944198be111cad4848b6d9..88434c3e5d4e5a0acc47766db6d078f53fc544a8 100644 (file)
@@ -362,7 +362,7 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect
                 */
                if (!am_superuser &&
                        object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
-                                                                ACL_CONNECT) != ACLCHECK_OK)
+                                                       ACL_CONNECT) != ACLCHECK_OK)
                        ereport(FATAL,
                                        (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                         errmsg("permission denied for database \"%s\"", name),
@@ -933,10 +933,10 @@ InitPostgres(const char *in_dbname, Oid dboid,
        }
 
        /*
-        * The last few connection slots are reserved for superusers and roles with
-        * privileges of pg_use_reserved_connections.  Replication connections are
-        * drawn from slots reserved with max_wal_senders and are not limited by
-        * max_connections, superuser_reserved_connections, or
+        * The last few connection slots are reserved for superusers and roles
+        * with privileges of pg_use_reserved_connections.  Replication
+        * connections are drawn from slots reserved with max_wal_senders and are
+        * not limited by max_connections, superuser_reserved_connections, or
         * reserved_connections.
         *
         * Note: At this point, the new backend has already claimed a proc struct,
index 38bcfa60df680812bd3f8754d622329a9bca7d09..dd9a0dd6a83f3e12bb20a2e3acc07fb63e3515be 100644 (file)
@@ -61,15 +61,15 @@ SwitchToUntrustedUser(Oid userid, UserContext *context)
        }
        else
        {
-               int     sec_context = context->save_sec_context;
+               int                     sec_context = context->save_sec_context;
 
                /*
                 * This user can SET ROLE to the target user, but not the other way
                 * around, so protect ourselves against the target user by setting
                 * SECURITY_RESTRICTED_OPERATION to prevent certain changes to the
-                * session state. Also set up a new GUC nest level, so that we can roll
-                * back any GUC changes that may be made by code running as the target
-                * user, inasmuch as they could be malicious.
+                * session state. Also set up a new GUC nest level, so that we can
+                * roll back any GUC changes that may be made by code running as the
+                * target user, inasmuch as they could be malicious.
                 */
                sec_context |= SECURITY_RESTRICTED_OPERATION;
                SetUserIdAndSecContext(userid, sec_context);
index 40b3fb6db67376dc93d797913f543c230573e98e..4c5724b8b7502f2fdf102d9fb72cdaa2779a8941 100755 (executable)
@@ -40,7 +40,7 @@ my $cp950txt = &read_source("CP950.TXT");
 foreach my $i (@$cp950txt)
 {
        my $code = $i->{code};
-       my $ucs  = $i->{ucs};
+       my $ucs = $i->{ucs};
 
        # Pick only the ETEN extended characters in the range 0xf9d6 - 0xf9dc
        # from CP950.TXT
@@ -51,12 +51,12 @@ foreach my $i (@$cp950txt)
        {
                push @$all,
                  {
-                       code      => $code,
-                       ucs       => $ucs,
-                       comment   => $i->{comment},
+                       code => $code,
+                       ucs => $ucs,
+                       comment => $i->{comment},
                        direction => BOTH,
-                       f         => $i->{f},
-                       l         => $i->{l}
+                       f => $i->{f},
+                       l => $i->{l}
                  };
        }
 }
@@ -64,7 +64,7 @@ foreach my $i (@$cp950txt)
 foreach my $i (@$all)
 {
        my $code = $i->{code};
-       my $ucs  = $i->{ucs};
+       my $ucs = $i->{ucs};
 
        # BIG5.TXT maps several BIG5 characters to U+FFFD. The UTF-8 to BIG5 mapping can
        # contain only one of them. XXX: Doesn't really make sense to include any of them,
index adfdca24f7155e918e4ed2f2952decc60632d352..f9ff2bd3d2a7905a27ca552ae0cfc6357e9b0bbb 100755 (executable)
@@ -33,7 +33,7 @@ while (<$in>)
        next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
        my ($u, $c) = ($1, $2);
        $c =~ s/ //g;
-       my $ucs  = hex($u);
+       my $ucs = hex($u);
        my $code = hex($c);
 
        # The GB-18030 character set, which we use as the source, contains
@@ -73,11 +73,11 @@ while (<$in>)
 
        push @mapping,
          {
-               ucs       => $ucs,
-               code      => $code,
+               ucs => $ucs,
+               code => $code,
                direction => BOTH,
-               f         => $in_file,
-               l         => $.
+               f => $in_file,
+               l => $.
          };
 }
 close($in);
index b7715ed4195aa50786fddd8e6c8dcfa51d5999a2..2d0e05fb794170068efd4a81a582046e8f97973b 100755 (executable)
@@ -37,13 +37,13 @@ while (my $line = <$in>)
 
                push @all,
                  {
-                       direction  => BOTH,
-                       ucs        => $ucs1,
+                       direction => BOTH,
+                       ucs => $ucs1,
                        ucs_second => $ucs2,
-                       code       => $code,
-                       comment    => $rest,
-                       f          => $in_file,
-                       l          => $.
+                       code => $code,
+                       comment => $rest,
+                       f => $in_file,
+                       l => $.
                  };
        }
        elsif ($line =~ /^0x(\w+)\s*U\+(\w+)\s*#\s*(\S.*)?\s*$/)
@@ -51,7 +51,7 @@ while (my $line = <$in>)
 
                # non-combined characters
                my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
-               my $ucs  = hex($u);
+               my $ucs = hex($u);
                my $code = hex($c);
 
                next if ($code < 0x80 && $ucs < 0x80);
@@ -59,11 +59,11 @@ while (my $line = <$in>)
                push @all,
                  {
                        direction => BOTH,
-                       ucs       => $ucs,
-                       code      => $code,
-                       comment   => $rest,
-                       f         => $in_file,
-                       l         => $.
+                       ucs => $ucs,
+                       code => $code,
+                       comment => $rest,
+                       f => $in_file,
+                       l => $.
                  };
        }
 }
index 9c949f95b158b930052d62f4e764a951b447f249..4073578027eba1aeed46475caadbea00304e188b 100755 (executable)
@@ -120,521 +120,521 @@ foreach my $i (grep defined $_->{sjis}, @mapping)
 push @mapping, (
        {
                direction => BOTH,
-               ucs       => 0x4efc,
-               code      => 0x8ff4af,
-               comment   => '# CJK(4EFC)'
+               ucs => 0x4efc,
+               code => 0x8ff4af,
+               comment => '# CJK(4EFC)'
        },
        {
                direction => BOTH,
-               ucs       => 0x50f4,
-               code      => 0x8ff4b0,
-               comment   => '# CJK(50F4)'
+               ucs => 0x50f4,
+               code => 0x8ff4b0,
+               comment => '# CJK(50F4)'
        },
        {
                direction => BOTH,
-               ucs       => 0x51EC,
-               code      => 0x8ff4b1,
-               comment   => '# CJK(51EC)'
+               ucs => 0x51EC,
+               code => 0x8ff4b1,
+               comment => '# CJK(51EC)'
        },
        {
                direction => BOTH,
-               ucs       => 0x5307,
-               code      => 0x8ff4b2,
-               comment   => '# CJK(5307)'
+               ucs => 0x5307,
+               code => 0x8ff4b2,
+               comment => '# CJK(5307)'
        },
        {
                direction => BOTH,
-               ucs       => 0x5324,
-               code      => 0x8ff4b3,
-               comment   => '# CJK(5324)'
+               ucs => 0x5324,
+               code => 0x8ff4b3,
+               comment => '# CJK(5324)'
        },
        {
                direction => BOTH,
-               ucs       => 0x548A,
-               code      => 0x8ff4b5,
-               comment   => '# CJK(548A)'
+               ucs => 0x548A,
+               code => 0x8ff4b5,
+               comment => '# CJK(548A)'
        },
        {
                direction => BOTH,
-               ucs       => 0x5759,
-               code      => 0x8ff4b6,
-               comment   => '# CJK(5759)'
+               ucs => 0x5759,
+               code => 0x8ff4b6,
+               comment => '# CJK(5759)'
        },
        {
                direction => BOTH,
-               ucs       => 0x589E,
-               code      => 0x8ff4b9,
-               comment   => '# CJK(589E)'
+               ucs => 0x589E,
+               code => 0x8ff4b9,
+               comment => '# CJK(589E)'
        },
        {
                direction => BOTH,
-               ucs       => 0x5BEC,
-               code      => 0x8ff4ba,
-               comment   => '# CJK(5BEC)'
+               ucs => 0x5BEC,
+               code => 0x8ff4ba,
+               comment => '# CJK(5BEC)'
        },
        {
                direction => BOTH,
-               ucs       => 0x5CF5,
-               code      => 0x8ff4bb,
-               comment   => '# CJK(5CF5)'
+               ucs => 0x5CF5,
+               code => 0x8ff4bb,
+               comment => '# CJK(5CF5)'
        },
        {
                direction => BOTH,
-               ucs       => 0x5D53,
-               code      => 0x8ff4bc,
-               comment   => '# CJK(5D53)'
+               ucs => 0x5D53,
+               code => 0x8ff4bc,
+               comment => '# CJK(5D53)'
        },
        {
                direction => BOTH,
-               ucs       => 0x5FB7,
-               code      => 0x8ff4be,
-               comment   => '# CJK(5FB7)'
+               ucs => 0x5FB7,
+               code => 0x8ff4be,
+               comment => '# CJK(5FB7)'
        },
        {
                direction => BOTH,
-               ucs       => 0x6085,
-               code      => 0x8ff4bf,
-               comment   => '# CJK(6085)'
+               ucs => 0x6085,
+               code => 0x8ff4bf,
+               comment => '# CJK(6085)'
        },
        {
                direction => BOTH,
-               ucs       => 0x6120,
-               code      => 0x8ff4c0,
-               comment   => '# CJK(6120)'
+               ucs => 0x6120,
+               code => 0x8ff4c0,
+               comment => '# CJK(6120)'
        },
        {
                direction => BOTH,
-               ucs       => 0x654E,
-               code      => 0x8ff4c1,
-               comment   => '# CJK(654E)'
+               ucs => 0x654E,
+               code => 0x8ff4c1,
+               comment => '# CJK(654E)'
        },
        {
                direction => BOTH,
-               ucs       => 0x663B,
-               code      => 0x8ff4c2,
-               comment   => '# CJK(663B)'
+               ucs => 0x663B,
+               code => 0x8ff4c2,
+               comment => '# CJK(663B)'
        },
        {
                direction => BOTH,
-               ucs       => 0x6665,
-               code      => 0x8ff4c3,
-               comment   => '# CJK(6665)'
+               ucs => 0x6665,
+               code => 0x8ff4c3,
+               comment => '# CJK(6665)'
        },
        {
                direction => BOTH,
-               ucs       => 0x6801,
-               code      => 0x8ff4c6,
-               comment   => '# CJK(6801)'
+               ucs => 0x6801,
+               code => 0x8ff4c6,
+               comment => '# CJK(6801)'
        },
        {
                direction => BOTH,
-               ucs       => 0x6A6B,
-               code      => 0x8ff4c9,
-               comment   => '# CJK(6A6B)'
+               ucs => 0x6A6B,
+               code => 0x8ff4c9,
+               comment => '# CJK(6A6B)'
        },
        {
                direction => BOTH,
-               ucs       => 0x6AE2,
-               code      => 0x8ff4ca,
-               comment   => '# CJK(6AE2)'
+               ucs => 0x6AE2,
+               code => 0x8ff4ca,
+               comment => '# CJK(6AE2)'
        },
        {
                direction => BOTH,
-               ucs       => 0x6DF2,
-               code      => 0x8ff4cc,
-               comment   => '# CJK(6DF2)'
+               ucs => 0x6DF2,
+               code => 0x8ff4cc,
+               comment => '# CJK(6DF2)'
        },
        {
                direction => BOTH,
-               ucs       => 0x6DF8,
-               code      => 0x8ff4cb,
-               comment   => '# CJK(6DF8)'
+               ucs => 0x6DF8,
+               code => 0x8ff4cb,
+               comment => '# CJK(6DF8)'
        },
        {
                direction => BOTH,
-               ucs       => 0x7028,
-               code      => 0x8ff4cd,
-               comment   => '# CJK(7028)'
+               ucs => 0x7028,
+               code => 0x8ff4cd,
+               comment => '# CJK(7028)'
        },
        {
                direction => BOTH,
-               ucs       => 0x70BB,
-               code      => 0x8ff4ae,
-               comment   => '# CJK(70BB)'
+               ucs => 0x70BB,
+               code => 0x8ff4ae,
+               comment => '# CJK(70BB)'
        },
        {
                direction => BOTH,
-               ucs       => 0x7501,
-               code      => 0x8ff4d0,
-               comment   => '# CJK(7501)'
+               ucs => 0x7501,
+               code => 0x8ff4d0,
+               comment => '# CJK(7501)'
        },
        {
                direction => BOTH,
-               ucs       => 0x7682,
-               code      => 0x8ff4d1,
-               comment   => '# CJK(7682)'
+               ucs => 0x7682,
+               code => 0x8ff4d1,
+               comment => '# CJK(7682)'
        },
        {
                direction => BOTH,
-               ucs       => 0x769E,
-               code      => 0x8ff4d2,
-               comment   => '# CJK(769E)'
+               ucs => 0x769E,
+               code => 0x8ff4d2,
+               comment => '# CJK(769E)'
        },
        {
                direction => BOTH,
-               ucs       => 0x7930,
-               code      => 0x8ff4d4,
-               comment   => '# CJK(7930)'
+               ucs => 0x7930,
+               code => 0x8ff4d4,
+               comment => '# CJK(7930)'
        },
        {
                direction => BOTH,
-               ucs       => 0x7AE7,
-               code      => 0x8ff4d9,
-               comment   => '# CJK(7AE7)'
+               ucs => 0x7AE7,
+               code => 0x8ff4d9,
+               comment => '# CJK(7AE7)'
        },
        {
                direction => BOTH,
-               ucs       => 0x7DA0,
-               code      => 0x8ff4dc,
-               comment   => '# CJK(7DA0)'
+               ucs => 0x7DA0,
+               code => 0x8ff4dc,
+               comment => '# CJK(7DA0)'
        },
        {
                direction => BOTH,
-               ucs       => 0x7DD6,
-               code      => 0x8ff4dd,
-               comment   => '# CJK(7DD6)'
+               ucs => 0x7DD6,
+               code => 0x8ff4dd,
+               comment => '# CJK(7DD6)'
        },
        {
                direction => BOTH,
-               ucs       => 0x8362,
-               code      => 0x8ff4df,
-               comment   => '# CJK(8362)'
+               ucs => 0x8362,
+               code => 0x8ff4df,
+               comment => '# CJK(8362)'
        },
        {
                direction => BOTH,
-               ucs       => 0x85B0,
-               code      => 0x8ff4e1,
-               comment   => '# CJK(85B0)'
+               ucs => 0x85B0,
+               code => 0x8ff4e1,
+               comment => '# CJK(85B0)'
        },
        {
                direction => BOTH,
-               ucs       => 0x8807,
-               code      => 0x8ff4e4,
-               comment   => '# CJK(8807)'
+               ucs => 0x8807,
+               code => 0x8ff4e4,
+               comment => '# CJK(8807)'
        },
        {
                direction => BOTH,
-               ucs       => 0x8B7F,
-               code      => 0x8ff4e6,
-               comment   => '# CJK(8B7F)'
+               ucs => 0x8B7F,
+               code => 0x8ff4e6,
+               comment => '# CJK(8B7F)'
        },
        {
                direction => BOTH,
-               ucs       => 0x8CF4,
-               code      => 0x8ff4e7,
-               comment   => '# CJK(8CF4)'
+               ucs => 0x8CF4,
+               code => 0x8ff4e7,
+               comment => '# CJK(8CF4)'
        },
        {
                direction => BOTH,
-               ucs       => 0x8D76,
-               code      => 0x8ff4e8,
-               comment   => '# CJK(8D76)'
+               ucs => 0x8D76,
+               code => 0x8ff4e8,
+               comment => '# CJK(8D76)'
        },
        {
                direction => BOTH,
-               ucs       => 0x90DE,
-               code      => 0x8ff4ec,
-               comment   => '# CJK(90DE)'
+               ucs => 0x90DE,
+               code => 0x8ff4ec,
+               comment => '# CJK(90DE)'
        },
        {
                direction => BOTH,
-               ucs       => 0x9115,
-               code      => 0x8ff4ee,
-               comment   => '# CJK(9115)'
+               ucs => 0x9115,
+               code => 0x8ff4ee,
+               comment => '# CJK(9115)'
        },
        {
                direction => BOTH,
-               ucs       => 0x9592,
-               code      => 0x8ff4f1,
-               comment   => '# CJK(9592)'
+               ucs => 0x9592,
+               code => 0x8ff4f1,
+               comment => '# CJK(9592)'
        },
        {
                direction => BOTH,
-               ucs       => 0x973B,
-               code      => 0x8ff4f4,
-               comment   => '# CJK(973B)'
+               ucs => 0x973B,
+               code => 0x8ff4f4,
+               comment => '# CJK(973B)'
        },
        {
                direction => BOTH,
-               ucs       => 0x974D,
-               code      => 0x8ff4f5,
-               comment   => '# CJK(974D)'
+               ucs => 0x974D,
+               code => 0x8ff4f5,
+               comment => '# CJK(974D)'
        },
        {
                direction => BOTH,
-               ucs       => 0x9751,
-               code      => 0x8ff4f6,
-               comment   => '# CJK(9751)'
+               ucs => 0x9751,
+               code => 0x8ff4f6,
+               comment => '# CJK(9751)'
        },
        {
                direction => BOTH,
-               ucs       => 0x999E,
-               code      => 0x8ff4fa,
-               comment   => '# CJK(999E)'
+               ucs => 0x999E,
+               code => 0x8ff4fa,
+               comment => '# CJK(999E)'
        },
        {
                direction => BOTH,
-               ucs       => 0x9AD9,
-               code      => 0x8ff4fb,
-               comment   => '# CJK(9AD9)'
+               ucs => 0x9AD9,
+               code => 0x8ff4fb,
+               comment => '# CJK(9AD9)'
        },
        {
                direction => BOTH,
-               ucs       => 0x9B72,
-               code      => 0x8ff4fc,
-               comment   => '# CJK(9B72)'
+               ucs => 0x9B72,
+               code => 0x8ff4fc,
+               comment => '# CJK(9B72)'
        },
        {
                direction => BOTH,
-               ucs       => 0x9ED1,
-               code      => 0x8ff4fe,
-               comment   => '# CJK(9ED1)'
+               ucs => 0x9ED1,
+               code => 0x8ff4fe,
+               comment => '# CJK(9ED1)'
        },
        {
                direction => BOTH,
-               ucs       => 0xF929,
-               code      => 0x8ff4c5,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-F929'
+               ucs => 0xF929,
+               code => 0x8ff4c5,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-F929'
        },
        {
                direction => BOTH,
-               ucs       => 0xF9DC,
-               code      => 0x8ff4f2,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-F9DC'
+               ucs => 0xF9DC,
+               code => 0x8ff4f2,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA0E,
-               code      => 0x8ff4b4,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA0E'
+               ucs => 0xFA0E,
+               code => 0x8ff4b4,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA0F,
-               code      => 0x8ff4b7,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA0F'
+               ucs => 0xFA0F,
+               code => 0x8ff4b7,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA10,
-               code      => 0x8ff4b8,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA10'
+               ucs => 0xFA10,
+               code => 0x8ff4b8,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA11,
-               code      => 0x8ff4bd,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA11'
+               ucs => 0xFA11,
+               code => 0x8ff4bd,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA12,
-               code      => 0x8ff4c4,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA12'
+               ucs => 0xFA12,
+               code => 0x8ff4c4,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA13,
-               code      => 0x8ff4c7,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA13'
+               ucs => 0xFA13,
+               code => 0x8ff4c7,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA14,
-               code      => 0x8ff4c8,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA14'
+               ucs => 0xFA14,
+               code => 0x8ff4c8,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA15,
-               code      => 0x8ff4ce,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA15'
+               ucs => 0xFA15,
+               code => 0x8ff4ce,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA16,
-               code      => 0x8ff4cf,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA16'
+               ucs => 0xFA16,
+               code => 0x8ff4cf,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA17,
-               code      => 0x8ff4d3,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA17'
+               ucs => 0xFA17,
+               code => 0x8ff4d3,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA18,
-               code      => 0x8ff4d5,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA18'
+               ucs => 0xFA18,
+               code => 0x8ff4d5,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA19,
-               code      => 0x8ff4d6,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA19'
+               ucs => 0xFA19,
+               code => 0x8ff4d6,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA1A,
-               code      => 0x8ff4d7,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA1A'
+               ucs => 0xFA1A,
+               code => 0x8ff4d7,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA1B,
-               code      => 0x8ff4d8,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA1B'
+               ucs => 0xFA1B,
+               code => 0x8ff4d8,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA1C,
-               code      => 0x8ff4da,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA1C'
+               ucs => 0xFA1C,
+               code => 0x8ff4da,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA1D,
-               code      => 0x8ff4db,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA1D'
+               ucs => 0xFA1D,
+               code => 0x8ff4db,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA1E,
-               code      => 0x8ff4de,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA1E'
+               ucs => 0xFA1E,
+               code => 0x8ff4de,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA1F,
-               code      => 0x8ff4e0,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA1F'
+               ucs => 0xFA1F,
+               code => 0x8ff4e0,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA20,
-               code      => 0x8ff4e2,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA20'
+               ucs => 0xFA20,
+               code => 0x8ff4e2,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA21,
-               code      => 0x8ff4e3,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA21'
+               ucs => 0xFA21,
+               code => 0x8ff4e3,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA22,
-               code      => 0x8ff4e5,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA22'
+               ucs => 0xFA22,
+               code => 0x8ff4e5,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA23,
-               code      => 0x8ff4e9,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA23'
+               ucs => 0xFA23,
+               code => 0x8ff4e9,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA24,
-               code      => 0x8ff4ea,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA24'
+               ucs => 0xFA24,
+               code => 0x8ff4ea,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA25,
-               code      => 0x8ff4eb,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA25'
+               ucs => 0xFA25,
+               code => 0x8ff4eb,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA26,
-               code      => 0x8ff4ed,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA26'
+               ucs => 0xFA26,
+               code => 0x8ff4ed,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA27,
-               code      => 0x8ff4ef,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA27'
+               ucs => 0xFA27,
+               code => 0x8ff4ef,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA28,
-               code      => 0x8ff4f0,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA28'
+               ucs => 0xFA28,
+               code => 0x8ff4f0,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA29,
-               code      => 0x8ff4f3,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA29'
+               ucs => 0xFA29,
+               code => 0x8ff4f3,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA2A,
-               code      => 0x8ff4f7,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA2A'
+               ucs => 0xFA2A,
+               code => 0x8ff4f7,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA2B,
-               code      => 0x8ff4f8,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA2B'
+               ucs => 0xFA2B,
+               code => 0x8ff4f8,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA2C,
-               code      => 0x8ff4f9,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA2C'
+               ucs => 0xFA2C,
+               code => 0x8ff4f9,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C'
        },
        {
                direction => BOTH,
-               ucs       => 0xFA2D,
-               code      => 0x8ff4fd,
-               comment   => '# CJK COMPATIBILITY IDEOGRAPH-FA2D'
+               ucs => 0xFA2D,
+               code => 0x8ff4fd,
+               comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D'
        },
        {
                direction => BOTH,
-               ucs       => 0xFF07,
-               code      => 0x8ff4a9,
-               comment   => '# FULLWIDTH APOSTROPHE'
+               ucs => 0xFF07,
+               code => 0x8ff4a9,
+               comment => '# FULLWIDTH APOSTROPHE'
        },
        {
                direction => BOTH,
-               ucs       => 0xFFE4,
-               code      => 0x8fa2c3,
-               comment   => '# FULLWIDTH BROKEN BAR'
+               ucs => 0xFFE4,
+               code => 0x8fa2c3,
+               comment => '# FULLWIDTH BROKEN BAR'
        },
 
        # additional conversions for EUC_JP -> UTF-8 conversion
        {
                direction => TO_UNICODE,
-               ucs       => 0x2116,
-               code      => 0x8ff4ac,
-               comment   => '# NUMERO SIGN'
+               ucs => 0x2116,
+               code => 0x8ff4ac,
+               comment => '# NUMERO SIGN'
        },
        {
                direction => TO_UNICODE,
-               ucs       => 0x2121,
-               code      => 0x8ff4ad,
-               comment   => '# TELEPHONE SIGN'
+               ucs => 0x2121,
+               code => 0x8ff4ad,
+               comment => '# TELEPHONE SIGN'
        },
        {
                direction => TO_UNICODE,
-               ucs       => 0x3231,
-               code      => 0x8ff4ab,
-               comment   => '# PARENTHESIZED IDEOGRAPH STOCK'
+               ucs => 0x3231,
+               code => 0x8ff4ab,
+               comment => '# PARENTHESIZED IDEOGRAPH STOCK'
        });
 
 print_conversion_tables($this_script, "EUC_JP", \@mapping);
index 4c3989d2c51239a96459718f8e751595ac577e31..9112e1cfe9bf5e2a5d8184bbe322afec12127917 100755 (executable)
@@ -36,27 +36,27 @@ foreach my $i (@$mapping)
 push @$mapping,
   ( {
                direction => BOTH,
-               ucs       => 0x20AC,
-               code      => 0xa2e6,
-               comment   => '# EURO SIGN',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x20AC,
+               code => 0xa2e6,
+               comment => '# EURO SIGN',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => BOTH,
-               ucs       => 0x00AE,
-               code      => 0xa2e7,
-               comment   => '# REGISTERED SIGN',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x00AE,
+               code => 0xa2e7,
+               comment => '# REGISTERED SIGN',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => BOTH,
-               ucs       => 0x327E,
-               code      => 0xa2e8,
-               comment   => '# CIRCLED HANGUL IEUNG U',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x327E,
+               code => 0xa2e8,
+               comment => '# CIRCLED HANGUL IEUNG U',
+               f => $this_script,
+               l => __LINE__
        });
 
 print_conversion_tables($this_script, "EUC_KR", $mapping);
index ecc175528ec5ab6be64e254bafd08a5c2e1b9008..4ad17064abc22582c77ad9dfaa21f8a27a25c009 100755 (executable)
@@ -30,8 +30,8 @@ my @extras;
 
 foreach my $i (@$mapping)
 {
-       my $ucs      = $i->{ucs};
-       my $code     = $i->{code};
+       my $ucs = $i->{ucs};
+       my $code = $i->{code};
        my $origcode = $i->{code};
 
        my $plane = ($code & 0x1f0000) >> 16;
@@ -56,12 +56,12 @@ foreach my $i (@$mapping)
        {
                push @extras,
                  {
-                       ucs       => $i->{ucs},
-                       code      => ($i->{code} + 0x8ea10000),
-                       rest      => $i->{rest},
+                       ucs => $i->{ucs},
+                       code => ($i->{code} + 0x8ea10000),
+                       rest => $i->{rest},
                        direction => TO_UNICODE,
-                       f         => $i->{f},
-                       l         => $i->{l}
+                       f => $i->{f},
+                       l => $i->{l}
                  };
        }
 }
index fb401e6099194e3b86c133620608e0075a691eba..9c8a983bf7103618e8b6d7ea51680f24d11c12fa 100755 (executable)
@@ -33,17 +33,17 @@ while (<$in>)
        next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
        my ($u, $c) = ($1, $2);
        $c =~ s/ //g;
-       my $ucs  = hex($u);
+       my $ucs = hex($u);
        my $code = hex($c);
        if ($code >= 0x80 && $ucs >= 0x0080)
        {
                push @mapping,
                  {
-                       ucs       => $ucs,
-                       code      => $code,
+                       ucs => $ucs,
+                       code => $code,
                        direction => BOTH,
-                       f         => $in_file,
-                       l         => $.
+                       f => $in_file,
+                       l => $.
                  };
        }
 }
index 370c5b801c982848489dcb662b208518faba090a..f50baa8f1f423d6bbab5934b095408aeb7cae2cf 100755 (executable)
@@ -30,27 +30,27 @@ my $mapping = &read_source("JOHAB.TXT");
 push @$mapping,
   ( {
                direction => BOTH,
-               ucs       => 0x20AC,
-               code      => 0xd9e6,
-               comment   => '# EURO SIGN',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x20AC,
+               code => 0xd9e6,
+               comment => '# EURO SIGN',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => BOTH,
-               ucs       => 0x00AE,
-               code      => 0xd9e7,
-               comment   => '# REGISTERED SIGN',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x00AE,
+               code => 0xd9e7,
+               comment => '# REGISTERED SIGN',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => BOTH,
-               ucs       => 0x327E,
-               code      => 0xd9e8,
-               comment   => '# CIRCLED HANGUL IEUNG U',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x327E,
+               code => 0xd9e8,
+               comment => '# CIRCLED HANGUL IEUNG U',
+               f => $this_script,
+               l => __LINE__
        });
 
 print_conversion_tables($this_script, "JOHAB", $mapping);
index 6431aba555dc31e4c7ec49c39b1213a2966958d6..ed010a58facf00f3a0944822a3bb355f2ea88a97 100755 (executable)
@@ -37,13 +37,13 @@ while (my $line = <$in>)
 
                push @mapping,
                  {
-                       code       => $code,
-                       ucs        => $ucs1,
+                       code => $code,
+                       ucs => $ucs1,
                        ucs_second => $ucs2,
-                       comment    => $rest,
-                       direction  => BOTH,
-                       f          => $in_file,
-                       l          => $.
+                       comment => $rest,
+                       direction => BOTH,
+                       f => $in_file,
+                       l => $.
                  };
        }
        elsif ($line =~ /^0x(\w+)\s*U\+(\w+)\s*#\s*(\S.*)?\s*$/)
@@ -51,7 +51,7 @@ while (my $line = <$in>)
 
                # non-combined characters
                my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
-               my $ucs  = hex($u);
+               my $ucs = hex($u);
                my $code = hex($c);
                my $direction;
 
@@ -74,12 +74,12 @@ while (my $line = <$in>)
 
                push @mapping,
                  {
-                       code      => $code,
-                       ucs       => $ucs,
-                       comment   => $rest,
+                       code => $code,
+                       ucs => $ucs,
+                       comment => $rest,
                        direction => $direction,
-                       f         => $in_file,
-                       l         => $.
+                       f => $in_file,
+                       l => $.
                  };
        }
 }
index 6426cf479427135a76b41ecd8af179750b57d5ea..0808c6836b3a155c165ef701c66d230fddad9e30 100755 (executable)
@@ -22,13 +22,13 @@ my $mapping = read_source("CP932.TXT");
 # Drop these SJIS codes from the source for UTF8=>SJIS conversion
 my @reject_sjis = (
        0xed40 .. 0xeefc, 0x8754 .. 0x875d, 0x878a, 0x8782,
-       0x8784,           0xfa5b,           0xfa54, 0x8790 .. 0x8792,
+       0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792,
        0x8795 .. 0x8797, 0x879a .. 0x879c);
 
 foreach my $i (@$mapping)
 {
        my $code = $i->{code};
-       my $ucs  = $i->{ucs};
+       my $ucs = $i->{ucs};
 
        if (grep { $code == $_ } @reject_sjis)
        {
@@ -40,67 +40,67 @@ foreach my $i (@$mapping)
 push @$mapping,
   ( {
                direction => FROM_UNICODE,
-               ucs       => 0x00a2,
-               code      => 0x8191,
-               comment   => '# CENT SIGN',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x00a2,
+               code => 0x8191,
+               comment => '# CENT SIGN',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => FROM_UNICODE,
-               ucs       => 0x00a3,
-               code      => 0x8192,
-               comment   => '# POUND SIGN',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x00a3,
+               code => 0x8192,
+               comment => '# POUND SIGN',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => FROM_UNICODE,
-               ucs       => 0x00a5,
-               code      => 0x5c,
-               comment   => '# YEN SIGN',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x00a5,
+               code => 0x5c,
+               comment => '# YEN SIGN',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => FROM_UNICODE,
-               ucs       => 0x00ac,
-               code      => 0x81ca,
-               comment   => '# NOT SIGN',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x00ac,
+               code => 0x81ca,
+               comment => '# NOT SIGN',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => FROM_UNICODE,
-               ucs       => 0x2016,
-               code      => 0x8161,
-               comment   => '# DOUBLE VERTICAL LINE',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x2016,
+               code => 0x8161,
+               comment => '# DOUBLE VERTICAL LINE',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => FROM_UNICODE,
-               ucs       => 0x203e,
-               code      => 0x7e,
-               comment   => '# OVERLINE',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x203e,
+               code => 0x7e,
+               comment => '# OVERLINE',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => FROM_UNICODE,
-               ucs       => 0x2212,
-               code      => 0x817c,
-               comment   => '# MINUS SIGN',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x2212,
+               code => 0x817c,
+               comment => '# MINUS SIGN',
+               f => $this_script,
+               l => __LINE__
        },
        {
                direction => FROM_UNICODE,
-               ucs       => 0x301c,
-               code      => 0x8160,
-               comment   => '# WAVE DASH',
-               f         => $this_script,
-               l         => __LINE__
+               ucs => 0x301c,
+               code => 0x8160,
+               comment => '# WAVE DASH',
+               f => $this_script,
+               l => __LINE__
        });
 
 print_conversion_tables($this_script, "SJIS", $mapping);
index 5ec9c069b7d04cb08f35b15901681189596dcc2b..207677d76dc22a7894bff39a27ce52175c8fad45 100755 (executable)
@@ -33,7 +33,7 @@ while (<$in>)
        next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
        my ($u, $c) = ($1, $2);
        $c =~ s/ //g;
-       my $ucs  = hex($u);
+       my $ucs = hex($u);
        my $code = hex($c);
 
        next if ($code == 0x0080 || $code == 0x00FF);
@@ -42,11 +42,11 @@ while (<$in>)
        {
                push @mapping,
                  {
-                       ucs       => $ucs,
-                       code      => $code,
+                       ucs => $ucs,
+                       code => $code,
                        direction => BOTH,
-                       f         => $in_file,
-                       l         => $.
+                       f => $in_file,
+                       l => $.
                  };
        }
 }
@@ -56,11 +56,11 @@ close($in);
 push @mapping,
   {
        direction => BOTH,
-       code      => 0xa2e8,
-       ucs       => 0x327e,
-       comment   => 'CIRCLED HANGUL IEUNG U',
-       f         => $this_script,
-       l         => __LINE__
+       code => 0xa2e8,
+       ucs => 0x327e,
+       comment => 'CIRCLED HANGUL IEUNG U',
+       f => $this_script,
+       l => __LINE__
   };
 
 print_conversion_tables($this_script, "UHC", \@mapping);
index 1917f86f0a3e7d5bbcbec06149064d16010937ea..a1947308ffa3a0bef0c6eb9e7da1744bcc9cc2e4 100755 (executable)
@@ -23,33 +23,33 @@ use convutils;
 my $this_script = 'src/backend/utils/mb/Unicode/UCS_to_most.pl';
 
 my %filename = (
-       'WIN866'     => 'CP866.TXT',
-       'WIN874'     => 'CP874.TXT',
-       'WIN1250'    => 'CP1250.TXT',
-       'WIN1251'    => 'CP1251.TXT',
-       'WIN1252'    => 'CP1252.TXT',
-       'WIN1253'    => 'CP1253.TXT',
-       'WIN1254'    => 'CP1254.TXT',
-       'WIN1255'    => 'CP1255.TXT',
-       'WIN1256'    => 'CP1256.TXT',
-       'WIN1257'    => 'CP1257.TXT',
-       'WIN1258'    => 'CP1258.TXT',
-       'ISO8859_2'  => '8859-2.TXT',
-       'ISO8859_3'  => '8859-3.TXT',
-       'ISO8859_4'  => '8859-4.TXT',
-       'ISO8859_5'  => '8859-5.TXT',
-       'ISO8859_6'  => '8859-6.TXT',
-       'ISO8859_7'  => '8859-7.TXT',
-       'ISO8859_8'  => '8859-8.TXT',
-       'ISO8859_9'  => '8859-9.TXT',
+       'WIN866' => 'CP866.TXT',
+       'WIN874' => 'CP874.TXT',
+       'WIN1250' => 'CP1250.TXT',
+       'WIN1251' => 'CP1251.TXT',
+       'WIN1252' => 'CP1252.TXT',
+       'WIN1253' => 'CP1253.TXT',
+       'WIN1254' => 'CP1254.TXT',
+       'WIN1255' => 'CP1255.TXT',
+       'WIN1256' => 'CP1256.TXT',
+       'WIN1257' => 'CP1257.TXT',
+       'WIN1258' => 'CP1258.TXT',
+       'ISO8859_2' => '8859-2.TXT',
+       'ISO8859_3' => '8859-3.TXT',
+       'ISO8859_4' => '8859-4.TXT',
+       'ISO8859_5' => '8859-5.TXT',
+       'ISO8859_6' => '8859-6.TXT',
+       'ISO8859_7' => '8859-7.TXT',
+       'ISO8859_8' => '8859-8.TXT',
+       'ISO8859_9' => '8859-9.TXT',
        'ISO8859_10' => '8859-10.TXT',
        'ISO8859_13' => '8859-13.TXT',
        'ISO8859_14' => '8859-14.TXT',
        'ISO8859_15' => '8859-15.TXT',
        'ISO8859_16' => '8859-16.TXT',
-       'KOI8R'      => 'KOI8-R.TXT',
-       'KOI8U'      => 'KOI8-U.TXT',
-       'GBK'        => 'CP936.TXT');
+       'KOI8R' => 'KOI8-R.TXT',
+       'KOI8U' => 'KOI8-U.TXT',
+       'GBK' => 'CP936.TXT');
 
 # make maps for all encodings if not specified
 my @charsets = (scalar(@ARGV) > 0) ? @ARGV : sort keys(%filename);
index fd019424fdff232b139dab147cc5f06c949d1926..77de7b1a4d7468f945112a690b5d4382ab8a6715 100644 (file)
@@ -16,10 +16,10 @@ our @EXPORT =
 
 # Constants used in the 'direction' field of the character maps
 use constant {
-       NONE         => 0,
-       TO_UNICODE   => 1,
+       NONE => 0,
+       TO_UNICODE => 1,
        FROM_UNICODE => 2,
-       BOTH         => 3
+       BOTH => 3
 };
 
 #######################################################################
@@ -53,12 +53,12 @@ sub read_source
                        exit;
                }
                my $out = {
-                       code      => hex($1),
-                       ucs       => hex($2),
-                       comment   => $4,
+                       code => hex($1),
+                       ucs => hex($2),
+                       comment => $4,
                        direction => BOTH,
-                       f         => $fname,
-                       l         => $.
+                       f => $fname,
+                       l => $.
                };
 
                # Ignore pure ASCII mappings. PostgreSQL character conversion code
@@ -124,14 +124,14 @@ sub print_conversion_tables_direction
        my $tblname;
        if ($direction == TO_UNICODE)
        {
-               $fname   = lc("${csname}_to_utf8.map");
+               $fname = lc("${csname}_to_utf8.map");
                $tblname = lc("${csname}_to_unicode_tree");
 
                print "- Writing ${csname}=>UTF8 conversion table: $fname\n";
        }
        else
        {
-               $fname   = lc("utf8_to_${csname}.map");
+               $fname = lc("utf8_to_${csname}.map");
                $tblname = lc("${csname}_from_unicode_tree");
 
                print "- Writing UTF8=>${csname} conversion table: $fname\n";
@@ -378,10 +378,10 @@ sub print_radix_table
 
        unshift @segments,
          {
-               header  => "Dummy map, for invalid values",
+               header => "Dummy map, for invalid values",
                min_idx => 0,
                max_idx => $widest_range,
-               label   => "dummy map"
+               label => "dummy map"
          };
 
        ###
@@ -397,7 +397,7 @@ sub print_radix_table
        ###
        for (my $j = 0; $j < $#segments - 1; $j++)
        {
-               my $seg     = $segments[$j];
+               my $seg = $segments[$j];
                my $nextseg = $segments[ $j + 1 ];
 
                # Count the number of zero values at the end of this segment.
@@ -527,17 +527,17 @@ sub print_radix_table
        if ($max_val <= 0xffff)
        {
                $vals_per_line = 8;
-               $colwidth      = 4;
+               $colwidth = 4;
        }
        elsif ($max_val <= 0xffffff)
        {
                $vals_per_line = 4;
-               $colwidth      = 6;
+               $colwidth = 6;
        }
        else
        {
                $vals_per_line = 4;
-               $colwidth      = 8;
+               $colwidth = 8;
        }
 
        ###
@@ -607,8 +607,10 @@ sub print_radix_table
                        # Print the next line's worth of values.
                        # XXX pad to begin at a nice boundary
                        printf $out "  /* %02x */ ", $i;
-                       for (my $j = 0;
-                               $j < $vals_per_line && $i <= $seg->{max_idx}; $j++)
+                       for (
+                               my $j = 0;
+                               $j < $vals_per_line && $i <= $seg->{max_idx};
+                               $j++)
                        {
                                # missing values represent zero.
                                my $val = $seg->{values}->{$i} || 0;
@@ -671,10 +673,10 @@ sub build_segments_recurse
                push @segments,
                  {
                        header => $header . ", leaf: ${path}xx",
-                       label  => $label,
-                       level  => $level,
-                       depth  => $depth,
-                       path   => $path,
+                       label => $label,
+                       level => $level,
+                       depth => $depth,
+                       path => $path,
                        values => $map
                  };
        }
@@ -696,10 +698,10 @@ sub build_segments_recurse
                push @segments,
                  {
                        header => $header . ", byte #$level: ${path}xx",
-                       label  => $label,
-                       level  => $level,
-                       depth  => $depth,
-                       path   => $path,
+                       label => $label,
+                       level => $level,
+                       depth => $depth,
+                       path => $path,
                        values => \%children
                  };
        }
@@ -789,12 +791,12 @@ sub make_charmap_combined
                if (defined $c->{ucs_second})
                {
                        my $entry = {
-                               utf8        => ucs2utf($c->{ucs}),
+                               utf8 => ucs2utf($c->{ucs}),
                                utf8_second => ucs2utf($c->{ucs_second}),
-                               code        => $c->{code},
-                               comment     => $c->{comment},
-                               f           => $c->{f},
-                               l           => $c->{l}
+                               code => $c->{code},
+                               comment => $c->{comment},
+                               f => $c->{f},
+                               l => $c->{l}
                        };
                        push @combined, $entry;
                }
index 67c37c49cb39c4c9f4f86a29d4579147d67aa45f..a9033b7a54d427f54ac41fbda6ab23a7775b9b1d 100644 (file)
@@ -1470,8 +1470,8 @@ check_GUC_init(struct config_generic *gconf)
        /* Flag combinations */
 
        /*
-        * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part
-        * of SHOW ALL should not be hidden in postgresql.conf.sample.
+        * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part of
+        * SHOW ALL should not be hidden in postgresql.conf.sample.
         */
        if ((gconf->flags & GUC_NO_SHOW_ALL) &&
                !(gconf->flags & GUC_NOT_IN_SAMPLE))
index 844781a7f5d5bf34e46c7551d494fb6f88d3cb25..c27eb3675831a1d563b0ae8ce68ba145dcfede0a 100644 (file)
@@ -4685,8 +4685,8 @@ struct config_enum ConfigureNamesEnum[] =
 
        {
                {"icu_validation_level", PGC_USERSET, CLIENT_CONN_LOCALE,
-                gettext_noop("Log level for reporting invalid ICU locale strings."),
-                NULL
+                       gettext_noop("Log level for reporting invalid ICU locale strings."),
+                       NULL
                },
                &icu_validation_level,
                WARNING, icu_validation_level_options,
index f5a62061a3e919d582df956dd69c1ebfeb429c34..7a3781466ed9ec41d7a639c3795a7ed7696a1d79 100644 (file)
@@ -1369,7 +1369,7 @@ init_span(dsa_area *area,
        if (DsaPointerIsValid(pool->spans[1]))
        {
                dsa_area_span *head = (dsa_area_span *)
-               dsa_get_address(area, pool->spans[1]);
+                       dsa_get_address(area, pool->spans[1]);
 
                head->prevspan = span_pointer;
        }
@@ -2215,7 +2215,7 @@ make_new_segment(dsa_area *area, size_t requested_pages)
        if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
        {
                dsa_segment_map *next =
-               get_segment_by_index(area, segment_map->header->next);
+                       get_segment_by_index(area, segment_map->header->next);
 
                Assert(next->header->bin == segment_map->header->bin);
                next->header->prev = new_index;
index 722a2e34db6790eb79f9e69f059bd9f1544a2bbb..8f9ea090faa0b337f2d53d6a38afc18f69a50693 100644 (file)
@@ -285,7 +285,7 @@ sum_free_pages(FreePageManager *fpm)
                if (!relptr_is_null(fpm->freelist[list]))
                {
                        FreePageSpanLeader *candidate =
-                       relptr_access(base, fpm->freelist[list]);
+                               relptr_access(base, fpm->freelist[list]);
 
                        do
                        {
index 42b90e4d4fae81fcbb018794f2eff0ca2d51dc28..9fc83f11f6f1ff6b56d62eaf4a28af7cfd54779f 100644 (file)
@@ -734,9 +734,9 @@ MemoryContextStatsDetail(MemoryContext context, int max_children,
                 *
                 * We don't buffer the information about all memory contexts in a
                 * backend into StringInfo and log it as one message.  That would
-                * require the buffer to be enlarged, risking an OOM as there could
-                * be a large number of memory contexts in a backend.  Instead, we
-                * log one message per memory context.
+                * require the buffer to be enlarged, risking an OOM as there could be
+                * a large number of memory contexts in a backend.  Instead, we log
+                * one message per memory context.
                 */
                ereport(LOG_SERVER_ONLY,
                                (errhidestmt(true),
index 7dec652106ff90916804a3d2da43256833d9d403..f926f1faad388c7dba08122a5d254b459e43d0f6 100644 (file)
@@ -587,7 +587,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
                while (ResourceArrayGetAny(&(owner->cryptohasharr), &foundres))
                {
                        pg_cryptohash_ctx *context =
-                       (pg_cryptohash_ctx *) DatumGetPointer(foundres);
+                               (pg_cryptohash_ctx *) DatumGetPointer(foundres);
 
                        if (isCommit)
                                PrintCryptoHashLeakWarning(foundres);
index 95c3970437d0c02c705f2f7e444a83a2acea99a7..e5a4e5b371e3527a1c47dd728dde7993ef7a5bd3 100644 (file)
@@ -1438,8 +1438,8 @@ tuplesort_performsort(Tuplesortstate *state)
                        /*
                         * We were able to accumulate all the tuples required for output
                         * in memory, using a heap to eliminate excess tuples.  Now we
-                        * have to transform the heap to a properly-sorted array.
-                        * Note that sort_bounded_heap sets the correct state->status.
+                        * have to transform the heap to a properly-sorted array. Note
+                        * that sort_bounded_heap sets the correct state->status.
                         */
                        sort_bounded_heap(state);
                        state->current = 0;
index c9ca44d8b763eeecaf85cc2c20a9d92d7d379da2..3a419e348fa18bf8820977cc6921eeebcd881a3a 100644 (file)
@@ -1990,7 +1990,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin)
                int                     bucket = (oldSnapshotControl->head_offset
                                                          + ((ts - oldSnapshotControl->head_timestamp)
                                                                 / USECS_PER_MINUTE))
-               % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+                       % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
 
                if (TransactionIdPrecedes(oldSnapshotControl->xid_by_minute[bucket], xmin))
                        oldSnapshotControl->xid_by_minute[bucket] = xmin;
@@ -2057,7 +2057,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin)
                                        /* Extend map to unused entry. */
                                        int                     new_tail = (oldSnapshotControl->head_offset
                                                                                        + oldSnapshotControl->count_used)
-                                       % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+                                               % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
 
                                        oldSnapshotControl->count_used++;
                                        oldSnapshotControl->xid_by_minute[new_tail] = xmin;
@@ -2188,7 +2188,7 @@ SerializeSnapshot(Snapshot snapshot, char *start_address)
        if (serialized_snapshot.subxcnt > 0)
        {
                Size            subxipoff = sizeof(SerializedSnapshotData) +
-               snapshot->xcnt * sizeof(TransactionId);
+                       snapshot->xcnt * sizeof(TransactionId);
 
                memcpy((TransactionId *) (start_address + subxipoff),
                           snapshot->subxip, snapshot->subxcnt * sizeof(TransactionId));
index 30b576932fd049e8f05aee667cef8eda8b51cd72..31156e863ba35fb17e54224113d83d29d1ff283c 100644 (file)
@@ -1565,8 +1565,8 @@ static void
 setup_auth(FILE *cmdfd)
 {
        /*
-        * The authid table shouldn't be readable except through views, to
-        * ensure passwords are not publicly visible.
+        * The authid table shouldn't be readable except through views, to ensure
+        * passwords are not publicly visible.
         */
        PG_CMD_PUTS("REVOKE ALL ON pg_authid FROM public;\n\n");
 
@@ -1957,9 +1957,9 @@ make_template0(FILE *cmdfd)
                                " STRATEGY = file_copy;\n\n");
 
        /*
-        * template0 shouldn't have any collation-dependent objects, so unset
-        * the collation version.  This disables collation version checks when
-        * making a new database from it.
+        * template0 shouldn't have any collation-dependent objects, so unset the
+        * collation version.  This disables collation version checks when making
+        * a new database from it.
         */
        PG_CMD_PUTS("UPDATE pg_database SET datcollversion = NULL WHERE datname = 'template0';\n\n");
 
@@ -1969,9 +1969,8 @@ make_template0(FILE *cmdfd)
        PG_CMD_PUTS("UPDATE pg_database SET datcollversion = pg_database_collation_actual_version(oid) WHERE datname = 'template1';\n\n");
 
        /*
-        * Explicitly revoke public create-schema and create-temp-table
-        * privileges in template1 and template0; else the latter would be on
-        * by default
+        * Explicitly revoke public create-schema and create-temp-table privileges
+        * in template1 and template0; else the latter would be on by default
         */
        PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n\n");
        PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n\n");
@@ -2244,11 +2243,11 @@ static char *
 icu_language_tag(const char *loc_str)
 {
 #ifdef USE_ICU
-       UErrorCode       status;
-       char             lang[ULOC_LANG_CAPACITY];
-       char            *langtag;
-       size_t           buflen = 32;   /* arbitrary starting buffer size */
-       const bool       strict = true;
+       UErrorCode      status;
+       char            lang[ULOC_LANG_CAPACITY];
+       char       *langtag;
+       size_t          buflen = 32;    /* arbitrary starting buffer size */
+       const bool      strict = true;
 
        status = U_ZERO_ERROR;
        uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status);
@@ -2264,8 +2263,8 @@ icu_language_tag(const char *loc_str)
                return pstrdup("en-US-u-va-posix");
 
        /*
-        * A BCP47 language tag doesn't have a clearly-defined upper limit
-        * (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
+        * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
+        * RFC5646 section 4.4). Additionally, in older ICU versions,
         * uloc_toLanguageTag() doesn't always return the ultimate length on the
         * first call, necessitating a loop.
         */
@@ -2298,7 +2297,7 @@ icu_language_tag(const char *loc_str)
        return langtag;
 #else
        pg_fatal("ICU is not supported in this build");
-       return NULL;            /* keep compiler quiet */
+       return NULL;                            /* keep compiler quiet */
 #endif
 }
 
@@ -2311,9 +2310,9 @@ static void
 icu_validate_locale(const char *loc_str)
 {
 #ifdef USE_ICU
-       UErrorCode       status;
-       char             lang[ULOC_LANG_CAPACITY];
-       bool             found   = false;
+       UErrorCode      status;
+       char            lang[ULOC_LANG_CAPACITY];
+       bool            found = false;
 
        /* validate that we can extract the language */
        status = U_ZERO_ERROR;
@@ -2334,8 +2333,8 @@ icu_validate_locale(const char *loc_str)
        /* search for matching language within ICU */
        for (int32_t i = 0; !found && i < uloc_countAvailable(); i++)
        {
-               const char      *otherloc = uloc_getAvailable(i);
-               char             otherlang[ULOC_LANG_CAPACITY];
+               const char *otherloc = uloc_getAvailable(i);
+               char            otherlang[ULOC_LANG_CAPACITY];
 
                status = U_ZERO_ERROR;
                uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status);
@@ -2366,10 +2365,10 @@ static char *
 default_icu_locale(void)
 {
 #ifdef USE_ICU
-       UCollator       *collator;
-       UErrorCode   status;
-       const char      *valid_locale;
-       char            *default_locale;
+       UCollator  *collator;
+       UErrorCode      status;
+       const char *valid_locale;
+       char       *default_locale;
 
        status = U_ZERO_ERROR;
        collator = ucol_open(NULL, &status);
@@ -2449,7 +2448,7 @@ setlocales(void)
 
        if (locale_provider == COLLPROVIDER_ICU)
        {
-               char *langtag;
+               char       *langtag;
 
                /* acquire default locale from the environment, if not specified */
                if (icu_locale == NULL)
index 17a444d80c5da2e501e11f4f4b03eaeb8b3d1710..fa00bb3dabebe187b75ebcc846d2e0b160476f82 100644
@@ -105,7 +105,7 @@ if ($ENV{with_icu} eq 'yes')
 {
        command_ok(
                [
-                       'initdb',                '--no-sync',
+                       'initdb', '--no-sync',
                        '--locale-provider=icu', '--icu-locale=en',
                        "$tempdir/data3"
                ],
@@ -113,7 +113,7 @@ if ($ENV{with_icu} eq 'yes')
 
        command_fails_like(
                [
-                       'initdb',                '--no-sync',
+                       'initdb', '--no-sync',
                        '--locale-provider=icu', '--icu-locale=@colNumeric=lower',
                        "$tempdir/dataX"
                ],
@@ -122,7 +122,7 @@ if ($ENV{with_icu} eq 'yes')
 
        command_fails_like(
                [
-                       'initdb',                '--no-sync',
+                       'initdb', '--no-sync',
                        '--locale-provider=icu', '--encoding=SQL_ASCII',
                        '--icu-locale=en', "$tempdir/dataX"
                ],
@@ -131,18 +131,18 @@ if ($ENV{with_icu} eq 'yes')
 
        command_fails_like(
                [
-                       'initdb',                '--no-sync',
-                       '--locale-provider=icu',
-                       '--icu-locale=nonsense-nowhere', "$tempdir/dataX"
+                       'initdb', '--no-sync',
+                       '--locale-provider=icu', '--icu-locale=nonsense-nowhere',
+                       "$tempdir/dataX"
                ],
                qr/error: locale "nonsense-nowhere" has unknown language "nonsense"/,
                'fails for nonsense language');
 
        command_fails_like(
                [
-                       'initdb',                '--no-sync',
-                       '--locale-provider=icu',
-                       '--icu-locale=@colNumeric=lower', "$tempdir/dataX"
+                       'initdb', '--no-sync',
+                       '--locale-provider=icu', '--icu-locale=@colNumeric=lower',
+                       "$tempdir/dataX"
                ],
                qr/could not open collator for locale "und-u-kn-lower": U_ILLEGAL_ARGUMENT_ERROR/,
                'fails for invalid collation argument');
@@ -160,7 +160,7 @@ command_fails(
 
 command_fails(
        [
-               'initdb',                 '--no-sync',
+               'initdb', '--no-sync',
                '--locale-provider=libc', '--icu-locale=en',
                "$tempdir/dataX"
        ],
index e3cfae9cd4ae4a5d905e453302f68f2ef89d2c33..cf2438717e18251c4660e1db8ef0f410e77e3d9f 100644
@@ -183,7 +183,7 @@ $node->command_checks_all(
 $node->command_checks_all(
        [
                'pg_amcheck', '--no-strict-names',
-               '-t',         'this.is.a.really.long.dotted.string'
+               '-t', 'this.is.a.really.long.dotted.string'
        ],
        2,
        [qr/^$/],
@@ -252,20 +252,20 @@ $node->command_checks_all(
 $node->command_checks_all(
        [
                'pg_amcheck', '--no-strict-names',
-               '-t',         'no_such_table',
-               '-t',         'no*such*table',
-               '-i',         'no_such_index',
-               '-i',         'no*such*index',
-               '-r',         'no_such_relation',
-               '-r',         'no*such*relation',
-               '-d',         'no_such_database',
-               '-d',         'no*such*database',
-               '-r',         'none.none',
-               '-r',         'none.none.none',
-               '-r',         'postgres.none.none',
-               '-r',         'postgres.pg_catalog.none',
-               '-r',         'postgres.none.pg_class',
-               '-t',         'postgres.pg_catalog.pg_class',    # This exists
+               '-t', 'no_such_table',
+               '-t', 'no*such*table',
+               '-i', 'no_such_index',
+               '-i', 'no*such*index',
+               '-r', 'no_such_relation',
+               '-r', 'no*such*relation',
+               '-d', 'no_such_database',
+               '-d', 'no*such*database',
+               '-r', 'none.none',
+               '-r', 'none.none.none',
+               '-r', 'postgres.none.none',
+               '-r', 'postgres.pg_catalog.none',
+               '-r', 'postgres.none.pg_class',
+               '-t', 'postgres.pg_catalog.pg_class',    # This exists
        ],
        0,
        [qr/^$/],
@@ -304,13 +304,13 @@ $node->safe_psql('postgres', q(CREATE DATABASE another_db));
 $node->command_checks_all(
        [
                'pg_amcheck', '-d',
-               'postgres',   '--no-strict-names',
-               '-t',         'template1.public.foo',
-               '-t',         'another_db.public.foo',
-               '-t',         'no_such_database.public.foo',
-               '-i',         'template1.public.foo_idx',
-               '-i',         'another_db.public.foo_idx',
-               '-i',         'no_such_database.public.foo_idx',
+               'postgres', '--no-strict-names',
+               '-t', 'template1.public.foo',
+               '-t', 'another_db.public.foo',
+               '-t', 'no_such_database.public.foo',
+               '-i', 'template1.public.foo_idx',
+               '-i', 'another_db.public.foo_idx',
+               '-i', 'no_such_database.public.foo_idx',
        ],
        1,
        [qr/^$/],
@@ -334,8 +334,8 @@ $node->command_checks_all(
 $node->command_checks_all(
        [
                'pg_amcheck', '--all', '--no-strict-names', '-S',
-               'public',     '-S',    'pg_catalog',        '-S',
-               'pg_toast',   '-S',    'information_schema',
+               'public', '-S', 'pg_catalog', '-S',
+               'pg_toast', '-S', 'information_schema',
        ],
        1,
        [qr/^$/],
@@ -348,9 +348,9 @@ $node->command_checks_all(
 # Check with schema exclusion patterns overriding relation and schema inclusion patterns
 $node->command_checks_all(
        [
-               'pg_amcheck',          '--all', '--no-strict-names',  '-s',
-               'public',              '-s',    'pg_catalog',         '-s',
-               'pg_toast',            '-s',    'information_schema', '-t',
+               'pg_amcheck', '--all', '--no-strict-names', '-s',
+               'public', '-s', 'pg_catalog', '-s',
+               'pg_toast', '-s', 'information_schema', '-t',
                'pg_catalog.pg_class', '-S*'
        ],
        1,
index 359abe25a1c8eefb9184a7b2b4fcd9490311198a..d577cffa30df32da3c3ba9f0a8b3d4c328e9fe21 100644
@@ -319,7 +319,7 @@ plan_to_remove_relation_file('db2', 's1.t1_btree');
 my @cmd = ('pg_amcheck', '-p', $port);
 
 # Regular expressions to match various expected output
-my $no_output_re               = qr/^$/;
+my $no_output_re = qr/^$/;
 my $line_pointer_corruption_re = qr/line pointer/;
 my $missing_file_re = qr/could not open file ".*": No such file or directory/;
 my $index_missing_relation_fork_re =
index aa62422316956035f96772e666b9d2655b0a4eef..1b5027c4204b68a3006622763fbe4958336c6c20 100644
@@ -105,31 +105,31 @@ sub read_tuple
 
        @_ = unpack(HEAPTUPLE_PACK_CODE, $buffer);
        %tup = (
-               t_xmin          => shift,
-               t_xmax          => shift,
-               t_field3        => shift,
-               bi_hi           => shift,
-               bi_lo           => shift,
-               ip_posid        => shift,
-               t_infomask2     => shift,
-               t_infomask      => shift,
-               t_hoff          => shift,
-               t_bits          => shift,
-               a_1             => shift,
-               a_2             => shift,
-               b_header        => shift,
-               b_body1         => shift,
-               b_body2         => shift,
-               b_body3         => shift,
-               b_body4         => shift,
-               b_body5         => shift,
-               b_body6         => shift,
-               b_body7         => shift,
-               c_va_header     => shift,
-               c_va_vartag     => shift,
-               c_va_rawsize    => shift,
-               c_va_extinfo    => shift,
-               c_va_valueid    => shift,
+               t_xmin => shift,
+               t_xmax => shift,
+               t_field3 => shift,
+               bi_hi => shift,
+               bi_lo => shift,
+               ip_posid => shift,
+               t_infomask2 => shift,
+               t_infomask => shift,
+               t_hoff => shift,
+               t_bits => shift,
+               a_1 => shift,
+               a_2 => shift,
+               b_header => shift,
+               b_body1 => shift,
+               b_body2 => shift,
+               b_body3 => shift,
+               b_body4 => shift,
+               b_body5 => shift,
+               b_body6 => shift,
+               b_body7 => shift,
+               c_va_header => shift,
+               c_va_vartag => shift,
+               c_va_rawsize => shift,
+               c_va_extinfo => shift,
+               c_va_valueid => shift,
                c_va_toastrelid => shift);
        # Stitch together the text for column 'b'
        $tup{b} = join('', map { chr($tup{"b_body$_"}) } (1 .. 7));
@@ -151,17 +151,17 @@ sub write_tuple
        my ($fh, $offset, $tup) = @_;
        my $buffer = pack(
                HEAPTUPLE_PACK_CODE,
-               $tup->{t_xmin},       $tup->{t_xmax},
-               $tup->{t_field3},     $tup->{bi_hi},
-               $tup->{bi_lo},        $tup->{ip_posid},
-               $tup->{t_infomask2},  $tup->{t_infomask},
-               $tup->{t_hoff},       $tup->{t_bits},
-               $tup->{a_1},          $tup->{a_2},
-               $tup->{b_header},     $tup->{b_body1},
-               $tup->{b_body2},      $tup->{b_body3},
-               $tup->{b_body4},      $tup->{b_body5},
-               $tup->{b_body6},      $tup->{b_body7},
-               $tup->{c_va_header},  $tup->{c_va_vartag},
+               $tup->{t_xmin}, $tup->{t_xmax},
+               $tup->{t_field3}, $tup->{bi_hi},
+               $tup->{bi_lo}, $tup->{ip_posid},
+               $tup->{t_infomask2}, $tup->{t_infomask},
+               $tup->{t_hoff}, $tup->{t_bits},
+               $tup->{a_1}, $tup->{a_2},
+               $tup->{b_header}, $tup->{b_body1},
+               $tup->{b_body2}, $tup->{b_body3},
+               $tup->{b_body4}, $tup->{b_body5},
+               $tup->{b_body6}, $tup->{b_body7},
+               $tup->{c_va_header}, $tup->{c_va_vartag},
                $tup->{c_va_rawsize}, $tup->{c_va_extinfo},
                $tup->{c_va_valueid}, $tup->{c_va_toastrelid});
        sysseek($fh, $offset, 0)
@@ -188,7 +188,7 @@ $node->append_conf('postgresql.conf', 'max_prepared_transactions=10');
 # Start the node and load the extensions.  We depend on both
 # amcheck and pageinspect for this test.
 $node->start;
-my $port   = $node->port;
+my $port = $node->port;
 my $pgdata = $node->data_dir;
 $node->safe_psql('postgres', "CREATE EXTENSION amcheck");
 $node->safe_psql('postgres', "CREATE EXTENSION pageinspect");
@@ -354,23 +354,23 @@ binmode $file;
 my $ENDIANNESS;
 for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
 {
-       my $offnum = $tupidx + 1;        # offnum is 1-based, not zero-based
+       my $offnum = $tupidx + 1;    # offnum is 1-based, not zero-based
        my $offset = $lp_off[$tupidx];
-       next if $offset == -1;                   # ignore redirect line pointers
+       next if $offset == -1;       # ignore redirect line pointers
        my $tup = read_tuple($file, $offset);
 
        # Sanity-check that the data appears on the page where we expect.
        my $a_1 = $tup->{a_1};
        my $a_2 = $tup->{a_2};
-       my $b   = $tup->{b};
+       my $b = $tup->{b};
        if ($a_1 != 0xDEADF9F9 || $a_2 != 0xDEADF9F9 || $b ne 'abcdefg')
        {
                close($file);    # ignore errors on close; we're exiting anyway
                $node->clean_node;
                plan skip_all =>
                  sprintf(
-                       "Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")", $tupidx,
-                       0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
+                       "Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")",
+                       $tupidx, 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
                exit;
        }
 
@@ -395,18 +395,18 @@ $node->command_ok([ 'pg_amcheck', '-p', $port, 'postgres' ],
 $node->stop;
 
 # Some #define constants from access/htup_details.h for use while corrupting.
-use constant HEAP_HASNULL        => 0x0001;
+use constant HEAP_HASNULL => 0x0001;
 use constant HEAP_XMAX_LOCK_ONLY => 0x0080;
 use constant HEAP_XMIN_COMMITTED => 0x0100;
-use constant HEAP_XMIN_INVALID   => 0x0200;
+use constant HEAP_XMIN_INVALID => 0x0200;
 use constant HEAP_XMAX_COMMITTED => 0x0400;
-use constant HEAP_XMAX_INVALID   => 0x0800;
-use constant HEAP_NATTS_MASK     => 0x07FF;
-use constant HEAP_XMAX_IS_MULTI  => 0x1000;
-use constant HEAP_KEYS_UPDATED   => 0x2000;
-use constant HEAP_HOT_UPDATED    => 0x4000;
-use constant HEAP_ONLY_TUPLE     => 0x8000;
-use constant HEAP_UPDATED        => 0x2000;
+use constant HEAP_XMAX_INVALID => 0x0800;
+use constant HEAP_NATTS_MASK => 0x07FF;
+use constant HEAP_XMAX_IS_MULTI => 0x1000;
+use constant HEAP_KEYS_UPDATED => 0x2000;
+use constant HEAP_HOT_UPDATED => 0x4000;
+use constant HEAP_ONLY_TUPLE => 0x8000;
+use constant HEAP_UPDATED => 0x2000;
 
 # Helper function to generate a regular expression matching the header we
 # expect verify_heapam() to return given which fields we expect to be non-null.
@@ -436,7 +436,7 @@ binmode $file;
 
 for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
 {
-       my $offnum = $tupidx + 1;        # offnum is 1-based, not zero-based
+       my $offnum = $tupidx + 1;    # offnum is 1-based, not zero-based
        my $offset = $lp_off[$tupidx];
        my $header = header(0, $offnum, undef);
 
@@ -534,7 +534,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
                # Corrupt the tuple to look like it has lots of attributes, some of
                # them null.  This falsely creates the impression that the t_bits
                # array is longer than just one byte, but t_hoff still says otherwise.
-               $tup->{t_infomask}  |= HEAP_HASNULL;
+               $tup->{t_infomask} |= HEAP_HASNULL;
                $tup->{t_infomask2} |= HEAP_NATTS_MASK;
                $tup->{t_bits} = 0xAA;
 
@@ -544,7 +544,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
        elsif ($offnum == 11)
        {
                # Same as above, but this time t_hoff plays along
-               $tup->{t_infomask}  |= HEAP_HASNULL;
+               $tup->{t_infomask} |= HEAP_HASNULL;
                $tup->{t_infomask2} |= (HEAP_NATTS_MASK & 0x40);
                $tup->{t_bits} = 0xAA;
                $tup->{t_hoff} = 32;
@@ -568,9 +568,9 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
                # bytes with 0xFF using 0x3FFFFFFF.
                #
                $tup->{b_header} = $ENDIANNESS eq 'little' ? 0xFC : 0x3F;
-               $tup->{b_body1}  = 0xFF;
-               $tup->{b_body2}  = 0xFF;
-               $tup->{b_body3}  = 0xFF;
+               $tup->{b_body1} = 0xFF;
+               $tup->{b_body2} = 0xFF;
+               $tup->{b_body3} = 0xFF;
 
                $header = header(0, $offnum, 1);
                push @expected,
@@ -620,7 +620,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
                # at offnum 19 we will unset HEAP_ONLY_TUPLE flag
                die "offnum $offnum should be a redirect" if defined $tup;
                push @expected,
-                       qr/${header}redirected line pointer points to a non-heap-only tuple at offset \d+/;
+                 qr/${header}redirected line pointer points to a non-heap-only tuple at offset \d+/;
        }
        elsif ($offnum == 18)
        {
@@ -628,8 +628,8 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
                die "offnum $offnum should be a redirect" if defined $tup;
                sysseek($file, 92, 0) or BAIL_OUT("sysseek failed: $!");
                syswrite($file,
-                                pack("L", $ENDIANNESS eq 'little' ? 0x00010011 : 0x00230000))
-                       or BAIL_OUT("syswrite failed: $!");
+                       pack("L", $ENDIANNESS eq 'little' ? 0x00010011 : 0x00230000))
+                 or BAIL_OUT("syswrite failed: $!");
                push @expected,
                  qr/${header}redirected line pointer points to another redirected line pointer at offset \d+/;
        }
@@ -644,8 +644,8 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
                # rewrite line pointer with lp.off = 25, lp_flags = 2, lp_len = 0
                sysseek($file, 108, 0) or BAIL_OUT("sysseek failed: $!");
                syswrite($file,
-                                pack("L", $ENDIANNESS eq 'little' ? 0x00010019 : 0x00330000))
-                       or BAIL_OUT("syswrite failed: $!");
+                       pack("L", $ENDIANNESS eq 'little' ? 0x00010019 : 0x00330000))
+                 or BAIL_OUT("syswrite failed: $!");
                push @expected,
                  qr/${header}redirect line pointer points to offset \d+, but offset \d+ also points there/;
        }
@@ -756,7 +756,7 @@ $node->command_checks_all(
        [ 'pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres' ],
        2, [@expected], [], 'Expected corruption message output');
 $node->safe_psql(
-        'postgres', qq(
+       'postgres', qq(
                         COMMIT PREPARED 'in_progress_tx';
         ));
 
index 76321d12844494b9cafdb4b4ffcb77be7ea11f6e..cc3386d1464e1ab3d49701a1f30197565fe744a9 100644
@@ -14,7 +14,7 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir;
 
 my @walfiles = (
        '00000001000000370000000C.gz', '00000001000000370000000D',
-       '00000001000000370000000E',    '00000001000000370000000F.partial',);
+       '00000001000000370000000E', '00000001000000370000000F.partial',);
 
 sub create_files
 {
@@ -57,8 +57,10 @@ command_fails_like(
 {
        # like command_like but checking stderr
        my $stderr;
-       my $result = IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
-               $walfiles[2] ], '2>', \$stderr;
+       my $result =
+         IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
+               $walfiles[2] ],
+         '2>', \$stderr;
        ok($result, "pg_archivecleanup dry run: exit code 0");
        like(
                $stderr,
@@ -98,8 +100,8 @@ sub run_check
        return;
 }
 
-run_check('',                 'pg_archivecleanup');
-run_check('.partial',         'pg_archivecleanup with .partial file');
+run_check('', 'pg_archivecleanup');
+run_check('.partial', 'pg_archivecleanup with .partial file');
 run_check('.00000020.backup', 'pg_archivecleanup with .backup file');
 
 done_testing();
index ba471f898c1c5b8fa3720ae38ccd3e18d28e93cc..1dc8efe0cb7be11dbe32e7f74ef1426e106c6cdf 100644
@@ -341,18 +341,18 @@ tablespace_list_append(const char *arg)
 
        /*
         * All tablespaces are created with absolute directories, so specifying a
-        * non-absolute path here would just never match, possibly confusing users.
-        * Since we don't know whether the remote side is Windows or not, and it
-        * might be different than the local side, permit any path that could be
-        * absolute under either set of rules.
+        * non-absolute path here would just never match, possibly confusing
+        * users. Since we don't know whether the remote side is Windows or not,
+        * and it might be different than the local side, permit any path that
+        * could be absolute under either set of rules.
         *
         * (There is little practical risk of confusion here, because someone
         * running entirely on Linux isn't likely to have a relative path that
         * begins with a backslash or something that looks like a drive
-        * specification. If they do, and they also incorrectly believe that
-        * a relative path is acceptable here, we'll silently fail to warn them
-        * of their mistake, and the -T option will just not get applied, same
-        * as if they'd specified -T for a nonexistent tablespace.)
+        * specification. If they do, and they also incorrectly believe that a
+        * relative path is acceptable here, we'll silently fail to warn them of
+        * their mistake, and the -T option will just not get applied, same as if
+        * they'd specified -T for a nonexistent tablespace.)
         */
        if (!is_nonwindows_absolute_path(cell->old_dir) &&
                !is_windows_absolute_path(cell->old_dir))
index fb9e29682b1c47f2c619c9abcb8b9bf5488645d8..d0a4079d50697ab6404664db24b4ee755013f88f 100644
@@ -43,7 +43,7 @@
 static char *basedir = NULL;
 static int     verbose = 0;
 static int     compresslevel = 0;
-static bool    noloop = false;
+static bool noloop = false;
 static int     standby_message_timeout = 10 * 1000;    /* 10 sec = default */
 static volatile sig_atomic_t time_to_stop = false;
 static bool do_create_slot = false;
index 4d130a7f9446dfb34fc84b49d4913af81ca7686e..793d64863c764b67e7b371eafb9813b7ca590945 100644
@@ -4,7 +4,7 @@
 use strict;
 use warnings;
 use File::Basename qw(basename dirname);
-use File::Path qw(rmtree);
+use File::Path     qw(rmtree);
 use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::Utils;
 use Test::More;
@@ -29,7 +29,7 @@ umask(0077);
 
 # Initialize node without replication settings
 $node->init(
-       extra      => ['--data-checksums'],
+       extra => ['--data-checksums'],
        auth_extra => [ '--create-role', 'backupuser' ]);
 $node->start;
 my $pgdata = $node->data_dir;
@@ -144,8 +144,7 @@ SKIP:
                        'gzip:long',
                        'invalid compression specification: compression algorithm "gzip" does not support long-distance mode',
                        'failure on long mode for gzip'
-               ],
-       );
+               ],);
 
        for my $cft (@compression_failure_tests)
        {
@@ -153,7 +152,7 @@ SKIP:
                my $sfail = quotemeta($server_fails . $cft->[1]);
                $node->command_fails_like(
                        [
-                               'pg_basebackup',   '-D',
+                               'pg_basebackup', '-D',
                                "$tempdir/backup", '--compress',
                                $cft->[0]
                        ],
@@ -161,7 +160,7 @@ SKIP:
                        'client ' . $cft->[2]);
                $node->command_fails_like(
                        [
-                               'pg_basebackup',   '-D',
+                               'pg_basebackup', '-D',
                                "$tempdir/backup", '--compress',
                                'server-' . $cft->[0]
                        ],
@@ -193,7 +192,7 @@ my $baseUnloggedPath = $node->safe_psql('postgres',
 
 # Make sure main and init forks exist
 ok(-f "$pgdata/${baseUnloggedPath}_init", 'unlogged init fork in base');
-ok(-f "$pgdata/$baseUnloggedPath",        'unlogged main fork in base');
+ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base');
 
 # Create files that look like temporary relations to ensure they are ignored.
 my $postgresOid = $node->safe_psql('postgres',
@@ -211,7 +210,7 @@ foreach my $filename (@tempRelationFiles)
 $node->command_ok(
        [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
        'pg_basebackup runs');
-ok(-f "$tempdir/backup/PG_VERSION",      'backup was created');
+ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
 ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
 
 # Permissions on backup should be default
@@ -274,13 +273,13 @@ unlink("$pgdata/backup_label")
 $node->command_ok(
        [
                @pg_basebackup_defs, '-D',
-               "$tempdir/backup2",  '--no-manifest',
-               '--waldir',          "$tempdir/xlog2"
+               "$tempdir/backup2", '--no-manifest',
+               '--waldir', "$tempdir/xlog2"
        ],
        'separate xlog directory');
-ok(-f "$tempdir/backup2/PG_VERSION",       'backup was created');
+ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
 ok(!-f "$tempdir/backup2/backup_manifest", 'manifest was suppressed');
-ok(-d "$tempdir/xlog2/",                   'xlog directory was created');
+ok(-d "$tempdir/xlog2/", 'xlog directory was created');
 rmtree("$tempdir/backup2");
 rmtree("$tempdir/xlog2");
 
@@ -346,7 +345,7 @@ $node->start;
 # to our physical temp location.  That way we can use shorter names
 # for the tablespace directories, which hopefully won't run afoul of
 # the 99 character length limit.
-my $sys_tempdir      = PostgreSQL::Test::Utils::tempdir_short;
+my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
 my $real_sys_tempdir = "$sys_tempdir/tempdir";
 dir_symlink "$tempdir", $real_sys_tempdir;
 
@@ -355,7 +354,7 @@ my $realTsDir = "$real_sys_tempdir/tblspc1";
 $node->safe_psql('postgres',
        "CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';");
 $node->safe_psql('postgres',
-           "CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
+               "CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
          . "INSERT INTO test1 VALUES (1234);");
 $node->backup('tarbackup2', backup_options => ['-Ft']);
 # empty test1, just so that it's different from the to-be-restored data
@@ -363,7 +362,7 @@ $node->safe_psql('postgres', "TRUNCATE TABLE test1;");
 
 # basic checks on the output
 my $backupdir = $node->backup_dir . '/tarbackup2';
-ok(-f "$backupdir/base.tar",   'backup tar was created');
+ok(-f "$backupdir/base.tar", 'backup tar was created');
 ok(-f "$backupdir/pg_wal.tar", 'WAL tar was created');
 my @tblspc_tars = glob "$backupdir/[0-9]*.tar";
 is(scalar(@tblspc_tars), 1, 'one tablespace tar was created');
@@ -385,7 +384,7 @@ SKIP:
        $node2->init_from_backup($node, 'tarbackup2', tar_program => $tar);
 
        # Recover tablespace into a new directory (not where it was!)
-       my $repTsDir     = "$tempdir/tblspc1replica";
+       my $repTsDir = "$tempdir/tblspc1replica";
        my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
        mkdir $repTsDir;
        PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0],
@@ -394,7 +393,7 @@ SKIP:
        # Update tablespace map to point to new directory.
        # XXX Ideally pg_basebackup would handle this.
        $tblspc_tars[0] =~ m|/([0-9]*)\.tar$|;
-       my $tblspcoid       = $1;
+       my $tblspcoid = $1;
        my $escapedRepTsDir = $realRepTsDir;
        $escapedRepTsDir =~ s/\\/\\\\/g;
        open my $mapfile, '>', $node2->data_dir . '/tablespace_map';
@@ -442,7 +441,7 @@ $node->command_fails(
 $node->command_ok(
        [
                @pg_basebackup_defs, '-D',
-               "$tempdir/backup1",  '-Fp',
+               "$tempdir/backup1", '-Fp',
                "-T$realTsDir=$tempdir/tbackup/tblspc1",
        ],
        'plain format with tablespaces succeeds with tablespace mapping');
@@ -512,7 +511,7 @@ $realTsDir =~ s/=/\\=/;
 $node->command_ok(
        [
                @pg_basebackup_defs, '-D',
-               "$tempdir/backup3",  '-Fp',
+               "$tempdir/backup3", '-Fp',
                "-T$realTsDir=$tempdir/tbackup/tbl\\=spc2",
        ],
        'mapping tablespace with = sign in path');
@@ -533,7 +532,7 @@ rmtree("$tempdir/tarbackup_l3");
 $node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ],
        'pg_basebackup -R runs');
 ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
-ok(-f "$tempdir/backupR/standby.signal",       'standby.signal was created');
+ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
 my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
 rmtree("$tempdir/backupR");
 
@@ -572,9 +571,9 @@ ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
 rmtree("$tempdir/backupxst");
 $node->command_ok(
        [
-               @pg_basebackup_defs,     '-D',
+               @pg_basebackup_defs, '-D',
                "$tempdir/backupnoslot", '-X',
-               'stream',                '--no-slot'
+               'stream', '--no-slot'
        ],
        'pg_basebackup -X stream runs with --no-slot');
 rmtree("$tempdir/backupnoslot");
@@ -597,7 +596,7 @@ $node->command_fails_like(
 $node->command_fails_like(
        [
                @pg_basebackup_defs, '--target', 'blackhole', '-X',
-               'none',              '-D',       "$tempdir/blackhole"
+               'none', '-D', "$tempdir/blackhole"
        ],
        qr/cannot specify both output directory and backup target/,
        'backup target and output directory');
@@ -610,7 +609,7 @@ $node->command_ok(
        'backup target blackhole');
 $node->command_ok(
        [
-               @pg_basebackup_defs,              '--target',
+               @pg_basebackup_defs, '--target',
                "server:$tempdir/backuponserver", '-X',
                'none'
        ],
@@ -634,9 +633,9 @@ rmtree("$tempdir/backuponserver");
 
 $node->command_fails(
        [
-               @pg_basebackup_defs,         '-D',
+               @pg_basebackup_defs, '-D',
                "$tempdir/backupxs_sl_fail", '-X',
-               'stream',                    '-S',
+               'stream', '-S',
                'slot0'
        ],
        'pg_basebackup fails with nonexistent replication slot');
@@ -647,9 +646,9 @@ $node->command_fails(
 
 $node->command_fails(
        [
-               @pg_basebackup_defs,      '-D',
+               @pg_basebackup_defs, '-D',
                "$tempdir/backupxs_slot", '-C',
-               '-S',                     'slot0',
+               '-S', 'slot0',
                '--no-slot'
        ],
        'pg_basebackup fails with -C -S --no-slot');
@@ -667,9 +666,9 @@ $node->command_ok(
 
 $node->command_fails(
        [
-               @pg_basebackup_defs,         '-D',
+               @pg_basebackup_defs, '-D',
                "$tempdir/backupxs_sl_fail", '-X',
-               'stream',                    '-S',
+               'stream', '-S',
                'slot0'
        ],
        'pg_basebackup fails with nonexistent replication slot');
@@ -680,18 +679,18 @@ $node->command_fails(
 
 $node->command_fails(
        [
-               @pg_basebackup_defs,      '-D',
+               @pg_basebackup_defs, '-D',
                "$tempdir/backupxs_slot", '-C',
-               '-S',                     'slot0',
+               '-S', 'slot0',
                '--no-slot'
        ],
        'pg_basebackup fails with -C -S --no-slot');
 
 $node->command_ok(
        [
-               @pg_basebackup_defs,      '-D',
+               @pg_basebackup_defs, '-D',
                "$tempdir/backupxs_slot", '-C',
-               '-S',                     'slot0'
+               '-S', 'slot0'
        ],
        'pg_basebackup -C runs');
 rmtree("$tempdir/backupxs_slot");
@@ -712,9 +711,9 @@ isnt(
 
 $node->command_fails(
        [
-               @pg_basebackup_defs,       '-D',
+               @pg_basebackup_defs, '-D',
                "$tempdir/backupxs_slot1", '-C',
-               '-S',                      'slot0'
+               '-S', 'slot0'
        ],
        'pg_basebackup fails with -C -S and a previously existing slot');
 
@@ -727,13 +726,13 @@ is($lsn, '', 'restart LSN of new slot is null');
 $node->command_fails(
        [
                @pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
-               'slot1',             '-X', 'none'
+               'slot1', '-X', 'none'
        ],
        'pg_basebackup with replication slot fails without WAL streaming');
 $node->command_ok(
        [
                @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X',
-               'stream',            '-S', 'slot1'
+               'stream', '-S', 'slot1'
        ],
        'pg_basebackup -X stream with replication slot runs');
 $lsn = $node->safe_psql('postgres',
@@ -745,7 +744,7 @@ rmtree("$tempdir/backupxs_sl");
 $node->command_ok(
        [
                @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X',
-               'stream',            '-S', 'slot1',                  '-R',
+               'stream', '-S', 'slot1', '-R',
        ],
        'pg_basebackup with replication slot and -R runs');
 like(
@@ -813,7 +812,7 @@ rmtree("$tempdir/backup_corrupt3");
 # do not verify checksums, should return ok
 $node->command_ok(
        [
-               @pg_basebackup_defs,        '-D',
+               @pg_basebackup_defs, '-D',
                "$tempdir/backup_corrupt4", '--no-verify-checksums',
        ],
        'pg_basebackup with -k does not report checksum mismatch');
@@ -832,24 +831,24 @@ SKIP:
 
        $node->command_ok(
                [
-                       @pg_basebackup_defs,    '-D',
+                       @pg_basebackup_defs, '-D',
                        "$tempdir/backup_gzip", '--compress',
-                       '1',                    '--format',
+                       '1', '--format',
                        't'
                ],
                'pg_basebackup with --compress');
        $node->command_ok(
                [
-                       @pg_basebackup_defs,     '-D',
+                       @pg_basebackup_defs, '-D',
                        "$tempdir/backup_gzip2", '--gzip',
-                       '--format',              't'
+                       '--format', 't'
                ],
                'pg_basebackup with --gzip');
        $node->command_ok(
                [
-                       @pg_basebackup_defs,     '-D',
+                       @pg_basebackup_defs, '-D',
                        "$tempdir/backup_gzip3", '--compress',
-                       'gzip:1',                '--format',
+                       'gzip:1', '--format',
                        't'
                ],
                'pg_basebackup with --compress=gzip:1');
@@ -895,8 +894,8 @@ my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', '');
 my $sigchld_bb = IPC::Run::start(
        [
                @pg_basebackup_defs, '--wal-method=stream',
-               '-D',                "$tempdir/sigchld",
-               '--max-rate=32',     '-d',
+               '-D', "$tempdir/sigchld",
+               '--max-rate=32', '-d',
                $node->connstr('postgres')
        ],
        '<',
@@ -916,16 +915,17 @@ is( $node->poll_query_until(
        "Walsender killed");
 
 ok( pump_until(
-               $sigchld_bb,         $sigchld_bb_timeout,
+               $sigchld_bb, $sigchld_bb_timeout,
                \$sigchld_bb_stderr, qr/background process terminated unexpectedly/),
        'background process exit message');
 $sigchld_bb->finish();
 
 # Test that we can back up an in-place tablespace
 $node->safe_psql('postgres',
-       "SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';");
+       "SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';"
+);
 $node->safe_psql('postgres',
-           "CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
+               "CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
          . "INSERT INTO test2 VALUES (1234);");
 my $tblspc_oid = $node->safe_psql('postgres',
        "SELECT oid FROM pg_tablespace WHERE spcname = 'tblspc2';");
index 50ac4f94ec7ede549644693d4da899f779f84e87..374f090a8b6dd840a59fcbc3e0d81b9e2db058d3 100644
@@ -66,8 +66,8 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (1);');
 # compression involved.
 $primary->command_ok(
        [
-               'pg_receivewal', '-D',     $stream_dir,     '--verbose',
-               '--endpos',      $nextlsn, '--synchronous', '--no-loop'
+               'pg_receivewal', '-D', $stream_dir, '--verbose',
+               '--endpos', $nextlsn, '--synchronous', '--no-loop'
        ],
        'streaming some WAL with --synchronous');
 
@@ -92,8 +92,8 @@ SKIP:
 
        $primary->command_ok(
                [
-                       'pg_receivewal', '-D',     $stream_dir,  '--verbose',
-                       '--endpos',      $nextlsn, '--compress', 'gzip:1',
+                       'pg_receivewal', '-D', $stream_dir, '--verbose',
+                       '--endpos', $nextlsn, '--compress', 'gzip:1',
                        '--no-loop'
                ],
                "streaming some WAL using ZLIB compression");
@@ -145,8 +145,8 @@ SKIP:
        # Stream up to the given position.
        $primary->command_ok(
                [
-                       'pg_receivewal', '-D',     $stream_dir, '--verbose',
-                       '--endpos',      $nextlsn, '--no-loop', '--compress',
+                       'pg_receivewal', '-D', $stream_dir, '--verbose',
+                       '--endpos', $nextlsn, '--no-loop', '--compress',
                        'lz4'
                ],
                'streaming some WAL using --compress=lz4');
@@ -191,8 +191,8 @@ chomp($nextlsn);
 $primary->psql('postgres', 'INSERT INTO test_table VALUES (4);');
 $primary->command_ok(
        [
-               'pg_receivewal', '-D',     $stream_dir, '--verbose',
-               '--endpos',      $nextlsn, '--no-loop'
+               'pg_receivewal', '-D', $stream_dir, '--verbose',
+               '--endpos', $nextlsn, '--no-loop'
        ],
        "streaming some WAL");
 
@@ -247,17 +247,17 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (6);');
 # Check case where the slot does not exist.
 $primary->command_fails_like(
        [
-               'pg_receivewal',   '-D', $slot_dir,   '--slot',
+               'pg_receivewal', '-D', $slot_dir, '--slot',
                'nonexistentslot', '-n', '--no-sync', '--verbose',
-               '--endpos',        $nextlsn
+               '--endpos', $nextlsn
        ],
        qr/pg_receivewal: error: replication slot "nonexistentslot" does not exist/,
        'pg_receivewal fails with non-existing slot');
 $primary->command_ok(
        [
-               'pg_receivewal', '-D', $slot_dir,   '--slot',
-               $slot_name,      '-n', '--no-sync', '--verbose',
-               '--endpos',      $nextlsn
+               'pg_receivewal', '-D', $slot_dir, '--slot',
+               $slot_name, '-n', '--no-sync', '--verbose',
+               '--endpos', $nextlsn
        ],
        "WAL streamed from the slot's restart_lsn");
 ok(-e "$slot_dir/$walfile_streamed",
@@ -281,7 +281,7 @@ $standby->psql(
 $primary->wait_for_catchup($standby);
 # Get a walfilename from before the promotion to make sure it is archived
 # after promotion
-my $standby_slot         = $standby->slot($archive_slot);
+my $standby_slot = $standby->slot($archive_slot);
 my $replication_slot_lsn = $standby_slot->{'restart_lsn'};
 
 # pg_walfile_name() is not supported while in recovery, so use the primary
@@ -311,9 +311,9 @@ mkdir($timeline_dir);
 
 $standby->command_ok(
        [
-               'pg_receivewal', '-D',     $timeline_dir, '--verbose',
-               '--endpos',      $nextlsn, '--slot',      $archive_slot,
-               '--no-sync',     '-n'
+               'pg_receivewal', '-D', $timeline_dir, '--verbose',
+               '--endpos', $nextlsn, '--slot', $archive_slot,
+               '--no-sync', '-n'
        ],
        "Stream some wal after promoting, resuming from the slot's position");
 ok(-e "$timeline_dir/$walfile_before_promotion",
index 6947d12ca8643c8dbf850b4191d0d30002de706c..62dca5b67a6ce6cc17b8c9f845e60387208654a1 100644
@@ -34,16 +34,16 @@ $node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ],
        'pg_recvlogical needs an action');
 $node->command_fails(
        [
-               'pg_recvlogical',           '-S',
-               'test',                     '-d',
+               'pg_recvlogical', '-S',
+               'test', '-d',
                $node->connstr('postgres'), '--start'
        ],
        'no destination file');
 
 $node->command_ok(
        [
-               'pg_recvlogical',           '-S',
-               'test',                     '-d',
+               'pg_recvlogical', '-S',
+               'test', '-d',
                $node->connstr('postgres'), '--create-slot'
        ],
        'slot created');
@@ -67,8 +67,8 @@ $node->command_ok(
 
 $node->command_ok(
        [
-               'pg_recvlogical',           '-S',
-               'test',                     '-d',
+               'pg_recvlogical', '-S',
+               'test', '-d',
                $node->connstr('postgres'), '--drop-slot'
        ],
        'slot dropped');
@@ -76,8 +76,8 @@ $node->command_ok(
 #test with two-phase option enabled
 $node->command_ok(
        [
-               'pg_recvlogical',           '-S',
-               'test',                     '-d',
+               'pg_recvlogical', '-S',
+               'test', '-d',
                $node->connstr('postgres'), '--create-slot',
                '--two-phase'
        ],
@@ -94,12 +94,12 @@ chomp($nextlsn);
 
 $node->command_fails(
        [
-               'pg_recvlogical',           '-S',
-               'test',                     '-d',
+               'pg_recvlogical', '-S',
+               'test', '-d',
                $node->connstr('postgres'), '--start',
-               '--endpos',                 "$nextlsn",
-               '--two-phase',              '--no-loop',
-               '-f',                       '-'
+               '--endpos', "$nextlsn",
+               '--two-phase', '--no-loop',
+               '-f', '-'
        ],
        'incorrect usage');
 
index 1934b7dd46c7ae4cda54dfb058c7ff4bd49b24be..376ddf72b7104640cede8dd049cbb00ad5e0af28 100644
@@ -44,14 +44,14 @@ static Walfile *dir_open_for_write(WalWriteMethod *wwmethod,
                                                                   const char *pathname,
                                                                   const char *temp_suffix,
                                                                   size_t pad_to_size);
-static int dir_close(Walfile *f, WalCloseMethod method);
+static int     dir_close(Walfile *f, WalCloseMethod method);
 static bool dir_existsfile(WalWriteMethod *wwmethod, const char *pathname);
 static ssize_t dir_get_file_size(WalWriteMethod *wwmethod,
                                                                 const char *pathname);
 static char *dir_get_file_name(WalWriteMethod *wwmethod,
                                                           const char *pathname, const char *temp_suffix);
 static ssize_t dir_write(Walfile *f, const void *buf, size_t count);
-static int dir_sync(Walfile *f);
+static int     dir_sync(Walfile *f);
 static bool dir_finish(WalWriteMethod *wwmethod);
 static void dir_free(WalWriteMethod *wwmethod);
 
@@ -72,7 +72,7 @@ const WalWriteMethodOps WalDirectoryMethodOps = {
  */
 typedef struct DirectoryMethodData
 {
-       WalWriteMethod  base;
+       WalWriteMethod base;
        char       *basedir;
 } DirectoryMethodData;
 
@@ -660,14 +660,14 @@ static Walfile *tar_open_for_write(WalWriteMethod *wwmethod,
                                                                   const char *pathname,
                                                                   const char *temp_suffix,
                                                                   size_t pad_to_size);
-static int tar_close(Walfile *f, WalCloseMethod method);
+static int     tar_close(Walfile *f, WalCloseMethod method);
 static bool tar_existsfile(WalWriteMethod *wwmethod, const char *pathname);
 static ssize_t tar_get_file_size(WalWriteMethod *wwmethod,
                                                                 const char *pathname);
 static char *tar_get_file_name(WalWriteMethod *wwmethod,
                                                           const char *pathname, const char *temp_suffix);
 static ssize_t tar_write(Walfile *f, const void *buf, size_t count);
-static int tar_sync(Walfile *f);
+static int     tar_sync(Walfile *f);
 static bool tar_finish(WalWriteMethod *wwmethod);
 static void tar_free(WalWriteMethod *wwmethod);
 
@@ -693,7 +693,7 @@ typedef struct TarMethodFile
 
 typedef struct TarMethodData
 {
-       WalWriteMethod  base;
+       WalWriteMethod base;
        char       *tarfilename;
        int                     fd;
        TarMethodFile *currentfile;
@@ -1353,7 +1353,7 @@ CreateWalTarMethod(const char *tarbase,
 {
        TarMethodData *wwmethod;
        const char *suffix = (compression_algorithm == PG_COMPRESSION_GZIP) ?
-       ".tar.gz" : ".tar";
+               ".tar.gz" : ".tar";
 
        wwmethod = pg_malloc0(sizeof(TarMethodData));
        *((const WalWriteMethodOps **) &wwmethod->base.ops) =
index d7284c08ce47269a165a7ac70509faa1526d0b5f..54a22fe6070a1105944fc7e2304b6706dceb380c 100644
@@ -19,11 +19,12 @@ typedef struct
        WalWriteMethod *wwmethod;
        off_t           currpos;
        char       *pathname;
+
        /*
         * MORE DATA FOLLOWS AT END OF STRUCT
         *
-        * Each WalWriteMethod is expected to embed this as the first member of
-        * larger struct with method-specific fields following.
+        * Each WalWriteMethod is expected to embed this as the first member of a
+        * larger struct with method-specific fields following.
         */
 } Walfile;
 
@@ -45,7 +46,7 @@ typedef struct WalWriteMethodOps
         * automatically renamed in close(). If pad_to_size is specified, the file
         * will be padded with NUL up to that size, if supported by the Walmethod.
         */
-       Walfile    *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size);
+       Walfile    *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size);
 
        /*
         * Close an open Walfile, using one or more methods for handling automatic
@@ -107,11 +108,12 @@ struct WalWriteMethod
        bool            sync;
        const char *lasterrstring;      /* if set, takes precedence over lasterrno */
        int                     lasterrno;
+
        /*
         * MORE DATA FOLLOWS AT END OF STRUCT
         *
-        * Each WalWriteMethod is expected to embed this as the first member of
-        * larger struct with method-specific fields following.
+        * Each WalWriteMethod is expected to embed this as the first member of a
+        * larger struct with method-specific fields following.
         */
 };
 
index 2316f611b23c1a05c912cc7b716df9f30c6fa209..2d63182d59f571181db00cfe2bd388d29061ec67 100644
@@ -18,10 +18,10 @@ use Test::More;
 # at the end.
 sub check_relation_corruption
 {
-       my $node       = shift;
-       my $table      = shift;
+       my $node = shift;
+       my $table = shift;
        my $tablespace = shift;
-       my $pgdata     = $node->data_dir;
+       my $pgdata = $node->data_dir;
 
        # Create table and discover its filesystem location.
        $node->safe_psql(
@@ -44,8 +44,8 @@ sub check_relation_corruption
        command_ok(
                [
                        'pg_checksums', '--check',
-                       '-D',           $pgdata,
-                       '--filenode',   $relfilenode_corrupted
+                       '-D', $pgdata,
+                       '--filenode', $relfilenode_corrupted
                ],
                "succeeds for single relfilenode on tablespace $tablespace with offline cluster"
        );
@@ -57,8 +57,8 @@ sub check_relation_corruption
        $node->command_checks_all(
                [
                        'pg_checksums', '--check',
-                       '-D',           $pgdata,
-                       '--filenode',   $relfilenode_corrupted
+                       '-D', $pgdata,
+                       '--filenode', $relfilenode_corrupted
                ],
                1,
                [qr/Bad checksums:.*1/],
@@ -97,21 +97,21 @@ command_like(
        'checksums disabled in control file');
 
 # These are correct but empty files, so they should pass through.
-append_to_file "$pgdata/global/99999",          "";
-append_to_file "$pgdata/global/99999.123",      "";
-append_to_file "$pgdata/global/99999_fsm",      "";
-append_to_file "$pgdata/global/99999_init",     "";
-append_to_file "$pgdata/global/99999_vm",       "";
+append_to_file "$pgdata/global/99999", "";
+append_to_file "$pgdata/global/99999.123", "";
+append_to_file "$pgdata/global/99999_fsm", "";
+append_to_file "$pgdata/global/99999_init", "";
+append_to_file "$pgdata/global/99999_vm", "";
 append_to_file "$pgdata/global/99999_init.123", "";
-append_to_file "$pgdata/global/99999_fsm.123",  "";
-append_to_file "$pgdata/global/99999_vm.123",   "";
+append_to_file "$pgdata/global/99999_fsm.123", "";
+append_to_file "$pgdata/global/99999_vm.123", "";
 
 # These are temporary files and folders with dummy contents, which
 # should be ignored by the scan.
 append_to_file "$pgdata/global/pgsql_tmp_123", "foo";
 mkdir "$pgdata/global/pgsql_tmp";
-append_to_file "$pgdata/global/pgsql_tmp/1.1",        "foo";
-append_to_file "$pgdata/global/pg_internal.init",     "foo";
+append_to_file "$pgdata/global/pgsql_tmp/1.1", "foo";
+append_to_file "$pgdata/global/pg_internal.init", "foo";
 append_to_file "$pgdata/global/pg_internal.init.123", "foo";
 
 # Enable checksums.
@@ -197,7 +197,7 @@ command_fails([ 'pg_checksums', '--check', '-D', $pgdata ],
 check_relation_corruption($node, 'corrupt1', 'pg_default');
 
 # Create tablespace to check corruptions in a non-default tablespace.
-my $basedir        = $node->basedir;
+my $basedir = $node->basedir;
 my $tablespace_dir = "$basedir/ts_corrupt_dir";
 mkdir($tablespace_dir);
 $node->safe_psql('postgres',
@@ -208,8 +208,8 @@ check_relation_corruption($node, 'corrupt2', 'ts_corrupt');
 # correctly-named relation files filled with some corrupted data.
 sub fail_corrupt
 {
-       my $node   = shift;
-       my $file   = shift;
+       my $node = shift;
+       my $file = shift;
        my $pgdata = $node->data_dir;
 
        # Create the file with some dummy data in it.
index a502bce3c9a153359c81cd81496282ec5fbb88b1..0c641036e9c58940a2997ec7ec836101763f037a 100644
@@ -24,7 +24,7 @@ command_like([ 'pg_controldata', $node->data_dir ],
 # check with a corrupted pg_control
 
 my $pg_control = $node->data_dir . '/global/pg_control';
-my $size       = (stat($pg_control))[7];
+my $size = (stat($pg_control))[7];
 
 open my $fh, '>', $pg_control or BAIL_OUT($!);
 binmode $fh;
index 11bc8053540b6e6af100429138593a9c7abf224f..f019fe1703f3ead97f026e755e0a032aae188152 100644
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::Utils;
 use Test::More;
 
-my $tempdir       = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
 my $tempdir_short = PostgreSQL::Test::Utils::tempdir_short;
 
 program_help_ok('pg_ctl');
index 10815a60d4f56ac4bd224ec5b3c2ac9a2fe10e3b..8d48e56ee9b1efaf6542eece5e29903cf3fefc5e 100644
@@ -14,8 +14,8 @@ use Time::HiRes qw(usleep);
 sub fetch_file_name
 {
        my $logfiles = shift;
-       my $format   = shift;
-       my @lines    = split(/\n/, $logfiles);
+       my $format = shift;
+       my @lines = split(/\n/, $logfiles);
        my $filename = undef;
        foreach my $line (@lines)
        {
@@ -33,11 +33,11 @@ sub check_log_pattern
 {
        local $Test::Builder::Level = $Test::Builder::Level + 1;
 
-       my $format   = shift;
+       my $format = shift;
        my $logfiles = shift;
-       my $pattern  = shift;
-       my $node     = shift;
-       my $lfname   = fetch_file_name($logfiles, $format);
+       my $pattern = shift;
+       my $node = shift;
+       my $lfname = fetch_file_name($logfiles, $format);
 
        my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default;
 
@@ -100,8 +100,8 @@ csvlog log/postgresql-.*csv
 jsonlog log/postgresql-.*json$|,
        'current_logfiles is sane');
 
-check_log_pattern('stderr',  $current_logfiles, 'division by zero', $node);
-check_log_pattern('csvlog',  $current_logfiles, 'division by zero', $node);
+check_log_pattern('stderr', $current_logfiles, 'division by zero', $node);
+check_log_pattern('csvlog', $current_logfiles, 'division by zero', $node);
 check_log_pattern('jsonlog', $current_logfiles, 'division by zero', $node);
 
 # Sleep 2 seconds and ask for log rotation; this should result in
@@ -131,8 +131,8 @@ jsonlog log/postgresql-.*json$|,
 # Verify that log output gets to this file, too
 $node->psql('postgres', 'fee fi fo fum');
 
-check_log_pattern('stderr',  $new_current_logfiles, 'syntax error', $node);
-check_log_pattern('csvlog',  $new_current_logfiles, 'syntax error', $node);
+check_log_pattern('stderr', $new_current_logfiles, 'syntax error', $node);
+check_log_pattern('csvlog', $new_current_logfiles, 'syntax error', $node);
 check_log_pattern('jsonlog', $new_current_logfiles, 'syntax error', $node);
 
 $node->stop();
index f97fb1aaff8dc0020abb642bab4ccda0b1953ac2..4fee6e24348cc53183e354d8cdeb40bfc97557aa 100644
@@ -87,8 +87,8 @@
 char *
 supports_compression(const pg_compress_specification compression_spec)
 {
-       const pg_compress_algorithm     algorithm = compression_spec.algorithm;
-       bool                                            supported = false;
+       const pg_compress_algorithm algorithm = compression_spec.algorithm;
+       bool            supported = false;
 
        if (algorithm == PG_COMPRESSION_NONE)
                supported = true;
index 8d7b28e51046ccda5f8a68ccda920bf159bf5438..52214b31eea0683d9332f9b441f96547086974b5 100644
@@ -44,8 +44,8 @@ typedef struct LZ4State
 
        LZ4F_preferences_t prefs;
 
-       LZ4F_compressionContext_t       ctx;
-       LZ4F_decompressionContext_t     dtx;
+       LZ4F_compressionContext_t ctx;
+       LZ4F_decompressionContext_t dtx;
 
        /*
         * Used by the Stream API's lazy initialization.
@@ -148,8 +148,8 @@ ReadDataFromArchiveLZ4(ArchiveHandle *AH, CompressorState *cs)
        char       *outbuf;
        char       *readbuf;
        LZ4F_decompressionContext_t ctx = NULL;
-       LZ4F_decompressOptions_t        dec_opt;
-       LZ4F_errorCode_t                        status;
+       LZ4F_decompressOptions_t dec_opt;
+       LZ4F_errorCode_t status;
 
        memset(&dec_opt, 0, sizeof(dec_opt));
        status = LZ4F_createDecompressionContext(&ctx, LZ4F_VERSION);
@@ -651,8 +651,8 @@ LZ4Stream_gets(char *ptr, int size, CompressFileHandle *CFH)
                return NULL;
 
        /*
-        * Our caller expects the return string to be NULL terminated
-        * and we know that ret is greater than zero.
+        * Our caller expects the return string to be NULL terminated and we know
+        * that ret is greater than zero.
         */
        ptr[ret - 1] = '\0';
 
index 9fbdc0a87dad576b0e81fa637455ae9ba9d01d0d..82e3310100fe257533e480e644122a433410dde1 100644
@@ -82,8 +82,8 @@ _ZstdCStreamParams(pg_compress_specification compress)
 
        if (compress.options & PG_COMPRESSION_OPTION_LONG_DISTANCE)
                _Zstd_CCtx_setParam_or_die(cstream,
-                                                                 ZSTD_c_enableLongDistanceMatching,
-                                                                 compress.long_distance, "long");
+                                                                  ZSTD_c_enableLongDistanceMatching,
+                                                                  compress.long_distance, "long");
 
        return cstream;
 }
index 2aaa6b100b1966214d94a31b51cfbf6bc896b06b..d0ab1351fdd6db3b924e53356dfecb6bae4198d3 100644
@@ -18,8 +18,8 @@
 #include "compress_io.h"
 
 extern void InitCompressorZstd(CompressorState *cs,
-               const pg_compress_specification compression_spec);
+                                                          const pg_compress_specification compression_spec);
 extern void InitCompressFileHandleZstd(CompressFileHandle *CFH,
-               const pg_compress_specification compression_spec);
+                                                                          const pg_compress_specification compression_spec);
 
-#endif /* COMPRESS_ZSTD_H */
+#endif                                                 /* COMPRESS_ZSTD_H */
index d518349e100b9246e873bf6c6dc43bf118d01cc3..39ebcfec326d49553d43c0624c318786485c988d 100644
@@ -386,10 +386,11 @@ RestoreArchive(Archive *AHX)
                {
                        if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
                        {
-                               char *errmsg = supports_compression(AH->compression_spec);
+                               char       *errmsg = supports_compression(AH->compression_spec);
+
                                if (errmsg)
                                        pg_fatal("cannot restore from compressed archive (%s)",
-                                                         errmsg);
+                                                        errmsg);
                                else
                                        break;
                        }
@@ -2985,11 +2986,11 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
        if (!te->hadDumper)
        {
                /*
-                * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then
-                * it is considered a data entry.  We don't need to check for the
-                * BLOBS entry or old-style BLOB COMMENTS, because they will have
-                * hadDumper = true ... but we do need to check new-style BLOB ACLs,
-                * comments, etc.
+                * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it
+                * is considered a data entry.  We don't need to check for the BLOBS
+                * entry or old-style BLOB COMMENTS, because they will have hadDumper
+                * = true ... but we do need to check new-style BLOB ACLs, comments,
+                * etc.
                 */
                if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
                        strcmp(te->desc, "BLOB") == 0 ||
@@ -3480,6 +3481,7 @@ _getObjectDescription(PQExpBuffer buf, const TocEntry *te)
        {
                appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
        }
+
        /*
         * These object types require additional decoration.  Fortunately, the
         * information needed is exactly what's in the DROP command.
@@ -3639,6 +3641,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
 
                initPQExpBuffer(&temp);
                _getObjectDescription(&temp, te);
+
                /*
                 * If _getObjectDescription() didn't fill the buffer, then there is no
                 * owner.
@@ -3802,7 +3805,7 @@ ReadHead(ArchiveHandle *AH)
        if (errmsg)
        {
                pg_log_warning("archive is compressed, but this installation does not support compression (%s) -- no data will be available",
-                                               errmsg);
+                                          errmsg);
                pg_free(errmsg);
        }
 
index babd23b4eb613ff404e2758780a5a0a97dc84b15..db5fb43bae8f783b7db29f8c76f2d041ff5d919b 100644
@@ -684,10 +684,10 @@ _LoadLOs(ArchiveHandle *AH)
                        tarClose(AH, th);
 
                        /*
-                        * Once we have found the first LO, stop at the first non-LO
-                        * entry (which will be 'blobs.toc').  This coding would eat all
-                        * the rest of the archive if there are no LOs ... but this
-                        * function shouldn't be called at all in that case.
+                        * Once we have found the first LO, stop at the first non-LO entry
+                        * (which will be 'blobs.toc').  This coding would eat all the
+                        * rest of the archive if there are no LOs ... but this function
+                        * shouldn't be called at all in that case.
                         */
                        if (foundLO)
                                break;
index f325045f9f04413cdaa5c2aecb4fa271bfe202dc..3af97a6039eeaaa6332b29736daf8769abe061a8 100644
@@ -756,9 +756,9 @@ main(int argc, char **argv)
                pg_fatal("%s", error_detail);
 
        /*
-        * Disable support for zstd workers for now - these are based on threading,
-        * and it's unclear how it interacts with parallel dumps on platforms where
-        * that relies on threads too (e.g. Windows).
+        * Disable support for zstd workers for now - these are based on
+        * threading, and it's unclear how it interacts with parallel dumps on
+        * platforms where that relies on threads too (e.g. Windows).
         */
        if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
                pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
@@ -879,8 +879,8 @@ main(int argc, char **argv)
        /*
         * Dumping LOs is the default for dumps where an inclusion switch is not
         * used (an "include everything" dump).  -B can be used to exclude LOs
-        * from those dumps.  -b can be used to include LOs even when an
-        * inclusion switch is used.
+        * from those dumps.  -b can be used to include LOs even when an inclusion
+        * switch is used.
         *
         * -s means "schema only" and LOs are data, not schema, so we never
         * include LOs when -s is used.
@@ -915,8 +915,8 @@ main(int argc, char **argv)
         * data or the associated metadata that resides in the pg_largeobject and
         * pg_largeobject_metadata tables, respectively.
         *
-        * However, we do need to collect LO information as there may be
-        * comments or other information on LOs that we do need to dump out.
+        * However, we do need to collect LO information as there may be comments
+        * or other information on LOs that we do need to dump out.
         */
        if (dopt.outputLOs || dopt.binary_upgrade)
                getLOs(fout);
@@ -3323,8 +3323,8 @@ dumpDatabase(Archive *fout)
                appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
                for (int i = 0; i < PQntuples(lo_res); ++i)
                {
-                       Oid             oid;
-                       RelFileNumber   relfilenumber;
+                       Oid                     oid;
+                       RelFileNumber relfilenumber;
 
                        appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n"
                                                          "SET relfrozenxid = '%u', relminmxid = '%u'\n"
@@ -3590,8 +3590,8 @@ getLOs(Archive *fout)
                        loinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
 
                /*
-                * In binary-upgrade mode for LOs, we do *not* dump out the LO
-                * data, as it will be copied by pg_upgrade, which simply copies the
+                * In binary-upgrade mode for LOs, we do *not* dump out the LO data,
+                * as it will be copied by pg_upgrade, which simply copies the
                 * pg_largeobject table. We *do* however dump out anything but the
                 * data, as pg_upgrade copies just pg_largeobject, but not
                 * pg_largeobject_metadata, after the dump is restored.
@@ -14828,7 +14828,10 @@ dumpSecLabel(Archive *fout, const char *type, const char *name,
        if (dopt->no_security_labels)
                return;
 
-       /* Security labels are schema not data ... except large object labels are data */
+       /*
+        * Security labels are schema not data ... except large object labels are
+        * data
+        */
        if (strcmp(type, "LARGE OBJECT") != 0)
        {
                if (dopt->dataOnly)
@@ -15161,7 +15164,7 @@ dumpTable(Archive *fout, const TableInfo *tbinfo)
        if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL)
        {
                const char *objtype =
-               (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
+                       (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
 
                tableAclDumpId =
                        dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId,
@@ -16632,10 +16635,12 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
                {
                        appendPQExpBufferStr(q,
                                                                 coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
+
                        /*
                         * PRIMARY KEY constraints should not be using NULLS NOT DISTINCT
                         * indexes. Being able to create this was fixed, but we need to
-                        * make the index distinct in order to be able to restore the dump.
+                        * make the index distinct in order to be able to restore the
+                        * dump.
                         */
                        if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
                                appendPQExpBufferStr(q, " NULLS NOT DISTINCT");
@@ -17857,7 +17862,7 @@ processExtensionTables(Archive *fout, ExtensionInfo extinfo[],
                                TableInfo  *configtbl;
                                Oid                     configtbloid = atooid(extconfigarray[j]);
                                bool            dumpobj =
-                               curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
+                                       curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
 
                                configtbl = findTableByOid(configtbloid);
                                if (configtbl == NULL)
index c5647d059bef99e3457a854a549fba77e8fe2d11..3627b69e2a6b3acd0358d22089d26e30d1ad2ac7 100644
@@ -949,7 +949,7 @@ static void
 dumpRoleMembership(PGconn *conn)
 {
        PQExpBuffer buf = createPQExpBuffer();
-       PQExpBuffer     optbuf = createPQExpBuffer();
+       PQExpBuffer optbuf = createPQExpBuffer();
        PGresult   *res;
        int                     start = 0,
                                end,
@@ -996,8 +996,8 @@ dumpRoleMembership(PGconn *conn)
 
        /*
         * We can't dump these GRANT commands in arbitrary order, because a role
-        * that is named as a grantor must already have ADMIN OPTION on the
-        * role for which it is granting permissions, except for the bootstrap
+        * that is named as a grantor must already have ADMIN OPTION on the role
+        * for which it is granting permissions, except for the bootstrap
         * superuser, who can always be named as the grantor.
         *
         * We handle this by considering these grants role by role. For each role,
@@ -1005,8 +1005,8 @@ dumpRoleMembership(PGconn *conn)
         * superuser. Every time we grant ADMIN OPTION on the role to some user,
         * that user also becomes an allowable grantor. We make repeated passes
         * over the grants for the role, each time dumping those whose grantors
-        * are allowable and which we haven't done yet. Eventually this should
-        * let us dump all the grants.
+        * are allowable and which we haven't done yet. Eventually this should let
+        * us dump all the grants.
         */
        total = PQntuples(res);
        while (start < total)
@@ -1021,7 +1021,7 @@ dumpRoleMembership(PGconn *conn)
                /* All memberships for a single role should be adjacent. */
                for (end = start; end < total; ++end)
                {
-                       char   *otherrole;
+                       char       *otherrole;
 
                        otherrole = PQgetvalue(res, end, 0);
                        if (strcmp(role, otherrole) != 0)
@@ -1105,7 +1105,7 @@ dumpRoleMembership(PGconn *conn)
                                        appendPQExpBufferStr(optbuf, "ADMIN OPTION");
                                if (dump_grant_options)
                                {
-                                       char   *inherit_option;
+                                       char       *inherit_option;
 
                                        if (optbuf->data[0] != '\0')
                                                appendPQExpBufferStr(optbuf, ", ");
index d66f3b42ea42dcd793b29514f4a1fd1bafdd8921..387c5d3afbf4af30ab198acf2f84a4c3a06719c8 100644
@@ -53,10 +53,10 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir;
 # database and then pg_dump *that* database (or something along
 # those lines) to validate that part of the process.
 
-my $supports_icu  = ($ENV{with_icu} eq 'yes');
+my $supports_icu = ($ENV{with_icu} eq 'yes');
 my $supports_gzip = check_pg_config("#define HAVE_LIBZ 1");
-my $supports_lz4  = check_pg_config("#define USE_LZ4 1");
-my $supports_zstd  = check_pg_config("#define USE_ZSTD 1");
+my $supports_lz4 = check_pg_config("#define USE_LZ4 1");
+my $supports_zstd = check_pg_config("#define USE_ZSTD 1");
 
 my %pgdump_runs = (
        binary_upgrade => {
@@ -79,10 +79,10 @@ my %pgdump_runs = (
 
        # Do not use --no-sync to give test coverage for data sync.
        compression_gzip_custom => {
-               test_key       => 'compression',
+               test_key => 'compression',
                compile_option => 'gzip',
-               dump_cmd       => [
-                       'pg_dump',      '--format=custom',
+               dump_cmd => [
+                       'pg_dump', '--format=custom',
                        '--compress=1', "--file=$tempdir/compression_gzip_custom.dump",
                        'postgres',
                ],
@@ -96,24 +96,24 @@ my %pgdump_runs = (
                                'pg_restore', '-l', "$tempdir/compression_gzip_custom.dump",
                        ],
                        expected => qr/Compression: gzip/,
-                       name     => 'data content is gzip-compressed'
+                       name => 'data content is gzip-compressed'
                },
        },
 
        # Do not use --no-sync to give test coverage for data sync.
        compression_gzip_dir => {
-               test_key       => 'compression',
+               test_key => 'compression',
                compile_option => 'gzip',
-               dump_cmd       => [
-                       'pg_dump',                              '--jobs=2',
-                       '--format=directory',                   '--compress=gzip:1',
+               dump_cmd => [
+                       'pg_dump', '--jobs=2',
+                       '--format=directory', '--compress=gzip:1',
                        "--file=$tempdir/compression_gzip_dir", 'postgres',
                ],
                # Give coverage for manually compressed blob.toc files during
                # restore.
                compress_cmd => {
                        program => $ENV{'GZIP_PROGRAM'},
-                       args    => [ '-f', "$tempdir/compression_gzip_dir/blobs.toc", ],
+                       args => [ '-f', "$tempdir/compression_gzip_dir/blobs.toc", ],
                },
                # Verify that only data files were compressed
                glob_patterns => [
@@ -128,25 +128,25 @@ my %pgdump_runs = (
        },
 
        compression_gzip_plain => {
-               test_key       => 'compression',
+               test_key => 'compression',
                compile_option => 'gzip',
-               dump_cmd       => [
+               dump_cmd => [
                        'pg_dump', '--format=plain', '-Z1',
                        "--file=$tempdir/compression_gzip_plain.sql.gz", 'postgres',
                ],
                # Decompress the generated file to run through the tests.
                compress_cmd => {
                        program => $ENV{'GZIP_PROGRAM'},
-                       args    => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ],
+                       args => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ],
                },
        },
 
        # Do not use --no-sync to give test coverage for data sync.
        compression_lz4_custom => {
-               test_key       => 'compression',
+               test_key => 'compression',
                compile_option => 'lz4',
-               dump_cmd       => [
-                       'pg_dump',      '--format=custom',
+               dump_cmd => [
+                       'pg_dump', '--format=custom',
                        '--compress=lz4', "--file=$tempdir/compression_lz4_custom.dump",
                        'postgres',
                ],
@@ -156,10 +156,8 @@ my %pgdump_runs = (
                        "$tempdir/compression_lz4_custom.dump",
                ],
                command_like => {
-                       command => [
-                               'pg_restore',
-                               '-l', "$tempdir/compression_lz4_custom.dump",
-                       ],
+                       command =>
+                         [ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ],
                        expected => qr/Compression: lz4/,
                        name => 'data content is lz4 compressed'
                },
@@ -167,18 +165,18 @@ my %pgdump_runs = (
 
        # Do not use --no-sync to give test coverage for data sync.
        compression_lz4_dir => {
-               test_key       => 'compression',
+               test_key => 'compression',
                compile_option => 'lz4',
-               dump_cmd       => [
-                       'pg_dump',                              '--jobs=2',
-                       '--format=directory',                   '--compress=lz4:1',
+               dump_cmd => [
+                       'pg_dump', '--jobs=2',
+                       '--format=directory', '--compress=lz4:1',
                        "--file=$tempdir/compression_lz4_dir", 'postgres',
                ],
                # Give coverage for manually compressed blob.toc files during
                # restore.
                compress_cmd => {
                        program => $ENV{'LZ4'},
-                       args    => [
+                       args => [
                                '-z', '-f', '--rm',
                                "$tempdir/compression_lz4_dir/blobs.toc",
                                "$tempdir/compression_lz4_dir/blobs.toc.lz4",
@@ -187,7 +185,7 @@ my %pgdump_runs = (
                # Verify that data files were compressed
                glob_patterns => [
                        "$tempdir/compression_lz4_dir/toc.dat",
-                   "$tempdir/compression_lz4_dir/*.dat.lz4",
+                       "$tempdir/compression_lz4_dir/*.dat.lz4",
                ],
                restore_cmd => [
                        'pg_restore', '--jobs=2',
@@ -197,16 +195,16 @@ my %pgdump_runs = (
        },
 
        compression_lz4_plain => {
-               test_key       => 'compression',
+               test_key => 'compression',
                compile_option => 'lz4',
-               dump_cmd       => [
+               dump_cmd => [
                        'pg_dump', '--format=plain', '--compress=lz4',
                        "--file=$tempdir/compression_lz4_plain.sql.lz4", 'postgres',
                ],
                # Decompress the generated file to run through the tests.
                compress_cmd => {
                        program => $ENV{'LZ4'},
-                       args    => [
+                       args => [
                                '-d', '-f',
                                "$tempdir/compression_lz4_plain.sql.lz4",
                                "$tempdir/compression_lz4_plain.sql",
@@ -215,10 +213,10 @@ my %pgdump_runs = (
        },
 
        compression_zstd_custom => {
-               test_key       => 'compression',
+               test_key => 'compression',
                compile_option => 'zstd',
-               dump_cmd       => [
-                       'pg_dump',      '--format=custom',
+               dump_cmd => [
+                       'pg_dump', '--format=custom',
                        '--compress=zstd', "--file=$tempdir/compression_zstd_custom.dump",
                        'postgres',
                ],
@@ -229,8 +227,7 @@ my %pgdump_runs = (
                ],
                command_like => {
                        command => [
-                               'pg_restore',
-                               '-l', "$tempdir/compression_zstd_custom.dump",
+                               'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump",
                        ],
                        expected => qr/Compression: zstd/,
                        name => 'data content is zstd compressed'
@@ -238,27 +235,27 @@ my %pgdump_runs = (
        },
 
        compression_zstd_dir => {
-               test_key       => 'compression',
+               test_key => 'compression',
                compile_option => 'zstd',
-               dump_cmd       => [
-                       'pg_dump',                              '--jobs=2',
-                       '--format=directory',                   '--compress=zstd:1',
+               dump_cmd => [
+                       'pg_dump', '--jobs=2',
+                       '--format=directory', '--compress=zstd:1',
                        "--file=$tempdir/compression_zstd_dir", 'postgres',
                ],
                # Give coverage for manually compressed blob.toc files during
                # restore.
                compress_cmd => {
                        program => $ENV{'ZSTD'},
-                       args    => [
-                               '-z', '-f', '--rm',
-                               "$tempdir/compression_zstd_dir/blobs.toc",
+                       args => [
+                               '-z', '-f',
+                               '--rm', "$tempdir/compression_zstd_dir/blobs.toc",
                                "-o", "$tempdir/compression_zstd_dir/blobs.toc.zst",
                        ],
                },
                # Verify that data files were compressed
                glob_patterns => [
-                   "$tempdir/compression_zstd_dir/toc.dat",
-                   "$tempdir/compression_zstd_dir/*.dat.zst",
+                       "$tempdir/compression_zstd_dir/toc.dat",
+                       "$tempdir/compression_zstd_dir/*.dat.zst",
                ],
                restore_cmd => [
                        'pg_restore', '--jobs=2',
@@ -269,19 +266,19 @@ my %pgdump_runs = (
 
        # Exercise long mode for test coverage
        compression_zstd_plain => {
-               test_key       => 'compression',
+               test_key => 'compression',
                compile_option => 'zstd',
-               dump_cmd       => [
+               dump_cmd => [
                        'pg_dump', '--format=plain', '--compress=zstd:long',
                        "--file=$tempdir/compression_zstd_plain.sql.zst", 'postgres',
                ],
                # Decompress the generated file to run through the tests.
                compress_cmd => {
                        program => $ENV{'ZSTD'},
-                       args    => [
+                       args => [
                                '-d', '-f',
-                               "$tempdir/compression_zstd_plain.sql.zst",
-                               "-o", "$tempdir/compression_zstd_plain.sql",
+                               "$tempdir/compression_zstd_plain.sql.zst", "-o",
+                               "$tempdir/compression_zstd_plain.sql",
                        ],
                },
        },
@@ -308,9 +305,9 @@ my %pgdump_runs = (
        },
        column_inserts => {
                dump_cmd => [
-                       'pg_dump',                            '--no-sync',
+                       'pg_dump', '--no-sync',
                        "--file=$tempdir/column_inserts.sql", '-a',
-                       '--column-inserts',                   'postgres',
+                       '--column-inserts', 'postgres',
                ],
        },
        createdb => {
@@ -339,7 +336,7 @@ my %pgdump_runs = (
        defaults => {
                dump_cmd => [
                        'pg_dump', '--no-sync',
-                       '-f',      "$tempdir/defaults.sql",
+                       '-f', "$tempdir/defaults.sql",
                        'postgres',
                ],
        },
@@ -385,9 +382,9 @@ my %pgdump_runs = (
                command_like => {
                        command =>
                          [ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ],
-                       expected => $supports_gzip ?
-                       qr/Compression: gzip/ :
-                       qr/Compression: none/,
+                       expected => $supports_gzip
+                       ? qr/Compression: gzip/
+                       : qr/Compression: none/,
                        name => 'data content is gzip-compressed by default if available',
                },
        },
@@ -399,7 +396,7 @@ my %pgdump_runs = (
        defaults_dir_format => {
                test_key => 'defaults',
                dump_cmd => [
-                       'pg_dump',                             '-Fd',
+                       'pg_dump', '-Fd',
                        "--file=$tempdir/defaults_dir_format", 'postgres',
                ],
                restore_cmd => [
@@ -410,17 +407,15 @@ my %pgdump_runs = (
                command_like => {
                        command =>
                          [ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ],
-                       expected => $supports_gzip ?
-                       qr/Compression: gzip/ :
-                       qr/Compression: none/,
+                       expected => $supports_gzip ? qr/Compression: gzip/
+                       : qr/Compression: none/,
                        name => 'data content is gzip-compressed by default',
                },
                glob_patterns => [
                        "$tempdir/defaults_dir_format/toc.dat",
                        "$tempdir/defaults_dir_format/blobs.toc",
-                       $supports_gzip ?
-                       "$tempdir/defaults_dir_format/*.dat.gz" :
-                       "$tempdir/defaults_dir_format/*.dat",
+                       $supports_gzip ? "$tempdir/defaults_dir_format/*.dat.gz"
+                       : "$tempdir/defaults_dir_format/*.dat",
                ],
        },
 
@@ -442,7 +437,7 @@ my %pgdump_runs = (
        defaults_tar_format => {
                test_key => 'defaults',
                dump_cmd => [
-                       'pg_dump',                                 '-Ft',
+                       'pg_dump', '-Ft',
                        "--file=$tempdir/defaults_tar_format.tar", 'postgres',
                ],
                restore_cmd => [
@@ -468,7 +463,8 @@ my %pgdump_runs = (
        },
        exclude_measurement => {
                dump_cmd => [
-                       'pg_dump', '--no-sync',
+                       'pg_dump',
+                       '--no-sync',
                        "--file=$tempdir/exclude_measurement.sql",
                        '--exclude-table-and-children=dump_test.measurement',
                        'postgres',
@@ -496,9 +492,9 @@ my %pgdump_runs = (
        },
        inserts => {
                dump_cmd => [
-                       'pg_dump',                     '--no-sync',
+                       'pg_dump', '--no-sync',
                        "--file=$tempdir/inserts.sql", '-a',
-                       '--inserts',                   'postgres',
+                       '--inserts', 'postgres',
                ],
        },
        pg_dumpall_globals => {
@@ -534,21 +530,20 @@ my %pgdump_runs = (
        },
        no_large_objects => {
                dump_cmd => [
-                       'pg_dump',                      '--no-sync',
-                       "--file=$tempdir/no_large_objects.sql", '-B',
-                       'postgres',
+                       'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql",
+                       '-B', 'postgres',
                ],
        },
        no_privs => {
                dump_cmd => [
-                       'pg_dump',                      '--no-sync',
+                       'pg_dump', '--no-sync',
                        "--file=$tempdir/no_privs.sql", '-x',
                        'postgres',
                ],
        },
        no_owner => {
                dump_cmd => [
-                       'pg_dump',                      '--no-sync',
+                       'pg_dump', '--no-sync',
                        "--file=$tempdir/no_owner.sql", '-O',
                        'postgres',
                ],
@@ -630,21 +625,21 @@ my %pgdump_runs = (
        },
        schema_only => {
                dump_cmd => [
-                       'pg_dump',                         '--format=plain',
+                       'pg_dump', '--format=plain',
                        "--file=$tempdir/schema_only.sql", '--no-sync',
-                       '-s',                              'postgres',
+                       '-s', 'postgres',
                ],
        },
        section_pre_data => {
                dump_cmd => [
-                       'pg_dump',            "--file=$tempdir/section_pre_data.sql",
+                       'pg_dump', "--file=$tempdir/section_pre_data.sql",
                        '--section=pre-data', '--no-sync',
                        'postgres',
                ],
        },
        section_data => {
                dump_cmd => [
-                       'pg_dump',        "--file=$tempdir/section_data.sql",
+                       'pg_dump', "--file=$tempdir/section_data.sql",
                        '--section=data', '--no-sync',
                        'postgres',
                ],
@@ -705,38 +700,38 @@ my %pgdump_runs = (
 
 # Tests which target the 'dump_test' schema, specifically.
 my %dump_test_schema_runs = (
-       only_dump_test_schema  => 1,
-       only_dump_measurement  => 1,
+       only_dump_test_schema => 1,
+       only_dump_measurement => 1,
        test_schema_plus_large_objects => 1,);
 
 # Tests which are considered 'full' dumps by pg_dump, but there
 # are flags used to exclude specific items (ACLs, LOs, etc).
 my %full_runs = (
-       binary_upgrade           => 1,
-       clean                    => 1,
-       clean_if_exists          => 1,
-       compression              => 1,
-       createdb                 => 1,
-       defaults                 => 1,
+       binary_upgrade => 1,
+       clean => 1,
+       clean_if_exists => 1,
+       compression => 1,
+       createdb => 1,
+       defaults => 1,
        exclude_dump_test_schema => 1,
-       exclude_test_table       => 1,
-       exclude_test_table_data  => 1,
-       exclude_measurement      => 1,
+       exclude_test_table => 1,
+       exclude_test_table_data => 1,
+       exclude_measurement => 1,
        exclude_measurement_data => 1,
-       no_toast_compression     => 1,
-       no_large_objects         => 1,
-       no_owner                 => 1,
-       no_privs                 => 1,
-       no_table_access_method   => 1,
-       pg_dumpall_dbprivs       => 1,
-       pg_dumpall_exclude       => 1,
-       schema_only              => 1,);
+       no_toast_compression => 1,
+       no_large_objects => 1,
+       no_owner => 1,
+       no_privs => 1,
+       no_table_access_method => 1,
+       pg_dumpall_dbprivs => 1,
+       pg_dumpall_exclude => 1,
+       schema_only => 1,);
 
 # This is where the actual tests are defined.
 my %tests = (
        'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT' => {
                create_order => 14,
-               create_sql   => 'ALTER DEFAULT PRIVILEGES
+               create_sql => 'ALTER DEFAULT PRIVILEGES
                                           FOR ROLE regress_dump_test_role IN SCHEMA dump_test
                                           GRANT SELECT ON TABLES TO regress_dump_test_role;',
                regexp => qr/^
@@ -748,15 +743,15 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_privs                 => 1,
-                       only_dump_measurement    => 1,
+                       no_privs => 1,
+                       only_dump_measurement => 1,
                },
        },
 
        'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT EXECUTE ON FUNCTIONS'
          => {
                create_order => 15,
-               create_sql   => 'ALTER DEFAULT PRIVILEGES
+               create_sql => 'ALTER DEFAULT PRIVILEGES
                                           FOR ROLE regress_dump_test_role IN SCHEMA dump_test
                                           GRANT EXECUTE ON FUNCTIONS TO regress_dump_test_role;',
                regexp => qr/^
@@ -768,14 +763,14 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_post_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_privs                 => 1,
-                       only_dump_measurement    => 1,
+                       no_privs => 1,
+                       only_dump_measurement => 1,
                },
          },
 
        'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE' => {
                create_order => 55,
-               create_sql   => 'ALTER DEFAULT PRIVILEGES
+               create_sql => 'ALTER DEFAULT PRIVILEGES
                                           FOR ROLE regress_dump_test_role
                                           REVOKE EXECUTE ON FUNCTIONS FROM PUBLIC;',
                regexp => qr/^
@@ -790,7 +785,7 @@ my %tests = (
        'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE SELECT'
          => {
                create_order => 56,
-               create_sql   => 'ALTER DEFAULT PRIVILEGES
+               create_sql => 'ALTER DEFAULT PRIVILEGES
                                           FOR ROLE regress_dump_test_role
                                           REVOKE SELECT ON TABLES FROM regress_dump_test_role;',
                regexp => qr/^
@@ -812,29 +807,29 @@ my %tests = (
                        \QNOREPLICATION NOBYPASSRLS;\E
                        /xm,
                like => {
-                       pg_dumpall_dbprivs       => 1,
-                       pg_dumpall_globals       => 1,
+                       pg_dumpall_dbprivs => 1,
+                       pg_dumpall_globals => 1,
                        pg_dumpall_globals_clean => 1,
-                       pg_dumpall_exclude       => 1,
+                       pg_dumpall_exclude => 1,
                },
        },
 
        'ALTER COLLATION test0 OWNER TO' => {
-               regexp    => qr/^\QALTER COLLATION public.test0 OWNER TO \E.+;/m,
+               regexp => qr/^\QALTER COLLATION public.test0 OWNER TO \E.+;/m,
                collation => 1,
-               like      => { %full_runs, section_pre_data => 1, },
-               unlike    => { %dump_test_schema_runs, no_owner => 1, },
+               like => { %full_runs, section_pre_data => 1, },
+               unlike => { %dump_test_schema_runs, no_owner => 1, },
        },
 
        'ALTER FOREIGN DATA WRAPPER dummy OWNER TO' => {
                regexp => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .+;/m,
-               like   => { %full_runs, section_pre_data => 1, },
+               like => { %full_runs, section_pre_data => 1, },
                unlike => { no_owner => 1, },
        },
 
        'ALTER SERVER s1 OWNER TO' => {
                regexp => qr/^ALTER SERVER s1 OWNER TO .+;/m,
-               like   => { %full_runs, section_pre_data => 1, },
+               like => { %full_runs, section_pre_data => 1, },
                unlike => { no_owner => 1, },
        },
 
@@ -847,8 +842,8 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_owner                 => 1,
-                       only_dump_measurement    => 1,
+                       no_owner => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -861,8 +856,8 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_owner                 => 1,
-                       only_dump_measurement    => 1,
+                       no_owner => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -897,7 +892,7 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       only_dump_measurement    => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -910,37 +905,37 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_owner                 => 1,
-                       only_dump_measurement    => 1,
+                       no_owner => 1,
+                       only_dump_measurement => 1,
                },
        },
 
        'ALTER PUBLICATION pub1 OWNER TO' => {
                regexp => qr/^ALTER PUBLICATION pub1 OWNER TO .+;/m,
-               like   => { %full_runs, section_post_data => 1, },
+               like => { %full_runs, section_post_data => 1, },
                unlike => { no_owner => 1, },
        },
 
        'ALTER LARGE OBJECT ... OWNER TO' => {
                regexp => qr/^ALTER LARGE OBJECT \d+ OWNER TO .+;/m,
-               like   => {
+               like => {
                        %full_runs,
-                       column_inserts         => 1,
-                       data_only              => 1,
-                       inserts                => 1,
-                       section_pre_data       => 1,
+                       column_inserts => 1,
+                       data_only => 1,
+                       inserts => 1,
+                       section_pre_data => 1,
                        test_schema_plus_large_objects => 1,
                },
                unlike => {
                        no_large_objects => 1,
-                       no_owner    => 1,
+                       no_owner => 1,
                        schema_only => 1,
                },
        },
 
        'ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO' => {
                regexp => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .+;/m,
-               like   => { %full_runs, section_pre_data => 1, },
+               like => { %full_runs, section_pre_data => 1, },
                unlike => { no_owner => 1, },
        },
 
@@ -950,16 +945,16 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_owner                 => 1,
-                       only_dump_measurement    => 1,
+                       no_owner => 1,
+                       only_dump_measurement => 1,
                },
        },
 
        'ALTER SCHEMA dump_test_second_schema OWNER TO' => {
                regexp => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .+;/m,
-               like   => {
+               like => {
                        %full_runs,
-                       role             => 1,
+                       role => 1,
                        section_pre_data => 1,
                },
                unlike => { no_owner => 1, },
@@ -970,14 +965,14 @@ my %tests = (
                create_sql =>
                  'ALTER SCHEMA public OWNER TO "regress_quoted  \"" role";',
                regexp => qr/^ALTER SCHEMA public OWNER TO .+;/m,
-               like   => {
+               like => {
                        %full_runs, section_pre_data => 1,
                },
                unlike => { no_owner => 1, },
        },
 
        'ALTER SCHEMA public OWNER TO (w/o ACL changes)' => {
-               database     => 'regress_public_owner',
+               database => 'regress_public_owner',
                create_order => 100,
                create_sql =>
                  'ALTER SCHEMA public OWNER TO "regress_quoted  \"" role";',
@@ -993,12 +988,12 @@ my %tests = (
                        %full_runs,
                        %dump_test_schema_runs,
                        only_dump_test_table => 1,
-                       section_pre_data     => 1,
+                       section_pre_data => 1,
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       exclude_test_table       => 1,
-                       only_dump_measurement    => 1,
+                       exclude_test_table => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1011,18 +1006,18 @@ my %tests = (
                        %full_runs,
                        %dump_test_schema_runs,
                        only_dump_test_table => 1,
-                       section_post_data    => 1,
+                       section_post_data => 1,
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       exclude_test_table       => 1,
-                       only_dump_measurement    => 1,
+                       exclude_test_table => 1,
+                       only_dump_measurement => 1,
                },
        },
 
        'ALTER TABLE (partitioned) ADD CONSTRAINT ... FOREIGN KEY' => {
                create_order => 4,
-               create_sql   => 'CREATE TABLE dump_test.test_table_fk (
+               create_sql => 'CREATE TABLE dump_test.test_table_fk (
                                                        col1 int references dump_test.test_table)
                                                        PARTITION BY RANGE (col1);
                                                        CREATE TABLE dump_test.test_table_fk_1
@@ -1036,7 +1031,7 @@ my %tests = (
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       only_dump_measurement    => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1051,12 +1046,12 @@ my %tests = (
                        %full_runs,
                        %dump_test_schema_runs,
                        only_dump_test_table => 1,
-                       section_pre_data     => 1,
+                       section_pre_data => 1,
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       exclude_test_table       => 1,
-                       only_dump_measurement    => 1,
+                       exclude_test_table => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1071,12 +1066,12 @@ my %tests = (
                        %full_runs,
                        %dump_test_schema_runs,
                        only_dump_test_table => 1,
-                       section_pre_data     => 1,
+                       section_pre_data => 1,
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       exclude_test_table       => 1,
-                       only_dump_measurement    => 1,
+                       exclude_test_table => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1091,12 +1086,12 @@ my %tests = (
                        %full_runs,
                        %dump_test_schema_runs,
                        only_dump_test_table => 1,
-                       section_pre_data     => 1,
+                       section_pre_data => 1,
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       exclude_test_table       => 1,
-                       only_dump_measurement    => 1,
+                       exclude_test_table => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1111,12 +1106,12 @@ my %tests = (
                        %full_runs,
                        %dump_test_schema_runs,
                        only_dump_test_table => 1,
-                       section_pre_data     => 1,
+                       section_pre_data => 1,
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       exclude_test_table       => 1,
-                       only_dump_measurement    => 1,
+                       exclude_test_table => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1128,9 +1123,9 @@ my %tests = (
                        /xm,
                like => {
                        %full_runs,
-                       role             => 1,
+                       role => 1,
                        section_pre_data => 1,
-                       binary_upgrade   => 1,
+                       binary_upgrade => 1,
                        only_dump_measurement => 1,
                },
                unlike => {
@@ -1149,12 +1144,12 @@ my %tests = (
                        %full_runs,
                        %dump_test_schema_runs,
                        only_dump_test_table => 1,
-                       section_post_data    => 1,
+                       section_post_data => 1,
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       exclude_test_table       => 1,
-                       only_dump_measurement    => 1,
+                       exclude_test_table => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1178,29 +1173,29 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       only_dump_measurement    => 1,
+                       only_dump_measurement => 1,
                },
        },
 
        'ALTER TABLE test_table OWNER TO' => {
                regexp => qr/^\QALTER TABLE dump_test.test_table OWNER TO \E.+;/m,
-               like   => {
+               like => {
                        %full_runs,
                        %dump_test_schema_runs,
                        only_dump_test_table => 1,
-                       section_pre_data     => 1,
+                       section_pre_data => 1,
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       exclude_test_table       => 1,
-                       only_dump_measurement    => 1,
-                       no_owner                 => 1,
+                       exclude_test_table => 1,
+                       only_dump_measurement => 1,
+                       no_owner => 1,
                },
        },
 
        'ALTER TABLE test_table ENABLE ROW LEVEL SECURITY' => {
                create_order => 23,
-               create_sql   => 'ALTER TABLE dump_test.test_table
+               create_sql => 'ALTER TABLE dump_test.test_table
                                           ENABLE ROW LEVEL SECURITY;',
                regexp =>
                  qr/^\QALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;\E/m,
@@ -1208,12 +1203,12 @@ my %tests = (
                        %full_runs,
                        %dump_test_schema_runs,
                        only_dump_test_table => 1,
-                       section_post_data    => 1,
+                       section_post_data => 1,
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       exclude_test_table       => 1,
-                       only_dump_measurement    => 1,
+                       exclude_test_table => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1224,8 +1219,8 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_owner                 => 1,
-                       only_dump_measurement    => 1,
+                       no_owner => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1239,8 +1234,8 @@ my %tests = (
                },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_owner                 => 1,
-                       exclude_measurement      => 1,
+                       no_owner => 1,
+                       exclude_measurement => 1,
                },
        },
 
@@ -1249,7 +1244,7 @@ my %tests = (
                  qr/^\QALTER TABLE dump_test_second_schema.measurement_y2006m2 OWNER TO \E.+;/m,
                like => {
                        %full_runs,
-                       role             => 1,
+                       role => 1,
                        section_pre_data => 1,
                        only_dump_measurement => 1,
                },
@@ -1266,8 +1261,8 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_owner                 => 1,
-                       only_dump_measurement    => 1,
+                       no_owner => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1278,8 +1273,8 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       no_owner                 => 1,
-                       only_dump_measurement    => 1,
+                       no_owner => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1290,10 +1285,10 @@ my %tests = (
                  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
-                       only_dump_test_table     => 1,
-                       no_owner                 => 1,
-                       role                     => 1,
-                       only_dump_measurement    => 1,
+                       only_dump_test_table => 1,
+                       no_owner => 1,
+                       role => 1,
+                       only_dump_measurement => 1,
                },
        },
 
@@ -1302,12 +1297,12 @@ my %tests = (
                create_sql =>
                  'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');',
                regexp => qr/^SELECT pg_catalog\.lo_create\('\d+'\);/m,
-               like   => {
+               like => {
                        %full_runs,
-                       column_inserts         => 1,
-                       data_only              => 1,
-                       inserts                => 1,
-                       section_pre_data       => 1,
+                       column_inserts => 1,
+                       data_only => 1,
+                       inserts => 1,
+                       section_pre_data => 1,
                        test_schema_plus_large_objects => 1,
                },
                unlike => {
@@ -1325,39 +1320,38 @@ my %tests = (
                        /xm,
                like => {
                        %full_runs,
-                       column_inserts         => 1,
-                       data_only              => 1,
-                       inserts                => 1,
-                       section_data           => 1,
+                       column_inserts => 1,
+                       data_only => 1,
+                       inserts => 1,
+                       section_data => 1,
                        test_schema_plus_large_objects => 1,
                },
                unlike => {
                        binary_upgrade => 1,
                        no_large_objects => 1,
-                       schema_only    => 1,
+                       schema_only => 1,
                },
        },
 
        'LO create (with no data)' => {
-               create_sql =>
-                 'SELECT pg_catalog.lo_create(0);',
+               create_sql => 'SELECT pg_catalog.lo_create(0);',
                regexp => qr/^
                        \QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n
                        \QSELECT pg_catalog.lo_close(0);\E
                        /xm,
-               like   => {
+               like => {
                        %full_runs,
-                       column_inserts         => 1,
-                       data_only              => 1,
-                       inserts                => 1,
-                       section_data           => 1,
+                       column_inserts => 1,
+                       data_only => 1,
+                       inserts => 1,
+                       section_data => 1,
                        test_schema_plus_large_objects => 1,
                },
                unlike => {
-                       binary_upgrade         => 1,
-                       no_large_objects       => 1,
-                       schema_only            => 1,
-                       section_pre_data       => 1,
+                       binary_upgrade => 1,
+                       no_large_objects => 1,
+                       schema_only => 1,
+                       section_pre_data => 1,
                },
        },
 
@@ -1385,16 +1379,16 @@ my %tests = (
        },
 
        'COMMENT ON SCHEMA public IS NULL' => {
-               database     => 'regress_public_owner',
+               database => 'regress_public_owner',
                create_order => 100,
-               create_sql   => 'COMMENT ON SCHEMA public IS NULL;',
-               regexp       => qr/^COMMENT ON SCHEMA public IS '';/m,
-               like         => { defaults_public_owner => 1 },
+               create_sql => 'COMMENT ON SCHEMA public IS NULL;',
+               regexp => qr/^COMMENT ON SCHEMA public IS '';/m,
+               like => { defaults_public_owner => 1 },
        },
 
        'COMMENT ON TABLE dump_test.test_table' => {
                create_order => 36,
-               create_sql   => 'COMMENT ON TABLE dump_test.test_table
+               create_sql => 'COMMENT ON TABLE dump_test.test_table
                                           IS \'comment on table\';',
                regexp =>
                  qr/^\QCOMMENT ON TABLE dump_test.test_table IS 'comment on table';\E/m,
@@