Pre-beta mechanical code beautification.
author    Tom Lane <tgl@sss.pgh.pa.us>
          Thu, 12 May 2022 19:17:30 +0000 (15:17 -0400)
committer Tom Lane <tgl@sss.pgh.pa.us>
          Thu, 12 May 2022 19:17:30 +0000 (15:17 -0400)
Run pgindent, pgperltidy, and reformat-dat-files.
I manually fixed a couple of comments that pgindent uglified.
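For reference, a rough sketch of how these three tools are typically invoked,
from the top of a configured source tree (paths and behavior here are from
memory of this era's tree and may vary by release; pgindent additionally
requires a pg_bsd_indent binary and the curated typedefs list):

    # Reindent all C code; uses src/tools/pgindent/typedefs.list.
    src/tools/pgindent/pgindent

    # Run perltidy over the tree's Perl files with the project profile.
    src/tools/pgindent/pgperltidy

    # Rewrite the catalog .dat files into their canonical layout.
    make -C src/include/catalog reformat-dat-files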

287 files changed:
config/check_modules.pl
contrib/amcheck/verify_heapam.c
contrib/basebackup_to_shell/basebackup_to_shell.c
contrib/basebackup_to_shell/t/001_basic.pl
contrib/basic_archive/basic_archive.c
contrib/btree_gist/btree_bool.c
contrib/hstore_plpython/hstore_plpython.c
contrib/pageinspect/brinfuncs.c
contrib/pageinspect/gistfuncs.c
contrib/pg_stat_statements/pg_stat_statements.c
contrib/pg_walinspect/pg_walinspect.c
contrib/pgcrypto/openssl.c
contrib/pgstattuple/pgstattuple.c
contrib/postgres_fdw/connection.c
contrib/postgres_fdw/postgres_fdw.c
contrib/test_decoding/test_decoding.c
src/backend/access/common/toast_internals.c
src/backend/access/heap/pruneheap.c
src/backend/access/heap/vacuumlazy.c
src/backend/access/rmgrdesc/xactdesc.c
src/backend/access/rmgrdesc/xlogdesc.c
src/backend/access/transam/rmgr.c
src/backend/access/transam/twophase.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xlogarchive.c
src/backend/access/transam/xlogfuncs.c
src/backend/access/transam/xlogrecovery.c
src/backend/access/transam/xlogstats.c
src/backend/access/transam/xlogutils.c
src/backend/catalog/Catalog.pm
src/backend/catalog/genbki.pl
src/backend/catalog/heap.c
src/backend/catalog/index.c
src/backend/catalog/objectaccess.c
src/backend/catalog/pg_constraint.c
src/backend/catalog/pg_publication.c
src/backend/catalog/storage.c
src/backend/commands/analyze.c
src/backend/commands/collationcmds.c
src/backend/commands/copy.c
src/backend/commands/copyfromparse.c
src/backend/commands/copyto.c
src/backend/commands/dbcommands.c
src/backend/commands/explain.c
src/backend/commands/extension.c
src/backend/commands/matview.c
src/backend/commands/publicationcmds.c
src/backend/commands/statscmds.c
src/backend/commands/subscriptioncmds.c
src/backend/commands/tablecmds.c
src/backend/commands/tablespace.c
src/backend/commands/user.c
src/backend/commands/vacuum.c
src/backend/commands/vacuumparallel.c
src/backend/executor/execExpr.c
src/backend/executor/execExprInterp.c
src/backend/executor/nodeIndexscan.c
src/backend/executor/nodeMemoize.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/spi.c
src/backend/jit/llvm/llvmjit.c
src/backend/lib/dshash.c
src/backend/libpq/pqcomm.c
src/backend/nodes/copyfuncs.c
src/backend/nodes/equalfuncs.c
src/backend/nodes/nodeFuncs.c
src/backend/nodes/outfuncs.c
src/backend/nodes/value.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/path/equivclass.c
src/backend/optimizer/path/joinpath.c
src/backend/optimizer/path/pathkeys.c
src/backend/optimizer/plan/createplan.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/util/clauses.c
src/backend/optimizer/util/plancat.c
src/backend/parser/analyze.c
src/backend/parser/parse_clause.c
src/backend/parser/parse_collate.c
src/backend/parser/parse_expr.c
src/backend/parser/parse_jsontable.c
src/backend/parser/parse_node.c
src/backend/parser/parse_param.c
src/backend/parser/parse_relation.c
src/backend/partitioning/partdesc.c
src/backend/postmaster/autovacuum.c
src/backend/postmaster/bgworker.c
src/backend/postmaster/pgarch.c
src/backend/postmaster/postmaster.c
src/backend/postmaster/startup.c
src/backend/postmaster/walwriter.c
src/backend/regex/regc_pg_locale.c
src/backend/replication/backup_manifest.c
src/backend/replication/basebackup_copy.c
src/backend/replication/basebackup_gzip.c
src/backend/replication/basebackup_lz4.c
src/backend/replication/basebackup_server.c
src/backend/replication/basebackup_target.c
src/backend/replication/basebackup_zstd.c
src/backend/replication/logical/decode.c
src/backend/replication/logical/launcher.c
src/backend/replication/logical/reorderbuffer.c
src/backend/replication/logical/tablesync.c
src/backend/replication/logical/worker.c
src/backend/replication/pgoutput/pgoutput.c
src/backend/replication/slot.c
src/backend/replication/walreceiver.c
src/backend/replication/walsender.c
src/backend/statistics/dependencies.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/ipc/procarray.c
src/backend/storage/ipc/shm_mq.c
src/backend/storage/ipc/sinvaladt.c
src/backend/storage/page/bufpage.c
src/backend/tcop/postgres.c
src/backend/utils/adt/arrayfuncs.c
src/backend/utils/adt/dbsize.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/json.c
src/backend/utils/adt/jsonb.c
src/backend/utils/adt/jsonb_util.c
src/backend/utils/adt/jsonfuncs.c
src/backend/utils/adt/jsonpath.c
src/backend/utils/adt/jsonpath_exec.c
src/backend/utils/adt/like.c
src/backend/utils/adt/multirangetypes.c
src/backend/utils/adt/numeric.c
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/pgstatfuncs.c
src/backend/utils/adt/rangetypes_spgist.c
src/backend/utils/adt/ri_triggers.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/selfuncs.c
src/backend/utils/adt/timestamp.c
src/backend/utils/adt/uuid.c
src/backend/utils/adt/varchar.c
src/backend/utils/adt/varlena.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/relmapper.c
src/backend/utils/init/postinit.c
src/backend/utils/misc/queryjumble.c
src/backend/utils/sort/tuplesort.c
src/bin/initdb/initdb.c
src/bin/initdb/t/001_initdb.pl
src/bin/pg_amcheck/pg_amcheck.c
src/bin/pg_amcheck/t/002_nonesuch.pl
src/bin/pg_amcheck/t/005_opclass_damage.pl
src/bin/pg_basebackup/bbstreamer_file.c
src/bin/pg_basebackup/bbstreamer_gzip.c
src/bin/pg_basebackup/bbstreamer_lz4.c
src/bin/pg_basebackup/pg_basebackup.c
src/bin/pg_basebackup/streamutil.c
src/bin/pg_basebackup/t/010_pg_basebackup.pl
src/bin/pg_basebackup/t/020_pg_receivewal.pl
src/bin/pg_basebackup/t/030_pg_recvlogical.pl
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_ctl/t/002_status.pl
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_custom.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dumpall.c
src/bin/pg_dump/t/001_basic.pl
src/bin/pg_dump/t/002_pg_dump.pl
src/bin/pg_dump/t/003_pg_dump_with_server.pl
src/bin/pg_dump/t/010_dump_connstr.pl
src/bin/pg_rewind/filemap.c
src/bin/pg_rewind/t/004_pg_xlog_symlink.pl
src/bin/pg_rewind/t/009_growing_files.pl
src/bin/pg_rewind/t/RewindTest.pm
src/bin/pg_upgrade/t/002_pg_upgrade.pl
src/bin/pg_upgrade/util.c
src/bin/pg_verifybackup/t/003_corruption.pl
src/bin/pg_verifybackup/t/004_options.pl
src/bin/pg_verifybackup/t/005_bad_manifest.pl
src/bin/pg_verifybackup/t/007_wal.pl
src/bin/pg_verifybackup/t/008_untar.pl
src/bin/pg_verifybackup/t/009_extract.pl
src/bin/pg_verifybackup/t/010_client_untar.pl
src/bin/pg_waldump/pg_waldump.c
src/bin/pgbench/pgbench.c
src/bin/pgbench/t/001_pgbench_with_server.pl
src/bin/pgbench/t/002_pgbench_no_server.pl
src/bin/psql/common.c
src/bin/psql/describe.c
src/bin/psql/t/001_basic.pl
src/bin/psql/t/010_tab_completion.pl
src/bin/psql/t/020_cancel.pl
src/bin/psql/tab-complete.c
src/bin/scripts/t/020_createdb.pl
src/common/compression.c
src/common/cryptohash_openssl.c
src/common/exec.c
src/include/access/amapi.h
src/include/access/heapam.h
src/include/access/rmgr.h
src/include/access/xact.h
src/include/access/xlogstats.h
src/include/access/xlogutils.h
src/include/catalog/objectaccess.h
src/include/catalog/pg_aggregate.dat
src/include/catalog/pg_class.h
src/include/catalog/pg_collation.h
src/include/catalog/pg_database.dat
src/include/catalog/pg_parameter_acl.h
src/include/catalog/pg_proc.dat
src/include/catalog/pg_publication.h
src/include/catalog/pg_statistic_ext_data.h
src/include/catalog/renumber_oids.pl
src/include/commands/publicationcmds.h
src/include/executor/execExpr.h
src/include/executor/executor.h
src/include/fmgr.h
src/include/nodes/execnodes.h
src/include/nodes/nodes.h
src/include/nodes/parsenodes.h
src/include/nodes/pathnodes.h
src/include/nodes/plannodes.h
src/include/nodes/primnodes.h
src/include/optimizer/paths.h
src/include/parser/analyze.h
src/include/parser/parse_param.h
src/include/port.h
src/include/postmaster/pgarch.h
src/include/replication/basebackup_target.h
src/include/replication/decode.h
src/include/replication/slot.h
src/include/storage/latch.h
src/include/tcop/tcopprot.h
src/include/utils/formatting.h
src/include/utils/jsonpath.h
src/include/utils/rel.h
src/include/utils/relmapper.h
src/include/utils/selfuncs.h
src/include/utils/sortsupport.h
src/interfaces/libpq/fe-auth.c
src/interfaces/libpq/fe-secure-common.c
src/interfaces/libpq/t/002_api.pl
src/test/icu/t/010_database.pl
src/test/ldap/t/001_auth.pl
src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
src/test/modules/test_misc/t/002_tablespace.pl
src/test/modules/test_oat_hooks/test_oat_hooks.c
src/test/modules/test_pg_dump/t/001_base.pl
src/test/perl/PostgreSQL/Test/Cluster.pm
src/test/perl/PostgreSQL/Test/SimpleTee.pm
src/test/perl/PostgreSQL/Test/Utils.pm
src/test/perl/PostgreSQL/Version.pm
src/test/recovery/t/001_stream_rep.pl
src/test/recovery/t/002_archiving.pl
src/test/recovery/t/006_logical_decoding.pl
src/test/recovery/t/013_crash_restart.pl
src/test/recovery/t/014_unlogged_reinit.pl
src/test/recovery/t/019_replslot_limit.pl
src/test/recovery/t/022_crash_temp_files.pl
src/test/recovery/t/027_stream_regress.pl
src/test/recovery/t/029_stats_restart.pl
src/test/recovery/t/031_recovery_conflict.pl
src/test/recovery/t/032_relfilenode_reuse.pl
src/test/regress/regress.c
src/test/ssl/t/001_ssltests.pl
src/test/ssl/t/002_scram.pl
src/test/ssl/t/003_sslinfo.pl
src/test/ssl/t/SSL/Backend/OpenSSL.pm
src/test/ssl/t/SSL/Server.pm
src/test/subscription/t/001_rep_changes.pl
src/test/subscription/t/007_ddl.pl
src/test/subscription/t/013_partition.pl
src/test/subscription/t/021_twophase.pl
src/test/subscription/t/022_twophase_cascade.pl
src/test/subscription/t/023_twophase_stream.pl
src/test/subscription/t/024_add_drop_pub.pl
src/test/subscription/t/025_rep_changes_for_schema.pl
src/test/subscription/t/027_nosuperuser.pl
src/test/subscription/t/028_row_filter.pl
src/test/subscription/t/031_column_list.pl
src/tools/PerfectHash.pm
src/tools/ci/windows_build_config.pl
src/tools/mark_pgdllimport.pl
src/tools/msvc/MSBuildProject.pm
src/tools/msvc/Mkvcbuild.pm
src/tools/msvc/Project.pm
src/tools/msvc/Solution.pm
src/tools/msvc/vcregress.pl
src/tools/pgindent/typedefs.list

config/check_modules.pl
index 470c3e9c144f4e177c2f7313e783746d8d1726e1..611f3a673fdf9afafde85c8190f38886ff875b08 100644 (file)
@@ -21,8 +21,7 @@ diag("Test::More::VERSION: $Test::More::VERSION");
 diag("Time::HiRes::VERSION: $Time::HiRes::VERSION");
 
 # Check that if prove is using msys perl it is for an msys target
-ok(($ENV{__CONFIG_HOST_OS__} || "") eq 'msys',
-   "Msys perl used for correct target")
-  if $Config{osname} eq 'msys';
+ok( ($ENV{__CONFIG_HOST_OS__} || "") eq 'msys',
+       "Msys perl used for correct target") if $Config{osname} eq 'msys';
 ok(1);
 done_testing();
contrib/amcheck/verify_heapam.c
index e5f7355dcb81ed04869d676908f01bc9f60ed2d3..c875f3e5a2a559d6818517bd7e041e9685ef15ae 100644 (file)
@@ -1402,17 +1402,17 @@ check_tuple_attribute(HeapCheckContext *ctx)
                cmid = TOAST_COMPRESS_METHOD(&toast_pointer);
                switch (cmid)
                {
-                       /* List of all valid compression method IDs */
+                               /* List of all valid compression method IDs */
                        case TOAST_PGLZ_COMPRESSION_ID:
                        case TOAST_LZ4_COMPRESSION_ID:
                                valid = true;
                                break;
 
-                       /* Recognized but invalid compression method ID */
+                               /* Recognized but invalid compression method ID */
                        case TOAST_INVALID_COMPRESSION_ID:
                                break;
 
-                       /* Intentionally no default here */
+                               /* Intentionally no default here */
                }
                if (!valid)
                        report_corruption(ctx,
contrib/basebackup_to_shell/basebackup_to_shell.c
index a279219966d49ec923bfd4247e74313791c1809d..bc754b177afbb79c7f3dc8a1a85057ed6a72030b 100644 (file)
@@ -37,13 +37,13 @@ typedef struct bbsink_shell
        FILE       *pipe;
 } bbsink_shell;
 
-void _PG_init(void);
+void           _PG_init(void);
 
 static void *shell_check_detail(char *target, char *target_detail);
 static bbsink *shell_get_sink(bbsink *next_sink, void *detail_arg);
 
 static void bbsink_shell_begin_archive(bbsink *sink,
-                                                                               const char *archive_name);
+                                                                          const char *archive_name);
 static void bbsink_shell_archive_contents(bbsink *sink, size_t len);
 static void bbsink_shell_end_archive(bbsink *sink);
 static void bbsink_shell_begin_manifest(bbsink *sink);
@@ -101,7 +101,7 @@ shell_check_detail(char *target, char *target_detail)
 {
        if (shell_required_role[0] != '\0')
        {
-               Oid             roleid;
+               Oid                     roleid;
 
                StartTransactionCommand();
                roleid = get_role_oid(shell_required_role, true);
@@ -125,8 +125,8 @@ static bbsink *
 shell_get_sink(bbsink *next_sink, void *detail_arg)
 {
        bbsink_shell *sink;
-       bool    has_detail_escape = false;
-       char   *c;
+       bool            has_detail_escape = false;
+       char       *c;
 
        /*
         * Set up the bbsink.
@@ -171,15 +171,15 @@ shell_get_sink(bbsink *next_sink, void *detail_arg)
        /*
         * Since we're passing the string provided by the user to popen(), it will
         * be interpreted by the shell, which is a potential security
-        * vulnerability, since the user invoking this module is not necessarily
-        * superuser. To stay out of trouble, we must disallow any shell
+        * vulnerability, since the user invoking this module is not necessarily a
+        * superuser. To stay out of trouble, we must disallow any shell
         * metacharacters here; to be conservative and keep things simple, we
         * allow only alphanumerics.
         */
        if (sink->target_detail != NULL)
        {
-               char   *d;
-               bool    scary = false;
+               char       *d;
+               bool            scary = false;
 
                for (d = sink->target_detail; *d != '\0'; ++d)
                {
@@ -210,7 +210,7 @@ static char *
 shell_construct_command(char *base_command, const char *filename,
                                                char *target_detail)
 {
-       StringInfoData  buf;
+       StringInfoData buf;
        char       *c;
 
        initStringInfo(&buf);
@@ -271,7 +271,7 @@ shell_construct_command(char *base_command, const char *filename,
 static void
 shell_finish_command(bbsink_shell *sink)
 {
-       int             pclose_rc;
+       int                     pclose_rc;
 
        /* There should be a command running. */
        Assert(sink->current_command != NULL);
@@ -335,9 +335,8 @@ shell_send_data(bbsink_shell *sink, size_t len)
                {
                        /*
                         * The error we're about to throw would shut down the command
-                        * anyway, but we may get a more meaningful error message by
-                        * doing this. If not, we'll fall through to the generic error
-                        * below.
+                        * anyway, but we may get a more meaningful error message by doing
+                        * this. If not, we'll fall through to the generic error below.
                         */
                        shell_finish_command(sink);
                        errno = EPIPE;
contrib/basebackup_to_shell/t/001_basic.pl
index 350d42079a76dc0c4f61b9d9ec206a79b37c4c2d..acb66eb9a84034a0cb00a97bb3f26c19f040d97b 100644 (file)
@@ -20,11 +20,12 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
 
 # Make sure pg_hba.conf is set up to allow connections from backupuser.
 # This is only needed on Windows machines that don't use UNIX sockets.
-$node->init('allows_streaming' => 1,
-                       'auth_extra' => [ '--create-role', 'backupuser' ]);
+$node->init(
+       'allows_streaming' => 1,
+       'auth_extra'       => [ '--create-role', 'backupuser' ]);
 
 $node->append_conf('postgresql.conf',
-                                  "shared_preload_libraries = 'basebackup_to_shell'");
+       "shared_preload_libraries = 'basebackup_to_shell'");
 $node->start;
 $node->safe_psql('postgres', 'CREATE USER backupuser REPLICATION');
 $node->safe_psql('postgres', 'CREATE ROLE trustworthy');
@@ -41,61 +42,61 @@ my @pg_basebackup_cmd = (@pg_basebackup_defs, '-U', 'backupuser', '-Xfetch');
 
 # Can't use this module without setting basebackup_to_shell.command.
 $node->command_fails_like(
-    [ @pg_basebackup_cmd, '--target', 'shell' ],
+       [ @pg_basebackup_cmd, '--target', 'shell' ],
        qr/shell command for backup is not configured/,
        'fails if basebackup_to_shell.command is not set');
 
 # Configure basebackup_to_shell.command and reload the configuation file.
-my $backup_path = PostgreSQL::Test::Utils::tempdir;
+my $backup_path         = PostgreSQL::Test::Utils::tempdir;
 my $escaped_backup_path = $backup_path;
-$escaped_backup_path =~ s{\\}{\\\\}g if ($PostgreSQL::Test::Utils::windows_os);
+$escaped_backup_path =~ s{\\}{\\\\}g
+  if ($PostgreSQL::Test::Utils::windows_os);
 my $shell_command =
-       $PostgreSQL::Test::Utils::windows_os
-       ? qq{$gzip --fast > "$escaped_backup_path\\\\%f.gz"}
-    : qq{$gzip --fast > "$escaped_backup_path/%f.gz"};
+  $PostgreSQL::Test::Utils::windows_os
+  ? qq{$gzip --fast > "$escaped_backup_path\\\\%f.gz"}
+  : qq{$gzip --fast > "$escaped_backup_path/%f.gz"};
 $node->append_conf('postgresql.conf',
-                                  "basebackup_to_shell.command='$shell_command'");
+       "basebackup_to_shell.command='$shell_command'");
 $node->reload();
 
 # Should work now.
 $node->command_ok(
-    [ @pg_basebackup_cmd, '--target', 'shell' ],
+       [ @pg_basebackup_cmd, '--target', 'shell' ],
        'backup with no detail: pg_basebackup');
 verify_backup('', $backup_path, "backup with no detail");
 
 # Should fail with a detail.
 $node->command_fails_like(
-    [ @pg_basebackup_cmd, '--target', 'shell:foo' ],
+       [ @pg_basebackup_cmd, '--target', 'shell:foo' ],
        qr/a target detail is not permitted because the configured command does not include %d/,
        'fails if detail provided without %d');
 
 # Reconfigure to restrict access and require a detail.
 $shell_command =
-       $PostgreSQL::Test::Utils::windows_os
-       ? qq{$gzip --fast > "$escaped_backup_path\\\\%d.%f.gz"}
-    : qq{$gzip --fast > "$escaped_backup_path/%d.%f.gz"};
+  $PostgreSQL::Test::Utils::windows_os
+  ? qq{$gzip --fast > "$escaped_backup_path\\\\%d.%f.gz"}
+  : qq{$gzip --fast > "$escaped_backup_path/%d.%f.gz"};
 $node->append_conf('postgresql.conf',
-                                  "basebackup_to_shell.command='$shell_command'");
+       "basebackup_to_shell.command='$shell_command'");
 $node->append_conf('postgresql.conf',
-                                  "basebackup_to_shell.required_role='trustworthy'");
+       "basebackup_to_shell.required_role='trustworthy'");
 $node->reload();
 
 # Should fail due to lack of permission.
 $node->command_fails_like(
-    [ @pg_basebackup_cmd, '--target', 'shell' ],
+       [ @pg_basebackup_cmd, '--target', 'shell' ],
        qr/permission denied to use basebackup_to_shell/,
        'fails if required_role not granted');
 
 # Should fail due to lack of a detail.
 $node->safe_psql('postgres', 'GRANT trustworthy TO backupuser');
 $node->command_fails_like(
-    [ @pg_basebackup_cmd, '--target', 'shell' ],
+       [ @pg_basebackup_cmd, '--target', 'shell' ],
        qr/a target detail is required because the configured command includes %d/,
        'fails if %d is present and detail not given');
 
 # Should work.
-$node->command_ok(
-    [ @pg_basebackup_cmd, '--target', 'shell:bar' ],
+$node->command_ok([ @pg_basebackup_cmd, '--target', 'shell:bar' ],
        'backup with detail: pg_basebackup');
 verify_backup('bar.', $backup_path, "backup with detail");
 
@@ -105,30 +106,34 @@ sub verify_backup
 {
        my ($prefix, $backup_dir, $test_name) = @_;
 
-       ok(-f "$backup_dir/${prefix}backup_manifest.gz",
-          "$test_name: backup_manifest.gz was created");
-       ok(-f "$backup_dir/${prefix}base.tar.gz",
-          "$test_name: base.tar.gz was created");
+       ok( -f "$backup_dir/${prefix}backup_manifest.gz",
+               "$test_name: backup_manifest.gz was created");
+       ok( -f "$backup_dir/${prefix}base.tar.gz",
+               "$test_name: base.tar.gz was created");
 
-       SKIP: {
+  SKIP:
+       {
                my $tar = $ENV{TAR};
                skip "no tar program available", 1 if (!defined $tar || $tar eq '');
 
                # Decompress.
                system_or_bail($gzip, '-d',
-                                          $backup_dir . '/' . $prefix . 'backup_manifest.gz');
+                       $backup_dir . '/' . $prefix . 'backup_manifest.gz');
                system_or_bail($gzip, '-d',
-                                          $backup_dir . '/' . $prefix . 'base.tar.gz');
+                       $backup_dir . '/' . $prefix . 'base.tar.gz');
 
                # Untar.
                my $extract_path = PostgreSQL::Test::Utils::tempdir;
                system_or_bail($tar, 'xf', $backup_dir . '/' . $prefix . 'base.tar',
-                                          '-C', $extract_path);
+                       '-C', $extract_path);
 
                # Verify.
-               $node->command_ok([ 'pg_verifybackup', '-n',
-                                                 '-m', "${backup_dir}/${prefix}backup_manifest",
-                                                 '-e', $extract_path ],
-                                                 "$test_name: backup verifies ok");
+               $node->command_ok(
+                       [
+                               'pg_verifybackup', '-n',
+                               '-m', "${backup_dir}/${prefix}backup_manifest",
+                               '-e', $extract_path
+                       ],
+                       "$test_name: backup verifies ok");
        }
 }
contrib/basic_archive/basic_archive.c
index e7efbfb9c34f5d1e3ef230ce56c4198d7ecae721..c21735332282dff4fb73af7bf9092f2df04e082b 100644 (file)
@@ -40,8 +40,8 @@
 
 PG_MODULE_MAGIC;
 
-void _PG_init(void);
-void _PG_archive_module_init(ArchiveModuleCallbacks *cb);
+void           _PG_init(void);
+void           _PG_archive_module_init(ArchiveModuleCallbacks *cb);
 
 static char *archive_directory = NULL;
 static MemoryContext basic_archive_context;
@@ -102,8 +102,8 @@ check_archive_directory(char **newval, void **extra, GucSource source)
 
        /*
         * The default value is an empty string, so we have to accept that value.
-        * Our check_configured callback also checks for this and prevents archiving
-        * from proceeding if it is still empty.
+        * Our check_configured callback also checks for this and prevents
+        * archiving from proceeding if it is still empty.
         */
        if (*newval == NULL || *newval[0] == '\0')
                return true;
@@ -119,7 +119,7 @@ check_archive_directory(char **newval, void **extra, GucSource source)
        }
 
        /*
-        * Do a basic sanity check that the specified archive directory exists.  It
+        * Do a basic sanity check that the specified archive directory exists. It
         * could be removed at some point in the future, so we still need to be
         * prepared for it not to exist in the actual archiving logic.
         */
@@ -155,18 +155,19 @@ basic_archive_file(const char *file, const char *path)
        MemoryContext oldcontext;
 
        /*
-        * We run basic_archive_file_internal() in our own memory context so that we
-        * can easily reset it during error recovery (thus avoiding memory leaks).
+        * We run basic_archive_file_internal() in our own memory context so that
+        * we can easily reset it during error recovery (thus avoiding memory
+        * leaks).
         */
        oldcontext = MemoryContextSwitchTo(basic_archive_context);
 
        /*
-        * Since the archiver operates at the bottom of the exception stack, ERRORs
-        * turn into FATALs and cause the archiver process to restart.  However,
-        * using ereport(ERROR, ...) when there are problems is easy to code and
-        * maintain.  Therefore, we create our own exception handler to catch ERRORs
-        * and return false instead of restarting the archiver whenever there is a
-        * failure.
+        * Since the archiver operates at the bottom of the exception stack,
+        * ERRORs turn into FATALs and cause the archiver process to restart.
+        * However, using ereport(ERROR, ...) when there are problems is easy to
+        * code and maintain.  Therefore, we create our own exception handler to
+        * catch ERRORs and return false instead of restarting the archiver
+        * whenever there is a failure.
         */
        if (sigsetjmp(local_sigjmp_buf, 1) != 0)
        {
@@ -228,14 +229,14 @@ basic_archive_file_internal(const char *file, const char *path)
        snprintf(destination, MAXPGPATH, "%s/%s", archive_directory, file);
 
        /*
-        * First, check if the file has already been archived.  If it already exists
-        * and has the same contents as the file we're trying to archive, we can
-        * return success (after ensuring the file is persisted to disk). This
-        * scenario is possible if the server crashed after archiving the file but
-        * before renaming its .ready file to .done.
+        * First, check if the file has already been archived.  If it already
+        * exists and has the same contents as the file we're trying to archive,
+        * we can return success (after ensuring the file is persisted to disk).
+        * This scenario is possible if the server crashed after archiving the
+        * file but before renaming its .ready file to .done.
         *
-        * If the archive file already exists but has different contents, something
-        * might be wrong, so we just fail.
+        * If the archive file already exists but has different contents,
+        * something might be wrong, so we just fail.
         */
        if (stat(destination, &st) == 0)
        {
@@ -274,8 +275,8 @@ basic_archive_file_internal(const char *file, const char *path)
                         archive_directory, "archtemp", file, MyProcPid, epoch);
 
        /*
-        * Copy the file to its temporary destination.  Note that this will fail if
-        * temp already exists.
+        * Copy the file to its temporary destination.  Note that this will fail
+        * if temp already exists.
         */
        copy_file(unconstify(char *, path), temp);
 
@@ -318,9 +319,9 @@ compare_files(const char *file1, const char *file2)
 
        for (;;)
        {
-               int             nbytes = 0;
-               int             buf1_len = 0;
-               int             buf2_len = 0;
+               int                     nbytes = 0;
+               int                     buf1_len = 0;
+               int                     buf2_len = 0;
 
                while (buf1_len < CMP_BUF_SIZE)
                {
contrib/btree_gist/btree_bool.c
index 1be246ea5e15c27889d4b029220770e06aaf059e..8b2af129b52dc14edb4b913b4ec623b88bdc540a 100644 (file)
@@ -53,8 +53,8 @@ gbt_boollt(const void *a, const void *b, FmgrInfo *flinfo)
 static int
 gbt_boolkey_cmp(const void *a, const void *b, FmgrInfo *flinfo)
 {
-       boolKEY   *ia = (boolKEY *) (((const Nsrt *) a)->t);
-       boolKEY   *ib = (boolKEY *) (((const Nsrt *) b)->t);
+       boolKEY    *ia = (boolKEY *) (((const Nsrt *) a)->t);
+       boolKEY    *ib = (boolKEY *) (((const Nsrt *) b)->t);
 
        if (ia->lower == ib->lower)
        {
contrib/hstore_plpython/hstore_plpython.c
index 889ece315df8f0e7cebf611a0d804d536bfdb429..0be65075916db8044e158d524cff4f25affbeb85 100644 (file)
@@ -99,7 +99,7 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
                PyObject   *key;
 
                key = PLyUnicode_FromStringAndSize(HSTORE_KEY(entries, base, i),
-                                                                                 HSTORE_KEYLEN(entries, i));
+                                                                                  HSTORE_KEYLEN(entries, i));
                if (HSTORE_VALISNULL(entries, i))
                        PyDict_SetItem(dict, key, Py_None);
                else
@@ -107,7 +107,7 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
                        PyObject   *value;
 
                        value = PLyUnicode_FromStringAndSize(HSTORE_VAL(entries, base, i),
-                                                                                               HSTORE_VALLEN(entries, i));
+                                                                                                HSTORE_VALLEN(entries, i));
                        PyDict_SetItem(dict, key, value);
                        Py_XDECREF(value);
                }
contrib/pageinspect/brinfuncs.c
index c2a37277e0834daf3aeff54242cfa5c9d48bc25d..879276e6dece94fab01987520735102f0e0afdb8 100644 (file)
@@ -63,12 +63,12 @@ brin_page_type(PG_FUNCTION_ARGS)
 
        /* verify the special space has the expected size */
        if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BrinSpecialSpace)))
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("input page is not a valid %s page", "BRIN"),
-                                        errdetail("Expected special size %d, got %d.",
-                                                          (int) MAXALIGN(sizeof(BrinSpecialSpace)),
-                                                          (int) PageGetSpecialSize(page))));
+               ereport(ERROR,
+                               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                                errmsg("input page is not a valid %s page", "BRIN"),
+                                errdetail("Expected special size %d, got %d.",
+                                                  (int) MAXALIGN(sizeof(BrinSpecialSpace)),
+                                                  (int) PageGetSpecialSize(page))));
 
        switch (BrinPageType(page))
        {
@@ -103,12 +103,12 @@ verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
 
        /* verify the special space has the expected size */
        if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BrinSpecialSpace)))
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("input page is not a valid %s page", "BRIN"),
-                                        errdetail("Expected special size %d, got %d.",
-                                                          (int) MAXALIGN(sizeof(BrinSpecialSpace)),
-                                                          (int) PageGetSpecialSize(page))));
+               ereport(ERROR,
+                               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                                errmsg("input page is not a valid %s page", "BRIN"),
+                                errdetail("Expected special size %d, got %d.",
+                                                  (int) MAXALIGN(sizeof(BrinSpecialSpace)),
+                                                  (int) PageGetSpecialSize(page))));
 
        /* verify the special space says this page is what we want */
        if (BrinPageType(page) != type)
contrib/pageinspect/gistfuncs.c
index 9c29fbc7aa60d9449283620873ae4aeddc68406e..4943d6f75bd1905846e2c5d7f1884ae528b3da5e 100644 (file)
@@ -60,21 +60,21 @@ gist_page_opaque_info(PG_FUNCTION_ARGS)
 
        /* verify the special space has the expected size */
        if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GISTPageOpaqueData)))
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("input page is not a valid %s page", "GiST"),
-                                        errdetail("Expected special size %d, got %d.",
-                                                          (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
-                                                          (int) PageGetSpecialSize(page))));
+               ereport(ERROR,
+                               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                                errmsg("input page is not a valid %s page", "GiST"),
+                                errdetail("Expected special size %d, got %d.",
+                                                  (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
+                                                  (int) PageGetSpecialSize(page))));
 
        opaq = GistPageGetOpaque(page);
        if (opaq->gist_page_id != GIST_PAGE_ID)
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("input page is not a valid %s page", "GiST"),
-                                        errdetail("Expected %08x, got %08x.",
-                                                          GIST_PAGE_ID,
-                                                          opaq->gist_page_id)));
+               ereport(ERROR,
+                               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                                errmsg("input page is not a valid %s page", "GiST"),
+                                errdetail("Expected %08x, got %08x.",
+                                                  GIST_PAGE_ID,
+                                                  opaq->gist_page_id)));
 
        /* Build a tuple descriptor for our result type */
        if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
@@ -138,21 +138,21 @@ gist_page_items_bytea(PG_FUNCTION_ARGS)
 
        /* verify the special space has the expected size */
        if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GISTPageOpaqueData)))
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("input page is not a valid %s page", "GiST"),
-                                        errdetail("Expected special size %d, got %d.",
-                                                          (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
-                                                          (int) PageGetSpecialSize(page))));
+               ereport(ERROR,
+                               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                                errmsg("input page is not a valid %s page", "GiST"),
+                                errdetail("Expected special size %d, got %d.",
+                                                  (int) MAXALIGN(sizeof(GISTPageOpaqueData)),
+                                                  (int) PageGetSpecialSize(page))));
 
        opaq = GistPageGetOpaque(page);
        if (opaq->gist_page_id != GIST_PAGE_ID)
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("input page is not a valid %s page", "GiST"),
-                                        errdetail("Expected %08x, got %08x.",
-                                                          GIST_PAGE_ID,
-                                                          opaq->gist_page_id)));
+               ereport(ERROR,
+                               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                                errmsg("input page is not a valid %s page", "GiST"),
+                                errdetail("Expected %08x, got %08x.",
+                                                  GIST_PAGE_ID,
+                                                  opaq->gist_page_id)));
 
        /* Avoid bogus PageGetMaxOffsetNumber() call with deleted pages */
        if (GistPageIsDeleted(page))
contrib/pg_stat_statements/pg_stat_statements.c
index ceaad81a433e2d8f3102563ce895c5318b5281bf..5c8b9ff94300de5e48ad9d496bd03b36a5cab901 100644 (file)
@@ -1533,7 +1533,10 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
        HASH_SEQ_STATUS hash_seq;
        pgssEntry  *entry;
 
-       /* Superusers or roles with the privileges of pg_read_all_stats members are allowed */
+       /*
+        * Superusers or roles with the privileges of pg_read_all_stats members
+        * are allowed
+        */
        is_allowed_role = has_privs_of_role(userid, ROLE_PG_READ_ALL_STATS);
 
        /* hash table must exist already */
contrib/pg_walinspect/pg_walinspect.c
index cc33fb65d5c75f1432f0d21a36f46235a25ff8f8..a082dfb3310c932368cad494689b4d29bfc5853f 100644 (file)
@@ -47,7 +47,7 @@ static XLogRecPtr ValidateInputLSNs(bool till_end_of_wal,
                                                                        XLogRecPtr start_lsn, XLogRecPtr end_lsn);
 static void GetWALRecordsInfo(FunctionCallInfo fcinfo, XLogRecPtr start_lsn,
                                                          XLogRecPtr end_lsn);
-static void GetXLogSummaryStats(XLogStats * stats, ReturnSetInfo *rsinfo,
+static void GetXLogSummaryStats(XLogStats *stats, ReturnSetInfo *rsinfo,
                                                                Datum *values, bool *nulls, uint32 ncols,
                                                                bool stats_per_record);
 static void FillXLogStatsRow(const char *name, uint64 n, uint64 total_count,
@@ -102,7 +102,7 @@ InitXLogReaderState(XLogRecPtr lsn, XLogRecPtr *first_record)
                                                LSN_FORMAT_ARGS(lsn))));
 
        private_data = (ReadLocalXLogPageNoWaitPrivate *)
-                                               palloc0(sizeof(ReadLocalXLogPageNoWaitPrivate));
+               palloc0(sizeof(ReadLocalXLogPageNoWaitPrivate));
 
        xlogreader = XLogReaderAllocate(wal_segment_size, NULL,
                                                                        XL_ROUTINE(.page_read = &read_local_xlog_page_no_wait,
@@ -143,7 +143,7 @@ static XLogRecord *
 ReadNextXLogRecord(XLogReaderState *xlogreader, XLogRecPtr first_record)
 {
        XLogRecord *record;
-       char    *errormsg;
+       char       *errormsg;
 
        record = XLogReadRecord(xlogreader, &errormsg);
 
@@ -153,7 +153,7 @@ ReadNextXLogRecord(XLogReaderState *xlogreader, XLogRecPtr first_record)
 
                /* return NULL, if end of WAL is reached */
                private_data = (ReadLocalXLogPageNoWaitPrivate *)
-                                                       xlogreader->private_data;
+                       xlogreader->private_data;
 
                if (private_data->end_of_wal)
                        return NULL;
@@ -181,12 +181,12 @@ GetWALRecordInfo(XLogReaderState *record, XLogRecPtr lsn,
                                 Datum *values, bool *nulls, uint32 ncols)
 {
        const char *id;
-       RmgrData desc;
-       uint32  fpi_len = 0;
+       RmgrData        desc;
+       uint32          fpi_len = 0;
        StringInfoData rec_desc;
        StringInfoData rec_blk_ref;
-       uint32  main_data_len;
-       int     i = 0;
+       uint32          main_data_len;
+       int                     i = 0;
 
        desc = GetRmgr(XLogRecGetRmid(record));
        id = desc.rm_identify(XLogRecGetInfo(record));
@@ -228,9 +228,9 @@ Datum
 pg_get_wal_record_info(PG_FUNCTION_ARGS)
 {
 #define PG_GET_WAL_RECORD_INFO_COLS 11
-       Datum   result;
-       Datum   values[PG_GET_WAL_RECORD_INFO_COLS];
-       bool    nulls[PG_GET_WAL_RECORD_INFO_COLS];
+       Datum           result;
+       Datum           values[PG_GET_WAL_RECORD_INFO_COLS];
+       bool            nulls[PG_GET_WAL_RECORD_INFO_COLS];
        XLogRecPtr      lsn;
        XLogRecPtr      curr_lsn;
        XLogRecPtr      first_record;
@@ -334,8 +334,8 @@ GetWALRecordsInfo(FunctionCallInfo fcinfo, XLogRecPtr start_lsn,
        XLogRecPtr      first_record;
        XLogReaderState *xlogreader;
        ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-       Datum   values[PG_GET_WAL_RECORDS_INFO_COLS];
-       bool    nulls[PG_GET_WAL_RECORDS_INFO_COLS];
+       Datum           values[PG_GET_WAL_RECORDS_INFO_COLS];
+       bool            nulls[PG_GET_WAL_RECORDS_INFO_COLS];
 
        SetSingleFuncCall(fcinfo, 0);
 
@@ -418,11 +418,11 @@ FillXLogStatsRow(const char *name,
                                 uint64 tot_len, uint64 total_len,
                                 Datum *values, bool *nulls, uint32 ncols)
 {
-       double  n_pct,
-                       rec_len_pct,
-                       fpi_len_pct,
-                       tot_len_pct;
-       int     i = 0;
+       double          n_pct,
+                               rec_len_pct,
+                               fpi_len_pct,
+                               tot_len_pct;
+       int                     i = 0;
 
        n_pct = 0;
        if (total_count != 0)
@@ -461,11 +461,11 @@ GetXLogSummaryStats(XLogStats *stats, ReturnSetInfo *rsinfo,
                                        Datum *values, bool *nulls, uint32 ncols,
                                        bool stats_per_record)
 {
-       uint64  total_count = 0;
-       uint64  total_rec_len = 0;
-       uint64  total_fpi_len = 0;
-       uint64  total_len = 0;
-       int     ri;
+       uint64          total_count = 0;
+       uint64          total_rec_len = 0;
+       uint64          total_fpi_len = 0;
+       uint64          total_len = 0;
+       int                     ri;
 
        /*
         * Each row shows its percentages of the total, so make a first pass to
@@ -488,7 +488,7 @@ GetXLogSummaryStats(XLogStats *stats, ReturnSetInfo *rsinfo,
                uint64          rec_len;
                uint64          fpi_len;
                uint64          tot_len;
-               RmgrData        desc;
+               RmgrData        desc;
 
                if (!RmgrIdIsValid(ri))
                        continue;
@@ -500,7 +500,7 @@ GetXLogSummaryStats(XLogStats *stats, ReturnSetInfo *rsinfo,
 
                if (stats_per_record)
                {
-                       int rj;
+                       int                     rj;
 
                        for (rj = 0; rj < MAX_XLINFO_TYPES; rj++)
                        {
@@ -556,10 +556,10 @@ GetWalStats(FunctionCallInfo fcinfo, XLogRecPtr start_lsn,
 #define PG_GET_WAL_STATS_COLS 9
        XLogRecPtr      first_record;
        XLogReaderState *xlogreader;
-       XLogStats stats;
+       XLogStats       stats;
        ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-       Datum   values[PG_GET_WAL_STATS_COLS];
-       bool    nulls[PG_GET_WAL_STATS_COLS];
+       Datum           values[PG_GET_WAL_STATS_COLS];
+       bool            nulls[PG_GET_WAL_STATS_COLS];
 
        SetSingleFuncCall(fcinfo, 0);
 
@@ -599,7 +599,7 @@ pg_get_wal_stats(PG_FUNCTION_ARGS)
 {
        XLogRecPtr      start_lsn;
        XLogRecPtr      end_lsn;
-       bool    stats_per_record;
+       bool            stats_per_record;
 
        start_lsn = PG_GETARG_LSN(0);
        end_lsn = PG_GETARG_LSN(1);
@@ -623,7 +623,7 @@ pg_get_wal_stats_till_end_of_wal(PG_FUNCTION_ARGS)
 {
        XLogRecPtr      start_lsn;
        XLogRecPtr      end_lsn = InvalidXLogRecPtr;
-       bool    stats_per_record;
+       bool            stats_per_record;
 
        start_lsn = PG_GETARG_LSN(0);
        stats_per_record = PG_GETARG_BOOL(1);
contrib/pgcrypto/openssl.c
index 53e64297c28449b2ae78d46f6cb65a1074d22a3e..cf315517e0cba6a18653a547659d0ae390c7c0b0 100644 (file)
@@ -373,7 +373,8 @@ gen_ossl_decrypt(PX_Cipher *c, int padding, const uint8 *data, unsigned dlen,
                                 uint8 *res, unsigned *rlen)
 {
        OSSLCipher *od = c->ptr;
-       int                     outlen, outlen2;
+       int                     outlen,
+                               outlen2;
 
        if (!od->init)
        {
@@ -402,7 +403,8 @@ gen_ossl_encrypt(PX_Cipher *c, int padding, const uint8 *data, unsigned dlen,
                                 uint8 *res, unsigned *rlen)
 {
        OSSLCipher *od = c->ptr;
-       int                     outlen, outlen2;
+       int                     outlen,
+                               outlen2;
 
        if (!od->init)
        {
contrib/pgstattuple/pgstattuple.c
index 30945669081aa2a33bdf06939db32036cec18aa7..93b7834b774a4b2ca84adda3b48991e6adb04937 100644 (file)
@@ -255,46 +255,46 @@ pgstat_relation(Relation rel, FunctionCallInfo fcinfo)
        if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind) ||
                rel->rd_rel->relkind == RELKIND_SEQUENCE)
        {
-                       return pgstat_heap(rel, fcinfo);
+               return pgstat_heap(rel, fcinfo);
        }
        else if (rel->rd_rel->relkind == RELKIND_INDEX)
        {
-                       switch (rel->rd_rel->relam)
-                       {
-                               case BTREE_AM_OID:
-                                       return pgstat_index(rel, BTREE_METAPAGE + 1,
-                                                                               pgstat_btree_page, fcinfo);
-                               case HASH_AM_OID:
-                                       return pgstat_index(rel, HASH_METAPAGE + 1,
-                                                                               pgstat_hash_page, fcinfo);
-                               case GIST_AM_OID:
-                                       return pgstat_index(rel, GIST_ROOT_BLKNO + 1,
-                                                                               pgstat_gist_page, fcinfo);
-                               case GIN_AM_OID:
-                                       err = "gin index";
-                                       break;
-                               case SPGIST_AM_OID:
-                                       err = "spgist index";
-                                       break;
-                               case BRIN_AM_OID:
-                                       err = "brin index";
-                                       break;
-                               default:
-                                       err = "unknown index";
-                                       break;
-                       }
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                        errmsg("index \"%s\" (%s) is not supported",
-                                                       RelationGetRelationName(rel), err)));
+               switch (rel->rd_rel->relam)
+               {
+                       case BTREE_AM_OID:
+                               return pgstat_index(rel, BTREE_METAPAGE + 1,
+                                                                       pgstat_btree_page, fcinfo);
+                       case HASH_AM_OID:
+                               return pgstat_index(rel, HASH_METAPAGE + 1,
+                                                                       pgstat_hash_page, fcinfo);
+                       case GIST_AM_OID:
+                               return pgstat_index(rel, GIST_ROOT_BLKNO + 1,
+                                                                       pgstat_gist_page, fcinfo);
+                       case GIN_AM_OID:
+                               err = "gin index";
+                               break;
+                       case SPGIST_AM_OID:
+                               err = "spgist index";
+                               break;
+                       case BRIN_AM_OID:
+                               err = "brin index";
+                               break;
+                       default:
+                               err = "unknown index";
+                               break;
+               }
+               ereport(ERROR,
+                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                errmsg("index \"%s\" (%s) is not supported",
+                                               RelationGetRelationName(rel), err)));
        }
        else
        {
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                        errmsg("cannot get tuple-level statistics for relation \"%s\"",
-                                                       RelationGetRelationName(rel)),
-                                        errdetail_relkind_not_supported(rel->rd_rel->relkind)));
+               ereport(ERROR,
+                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                errmsg("cannot get tuple-level statistics for relation \"%s\"",
+                                               RelationGetRelationName(rel)),
+                                errdetail_relkind_not_supported(rel->rd_rel->relkind)));
        }
 
        return 0;                                       /* should not happen */
contrib/postgres_fdw/connection.c
index 541526ab80b2cbe62188749893293ac9b01739a5..061ffaf329ead89221cdf09d4a9dc824db1c615c 100644 (file)
@@ -654,10 +654,10 @@ do_sql_command_end(PGconn *conn, const char *sql, bool consume_input)
        PGresult   *res;
 
        /*
-        * If requested, consume whatever data is available from the socket.
-        * (Note that if all data is available, this allows pgfdw_get_result to
-        * call PQgetResult without forcing the overhead of WaitLatchOrSocket,
-        * which would be large compared to the overhead of PQconsumeInput.)
+        * If requested, consume whatever data is available from the socket. (Note
+        * that if all data is available, this allows pgfdw_get_result to call
+        * PQgetResult without forcing the overhead of WaitLatchOrSocket, which
+        * would be large compared to the overhead of PQconsumeInput.)
         */
        if (consume_input && !PQconsumeInput(conn))
                pgfdw_report_error(ERROR, NULL, conn, false, sql);
@@ -1560,6 +1560,7 @@ pgfdw_finish_pre_commit_cleanup(List *pending_entries)
                entry = (ConnCacheEntry *) lfirst(lc);
 
                Assert(entry->changing_xact_state);
+
                /*
                 * We might already have received the result on the socket, so pass
                 * consume_input=true to try to consume it first
@@ -1634,6 +1635,7 @@ pgfdw_finish_pre_subcommit_cleanup(List *pending_entries, int curlevel)
                entry = (ConnCacheEntry *) lfirst(lc);
 
                Assert(entry->changing_xact_state);
+
                /*
                 * We might already have received the result on the socket, so pass
                 * consume_input=true to try to consume it first
contrib/postgres_fdw/postgres_fdw.c
index 0e5771c89d8271cc8fabb59c0a07fffdbb29c8e2..d56951153bb93f6a569aaf89e92a2e8db6ad5199 100644 (file)
@@ -1243,9 +1243,9 @@ postgresGetForeignPlan(PlannerInfo *root,
        if (best_path->fdw_private)
        {
                has_final_sort = boolVal(list_nth(best_path->fdw_private,
-                                                                                FdwPathPrivateHasFinalSort));
+                                                                                 FdwPathPrivateHasFinalSort));
                has_limit = boolVal(list_nth(best_path->fdw_private,
-                                                                       FdwPathPrivateHasLimit));
+                                                                        FdwPathPrivateHasLimit));
        }
 
        if (IS_SIMPLE_REL(foreignrel))
@@ -1926,7 +1926,7 @@ postgresBeginForeignModify(ModifyTableState *mtstate,
        values_end_len = intVal(list_nth(fdw_private,
                                                                         FdwModifyPrivateLen));
        has_returning = boolVal(list_nth(fdw_private,
-                                                                       FdwModifyPrivateHasReturning));
+                                                                        FdwModifyPrivateHasReturning));
        retrieved_attrs = (List *) list_nth(fdw_private,
                                                                                FdwModifyPrivateRetrievedAttrs);
 
@@ -2686,11 +2686,11 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags)
        dmstate->query = strVal(list_nth(fsplan->fdw_private,
                                                                         FdwDirectModifyPrivateUpdateSql));
        dmstate->has_returning = boolVal(list_nth(fsplan->fdw_private,
-                                                                                        FdwDirectModifyPrivateHasReturning));
+                                                                                         FdwDirectModifyPrivateHasReturning));
        dmstate->retrieved_attrs = (List *) list_nth(fsplan->fdw_private,
                                                                                                 FdwDirectModifyPrivateRetrievedAttrs);
        dmstate->set_processed = boolVal(list_nth(fsplan->fdw_private,
-                                                                                        FdwDirectModifyPrivateSetProcessed));
+                                                                                         FdwDirectModifyPrivateSetProcessed));
 
        /* Create context for per-tuple temp workspace. */
        dmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
contrib/test_decoding/test_decoding.c
index 08d366a594e196625b7f1c7706d8518b94115d44..3736da6784b1053c195ad16a711499d5637c22d8 100644 (file)
@@ -300,8 +300,8 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
        txn->output_plugin_private = txndata;
 
        /*
-        * If asked to skip empty transactions, we'll emit BEGIN at the point where
-        * the first operation is received for this transaction.
+        * If asked to skip empty transactions, we'll emit BEGIN at the point
+        * where the first operation is received for this transaction.
         */
        if (data->skip_empty_xacts)
                return;
@@ -360,8 +360,8 @@ pg_decode_begin_prepare_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
        txn->output_plugin_private = txndata;
 
        /*
-        * If asked to skip empty transactions, we'll emit BEGIN at the point where
-        * the first operation is received for this transaction.
+        * If asked to skip empty transactions, we'll emit BEGIN at the point
+        * where the first operation is received for this transaction.
         */
        if (data->skip_empty_xacts)
                return;
src/backend/access/common/toast_internals.c
index 7052ac99780b4e3057f525f89e8840514c22262b..576e585a89fc2d9d8d29887bd5d264057ed0e0fc 100644 (file)
@@ -663,9 +663,9 @@ init_toast_snapshot(Snapshot toast_snapshot)
        /*
         * Catalog snapshots can be returned by GetOldestSnapshot() even if not
         * registered or active. That easily hides bugs around not having a
-        * snapshot set up - most of the time there is a valid catalog
-        * snapshot. So additionally insist that the current snapshot is
-        * registered or active.
+        * snapshot set up - most of the time there is a valid catalog snapshot.
+        * So additionally insist that the current snapshot is registered or
+        * active.
         */
        Assert(HaveRegisteredOrActiveSnapshot());
 
src/backend/access/heap/pruneheap.c
index 98d31de00310ff0df024da96b7e05efe8670ce39..9f43bbe25f5bd3c894c71e810a3477e8be5b9600 100644 (file)
@@ -68,9 +68,9 @@ typedef struct
 
        /*
         * Tuple visibility is only computed once for each tuple, for correctness
-        * and efficiency reasons; see comment in heap_page_prune() for
-        * details. This is of type int8[,] instead of HTSV_Result[], so we can use
-        * -1 to indicate no visibility has been computed, e.g. for LP_DEAD items.
+        * and efficiency reasons; see comment in heap_page_prune() for details.
+        * This is of type int8[], instead of HTSV_Result[], so we can use -1 to
+        * indicate no visibility has been computed, e.g. for LP_DEAD items.
         *
         * Same indexing as ->marked.
         */
@@ -203,8 +203,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
                 */
                if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
                {
-                       int             ndeleted,
-                                       nnewlpdead;
+                       int                     ndeleted,
+                                               nnewlpdead;
 
                        ndeleted = heap_page_prune(relation, buffer, vistest, limited_xmin,
                                                                           limited_ts, &nnewlpdead, NULL);
@@ -267,7 +267,7 @@ heap_page_prune(Relation relation, Buffer buffer,
                                GlobalVisState *vistest,
                                TransactionId old_snap_xmin,
                                TimestampTz old_snap_ts,
-                               int     *nnewlpdead,
+                               int *nnewlpdead,
                                OffsetNumber *off_loc)
 {
        int                     ndeleted = 0;
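
The pruneheap.c comment above is worth illustrating: per-tuple visibility is computed at most once and memoized in an int8 array, with -1 as the "not computed" sentinel; that works because HTSV_Result values are small non-negative enum constants. A standalone sketch of the idiom, with a toy enum standing in for HTSV_Result:

    #include <stdint.h>
    #include <stdio.h>

    #define NITEMS 4

    /* Toy stand-in for HTSV_Result: real values are small and non-negative,
     * which is what lets -1 act as a "not yet computed" marker in int8. */
    typedef enum { TUP_DEAD = 0, TUP_LIVE = 1, TUP_RECENTLY_DEAD = 2 } VisResult;

    static VisResult
    compute_visibility(int item)
    {
        /* pretend this is the expensive per-tuple visibility check */
        return (item % 2) ? TUP_LIVE : TUP_DEAD;
    }

    int
    main(void)
    {
        int8_t  htsv[NITEMS];

        /* -1 everywhere: nothing computed yet (e.g. LP_DEAD items stay -1) */
        for (int i = 0; i < NITEMS; i++)
            htsv[i] = -1;

        /* compute each item's visibility exactly once, caching the result */
        for (int i = 0; i < NITEMS; i++)
            if (htsv[i] == -1)
                htsv[i] = (int8_t) compute_visibility(i);

        for (int i = 0; i < NITEMS; i++)
            printf("item %d -> %d\n", i, htsv[i]);
        return 0;
    }
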
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 9482f99e68b7b3a3047f71b1fc2012c121b2b023..b802ed247e7333433046defb80b568ad050d2391 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -326,7 +326,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
        PGRUsage        ru0;
        TimestampTz starttime = 0;
        PgStat_Counter startreadtime = 0,
-                                  startwritetime = 0;
+                               startwritetime = 0;
        WalUsage        startwalusage = pgWalUsage;
        int64           StartPageHit = VacuumPageHit,
                                StartPageMiss = VacuumPageMiss,
@@ -2232,12 +2232,12 @@ lazy_vacuum(LVRelState *vacrel)
                 * dead_items space is not CPU cache resident.
                 *
                 * We don't take any special steps to remember the LP_DEAD items (such
-                * as counting them in our final update to the stats system) when
-                * the optimization is applied.  Though the accounting used in
-                * analyze.c's acquire_sample_rows() will recognize the same LP_DEAD
-                * items as dead rows in its own stats report, that's okay.
-                * The discrepancy should be negligible.  If this optimization is ever
-                * expanded to cover more cases then this may need to be reconsidered.
+                * as counting them in our final update to the stats system) when the
+                * optimization is applied.  Though the accounting used in analyze.c's
+                * acquire_sample_rows() will recognize the same LP_DEAD items as dead
+                * rows in its own stats report, that's okay. The discrepancy should
+                * be negligible.  If this optimization is ever expanded to cover more
+                * cases then this may need to be reconsidered.
                 */
                threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES;
                bypass = (vacrel->lpdead_item_pages < threshold &&
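
The comment above accompanies the index-vacuuming bypass test: if the number of pages with LP_DEAD items falls below BYPASS_THRESHOLD_PAGES of the table (2 percent in this source; the real test also checks further conditions truncated in this hunk), index vacuuming is skipped. A toy standalone computation of that threshold, with invented table sizes:

    #include <stdbool.h>
    #include <stdio.h>

    /* Fraction of rel_pages used by the bypass test (0.02 in vacuumlazy.c). */
    #define BYPASS_THRESHOLD_PAGES 0.02

    int
    main(void)
    {
        long    rel_pages = 100000;        /* hypothetical table size in pages */
        long    lpdead_item_pages = 1500;  /* pages containing LP_DEAD items */

        double  threshold = (double) rel_pages * BYPASS_THRESHOLD_PAGES;
        bool    bypass = lpdead_item_pages < threshold;

        /* 1500 < 2000, so this prints: bypass = yes */
        printf("threshold = %.0f pages, bypass = %s\n",
               threshold, bypass ? "yes" : "no");
        return 0;
    }
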
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index e739c4a3bd91bfc0978bcd4e85e1dc4f70139210..90b6ac2884d5700238ba1d589b116e0d9464b54e 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -411,8 +411,8 @@ xact_desc_prepare(StringInfo buf, uint8 info, xl_xact_prepare *xlrec, RepOriginI
                                                           parsed.tsId, xlrec->initfileinval);
 
        /*
-        * Check if the replication origin has been set in this record in the
-        * same way as PrepareRedoAdd().
+        * Check if the replication origin has been set in this record in the same
+        * way as PrepareRedoAdd().
         */
        if (origin_id != InvalidRepOriginId)
                appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index c0dfea40c70f3b209760f0ca0d95518b93c65179..fefc563323d74c84971fdb866cff7c7b584c4dd3 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -210,7 +210,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
                                           bool detailed_format, StringInfo buf,
                                           uint32 *fpi_len)
 {
-       int     block_id;
+       int                     block_id;
 
        Assert(record != NULL);
 
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index e1d6ebbd3dbb5e0518d42108f3892d3d2a57855a..8ed69244e39ee3215c9971f8f5f756523919f775 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -38,7 +38,7 @@
 #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \
        { name, redo, desc, identify, startup, cleanup, mask, decode },
 
-RmgrData RmgrTable[RM_MAX_ID + 1] = {
+RmgrData       RmgrTable[RM_MAX_ID + 1] = {
 #include "access/rmgrlist.h"
 };
 
@@ -125,8 +125,8 @@ RegisterCustomRmgr(RmgrId rmid, RmgrData *rmgr)
 
                if (!pg_strcasecmp(RmgrTable[existing_rmid].rm_name, rmgr->rm_name))
                        ereport(ERROR,
-                               (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
-                                errdetail("Existing resource manager with ID %d has the same name.", existing_rmid)));
+                                       (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
+                                        errdetail("Existing resource manager with ID %d has the same name.", existing_rmid)));
        }
 
        /* register it */
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index dc0266693e396e127d2adf0380b1125e5d0b4165..75551f60cbcbee50edc0189ecd54f8c4a2b30c51 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -1119,7 +1119,7 @@ StartPrepare(GlobalTransaction gxact)
        if (hdr.nabortstats > 0)
        {
                save_state_data(abortstats,
-                                               hdr.nabortstats * sizeof(xl_xact_stats_item));
+                                               hdr.nabortstats * sizeof(xl_xact_stats_item));
                pfree(abortstats);
        }
        if (hdr.ninvalmsgs > 0)
@@ -1529,9 +1529,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
        bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
        abortrels = (RelFileNode *) bufptr;
        bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
-       commitstats = (xl_xact_stats_item*) bufptr;
+       commitstats = (xl_xact_stats_item *) bufptr;
        bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
-       abortstats = (xl_xact_stats_item*) bufptr;
+       abortstats = (xl_xact_stats_item *) bufptr;
        bufptr += MAXALIGN(hdr->nabortstats * sizeof(xl_xact_stats_item));
        invalmsgs = (SharedInvalidationMessage *) bufptr;
        bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 36852f2327704f930228603648e72ca8f07ddadb..71136b11a2a0936d5132ab114fd775b8550206a0 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -435,10 +435,10 @@ typedef struct XLogCtlInsert
        bool            fullPageWrites;
 
        /*
-        * runningBackups is a counter indicating the number of backups currently in
-        * progress. forcePageWrites is set to true when runningBackups is non-zero.
-        * lastBackupStart is the latest checkpoint redo location used as a starting
-        * point for an online backup.
+        * runningBackups is a counter indicating the number of backups currently
+        * in progress. forcePageWrites is set to true when runningBackups is
+        * non-zero. lastBackupStart is the latest checkpoint redo location used
+        * as a starting point for an online backup.
         */
        int                     runningBackups;
        XLogRecPtr      lastBackupStart;
@@ -5307,14 +5307,14 @@ StartupXLOG(void)
         * When recovering from a backup (we are in recovery, and archive recovery
         * was requested), complain if we did not roll forward far enough to reach
         * the point where the database is consistent.  For regular online
-        * backup-from-primary, that means reaching the end-of-backup WAL record (at
-        * which point we reset backupStartPoint to be Invalid), for
+        * backup-from-primary, that means reaching the end-of-backup WAL record
+        * (at which point we reset backupStartPoint to be Invalid), for
         * backup-from-replica (which can't inject records into the WAL stream),
         * that point is when we reach the minRecoveryPoint in pg_control (which
-        * we purposfully copy last when backing up from a replica).  For pg_rewind
-        * (which creates a backup_label with a method of "pg_rewind") or
-        * snapshot-style backups (which don't), backupEndRequired will be set to
-        * false.
+        * we purposefully copy last when backing up from a replica).  For
+        * pg_rewind (which creates a backup_label with a method of "pg_rewind")
+        * or snapshot-style backups (which don't), backupEndRequired will be set
+        * to false.
         *
         * Note: it is indeed okay to look at the local variable
         * LocalMinRecoveryPoint here, even though ControlFile->minRecoveryPoint
@@ -5328,8 +5328,8 @@ StartupXLOG(void)
                /*
                 * Ran off end of WAL before reaching end-of-backup WAL record, or
                 * minRecoveryPoint. That's a bad sign, indicating that you tried to
-                * recover from an online backup but never called pg_backup_stop(),
-                * or you didn't archive all the WAL needed.
+                * recover from an online backup but never called pg_backup_stop(), or
+                * you didn't archive all the WAL needed.
                 */
                if (ArchiveRecoveryRequested || ControlFile->backupEndRequired)
                {
@@ -8481,8 +8481,8 @@ do_pg_backup_stop(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
        WALInsertLockAcquireExclusive();
 
        /*
-        * It is expected that each do_pg_backup_start() call is matched by exactly
-        * one do_pg_backup_stop() call.
+        * It is expected that each do_pg_backup_start() call is matched by
+        * exactly one do_pg_backup_stop() call.
         */
        Assert(XLogCtl->Insert.runningBackups > 0);
        XLogCtl->Insert.runningBackups--;
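
The assertion above depends on runningBackups being a counter rather than a boolean: concurrent backups nest, and each do_pg_backup_start() must be matched by exactly one do_pg_backup_stop(). A trivial standalone illustration of that pairing invariant:

    #include <assert.h>
    #include <stdio.h>

    /* Simplified stand-in for XLogCtl->Insert.runningBackups. */
    static int  runningBackups = 0;

    static void
    backup_start(void)
    {
        runningBackups++;           /* one increment per start call */
    }

    static void
    backup_stop(void)
    {
        assert(runningBackups > 0); /* a stop with no matching start is a bug */
        runningBackups--;
    }

    int
    main(void)
    {
        backup_start();
        backup_start();             /* two concurrent backups are fine */
        backup_stop();
        printf("backups still running: %d\n", runningBackups);
        backup_stop();
        return 0;
    }
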
diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c
index a2657a20058c6239289869603dae6a65bf554868..4101a30e374ab3ea420d69142b0169e9630772a2 100644
--- a/src/backend/access/transam/xlogarchive.c
+++ b/src/backend/access/transam/xlogarchive.c
@@ -497,15 +497,15 @@ XLogArchiveNotify(const char *xlog)
        }
 
        /*
-        * Timeline history files are given the highest archival priority to
-        * lower the chance that a promoted standby will choose a timeline that
-        * is already in use.  However, the archiver ordinarily tries to gather
+        * Timeline history files are given the highest archival priority to lower
+        * the chance that a promoted standby will choose a timeline that is
+        * already in use.  However, the archiver ordinarily tries to gather
         * multiple files to archive from each scan of the archive_status
-        * directory, which means that newly created timeline history files
-        * could be left unarchived for a while.  To ensure that the archiver
-        * picks up timeline history files as soon as possible, we force the
-        * archiver to scan the archive_status directory the next time it looks
-        * for a file to archive.
+        * directory, which means that newly created timeline history files could
+        * be left unarchived for a while.  To ensure that the archiver picks up
+        * timeline history files as soon as possible, we force the archiver to
+        * scan the archive_status directory the next time it looks for a file to
+        * archive.
         */
        if (IsTLHistoryFileName(xlog))
                PgArchForceDirScan();
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index b61ae6c0b4aa1acdd34bd66d9fd266a03c0926f9..02bd919ff640fb46e3112bc16ada02466ecdde58 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -74,8 +74,8 @@ pg_backup_start(PG_FUNCTION_ARGS)
                                 errmsg("a backup is already in progress in this session")));
 
        /*
-        * Label file and tablespace map file need to be long-lived, since
-        * they are read in pg_backup_stop.
+        * Label file and tablespace map file need to be long-lived, since they
+        * are read in pg_backup_stop.
         */
        oldcontext = MemoryContextSwitchTo(TopMemoryContext);
        label_file = makeStringInfo();
@@ -127,8 +127,8 @@ pg_backup_stop(PG_FUNCTION_ARGS)
                                 errhint("Did you call pg_backup_start()?")));
 
        /*
-        * Stop the backup. Return a copy of the backup label and tablespace map so
-        * they can be written to disk by the caller.
+        * Stop the backup. Return a copy of the backup label and tablespace map
+        * so they can be written to disk by the caller.
         */
        stoppoint = do_pg_backup_stop(label_file->data, waitforarchive, NULL);
 
diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c
index 39ef865ed92997d5b37ac9b4a6f5c3ad2863a16f..6eba62642026d47d91bb93d256dee1c2eec2ea46 100644
--- a/src/backend/access/transam/xlogrecovery.c
+++ b/src/backend/access/transam/xlogrecovery.c
@@ -1205,9 +1205,9 @@ read_backup_label(XLogRecPtr *checkPointLoc, TimeLineID *backupLabelTLI,
         * method was used) or if this label came from somewhere else (the only
         * other option today being from pg_rewind).  If this was a streamed
         * backup then we know that we need to play through until we get to the
-        * end of the WAL which was generated during the backup (at which point
-        * we will have reached consistency and backupEndRequired will be reset
-        * to be false).
+        * end of the WAL which was generated during the backup (at which point we
+        * will have reached consistency and backupEndRequired will be reset to be
+        * false).
         */
        if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
        {
@@ -2055,10 +2055,9 @@ CheckRecoveryConsistency(void)
 
        /*
         * Have we passed our safe starting point? Note that minRecoveryPoint is
-        * known to be incorrectly set if recovering from a backup, until
-        * the XLOG_BACKUP_END arrives to advise us of the correct
-        * minRecoveryPoint. All we know prior to that is that we're not
-        * consistent yet.
+        * known to be incorrectly set if recovering from a backup, until the
+        * XLOG_BACKUP_END arrives to advise us of the correct minRecoveryPoint.
+        * All we know prior to that is that we're not consistent yet.
         */
        if (!reachedConsistency && !backupEndRequired &&
                minRecoveryPoint <= lastReplayedEndRecPtr)
@@ -3802,7 +3801,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
                HandleStartupProcInterrupts();
        }
 
-       return XLREAD_FAIL;                             /* not reached */
+       return XLREAD_FAIL;                     /* not reached */
 }
 
 
diff --git a/src/backend/access/transam/xlogstats.c b/src/backend/access/transam/xlogstats.c
index 6524a1ad0b939590e6e04ef251ee17fb1399f92a..514181792dc2366bdc511de088b1b1ae05305618 100644
--- a/src/backend/access/transam/xlogstats.c
+++ b/src/backend/access/transam/xlogstats.c
@@ -22,7 +22,7 @@ void
 XLogRecGetLen(XLogReaderState *record, uint32 *rec_len,
                          uint32 *fpi_len)
 {
-       int     block_id;
+       int                     block_id;
 
        /*
         * Calculate the amount of FPI data in the record.
@@ -53,10 +53,10 @@ XLogRecGetLen(XLogReaderState *record, uint32 *rec_len,
 void
 XLogRecStoreStats(XLogStats *stats, XLogReaderState *record)
 {
-       RmgrId  rmid;
-       uint8   recid;
-       uint32  rec_len;
-       uint32  fpi_len;
+       RmgrId          rmid;
+       uint8           recid;
+       uint32          rec_len;
+       uint32          fpi_len;
 
        Assert(stats != NULL && record != NULL);
 
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 29419c10a889357f1de9267b7f1358256dac1dfe..48516694f0873e5f09656b40c202c72d052cf126 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -80,10 +80,9 @@ typedef struct xl_invalid_page
 
 static HTAB *invalid_page_tab = NULL;
 
-static int
-read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
-                                                 int reqLen, XLogRecPtr targetRecPtr,
-                                                 char *cur_page, bool wait_for_wal);
+static int     read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
+                                                                         int reqLen, XLogRecPtr targetRecPtr,
+                                                                         char *cur_page, bool wait_for_wal);
 
 /* Report a reference to an invalid page */
 static void
@@ -940,8 +939,8 @@ read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
                 * archive in the timeline will get renamed to .partial by
                 * StartupXLOG().
                 *
-                * If that happens after our caller determined the TLI but before
-                * we actually read the xlog page, we might still try to read from the
+                * If that happens after our caller determined the TLI but before we
+                * actually read the xlog page, we might still try to read from the
                 * old (now renamed) segment and fail. There's not much we can do
                 * about this, but it can only happen when we're a leaf of a cascading
                 * standby whose primary gets promoted while we're decoding, so a
@@ -965,7 +964,7 @@ read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
                                 * end of WAL has been reached.
                                 */
                                private_data = (ReadLocalXLogPageNoWaitPrivate *)
-                                                                               state->private_data;
+                                       state->private_data;
                                private_data->end_of_wal = true;
                                break;
                        }
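
The hunk above shows the no-wait page reader reporting end-of-WAL through its caller-supplied private_data rather than through the return value alone. A standalone sketch of that out-of-band signaling pattern (toy reader and names, not the real xlogreader API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for ReadLocalXLogPageNoWaitPrivate. */
    typedef struct
    {
        bool    end_of_wal;         /* set when no more WAL is available */
    } NoWaitPrivate;

    /* Toy page reader: returns a byte count, or -1 once the WAL runs out. */
    static int
    read_page(NoWaitPrivate *priv, int page)
    {
        if (page >= 3)              /* pretend WAL ends after three pages */
        {
            priv->end_of_wal = true;    /* tell the caller why we stopped */
            return -1;
        }
        return 8192;
    }

    int
    main(void)
    {
        NoWaitPrivate priv = {false};
        int     page = 0;

        while (read_page(&priv, page) > 0)
            page++;
        printf("stopped at page %d, end_of_wal=%d\n", page, priv.end_of_wal);
        return 0;
    }
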
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index ece0a934f05be8a0c2ca9682e2a3d89312519ebe..e91a8e10a8d6f880f30e444116b8d42d8be6e93c 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -41,12 +41,12 @@ sub ParseHeader
        my $is_varlen            = 0;
        my $is_client_code       = 0;
 
-       $catalog{columns}     = [];
-       $catalog{toasting}    = [];
-       $catalog{indexing}    = [];
-       $catalog{other_oids}  = [];
+       $catalog{columns}      = [];
+       $catalog{toasting}     = [];
+       $catalog{indexing}     = [];
+       $catalog{other_oids}   = [];
        $catalog{foreign_keys} = [];
-       $catalog{client_code} = [];
+       $catalog{client_code}  = [];
 
        open(my $ifh, '<', $input_file) || die "$input_file: $!";
 
@@ -96,7 +96,9 @@ sub ParseHeader
                        push @{ $catalog{toasting} },
                          { parent_table => $1, toast_oid => $2, toast_index_oid => $3 };
                }
-               elsif (/^DECLARE_TOAST_WITH_MACRO\(\s*(\w+),\s*(\d+),\s*(\d+),\s*(\w+),\s*(\w+)\)/)
+               elsif (
+                       /^DECLARE_TOAST_WITH_MACRO\(\s*(\w+),\s*(\d+),\s*(\d+),\s*(\w+),\s*(\w+)\)/
+                 )
                {
                        push @{ $catalog{toasting} },
                          {
@@ -108,16 +110,17 @@ sub ParseHeader
                          };
                }
                elsif (
-                       /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(\w+),\s*(.+)\)/)
+                       /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(\w+),\s*(.+)\)/
+                 )
                {
                        push @{ $catalog{indexing} },
                          {
                                is_unique => $1 ? 1 : 0,
                                is_pkey   => $2 ? 1 : 0,
-                               index_name => $3,
-                               index_oid  => $4,
+                               index_name      => $3,
+                               index_oid       => $4,
                                index_oid_macro => $5,
-                               index_decl => $6
+                               index_decl      => $6
                          };
                }
                elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/)
diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl
index f4ec6d6d40cb061f6601ae15e4b6faaf54d03fbf..17b2c5e3f3deb91cd37a9f8e605bd4e64cdf43a2 100644
--- a/src/backend/catalog/genbki.pl
+++ b/src/backend/catalog/genbki.pl
@@ -814,7 +814,7 @@ Catalog::RenameTempFile($schemafile,       $tmpext);
 Catalog::RenameTempFile($fk_info_file,     $tmpext);
 Catalog::RenameTempFile($constraints_file, $tmpext);
 
-exit ($num_errors != 0 ? 1 : 0);
+exit($num_errors != 0 ? 1 : 0);
 
 #################### Subroutines ########################
 
@@ -916,11 +916,11 @@ sub morph_row_for_pgattr
        # Copy the type data from pg_type, and add some type-dependent items
        my $type = $types{$atttype};
 
-       $row->{atttypid}       = $type->{oid};
-       $row->{attlen}         = $type->{typlen};
-       $row->{attbyval}       = $type->{typbyval};
-       $row->{attalign}       = $type->{typalign};
-       $row->{attstorage}     = $type->{typstorage};
+       $row->{atttypid}   = $type->{oid};
+       $row->{attlen}     = $type->{typlen};
+       $row->{attbyval}   = $type->{typbyval};
+       $row->{attalign}   = $type->{typalign};
+       $row->{attstorage} = $type->{typstorage};
 
        # set attndims if it's an array type
        $row->{attndims} = $type->{typcategory} eq 'A' ? '1' : '0';
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 9b512ccd3c064ee38c04359cc736fc67ae952037..800f85ed7db42126bdfc8e45d7a7931d307564ef 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -1198,7 +1198,7 @@ heap_create_with_catalog(const char *relname,
                                        if (!OidIsValid(binary_upgrade_next_toast_pg_class_relfilenode))
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                                                 errmsg("toast relfilenode value not set when in binary upgrade mode")));
+                                                                errmsg("toast relfilenode value not set when in binary upgrade mode")));
 
                                        relfilenode = binary_upgrade_next_toast_pg_class_relfilenode;
                                        binary_upgrade_next_toast_pg_class_relfilenode = InvalidOid;
@@ -1265,8 +1265,8 @@ heap_create_with_catalog(const char *relname,
         * remove the disk file again.)
         *
         * NB: Note that passing create_storage = true is correct even for binary
-        * upgrade.  The storage we create here will be replaced later, but we need
-        * to have something on disk in the meanwhile.
+        * upgrade.  The storage we create here will be replaced later, but we
+        * need to have something on disk in the meanwhile.
         */
        new_rel_desc = heap_create(relname,
                                                           relnamespace,
@@ -3219,9 +3219,8 @@ restart:
                /*
                 * If this constraint has a parent constraint which we have not seen
                 * yet, keep track of it for the second loop, below.  Tracking parent
-                * constraints allows us to climb up to the top-level constraint
-                * and look for all possible relations referencing the partitioned
-                * table.
+                * constraints allows us to climb up to the top-level constraint and
+                * look for all possible relations referencing the partitioned table.
                 */
                if (OidIsValid(con->conparentid) &&
                        !list_member_oid(parent_cons, con->conparentid))
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 7539742c782887338219278eb05f22f265b43184..bdd3c348417234febf0ccf62040133733ef0f534 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -928,9 +928,9 @@ index_create(Relation heapRelation,
                        binary_upgrade_next_index_pg_class_relfilenode = InvalidOid;
 
                        /*
-                        * Note that we want create_storage = true for binary upgrade.
-                        * The storage we create here will be replaced later, but we need
-                        * to have something on disk in the meanwhile.
+                        * Note that we want create_storage = true for binary upgrade. The
+                        * storage we create here will be replaced later, but we need to
+                        * have something on disk in the meanwhile.
                         */
                        Assert(create_storage);
                }
diff --git a/src/backend/catalog/objectaccess.c b/src/backend/catalog/objectaccess.c
index 38922294e285d447fad58ef87b6356489883f6da..1c51df02d216d743af32cc1e7466c15621486c48 100644
--- a/src/backend/catalog/objectaccess.c
+++ b/src/backend/catalog/objectaccess.c
@@ -156,7 +156,7 @@ RunFunctionExecuteHook(Oid objectId)
  */
 void
 RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId,
-                                               bool is_internal)
+                                                  bool is_internal)
 {
        ObjectAccessPostCreate pc_arg;
 
@@ -167,8 +167,8 @@ RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId,
        pc_arg.is_internal = is_internal;
 
        (*object_access_hook_str) (OAT_POST_CREATE,
-                                                  classId, objectName, subId,
-                                                  (void *) &pc_arg);
+                                                          classId, objectName, subId,
+                                                          (void *) &pc_arg);
 }
 
 /*
@@ -178,7 +178,7 @@ RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId,
  */
 void
 RunObjectDropHookStr(Oid classId, const char *objectName, int subId,
-                                 int dropflags)
+                                        int dropflags)
 {
        ObjectAccessDrop drop_arg;
 
@@ -189,8 +189,8 @@ RunObjectDropHookStr(Oid classId, const char *objectName, int subId,
        drop_arg.dropflags = dropflags;
 
        (*object_access_hook_str) (OAT_DROP,
-                                                  classId, objectName, subId,
-                                                  (void *) &drop_arg);
+                                                          classId, objectName, subId,
+                                                          (void *) &drop_arg);
 }
 
 /*
@@ -205,8 +205,8 @@ RunObjectTruncateHookStr(const char *objectName)
        Assert(object_access_hook_str != NULL);
 
        (*object_access_hook_str) (OAT_TRUNCATE,
-                                                  RelationRelationId, objectName, 0,
-                                                  NULL);
+                                                          RelationRelationId, objectName, 0,
+                                                          NULL);
 }
 
 /*
@@ -216,7 +216,7 @@ RunObjectTruncateHookStr(const char *objectName)
  */
 void
 RunObjectPostAlterHookStr(Oid classId, const char *objectName, int subId,
-                                          Oid auxiliaryId, bool is_internal)
+                                                 Oid auxiliaryId, bool is_internal)
 {
        ObjectAccessPostAlter pa_arg;
 
@@ -228,8 +228,8 @@ RunObjectPostAlterHookStr(Oid classId, const char *objectName, int subId,
        pa_arg.is_internal = is_internal;
 
        (*object_access_hook_str) (OAT_POST_ALTER,
-                                                  classId, objectName, subId,
-                                                  (void *) &pa_arg);
+                                                          classId, objectName, subId,
+                                                          (void *) &pa_arg);
 }
 
 /*
@@ -250,8 +250,8 @@ RunNamespaceSearchHookStr(const char *objectName, bool ereport_on_violation)
        ns_arg.result = true;
 
        (*object_access_hook_str) (OAT_NAMESPACE_SEARCH,
-                                                  NamespaceRelationId, objectName, 0,
-                                                  (void *) &ns_arg);
+                                                          NamespaceRelationId, objectName, 0,
+                                                          (void *) &ns_arg);
 
        return ns_arg.result;
 }
@@ -268,6 +268,6 @@ RunFunctionExecuteHookStr(const char *objectName)
        Assert(object_access_hook_str != NULL);
 
        (*object_access_hook_str) (OAT_FUNCTION_EXECUTE,
-                                                  ProcedureRelationId, objectName, 0,
-                                                  NULL);
+                                                          ProcedureRelationId, objectName, 0,
+                                                          NULL);
 }
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 472dbda21166e2787d5b8383a0c2543c178a0ff1..489f0b2818ee7e5c0db4c2992df41bb41da038fb 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -145,7 +145,7 @@ CreateConstraintEntry(const char *constraintName,
                        for (i = 0; i < numFkDeleteSetCols; i++)
                                fkdatums[i] = Int16GetDatum(fkDeleteSetCols[i]);
                        confdelsetcolsArray = construct_array(fkdatums, numFkDeleteSetCols,
-                                                                                  INT2OID, 2, true, TYPALIGN_SHORT);
+                                                                                                 INT2OID, 2, true, TYPALIGN_SHORT);
                }
                else
                        confdelsetcolsArray = NULL;
@@ -1291,7 +1291,7 @@ DeconstructFkConstraintRow(HeapTuple tuple, int *numfks,
                }
                else
                {
-                       int num_delete_cols;
+                       int                     num_delete_cols;
 
                        arr = DatumGetArrayTypeP(adatum);       /* ensure not toasted */
                        if (ARR_NDIM(arr) != 1 ||
@@ -1301,7 +1301,7 @@ DeconstructFkConstraintRow(HeapTuple tuple, int *numfks,
                        num_delete_cols = ARR_DIMS(arr)[0];
                        memcpy(fk_del_set_cols, ARR_DATA_PTR(arr), num_delete_cols * sizeof(int16));
                        if ((Pointer) arr != DatumGetPointer(adatum))
-                               pfree(arr);                             /* free de-toasted copy, if any */
+                               pfree(arr);             /* free de-toasted copy, if any */
 
                        *num_fk_del_set_cols = num_delete_cols;
                }
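
The DeconstructFkConstraintRow hunk above uses a common PostgreSQL idiom: DatumGetArrayTypeP() returns either the original datum or a freshly allocated de-toasted copy, so the result is freed only when the two pointers differ. A toy standalone version of that ownership test (plain malloc in place of palloc):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for DatumGetArrayTypeP(): returns the original buffer
     * when it is already plain, or an allocated copy when "toasted". */
    static char *
    detoast(char *datum, int toasted)
    {
        char   *copy;

        if (!toasted)
            return datum;           /* no copy made */
        copy = malloc(strlen(datum) + 1);
        strcpy(copy, datum);        /* pretend this is decompression */
        return copy;
    }

    int
    main(void)
    {
        char    stored[] = "array-bytes";
        char   *arr = detoast(stored, 1);

        printf("using %s\n", arr);

        /* free the de-toasted copy, if any -- same test as in the hunk */
        if (arr != stored)
            free(arr);
        return 0;
    }
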
diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c
index 2631558ff11ab48b03bc9b2d0c4d440e2ece497b..e2c8bcb2797d59d31a5b5dce280cb8d9480e9f18 100644
--- a/src/backend/catalog/pg_publication.c
+++ b/src/backend/catalog/pg_publication.c
@@ -378,9 +378,9 @@ publication_add_relation(Oid pubid, PublicationRelInfo *pri,
        check_publication_add_relation(targetrel);
 
        /*
-        * Translate column names to attnums and make sure the column list contains
-        * only allowed elements (no system or generated columns etc.). Also build
-        * an array of attnums, for storing in the catalog.
+        * Translate column names to attnums and make sure the column list
+        * contains only allowed elements (no system or generated columns etc.).
+        * Also build an array of attnums, for storing in the catalog.
         */
        publication_translate_columns(pri->relation, pri->columns,
                                                                  &natts, &attarray);
@@ -555,11 +555,11 @@ pub_collist_to_bitmapset(Bitmapset *columns, Datum pubcols, MemoryContext mcxt)
        ArrayType  *arr;
        int                     nelems;
        int16      *elems;
-       MemoryContext   oldcxt = NULL;
+       MemoryContext oldcxt = NULL;
 
        /*
-        * If an existing bitmap was provided, use it. Otherwise just use NULL
-        * and build a new bitmap.
+        * If an existing bitmap was provided, use it. Otherwise just use NULL and
+        * build a new bitmap.
         */
        if (columns)
                result = columns;
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index e4d000d4fe86df24e3569ce301f3db011aaed7d2..cd31e68e95e5ac8402674e69bd9486fa6d142e04 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -340,13 +340,13 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
         * is in progress.
         *
         * The truncation operation might drop buffers that the checkpoint
-        * otherwise would have flushed. If it does, then it's essential that
-        * the files actually get truncated on disk before the checkpoint record
-        * is written. Otherwise, if reply begins from that checkpoint, the
+        * otherwise would have flushed. If it does, then it's essential that the
+        * files actually get truncated on disk before the checkpoint record is
+        * written. Otherwise, if reply begins from that checkpoint, the
         * to-be-truncated blocks might still exist on disk but have older
-        * contents than expected, which can cause replay to fail. It's OK for
-        * the blocks to not exist on disk at all, but not for them to have the
-        * wrong contents.
+        * contents than expected, which can cause replay to fail. It's OK for the
+        * blocks to not exist on disk at all, but not for them to have the wrong
+        * contents.
         */
        Assert((MyProc->delayChkptFlags & DELAY_CHKPT_COMPLETE) == 0);
        MyProc->delayChkptFlags |= DELAY_CHKPT_COMPLETE;
index 305226692a470d4f705a8d548f90435249459a1d..2da6b75a155df3416d18df9c77b84d3b3555350b 100644 (file)
@@ -429,7 +429,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
         */
        if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
        {
-               List *idxs = RelationGetIndexList(onerel);
+               List       *idxs = RelationGetIndexList(onerel);
 
                Irel = NULL;
                nindexes = 0;
@@ -680,10 +680,10 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
        }
 
        /*
-        * Now report ANALYZE to the cumulative stats system.  For regular tables, we do
-        * it only if not doing inherited stats.  For partitioned tables, we only
-        * do it for inherited stats. (We're never called for not-inherited stats
-        * on partitioned tables anyway.)
+        * Now report ANALYZE to the cumulative stats system.  For regular tables,
+        * we do it only if not doing inherited stats.  For partitioned tables, we
+        * only do it for inherited stats. (We're never called for not-inherited
+        * stats on partitioned tables anyway.)
         *
         * Reset the changes_since_analyze counter only if we analyzed all
         * columns; otherwise, there is still work for auto-analyze to do.
diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c
index 346f85f05eaa99c06d19881a6cd888b525b6f7c9..fcfc02d2aede9ba053450defa1e92a2d8ceaa9a5 100644
--- a/src/backend/commands/collationcmds.c
+++ b/src/backend/commands/collationcmds.c
@@ -246,8 +246,9 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e
 
                /*
                 * Nondeterministic collations are currently only supported with ICU
-                * because that's the only case where it can actually make a difference.
-                * So we can save writing the code for the other providers.
+                * because that's the only case where it can actually make a
+                * difference. So we can save writing the code for the other
+                * providers.
                 */
                if (!collisdeterministic && collprovider != COLLPROVIDER_ICU)
                        ereport(ERROR,
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 689713ea5802b7303a6cd48830cdd9f1565407e6..f448d39c7edc401a14dd284b96d777946ae21812 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -345,7 +345,7 @@ defGetCopyHeaderChoice(DefElem *def)
                        break;
                default:
                        {
-                               char    *sval = defGetString(def);
+                               char       *sval = defGetString(def);
 
                                /*
                                 * The set of strings accepted here should match up with the
@@ -365,8 +365,8 @@ defGetCopyHeaderChoice(DefElem *def)
                        break;
        }
        ereport(ERROR,
-                               (errcode(ERRCODE_SYNTAX_ERROR),
-                                errmsg("%s requires a Boolean value or \"match\"",
+                       (errcode(ERRCODE_SYNTAX_ERROR),
+                        errmsg("%s requires a Boolean value or \"match\"",
                                        def->defname)));
        return COPY_HEADER_FALSE;       /* keep compiler quiet */
 }
diff --git a/src/backend/commands/copyfromparse.c b/src/backend/commands/copyfromparse.c
index 58017ec53b005792b6583c434b6c7fc10d88f7e4..edb80e2cd52f8690ae1376f4ae373dc7badbaefa 100644
--- a/src/backend/commands/copyfromparse.c
+++ b/src/backend/commands/copyfromparse.c
@@ -800,7 +800,8 @@ NextCopyFromRawFields(CopyFromState cstate, char ***fields, int *nfields)
                                                         errmsg("column name mismatch in header line field %d: got null value (\"%s\"), expected \"%s\"",
                                                                        fldnum, cstate->opts.null_print, NameStr(attr->attname))));
 
-                               if (namestrcmp(&attr->attname, colName) != 0) {
+                               if (namestrcmp(&attr->attname, colName) != 0)
+                               {
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
                                                         errmsg("column name mismatch in header line field %d: got \"%s\", expected \"%s\"",
diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c
index 643bbf286e5c8d956cb18cc9d350b38811c5c22c..fca29a9a1050009fb2835bcb6f2637a139527da1 100644
--- a/src/backend/commands/copyto.c
+++ b/src/backend/commands/copyto.c
@@ -439,8 +439,8 @@ BeginCopyTo(ParseState *pstate,
                 * locks on the source table(s).
                 */
                rewritten = pg_analyze_and_rewrite_fixedparams(raw_query,
-                                                                                  pstate->p_sourcetext, NULL, 0,
-                                                                                  NULL);
+                                                                                                          pstate->p_sourcetext, NULL, 0,
+                                                                                                          NULL);
 
                /* check that we got back something we can work with */
                if (rewritten == NIL)
@@ -862,7 +862,7 @@ DoCopyTo(CopyToState cstate)
 
                                if (cstate->opts.csv_mode)
                                        CopyAttributeOutCSV(cstate, colname, false,
-                                                                       list_length(cstate->attnumlist) == 1);
+                                                                               list_length(cstate->attnumlist) == 1);
                                else
                                        CopyAttributeOutText(cstate, colname);
                        }
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 6da58437c58be595b695e6c8404b52ccdffbdac7..f2691684010a6c112aec4132b9212c20fb08adb0 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -201,9 +201,9 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid,
                 *
                 * We typically do not read relation data into shared_buffers without
                 * holding a relation lock. It's unclear what could go wrong if we
-                * skipped it in this case, because nobody can be modifying either
-                * the source or destination database at this point, and we have locks
-                * on both databases, too, but let's take the conservative route.
+                * skipped it in this case, because nobody can be modifying either the
+                * source or destination database at this point, and we have locks on
+                * both databases, too, but let's take the conservative route.
                 */
                dstrelid.relId = srcrelid.relId = relinfo->reloid;
                LockRelationId(&srcrelid, AccessShareLock);
@@ -274,9 +274,9 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
 
        /*
         * We can't use a real relcache entry for a relation in some other
-        * database, but since we're only going to access the fields related
-        * to physical storage, a fake one is good enough. If we didn't do this
-        * and used the smgr layer directly, we would have to worry about
+        * database, but since we're only going to access the fields related to
+        * physical storage, a fake one is good enough. If we didn't do this and
+        * used the smgr layer directly, we would have to worry about
         * invalidations.
         */
        rel = CreateFakeRelcacheEntry(rnode);
@@ -333,10 +333,10 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
                                                          char *srcpath, List *rnodelist,
                                                          Snapshot snapshot)
 {
-       BlockNumber             blkno = BufferGetBlockNumber(buf);
-       OffsetNumber    offnum;
-       OffsetNumber    maxoff;
-       HeapTupleData   tuple;
+       BlockNumber blkno = BufferGetBlockNumber(buf);
+       OffsetNumber offnum;
+       OffsetNumber maxoff;
+       HeapTupleData tuple;
 
        maxoff = PageGetMaxOffsetNumber(page);
 
@@ -368,10 +368,10 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
                        CreateDBRelInfo *relinfo;
 
                        /*
-                        * ScanSourceDatabasePgClassTuple is in charge of constructing
-                        * a CreateDBRelInfo object for this tuple, but can also decide
-                        * that this tuple isn't something we need to copy. If we do need
-                        * to copy the relation, add it to the list.
+                        * ScanSourceDatabasePgClassTuple is in charge of constructing a
+                        * CreateDBRelInfo object for this tuple, but can also decide that
+                        * this tuple isn't something we need to copy. If we do need to
+                        * copy the relation, add it to the list.
                         */
                        relinfo = ScanSourceDatabasePgClassTuple(&tuple, tbid, dbid,
                                                                                                         srcpath);
@@ -395,9 +395,9 @@ CreateDBRelInfo *
 ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid,
                                                           char *srcpath)
 {
-       CreateDBRelInfo    *relinfo;
-       Form_pg_class           classForm;
-       Oid                                     relfilenode = InvalidOid;
+       CreateDBRelInfo *relinfo;
+       Form_pg_class classForm;
+       Oid                     relfilenode = InvalidOid;
 
        classForm = (Form_pg_class) GETSTRUCT(tuple);
 
@@ -406,11 +406,11 @@ ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid,
         *
         * Shared objects don't need to be copied, because they are shared.
         * Objects without storage can't be copied, because there's nothing to
-        * copy. Temporary relations don't need to be copied either, because
-        * they are inaccessible outside of the session that created them,
-        * which must be gone already, and couldn't connect to a different database
-        * if it still existed. autovacuum will eventually remove the pg_class
-        * entries as well.
+        * copy. Temporary relations don't need to be copied either, because they
+        * are inaccessible outside of the session that created them, which must
+        * be gone already, and couldn't connect to a different database if it
+        * still existed. autovacuum will eventually remove the pg_class entries
+        * as well.
         */
        if (classForm->reltablespace == GLOBALTABLESPACE_OID ||
                !RELKIND_HAS_STORAGE(classForm->relkind) ||
@@ -702,7 +702,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
        DefElem    *dcollate = NULL;
        DefElem    *dctype = NULL;
        DefElem    *diculocale = NULL;
-       DefElem    *dlocprovider = NULL;
+       DefElem    *dlocprovider = NULL;
        DefElem    *distemplate = NULL;
        DefElem    *dallowconnections = NULL;
        DefElem    *dconnlimit = NULL;
@@ -824,10 +824,10 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
                        /*
                         * We don't normally permit new databases to be created with
                         * system-assigned OIDs. pg_upgrade tries to preserve database
-                        * OIDs, so we can't allow any database to be created with an
-                        * OID that might be in use in a freshly-initialized cluster
-                        * created by some future version. We assume all such OIDs will
-                        * be from the system-managed OID range.
+                        * OIDs, so we can't allow any database to be created with an OID
+                        * that might be in use in a freshly-initialized cluster created
+                        * by some future version. We assume all such OIDs will be from
+                        * the system-managed OID range.
                         *
                         * As an exception, however, we permit any OID to be assigned when
                         * allow_system_table_mods=on (so that initdb can assign system
@@ -1348,15 +1348,15 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
        InvokeObjectPostCreateHook(DatabaseRelationId, dboid, 0);
 
        /*
-        * If we're going to be reading data for the to-be-created database
-        * into shared_buffers, take a lock on it. Nobody should know that this
+        * If we're going to be reading data for the to-be-created database into
+        * shared_buffers, take a lock on it. Nobody should know that this
         * database exists yet, but it's good to maintain the invariant that a
         * lock an AccessExclusiveLock on the database is sufficient to drop all
         * of its buffers without worrying about more being read later.
         *
-        * Note that we need to do this before entering the PG_ENSURE_ERROR_CLEANUP
-        * block below, because createdb_failure_callback expects this lock to
-        * be held already.
+        * Note that we need to do this before entering the
+        * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback
+        * expects this lock to be held already.
         */
        if (dbstrategy == CREATEDB_WAL_LOG)
                LockSharedObject(DatabaseRelationId, dboid, 0, AccessShareLock);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index d2a24798220f0e9d0c7d0524f2ed1cc604a41f68..c461061fe9edb757d5dc96306a166f2d0c3aa702 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3833,7 +3833,7 @@ ExplainTargetRel(Plan *plan, Index rti, ExplainState *es)
                        if (rte->tablefunc)
                                if (rte->tablefunc->functype == TFT_XMLTABLE)
                                        objectname = "xmltable";
-                               else /* Must be TFT_JSON_TABLE */
+                               else                    /* Must be TFT_JSON_TABLE */
                                        objectname = "json_table";
                        else
                                objectname = NULL;
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index 1013790dbb342960010fb02732ef72ea0549acf3..767d9b961904c7f8f4e09a2e802a4b9750f662e8 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -758,10 +758,10 @@ execute_sql_string(const char *sql)
                CommandCounterIncrement();
 
                stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
-                                                                                  sql,
-                                                                                  NULL,
-                                                                                  0,
-                                                                                  NULL);
+                                                                                                          sql,
+                                                                                                          NULL,
+                                                                                                          0,
+                                                                                                          NULL);
                stmt_list = pg_plan_queries(stmt_list, sql, CURSOR_OPT_PARALLEL_OK, NULL);
 
                foreach(lc2, stmt_list)
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index 52534f182745e2b29038ab4ce56533aab6df3f77..d1ee1064652dee00eeda675150a6aba009208ee6 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -332,8 +332,8 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
                /*
                 * Inform cumulative stats system about our activity: basically, we
                 * truncated the matview and inserted some new data.  (The concurrent
-                * code path above doesn't need to worry about this because the inserts
-                * and deletes it issues get counted by lower-level code.)
+                * code path above doesn't need to worry about this because the
+                * inserts and deletes it issues get counted by lower-level code.)
                 */
                pgstat_count_truncate(matviewRel);
                if (!stmt->skipData)
diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c
index 6df0e6670fd59ff09ce797198effd871047537c5..8e645741e4e8ee534f889bf1de50014530bf892a 100644
--- a/src/backend/commands/publicationcmds.c
+++ b/src/backend/commands/publicationcmds.c
@@ -297,7 +297,7 @@ contain_invalid_rfcolumn_walker(Node *node, rf_context *context)
  */
 bool
 pub_rf_contains_invalid_column(Oid pubid, Relation relation, List *ancestors,
-                                                bool pubviaroot)
+                                                          bool pubviaroot)
 {
        HeapTuple       rftuple;
        Oid                     relid = RelationGetRelid(relation);
@@ -373,7 +373,7 @@ pub_rf_contains_invalid_column(Oid pubid, Relation relation, List *ancestors,
  */
 bool
 pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestors,
-                                                bool pubviaroot)
+                                                                       bool pubviaroot)
 {
        HeapTuple       tuple;
        Oid                     relid = RelationGetRelid(relation);
@@ -384,8 +384,8 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor
 
        /*
         * For a partition, if pubviaroot is true, find the topmost ancestor that
-        * is published via this publication as we need to use its column list
-        * for the changes.
+        * is published via this publication as we need to use its column list for
+        * the changes.
         *
         * Note that even though the column list used is for an ancestor, the
         * REPLICA IDENTITY used will be for the actual child table.
@@ -399,19 +399,19 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor
        }
 
        tuple = SearchSysCache2(PUBLICATIONRELMAP,
-                                                         ObjectIdGetDatum(publish_as_relid),
-                                                         ObjectIdGetDatum(pubid));
+                                                       ObjectIdGetDatum(publish_as_relid),
+                                                       ObjectIdGetDatum(pubid));
 
        if (!HeapTupleIsValid(tuple))
                return false;
 
        datum = SysCacheGetAttr(PUBLICATIONRELMAP, tuple,
-                                                         Anum_pg_publication_rel_prattrs,
-                                                         &isnull);
+                                                       Anum_pg_publication_rel_prattrs,
+                                                       &isnull);
 
        if (!isnull)
        {
-               int     x;
+               int                     x;
                Bitmapset  *idattrs;
                Bitmapset  *columns = NULL;
 
@@ -429,8 +429,9 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor
                /*
                 * Attnums in the bitmap returned by RelationGetIndexAttrBitmap are
                 * offset (to handle system columns the usual way), while column list
-                * does not use offset, so we can't do bms_is_subset(). Instead, we have
-                * to loop over the idattrs and check all of them are in the list.
+                * does not use offset, so we can't do bms_is_subset(). Instead, we
+                * have to loop over the idattrs and check all of them are in the
+                * list.
                 */
                x = -1;
                while ((x = bms_next_member(idattrs, x)) >= 0)
@@ -440,14 +441,14 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor
                        /*
                         * If pubviaroot is true, we are validating the column list of the
                         * parent table, but the bitmap contains the replica identity
-                        * information of the child table. The parent/child attnums may not
-                        * match, so translate them to the parent - get the attname from
-                        * the child, and look it up in the parent.
+                        * information of the child table. The parent/child attnums may
+                        * not match, so translate them to the parent - get the attname
+                        * from the child, and look it up in the parent.
                         */
                        if (pubviaroot)
                        {
                                /* attribute name in the child table */
-                               char   *colname = get_attname(relid, attnum, false);
+                               char       *colname = get_attname(relid, attnum, false);
 
                                /*
                                 * Determine the attnum for the attribute name in parent (we
@@ -720,7 +721,7 @@ TransformPubWhereClauses(List *tables, const char *queryString,
  */
 static void
 CheckPubRelationColumnList(List *tables, const char *queryString,
-                                          bool pubviaroot)
+                                                  bool pubviaroot)
 {
        ListCell   *lc;
 
@@ -864,7 +865,7 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt)
                                                                         publish_via_partition_root);
 
                        CheckPubRelationColumnList(rels, pstate->p_sourcetext,
-                                                                  publish_via_partition_root);
+                                                                          publish_via_partition_root);
 
                        PublicationAddTables(puboid, rels, true, NULL);
                        CloseTableList(rels);
@@ -1198,8 +1199,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, HeapTuple tup,
 
                                /* Transform the int2vector column list to a bitmap. */
                                columnListDatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple,
-                                                                                                  Anum_pg_publication_rel_prattrs,
-                                                                                                  &isnull);
+                                                                                                 Anum_pg_publication_rel_prattrs,
+                                                                                                 &isnull);
 
                                if (!isnull)
                                        oldcolumns = pub_collist_to_bitmapset(NULL, columnListDatum, NULL);
@@ -1210,15 +1211,15 @@ AlterPublicationTables(AlterPublicationStmt *stmt, HeapTuple tup,
                        foreach(newlc, rels)
                        {
                                PublicationRelInfo *newpubrel;
-                               Oid                                     newrelid;
-                               Bitmapset                  *newcolumns = NULL;
+                               Oid                     newrelid;
+                               Bitmapset  *newcolumns = NULL;
 
                                newpubrel = (PublicationRelInfo *) lfirst(newlc);
                                newrelid = RelationGetRelid(newpubrel->relation);
 
                                /*
-                                * If the new publication has column list, transform it to
-                                * bitmap too.
+                                * If the new publication has column list, transform it to a
+                                * bitmap too.
                                 */
                                if (newpubrel->columns)
                                {
diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c
index 54a190722dfc8af7a9f065dcabb6e09fa7b61a25..2e716743dd636ab24067fa360877b43b7e556e31 100644
--- a/src/backend/commands/statscmds.c
+++ b/src/backend/commands/statscmds.c
@@ -258,9 +258,9 @@ CreateStatistics(CreateStatsStmt *stmt)
                        nattnums++;
                        ReleaseSysCache(atttuple);
                }
-               else if (IsA(selem->expr, Var)) /* column reference in parens */
+               else if (IsA(selem->expr, Var)) /* column reference in parens */
                {
-                       Var *var = (Var *) selem->expr;
+                       Var                *var = (Var *) selem->expr;
                        TypeCacheEntry *type;
 
                        /* Disallow use of system attributes in extended stats */
@@ -297,10 +297,11 @@ CreateStatistics(CreateStatsStmt *stmt)
                        while ((k = bms_next_member(attnums, k)) >= 0)
                        {
                                AttrNumber      attnum = k + FirstLowInvalidHeapAttributeNumber;
+
                                if (attnum <= 0)
                                        ereport(ERROR,
-                                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                errmsg("statistics creation on system columns is not supported")));
+                                                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                                        errmsg("statistics creation on system columns is not supported")));
                        }
 
                        /*
@@ -511,9 +512,9 @@ CreateStatistics(CreateStatsStmt *stmt)
        relation_close(statrel, RowExclusiveLock);
 
        /*
-        * We used to create the pg_statistic_ext_data tuple too, but it's not clear
-        * what value should the stxdinherit flag have (it depends on whether the rel
-        * is partitioned, contains data, etc.)
+        * We used to create the pg_statistic_ext_data tuple too, but it's not
+        * clear what value the stxdinherit flag should have (it depends on
+        * whether the rel is partitioned, contains data, etc.)
         */
 
        InvokeObjectPostCreateHook(StatisticExtRelationId, statoid, 0);
index b94236f74d31e0dbd15cc7bec509ad45334b7b2b..690cdaa426ec78e8adaf72f66d9ce601217d04ae 100644
@@ -1578,13 +1578,13 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
        PG_END_TRY();
 
        /*
-        * Tell the cumulative stats system that the subscription is getting dropped.
-        * We can safely report dropping the subscription statistics here if the
-        * subscription is associated with a replication slot since we cannot run
-        * DROP SUBSCRIPTION inside a transaction block.  Subscription statistics
-        * will be removed later by (auto)vacuum either if it's not associated
-        * with a replication slot or if the message for dropping the subscription
-        * gets lost.
+        * Tell the cumulative stats system that the subscription is getting
+        * dropped. We can safely report dropping the subscription statistics here
+        * if the subscription is associated with a replication slot since we
+        * cannot run DROP SUBSCRIPTION inside a transaction block.  Subscription
+        * statistics will be removed later by (auto)vacuum either if it's not
+        * associated with a replication slot or if the message for dropping the
+        * subscription gets lost.
         */
        if (slotname)
                pgstat_drop_subscription(subid);
index 2cd8546d4717f9e584c7f91c67dbaeef8b9fdcfe..2de0ebacec38a26f1721f6ba013ab9ad4550f346 100644
@@ -495,8 +495,8 @@ static ObjectAddress addFkRecurseReferenced(List **wqueue, Constraint *fkconstra
                                                                                        bool old_check_ok,
                                                                                        Oid parentDelTrigger, Oid parentUpdTrigger);
 static void validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums,
-                                                                          int numfksetcols, const int16 *fksetcolsattnums,
-                                                                          List *fksetcols);
+                                                                                int numfksetcols, const int16 *fksetcolsattnums,
+                                                                                List *fksetcols);
 static void addFkRecurseReferencing(List **wqueue, Constraint *fkconstraint,
                                                                        Relation rel, Relation pkrel, Oid indexOid, Oid parentConstr,
                                                                        int numfks, int16 *pkattnum, int16 *fkattnum,
@@ -5579,7 +5579,7 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode,
 
                        foreach(lc, seqlist)
                        {
-                               Oid         seq_relid = lfirst_oid(lc);
+                               Oid                     seq_relid = lfirst_oid(lc);
 
                                SequenceChangePersistence(seq_relid, tab->newrelpersistence);
                        }
@@ -9448,8 +9448,8 @@ validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums,
 {
        for (int i = 0; i < numfksetcols; i++)
        {
-               int16 setcol_attnum = fksetcolsattnums[i];
-               bool seen = false;
+               int16           setcol_attnum = fksetcolsattnums[i];
+               bool            seen = false;
 
                for (int j = 0; j < numfks; j++)
                {
@@ -9462,7 +9462,8 @@ validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums,
 
                if (!seen)
                {
-                       char *col = strVal(list_nth(fksetcols, i));
+                       char       *col = strVal(list_nth(fksetcols, i));
+
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
                                         errmsg("column \"%s\" referenced in ON DELETE SET action must be part of foreign key", col)));
@@ -15890,6 +15891,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
                        CatalogTupleUpdate(pg_index, &pg_index_tuple->t_self, pg_index_tuple);
                        InvokeObjectPostAlterHookArg(IndexRelationId, thisIndexOid, 0,
                                                                                 InvalidOid, is_internal);
+
                        /*
                         * Invalidate the relcache for the table, so that after we commit
                         * all sessions will refresh the table's replica identity index
@@ -17931,12 +17933,12 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd,
        /*
         * If the partition we just attached is partitioned itself, invalidate
         * relcache for all descendent partitions too to ensure that their
-        * rd_partcheck expression trees are rebuilt; partitions already locked
-        * at the beginning of this function.
+        * rd_partcheck expression trees are rebuilt; partitions already locked at
+        * the beginning of this function.
         */
        if (attachrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
        {
-               ListCell *l;
+               ListCell   *l;
 
                foreach(l, attachrel_children)
                {
@@ -18652,13 +18654,13 @@ DetachPartitionFinalize(Relation rel, Relation partRel, bool concurrent,
        /*
         * If the partition we just detached is partitioned itself, invalidate
         * relcache for all descendent partitions too to ensure that their
-        * rd_partcheck expression trees are rebuilt; must lock partitions
-        * before doing so, using the same lockmode as what partRel has been
-        * locked with by the caller.
+        * rd_partcheck expression trees are rebuilt; we must lock the partitions
+        * before doing so, using the same lockmode with which the caller locked
+        * partRel.
         */
        if (partRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
        {
-               List   *children;
+               List       *children;
 
                children = find_all_inheritors(RelationGetRelid(partRel),
                                                                           AccessExclusiveLock, NULL);
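
The comments rewrapped above describe a lock-then-invalidate pattern: every
descendent partition must be locked before its relcache entry is invalidated.
A hedged sketch of that pattern, assuming the invalidation goes through
CacheInvalidateRelcacheByRelid() (the actual call used is not shown in this
hunk):

	List	   *children;
	ListCell   *lc;

	/* lock all descendents first, then invalidate their relcache entries */
	children = find_all_inheritors(RelationGetRelid(partRel),
								   AccessExclusiveLock, NULL);
	foreach(lc, children)
		CacheInvalidateRelcacheByRelid(lfirst_oid(lc));
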
index 822d65287ef1c3b9316d5ddeb8146b494fb0caf3..690f05f66200866b90ca40023037956f7c5f84de 100644
@@ -89,7 +89,7 @@ char     *default_tablespace = NULL;
 char      *temp_tablespaces = NULL;
 bool           allow_in_place_tablespaces = false;
 
-Oid         binary_upgrade_next_pg_tablespace_oid = InvalidOid;
+Oid                    binary_upgrade_next_pg_tablespace_oid = InvalidOid;
 
 static void create_tablespace_directories(const char *location,
                                                                                  const Oid tablespaceoid);
index c263f6c8b9f19cd1e0cb01074096b458bc7ef1b6..984305ba31cf1798b4b64e448a02904ffed9ad9f 100644
@@ -798,11 +798,11 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt)
         */
        if (drolemembers)
        {
-               List       *rolemembers = (List *) drolemembers->arg;
+               List       *rolemembers = (List *) drolemembers->arg;
 
                CommandCounterIncrement();
 
-               if (stmt->action == +1)         /* add members to role */
+               if (stmt->action == +1) /* add members to role */
                        AddRoleMems(rolename, roleid,
                                                rolemembers, roleSpecsToIds(rolemembers),
                                                GetUserId(), false);
index e0fc7e8d7949c315da4c3bfa65210652d5e418f0..8df25f59d87ded1848746b2eda973907f479bf45 100644
@@ -1409,7 +1409,7 @@ vac_update_relstats(Relation relation,
                *frozenxid_updated = false;
        if (TransactionIdIsNormal(frozenxid) && oldfrozenxid != frozenxid)
        {
-               bool    update = false;
+               bool            update = false;
 
                if (TransactionIdPrecedes(oldfrozenxid, frozenxid))
                        update = true;
@@ -1432,7 +1432,7 @@ vac_update_relstats(Relation relation,
                *minmulti_updated = false;
        if (MultiXactIdIsValid(minmulti) && oldminmulti != minmulti)
        {
-               bool    update = false;
+               bool            update = false;
 
                if (MultiXactIdPrecedes(oldminmulti, minmulti))
                        update = true;
index bbf3b69c57ea926a6b05c4661b66bb25d8636158..1753da6c83085a45cabbc9740d0efd4123193fae 100644
@@ -112,7 +112,7 @@ typedef enum PVIndVacStatus
        PARALLEL_INDVAC_STATUS_NEED_BULKDELETE,
        PARALLEL_INDVAC_STATUS_NEED_CLEANUP,
        PARALLEL_INDVAC_STATUS_COMPLETED
-}                      PVIndVacStatus;
+} PVIndVacStatus;
 
 /*
  * Struct for index vacuum statistics of an index that is used for parallel vacuum.
index 38b94c0276745b638cf6bd68449f4ebd39f2e374..2831e7978b57ece7eaf41f20ca155f43eae5fb4a 100644
@@ -2504,7 +2504,7 @@ ExecInitExprRec(Expr *node, ExprState *state,
                                        if (ctor->type == JSCTOR_JSON_SCALAR)
                                        {
                                                bool            is_jsonb =
-                                                       ctor->returning->format->format_type == JS_FORMAT_JSONB;
+                                               ctor->returning->format->format_type == JS_FORMAT_JSONB;
 
                                                scratch.d.json_constructor.arg_type_cache =
                                                        palloc(sizeof(*scratch.d.json_constructor.arg_type_cache) * nargs);
@@ -2666,7 +2666,7 @@ ExecInitExprRec(Expr *node, ExprState *state,
                                        {
                                                cstate->coercion = *coercion;
                                                cstate->estate = *coercion ?
-                                                       ExecInitExprWithCaseValue((Expr *)(*coercion)->expr,
+                                                       ExecInitExprWithCaseValue((Expr *) (*coercion)->expr,
                                                                                                          state->parent,
                                                                                                          caseval, casenull) : NULL;
                                        }
index 3b1c045c52fe0e6652d0f517ecea7e9154e52236..e024611aa5438daebae0bc35bcbd69cf6ea16025 100644
@@ -3978,8 +3978,8 @@ ExecEvalJsonIsPredicate(ExprState *state, ExprEvalStep *op)
                }
 
                /*
-                * Do full parsing pass only for uniqueness check or for
-                * JSON text validation.
+                * Do a full parsing pass only for the uniqueness check or for JSON
+                * text validation.
                 */
                if (res && (pred->unique_keys || exprtype == TEXTOID))
                        res = json_validate(json, pred->unique_keys, false);
@@ -4513,20 +4513,20 @@ ExecEvalJsonConstructor(ExprState *state, ExprEvalStep *op,
        if (ctor->type == JSCTOR_JSON_ARRAY)
                res = (is_jsonb ?
                           jsonb_build_array_worker :
-                          json_build_array_worker)(op->d.json_constructor.nargs,
-                                                                               op->d.json_constructor.arg_values,
-                                                                               op->d.json_constructor.arg_nulls,
-                                                                               op->d.json_constructor.arg_types,
-                                                                               op->d.json_constructor.constructor->absent_on_null);
-       else if (ctor->type == JSCTOR_JSON_OBJECT)
-               res = (is_jsonb ?
-                          jsonb_build_object_worker :
-                          json_build_object_worker)(op->d.json_constructor.nargs,
+                          json_build_array_worker) (op->d.json_constructor.nargs,
                                                                                 op->d.json_constructor.arg_values,
                                                                                 op->d.json_constructor.arg_nulls,
                                                                                 op->d.json_constructor.arg_types,
-                                                                                op->d.json_constructor.constructor->absent_on_null,
-                                                                                op->d.json_constructor.constructor->unique);
+                                                                                op->d.json_constructor.constructor->absent_on_null);
+       else if (ctor->type == JSCTOR_JSON_OBJECT)
+               res = (is_jsonb ?
+                          jsonb_build_object_worker :
+                          json_build_object_worker) (op->d.json_constructor.nargs,
+                                                                                 op->d.json_constructor.arg_values,
+                                                                                 op->d.json_constructor.arg_nulls,
+                                                                                 op->d.json_constructor.arg_types,
+                                                                                 op->d.json_constructor.constructor->absent_on_null,
+                                                                                 op->d.json_constructor.constructor->unique);
        else if (ctor->type == JSCTOR_JSON_SCALAR)
        {
                if (op->d.json_constructor.arg_nulls[0])
@@ -4622,9 +4622,9 @@ static Datum
 ExecEvalJsonExprCoercion(ExprEvalStep *op, ExprContext *econtext,
                                                 Datum res, bool *isNull, void *p, bool *error)
 {
-       ExprState *estate = p;
+       ExprState  *estate = p;
 
-       if (estate)             /* coerce using specified expression */
+       if (estate)                                     /* coerce using specified expression */
                return ExecEvalExpr(estate, econtext, isNull);
 
        if (op->d.jsonexpr.jsexpr->op != JSON_EXISTS_OP)
@@ -4696,7 +4696,7 @@ EvalJsonPathVar(void *cxt, char *varName, int varNameLen,
        if (!var->evaluated)
        {
                MemoryContext oldcxt = var->mcxt ?
-                       MemoryContextSwitchTo(var->mcxt) : NULL;
+               MemoryContextSwitchTo(var->mcxt) : NULL;
 
                var->value = ExecEvalExpr(var->estate, var->econtext, &var->isnull);
                var->evaluated = true;
@@ -4751,9 +4751,8 @@ ExecPrepareJsonItemCoercion(JsonbValue *item,
 
                case jbvString:
                        coercion = &coercions->string;
-                       res = PointerGetDatum(
-                               cstring_to_text_with_len(item->val.string.val,
-                                                                                item->val.string.len));
+                       res = PointerGetDatum(cstring_to_text_with_len(item->val.string.val,
+                                                                                                                  item->val.string.len));
                        break;
 
                case jbvNumeric:
@@ -4809,8 +4808,8 @@ ExecPrepareJsonItemCoercion(JsonbValue *item,
        return res;
 }
 
-typedef Datum (*JsonFunc)(ExprEvalStep *op, ExprContext *econtext,
-                                                 Datum item, bool *resnull, void *p, bool *error);
+typedef Datum (*JsonFunc) (ExprEvalStep *op, ExprContext *econtext,
+                                                  Datum item, bool *resnull, void *p, bool *error);
 
 static Datum
 ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op,
@@ -4826,8 +4825,8 @@ ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op,
                return func(op, econtext, res, resnull, p, error);
 
        /*
-        * We should catch exceptions of category ERRCODE_DATA_EXCEPTION
-        * and execute the corresponding ON ERROR behavior then.
+        * We should catch exceptions of category ERRCODE_DATA_EXCEPTION and then
+        * execute the corresponding ON ERROR behavior.
         */
        oldcontext = CurrentMemoryContext;
        oldowner = CurrentResourceOwner;
@@ -4864,7 +4863,8 @@ ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op,
 
                ecategory = ERRCODE_TO_CATEGORY(edata->sqlerrcode);
 
-               if (ecategory != ERRCODE_DATA_EXCEPTION &&      /* jsonpath and other data errors */
+               if (ecategory != ERRCODE_DATA_EXCEPTION &&      /* jsonpath and other data
+                                                                                                        * errors */
                        ecategory != ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION)    /* domain errors */
                        ReThrowError(edata);
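
The comment rewrapped above refers to the usual backend idiom for trapping
data errors: run the work inside an internal subtransaction, roll it back on
failure, and re-throw anything that is not a data-class error. A simplified
sketch of that idiom (resource-owner and memory-context handling abbreviated;
not the literal body of ExecEvalJsonExprSubtrans):

	MemoryContext oldcontext = CurrentMemoryContext;
	ResourceOwner oldowner = CurrentResourceOwner;

	BeginInternalSubTransaction(NULL);
	PG_TRY();
	{
		res = func(op, econtext, res, resnull, p, error);
		ReleaseCurrentSubTransaction();
		MemoryContextSwitchTo(oldcontext);
		CurrentResourceOwner = oldowner;
	}
	PG_CATCH();
	{
		ErrorData  *edata;

		MemoryContextSwitchTo(oldcontext);
		edata = CopyErrorData();
		FlushErrorState();
		RollbackAndReleaseCurrentSubTransaction();
		CurrentResourceOwner = oldowner;

		/* re-throw anything that is not a data-class error */
		if (ERRCODE_TO_CATEGORY(edata->sqlerrcode) != ERRCODE_DATA_EXCEPTION)
			ReThrowError(edata);

		*error = true;			/* let the caller apply ON ERROR behavior */
	}
	PG_END_TRY();
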
 
@@ -4918,7 +4918,7 @@ ExecEvalJsonExpr(ExprEvalStep *op, ExprContext *econtext,
                                if (error && *error)
                                        return (Datum) 0;
 
-                               if (!jbv)       /* NULL or empty */
+                               if (!jbv)               /* NULL or empty */
                                        break;
 
                                Assert(!empty);
@@ -4949,21 +4949,23 @@ ExecEvalJsonExpr(ExprEvalStep *op, ExprContext *econtext,
                                                *error = true;
                                                return (Datum) 0;
                                        }
+
                                        /*
                                         * Coercion via I/O means here that the cast to the target
                                         * type simply does not exist.
                                         */
                                        ereport(ERROR,
-                                                       /*
-                                                        * XXX Standard says about a separate error code
-                                                        * ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE
-                                                        * but does not define its number.
-                                                        */
+
+                                       /*
+                                        * XXX Standard says about a separate error code
+                                        * ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE but
+                                        * does not define its number.
+                                        */
                                                        (errcode(ERRCODE_SQL_JSON_SCALAR_REQUIRED),
                                                         errmsg("SQL/JSON item cannot be cast to target type")));
                                }
                                else if (!jcstate->estate)
-                                       return res;             /* no coercion */
+                                       return res; /* no coercion */
 
                                /* coerce using specific expression */
                                estate = jcstate->estate;
@@ -5018,6 +5020,7 @@ ExecEvalJsonExpr(ExprEvalStep *op, ExprContext *econtext,
                }
 
                if (jexpr->on_empty->btype == JSON_BEHAVIOR_DEFAULT)
+
                        /*
                         * Execute DEFAULT expression as a coercion expression, because
                         * its result is already coerced to the target type.
index 90b2699a96be915bc3eb091ade356e47d053c2c0..5ef5c6930fdb8dac492e59b4f2d298ef8f86535a 100644
@@ -575,6 +575,7 @@ ExecReScanIndexScan(IndexScanState *node)
        if (node->iss_ReorderQueue)
        {
                HeapTuple       tuple;
+
                while (!pairingheap_is_empty(node->iss_ReorderQueue))
                {
                        tuple = reorderqueue_pop(node);
index 23441e33cad54f54f0716761c4d0861e165eafc9..f7be4fc31f728c0ab25dc39ea50c07afc3218226 100644
@@ -375,7 +375,7 @@ static void
 cache_purge_all(MemoizeState *mstate)
 {
        uint64          evictions = mstate->hashtable->members;
-       PlanState *pstate = (PlanState *) mstate;
+       PlanState  *pstate = (PlanState *) mstate;
 
        /*
         * Likely the most efficient way to remove all items is to just reset the
index 982acfdad98e822257dd2e0333dd742e1c532493..a49c3da5b6c9a3d5d26b31d561e242ea2bc68898 100644
@@ -831,7 +831,7 @@ ExecInsert(ModifyTableContext *context,
                        {
                                TupleDesc       tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
                                TupleDesc       plan_tdesc =
-                                       CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
+                               CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
 
                                resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
                                        MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
index 042a5f8b0a2f7f6b0f708f5ea4b58cae24bfff45..29bc26669b00aa824f94a96acadfd1be2a3af104 100644
@@ -2267,10 +2267,10 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan)
                else
                {
                        stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
-                                                                                          src,
-                                                                                          plan->argtypes,
-                                                                                          plan->nargs,
-                                                                                          _SPI_current->queryEnv);
+                                                                                                                  src,
+                                                                                                                  plan->argtypes,
+                                                                                                                  plan->nargs,
+                                                                                                                  _SPI_current->queryEnv);
                }
 
                /* Finish filling in the CachedPlanSource */
@@ -2504,10 +2504,10 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
                        else
                        {
                                stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree,
-                                                                                                  src,
-                                                                                                  plan->argtypes,
-                                                                                                  plan->nargs,
-                                                                                                  _SPI_current->queryEnv);
+                                                                                                                          src,
+                                                                                                                          plan->argtypes,
+                                                                                                                          plan->nargs,
+                                                                                                                          _SPI_current->queryEnv);
                        }
 
                        /* Finish filling in the CachedPlanSource */
index fcd63218f28501dc282757836b374485dbf680e8..6c72d43beb663528d18d3d7208a6b5a63c0e931c 100644
@@ -890,8 +890,8 @@ llvm_shutdown(int code, Datum arg)
         * has occurred in the middle of LLVM code. It is not safe to call back
         * into LLVM (which is why a FATAL error was thrown).
         *
-        * We do need to shutdown LLVM in other shutdown cases, otherwise
-        * e.g. profiling data won't be written out.
+        * We do need to shut down LLVM in other shutdown cases, otherwise e.g.
+        * profiling data won't be written out.
         */
        if (llvm_in_fatal_on_oom())
        {
index 1b94a76e43e323a837d3de530abcdea33d219af7..ec454b4d65589aed2ae518bc79302a5d852a2c05 100644
@@ -634,9 +634,9 @@ dshash_seq_next(dshash_seq_status *status)
 
        /*
         * Not yet holding any partition locks. Need to determine the size of the
-        * hash table, it could have been resized since we were looking
-        * last. Since we iterate in partition order, we can start by
-        * unconditionally lock partition 0.
+        * hash table, as it could have been resized since we last looked.
+        * Since we iterate in partition order, we can start by unconditionally
+        * locking partition 0.
         *
         * Once we hold the lock, no resizing can happen until the scan ends. So
         * we don't need to repeatedly call ensure_valid_bucket_pointers().
index 03cdc72b406377fd30220355f01004d69f4a780b..75392a8bb7c61419b7879e5f224447807a9649b7 100644
@@ -1967,8 +1967,8 @@ retry:
                         * because no code should expect latches to survive across
                         * CHECK_FOR_INTERRUPTS().
                         */
-                        ResetLatch(MyLatch);
-                        goto retry;
+                       ResetLatch(MyLatch);
+                       goto retry;
                }
        }
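
The re-indented lines above are part of the standard latch-wait loop; as the
surrounding comment notes, the latch must be reset near CHECK_FOR_INTERRUPTS()
because no code may assume a latch survives across it. A generic sketch of the
loop (the predicate and wait event are placeholders, and the exact ordering of
ResetLatch() versus CHECK_FOR_INTERRUPTS() varies by call site):

	for (;;)
	{
		if (condition_is_satisfied())	/* placeholder predicate */
			break;

		(void) WaitLatch(MyLatch,
						 WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
						 -1L,
						 WAIT_EVENT_CLIENT_WRITE);
		ResetLatch(MyLatch);
		CHECK_FOR_INTERRUPTS();
	}
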
 
index 836f427ea8f68dc38ba31e3d436f2f28e4c71700..205506305b04951e61abb284f17d2a135e8c3018 100644
@@ -2343,7 +2343,7 @@ _copyJsonReturning(const JsonReturning *from)
 static JsonValueExpr *
 _copyJsonValueExpr(const JsonValueExpr *from)
 {
-       JsonValueExpr  *newnode = makeNode(JsonValueExpr);
+       JsonValueExpr *newnode = makeNode(JsonValueExpr);
 
        COPY_NODE_FIELD(raw_expr);
        COPY_NODE_FIELD(formatted_expr);
@@ -2358,7 +2358,7 @@ _copyJsonValueExpr(const JsonValueExpr *from)
 static JsonParseExpr *
 _copyJsonParseExpr(const JsonParseExpr *from)
 {
-       JsonParseExpr  *newnode = makeNode(JsonParseExpr);
+       JsonParseExpr *newnode = makeNode(JsonParseExpr);
 
        COPY_NODE_FIELD(expr);
        COPY_NODE_FIELD(output);
@@ -2488,7 +2488,7 @@ _copyJsonObjectAgg(const JsonObjectAgg *from)
 static JsonOutput *
 _copyJsonOutput(const JsonOutput *from)
 {
-       JsonOutput         *newnode = makeNode(JsonOutput);
+       JsonOutput *newnode = makeNode(JsonOutput);
 
        COPY_NODE_FIELD(typeName);
        COPY_NODE_FIELD(returning);
@@ -2550,7 +2550,7 @@ _copyJsonArrayQueryConstructor(const JsonArrayQueryConstructor *from)
 static JsonExpr *
 _copyJsonExpr(const JsonExpr *from)
 {
-       JsonExpr    *newnode = makeNode(JsonExpr);
+       JsonExpr   *newnode = makeNode(JsonExpr);
 
        COPY_SCALAR_FIELD(op);
        COPY_NODE_FIELD(formatted_expr);
@@ -2614,7 +2614,7 @@ _copyJsonItemCoercions(const JsonItemCoercions *from)
 static JsonFuncExpr *
 _copyJsonFuncExpr(const JsonFuncExpr *from)
 {
-       JsonFuncExpr   *newnode = makeNode(JsonFuncExpr);
+       JsonFuncExpr *newnode = makeNode(JsonFuncExpr);
 
        COPY_SCALAR_FIELD(op);
        COPY_NODE_FIELD(common);
@@ -2651,7 +2651,7 @@ _copyJsonIsPredicate(const JsonIsPredicate *from)
 static JsonBehavior *
 _copyJsonBehavior(const JsonBehavior *from)
 {
-       JsonBehavior   *newnode = makeNode(JsonBehavior);
+       JsonBehavior *newnode = makeNode(JsonBehavior);
 
        COPY_SCALAR_FIELD(btype);
        COPY_NODE_FIELD(default_expr);
@@ -2665,7 +2665,7 @@ _copyJsonBehavior(const JsonBehavior *from)
 static JsonCommon *
 _copyJsonCommon(const JsonCommon *from)
 {
-       JsonCommon         *newnode = makeNode(JsonCommon);
+       JsonCommon *newnode = makeNode(JsonCommon);
 
        COPY_NODE_FIELD(expr);
        COPY_NODE_FIELD(pathspec);
@@ -2682,7 +2682,7 @@ _copyJsonCommon(const JsonCommon *from)
 static JsonArgument *
 _copyJsonArgument(const JsonArgument *from)
 {
-       JsonArgument   *newnode = makeNode(JsonArgument);
+       JsonArgument *newnode = makeNode(JsonArgument);
 
        COPY_NODE_FIELD(val);
        COPY_STRING_FIELD(name);
@@ -2696,7 +2696,7 @@ _copyJsonArgument(const JsonArgument *from)
 static JsonTable *
 _copyJsonTable(const JsonTable *from)
 {
-       JsonTable *newnode = makeNode(JsonTable);
+       JsonTable  *newnode = makeNode(JsonTable);
 
        COPY_NODE_FIELD(common);
        COPY_NODE_FIELD(columns);
@@ -5480,7 +5480,7 @@ _copyExtensibleNode(const ExtensibleNode *from)
 static Integer *
 _copyInteger(const Integer *from)
 {
-       Integer    *newnode = makeNode(Integer);
+       Integer    *newnode = makeNode(Integer);
 
        COPY_SCALAR_FIELD(ival);
 
@@ -5500,7 +5500,7 @@ _copyFloat(const Float *from)
 static Boolean *
 _copyBoolean(const Boolean *from)
 {
-       Boolean    *newnode = makeNode(Boolean);
+       Boolean    *newnode = makeNode(Boolean);
 
        COPY_SCALAR_FIELD(boolval);
 
@@ -5520,7 +5520,7 @@ _copyString(const String *from)
 static BitString *
 _copyBitString(const BitString *from)
 {
-       BitString   *newnode = makeNode(BitString);
+       BitString  *newnode = makeNode(BitString);
 
        COPY_STRING_FIELD(bsval);
 
index e013c1bbfed014d5218b9bab88c5858cc7ff3e2c..9688b22a4b99086c983ac30c0f5a37e7634d777a 100644
@@ -2802,8 +2802,7 @@ static bool
 _equalA_Const(const A_Const *a, const A_Const *b)
 {
        /*
-        * Hack for in-line val field.  Also val is not valid is isnull is
-        * true.
+        * Hack for in-line val field.  Also, val is not valid if isnull is true.
         */
        if (!a->isnull && !b->isnull &&
                !equal(&a->val, &b->val))
index 4ae5e5d4dd6f305cb92d0f9690e88cb641cd4299..3b3ef3a4cdda48d0afea6cc6fef59b7e44b13fa8 100644
@@ -1003,7 +1003,7 @@ exprCollation(const Node *expr)
                        break;
                case T_JsonExpr:
                        {
-                               JsonExpr *jexpr = (JsonExpr *) expr;
+                               JsonExpr   *jexpr = (JsonExpr *) expr;
                                JsonCoercion *coercion = jexpr->result_coercion;
 
                                if (!coercion)
@@ -1239,7 +1239,8 @@ exprSetCollation(Node *expr, Oid collation)
                                if (ctor->coercion)
                                        exprSetCollation((Node *) ctor->coercion, collation);
                                else
-                                       Assert(!OidIsValid(collation)); /* result is always a json[b] type */
+                                       Assert(!OidIsValid(collation)); /* result is always a
+                                                                                                        * json[b] type */
                        }
                        break;
                case T_JsonIsPredicate:
@@ -1247,7 +1248,7 @@ exprSetCollation(Node *expr, Oid collation)
                        break;
                case T_JsonExpr:
                        {
-                               JsonExpr *jexpr = (JsonExpr *) expr;
+                               JsonExpr   *jexpr = (JsonExpr *) expr;
                                JsonCoercion *coercion = jexpr->result_coercion;
 
                                if (!coercion)
@@ -2496,7 +2497,7 @@ expression_tree_walker(Node *node,
                        return walker(((JsonIsPredicate *) node)->expr, context);
                case T_JsonExpr:
                        {
-                               JsonExpr    *jexpr = (JsonExpr *) node;
+                               JsonExpr   *jexpr = (JsonExpr *) node;
 
                                if (walker(jexpr->formatted_expr, context))
                                        return true;
@@ -3568,8 +3569,8 @@ expression_tree_mutator(Node *node,
                        break;
                case T_JsonExpr:
                        {
-                               JsonExpr    *jexpr = (JsonExpr *) node;
-                               JsonExpr    *newnode;
+                               JsonExpr   *jexpr = (JsonExpr *) node;
+                               JsonExpr   *newnode;
 
                                FLATCOPY(newnode, jexpr, JsonExpr);
                                MUTATE(newnode->path_spec, jexpr->path_spec, Node *);
@@ -4545,7 +4546,7 @@ raw_expression_tree_walker(Node *node,
                        break;
                case T_JsonTableColumn:
                        {
-                               JsonTableColumn  *jtc = (JsonTableColumn *) node;
+                               JsonTableColumn *jtc = (JsonTableColumn *) node;
 
                                if (walker(jtc->typeName, context))
                                        return true;
index b1f2de8b28de2458ac2b4f7b07141de488479c71..0271ea9d7869dd615d018f592911efd51455bacc 100644
@@ -3613,8 +3613,8 @@ static void
 _outFloat(StringInfo str, const Float *node)
 {
        /*
-        * We assume the value is a valid numeric literal and so does not
-        * need quoting.
+        * We assume the value is a valid numeric literal and so does not need
+        * quoting.
         */
        appendStringInfoString(str, node->fval);
 }
@@ -3629,8 +3629,8 @@ static void
 _outString(StringInfo str, const String *node)
 {
        /*
-        * We use outToken to provide escaping of the string's content,
-        * but we don't want it to do anything with an empty string.
+        * We use outToken to provide escaping of the string's content, but we
+        * don't want it to do anything with an empty string.
         */
        appendStringInfoChar(str, '"');
        if (node->sval[0] != '\0')
index 6fe55f5dd5cbad9a5d5c38a9bd8ad3ee587ee7db..5774a6867062e6943c8d3db78ea43340ff4f29a2 100644
@@ -22,7 +22,7 @@
 Integer *
 makeInteger(int i)
 {
-       Integer    *v = makeNode(Integer);
+       Integer    *v = makeNode(Integer);
 
        v->ival = i;
        return v;
@@ -48,7 +48,7 @@ makeFloat(char *numericStr)
 Boolean *
 makeBoolean(bool val)
 {
-       Boolean    *v = makeNode(Boolean);
+       Boolean    *v = makeNode(Boolean);
 
        v->boolval = val;
        return v;
index d84f66a81b304250c7590c8cf89878fa8c29512e..7ac116a791f5152e5389e0b533ccfa1d1bd4ab01 100644
@@ -1777,17 +1777,18 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
                        }
 
                        /*
-                        * When building a fractional path, determine a cheapest fractional
-                        * path for each child relation too. Looking at startup and total
-                        * costs is not enough, because the cheapest fractional path may be
-                        * dominated by two separate paths (one for startup, one for total).
+                        * When building a fractional path, determine a cheapest
+                        * fractional path for each child relation too. Looking at startup
+                        * and total costs is not enough, because the cheapest fractional
+                        * path may be dominated by two separate paths (one for startup,
+                        * one for total).
                         *
                         * When needed (building fractional path), determine the cheapest
                         * fractional path too.
                         */
                        if (root->tuple_fraction > 0)
                        {
-                               double  path_fraction = (1.0 / root->tuple_fraction);
+                               double          path_fraction = (1.0 / root->tuple_fraction);
 
                                cheapest_fractional =
                                        get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
@@ -1796,8 +1797,8 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
                                                                                                                          path_fraction);
 
                                /*
-                                * If we found no path with matching pathkeys, use the cheapest
-                                * total path instead.
+                                * If we found no path with matching pathkeys, use the
+                                * cheapest total path instead.
                                 *
                                 * XXX We might consider partially sorted paths too (with an
                                 * incremental sort on top). But we'd have to build all the
index 6673d271c2661c00b3da632ad62a00d4cb87a637..ed98ba7dbd2b917a74e9133173166bb41b588e8a 100644
@@ -1794,7 +1794,7 @@ is_fake_var(Expr *expr)
 static double
 get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
 {
-       double  width = -1.0; /* fake value */
+       double          width = -1.0;   /* fake value */
 
        if (IsA(expr, RelabelType))
                expr = (Expr *) ((RelabelType *) expr)->arg;
@@ -1802,17 +1802,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
        /* Try to find actual stat in corresponding relation */
        if (IsA(expr, Var))
        {
-               Var             *var = (Var *) expr;
+               Var                *var = (Var *) expr;
 
                if (var->varno > 0 && var->varno < root->simple_rel_array_size)
                {
-                       RelOptInfo      *rel = root->simple_rel_array[var->varno];
+                       RelOptInfo *rel = root->simple_rel_array[var->varno];
 
                        if (rel != NULL &&
                                var->varattno >= rel->min_attr &&
                                var->varattno <= rel->max_attr)
                        {
-                               int     ndx = var->varattno - rel->min_attr;
+                               int                     ndx = var->varattno - rel->min_attr;
 
                                if (rel->attr_widths[ndx] > 0)
                                        width = rel->attr_widths[ndx];
@@ -1823,7 +1823,7 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
        /* Didn't find any actual stats, try using type width instead. */
        if (width < 0.0)
        {
-               Node    *node = (Node*) expr;
+               Node       *node = (Node *) expr;
 
                width = get_typavgwidth(exprType(node), exprTypmod(node));
        }
@@ -1832,17 +1832,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr)
         * Values are passed as Datum type, so comparisons can't be cheaper than
         * comparing a Datum value.
         *
-        * FIXME I find this reasoning questionable. We may pass int2, and comparing
-        * it is probably a bit cheaper than comparing a bigint.
+        * FIXME I find this reasoning questionable. We may pass int2, and
+        * comparing it is probably a bit cheaper than comparing a bigint.
         */
        if (width <= sizeof(Datum))
                return 1.0;
 
        /*
         * We consider the cost of a comparison not to be directly proportional to
-        * width of the argument, because widths of the arguments could be slightly
-        * different (we only know the average width for the whole column). So we
-        * use log16(width) as an estimate.
+        * width of the argument, because widths of the arguments could be
+        * slightly different (we only know the average width for the whole
+        * column). So we use log16(width) as an estimate.
         */
        return 1.0 + 0.125 * LOG2(width / sizeof(Datum));
 }
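
To make the multiplier above concrete, here is a standalone restatement with
the constants spelled out (function name invented; assumes an 8-byte Datum):

#include <math.h>

static double
width_cost_multiplier_sketch(double width_bytes)
{
	const double datum_size = 8.0;	/* assumed sizeof(Datum) */

	if (width_bytes <= datum_size)
		return 1.0;
	return 1.0 + 0.125 * log2(width_bytes / datum_size);
}

For an average column width of 64 bytes this yields 1.0 + 0.125 * log2(8) =
1.375, i.e. a 64-byte sort key is costed only about 37% higher than a
pass-by-value Datum.
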
@@ -1902,23 +1902,23 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
                                          bool heapSort)
 {
        Cost            per_tuple_cost = 0.0;
-       ListCell        *lc;
-       List            *pathkeyExprs = NIL;
+       ListCell   *lc;
+       List       *pathkeyExprs = NIL;
        double          tuplesPerPrevGroup = tuples;
        double          totalFuncCost = 1.0;
        bool            has_fake_var = false;
        int                     i = 0;
        Oid                     prev_datatype = InvalidOid;
-       List            *cache_varinfos = NIL;
+       List       *cache_varinfos = NIL;
 
        /* fallback if pathkeys is unknown */
        if (list_length(pathkeys) == 0)
        {
                /*
-                * If we'll use a bounded heap-sort keeping just K tuples in memory, for
-                * a total number of tuple comparisons of N log2 K; but the constant
-                * factor is a bit higher than for quicksort. Tweak it so that the cost
-                * curve is continuous at the crossover point.
+                * If we use a bounded heap-sort keeping just K tuples in memory,
+                * the total number of tuple comparisons is N log2 K; but the
+                * constant factor is a bit higher than for quicksort. Tweak it so
+                * that the cost curve is continuous at the crossover point.
                 */
                output_tuples = (heapSort) ? 2.0 * output_tuples : tuples;
                per_tuple_cost += 2.0 * cpu_operator_cost * LOG2(output_tuples);
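
Worked numbers for the fallback above (sample values, not from the source):
sorting N = 1,000,000 tuples with a bounded heap keeping K = 100 rows uses

	output_tuples = 2.0 * 100 = 200
	per_tuple_cost += 2.0 * cpu_operator_cost * log2(200)
	               ~= 15.3 * cpu_operator_cost

while the quicksort branch of the same fallback charges
2.0 * cpu_operator_cost * log2(1e6), about 39.9 * cpu_operator_cost per tuple.
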
@@ -1930,17 +1930,17 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
        }
 
        /*
-        * Computing total cost of sorting takes into account:
-        * - per column comparison function cost
-        * - we try to compute needed number of comparison per column
+        * Computing the total cost of sorting takes into account the per-column
+        * comparison function cost.  We try to compute the needed number of
+        * comparisons per column.
         */
        foreach(lc, pathkeys)
        {
-               PathKey                    *pathkey = (PathKey*) lfirst(lc);
-               EquivalenceMember  *em;
-               double                          nGroups,
-                                                       correctedNGroups;
-               Cost                            funcCost = 1.0;
+               PathKey    *pathkey = (PathKey *) lfirst(lc);
+               EquivalenceMember *em;
+               double          nGroups,
+                                       correctedNGroups;
+               Cost            funcCost = 1.0;
 
                /*
                 * We believe that equivalence members aren't very different, so, to
@@ -1985,10 +1985,10 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
                pathkeyExprs = lappend(pathkeyExprs, em->em_expr);
 
                /*
-                * We need to calculate the number of comparisons for this column, which
-                * requires knowing the group size. So we estimate the number of groups
-                * by calling estimate_num_groups_incremental(), which estimates the
-                * group size for "new" pathkeys.
+                * We need to calculate the number of comparisons for this column,
+                * which requires knowing the group size. So we estimate the number of
+                * groups by calling estimate_num_groups_incremental(), which
+                * estimates the group size for "new" pathkeys.
                 *
                 * Note: estimate_num_groups_incremental does not handle fake Vars, so
                 * use a default estimate otherwise.
@@ -1999,26 +1999,30 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
                                                                                                          &cache_varinfos,
                                                                                                          list_length(pathkeyExprs) - 1);
                else if (tuples > 4.0)
+
                        /*
                         * Use geometric mean as estimation if there are no stats.
                         *
-                        * We don't use DEFAULT_NUM_DISTINCT here, because that’s used for
-                        * a single column, but here we’re dealing with multiple columns.
+                        * We don't use DEFAULT_NUM_DISTINCT here, because that's used for
+                        * a single column, but here we're dealing with multiple columns.
                         */
                        nGroups = ceil(2.0 + sqrt(tuples) * (i + 1) / list_length(pathkeys));
                else
                        nGroups = tuples;
 
                /*
-                * Presorted keys are not considered in the cost above, but we still do
-                * have to compare them in the qsort comparator. So make sure to factor
-                * in the cost in that case.
+                * Presorted keys are not considered in the cost above, but we still
+                * do have to compare them in the qsort comparator. So make sure to
+                * factor in the cost in that case.
                 */
                if (i >= nPresortedKeys)
                {
                        if (heapSort)
                        {
-                               /* have to keep at least one group, and a multiple of group size */
+                               /*
+                                * have to keep at least one group, and a multiple of group
+                                * size
+                                */
                                correctedNGroups = ceil(output_tuples / tuplesPerPrevGroup);
                        }
                        else
@@ -2033,19 +2037,20 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
                i++;
 
                /*
-                * Uniform distributions with all groups being of the same size are the
-                * best case, with nice smooth behavior. Real-world distributions tend
-                * not to be uniform, though, and we don’t have any reliable easy-to-use
-                * information. As a basic defense against skewed distributions, we use
-                * a 1.5 factor to make the expected group a bit larger, but we need to
-                * be careful not to make the group larger than in the preceding step.
+                * Uniform distributions with all groups being of the same size are
+                * the best case, with nice smooth behavior. Real-world distributions
+                * tend not to be uniform, though, and we don't have any reliable
+                * easy-to-use information. As a basic defense against skewed
+                * distributions, we use a 1.5 factor to make the expected group a bit
+                * larger, but we need to be careful not to make the group larger than
+                * in the preceding step.
                 */
                tuplesPerPrevGroup = Min(tuplesPerPrevGroup,
                                                                 ceil(1.5 * tuplesPerPrevGroup / nGroups));
 
                /*
-                * Once we get single-row group, it means tuples in the group are unique
-                * and we can skip all remaining columns.
+                * Once we get a single-row group, it means tuples in the group are
+                * unique and we can skip all remaining columns.
                 */
                if (tuplesPerPrevGroup <= 1.0)
                        break;
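
A worked example tying together the two estimates above (numbers invented):
with tuples = 1e6 and three pathkeys, the no-stats geometric-mean estimate for
the first column (i = 0) is

	nGroups = ceil(2.0 + sqrt(1e6) * 1 / 3) = 336

and the skew defense then caps the next step's group size at

	tuplesPerPrevGroup = Min(1e6, ceil(1.5 * 1e6 / 336)) = 4465

i.e. 1.5x the roughly 2976-row groups a perfectly uniform distribution would
produce.
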
@@ -2057,15 +2062,15 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
        per_tuple_cost *= cpu_operator_cost;
 
        /*
-        * Accordingly to "Introduction to algorithms", Thomas H. Cormen, Charles E.
-        * Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0, quicksort estimation
-        * formula has additional term proportional to number of tuples (See Chapter
-        * 8.2 and Theorem 4.1). That affects  cases with a low number of tuples,
-        * approximately less than 1e4. We could implement it as an additional
-        * multiplier under the logarithm, but we use a bit more complex formula
-        * which takes into account the number of unique tuples and it’s not clear
-        * how to combine the multiplier with the number of groups. Estimate it as
-        * 10 in cpu_operator_cost unit.
+        * According to "Introduction to Algorithms" (Thomas H. Cormen, Charles
+        * E. Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0), the quicksort
+        * estimation formula has an additional term proportional to the number
+        * of tuples (see Chapter 8.2 and Theorem 4.1). That affects cases with
+        * a low number of tuples, approximately less than 1e4. We could
+        * implement it as an additional multiplier under the logarithm, but we
+        * use a somewhat more complex formula which takes into account the
+        * number of unique tuples, and it's not clear how to combine the
+        * multiplier with the number of groups. Estimate it as 10
+        * cpu_operator_cost units.
         */
        per_tuple_cost += 10 * cpu_operator_cost;
 
@@ -2082,7 +2087,7 @@ cost_sort_estimate(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
                                   double tuples)
 {
        return compute_cpu_sort_cost(root, pathkeys, nPresortedKeys,
-                                                               0, tuples, tuples, false);
+                                                                0, tuples, tuples, false);
 }
 
 /*
index 34c5ab1cb603d5f9b13c4d0e7e98e9fe03abd82b..60c0e3f1089bae7e23a540705345313f4f7e18b8 100644
@@ -685,9 +685,9 @@ get_eclass_for_sort_expr(PlannerInfo *root,
                                /*
                                 * Match!
                                 *
-                                * Copy the sortref if it wasn't set yet. That may happen if the
-                                * ec was constructed from WHERE clause, i.e. it doesn't have a
-                                * target reference at all.
+                                * Copy the sortref if it wasn't set yet. That may happen if
+                                * the ec was constructed from a WHERE clause, i.e. it doesn't
+                                * have a target reference at all.
                                 */
                                if (cur_ec->ec_sortref == 0 && sortref > 0)
                                        cur_ec->ec_sortref = sortref;
index 9a8c5165b0451c57bfee124c4cbff83d8c0efee2..55206ec54d2bd43f4027671e52d7f1cf00b37dd6 100644
@@ -1258,7 +1258,7 @@ sort_inner_and_outer(PlannerInfo *root,
 
        foreach(l, all_pathkeys)
        {
-               PathKey    *front_pathkey = (PathKey *) lfirst(l);
+               PathKey    *front_pathkey = (PathKey *) lfirst(l);
                List       *cur_mergeclauses;
                List       *outerkeys;
                List       *innerkeys;
index 91556910aec208bb6d688a2177a559813cfa41ca..9775c4a722592c35b5559ad21522357f3dd117fb 100644
@@ -32,7 +32,7 @@
 #include "utils/selfuncs.h"
 
 /* Consider reordering of GROUP BY keys? */
-bool enable_group_by_reordering = true;
+bool           enable_group_by_reordering = true;
 
 static bool pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys);
 static bool matches_boolean_partition_clause(RestrictInfo *rinfo,
@@ -352,7 +352,7 @@ int
 group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
                                                           List **group_clauses)
 {
-       List       *new_group_pathkeys= NIL,
+       List       *new_group_pathkeys = NIL,
                           *new_group_clauses = NIL;
        ListCell   *lc;
        int                     n;
@@ -365,16 +365,16 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
         * there's a matching GROUP BY key. If we find one, we append it to the
         * list, and do the same for the clauses.
         *
-        * Once we find the first pathkey without a matching GROUP BY key, the rest
-        * of the pathkeys are useless and can't be used to evaluate the grouping,
-        * so we abort the loop and ignore the remaining pathkeys.
+        * Once we find the first pathkey without a matching GROUP BY key, the
+        * rest of the pathkeys are useless and can't be used to evaluate the
+        * grouping, so we abort the loop and ignore the remaining pathkeys.
         *
         * XXX Pathkeys are built in a way to allow simply comparing pointers.
         */
        foreach(lc, pathkeys)
        {
-               PathKey                 *pathkey = (PathKey *) lfirst(lc);
-               SortGroupClause *sgc;
+               PathKey    *pathkey = (PathKey *) lfirst(lc);
+               SortGroupClause *sgc;
 
                /* abort on first mismatch */
                if (!list_member_ptr(*group_pathkeys, pathkey))
@@ -403,13 +403,14 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
 /*
  * Used to generate all permutations of a pathkey list.
  */
-typedef struct PathkeyMutatorState {
+typedef struct PathkeyMutatorState
+{
        List       *elemsList;
        ListCell  **elemCells;
        void      **elems;
        int                *positions;
-       int                      mutatorNColumns;
-       int                      count;
+       int                     mutatorNColumns;
+       int                     count;
 } PathkeyMutatorState;
 
 
@@ -428,9 +429,9 @@ typedef struct PathkeyMutatorState {
 static void
 PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
 {
-       int i;
+       int                     i;
        int                     n = end - start;
-       ListCell        *lc;
+       ListCell   *lc;
 
        memset(state, 0, sizeof(*state));
 
@@ -438,8 +439,8 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
 
        state->elemsList = list_copy(elems);
 
-       state->elems = palloc(sizeof(void*) * n);
-       state->elemCells = palloc(sizeof(ListCell*) * n);
+       state->elems = palloc(sizeof(void *) * n);
+       state->elemCells = palloc(sizeof(ListCell *) * n);
        state->positions = palloc(sizeof(int) * n);
 
        i = 0;
@@ -459,10 +460,10 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end)
 static void
 PathkeyMutatorSwap(int *a, int i, int j)
 {
-  int s = a[i];
+       int                     s = a[i];
 
-  a[i] = a[j];
-  a[j] = s;
+       a[i] = a[j];
+       a[j] = s;
 }
 
 /*
@@ -471,7 +472,10 @@ PathkeyMutatorSwap(int *a, int i, int j)
 static bool
 PathkeyMutatorNextSet(int *a, int n)
 {
-       int j, k, l, r;
+       int                     j,
+                               k,
+                               l,
+                               r;
 
        j = n - 2;
 
@@ -507,7 +511,7 @@ PathkeyMutatorNextSet(int *a, int n)
 static List *
 PathkeyMutatorNext(PathkeyMutatorState *state)
 {
-       int     i;
+       int                     i;
 
        state->count++;
 
@@ -528,9 +532,9 @@ PathkeyMutatorNext(PathkeyMutatorState *state)
        }
 
        /* update the list cells to point to the right elements */
-       for(i = 0; i < state->mutatorNColumns; i++)
+       for (i = 0; i < state->mutatorNColumns; i++)
                lfirst(state->elemCells[i]) =
-                       (void *) state->elems[ state->positions[i] - 1 ];
+                       (void *) state->elems[state->positions[i] - 1];
 
        return state->elemsList;
 }
@@ -541,7 +545,7 @@ PathkeyMutatorNext(PathkeyMutatorState *state)
 typedef struct PathkeySortCost
 {
        Cost            cost;
-       PathKey    *pathkey;
+       PathKey    *pathkey;
 } PathkeySortCost;
 
 static int
@@ -581,41 +585,42 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
                                                          List **group_pathkeys, List **group_clauses,
                                                          int n_preordered)
 {
-       List               *new_group_pathkeys = NIL,
-                                  *new_group_clauses = NIL,
-                                  *var_group_pathkeys;
+       List       *new_group_pathkeys = NIL,
+                          *new_group_clauses = NIL,
+                          *var_group_pathkeys;
 
-       ListCell           *cell;
-       PathkeyMutatorState     mstate;
-       double                  cheapest_sort_cost = -1.0;
+       ListCell   *cell;
+       PathkeyMutatorState mstate;
+       double          cheapest_sort_cost = -1.0;
 
-       int nFreeKeys;
-       int nToPermute;
+       int                     nFreeKeys;
+       int                     nToPermute;
 
        /* If there are less than 2 unsorted pathkeys, we're done. */
        if (list_length(*group_pathkeys) - n_preordered < 2)
                return false;
 
        /*
-        * We could exhaustively cost all possible orderings of the pathkeys, but for
-        * a large number of pathkeys it might be prohibitively expensive. So we try
-        * to apply simple cheap heuristics first - we sort the pathkeys by sort cost
-        * (as if the pathkey was sorted independently) and then check only the four
-        * cheapest pathkeys. The remaining pathkeys are kept ordered by cost.
+        * We could exhaustively cost all possible orderings of the pathkeys, but
+        * for a large number of pathkeys it might be prohibitively expensive. So
+        * we try to apply simple cheap heuristics first - we sort the pathkeys by
+        * sort cost (as if the pathkey was sorted independently) and then check
+        * only the four cheapest pathkeys. The remaining pathkeys are kept
+        * ordered by cost.
         *
         * XXX This is a very simple heuristic, but likely to work fine for most
-        * cases (because the number of GROUP BY clauses tends to be lower than 4).
-        * But it ignores how the number of distinct values in each pathkey affects
-        * the following steps. It might be better to use "more expensive" pathkey
-        * first if it has many distinct values, because it then limits the number
-        * of comparisons for the remaining pathkeys. But evaluating that is likely
-        * quite the expensive.
+        * cases (because the number of GROUP BY clauses tends to be lower than
+        * 4). But it ignores how the number of distinct values in each pathkey
+        * affects the following steps. It might be better to use "more expensive"
+        * pathkey first if it has many distinct values, because it then limits
+        * the number of comparisons for the remaining pathkeys. But evaluating
+        * that is likely quite expensive.
         */
        nFreeKeys = list_length(*group_pathkeys) - n_preordered;
        nToPermute = 4;
        if (nFreeKeys > nToPermute)
        {
-               int i;
+               int                     i;
                PathkeySortCost *costs = palloc(sizeof(PathkeySortCost) * nFreeKeys);
 
                /* skip the pre-ordered pathkeys */
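A hedged sketch of the heuristic described in the comment above: order the free keys by their standalone sort cost with qsort(), then permute only the cheapest four. KeyCost and keycost_cmp are illustrative stand-ins for PathkeySortCost and the qsort() comparator whose definition the hunk truncates; the numbers are made up.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct KeyCost
    {
        double  cost;               /* standalone sort cost of one key */
        int     key;                /* stand-in for the PathKey pointer */
    } KeyCost;

    /* qsort() comparator: cheapest-to-sort key first */
    static int
    keycost_cmp(const void *a, const void *b)
    {
        const KeyCost *ka = (const KeyCost *) a;
        const KeyCost *kb = (const KeyCost *) b;

        return (ka->cost > kb->cost) - (ka->cost < kb->cost);
    }

    int
    main(void)
    {
        KeyCost keys[] = {{4.2, 1}, {0.7, 2}, {9.1, 3},
                          {1.3, 4}, {2.8, 5}, {5.0, 6}};
        int     nfree = 6;          /* free (not pre-ordered) keys */
        int     ntopermute = 4;     /* bound: at most 4! = 24 orderings */

        qsort(keys, nfree, sizeof(KeyCost), keycost_cmp);

        for (int i = 0; i < nfree; i++)
            printf("%s key %d (cost %.1f)\n",
                   i < ntopermute ? "permute" : "keep",
                   keys[i].key, keys[i].cost);
        return 0;
    }
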
@@ -624,7 +629,7 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
                /* estimate cost for sorting individual pathkeys */
                for (i = 0; cell != NULL; i++, (cell = lnext(*group_pathkeys, cell)))
                {
-                       List *to_cost = list_make1(lfirst(cell));
+                       List       *to_cost = list_make1(lfirst(cell));
 
                        Assert(i < nFreeKeys);
 
@@ -658,28 +663,29 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
        Assert(list_length(new_group_pathkeys) == list_length(*group_pathkeys));
 
        /*
-        * Generate pathkey lists with permutations of the first nToPermute pathkeys.
+        * Generate pathkey lists with permutations of the first nToPermute
+        * pathkeys.
         *
         * XXX We simply calculate sort cost for each individual pathkey list, but
-        * there's room for two dynamic programming optimizations here. Firstly, we
-        * may pass the current "best" cost to cost_sort_estimate so that it can
-        * "abort" if the estimated pathkeys list exceeds it. Secondly, it could pass
-        * the return information about the position when it exceeded the cost, and
-        * we could skip all permutations with the same prefix.
+        * there's room for two dynamic programming optimizations here. Firstly,
+        * we may pass the current "best" cost to cost_sort_estimate so that it
+        * can "abort" if the estimated pathkeys list exceeds it. Secondly, it
+        * could return information about the position at which it exceeded the
+        * cost, and we could skip all permutations with the same prefix.
         *
         * Imagine we've already found ordering with cost C1, and we're evaluating
         * another ordering - cost_sort_estimate() calculates cost by adding the
         * pathkeys one by one (more or less), and the cost only grows. If at any
-        * point it exceeds C1, it can't possibly be "better" so we can discard it.
-        * But we also know that we can discard all ordering with the same prefix,
-        * because if we're estimating (a,b,c,d) and we exceed C1 at (a,b) then the
-        * same thing will happen for any ordering with this prefix.
+        * point it exceeds C1, it can't possibly be "better" so we can discard
+        * it. But we also know that we can discard all orderings with the same
+        * prefix, because if we're estimating (a,b,c,d) and we exceed C1 at (a,b)
+        * then the same thing will happen for any ordering with this prefix.
         */
        PathkeyMutatorInit(&mstate, new_group_pathkeys, n_preordered, n_preordered + nToPermute);
 
-       while((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL)
+       while ((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL)
        {
-               Cost    cost;
+               Cost            cost;
 
                cost = cost_sort_estimate(root, var_group_pathkeys, n_preordered, nrows);
 
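The XXX comment above outlines a branch-and-bound refinement. Under an assumed additive cost model (the real cost_sort_estimate() is more subtle), the prefix-pruning idea it describes looks like the following sketch: once a prefix's running cost reaches the best complete ordering found so far, every permutation sharing that prefix is abandoned at once. For brevity this tracks only the cheapest cost, not the winning ordering.

    #include <stdbool.h>

    /*
     * Recursively enumerate orderings of n keys, pruning any branch whose
     * accumulated cost already matches or exceeds *best.  Call with
     * *best = -1.0, meaning "no complete ordering costed yet".
     */
    static void
    search(const double *key_cost, bool *used, int n, int depth,
           double running, double *best)
    {
        if (*best >= 0.0 && running >= *best)
            return;                 /* prune: whole prefix subtree is dead */

        if (depth == n)
        {
            *best = running;        /* new cheapest complete ordering */
            return;
        }

        for (int i = 0; i < n; i++)
        {
            if (used[i])
                continue;
            used[i] = true;
            search(key_cost, used, n, depth + 1,
                   running + key_cost[i], best);
            used[i] = false;
        }
    }
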
@@ -694,11 +700,11 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows,
        /* Reorder the group clauses according to the reordered pathkeys. */
        foreach(cell, new_group_pathkeys)
        {
-               PathKey                 *pathkey = (PathKey *) lfirst(cell);
+               PathKey    *pathkey = (PathKey *) lfirst(cell);
 
                new_group_clauses = lappend(new_group_clauses,
-                                               get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref,
-                                                                                               *group_clauses));
+                                                                       get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref,
+                                                                                                                       *group_clauses));
        }
 
        /* Just append the remaining GROUP BY clauses */
@@ -745,8 +751,8 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
        PathKeyInfo *info;
        int                     n_preordered = 0;
 
-       List *pathkeys = group_pathkeys;
-       List *clauses = group_clauses;
+       List       *pathkeys = group_pathkeys;
+       List       *clauses = group_clauses;
 
        /* always return at least the original pathkeys/clauses */
        info = makeNode(PathKeyInfo);
@@ -756,9 +762,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
        infos = lappend(infos, info);
 
        /*
-        * Should we try generating alternative orderings of the group keys? If not,
-        * we produce only the order specified in the query, i.e. the optimization
-        * is effectively disabled.
+        * Should we try generating alternative orderings of the group keys? If
+        * not, we produce only the order specified in the query, i.e. the
+        * optimization is effectively disabled.
         */
        if (!enable_group_by_reordering)
                return infos;
@@ -782,8 +788,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
        }
 
        /*
-        * If the path is sorted in some way, try reordering the group keys to match
-        * as much of the ordering as possible - we get this sort for free (mostly).
+        * If the path is sorted in some way, try reordering the group keys to
+        * match as much of the ordering as possible - we get this sort for free
+        * (mostly).
         *
         * We must not do this when there are no grouping sets, because those use
         * more complex logic to decide the ordering.
@@ -2400,8 +2407,8 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
 static int
 pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys)
 {
-       ListCell *key;
-       int               n = 0;
+       ListCell   *key;
+       int                     n = 0;
 
        /* no special ordering requested for grouping */
        if (root->group_pathkeys == NIL)
@@ -2414,7 +2421,7 @@ pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys)
        /* walk the pathkeys and search for matching group key */
        foreach(key, pathkeys)
        {
-               PathKey *pathkey = (PathKey *) lfirst(key);
+               PathKey    *pathkey = (PathKey *) lfirst(key);
 
                /* no matching group key, we're done */
                if (!list_member_ptr(root->group_pathkeys, pathkey))
index db11936efefb4e9ccfc9fc78e13549d5a5207184..f4cc56039c2a176c752b3c151f3563ae958b7ea3 100644 (file)
@@ -1162,8 +1162,8 @@ mark_async_capable_plan(Plan *plan, Path *path)
                case T_ProjectionPath:
 
                        /*
-                        * If the generated plan node includes a Result node for
-                        * the projection, we can't execute it asynchronously.
+                        * If the generated plan node includes a Result node for the
+                        * projection, we can't execute it asynchronously.
                         */
                        if (IsA(plan, Result))
                                return false;
index 9a4accb4d9da31147fabf08600c9c69332a23ec4..a0f2390334eb0aeb062232416f2b8991a82acda0 100644 (file)
@@ -6250,7 +6250,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                        Assert(list_length(pathkey_orderings) > 0);
 
                        /* process all potentially interesting grouping reorderings */
-                       foreach (lc2, pathkey_orderings)
+                       foreach(lc2, pathkey_orderings)
                        {
                                bool            is_sorted;
                                int                     presorted_keys = 0;
@@ -6283,8 +6283,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                        else if (parse->hasAggs)
                                        {
                                                /*
-                                                * We have aggregation, possibly with plain GROUP BY. Make
-                                                * an AggPath.
+                                                * We have aggregation, possibly with plain GROUP BY.
+                                                * Make an AggPath.
                                                 */
                                                add_path(grouped_rel, (Path *)
                                                                 create_agg_path(root,
@@ -6301,8 +6301,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                        else if (group_clauses)
                                        {
                                                /*
-                                                * We have GROUP BY without aggregation or grouping sets.
-                                                * Make a GroupPath.
+                                                * We have GROUP BY without aggregation or grouping
+                                                * sets. Make a GroupPath.
                                                 */
                                                add_path(grouped_rel, (Path *)
                                                                 create_group_path(root,
@@ -6321,8 +6321,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
 
                                /*
                                 * Now we may consider incremental sort on this path, but only
-                                * when the path is not already sorted and when incremental sort
-                                * is enabled.
+                                * when the path is not already sorted and when incremental
+                                * sort is enabled.
                                 */
                                if (is_sorted || !enable_incremental_sort)
                                        continue;
@@ -6335,8 +6335,9 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                        continue;
 
                                /*
-                                * We should have already excluded pathkeys of length 1 because
-                                * then presorted_keys > 0 would imply is_sorted was true.
+                                * We should have already excluded pathkeys of length 1
+                                * because then presorted_keys > 0 would imply is_sorted was
+                                * true.
                                 */
                                Assert(list_length(root->group_pathkeys) != 1);
 
@@ -6357,8 +6358,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                else if (parse->hasAggs)
                                {
                                        /*
-                                        * We have aggregation, possibly with plain GROUP BY. Make an
-                                        * AggPath.
+                                        * We have aggregation, possibly with plain GROUP BY. Make
+                                        * an AggPath.
                                         */
                                        add_path(grouped_rel, (Path *)
                                                         create_agg_path(root,
@@ -6375,8 +6376,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                else if (parse->groupClause)
                                {
                                        /*
-                                        * We have GROUP BY without aggregation or grouping sets. Make
-                                        * a GroupPath.
+                                        * We have GROUP BY without aggregation or grouping sets.
+                                        * Make a GroupPath.
                                         */
                                        add_path(grouped_rel, (Path *)
                                                         create_group_path(root,
@@ -6421,7 +6422,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                Assert(list_length(pathkey_orderings) > 0);
 
                                /* process all potentially interesting grouping reorderings */
-                               foreach (lc2, pathkey_orderings)
+                               foreach(lc2, pathkey_orderings)
                                {
                                        bool            is_sorted;
                                        int                     presorted_keys = 0;
@@ -6435,8 +6436,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                                                                                                        &presorted_keys);
 
                                        /*
-                                        * Insert a Sort node, if required.  But there's no point in
-                                        * sorting anything but the cheapest path.
+                                        * Insert a Sort node, if required.  But there's no point
+                                        * in sorting anything but the cheapest path.
                                         */
                                        if (!is_sorted)
                                        {
@@ -6471,24 +6472,30 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
                                                                                                   dNumGroups));
 
                                        /*
-                                        * Now we may consider incremental sort on this path, but only
-                                        * when the path is not already sorted and when incremental
-                                        * sort is enabled.
+                                        * Now we may consider incremental sort on this path, but
+                                        * only when the path is not already sorted and when
+                                        * incremental sort is enabled.
                                         */
                                        if (is_sorted || !enable_incremental_sort)
                                                continue;
 
-                                       /* Restore the input path (we might have added Sort on top). */
+                                       /*
+                                        * Restore the input path (we might have added Sort on
+                                        * top).
+                                        */
                                        path = path_original;
 
-                                       /* no shared prefix, not point in building incremental sort */
+                                       /*
+                                        * no shared prefix, no point in building incremental
+                                        * sort
+                                        */
                                        if (presorted_keys == 0)
                                                continue;
 
                                        /*
                                         * We should have already excluded pathkeys of length 1
-                                        * because then presorted_keys > 0 would imply is_sorted was
-                                        * true.
+                                        * because then presorted_keys > 0 would imply is_sorted
+                                        * was true.
                                         */
                                        Assert(list_length(root->group_pathkeys) != 1);
 
@@ -6741,7 +6748,7 @@ create_partial_grouping_paths(PlannerInfo *root,
                        Assert(list_length(pathkey_orderings) > 0);
 
                        /* process all potentially interesting grouping reorderings */
-                       foreach (lc2, pathkey_orderings)
+                       foreach(lc2, pathkey_orderings)
                        {
                                bool            is_sorted;
                                int                     presorted_keys = 0;
@@ -6874,7 +6881,7 @@ create_partial_grouping_paths(PlannerInfo *root,
                        Assert(list_length(pathkey_orderings) > 0);
 
                        /* process all potentially interesting grouping reorderings */
-                       foreach (lc2, pathkey_orderings)
+                       foreach(lc2, pathkey_orderings)
                        {
                                bool            is_sorted;
                                int                     presorted_keys = 0;
@@ -6924,8 +6931,8 @@ create_partial_grouping_paths(PlannerInfo *root,
 
                                /*
                                 * Now we may consider incremental sort on this path, but only
-                                * when the path is not already sorted and when incremental sort
-                                * is enabled.
+                                * when the path is not already sorted and when incremental
+                                * sort is enabled.
                                 */
                                if (is_sorted || !enable_incremental_sort)
                                        continue;
@@ -6938,8 +6945,9 @@ create_partial_grouping_paths(PlannerInfo *root,
                                        continue;
 
                                /*
-                                * We should have already excluded pathkeys of length 1 because
-                                * then presorted_keys > 0 would imply is_sorted was true.
+                                * We should have already excluded pathkeys of length 1
+                                * because then presorted_keys > 0 would imply is_sorted was
+                                * true.
                                 */
                                Assert(list_length(root->group_pathkeys) != 1);
 
index e381ae512a293244df6a9b77a4df0a304c0d18ed..533df86ff77291ede9d2f77ee4f6f29612cde177 100644 (file)
@@ -391,7 +391,7 @@ contain_mutable_functions_walker(Node *node, void *context)
                const JsonConstructorExpr *ctor = (JsonConstructorExpr *) node;
                ListCell   *lc;
                bool            is_jsonb =
-                       ctor->returning->format->format_type == JS_FORMAT_JSONB;
+               ctor->returning->format->format_type == JS_FORMAT_JSONB;
 
                /* Check argument_type => json[b] conversions */
                foreach(lc, ctor->args)
@@ -899,7 +899,7 @@ max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context)
        /* JsonExpr is parallel-unsafe if subtransactions can be used. */
        else if (IsA(node, JsonExpr))
        {
-               JsonExpr  *jsexpr = (JsonExpr *) node;
+               JsonExpr   *jsexpr = (JsonExpr *) node;
 
                if (ExecEvalJsonNeedsSubTransaction(jsexpr, NULL))
                {
@@ -3581,7 +3581,7 @@ eval_const_expressions_mutator(Node *node,
                                        context->case_val = raw;
 
                                        formatted = eval_const_expressions_mutator((Node *) jve->formatted_expr,
-                                                                                                                               context);
+                                                                                                                          context);
 
                                        context->case_val = save_case_val;
 
@@ -5315,7 +5315,7 @@ pull_paramids_walker(Node *node, Bitmapset **context)
                return false;
        if (IsA(node, Param))
        {
-               Param      *param = (Param *)node;
+               Param      *param = (Param *) node;
 
                *context = bms_add_member(*context, param->paramid);
                return false;
index df97b799174be0526c54f0cbf34613d1c2b0dbf3..5012bfe1425b226df0451f90a9b26a7dc07e136e 100644 (file)
@@ -968,102 +968,102 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
 
        if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind))
        {
-                       table_relation_estimate_size(rel, attr_widths, pages, tuples,
-                                                                                allvisfrac);
+               table_relation_estimate_size(rel, attr_widths, pages, tuples,
+                                                                        allvisfrac);
        }
        else if (rel->rd_rel->relkind == RELKIND_INDEX)
        {
-                       /*
-                        * XXX: It'd probably be good to move this into a callback,
-                        * individual index types e.g. know if they have a metapage.
-                        */
+               /*
+                * XXX: It'd probably be good to move this into a callback, individual
+                * index types e.g. know if they have a metapage.
+                */
 
-                       /* it has storage, ok to call the smgr */
-                       curpages = RelationGetNumberOfBlocks(rel);
+               /* it has storage, ok to call the smgr */
+               curpages = RelationGetNumberOfBlocks(rel);
 
-                       /* report estimated # pages */
-                       *pages = curpages;
-                       /* quick exit if rel is clearly empty */
-                       if (curpages == 0)
-                       {
-                               *tuples = 0;
-                               *allvisfrac = 0;
-                               return;
-                       }
+               /* report estimated # pages */
+               *pages = curpages;
+               /* quick exit if rel is clearly empty */
+               if (curpages == 0)
+               {
+                       *tuples = 0;
+                       *allvisfrac = 0;
+                       return;
+               }
 
-                       /* coerce values in pg_class to more desirable types */
-                       relpages = (BlockNumber) rel->rd_rel->relpages;
-                       reltuples = (double) rel->rd_rel->reltuples;
-                       relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
+               /* coerce values in pg_class to more desirable types */
+               relpages = (BlockNumber) rel->rd_rel->relpages;
+               reltuples = (double) rel->rd_rel->reltuples;
+               relallvisible = (BlockNumber) rel->rd_rel->relallvisible;
 
+               /*
+                * Discount the metapage while estimating the number of tuples. This
+                * is a kluge because it assumes more than it ought to about index
+                * structure.  Currently it's OK for btree, hash, and GIN indexes but
+                * suspect for GiST indexes.
+                */
+               if (relpages > 0)
+               {
+                       curpages--;
+                       relpages--;
+               }
+
+               /* estimate number of tuples from previous tuple density */
+               if (reltuples >= 0 && relpages > 0)
+                       density = reltuples / (double) relpages;
+               else
+               {
                        /*
-                        * Discount the metapage while estimating the number of tuples.
-                        * This is a kluge because it assumes more than it ought to about
-                        * index structure.  Currently it's OK for btree, hash, and GIN
-                        * indexes but suspect for GiST indexes.
+                        * If we have no data because the relation was never vacuumed,
+                        * estimate tuple width from attribute datatypes.  We assume here
+                        * that the pages are completely full, which is OK for tables
+                        * (since they've presumably not been VACUUMed yet) but is
+                        * probably an overestimate for indexes.  Fortunately
+                        * get_relation_info() can clamp the overestimate to the parent
+                        * table's size.
+                        *
+                        * Note: this code intentionally disregards alignment
+                        * considerations, because (a) that would be gilding the lily
+                        * considering how crude the estimate is, and (b) it creates
+                        * platform dependencies in the default plans which are kind of a
+                        * headache for regression testing.
+                        *
+                        * XXX: Should this logic be more index specific?
                         */
-                       if (relpages > 0)
-                       {
-                               curpages--;
-                               relpages--;
-                       }
-
-                       /* estimate number of tuples from previous tuple density */
-                       if (reltuples >= 0 && relpages > 0)
-                               density = reltuples / (double) relpages;
-                       else
-                       {
-                               /*
-                                * If we have no data because the relation was never vacuumed,
-                                * estimate tuple width from attribute datatypes.  We assume
-                                * here that the pages are completely full, which is OK for
-                                * tables (since they've presumably not been VACUUMed yet) but
-                                * is probably an overestimate for indexes.  Fortunately
-                                * get_relation_info() can clamp the overestimate to the
-                                * parent table's size.
-                                *
-                                * Note: this code intentionally disregards alignment
-                                * considerations, because (a) that would be gilding the lily
-                                * considering how crude the estimate is, and (b) it creates
-                                * platform dependencies in the default plans which are kind
-                                * of a headache for regression testing.
-                                *
-                                * XXX: Should this logic be more index specific?
-                                */
-                               int32           tuple_width;
+                       int32           tuple_width;
 
-                               tuple_width = get_rel_data_width(rel, attr_widths);
-                               tuple_width += MAXALIGN(SizeofHeapTupleHeader);
-                               tuple_width += sizeof(ItemIdData);
-                               /* note: integer division is intentional here */
-                               density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
-                       }
-                       *tuples = rint(density * (double) curpages);
+                       tuple_width = get_rel_data_width(rel, attr_widths);
+                       tuple_width += MAXALIGN(SizeofHeapTupleHeader);
+                       tuple_width += sizeof(ItemIdData);
+                       /* note: integer division is intentional here */
+                       density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width;
+               }
+               *tuples = rint(density * (double) curpages);
 
-                       /*
-                        * We use relallvisible as-is, rather than scaling it up like we
-                        * do for the pages and tuples counts, on the theory that any
-                        * pages added since the last VACUUM are most likely not marked
-                        * all-visible.  But costsize.c wants it converted to a fraction.
-                        */
-                       if (relallvisible == 0 || curpages <= 0)
-                               *allvisfrac = 0;
-                       else if ((double) relallvisible >= curpages)
-                               *allvisfrac = 1;
-                       else
-                               *allvisfrac = (double) relallvisible / curpages;
+               /*
+                * We use relallvisible as-is, rather than scaling it up like we do
+                * for the pages and tuples counts, on the theory that any pages added
+                * since the last VACUUM are most likely not marked all-visible.  But
+                * costsize.c wants it converted to a fraction.
+                */
+               if (relallvisible == 0 || curpages <= 0)
+                       *allvisfrac = 0;
+               else if ((double) relallvisible >= curpages)
+                       *allvisfrac = 1;
+               else
+                       *allvisfrac = (double) relallvisible / curpages;
        }
        else
        {
-                       /*
-                        * Just use whatever's in pg_class.  This covers foreign tables,
-                        * sequences, and also relkinds without storage (shouldn't get
-                        * here?); see initializations in AddNewRelationTuple().  Note
-                        * that FDW must cope if reltuples is -1!
-                        */
-                       *pages = rel->rd_rel->relpages;
-                       *tuples = rel->rd_rel->reltuples;
-                       *allvisfrac = 0;
+               /*
+                * Just use whatever's in pg_class.  This covers foreign tables,
+                * sequences, and also relkinds without storage (shouldn't get here?);
+                * see initializations in AddNewRelationTuple().  Note that FDW must
+                * cope if reltuples is -1!
+                */
+               *pages = rel->rd_rel->relpages;
+               *tuples = rel->rd_rel->reltuples;
+               *allvisfrac = 0;
        }
 }
 
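A worked numeric example of the index-size arithmetic above, with made-up pg_class values (every number here is an assumption for illustration only):

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* hypothetical btree: 121 pages now; last VACUUM saw 101 pages,
         * 10000 tuples, 90 all-visible pages */
        double  curpages = 121;
        double  relpages = 101;
        double  reltuples = 10000;
        double  relallvisible = 90;

        /* discount the metapage, as the code above does */
        curpages--;
        relpages--;

        /* scale the previous tuple density up to the current size */
        double  density = reltuples / relpages;     /* 100 tuples/page */
        double  tuples = rint(density * curpages);  /* 12000 */

        /* (had reltuples been -1, the fallback density would be
         * (BLCKSZ - SizeOfPageHeaderData) / tuple_width with integer
         * division, e.g. (8192 - 24) / 44 = 185 tuples/page for a
         * hypothetical 44-byte tuple) */

        /* relallvisible is used as-is, converted to a fraction */
        double  allvisfrac = relallvisible / curpages;  /* 0.75 */

        printf("tuples=%.0f allvisfrac=%.2f\n", tuples, allvisfrac);
        return 0;
    }
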
index 6b54e8e46df4f448b93d8b437aba49d6d881e479..1bcb875507d28e3907572674c03c71aebe5c0ff5 100644 (file)
@@ -104,8 +104,8 @@ static bool test_raw_expression_coverage(Node *node, void *context);
  */
 Query *
 parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText,
-                         const Oid *paramTypes, int numParams,
-                         QueryEnvironment *queryEnv)
+                                                 const Oid *paramTypes, int numParams,
+                                                 QueryEnvironment *queryEnv)
 {
        ParseState *pstate = make_parsestate(NULL);
        Query      *query;
@@ -2076,8 +2076,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
                ListCell   *ltl;
                ListCell   *rtl;
                const char *context;
-               bool recursive = (pstate->p_parent_cte &&
-                                                 pstate->p_parent_cte->cterecursive);
+               bool            recursive = (pstate->p_parent_cte &&
+                                                                pstate->p_parent_cte->cterecursive);
 
                context = (stmt->op == SETOP_UNION ? "UNION" :
                                   (stmt->op == SETOP_INTERSECT ? "INTERSECT" :
@@ -2231,7 +2231,10 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
                                setup_parser_errposition_callback(&pcbstate, pstate,
                                                                                                  bestlocation);
 
-                               /* If it's a recursive union, we need to require hashing support. */
+                               /*
+                                * If it's a recursive union, we need to require hashing
+                                * support.
+                                */
                                op->groupClauses = lappend(op->groupClauses,
                                                                                   makeSortGroupClauseForSetOp(rescoltype, recursive));
 
index dafde68b2079a9ca06b047b4a3b8153854e29a4c..e2baa9d852efda43d9101b273c38454ee8ecade8 100644 (file)
@@ -2004,7 +2004,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
        }
        if (IsA(node, A_Const))
        {
-               A_Const    *aconst = castNode(A_Const, node);
+               A_Const    *aconst = castNode(A_Const, node);
                int                     targetlist_pos = 0;
                int                     target_pos;
 
index 45dacc6c4c5f9495e883aab4dfbb2ce7b6bccec7..e90af4c4771a1aadff46ed2d8e1881c36521a5eb 100644 (file)
@@ -692,8 +692,11 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                                                }
                                                break;
                                        case T_JsonExpr:
-                                               /* Context item and PASSING arguments are already
-                                                * marked with collations in parse_expr.c. */
+
+                                               /*
+                                                * Context item and PASSING arguments are already
+                                                * marked with collations in parse_expr.c.
+                                                */
                                                break;
                                        default:
 
index c1f194cc5b0a8cf17f9784bafd536ff993fce2a9..17709c3416bc62035e3f879940c193f608853176 100644 (file)
@@ -3277,7 +3277,7 @@ transformJsonValueExprExt(ParseState *pstate, JsonValueExpr *ve,
 
                if (exprtype == JSONOID || exprtype == JSONBOID)
                {
-                       format = JS_FORMAT_DEFAULT;     /* do not format json[b] types */
+                       format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
                        ereport(WARNING,
                                        (errmsg("FORMAT JSON has no effect for json and jsonb types"),
                                         parser_errposition(pstate, ve->format->location)));
@@ -3316,7 +3316,7 @@ transformJsonValueExprExt(ParseState *pstate, JsonValueExpr *ve,
                format = default_format;
        }
        else if (exprtype == JSONOID || exprtype == JSONBOID)
-               format = JS_FORMAT_DEFAULT;     /* do not format json[b] types */
+               format = JS_FORMAT_DEFAULT; /* do not format json[b] types */
        else
                format = default_format;
 
@@ -3364,13 +3364,13 @@ transformJsonValueExprExt(ParseState *pstate, JsonValueExpr *ve,
                        FuncExpr   *fexpr;
                        Oid                     fnoid;
 
-                       if (cast_is_needed)             /* only CAST is allowed */
+                       if (cast_is_needed) /* only CAST is allowed */
                                ereport(ERROR,
                                                (errcode(ERRCODE_CANNOT_COERCE),
                                                 errmsg("cannot cast type %s to %s",
                                                                format_type_be(exprtype),
                                                                format_type_be(targettype)),
-                                                               parser_errposition(pstate, location)));
+                                                parser_errposition(pstate, location)));
 
                        fnoid = targettype == JSONOID ? F_TO_JSON : F_TO_JSONB;
                        fexpr = makeFuncExpr(fnoid, targettype, list_make1(expr),
@@ -3444,7 +3444,7 @@ checkJsonOutputFormat(ParseState *pstate, const JsonFormat *format,
        if (format->format_type == JS_FORMAT_JSON)
        {
                JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ?
-                                                  format->encoding : JS_ENC_UTF8;
+               format->encoding : JS_ENC_UTF8;
 
                if (targettype != BYTEAOID &&
                        format->encoding != JS_ENC_DEFAULT)
@@ -3583,6 +3583,7 @@ coerceJsonFuncExpr(ParseState *pstate, Node *expr,
                                                                                 list_make2(texpr, enc),
                                                                                 InvalidOid, InvalidOid,
                                                                                 COERCE_EXPLICIT_CALL);
+
                fexpr->location = location;
 
                return (Node *) fexpr;
@@ -3591,7 +3592,7 @@ coerceJsonFuncExpr(ParseState *pstate, Node *expr,
        /* try to coerce expression to the output type */
        res = coerce_to_target_type(pstate, expr, exprtype,
                                                                returning->typid, returning->typmod,
-                                                               /* XXX throwing errors when casting to char(N) */
+       /* XXX throwing errors when casting to char(N) */
                                                                COERCION_EXPLICIT,
                                                                COERCE_EXPLICIT_CAST,
                                                                location);
@@ -3616,7 +3617,7 @@ makeJsonConstructorExpr(ParseState *pstate, JsonConstructorType type,
        Node       *placeholder;
        Node       *coercion;
        Oid                     intermediate_typid =
-               returning->format->format_type == JS_FORMAT_JSONB ? JSONBOID : JSONOID;
+       returning->format->format_type == JS_FORMAT_JSONB ? JSONBOID : JSONOID;
 
        jsctor->args = args;
        jsctor->func = fexpr;
@@ -3694,7 +3695,7 @@ static Node *
 transformJsonArrayQueryConstructor(ParseState *pstate,
                                                                   JsonArrayQueryConstructor *ctor)
 {
-       SubLink    *sublink = makeNode(SubLink);
+       SubLink    *sublink = makeNode(SubLink);
        SelectStmt *select = makeNode(SelectStmt);
        RangeSubselect *range = makeNode(RangeSubselect);
        Alias      *alias = makeNode(Alias);
@@ -3766,8 +3767,8 @@ transformJsonAggConstructor(ParseState *pstate, JsonAggConstructor *agg_ctor,
        Oid                     aggfnoid;
        Node       *node;
        Expr       *aggfilter = agg_ctor->agg_filter ? (Expr *)
-               transformWhereClause(pstate, agg_ctor->agg_filter,
-                                                        EXPR_KIND_FILTER, "FILTER") : NULL;
+       transformWhereClause(pstate, agg_ctor->agg_filter,
+                                                EXPR_KIND_FILTER, "FILTER") : NULL;
 
        aggfnoid = DatumGetInt32(DirectFunctionCall1(regprocin,
                                                                                                 CStringGetDatum(aggfn)));
@@ -3809,7 +3810,7 @@ transformJsonAggConstructor(ParseState *pstate, JsonAggConstructor *agg_ctor,
                aggref->aggtype = aggtype;
 
                /* aggcollid and inputcollid will be set by parse_collate.c */
-               aggref->aggtranstype = InvalidOid;              /* will be set by planner */
+               aggref->aggtranstype = InvalidOid;      /* will be set by planner */
                /* aggargtypes will be set by transformAggregateCall */
                /* aggdirectargs and args will be set by transformAggregateCall */
                /* aggorder and aggdistinct will be set by transformAggregateCall */
@@ -3818,7 +3819,7 @@ transformJsonAggConstructor(ParseState *pstate, JsonAggConstructor *agg_ctor,
                aggref->aggvariadic = false;
                aggref->aggkind = AGGKIND_NORMAL;
                /* agglevelsup will be set by transformAggregateCall */
-               aggref->aggsplit = AGGSPLIT_SIMPLE;             /* planner might change this */
+               aggref->aggsplit = AGGSPLIT_SIMPLE; /* planner might change this */
                aggref->location = agg_ctor->location;
 
                transformAggregateCall(pstate, aggref, args, agg_ctor->agg_order, false);
@@ -3860,14 +3861,13 @@ transformJsonObjectAgg(ParseState *pstate, JsonObjectAgg *agg)
        {
                if (agg->absent_on_null)
                        if (agg->unique)
-                               aggfnname = "pg_catalog.jsonb_object_agg_unique_strict"; /* F_JSONB_OBJECT_AGG_UNIQUE_STRICT */
+                               aggfnname = "pg_catalog.jsonb_object_agg_unique_strict";        /* F_JSONB_OBJECT_AGG_UNIQUE_STRICT */
                        else
-                               aggfnname = "pg_catalog.jsonb_object_agg_strict"; /* F_JSONB_OBJECT_AGG_STRICT */
+                               aggfnname = "pg_catalog.jsonb_object_agg_strict";       /* F_JSONB_OBJECT_AGG_STRICT */
+               else if (agg->unique)
+                       aggfnname = "pg_catalog.jsonb_object_agg_unique";       /* F_JSONB_OBJECT_AGG_UNIQUE */
                else
-                       if (agg->unique)
-                               aggfnname = "pg_catalog.jsonb_object_agg_unique"; /* F_JSONB_OBJECT_AGG_UNIQUE */
-                       else
-                               aggfnname = "pg_catalog.jsonb_object_agg"; /* F_JSONB_OBJECT_AGG */
+                       aggfnname = "pg_catalog.jsonb_object_agg";      /* F_JSONB_OBJECT_AGG */
 
                aggtype = JSONBOID;
        }
@@ -3877,12 +3877,11 @@ transformJsonObjectAgg(ParseState *pstate, JsonObjectAgg *agg)
                        if (agg->unique)
                                aggfnname = "pg_catalog.json_object_agg_unique_strict"; /* F_JSON_OBJECT_AGG_UNIQUE_STRICT */
                        else
-                               aggfnname = "pg_catalog.json_object_agg_strict"; /* F_JSON_OBJECT_AGG_STRICT */
+                               aggfnname = "pg_catalog.json_object_agg_strict";        /* F_JSON_OBJECT_AGG_STRICT */
+               else if (agg->unique)
+                       aggfnname = "pg_catalog.json_object_agg_unique";        /* F_JSON_OBJECT_AGG_UNIQUE */
                else
-                       if (agg->unique)
-                               aggfnname = "pg_catalog.json_object_agg_unique"; /* F_JSON_OBJECT_AGG_UNIQUE */
-                       else
-                               aggfnname = "pg_catalog.json_object_agg"; /* F_JSON_OBJECT_AGG */
+                       aggfnname = "pg_catalog.json_object_agg";       /* F_JSON_OBJECT_AGG */
 
                aggtype = JSONOID;
        }
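The restructured if/else chains above pick one of four aggregate functions from two flags. A hypothetical helper — not part of PostgreSQL — that makes the 2x2 mapping explicit for the json case; the jsonb branch is the same with a jsonb_ prefix:

    #include <stdbool.h>

    static const char *
    json_object_agg_fnname(bool absent_on_null, bool unique)
    {
        if (absent_on_null)
            return unique ? "pg_catalog.json_object_agg_unique_strict"
                          : "pg_catalog.json_object_agg_strict";
        return unique ? "pg_catalog.json_object_agg_unique"
                      : "pg_catalog.json_object_agg";
    }
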
@@ -4209,7 +4208,7 @@ coerceJsonExpr(ParseState *pstate, Node *expr, const JsonReturning *returning)
  * Transform a JSON output clause of JSON_VALUE and JSON_QUERY.
  */
 static void
-transformJsonFuncExprOutput(ParseState *pstate,        JsonFuncExpr *func,
+transformJsonFuncExprOutput(ParseState *pstate, JsonFuncExpr *func,
                                                        JsonExpr *jsexpr)
 {
        Node       *expr = jsexpr->formatted_expr;
@@ -4333,19 +4332,19 @@ initJsonItemCoercions(ParseState *pstate, JsonItemCoercions *coercions,
                Oid                     typid;
        }                  *p,
                                coercionTypids[] =
-                               {
-                                       { &coercions->null, UNKNOWNOID },
-                                       { &coercions->string, TEXTOID },
-                                       { &coercions->numeric, NUMERICOID },
-                                       { &coercions->boolean, BOOLOID },
-                                       { &coercions->date, DATEOID },
-                                       { &coercions->time, TIMEOID },
-                                       { &coercions->timetz, TIMETZOID },
-                                       { &coercions->timestamp, TIMESTAMPOID },
-                                       { &coercions->timestamptz, TIMESTAMPTZOID },
-                                       { &coercions->composite, contextItemTypeId },
-                                       { NULL, InvalidOid }
-                               };
+       {
+               {&coercions->null, UNKNOWNOID},
+               {&coercions->string, TEXTOID},
+               {&coercions->numeric, NUMERICOID},
+               {&coercions->boolean, BOOLOID},
+               {&coercions->date, DATEOID},
+               {&coercions->time, TIMEOID},
+               {&coercions->timetz, TIMETZOID},
+               {&coercions->timestamp, TIMESTAMPOID},
+               {&coercions->timestamptz, TIMESTAMPTZOID},
+               {&coercions->composite, contextItemTypeId},
+               {NULL, InvalidOid}
+       };
 
        for (p = coercionTypids; p->coercion; p++)
                *p->coercion = initJsonItemCoercion(pstate, p->typid, returning);
@@ -4512,7 +4511,7 @@ static Node *
 transformJsonParseExpr(ParseState *pstate, JsonParseExpr *jsexpr)
 {
        JsonReturning *returning = transformJsonConstructorRet(pstate, jsexpr->output,
-                                                                                                       "JSON()");
+                                                                                                                  "JSON()");
        Node       *arg;
 
        if (jsexpr->unique_keys)
@@ -4544,8 +4543,8 @@ transformJsonParseExpr(ParseState *pstate, JsonParseExpr *jsexpr)
        }
 
        return makeJsonConstructorExpr(pstate, JSCTOR_JSON_PARSE, list_make1(arg), NULL,
-                                                       returning, jsexpr->unique_keys, false,
-                                                       jsexpr->location);
+                                                                  returning, jsexpr->unique_keys, false,
+                                                                  jsexpr->location);
 }
 
 /*
@@ -4556,13 +4555,13 @@ transformJsonScalarExpr(ParseState *pstate, JsonScalarExpr *jsexpr)
 {
        Node       *arg = transformExprRecurse(pstate, (Node *) jsexpr->expr);
        JsonReturning *returning = transformJsonConstructorRet(pstate, jsexpr->output,
-                                                                                                       "JSON_SCALAR()");
+                                                                                                                  "JSON_SCALAR()");
 
        if (exprType(arg) == UNKNOWNOID)
                arg = coerce_to_specific_type(pstate, arg, TEXTOID, "JSON_SCALAR");
 
        return makeJsonConstructorExpr(pstate, JSCTOR_JSON_SCALAR, list_make1(arg), NULL,
-                                                       returning, false, false, jsexpr->location);
+                                                                  returning, false, false, jsexpr->location);
 }
 
 /*
@@ -4586,5 +4585,5 @@ transformJsonSerializeExpr(ParseState *pstate, JsonSerializeExpr *expr)
        }
 
        return makeJsonConstructorExpr(pstate, JSCTOR_JSON_SERIALIZE, list_make1(arg),
-                                                       NULL, returning, false, false, expr->location);
+                                                                  NULL, returning, false, false, expr->location);
 }
index 5ee63cf57f022b10d85cf90ec80470650e87ffe9..dbd3e66205d6569fc486075f28d49d9dbdb25667 100644 (file)
 /* Context for JSON_TABLE transformation */
 typedef struct JsonTableContext
 {
-       ParseState *pstate;                             /* parsing state */
-       JsonTable  *table;                              /* untransformed node */
-       TableFunc  *tablefunc;                  /* transformed node     */
-       List       *pathNames;                  /* list of all path and columns names */
-       int                     pathNameId;                     /* path name id counter */
+       ParseState *pstate;                     /* parsing state */
+       JsonTable  *table;                      /* untransformed node */
+       TableFunc  *tablefunc;          /* transformed node */
+       List       *pathNames;          /* list of all path and columns names */
+       int                     pathNameId;             /* path name id counter */
        Oid                     contextItemTypid;       /* type oid of context item (json/jsonb) */
 } JsonTableContext;
 
-static JsonTableParent * transformJsonTableColumns(JsonTableContext *cxt,
-                                                                                                  JsonTablePlan *plan,
-                                                                                                  List *columns,
-                                                                                                  char *pathSpec,
-                                                                                                  char **pathName,
-                                                                                                  int location);
+static JsonTableParent *transformJsonTableColumns(JsonTableContext *cxt,
+                                                                                                 JsonTablePlan *plan,
+                                                                                                 List *columns,
+                                                                                                 char *pathSpec,
+                                                                                                 char **pathName,
+                                                                                                 int location);
 
 static Node *
 makeStringConst(char *str, int location)
 {
-       A_Const *n = makeNode(A_Const);
+       A_Const    *n = makeNode(A_Const);
 
        n->val.node.type = T_String;
        n->val.sval.sval = str;
        n->location = location;
 
-       return (Node *)n;
+       return (Node *) n;
 }
 
 /*
@@ -122,7 +122,7 @@ transformJsonTableColumn(JsonTableColumn *jtc, Node *contextItemExpr,
 static bool
 isJsonTablePathNameDuplicate(JsonTableContext *cxt, const char *pathname)
 {
-       ListCell *lc;
+       ListCell   *lc;
 
        foreach(lc, cxt->pathNames)
        {
@@ -342,7 +342,7 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan,
                foreach(lc, columns)
                {
                        JsonTableColumn *jtc = castNode(JsonTableColumn, lfirst(lc));
-                       Node *node;
+                       Node       *node;
 
                        if (jtc->coltype != JTC_NESTED)
                                continue;
@@ -369,10 +369,10 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan,
                }
                else
                {
-                       Node       *node1 =
-                               transformJsonTableChildPlan(cxt, plan->plan1, columns);
-                       Node       *node2 =
-                               transformJsonTableChildPlan(cxt, plan->plan2, columns);
+                       Node       *node1 = transformJsonTableChildPlan(cxt, plan->plan1,
+                                                                                                                       columns);
+                       Node       *node2 = transformJsonTableChildPlan(cxt, plan->plan2,
+                                                                                                                       columns);
 
                        return makeJsonTableSiblingJoin(plan->join_type == JSTPJ_CROSS,
                                                                                        node1, node2);
@@ -396,7 +396,7 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan,
 static bool
 typeIsComposite(Oid typid)
 {
-       char typtype;
+       char            typtype;
 
        if (typid == JSONOID ||
                typid == JSONBOID ||
@@ -406,7 +406,7 @@ typeIsComposite(Oid typid)
 
        typtype = get_typtype(typid);
 
-       if (typtype ==  TYPTYPE_COMPOSITE)
+       if (typtype == TYPTYPE_COMPOSITE)
                return true;
 
        if (typtype == TYPTYPE_DOMAIN)
@@ -424,7 +424,7 @@ appendJsonTableColumns(JsonTableContext *cxt, List *columns)
        JsonTable  *jt = cxt->table;
        TableFunc  *tf = cxt->tablefunc;
        bool            errorOnError = jt->on_error &&
-                                                          jt->on_error->btype == JSON_BEHAVIOR_ERROR;
+       jt->on_error->btype == JSON_BEHAVIOR_ERROR;
 
        foreach(col, columns)
        {
@@ -436,24 +436,23 @@ appendJsonTableColumns(JsonTableContext *cxt, List *columns)
                if (rawc->name)
                {
                        /* make sure column names are unique */
-                       ListCell *colname;
+                       ListCell   *colname;
 
                        foreach(colname, tf->colnames)
                                if (!strcmp(strVal(lfirst(colname)), rawc->name))
-                                       ereport(ERROR,
-                                                       (errcode(ERRCODE_SYNTAX_ERROR),
-                                                        errmsg("column name \"%s\" is not unique",
-                                                                       rawc->name),
-                                                        parser_errposition(pstate, rawc->location)));
+                               ereport(ERROR,
+                                               (errcode(ERRCODE_SYNTAX_ERROR),
+                                                errmsg("column name \"%s\" is not unique",
+                                                               rawc->name),
+                                                parser_errposition(pstate, rawc->location)));
 
                        tf->colnames = lappend(tf->colnames,
                                                                   makeString(pstrdup(rawc->name)));
                }
 
                /*
-                * Determine the type and typmod for the new column. FOR
-                * ORDINALITY columns are INTEGER by standard; the others are
-                * user-specified.
+                * Determine the type and typmod for the new column. FOR ORDINALITY
+                * columns are INTEGER by standard; the others are user-specified.
                 */
                switch (rawc->coltype)
                {
@@ -517,8 +516,8 @@ appendJsonTableColumns(JsonTableContext *cxt, List *columns)
                tf->coltypmods = lappend_int(tf->coltypmods, typmod);
                tf->colcollations = lappend_oid(tf->colcollations,
                                                                                type_is_collatable(typid)
-                                                                                       ? DEFAULT_COLLATION_OID
-                                                                                       : InvalidOid);
+                                                                               ? DEFAULT_COLLATION_OID
+                                                                               : InvalidOid);
                tf->colvalexprs = lappend(tf->colvalexprs, colexpr);
        }
 }
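
Aside: the loop above that enforces column-name uniqueness walks tf->colnames with strcmp. As an illustrative sketch separate from this patch, the same uniqueness test over a plain C string array might look like the following; find_duplicate and the sample names are invented for the example.

    #include <stdio.h>
    #include <string.h>

    /* Return the first name that repeats, or NULL if all are unique. */
    static const char *find_duplicate(const char *names[], int n)
    {
        for (int i = 1; i < n; i++)
            for (int j = 0; j < i; j++)
                if (strcmp(names[i], names[j]) == 0)
                    return names[i];
        return NULL;
    }

    int main(void)
    {
        const char *cols[] = {"id", "payload", "id"};
        const char *dup = find_duplicate(cols, 3);

        if (dup != NULL)
            printf("column name \"%s\" is not unique\n", dup);
        return 0;
    }
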
@@ -571,7 +570,7 @@ transformJsonTableColumns(JsonTableContext *cxt, JsonTablePlan *plan,
                                         errdetail("JSON_TABLE columns must contain "
                                                           "explicit AS pathname specification if "
                                                           "explicit PLAN clause is used"),
-                                       parser_errposition(cxt->pstate, location)));
+                                        parser_errposition(cxt->pstate, location)));
 
                *pathName = generateJsonTablePathName(cxt);
        }
@@ -662,14 +661,15 @@ transformJsonTable(ParseState *pstate, JsonTable *jt)
 
        registerAllJsonTableColumns(&cxt, jt->columns);
 
-#if 0 /* XXX it's unclear from the standard whether root path name is mandatory or not */
+#if 0                                                  /* XXX it's unclear from the standard whether
+                                                                * root path name is mandatory or not */
        if (plan && plan->plan_type != JSTP_DEFAULT && !rootPathName)
        {
                /* Assign root path name and create corresponding plan node */
                JsonTablePlan *rootNode = makeNode(JsonTablePlan);
                JsonTablePlan *rootPlan = (JsonTablePlan *)
-                               makeJsonTableJoinedPlan(JSTPJ_OUTER, (Node *) rootNode,
-                                                                               (Node *) plan, jt->location);
+               makeJsonTableJoinedPlan(JSTPJ_OUTER, (Node *) rootNode,
+                                                               (Node *) plan, jt->location);
 
                rootPathName = generateJsonTablePathName(&cxt);
 
index a49c985d36e94b5bf4b4f0f35e2294fd15c679f1..4d39cf959450b3392306cc0dadf5e76ed32ac766 100644 (file)
@@ -382,55 +382,56 @@ make_const(ParseState *pstate, A_Const *aconst)
                        break;
 
                case T_Float:
-               {
-                       /* could be an oversize integer as well as a float ... */
-
-                       int64           val64;
-                       char       *endptr;
-
-                       errno = 0;
-                       val64 = strtoi64(aconst->val.fval.fval, &endptr, 10);
-                       if (errno == 0 && *endptr == '\0')
                        {
-                               /*
-                                * It might actually fit in int32. Probably only INT_MIN can
-                                * occur, but we'll code the test generally just to be sure.
-                                */
-                               int32           val32 = (int32) val64;
+                               /* could be an oversize integer as well as a float ... */
 
-                               if (val64 == (int64) val32)
-                               {
-                                       val = Int32GetDatum(val32);
+                               int64           val64;
+                               char       *endptr;
 
-                                       typeid = INT4OID;
-                                       typelen = sizeof(int32);
-                                       typebyval = true;
+                               errno = 0;
+                               val64 = strtoi64(aconst->val.fval.fval, &endptr, 10);
+                               if (errno == 0 && *endptr == '\0')
+                               {
+                                       /*
+                                        * It might actually fit in int32. Probably only INT_MIN
+                                        * can occur, but we'll code the test generally just to be
+                                        * sure.
+                                        */
+                                       int32           val32 = (int32) val64;
+
+                                       if (val64 == (int64) val32)
+                                       {
+                                               val = Int32GetDatum(val32);
+
+                                               typeid = INT4OID;
+                                               typelen = sizeof(int32);
+                                               typebyval = true;
+                                       }
+                                       else
+                                       {
+                                               val = Int64GetDatum(val64);
+
+                                               typeid = INT8OID;
+                                               typelen = sizeof(int64);
+                                               typebyval = FLOAT8PASSBYVAL;    /* int8 and float8 alike */
+                                       }
                                }
                                else
                                {
-                                       val = Int64GetDatum(val64);
-
-                                       typeid = INT8OID;
-                                       typelen = sizeof(int64);
-                                       typebyval = FLOAT8PASSBYVAL;    /* int8 and float8 alike */
+                                       /* arrange to report location if numeric_in() fails */
+                                       setup_parser_errposition_callback(&pcbstate, pstate, aconst->location);
+                                       val = DirectFunctionCall3(numeric_in,
+                                                                                         CStringGetDatum(aconst->val.fval.fval),
+                                                                                         ObjectIdGetDatum(InvalidOid),
+                                                                                         Int32GetDatum(-1));
+                                       cancel_parser_errposition_callback(&pcbstate);
+
+                                       typeid = NUMERICOID;
+                                       typelen = -1;   /* variable len */
+                                       typebyval = false;
                                }
+                               break;
                        }
-                       else
-                       {
-                               /* arrange to report location if numeric_in() fails */
-                               setup_parser_errposition_callback(&pcbstate, pstate, aconst->location);
-                               val = DirectFunctionCall3(numeric_in,
-                                                                                 CStringGetDatum(aconst->val.fval.fval),
-                                                                                 ObjectIdGetDatum(InvalidOid),
-                                                                                 Int32GetDatum(-1));
-                               cancel_parser_errposition_callback(&pcbstate);
-
-                               typeid = NUMERICOID;
-                               typelen = -1;   /* variable len */
-                               typebyval = false;
-                       }
-                       break;
-               }
 
                case T_Boolean:
                        val = BoolGetDatum(boolVal(&aconst->val));
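
Aside: the T_Float branch just above tries the narrowest integer representation first and only falls back to numeric when the literal is not an integer or overflows. A standalone sketch of that narrowing test, using strtoll in place of the backend's strtoi64; classify() is invented for the example.

    #include <errno.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Pick the narrowest representation for a decimal literal. */
    static void classify(const char *literal)
    {
        char *endptr;
        int64_t val64;

        errno = 0;
        val64 = strtoll(literal, &endptr, 10);
        if (errno == 0 && *endptr == '\0')
        {
            int32_t val32 = (int32_t) val64;

            if (val64 == (int64_t) val32)   /* round-trips: fits in int32 */
                printf("%s -> int32 %" PRId32 "\n", literal, val32);
            else
                printf("%s -> int64 %" PRId64 "\n", literal, val64);
        }
        else
            printf("%s -> numeric fallback\n", literal);  /* overflow or non-integer */
    }

    int main(void)
    {
        classify("42");
        classify("-2147483648");    /* INT_MIN still fits int32 */
        classify("2147483648");     /* one past INT_MAX: int64 */
        classify("3.14");           /* not an integer: numeric */
        return 0;
    }
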
index 31a43e034c64448a9d7bb9f66b8ed102e5a59746..f668abfcb336c04b6789c9626cfb78954d29fd18 100644 (file)
@@ -65,7 +65,7 @@ static bool query_contains_extern_params_walker(Node *node, void *context);
  */
 void
 setup_parse_fixed_parameters(ParseState *pstate,
-                                          const Oid *paramTypes, int numParams)
+                                                        const Oid *paramTypes, int numParams)
 {
        FixedParamState *parstate = palloc(sizeof(FixedParamState));
 
@@ -81,7 +81,7 @@ setup_parse_fixed_parameters(ParseState *pstate,
  */
 void
 setup_parse_variable_parameters(ParseState *pstate,
-                                                 Oid **paramTypes, int *numParams)
+                                                               Oid **paramTypes, int *numParams)
 {
        VarParamState *parstate = palloc(sizeof(VarParamState));
 
index 5448cb01fa7c789238287de769c7ee81b64c0870..00469763e88c2a2d53473eed79c01083dc3cfc11 100644 (file)
@@ -1990,7 +1990,7 @@ addRangeTableEntryForTableFunc(ParseState *pstate,
 {
        RangeTblEntry *rte = makeNode(RangeTblEntry);
        char       *refname = alias ? alias->aliasname :
-               pstrdup(tf->functype == TFT_XMLTABLE ? "xmltable" : "json_table");
+       pstrdup(tf->functype == TFT_XMLTABLE ? "xmltable" : "json_table");
        Alias      *eref;
        int                     numaliases;
 
index df2dcbfb99e3491696c5e6316a584133626518c2..8b6e0bd595333df9032ae71bd915e69af17850ea 100644 (file)
@@ -91,8 +91,8 @@ RelationGetPartitionDesc(Relation rel, bool omit_detached)
         * cached descriptor too.  We determine that based on the pg_inherits.xmin
         * that was saved alongside that descriptor: if the xmin that was not in
         * progress for that active snapshot is also not in progress for the
-        * current active snapshot, then we can use it.  Otherwise build one
-        * from scratch.
+        * current active snapshot, then we can use it.  Otherwise build one from
+        * scratch.
         */
        if (omit_detached &&
                rel->rd_partdesc_nodetached &&
index f36c40e852ff80c8bd3ad606088d848fb34fb2c9..2e146aac93b99ae8c2af5488914a0ebdafc38b3b 100644 (file)
@@ -984,7 +984,8 @@ rebuild_database_list(Oid newdb)
        hctl.keysize = sizeof(Oid);
        hctl.entrysize = sizeof(avl_dbase);
        hctl.hcxt = tmpcxt;
-       dbhash = hash_create("autovacuum db hash", 20, &hctl,   /* magic number here FIXME */
+       dbhash = hash_create("autovacuum db hash", 20, &hctl,   /* magic number here
+                                                                                                                        * FIXME */
                                                 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
 
        /* start by inserting the new database */
@@ -1683,12 +1684,12 @@ AutoVacWorkerMain(int argc, char *argv[])
                char            dbname[NAMEDATALEN];
 
                /*
-                * Report autovac startup to the cumulative stats system.  We deliberately do
-                * this before InitPostgres, so that the last_autovac_time will get
-                * updated even if the connection attempt fails.  This is to prevent
-                * autovac from getting "stuck" repeatedly selecting an unopenable
-                * database, rather than making any progress on stuff it can connect
-                * to.
+                * Report autovac startup to the cumulative stats system.  We
+                * deliberately do this before InitPostgres, so that the
+                * last_autovac_time will get updated even if the connection attempt
+                * fails.  This is to prevent autovac from getting "stuck" repeatedly
+                * selecting an unopenable database, rather than making any progress
+                * on stuff it can connect to.
                 */
                pgstat_report_autovac(dbid);
 
index 30682b63b3fa1fd7c26d3597b8730576899da9f2..40601aefd974e4955dded8f2c929286b98145cca 100644 (file)
@@ -826,9 +826,9 @@ StartBackgroundWorker(void)
 
        /*
         * Create a per-backend PGPROC struct in shared memory, except in the
-        * EXEC_BACKEND case where this was done in SubPostmasterMain. We must
-        * do this before we can use LWLocks (and in the EXEC_BACKEND case we
-        * already had to do some stuff with LWLocks).
+        * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
+        * this before we can use LWLocks (and in the EXEC_BACKEND case we already
+        * had to do some stuff with LWLocks).
         */
 #ifndef EXEC_BACKEND
        InitProcess();
index 8beff4a53cdcb9bd6bbabd7791dcc4c3270db844..25e31c42e1662482c016c40ba155ad742e89a4e7 100644 (file)
@@ -81,15 +81,14 @@ typedef struct PgArchData
        int                     pgprocno;               /* pgprocno of archiver process */
 
        /*
-        * Forces a directory scan in pgarch_readyXlog().  Protected by
-        * arch_lck.
+        * Forces a directory scan in pgarch_readyXlog().  Protected by arch_lck.
         */
        bool            force_dir_scan;
 
        slock_t         arch_lck;
 } PgArchData;
 
-char *XLogArchiveLibrary = "";
+char      *XLogArchiveLibrary = "";
 
 
 /* ----------
@@ -143,7 +142,7 @@ static bool pgarch_readyXlog(char *xlog);
 static void pgarch_archiveDone(char *xlog);
 static void pgarch_die(int code, Datum arg);
 static void HandlePgArchInterrupts(void);
-static int ready_file_comparator(Datum a, Datum b, void *arg);
+static int     ready_file_comparator(Datum a, Datum b, void *arg);
 static void LoadArchiveLibrary(void);
 static void call_archive_module_shutdown_callback(int code, Datum arg);
 
@@ -579,13 +578,13 @@ pgarch_readyXlog(char *xlog)
 
        /*
         * If we still have stored file names from the previous directory scan,
-        * try to return one of those.  We check to make sure the status file
-        * is still present, as the archive_command for a previous file may
-        * have already marked it done.
+        * try to return one of those.  We check to make sure the status file is
+        * still present, as the archive_command for a previous file may have
+        * already marked it done.
         */
        while (arch_files->arch_files_size > 0)
        {
-               struct stat     st;
+               struct stat st;
                char            status_file[MAXPGPATH];
                char       *arch_file;
 
@@ -655,8 +654,8 @@ pgarch_readyXlog(char *xlog)
                                                                           CStringGetDatum(basename), NULL) > 0)
                {
                        /*
-                        * Remove the lowest priority file and add the current one to
-                        * the heap.
+                        * Remove the lowest priority file and add the current one to the
+                        * heap.
                         */
                        arch_file = DatumGetCString(binaryheap_remove_first(arch_files->arch_heap));
                        strcpy(arch_file, basename);
@@ -677,8 +676,8 @@ pgarch_readyXlog(char *xlog)
                binaryheap_build(arch_files->arch_heap);
 
        /*
-        * Fill arch_files array with the files to archive in ascending order
-        * of priority.
+        * Fill arch_files array with the files to archive in ascending order of
+        * priority.
         */
        arch_files->arch_files_size = arch_files->arch_heap->bh_size;
        for (int i = 0; i < arch_files->arch_files_size; i++)
@@ -702,10 +701,10 @@ pgarch_readyXlog(char *xlog)
 static int
 ready_file_comparator(Datum a, Datum b, void *arg)
 {
-       char *a_str = DatumGetCString(a);
-       char *b_str = DatumGetCString(b);
-       bool a_history = IsTLHistoryFileName(a_str);
-       bool b_history = IsTLHistoryFileName(b_str);
+       char       *a_str = DatumGetCString(a);
+       char       *b_str = DatumGetCString(b);
+       bool            a_history = IsTLHistoryFileName(a_str);
+       bool            b_history = IsTLHistoryFileName(b_str);
 
        /* Timeline history files always have the highest priority. */
        if (a_history != b_history)
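
Aside: ready_file_comparator gives timeline history files priority over ordinary files and otherwise compares names. A rough standalone analogue as a qsort comparator; is_history() is a stand-in for IsTLHistoryFileName(), and the real code feeds a binary heap rather than qsort.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for IsTLHistoryFileName(): does the name end in ".history"? */
    static bool is_history(const char *name)
    {
        size_t len = strlen(name);

        return len >= 8 && strcmp(name + len - 8, ".history") == 0;
    }

    /* History files sort first; otherwise plain name order. */
    static int ready_file_cmp(const void *a, const void *b)
    {
        const char *a_str = *(const char *const *) a;
        const char *b_str = *(const char *const *) b;
        bool a_hist = is_history(a_str);
        bool b_hist = is_history(b_str);

        if (a_hist != b_hist)
            return a_hist ? -1 : 1;
        return strcmp(a_str, b_str);
    }

    int main(void)
    {
        const char *files[] = {
            "000000010000000000000002",
            "00000002.history",
            "000000010000000000000001",
        };

        qsort(files, 3, sizeof(files[0]), ready_file_cmp);
        for (int i = 0; i < 3; i++)
            puts(files[i]);
        return 0;
    }
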
@@ -793,8 +792,8 @@ HandlePgArchInterrupts(void)
                if (archiveLibChanged)
                {
                        /*
-                        * Call the currently loaded archive module's shutdown callback, if
-                        * one is defined.
+                        * Call the currently loaded archive module's shutdown callback,
+                        * if one is defined.
                         */
                        call_archive_module_shutdown_callback(0, 0);
 
@@ -803,8 +802,8 @@ HandlePgArchInterrupts(void)
                         * load the new one, but there is presently no mechanism for
                         * unloading a library (see the comment above
                         * internal_load_library()).  To deal with this, we simply restart
-                        * the archiver.  The new archive module will be loaded when the new
-                        * archiver process starts up.
+                        * the archiver.  The new archive module will be loaded when the
+                        * new archiver process starts up.
                         */
                        ereport(LOG,
                                        (errmsg("restarting archiver process because value of "
@@ -828,9 +827,8 @@ LoadArchiveLibrary(void)
        memset(&ArchiveContext, 0, sizeof(ArchiveModuleCallbacks));
 
        /*
-        * If shell archiving is enabled, use our special initialization
-        * function.  Otherwise, load the library and call its
-        * _PG_archive_module_init().
+        * If shell archiving is enabled, use our special initialization function.
+        * Otherwise, load the library and call its _PG_archive_module_init().
         */
        if (XLogArchiveLibrary[0] == '\0')
                archive_init = shell_archive_init;
index 8e61b3471ca442fb53f30613c6e0f5822e2b45ec..bf591f048d4d38dd4fc6437dc2c751738338dd33 100644 (file)
@@ -2859,8 +2859,8 @@ pmdie(SIGNAL_ARGS)
 
                        /*
                         * If we reached normal running, we go straight to waiting for
-                        * client backends to exit.  If already in PM_STOP_BACKENDS or
-                        * later state, do not change it.
+                        * client backends to exit.  If already in PM_STOP_BACKENDS or a
+                        * later state, do not change it.
                         */
                        if (pmState == PM_RUN || pmState == PM_HOT_STANDBY)
                                connsAllowed = false;
index 29cf8f18e1a8c8b937675d8fa09aed3ff37a89f6..f99186eab7dbe9b7cf1954559930b4a027ee3a5c 100644 (file)
@@ -75,7 +75,7 @@ static volatile sig_atomic_t startup_progress_timer_expired = false;
 /*
  * Time between progress updates for long-running startup operations.
  */
-int log_startup_progress_interval = 10000;     /* 10 sec */
+int                    log_startup_progress_interval = 10000;  /* 10 sec */
 
 /* Signal handlers */
 static void StartupProcTriggerHandler(SIGNAL_ARGS);
index 77aebb244cb238f425ba2a02980b41babf084508..e926f8c27c760bc05f32b27c3c0341fe7dc010ad 100644 (file)
@@ -297,9 +297,9 @@ HandleWalWriterInterrupts(void)
                /*
                 * Force reporting remaining WAL statistics at process exit.
                 *
-                * Since pgstat_report_wal is invoked with 'force' as false in main loop
-                * to avoid overloading the cumulative stats system, there may exist
-                * unreported stats counters for the WAL writer.
+                * Since pgstat_report_wal is invoked with 'force' as false in main
+                * loop to avoid overloading the cumulative stats system, there may
+                * exist unreported stats counters for the WAL writer.
                 */
                pgstat_report_wal(true);
 
index 6e84f42cb24f661b7dd1c68d83d4883c68e4708d..e1f9df091803259b9e15ed5389d5049dd6b937f7 100644 (file)
@@ -234,8 +234,8 @@ pg_set_regex_collation(Oid collation)
        if (!OidIsValid(collation))
        {
                /*
-                * This typically means that the parser could not resolve a
-                * conflict of implicit collations, so report it that way.
+                * This typically means that the parser could not resolve a conflict
+                * of implicit collations, so report it that way.
                 */
                ereport(ERROR,
                                (errcode(ERRCODE_INDETERMINATE_COLLATION),
@@ -253,9 +253,9 @@ pg_set_regex_collation(Oid collation)
        else
        {
                /*
-                * NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T;
-                * the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not
-                * have to be considered below.
+                * NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T; the
+                * case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not have to
+                * be considered below.
                 */
                pg_regex_locale = pg_newlocale_from_collation(collation);
 
index 7e22f9e48cf6fbf2aaf654331e1f4dc9dec2cf0f..d47ab4c41e3596956a6af115a23d8526f230c70e 100644 (file)
@@ -312,7 +312,7 @@ AddWALInfoToBackupManifest(backup_manifest_info *manifest, XLogRecPtr startptr,
  * Finalize the backup manifest, and send it to the client.
  */
 void
-SendBackupManifest(backup_manifest_info *manifest, bbsink * sink)
+SendBackupManifest(backup_manifest_info *manifest, bbsink *sink)
 {
        uint8           checksumbuf[PG_SHA256_DIGEST_LENGTH];
        char            checksumstringbuf[PG_SHA256_DIGEST_STRING_LENGTH];
index 90daeff09ce8289207af962dc7f3b0452981348b..cabb077240200fa5d4ea0d06580c91c69bee22fb 100644 (file)
@@ -124,18 +124,18 @@ bbsink_copystream_begin_backup(bbsink *sink)
 {
        bbsink_copystream *mysink = (bbsink_copystream *) sink;
        bbsink_state *state = sink->bbs_state;
-       char *buf;
+       char       *buf;
 
        /*
         * Initialize buffer. We ultimately want to send the archive and manifest
         * data by means of CopyData messages where the payload portion of each
         * message begins with a type byte. However, basebackup.c expects the
         * buffer to be aligned, so we can't just allocate one extra byte for the
-        * type byte. Instead, allocate enough extra bytes that the portion of
-        * the buffer we reveal to our callers can be aligned, while leaving room
-        * to slip the type byte in just beforehand.  That will allow us to ship
-        * the data with a single call to pq_putmessage and without needing any
-        * extra copying.
+        * type byte. Instead, allocate enough extra bytes that the portion of the
+        * buffer we reveal to our callers can be aligned, while leaving room to
+        * slip the type byte in just beforehand.  That will allow us to ship the
+        * data with a single call to pq_putmessage and without needing any extra
+        * copying.
         */
        buf = palloc(mysink->base.bbs_buffer_length + MAXIMUM_ALIGNOF);
        mysink->msgbuffer = buf + (MAXIMUM_ALIGNOF - 1);
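
Aside: the comment above describes over-allocating so the caller-visible buffer stays aligned while one spare byte just before it holds the CopyData type byte. A minimal sketch of that pointer layout, assuming malloc alignment of at least ALIGNOF_MAX (a stand-in for MAXIMUM_ALIGNOF; the backend allocates with palloc, which is MAXALIGN'ed).

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ALIGNOF_MAX 8           /* stand-in for MAXIMUM_ALIGNOF */

    int main(void)
    {
        size_t payload_len = 32;
        char *raw, *msg, *payload;

        /* Over-allocate so the payload can sit on an aligned boundary with
         * one spare byte just before it for the message type byte. */
        raw = malloc(payload_len + ALIGNOF_MAX);
        msg = raw + (ALIGNOF_MAX - 1);      /* type byte slot */
        payload = raw + ALIGNOF_MAX;        /* aligned payload area */

        *msg = 'd';                         /* e.g. a CopyData type byte */
        printf("payload aligned: %s\n",
               ((uintptr_t) payload % ALIGNOF_MAX) == 0 ? "yes" : "no");
        free(raw);
        return 0;
    }
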
index 44f28ceba453477792f6d4304303b3aefe05bc1f..ef2b954946ade7d0cf324e3bbe5021edcf628504 100644 (file)
@@ -68,7 +68,7 @@ bbsink_gzip_new(bbsink *next, pg_compress_specification *compress)
        return NULL;                            /* keep compiler quiet */
 #else
        bbsink_gzip *sink;
-       int             compresslevel;
+       int                     compresslevel;
 
        Assert(next != NULL);
 
@@ -118,8 +118,8 @@ static void
 bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name)
 {
        bbsink_gzip *mysink = (bbsink_gzip *) sink;
-       char *gz_archive_name;
-       z_stream *zs = &mysink->zstream;
+       char       *gz_archive_name;
+       z_stream   *zs = &mysink->zstream;
 
        /* Initialize compressor object. */
        memset(zs, 0, sizeof(z_stream));
@@ -129,10 +129,10 @@ bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name)
        zs->avail_out = sink->bbs_next->bbs_buffer_length;
 
        /*
-        * We need to use deflateInit2() rather than deflateInit() here so that
-        * we can request a gzip header rather than a zlib header. Otherwise, we
-        * want to supply the same values that would have been used by default
-        * if we had just called deflateInit().
+        * We need to use deflateInit2() rather than deflateInit() here so that we
+        * can request a gzip header rather than a zlib header. Otherwise, we want
+        * to supply the same values that would have been used by default if we
+        * had just called deflateInit().
         *
         * Per the documentation for deflateInit2, the third argument must be
         * Z_DEFLATED; the fourth argument is the number of "window bits", by
@@ -147,9 +147,8 @@ bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name)
                                errmsg("could not initialize compression library"));
 
        /*
-        * Add ".gz" to the archive name. Note that pg_basebackup -z
-        * produces archives named ".tar.gz" rather than ".tgz", so we match
-        * that here.
+        * Add ".gz" to the archive name. Note that pg_basebackup -z produces
+        * archives named ".tar.gz" rather than ".tgz", so we match that here.
         */
        gz_archive_name = psprintf("%s.gz", archive_name);
        Assert(sink->bbs_next != NULL);
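
Aside: the deflateInit2() recipe the comment describes — windowBits of MAX_WBITS + 16 to request a gzip header, with the remaining arguments at the deflateInit() defaults — can be seen in isolation in this sketch against plain zlib (compile with -lz).

    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
        z_stream zs = {0};

        /*
         * MAX_WBITS + 16 asks zlib for a gzip header instead of a zlib
         * header; 8 and Z_DEFAULT_STRATEGY are the defaults deflateInit()
         * would have used.
         */
        if (deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                         MAX_WBITS + 16, 8, Z_DEFAULT_STRATEGY) != Z_OK)
        {
            fprintf(stderr, "could not initialize compression library\n");
            return 1;
        }

        deflateEnd(&zs);
        return 0;
    }
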
@@ -172,7 +171,7 @@ static void
 bbsink_gzip_archive_contents(bbsink *sink, size_t len)
 {
        bbsink_gzip *mysink = (bbsink_gzip *) sink;
-       z_stream *zs = &mysink->zstream;
+       z_stream   *zs = &mysink->zstream;
 
        /* Compress data from input buffer. */
        zs->next_in = (uint8 *) mysink->base.bbs_buffer;
@@ -180,7 +179,7 @@ bbsink_gzip_archive_contents(bbsink *sink, size_t len)
 
        while (zs->avail_in > 0)
        {
-               int             res;
+               int                     res;
 
                /* Write output data into unused portion of output buffer. */
                Assert(mysink->bytes_written < mysink->base.bbs_next->bbs_buffer_length);
@@ -230,7 +229,7 @@ static void
 bbsink_gzip_end_archive(bbsink *sink)
 {
        bbsink_gzip *mysink = (bbsink_gzip *) sink;
-       z_stream *zs = &mysink->zstream;
+       z_stream   *zs = &mysink->zstream;
 
        /* There is no more data available. */
        zs->next_in = (uint8 *) mysink->base.bbs_buffer;
@@ -238,7 +237,7 @@ bbsink_gzip_end_archive(bbsink *sink)
 
        while (1)
        {
-               int             res;
+               int                     res;
 
                /* Write output data into unused portion of output buffer. */
                Assert(mysink->bytes_written < mysink->base.bbs_next->bbs_buffer_length);
@@ -248,8 +247,8 @@ bbsink_gzip_end_archive(bbsink *sink)
                        mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written;
 
                /*
-                * As bbsink_gzip_archive_contents, but pass Z_FINISH since there
-                * is no more input.
+                * As bbsink_gzip_archive_contents, but pass Z_FINISH since there is
+                * no more input.
                 */
                res = deflate(zs, Z_FINISH);
                if (res == Z_STREAM_ERROR)
@@ -260,8 +259,8 @@ bbsink_gzip_end_archive(bbsink *sink)
                        mysink->base.bbs_next->bbs_buffer_length - zs->avail_out;
 
                /*
-                * Apparently we had no data in the output buffer and deflate()
-                * was not able to add any. We must be done.
+                * Apparently we had no data in the output buffer and deflate() was
+                * not able to add any. We must be done.
                 */
                if (mysink->bytes_written == 0)
                        break;
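
Aside: the end-of-archive loop above keeps calling deflate() with Z_FINISH until the stream is fully drained. A self-contained sketch of that drain pattern against plain zlib; buffer sizes are arbitrary for the example (compile with -lz).

    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
        static const char in[] = "end of archive";
        unsigned char out[64];
        z_stream zs = {0};
        int res;

        if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
            return 1;

        zs.next_in = (unsigned char *) in;
        zs.avail_in = sizeof(in) - 1;

        /* Keep flushing until deflate() reports the stream is complete. */
        do
        {
            zs.next_out = out;
            zs.avail_out = sizeof(out);
            res = deflate(&zs, Z_FINISH);
            if (res == Z_STREAM_ERROR)
                return 1;
            fwrite(out, 1, sizeof(out) - zs.avail_out, stdout);
        } while (res != Z_STREAM_END);

        deflateEnd(&zs);
        return 0;
    }
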
index 65e774eff62a7405317ad4c89ab1623f4463adc8..c9d19b6c448661c25f32c9804f3f7c51077ccec8 100644 (file)
@@ -68,7 +68,7 @@ bbsink_lz4_new(bbsink *next, pg_compress_specification *compress)
        return NULL;                            /* keep compiler quiet */
 #else
        bbsink_lz4 *sink;
-       int             compresslevel;
+       int                     compresslevel;
 
        Assert(next != NULL);
 
index 54e6829d2beb63ca47dccb588e541f5d7fa58ee9..9b4847d90cc9c5eb249a9e37c0221de3d5fcb7df 100644 (file)
@@ -77,10 +77,11 @@ bbsink_server_new(bbsink *next, char *pathname)
 
        /*
         * It's not a good idea to store your backups in the same directory that
-        * you're backing up. If we allowed a relative path here, that could easily
-        * happen accidentally, so we don't. The user could still accomplish the
-        * same thing by including the absolute path to $PGDATA in the pathname,
-        * but that's likely an intentional bad decision rather than an accident.
+        * you're backing up. If we allowed a relative path here, that could
+        * easily happen accidentally, so we don't. The user could still
+        * accomplish the same thing by including the absolute path to $PGDATA in
+        * the pathname, but that's likely an intentional bad decision rather than
+        * an accident.
         */
        if (!is_absolute_path(pathname))
                ereport(ERROR,
@@ -90,14 +91,15 @@ bbsink_server_new(bbsink *next, char *pathname)
        switch (pg_check_dir(pathname))
        {
                case 0:
+
                        /*
-                        * Does not exist, so create it using the same permissions we'd use
-                        * for a new subdirectory of the data directory itself.
+                        * Does not exist, so create it using the same permissions we'd
+                        * use for a new subdirectory of the data directory itself.
                         */
                        if (MakePGDirectory(pathname) < 0)
                                ereport(ERROR,
-                                                (errcode_for_file_access(),
-                                                 errmsg("could not create directory \"%s\": %m", pathname)));
+                                               (errcode_for_file_access(),
+                                                errmsg("could not create directory \"%s\": %m", pathname)));
                        break;
 
                case 1:
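
Aside: a rough standalone analogue of the absolute-path check and the case-0 directory creation above, for Unix only; path_is_absolute() and the 0700 mode are simplifications of the backend's is_absolute_path() and MakePGDirectory().

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    /* Simplified stand-in for is_absolute_path() on Unix. */
    static int path_is_absolute(const char *p)
    {
        return p[0] == '/';
    }

    int main(int argc, char **argv)
    {
        const char *pathname = (argc > 1) ? argv[1] : "/tmp/backup_target";

        if (!path_is_absolute(pathname))
        {
            fprintf(stderr, "backup target must be an absolute path\n");
            return 1;
        }

        /* 0700 is illustrative; the backend derives the mode from the
         * data directory via MakePGDirectory(). */
        if (mkdir(pathname, 0700) < 0 && errno != EEXIST)
        {
            fprintf(stderr, "could not create directory \"%s\": %s\n",
                    pathname, strerror(errno));
            return 1;
        }
        return 0;
    }
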
index 243a2bacfef5484a36515393c339bdee74156100..9f73457320ed3fd633592392842816fd6fe2b01e 100644 (file)
@@ -80,9 +80,9 @@ BaseBackupAddTarget(char *name,
                        /*
                         * We found one, so update it.
                         *
-                        * It is probably not a great idea to call BaseBackupAddTarget
-                        * for the same name multiple times, but if it happens, this
-                        * seems like the sanest behavior.
+                        * It is probably not a great idea to call BaseBackupAddTarget for
+                        * the same name multiple times, but if it happens, this seems
+                        * like the sanest behavior.
                         */
                        ttype->check_detail = check_detail;
                        ttype->get_sink = get_sink;
@@ -91,9 +91,9 @@ BaseBackupAddTarget(char *name,
        }
 
        /*
-        * We use TopMemoryContext for allocations here to make sure that the
-        * data we need doesn't vanish under us; that's also why we copy the
-        * target name into a newly-allocated chunk of memory.
+        * We use TopMemoryContext for allocations here to make sure that the data
+        * we need doesn't vanish under us; that's also why we copy the target
+        * name into a newly-allocated chunk of memory.
         */
        oldcontext = MemoryContextSwitchTo(TopMemoryContext);
        ttype = palloc(sizeof(BaseBackupTargetType));
index d767b26f4e36faccd1b351d81ab363f002b3f6a6..b23a37b29ed2052f285a2ce2584bffd62fa6a6c2 100644 (file)
@@ -108,9 +108,9 @@ bbsink_zstd_begin_backup(bbsink *sink)
        if ((compress->options & PG_COMPRESSION_OPTION_WORKERS) != 0)
        {
                /*
-                * On older versions of libzstd, this option does not exist, and trying
-                * to set it will fail. Similarly for newer versions if they are
-                * compiled without threading support.
+                * On older versions of libzstd, this option does not exist, and
+                * trying to set it will fail. Similarly for newer versions if they
+                * are compiled without threading support.
                 */
                ret = ZSTD_CCtx_setParameter(mysink->cctx, ZSTD_c_nbWorkers,
                                                                         compress->workers);
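
Aside: setting the zstd worker count can fail on older or non-threaded libzstd builds, as the comment notes, so the return code must be checked. A minimal sketch against the public libzstd API (compile with -lzstd).

    #include <stdio.h>
    #include <zstd.h>

    int main(void)
    {
        ZSTD_CCtx *cctx = ZSTD_createCCtx();
        size_t ret = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 2);

        if (ZSTD_isError(ret))
            fprintf(stderr, "could not set workers: %s\n",
                    ZSTD_getErrorName(ret));

        ZSTD_freeCCtx(cctx);
        return 0;
    }
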
index 6303647fe0f6f2ceb5cc11f7c2b12908008d362b..aa2427ba73f4feb37f61393da77169c1fca3fc31 100644 (file)
@@ -92,7 +92,7 @@ LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState *recor
 {
        XLogRecordBuffer buf;
        TransactionId txid;
-       RmgrData rmgr;
+       RmgrData        rmgr;
 
        buf.origptr = ctx->reader->ReadRecPtr;
        buf.endptr = ctx->reader->EndRecPtr;
index 0adb2d1d665539d4710393ae9c0505c7ce94300c..6a4b2d43063d77d632aa6fa1c905bd1e73b847ea 100644 (file)
@@ -344,9 +344,9 @@ retry:
        }
 
        /*
-        * We don't allow invoking more sync workers once we have reached the sync
-        * worker limit per subscription. So, just return silently as we might get
-        * here because of an otherwise harmless race condition.
+        * We don't allow invoking more sync workers once we have reached the
+        * sync worker limit per subscription. So, just return silently as we
+        * might get here because of an otherwise harmless race condition.
         */
        if (OidIsValid(relid) && nsyncworkers >= max_sync_workers_per_subscription)
        {
index 6887dc23f616f02309e8634dff651d1f01fad330..da7bd1321cb779b6d93947f6f8da5d0abcc9e0b1 100644 (file)
@@ -646,8 +646,8 @@ ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create,
        }
 
        /*
-        * If the cache wasn't hit or it yielded a "does-not-exist" and we want
-        * to create an entry.
+        * If the cache wasn't hit or it yielded a "does-not-exist" and we want to
+        * create an entry.
         */
 
        /* search the lookup table */
index 49ceec3bdc849f843b7a6bbb7e7773d6446f432c..61aee61b8eeca9b3e5300ffeee4aec6c8b47284f 100644 (file)
@@ -786,11 +786,11 @@ fetch_remote_table_info(char *nspname, char *relname,
 
                /*
                 * Fetch info about column lists for the relation (from all the
-                * publications). We unnest the int2vector values, because that
-                * makes it easier to combine lists by simply adding the attnums
-                * to a new bitmap (without having to parse the int2vector data).
-                * This preserves NULL values, so that if one of the publications
-                * has no column list, we'll know that.
+                * publications). We unnest the int2vector values, because that makes
+                * it easier to combine lists by simply adding the attnums to a new
+                * bitmap (without having to parse the int2vector data). This
+                * preserves NULL values, so that if one of the publications has no
+                * column list, we'll know that.
                 */
                resetStringInfo(&cmd);
                appendStringInfo(&cmd,
@@ -816,15 +816,15 @@ fetch_remote_table_info(char *nspname, char *relname,
                                                        nspname, relname, pubres->err)));
 
                /*
-                * Merge the column lists (from different publications) by creating
-                * a single bitmap with all the attnums. If we find a NULL value,
-                * that means one of the publications has no column list for the
-                * table we're syncing.
+                * Merge the column lists (from different publications) by creating a
+                * single bitmap with all the attnums. If we find a NULL value, that
+                * means one of the publications has no column list for the table
+                * we're syncing.
                 */
                slot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple);
                while (tuplestore_gettupleslot(pubres->tuplestore, true, false, slot))
                {
-                       Datum   cfval = slot_getattr(slot, 1, &isnull);
+                       Datum           cfval = slot_getattr(slot, 1, &isnull);
 
                        /* NULL means empty column list, so we're done. */
                        if (isnull)
@@ -835,7 +835,7 @@ fetch_remote_table_info(char *nspname, char *relname,
                        }
 
                        included_cols = bms_add_member(included_cols,
-                                                                               DatumGetInt16(cfval));
+                                                                                  DatumGetInt16(cfval));
 
                        ExecClearTuple(slot);
                }
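
Aside: the merge logic the comments above describe — union all per-publication attnums, but treat a NULL column list as "publish every column" — can be sketched with a plain bitmask standing in for a Bitmapset; the publication contents are invented for the example.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Per-publication column lists as attnum arrays, -1 terminated;
         * a NULL entry would model a publication with no column list. */
        const int pub1[] = {1, 2, -1};
        const int pub2[] = {2, 3, -1};
        const int pub3[] = {5, -1};
        const int *pubs[] = {pub1, pub2, pub3};
        uint64_t included = 0;
        bool all_columns = false;

        for (int p = 0; p < 3; p++)
        {
            if (pubs[p] == NULL)
            {
                all_columns = true;     /* no list here: publish everything */
                break;
            }
            for (const int *a = pubs[p]; *a >= 0; a++)
                included |= UINT64_C(1) << *a;
        }

        if (all_columns)
            puts("all columns replicated");
        else
            printf("attnum bitmap: 0x%llx\n", (unsigned long long) included);
        return 0;
    }
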
@@ -1056,8 +1056,8 @@ copy_table(Relation rel)
                                                 quote_qualified_identifier(lrel.nspname, lrel.relname));
 
                /*
-                * XXX Do we need to list the columns in all cases? Maybe we're replicating
-                * all columns?
+                * XXX Do we need to list the columns in all cases? Maybe we're
+                * replicating all columns?
                 */
                for (int i = 0; i < lrel.natts; i++)
                {
@@ -1321,10 +1321,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
 
        /*
         * COPY FROM does not honor RLS policies.  That is not a problem for
-        * subscriptions owned by roles with BYPASSRLS privilege (or superuser, who
-        * has it implicitly), but other roles should not be able to circumvent
-        * RLS.  Disallow logical replication into RLS enabled relations for such
-        * roles.
+        * subscriptions owned by roles with BYPASSRLS privilege (or superuser,
+        * who has it implicitly), but other roles should not be able to
+        * circumvent RLS.  Disallow logical replication into RLS enabled
+        * relations for such roles.
         */
        if (check_enable_rls(RelationGetRelid(rel), InvalidOid, false) == RLS_ENABLED)
                ereport(ERROR,
index 7da7823c3524b945aa5da05623d37f2fba52e7b1..725a21b55ece97a5bbf2aab3f502d9de94110562 100644 (file)
@@ -1608,8 +1608,8 @@ GetRelationIdentityOrPK(Relation rel)
 static void
 TargetPrivilegesCheck(Relation rel, AclMode mode)
 {
-       Oid                             relid;
-       AclResult               aclresult;
+       Oid                     relid;
+       AclResult       aclresult;
 
        relid = RelationGetRelid(rel);
        aclresult = pg_class_aclcheck(relid, GetUserId(), mode);
index 406ad84e1d649e837e4dc24d748eaaa9f684887a..42c06af2391d2109786375f09c172b66d8fdbdba 100644 (file)
@@ -174,8 +174,8 @@ typedef struct RelationSyncEntry
        Bitmapset  *columns;
 
        /*
-        * Private context to store additional data for this entry - state for
-        * the row filter expressions, column list, etc.
+        * Private context to store additional data for this entry - state for the
+        * row filter expressions, column list, etc.
         */
        MemoryContext entry_cxt;
 } RelationSyncEntry;
@@ -206,9 +206,8 @@ typedef struct RelationSyncEntry
  */
 typedef struct PGOutputTxnData
 {
-       bool            sent_begin_txn; /* flag indicating whether BEGIN has
-                                                                * been sent */
-}              PGOutputTxnData;
+       bool            sent_begin_txn; /* flag indicating whether BEGIN has been sent */
+} PGOutputTxnData;
 
 /* Map used to remember which relation schemas we sent. */
 static HTAB *RelationSyncCache = NULL;
@@ -511,9 +510,9 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
  * using bandwidth on something with little/no use for logical replication.
  */
 static void
-pgoutput_begin_txn(LogicalDecodingContext * ctx, ReorderBufferTXN * txn)
+pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
 {
-       PGOutputTxnData *txndata = MemoryContextAllocZero(ctx->context,
+       PGOutputTxnData *txndata = MemoryContextAllocZero(ctx->context,
                                                                                                          sizeof(PGOutputTxnData));
 
        txn->output_plugin_private = txndata;
@@ -987,7 +986,8 @@ pgoutput_column_list_init(PGOutputData *data, List *publications,
         *
         * All the given publication-table mappings must be checked.
         *
-        * Multiple publications might have multiple column lists for this relation.
+        * Multiple publications might have multiple column lists for this
+        * relation.
         *
         * FOR ALL TABLES and FOR ALL TABLES IN SCHEMA implies "don't use column
         * list" so it takes precedence.
@@ -1005,8 +1005,9 @@ pgoutput_column_list_init(PGOutputData *data, List *publications,
                bool            pub_no_list = true;
 
                /*
-                * If the publication is FOR ALL TABLES then it is treated the same as if
-                * there are no column lists (even if other publications have a list).
+                * If the publication is FOR ALL TABLES then it is treated the same as
+                * if there are no column lists (even if other publications have a
+                * list).
                 */
                if (!pub->alltables)
                {
@@ -1014,8 +1015,8 @@ pgoutput_column_list_init(PGOutputData *data, List *publications,
                         * Check for the presence of a column list in this publication.
                         *
                         * Note: If we find no pg_publication_rel row, it's a publication
-                        * defined for a whole schema, so it can't have a column list, just
-                        * like a FOR ALL TABLES publication.
+                        * defined for a whole schema, so it can't have a column list,
+                        * just like a FOR ALL TABLES publication.
                         */
                        cftuple = SearchSysCache2(PUBLICATIONRELMAP,
                                                                          ObjectIdGetDatum(entry->publish_as_relid),
@@ -1221,9 +1222,9 @@ pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot,
         * For updates, we can have only a new tuple when none of the replica
         * identity columns changed and none of those columns have external data
         * but we still need to evaluate the row filter for the new tuple as the
-        * existing values of those columns might not match the filter. Also, users
-        * can use constant expressions in the row filter, so we need to
-        * evaluate it for the new tuple anyway.
+        * existing values of those columns might not match the filter. Also,
+        * users can use constant expressions in the row filter, so we need to
+        * evaluate it for the new tuple anyway.
         *
         * For deletes, we only have the old tuple.
         */
@@ -1674,8 +1675,7 @@ pgoutput_message(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
                xid = txn->xid;
 
        /*
-        * Output BEGIN if we haven't yet. Avoid for non-transactional
-        * messages.
+        * Output BEGIN if we haven't yet. Avoid for non-transactional messages.
         */
        if (transactional)
        {
@@ -2079,15 +2079,15 @@ get_rel_sync_entry(PGOutputData *data, Relation relation)
 
                        /*
                         * Under what relid should we publish changes in this publication?
-                        * We'll use the top-most relid across all publications. Also track
-                        * the ancestor level for this publication.
+                        * We'll use the top-most relid across all publications. Also
+                        * track the ancestor level for this publication.
                         */
-                       Oid     pub_relid = relid;
-                       int     ancestor_level = 0;
+                       Oid                     pub_relid = relid;
+                       int                     ancestor_level = 0;
 
                        /*
-                        * If this is a FOR ALL TABLES publication, pick the partition root
-                        * and set the ancestor level accordingly.
+                        * If this is a FOR ALL TABLES publication, pick the partition
+                        * root and set the ancestor level accordingly.
                         */
                        if (pub->alltables)
                        {
@@ -2156,18 +2156,18 @@ get_rel_sync_entry(PGOutputData *data, Relation relation)
 
                                /*
                                 * We want to publish the changes as the top-most ancestor
-                                * across all publications. So we need to check if the
-                                * already calculated level is higher than the new one. If
-                                * yes, we can ignore the new value (as it's a child).
-                                * Otherwise the new value is an ancestor, so we keep it.
+                                * across all publications. So we need to check if the already
+                                * calculated level is higher than the new one. If yes, we can
+                                * ignore the new value (as it's a child). Otherwise the new
+                                * value is an ancestor, so we keep it.
                                 */
                                if (publish_ancestor_level > ancestor_level)
                                        continue;
 
                                /*
-                                * If we found an ancestor higher up in the tree, discard
-                                * the list of publications through which we replicate it,
-                                * and use the new ancestor.
+                                * If we found an ancestor higher up in the tree, discard the
+                                * list of publications through which we replicate it, and use
+                                * the new ancestor.
                                 */
                                if (publish_ancestor_level < ancestor_level)
                                {
index 5c778f5333b0d3b169c5ba49078c13c2bf082202..e5c2102bcd566d25aa72067ee2e39832d060580f 100644 (file)
@@ -504,8 +504,8 @@ retry:
        MyReplicationSlot = s;
 
        /*
-        * The call to pgstat_acquire_replslot() protects against stats for
-        * different slot, from before a restart or such, being present during
+        * The call to pgstat_acquire_replslot() protects against stats for a
+        * different slot, from before a restart or such, being present during
         * pgstat_report_replslot().
         */
        if (SlotIsLogical(s))
index 3c9411e22130761c1f89cff9b284ab1cbdfe00ef..b369d28a806b69028ed0da0d7955aaf8e439f7f7 100644 (file)
@@ -1406,9 +1406,9 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS)
        if (!has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS))
        {
                /*
-                * Only superusers and roles with privileges of pg_read_all_stats
-                * can see details. Other users only get the pid value to know whether
-                * it is a WAL receiver, but no details.
+                * Only superusers and roles with privileges of pg_read_all_stats can
+                * see details. Other users only get the pid value to know whether it
+                * is a WAL receiver, but no details.
                 */
                MemSet(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1));
        }
index c6c196b2fab28746b944dcaffb96727b5f9a1a71..e42671722a808006d340f3258026d26e7ad40d21 100644 (file)
@@ -1505,9 +1505,9 @@ WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId
         * When skipping empty transactions in synchronous replication, we send a
         * keepalive message to avoid delaying such transactions.
         *
-        * It is okay to check the sync_standbys_defined flag without lock here as
-        * in the worst case we will just send an extra keepalive message when it
-        * is really not required.
+        * It is okay to check the sync_standbys_defined flag without lock here as in
+        * the worst case we will just send an extra keepalive message when it is
+        * really not required.
         */
        if (skipped_xact &&
                SyncRepRequested() &&
index b6f31849616f4507af0343d08388dbc90a09c4c2..c1c27e67d474f5feb01b8cc98f74c8a2c8e19251 100644 (file)
@@ -354,7 +354,7 @@ statext_dependencies_build(StatsBuildData *data)
 
        /* result */
        MVDependencies *dependencies = NULL;
-       MemoryContext   cxt;
+       MemoryContext cxt;
 
        Assert(data->nattnums >= 2);
 
index e02ea3a977c9abb07278a5daded38654362c82ed..ae13011d27597672e76a2fa8dd90034b27d2def9 100644 (file)
@@ -673,9 +673,8 @@ ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
                {
                        /*
                         * It's now safe to pin the buffer.  We can't pin first and ask
-                        * questions later, because it might confuse code paths
-                        * like InvalidateBuffer() if we pinned a random non-matching
-                        * buffer.
+                        * questions later, because it might confuse code paths like
+                        * InvalidateBuffer() if we pinned a random non-matching buffer.
                         */
                        if (have_private_ref)
                                PinBuffer(bufHdr, NULL);        /* bump pin count */
@@ -2945,10 +2944,10 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
        if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
        {
                /*
-                * Not every table AM uses BLCKSZ wide fixed size blocks.
-                * Therefore tableam returns the size in bytes - but for the
-                * purpose of this routine, we want the number of blocks.
-                * Therefore divide, rounding up.
+                * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
+                * tableam returns the size in bytes - but for the purpose of this
+                * routine, we want the number of blocks. Therefore divide, rounding
+                * up.
                 */
                uint64          szbytes;
 
@@ -2958,7 +2957,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
        }
        else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
        {
-                       return smgrnblocks(RelationGetSmgr(relation), forkNum);
+               return smgrnblocks(RelationGetSmgr(relation), forkNum);
        }
        else
                Assert(false);
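
Aside: the divide-rounding-up conversion from a byte size to a block count reads, in isolation (8192 standing in for the default BLCKSZ):

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE 8192

    /* Round up: any partial block at the end still counts as a block. */
    static uint64_t bytes_to_blocks(uint64_t szbytes)
    {
        return (szbytes + (BLOCK_SIZE - 1)) / BLOCK_SIZE;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long) bytes_to_blocks(0));      /* 0 */
        printf("%llu\n", (unsigned long long) bytes_to_blocks(1));      /* 1 */
        printf("%llu\n", (unsigned long long) bytes_to_blocks(8192));   /* 1 */
        printf("%llu\n", (unsigned long long) bytes_to_blocks(8193));   /* 2 */
        return 0;
    }
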
@@ -3707,9 +3706,9 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum,
        BufferAccessStrategy bstrategy_dst;
 
        /*
-        * In general, we want to write WAL whenever wal_level > 'minimal', but
-        * we can skip it when copying any fork of an unlogged relation other
-        * than the init fork.
+        * In general, we want to write WAL whenever wal_level > 'minimal', but we
+        * can skip it when copying any fork of an unlogged relation other than
+        * the init fork.
         */
        use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
 
@@ -3779,9 +3778,9 @@ void
 CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
                                                  bool permanent)
 {
-       Relation                src_rel;
-       Relation                dst_rel;
-       char                    relpersistence;
+       Relation        src_rel;
+       Relation        dst_rel;
+       char            relpersistence;
 
        /* Set the relpersistence. */
        relpersistence = permanent ?
@@ -3789,9 +3788,9 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
 
        /*
         * We can't use a real relcache entry for a relation in some other
-        * database, but since we're only going to access the fields related
-        * to physical storage, a fake one is good enough. If we didn't do this
-        * and used the smgr layer directly, we would have to worry about
+        * database, but since we're only going to access the fields related to
+        * physical storage, a fake one is good enough. If we didn't do this and
+        * used the smgr layer directly, we would have to worry about
         * invalidations.
         */
        src_rel = CreateFakeRelcacheEntry(src_rnode);
index 25c310f6757062a0eb62ca1f4dae56fb1c92d986..ca22336e35d5672a46203ae9ae06b1c1ca2dedc9 100644 (file)
@@ -1172,8 +1172,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
                 *
                 * We have to sort them logically, because in KnownAssignedXidsAdd we
                 * call TransactionIdFollowsOrEquals and so on. But we know these XIDs
-                * come from RUNNING_XACTS, which means there are only normal XIDs from
-                * the same epoch, so this is safe.
+                * come from RUNNING_XACTS, which means there are only normal XIDs
+                * from the same epoch, so this is safe.
                 */
                qsort(xids, nxids, sizeof(TransactionId), xidLogicalComparator);
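
Aside: "sort them logically" relies on wraparound-aware XID comparison: TransactionIdPrecedes() treats a as before b when the signed 32-bit difference is negative, which is well-defined only for XIDs within 2^31 of each other — true for normal XIDs of a single epoch, as the comment argues. A sketch of that comparison:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Modulo-2^32 "precedes" test in the style of TransactionIdPrecedes();
     * the real function also special-cases permanent XIDs. */
    static bool xid_precedes(uint32_t a, uint32_t b)
    {
        return (int32_t) (a - b) < 0;
    }

    int main(void)
    {
        printf("%d\n", xid_precedes(100, 200));             /* 1 */
        printf("%d\n", xid_precedes(UINT32_MAX - 5, 10));   /* 1: wrapped */
        printf("%d\n", xid_precedes(10, UINT32_MAX - 5));   /* 0 */
        return 0;
    }
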
 
index 603cf9b0fa7ec4083f7aec247dd85ec3a87741f5..6139c622e0b28205cf11ea888c52a538159414d6 100644 (file)
@@ -534,9 +534,9 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait,
        }
 
        /*
-        * If the caller has requested force flush or we have written more than 1/4
-        * of the ring size, mark it as written in shared memory and notify the
-        * receiver.
+        * If the caller has requested force flush or we have written more than
+        * 1/4 of the ring size, mark it as written in shared memory and notify
+        * the receiver.
         */
        if (force_flush || mqh->mqh_send_pending > (mq->mq_ring_size >> 2))
        {
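
The rewrapped comment here documents a batching heuristic: the shared write pointer (and the receiver's latch) is only updated when the caller forces a flush or more than a quarter of the ring is pending. A toy illustration with made-up numbers, assuming PostgreSQL's Size and bool typedefs:

    Size        ring_size = 8192;   /* hypothetical ring size */
    Size        pending = 2100;     /* bytes written but not yet flushed */
    bool        force_flush = false;

    if (force_flush || pending > (ring_size >> 2))
    {
        /* 2100 > 8192/4 = 2048, so publish the bytes and set the latch */
    }

The right shift is just ring_size / 4; deferring the update avoids one latch wakeup per small message.
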
index 2861c03e04bd594ef84287715fa325596f4a7c4a..59310b708fbd1f61ed7e394767b9b0389c48675e 100644
@@ -208,10 +208,11 @@ SInvalShmemSize(void)
 
        /*
         * In Hot Standby mode, the startup process requests a procState array
-        * slot using InitRecoveryTransactionEnvironment(). Even though MaxBackends
-        * doesn't account for the startup process, it is guaranteed to get a
-        * free slot. This is because the autovacuum launcher and worker processes,
-        * which are included in MaxBackends, are not started in Hot Standby mode.
+        * slot using InitRecoveryTransactionEnvironment(). Even though
+        * MaxBackends doesn't account for the startup process, it is guaranteed
+        * to get a free slot. This is because the autovacuum launcher and worker
+        * processes, which are included in MaxBackends, are not started in Hot
+        * Standby mode.
         */
        size = add_size(size, mul_size(sizeof(ProcState), MaxBackends));
 
index cc15396789b315f096045df27a089c176f3b8dbc..a3d367db5118e3d398419464717469b9e81d6d16 100644
@@ -795,7 +795,7 @@ PageRepairFragmentation(Page page)
        if (finalusedlp != nline)
        {
                /* The last line pointer is not the last used line pointer */
-               int             nunusedend = nline - finalusedlp;
+               int                     nunusedend = nline - finalusedlp;
 
                Assert(nunused >= nunusedend && nunusedend > 0);
 
index 304cce135aa20ee84b859e6a2d9463c160186ca7..8b6b5bbaaab4a14f8833b2e42738b9eaf31f8db6 100644
@@ -655,7 +655,7 @@ pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree,
                ResetUsage();
 
        query = parse_analyze_fixedparams(parsetree, query_string, paramTypes, numParams,
-                                                 queryEnv);
+                                                                         queryEnv);
 
        if (log_parser_stats)
                ShowUsage("PARSE ANALYSIS STATISTICS");
@@ -694,7 +694,7 @@ pg_analyze_and_rewrite_varparams(RawStmt *parsetree,
                ResetUsage();
 
        query = parse_analyze_varparams(parsetree, query_string, paramTypes, numParams,
-                                                 queryEnv);
+                                                                       queryEnv);
 
        /*
         * Check all parameter types got determined.
@@ -1164,7 +1164,7 @@ exec_simple_query(const char *query_string)
                        oldcontext = MemoryContextSwitchTo(MessageContext);
 
                querytree_list = pg_analyze_and_rewrite_fixedparams(parsetree, query_string,
-                                                                                               NULL, 0, NULL);
+                                                                                                                       NULL, 0, NULL);
 
                plantree_list = pg_plan_queries(querytree_list, query_string,
                                                                                CURSOR_OPT_PARALLEL_OK, NULL);
@@ -4377,11 +4377,12 @@ PostgresMain(const char *dbname, const char *username)
                 * Note: this includes fflush()'ing the last of the prior output.
                 *
                 * This is also a good time to flush out collected statistics to the
-                * cumulative stats system, and to update the PS stats display.  We avoid doing
-                * those every time through the message loop because it'd slow down
-                * processing of batched messages, and because we don't want to report
-                * uncommitted updates (that confuses autovacuum).  The notification
-                * processor wants a call too, if we are not in a transaction block.
+                * cumulative stats system, and to update the PS stats display.  We
+                * avoid doing those every time through the message loop because it'd
+                * slow down processing of batched messages, and because we don't want
+                * to report uncommitted updates (that confuses autovacuum).  The
+                * notification processor wants a call too, if we are not in a
+                * transaction block.
                 *
                 * Also, if an idle timeout is enabled, start the timer for that.
                 */
@@ -4415,7 +4416,7 @@ PostgresMain(const char *dbname, const char *username)
                        }
                        else
                        {
-                               long stats_timeout;
+                               long            stats_timeout;
 
                                /*
                                 * Process incoming notifies (including self-notifies), if
@@ -4470,8 +4471,9 @@ PostgresMain(const char *dbname, const char *username)
 
                /*
                 * (4) turn off the idle-in-transaction, idle-session and
-                * idle-stats-update timeouts if active.  We do this before step (5) so
-                * that any last-moment timeout is certain to be detected in step (5).
+                * idle-stats-update timeouts if active.  We do this before step (5)
+                * so that any last-moment timeout is certain to be detected in step
+                * (5).
                 *
                 * At most one of these timeouts will be active, so there's no need to
                 * worry about combining the timeout.c calls into one.
index 78e951a6bca9c1a8c7eb30f67ebcdc70aa31fab1..2570e5e6301650322f5b9439bcf69f713ddf6e7a 100644
@@ -3996,7 +3996,8 @@ hash_array(PG_FUNCTION_ARGS)
 
                        /*
                         * Make fake type cache entry structure.  Note that we can't just
-                        * modify typentry, since that points directly into the type cache.
+                        * modify typentry, since that points directly into the type
+                        * cache.
                         */
                        record_typentry = palloc0(sizeof(*record_typentry));
                        record_typentry->type_id = element_type;
index 0576764ac4b3780286e1a0b8f19269929e0737d2..b4a2c8d21976f4b8dc41cad04aaf10acec30679c 100644
@@ -112,8 +112,8 @@ calculate_database_size(Oid dbOid)
        AclResult       aclresult;
 
        /*
-        * User must have connect privilege for target database or have privileges of
-        * pg_read_all_stats
+        * User must have connect privilege for target database or have privileges
+        * of pg_read_all_stats
         */
        aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT);
        if (aclresult != ACLCHECK_OK &&
@@ -196,9 +196,9 @@ calculate_tablespace_size(Oid tblspcOid)
        AclResult       aclresult;
 
        /*
-        * User must have privileges of pg_read_all_stats or have CREATE privilege for
-        * target tablespace, either explicitly granted or implicitly because it
-        * is default for current database.
+        * User must have privileges of pg_read_all_stats or have CREATE privilege
+        * for target tablespace, either explicitly granted or implicitly because
+        * it is default for current database.
         */
        if (tblspcOid != MyDatabaseTableSpace &&
                !has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS))
index 97a4544ffc630e1234dd387c9ebb0bcd58ec586b..e909c1a200cba783310636dda6a02b303cdfeaf7 100644
@@ -898,7 +898,7 @@ static const KeyWord DCH_keywords[] = {
        {"month", 5, DCH_month, false, FROM_CHAR_DATE_GREGORIAN},
        {"mon", 3, DCH_mon, false, FROM_CHAR_DATE_GREGORIAN},
        {"ms", 2, DCH_MS, true, FROM_CHAR_DATE_NONE},
-       {"of", 2, DCH_OF, false, FROM_CHAR_DATE_NONE},  /* o */
+       {"of", 2, DCH_OF, false, FROM_CHAR_DATE_NONE},  /* o */
        {"p.m.", 4, DCH_p_m, false, FROM_CHAR_DATE_NONE},       /* p */
        {"pm", 2, DCH_pm, false, FROM_CHAR_DATE_NONE},
        {"q", 1, DCH_Q, true, FROM_CHAR_DATE_NONE}, /* q */
@@ -906,7 +906,7 @@ static const KeyWord DCH_keywords[] = {
        {"sssss", 5, DCH_SSSS, true, FROM_CHAR_DATE_NONE},      /* s */
        {"ssss", 4, DCH_SSSS, true, FROM_CHAR_DATE_NONE},
        {"ss", 2, DCH_SS, true, FROM_CHAR_DATE_NONE},
-       {"tzh", 3, DCH_TZH, false, FROM_CHAR_DATE_NONE},    /* t */
+       {"tzh", 3, DCH_TZH, false, FROM_CHAR_DATE_NONE},        /* t */
        {"tzm", 3, DCH_TZM, true, FROM_CHAR_DATE_NONE},
        {"tz", 2, DCH_tz, false, FROM_CHAR_DATE_NONE},
        {"us", 2, DCH_US, true, FROM_CHAR_DATE_NONE},   /* u */
@@ -1675,8 +1675,8 @@ str_tolower(const char *buff, size_t nbytes, Oid collid)
        if (!OidIsValid(collid))
        {
                /*
-                * This typically means that the parser could not resolve a
-                * conflict of implicit collations, so report it that way.
+                * This typically means that the parser could not resolve a conflict
+                * of implicit collations, so report it that way.
                 */
                ereport(ERROR,
                                (errcode(ERRCODE_INDETERMINATE_COLLATION),
@@ -1797,8 +1797,8 @@ str_toupper(const char *buff, size_t nbytes, Oid collid)
        if (!OidIsValid(collid))
        {
                /*
-                * This typically means that the parser could not resolve a
-                * conflict of implicit collations, so report it that way.
+                * This typically means that the parser could not resolve a conflict
+                * of implicit collations, so report it that way.
                 */
                ereport(ERROR,
                                (errcode(ERRCODE_INDETERMINATE_COLLATION),
@@ -1920,8 +1920,8 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
        if (!OidIsValid(collid))
        {
                /*
-                * This typically means that the parser could not resolve a
-                * conflict of implicit collations, so report it that way.
+                * This typically means that the parser could not resolve a conflict
+                * of implicit collations, so report it that way.
                 */
                ereport(ERROR,
                                (errcode(ERRCODE_INDETERMINATE_COLLATION),
index 63649ba7351a2a4a187bf27308392bcaddbd2ff8..553cc25eb9d6d598ba3e1513af835dabc09f7f11 100644
@@ -44,9 +44,9 @@ typedef struct JsonUniqueHashEntry
 /* Context for key uniqueness check in builder functions */
 typedef struct JsonUniqueBuilderState
 {
-       JsonUniqueCheckState check;     /* unique check */
+       JsonUniqueCheckState check; /* unique check */
        StringInfoData skipped_keys;    /* skipped keys with NULL values */
-       MemoryContext mcxt;                             /* context for saving skipped keys */
+       MemoryContext mcxt;                     /* context for saving skipped keys */
 } JsonUniqueBuilderState;
 
 /* Element of object stack for key uniqueness check during json parsing */
@@ -774,10 +774,10 @@ to_json_is_immutable(Oid typoid)
                        return false;
 
                case JSONTYPE_ARRAY:
-                       return false;   /* TODO recurse into elements */
+                       return false;           /* TODO recurse into elements */
 
                case JSONTYPE_COMPOSITE:
-                       return false;   /* TODO recurse into fields */
+                       return false;           /* TODO recurse into fields */
 
                case JSONTYPE_NUMERIC:
                case JSONTYPE_CAST:
@@ -938,7 +938,7 @@ static uint32
 json_unique_hash(const void *key, Size keysize)
 {
        const JsonUniqueHashEntry *entry = (JsonUniqueHashEntry *) key;
-       uint32          hash =  hash_bytes_uint32(entry->object_id);
+       uint32          hash = hash_bytes_uint32(entry->object_id);
 
        hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len);
 
@@ -1011,6 +1011,7 @@ json_unique_builder_get_skipped_keys(JsonUniqueBuilderState *cxt)
        if (!out->data)
        {
                MemoryContext oldcxt = MemoryContextSwitchTo(cxt->mcxt);
+
                initStringInfo(out);
                MemoryContextSwitchTo(oldcxt);
        }
@@ -1116,8 +1117,8 @@ json_object_agg_transfn_worker(FunctionCallInfo fcinfo,
                out = state->str;
 
                /*
-                * Append comma delimiter only if we have already outputted some fields
-                * after the initial string "{ ".
+                * Append comma delimiter only if we have already outputted some
+                * fields after the initial string "{ ".
                 */
                if (out->len > 2)
                        appendStringInfoString(out, ", ");
@@ -1285,7 +1286,7 @@ json_build_object_worker(int nargs, Datum *args, bool *nulls, Oid *types,
                if (nulls[i])
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                        errmsg("argument %d cannot be null",  i + 1),
+                                        errmsg("argument %d cannot be null", i + 1),
                                         errhint("Object keys should be text.")));
 
                /* save key offset before key appending */
@@ -1327,6 +1328,7 @@ json_build_object(PG_FUNCTION_ARGS)
        Datum      *args;
        bool       *nulls;
        Oid                *types;
+
        /* build argument values to build the object */
        int                     nargs = extract_variadic_args(fcinfo, 0, true,
                                                                                          &args, &types, &nulls);
@@ -1382,6 +1384,7 @@ json_build_array(PG_FUNCTION_ARGS)
        Datum      *args;
        bool       *nulls;
        Oid                *types;
+
        /* build argument values to build the object */
        int                     nargs = extract_variadic_args(fcinfo, 0, true,
                                                                                          &args, &types, &nulls);
@@ -1706,7 +1709,7 @@ json_validate(text *json, bool check_unique_keys, bool throw_error)
                if (throw_error)
                        json_ereport_error(result, lex);
 
-               return false;   /* invalid json */
+               return false;                   /* invalid json */
        }
 
        if (check_unique_keys && !state.unique)
@@ -1716,10 +1719,10 @@ json_validate(text *json, bool check_unique_keys, bool throw_error)
                                        (errcode(ERRCODE_DUPLICATE_JSON_OBJECT_KEY_VALUE),
                                         errmsg("duplicate JSON object key value")));
 
-               return false;   /* not unique keys */
+               return false;                   /* not unique keys */
        }
 
-       return true;    /* ok */
+       return true;                            /* ok */
 }
 
 /*
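
Two hunks above touch json_unique_hash, whose scheme is to hash the enclosing object's id and XOR in a hash of the key bytes, so identical keys under different objects land in different buckets. A self-contained sketch with the same hashfn.h helpers (the struct and function names here are illustrative, not the real JsonUniqueHashEntry code):

    #include "common/hashfn.h"

    typedef struct DemoUniqueKey
    {
        int         object_id;     /* which JSON object the key belongs to */
        const char *key;
        int         key_len;
    } DemoUniqueKey;

    static uint32
    demo_unique_hash(const DemoUniqueKey *entry)
    {
        uint32      hash = hash_bytes_uint32(entry->object_id);

        /* fold in the key so only (object_id, key) pairs collide together */
        hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len);
        return hash;
    }
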
index 26d81366c9f8c3f00e58e2f149239d844d50e9e2..39355e242d22d46798cbb3137671b5283b4a8e6d 100644
@@ -1148,10 +1148,10 @@ to_jsonb_is_immutable(Oid typoid)
                        return false;
 
                case JSONBTYPE_ARRAY:
-                       return false;   /* TODO recurse into elements */
+                       return false;           /* TODO recurse into elements */
 
                case JSONBTYPE_COMPOSITE:
-                       return false;   /* TODO recurse into fields */
+                       return false;           /* TODO recurse into fields */
 
                case JSONBTYPE_NUMERIC:
                case JSONBTYPE_JSONCAST:
@@ -1240,6 +1240,7 @@ jsonb_build_object(PG_FUNCTION_ARGS)
        Datum      *args;
        bool       *nulls;
        Oid                *types;
+
        /* build argument values to build the object */
        int                     nargs = extract_variadic_args(fcinfo, 0, true,
                                                                                          &args, &types, &nulls);
@@ -1299,6 +1300,7 @@ jsonb_build_array(PG_FUNCTION_ARGS)
        Datum      *args;
        bool       *nulls;
        Oid                *types;
+
        /* build argument values to build the object */
        int                     nargs = extract_variadic_args(fcinfo, 0, true,
                                                                                          &args, &types, &nulls);
@@ -2229,7 +2231,7 @@ jsonb_float8(PG_FUNCTION_ARGS)
 Jsonb *
 JsonbMakeEmptyArray(void)
 {
-       JsonbValue jbv;
+       JsonbValue      jbv;
 
        jbv.type = jbvArray;
        jbv.val.array.elems = NULL;
@@ -2245,7 +2247,7 @@ JsonbMakeEmptyArray(void)
 Jsonb *
 JsonbMakeEmptyObject(void)
 {
-       JsonbValue jbv;
+       JsonbValue      jbv;
 
        jbv.type = jbvObject;
        jbv.val.object.pairs = NULL;
@@ -2272,7 +2274,7 @@ JsonbUnquote(Jsonb *jb)
                        return pstrdup(v.val.boolean ? "true" : "false");
                else if (v.type == jbvNumeric)
                        return DatumGetCString(DirectFunctionCall1(numeric_out,
-                                                                          PointerGetDatum(v.val.numeric)));
+                                                                                                          PointerGetDatum(v.val.numeric)));
                else if (v.type == jbvNull)
                        return pstrdup("null");
                else
index 21d874c098a28b6c919edf8c8221a82935e4529a..5318eda9cfbed11129e74e052386711b2837939d 100644
@@ -1959,7 +1959,8 @@ uniqueifyJsonbObject(JsonbValue *object, bool unique_keys, bool skip_nulls)
 
        if (hasNonUniq || skip_nulls)
        {
-               JsonbPair  *ptr, *res;
+               JsonbPair  *ptr,
+                                  *res;
 
                while (skip_nulls && object->val.object.nPairs > 0 &&
                           object->val.object.pairs->value.type == jbvNull)
index d1356d641660c1cfc176b263bb231f903c5f73d7..d427bdfbe0de722b9823fd81ad9419545a82295e 100644
@@ -3139,7 +3139,7 @@ Datum
 json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod,
                                   void **cache, MemoryContext mcxt, bool *isnull)
 {
-       JsValue         jsv = { 0 };
+       JsValue         jsv = {0};
        JsonbValue      jbv;
 
        jsv.is_json = json_type == JSONOID;
@@ -3157,7 +3157,8 @@ json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod,
 
                jsv.val.json.str = VARDATA_ANY(json);
                jsv.val.json.len = VARSIZE_ANY_EXHDR(json);
-               jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */
+               jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in
+                                                                                                * populate_composite() */
        }
        else
        {
@@ -3174,7 +3175,7 @@ json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod,
        if (!*cache)
                *cache = MemoryContextAllocZero(mcxt, sizeof(ColumnIOData));
 
-       return populate_record_field(*cache , typid, typmod, NULL, mcxt,
+       return populate_record_field(*cache, typid, typmod, NULL, mcxt,
                                                                 PointerGetDatum(NULL), &jsv, isnull);
 }
 
index 0ac14153aaee329d518f89743429c5c3ff234e9d..da9df4ae766081f2420f15df22625578604ac47a 100644
@@ -1094,7 +1094,7 @@ typedef struct JsonPathMutableContext
 {
        List       *varnames;           /* list of variable names */
        List       *varexprs;           /* list of variable expressions */
-       JsonPathDatatypeStatus current; /* status of @ item */
+       JsonPathDatatypeStatus current; /* status of @ item */
        bool            lax;                    /* jsonpath is lax or strict */
        bool            mutable;                /* resulting mutability status */
 } JsonPathMutableContext;
@@ -1282,18 +1282,18 @@ jspIsMutableWalker(JsonPathItem *jpi, JsonPathMutableContext *cxt)
                                jspIsMutableWalker(&arg, cxt);
                                break;
 
-                       /* literals */
+                               /* literals */
                        case jpiNull:
                        case jpiString:
                        case jpiNumeric:
                        case jpiBool:
-                       /* accessors */
+                               /* accessors */
                        case jpiKey:
                        case jpiAnyKey:
-                       /* special items */
+                               /* special items */
                        case jpiSubscript:
                        case jpiLast:
-                       /* item methods */
+                               /* item methods */
                        case jpiType:
                        case jpiSize:
                        case jpiAbs:
index 2544c6b1551cb50eae9a0fe112685dd836b1a7ce..0943a381bacea7e9c1dd76814b0f1d660e1a2580 100644
@@ -288,9 +288,9 @@ static void getJsonPathItem(JsonPathExecContext *cxt, JsonPathItem *item,
                                                        JsonbValue *value);
 static void getJsonPathVariable(JsonPathExecContext *cxt,
                                                                JsonPathItem *variable, JsonbValue *value);
-static int getJsonPathVariableFromJsonb(void *varsJsonb, char *varName,
-                                                                               int varNameLen, JsonbValue *val,
-                                                                               JsonbValue *baseObject);
+static int     getJsonPathVariableFromJsonb(void *varsJsonb, char *varName,
+                                                                                int varNameLen, JsonbValue *val,
+                                                                                JsonbValue *baseObject);
 static int     JsonbArraySize(JsonbValue *jb);
 static JsonPathBool executeComparison(JsonPathItem *cmp, JsonbValue *lv,
                                                                          JsonbValue *rv, void *p);
@@ -322,7 +322,7 @@ static int  compareDatetime(Datum val1, Oid typid1, Datum val2, Oid typid2,
 
 
 static JsonTableJoinState *JsonTableInitPlanState(JsonTableContext *cxt,
-                                                                       Node *plan, JsonTableScanState *parent);
+                                                                                                 Node *plan, JsonTableScanState *parent);
 static bool JsonTableNextRow(JsonTableScanState *scan);
 
 
@@ -2743,7 +2743,7 @@ static int
 compareDatetime(Datum val1, Oid typid1, Datum val2, Oid typid2,
                                bool useTz, bool *cast_error)
 {
-       PGFunction cmpfunc;
+       PGFunction      cmpfunc;
 
        *cast_error = false;
 
@@ -2987,8 +2987,8 @@ JsonPathQuery(Datum jb, JsonPath *jp, JsonWrapper wrapper, bool *empty,
 JsonbValue *
 JsonPathValue(Datum jb, JsonPath *jp, bool *empty, bool *error, List *vars)
 {
-       JsonbValue   *res;
-       JsonValueList found = { 0 };
+       JsonbValue *res;
+       JsonValueList found = {0};
        JsonPathExecResult jper PG_USED_FOR_ASSERTS_ONLY;
        int                     count;
 
@@ -3123,8 +3123,8 @@ JsonItemFromDatum(Datum val, Oid typid, int32 typmod, JsonbValue *res)
                                text       *txt = DatumGetTextP(val);
                                char       *str = text_to_cstring(txt);
                                Jsonb      *jb =
-                                       DatumGetJsonbP(DirectFunctionCall1(jsonb_in,
-                                                                                                          CStringGetDatum(str)));
+                               DatumGetJsonbP(DirectFunctionCall1(jsonb_in,
+                                                                                                  CStringGetDatum(str)));
 
                                pfree(str);
 
@@ -3221,7 +3221,7 @@ JsonTableInitOpaque(TableFuncScanState *state, int natts)
 {
        JsonTableContext *cxt;
        PlanState  *ps = &state->ss.ps;
-       TableFuncScan  *tfs = castNode(TableFuncScan, ps->plan);
+       TableFuncScan *tfs = castNode(TableFuncScan, ps->plan);
        TableFunc  *tf = tfs->tablefunc;
        JsonExpr   *ci = castNode(JsonExpr, tf->docexpr);
        JsonTableParent *root = castNode(JsonTableParent, tf->plan);
@@ -3298,7 +3298,7 @@ JsonTableResetContextItem(JsonTableScanState *scan, Datum item)
 {
        MemoryContext oldcxt;
        JsonPathExecResult res;
-       Jsonb           *js = (Jsonb *) DatumGetJsonbP(item);
+       Jsonb      *js = (Jsonb *) DatumGetJsonbP(item);
 
        JsonValueListClear(&scan->found);
 
@@ -3307,7 +3307,7 @@ JsonTableResetContextItem(JsonTableScanState *scan, Datum item)
        oldcxt = MemoryContextSwitchTo(scan->mcxt);
 
        res = executeJsonPath(scan->path, scan->args, EvalJsonPathVar, js,
-                                                 scan->errorOnError, &scan->found, false /* FIXME */);
+                                                 scan->errorOnError, &scan->found, false /* FIXME */ );
 
        MemoryContextSwitchTo(oldcxt);
 
@@ -3369,9 +3369,9 @@ JsonTableNextJoinRow(JsonTableJoinState *state)
 
                /* inner rows are exhausted */
                if (state->u.join.cross)
-                       state->u.join.advanceRight = false;     /* next outer row */
+                       state->u.join.advanceRight = false; /* next outer row */
                else
-                       return false;   /* end of scan */
+                       return false;           /* end of scan */
        }
 
        while (!state->u.join.advanceRight)
@@ -3387,7 +3387,7 @@ JsonTableNextJoinRow(JsonTableJoinState *state)
                        JsonTableRescanRecursive(state->u.join.right);
 
                        if (!JsonTableNextJoinRow(state->u.join.right))
-                               continue;       /* next outer row */
+                               continue;               /* next outer row */
 
                        state->u.join.advanceRight = true;      /* next inner row */
                }
@@ -3460,7 +3460,7 @@ JsonTableNextRow(JsonTableScanState *scan)
                {
                        scan->current = PointerGetDatum(NULL);
                        scan->currentIsNull = true;
-                       return false;   /* end of scan */
+                       return false;           /* end of scan */
                }
 
                /* set current row item */
@@ -3518,12 +3518,12 @@ JsonTableGetValue(TableFuncScanState *state, int colnum,
        JsonTableScanState *scan = cxt->colexprs[colnum].scan;
        Datum           result;
 
-       if (scan->currentIsNull) /* NULL from outer/union join */
+       if (scan->currentIsNull)        /* NULL from outer/union join */
        {
                result = (Datum) 0;
                *isnull = true;
        }
-       else if (estate)        /* regular column */
+       else if (estate)                        /* regular column */
        {
                result = ExecEvalExpr(estate, econtext, isnull);
        }
index 833ee8f814c875dc4404e2a9035b7bc1d837d1e9..e02fc3725ad8c17a73c0be7ce39a36b72b4b5e31 100644
@@ -181,8 +181,8 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
        if (!OidIsValid(collation))
        {
                /*
-                * This typically means that the parser could not resolve a
-                * conflict of implicit collations, so report it that way.
+                * This typically means that the parser could not resolve a conflict
+                * of implicit collations, so report it that way.
                 */
                ereport(ERROR,
                                (errcode(ERRCODE_INDETERMINATE_COLLATION),
index 67d7d67fb83b1081b5fce92dc881ee9c63453292..da5c7d0906999247ca9c70a3606a1cf3d27d15f0 100644
@@ -1439,7 +1439,8 @@ multirange_agg_transfn(PG_FUNCTION_ARGS)
                if (range_count == 0)
                {
                        /*
-                        * Add an empty range so we get an empty result (not a null result).
+                        * Add an empty range so we get an empty result (not a null
+                        * result).
                         */
                        accumArrayResult(state,
                                                         RangeTypePGetDatum(make_empty_range(rngtypcache)),
index 45547f6ae7f3f0b3b3ba69898234f0cf3f8d0021..920a63b00816c0e665e94edd86a54dc031c1d35d 100644
@@ -8537,139 +8537,138 @@ div_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result,
        alloc_var(result, res_ndigits);
        res_digits = result->digits;
 
+       /*
+        * The full multiple-place algorithm is taken from Knuth volume 2,
+        * Algorithm 4.3.1D.
+        *
+        * We need the first divisor digit to be >= NBASE/2.  If it isn't, make it
+        * so by scaling up both the divisor and dividend by the factor "d".  (The
+        * reason for allocating dividend[0] above is to leave room for possible
+        * carry here.)
+        */
+       if (divisor[1] < HALF_NBASE)
+       {
+               int                     d = NBASE / (divisor[1] + 1);
+
+               carry = 0;
+               for (i = var2ndigits; i > 0; i--)
+               {
+                       carry += divisor[i] * d;
+                       divisor[i] = carry % NBASE;
+                       carry = carry / NBASE;
+               }
+               Assert(carry == 0);
+               carry = 0;
+               /* at this point only var1ndigits of dividend can be nonzero */
+               for (i = var1ndigits; i >= 0; i--)
+               {
+                       carry += dividend[i] * d;
+                       dividend[i] = carry % NBASE;
+                       carry = carry / NBASE;
+               }
+               Assert(carry == 0);
+               Assert(divisor[1] >= HALF_NBASE);
+       }
+       /* First 2 divisor digits are used repeatedly in main loop */
+       divisor1 = divisor[1];
+       divisor2 = divisor[2];
+
+       /*
+        * Begin the main loop.  Each iteration of this loop produces the j'th
+        * quotient digit by dividing dividend[j .. j + var2ndigits] by the
+        * divisor; this is essentially the same as the common manual procedure
+        * for long division.
+        */
+       for (j = 0; j < res_ndigits; j++)
+       {
+               /* Estimate quotient digit from the first two dividend digits */
+               int                     next2digits = dividend[j] * NBASE + dividend[j + 1];
+               int                     qhat;
+
                /*
-                * The full multiple-place algorithm is taken from Knuth volume 2,
-                * Algorithm 4.3.1D.
-                *
-                * We need the first divisor digit to be >= NBASE/2.  If it isn't,
-                * make it so by scaling up both the divisor and dividend by the
-                * factor "d".  (The reason for allocating dividend[0] above is to
-                * leave room for possible carry here.)
+                * If next2digits are 0, then quotient digit must be 0 and there's no
+                * need to adjust the working dividend.  It's worth testing here to
+                * fall out ASAP when processing trailing zeroes in a dividend.
                 */
-               if (divisor[1] < HALF_NBASE)
+               if (next2digits == 0)
                {
-                       int                     d = NBASE / (divisor[1] + 1);
-
-                       carry = 0;
-                       for (i = var2ndigits; i > 0; i--)
-                       {
-                               carry += divisor[i] * d;
-                               divisor[i] = carry % NBASE;
-                               carry = carry / NBASE;
-                       }
-                       Assert(carry == 0);
-                       carry = 0;
-                       /* at this point only var1ndigits of dividend can be nonzero */
-                       for (i = var1ndigits; i >= 0; i--)
-                       {
-                               carry += dividend[i] * d;
-                               dividend[i] = carry % NBASE;
-                               carry = carry / NBASE;
-                       }
-                       Assert(carry == 0);
-                       Assert(divisor[1] >= HALF_NBASE);
+                       res_digits[j] = 0;
+                       continue;
                }
-               /* First 2 divisor digits are used repeatedly in main loop */
-               divisor1 = divisor[1];
-               divisor2 = divisor[2];
+
+               if (dividend[j] == divisor1)
+                       qhat = NBASE - 1;
+               else
+                       qhat = next2digits / divisor1;
 
                /*
-                * Begin the main loop.  Each iteration of this loop produces the j'th
-                * quotient digit by dividing dividend[j .. j + var2ndigits] by the
-                * divisor; this is essentially the same as the common manual
-                * procedure for long division.
+                * Adjust quotient digit if it's too large.  Knuth proves that after
+                * this step, the quotient digit will be either correct or just one
+                * too large.  (Note: it's OK to use dividend[j+2] here because we
+                * know the divisor length is at least 2.)
                 */
-               for (j = 0; j < res_ndigits; j++)
+               while (divisor2 * qhat >
+                          (next2digits - qhat * divisor1) * NBASE + dividend[j + 2])
+                       qhat--;
+
+               /* As above, need do nothing more when quotient digit is 0 */
+               if (qhat > 0)
                {
-                       /* Estimate quotient digit from the first two dividend digits */
-                       int                     next2digits = dividend[j] * NBASE + dividend[j + 1];
-                       int                     qhat;
+                       NumericDigit *dividend_j = &dividend[j];
 
                        /*
-                        * If next2digits are 0, then quotient digit must be 0 and there's
-                        * no need to adjust the working dividend.  It's worth testing
-                        * here to fall out ASAP when processing trailing zeroes in a
-                        * dividend.
+                        * Multiply the divisor by qhat, and subtract that from the
+                        * working dividend.  The multiplication and subtraction are
+                        * folded together here, noting that qhat <= NBASE (since it might
+                        * be one too large), and so the intermediate result "tmp_result"
+                        * is in the range [-NBASE^2, NBASE - 1], and "borrow" is in the
+                        * range [0, NBASE].
                         */
-                       if (next2digits == 0)
+                       borrow = 0;
+                       for (i = var2ndigits; i >= 0; i--)
                        {
-                               res_digits[j] = 0;
-                               continue;
-                       }
+                               int                     tmp_result;
 
-                       if (dividend[j] == divisor1)
-                               qhat = NBASE - 1;
-                       else
-                               qhat = next2digits / divisor1;
+                               tmp_result = dividend_j[i] - borrow - divisor[i] * qhat;
+                               borrow = (NBASE - 1 - tmp_result) / NBASE;
+                               dividend_j[i] = tmp_result + borrow * NBASE;
+                       }
 
                        /*
-                        * Adjust quotient digit if it's too large.  Knuth proves that
-                        * after this step, the quotient digit will be either correct or
-                        * just one too large.  (Note: it's OK to use dividend[j+2] here
-                        * because we know the divisor length is at least 2.)
+                        * If we got a borrow out of the top dividend digit, then indeed
+                        * qhat was one too large.  Fix it, and add back the divisor to
+                        * correct the working dividend.  (Knuth proves that this will
+                        * occur only about 3/NBASE of the time; hence, it's a good idea
+                        * to test this code with small NBASE to be sure this section gets
+                        * exercised.)
                         */
-                       while (divisor2 * qhat >
-                                  (next2digits - qhat * divisor1) * NBASE + dividend[j + 2])
-                               qhat--;
-
-                       /* As above, need do nothing more when quotient digit is 0 */
-                       if (qhat > 0)
+                       if (borrow)
                        {
-                               NumericDigit *dividend_j = &dividend[j];
-
-                               /*
-                                * Multiply the divisor by qhat, and subtract that from the
-                                * working dividend.  The multiplication and subtraction are
-                                * folded together here, noting that qhat <= NBASE (since it
-                                * might be one too large), and so the intermediate result
-                                * "tmp_result" is in the range [-NBASE^2, NBASE - 1], and
-                                * "borrow" is in the range [0, NBASE].
-                                */
-                               borrow = 0;
+                               qhat--;
+                               carry = 0;
                                for (i = var2ndigits; i >= 0; i--)
                                {
-                                       int                     tmp_result;
-
-                                       tmp_result = dividend_j[i] - borrow - divisor[i] * qhat;
-                                       borrow = (NBASE - 1 - tmp_result) / NBASE;
-                                       dividend_j[i] = tmp_result + borrow * NBASE;
-                               }
-
-                               /*
-                                * If we got a borrow out of the top dividend digit, then
-                                * indeed qhat was one too large.  Fix it, and add back the
-                                * divisor to correct the working dividend.  (Knuth proves
-                                * that this will occur only about 3/NBASE of the time; hence,
-                                * it's a good idea to test this code with small NBASE to be
-                                * sure this section gets exercised.)
-                                */
-                               if (borrow)
-                               {
-                                       qhat--;
-                                       carry = 0;
-                                       for (i = var2ndigits; i >= 0; i--)
+                                       carry += dividend_j[i] + divisor[i];
+                                       if (carry >= NBASE)
                                        {
-                                               carry += dividend_j[i] + divisor[i];
-                                               if (carry >= NBASE)
-                                               {
-                                                       dividend_j[i] = carry - NBASE;
-                                                       carry = 1;
-                                               }
-                                               else
-                                               {
-                                                       dividend_j[i] = carry;
-                                                       carry = 0;
-                                               }
+                                               dividend_j[i] = carry - NBASE;
+                                               carry = 1;
+                                       }
+                                       else
+                                       {
+                                               dividend_j[i] = carry;
+                                               carry = 0;
                                        }
-                                       /* A carry should occur here to cancel the borrow above */
-                                       Assert(carry == 1);
                                }
+                               /* A carry should occur here to cancel the borrow above */
+                               Assert(carry == 1);
                        }
-
-                       /* And we're done with this quotient digit */
-                       res_digits[j] = qhat;
                }
 
+               /* And we're done with this quotient digit */
+               res_digits[j] = qhat;
+       }
+
        pfree(dividend);
 
        /*
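
The large re-indented block above is div_var's long division, Knuth volume 2, Algorithm 4.3.1D; only the indentation changed here, the logic is untouched. The core trick is estimating each quotient digit from the two leading dividend digits and then correcting the estimate against the next digit. A standalone, base-10 demonstration of that single step (the production code works the same way with NBASE = 10000 digits):

    #include <assert.h>
    #include <stdio.h>

    int
    main(void)
    {
        /*
         * One quotient-digit step: dividend window 8765, divisor 987.
         * The divisor is already normalized (leading digit 9 >= base/2),
         * which is what the scaling by "d" above guarantees.
         */
        int         base = 10;
        int         dividend[] = {8, 7, 6, 5};
        int         divisor[] = {9, 8, 7};
        int         next2digits = dividend[0] * base + dividend[1];    /* 87 */
        int         qhat;

        /* Estimate from the two leading dividend digits */
        if (dividend[0] == divisor[0])
            qhat = base - 1;
        else
            qhat = next2digits / divisor[0];    /* 87 / 9 = 9, one too big */

        /*
         * Knuth's correction test against the next digit; afterwards qhat
         * is correct or at most one too large, and the multiply-subtract
         * pass detects that last case via its final borrow.
         */
        while (divisor[1] * qhat >
               (next2digits - qhat * divisor[0]) * base + dividend[2])
            qhat--;

        printf("qhat = %d\n", qhat);    /* 8765 / 987 = 8 remainder 869 */
        assert(qhat == 8);
        return 0;
    }

The same shape is visible in the hunk: next2digits, the dividend[j] == divisor1 guard, and the divisor2 * qhat comparison, followed by the add-back branch when a borrow shows qhat was still one too large.
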
index 2c47dea34290c12fb1dc4bd3b9a1920890c9c7d1..a0490a752244411ca1676231ae4a2c35884a941d 100644
@@ -1625,7 +1625,7 @@ pg_newlocale_from_collation(Oid collid)
                }
 
                datum = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion,
-                                                                         &isnull);
+                                                               &isnull);
                if (!isnull)
                {
                        char       *actual_versionstr;
@@ -1992,7 +1992,7 @@ check_icu_locale(const char *icu_locale)
 {
 #ifdef USE_ICU
        UCollator  *collator;
-       UErrorCode  status;
+       UErrorCode      status;
 
        status = U_ZERO_ERROR;
        collator = ucol_open(icu_locale, &status);
index d3ad795a6ea35630d196db01883c5c39bab916f1..893690dad52049db1fde02ddeaf634793bcc8981 100644
@@ -2411,7 +2411,7 @@ pg_stat_have_stats(PG_FUNCTION_ARGS)
        char       *stats_type = text_to_cstring(PG_GETARG_TEXT_P(0));
        Oid                     dboid = PG_GETARG_OID(1);
        Oid                     objoid = PG_GETARG_OID(2);
-       PgStat_Kind     kind = pgstat_get_kind_from_str(stats_type);
+       PgStat_Kind kind = pgstat_get_kind_from_str(stats_type);
 
        PG_RETURN_BOOL(pgstat_have_entry(kind, dboid, objoid));
 }
index f90b0a3b35868cea9ff5d48f7a504d2439a974b7..1190b8000bcc7519c8d51c5580b94c440ae75e3e 100644
@@ -608,8 +608,8 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
                                        /*
                                         * Non-empty range A contains non-empty range B if lower
                                         * bound of A is lower or equal to lower bound of range B
-                                        * and upper bound of range A is greater than or equal to upper
-                                        * bound of range A.
+                                        * and upper bound of range A is greater than or equal to
+                                        * upper bound of range B.
                                         *
                                         * All non-empty ranges contain an empty range.
                                         */
index 01d4c22cfce1850473f91b867e5a155d202b64aa..51b3fdc9a01825efe059d3a87a36ddfd9b1db80f 100644
@@ -113,8 +113,10 @@ typedef struct RI_ConstraintInfo
        Oid                     fk_relid;               /* referencing relation */
        char            confupdtype;    /* foreign key's ON UPDATE action */
        char            confdeltype;    /* foreign key's ON DELETE action */
-       int                     ndelsetcols;    /* number of columns referenced in ON DELETE SET clause */
-       int16           confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on delete */
+       int                     ndelsetcols;    /* number of columns referenced in ON DELETE
+                                                                * SET clause */
+       int16           confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on
+                                                                                                * delete */
        char            confmatchtype;  /* foreign key's match type */
        int                     nkeys;                  /* number of key columns */
        int16           pk_attnums[RI_MAX_NUMKEYS]; /* attnums of referenced cols */
@@ -1059,7 +1061,8 @@ ri_set(TriggerData *trigdata, bool is_set_null, int tgkind)
        /*
         * Fetch or prepare a saved plan for the trigger.
         */
-       switch (tgkind) {
+       switch (tgkind)
+       {
                case RI_TRIGTYPE_UPDATE:
                        queryno = is_set_null
                                ? RI_PLAN_SETNULL_ONUPDATE
@@ -1086,25 +1089,29 @@ ri_set(TriggerData *trigdata, bool is_set_null, int tgkind)
                const char *qualsep;
                Oid                     queryoids[RI_MAX_NUMKEYS];
                const char *fk_only;
-               int num_cols_to_set;
+               int                     num_cols_to_set;
                const int16 *set_cols;
 
-               switch (tgkind) {
+               switch (tgkind)
+               {
                        case RI_TRIGTYPE_UPDATE:
                                num_cols_to_set = riinfo->nkeys;
                                set_cols = riinfo->fk_attnums;
                                break;
                        case RI_TRIGTYPE_DELETE:
+
                                /*
-                                * If confdelsetcols are present, then we only update
-                                * the columns specified in that array, otherwise we
-                                * update all the referencing columns.
+                                * If confdelsetcols are present, then we only update the
+                                * columns specified in that array, otherwise we update all
+                                * the referencing columns.
                                 */
-                               if (riinfo->ndelsetcols != 0) {
+                               if (riinfo->ndelsetcols != 0)
+                               {
                                        num_cols_to_set = riinfo->ndelsetcols;
                                        set_cols = riinfo->confdelsetcols;
                                }
-                               else {
+                               else
+                               {
                                        num_cols_to_set = riinfo->nkeys;
                                        set_cols = riinfo->fk_attnums;
                                }
index 5d49f564a2e14612c82460c62339467bd6d21f81..f22ecfc58323d6805e861e163a14dc5e426f8df9 100644
@@ -2331,7 +2331,10 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
                                if (string)
                                        appendStringInfo(&buf, " ON DELETE %s", string);
 
-                               /* Add columns specified to SET NULL or SET DEFAULT if provided. */
+                               /*
+                                * Add columns specified to SET NULL or SET DEFAULT if
+                                * provided.
+                                */
                                val = SysCacheGetAttr(CONSTROID, tup,
                                                                          Anum_pg_constraint_confdelsetcols, &isnull);
                                if (!isnull)
@@ -8260,7 +8263,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
                                case T_GroupingFunc:    /* own parentheses */
                                case T_WindowFunc:      /* own parentheses */
                                case T_CaseExpr:        /* other separators */
-                               case T_JsonExpr: /* own parentheses */
+                               case T_JsonExpr:        /* own parentheses */
                                        return true;
                                default:
                                        return false;
@@ -8456,8 +8459,8 @@ get_json_format(JsonFormat *format, StringInfo buf)
        if (format->encoding != JS_ENC_DEFAULT)
        {
                const char *encoding =
-                       format->encoding == JS_ENC_UTF16 ? "UTF16" :
-                       format->encoding == JS_ENC_UTF32 ? "UTF32" : "UTF8";
+               format->encoding == JS_ENC_UTF16 ? "UTF16" :
+               format->encoding == JS_ENC_UTF32 ? "UTF32" : "UTF8";
 
                appendStringInfo(buf, " ENCODING %s", encoding);
        }
@@ -8479,7 +8482,7 @@ get_json_returning(JsonReturning *returning, StringInfo buf,
 
        if (!json_format_by_default ||
                returning->format->format_type !=
-                       (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON))
+               (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON))
                get_json_format(returning->format, buf);
 }
 
@@ -9778,7 +9781,8 @@ get_rule_expr(Node *node, deparse_context *context,
 
                                if (jexpr->passing_values)
                                {
-                                       ListCell   *lc1, *lc2;
+                                       ListCell   *lc1,
+                                                          *lc2;
                                        bool            needcomma = false;
 
                                        appendStringInfoString(buf, " PASSING ");
@@ -10147,7 +10151,7 @@ get_json_constructor(JsonConstructorExpr *ctor, deparse_context *context,
                if (nargs > 0)
                {
                        const char *sep = ctor->type == JSCTOR_JSON_OBJECT &&
-                               (nargs % 2) != 0 ? " : " : ", ";
+                       (nargs % 2) != 0 ? " : " : ", ";
 
                        appendStringInfoString(buf, sep);
                }
@@ -10251,7 +10255,8 @@ get_agg_expr_helper(Aggref *aggref, deparse_context *context,
                                        if (is_json_objectagg)
                                        {
                                                if (i > 2)
-                                                       break; /* skip ABSENT ON NULL and WITH UNIQUE args */
+                                                       break;  /* skip ABSENT ON NULL and WITH UNIQUE
+                                                                        * args */
 
                                                appendStringInfoString(buf, " : ");
                                        }
@@ -11160,16 +11165,16 @@ get_json_table_nested_columns(TableFunc *tf, Node *node,
        }
        else
        {
-                JsonTableParent *n = castNode(JsonTableParent, node);
+               JsonTableParent *n = castNode(JsonTableParent, node);
 
-                if (needcomma)
-                        appendStringInfoChar(context->buf, ',');
+               if (needcomma)
+                       appendStringInfoChar(context->buf, ',');
 
-                appendStringInfoChar(context->buf, ' ');
-                appendContextKeyword(context,  "NESTED PATH ", 0, 0, 0);
-                get_const_expr(n->path, context, -1);
-                appendStringInfo(context->buf, " AS %s", quote_identifier(n->name));
-                get_json_table_columns(tf, n, context, showimplicit);
+               appendStringInfoChar(context->buf, ' ');
+               appendContextKeyword(context, "NESTED PATH ", 0, 0, 0);
+               get_const_expr(n->path, context, -1);
+               appendStringInfo(context->buf, " AS %s", quote_identifier(n->name));
+               get_json_table_columns(tf, n, context, showimplicit);
        }
 }
 
@@ -11199,17 +11204,17 @@ get_json_table_plan(TableFunc *tf, Node *node, deparse_context *context,
        }
        else
        {
-                JsonTableParent *n = castNode(JsonTableParent, node);
+               JsonTableParent *n = castNode(JsonTableParent, node);
 
-                appendStringInfoString(context->buf, quote_identifier(n->name));
+               appendStringInfoString(context->buf, quote_identifier(n->name));
 
-                if (n->child)
-                {
+               if (n->child)
+               {
                        appendStringInfoString(context->buf,
                                                                   n->outerJoin ? " OUTER " : " INNER ");
                        get_json_table_plan(tf, n->child, context,
                                                                IsA(n->child, JsonTableSibling));
-                }
+               }
        }
 
        if (parenthesize)
@@ -11348,7 +11353,8 @@ get_json_table(TableFunc *tf, deparse_context *context, bool showimplicit)
 
        if (jexpr->passing_values)
        {
-               ListCell   *lc1, *lc2;
+               ListCell   *lc1,
+                                  *lc2;
                bool            needcomma = false;
 
                appendStringInfoChar(buf, ' ');
index 71cbc1c3d8091d3a186ec839617eba41ea9ddfed..fa1f589fad873a24bf8f901d42c24f1e9ab2362f 100644
@@ -3380,9 +3380,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
  */
 double
 estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs,
-                                       double input_rows,
-                                       List **pgset, EstimationInfo *estinfo,
-                                       List **cache_varinfos, int prevNExprs)
+                                                               double input_rows,
+                                                               List **pgset, EstimationInfo *estinfo,
+                                                               List **cache_varinfos, int prevNExprs)
 {
        List       *varinfos = (cache_varinfos) ? *cache_varinfos : NIL;
        double          srf_multiplier = 1.0;
@@ -3433,7 +3433,7 @@ estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs,
                if (cache_varinfos && j++ < prevNExprs)
                {
                        if (pgset)
-                               i++; /* to keep in sync with lines below */
+                               i++;                    /* to keep in sync with lines below */
                        continue;
                }
 
@@ -3944,7 +3944,7 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
        Oid                     statOid = InvalidOid;
        MVNDistinct *stats;
        StatisticExtInfo *matched_info = NULL;
-       RangeTblEntry           *rte;
+       RangeTblEntry *rte;
 
        /* bail out immediately if the table has no extended statistics */
        if (!rel->statlist)
@@ -5255,7 +5255,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
                foreach(slist, onerel->statlist)
                {
                        StatisticExtInfo *info = (StatisticExtInfo *) lfirst(slist);
-                       RangeTblEntry    *rte = planner_rt_fetch(onerel->relid, root);
+                       RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root);
                        ListCell   *expr_item;
                        int                     pos;
 
index 8acb725bc8f17de6fb0ca9ea4fd23cc4a1bc94a5..f70f829d830f783420bd1ea8096f08d13bbbcf41 100644
@@ -2194,6 +2194,7 @@ timestamp_sortsupport(PG_FUNCTION_ARGS)
        SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
 
 #if SIZEOF_DATUM >= 8
+
        /*
         * If this build has pass-by-value timestamps, then we can use a standard
         * comparator function.
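
The comment around this #if explains the fast path: when SIZEOF_DATUM >= 8, timestamps are pass-by-value int64s, so sorting can use a direct comparator instead of calling the SQL comparison function through fmgr. A sketch of what such a comparator looks like, assuming PostgreSQL's Datum and SortSupport types (the function name is made up):

    #if SIZEOF_DATUM >= 8
    static int
    ts_fast_cmp(Datum x, Datum y, SortSupport ssup)
    {
        int64       a = DatumGetInt64(x);
        int64       b = DatumGetInt64(y);

        if (a < b)
            return -1;
        if (a > b)
            return 1;
        return 0;
    }
    #endif
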
@@ -4349,59 +4350,59 @@ interval_trunc(PG_FUNCTION_ARGS)
        if (type == UNITS)
        {
                interval2itm(*interval, tm);
-                       switch (val)
-                       {
-                               case DTK_MILLENNIUM:
-                                       /* caution: C division may have negative remainder */
-                                       tm->tm_year = (tm->tm_year / 1000) * 1000;
-                                       /* FALL THRU */
-                               case DTK_CENTURY:
-                                       /* caution: C division may have negative remainder */
-                                       tm->tm_year = (tm->tm_year / 100) * 100;
-                                       /* FALL THRU */
-                               case DTK_DECADE:
-                                       /* caution: C division may have negative remainder */
-                                       tm->tm_year = (tm->tm_year / 10) * 10;
-                                       /* FALL THRU */
-                               case DTK_YEAR:
-                                       tm->tm_mon = 0;
-                                       /* FALL THRU */
-                               case DTK_QUARTER:
-                                       tm->tm_mon = 3 * (tm->tm_mon / 3);
-                                       /* FALL THRU */
-                               case DTK_MONTH:
-                                       tm->tm_mday = 0;
-                                       /* FALL THRU */
-                               case DTK_DAY:
-                                       tm->tm_hour = 0;
-                                       /* FALL THRU */
-                               case DTK_HOUR:
-                                       tm->tm_min = 0;
-                                       /* FALL THRU */
-                               case DTK_MINUTE:
-                                       tm->tm_sec = 0;
-                                       /* FALL THRU */
-                               case DTK_SECOND:
-                                       tm->tm_usec = 0;
-                                       break;
-                               case DTK_MILLISEC:
-                                       tm->tm_usec = (tm->tm_usec / 1000) * 1000;
-                                       break;
-                               case DTK_MICROSEC:
-                                       break;
-
-                               default:
-                                       ereport(ERROR,
-                                                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                        errmsg("unit \"%s\" not supported for type %s",
-                                                                       lowunits, format_type_be(INTERVALOID)),
-                                                        (val == DTK_WEEK) ? errdetail("Months usually have fractional weeks.") : 0));
-                       }
+               switch (val)
+               {
+                       case DTK_MILLENNIUM:
+                               /* caution: C division may have negative remainder */
+                               tm->tm_year = (tm->tm_year / 1000) * 1000;
+                               /* FALL THRU */
+                       case DTK_CENTURY:
+                               /* caution: C division may have negative remainder */
+                               tm->tm_year = (tm->tm_year / 100) * 100;
+                               /* FALL THRU */
+                       case DTK_DECADE:
+                               /* caution: C division may have negative remainder */
+                               tm->tm_year = (tm->tm_year / 10) * 10;
+                               /* FALL THRU */
+                       case DTK_YEAR:
+                               tm->tm_mon = 0;
+                               /* FALL THRU */
+                       case DTK_QUARTER:
+                               tm->tm_mon = 3 * (tm->tm_mon / 3);
+                               /* FALL THRU */
+                       case DTK_MONTH:
+                               tm->tm_mday = 0;
+                               /* FALL THRU */
+                       case DTK_DAY:
+                               tm->tm_hour = 0;
+                               /* FALL THRU */
+                       case DTK_HOUR:
+                               tm->tm_min = 0;
+                               /* FALL THRU */
+                       case DTK_MINUTE:
+                               tm->tm_sec = 0;
+                               /* FALL THRU */
+                       case DTK_SECOND:
+                               tm->tm_usec = 0;
+                               break;
+                       case DTK_MILLISEC:
+                               tm->tm_usec = (tm->tm_usec / 1000) * 1000;
+                               break;
+                       case DTK_MICROSEC:
+                               break;
 
-                       if (itm2interval(tm, result) != 0)
+                       default:
                                ereport(ERROR,
-                                               (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
-                                                errmsg("interval out of range")));
+                                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                                errmsg("unit \"%s\" not supported for type %s",
+                                                               lowunits, format_type_be(INTERVALOID)),
+                                                (val == DTK_WEEK) ? errdetail("Months usually have fractional weeks.") : 0));
+               }
+
+               if (itm2interval(tm, result) != 0)
+                       ereport(ERROR,
+                                       (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+                                        errmsg("interval out of range")));
        }
        else
        {
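
A note on the hunk above: the switch is a deliberate fall-through cascade, where
truncating to a coarse unit zeroes that unit's subfield and then falls into
every finer case. A self-contained sketch of the same cascade, using a
hypothetical struct in place of the backend's interval/itm types:

#include <stdio.h>

/* Hypothetical stand-ins for the backend's interval fields; all are
 * zero-based counts, so zero is the floor of every field. */
struct parts
{
	int			year;
	int			mon;
	int			mday;
	int			hour;
	int			min;
	int			sec;
};

enum unit
{
	UNIT_YEAR, UNIT_MONTH, UNIT_DAY, UNIT_HOUR, UNIT_MINUTE
};

/* Each case clears one field and deliberately falls through to clear
 * all finer ones, exactly the shape of the switch in the hunk above. */
static void
trunc_parts(struct parts *t, enum unit u)
{
	switch (u)
	{
		case UNIT_YEAR:
			t->mon = 0;
			/* FALL THRU */
		case UNIT_MONTH:
			t->mday = 0;
			/* FALL THRU */
		case UNIT_DAY:
			t->hour = 0;
			/* FALL THRU */
		case UNIT_HOUR:
			t->min = 0;
			/* FALL THRU */
		case UNIT_MINUTE:
			t->sec = 0;
			break;
	}
}

int
main(void)
{
	struct parts p = {1, 5, 20, 7, 30, 45};

	trunc_parts(&p, UNIT_MONTH);	/* clears mday, hour, min, sec */
	printf("%d years %d mons %d days\n", p.year, p.mon, p.mday);
	return 0;
}
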
@@ -5225,80 +5226,80 @@ interval_part_common(PG_FUNCTION_ARGS, bool retnumeric)
        if (type == UNITS)
        {
                interval2itm(*interval, tm);
-                       switch (val)
-                       {
-                               case DTK_MICROSEC:
-                                       intresult = tm->tm_sec * INT64CONST(1000000) + tm->tm_usec;
-                                       break;
+               switch (val)
+               {
+                       case DTK_MICROSEC:
+                               intresult = tm->tm_sec * INT64CONST(1000000) + tm->tm_usec;
+                               break;
 
-                               case DTK_MILLISEC:
-                                       if (retnumeric)
-                                               /*---
-                                                * tm->tm_sec * 1000 + fsec / 1000
-                                                * = (tm->tm_sec * 1'000'000 + fsec) / 1000
-                                                */
-                                               PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 3));
-                                       else
-                                               PG_RETURN_FLOAT8(tm->tm_sec * 1000.0 + tm->tm_usec / 1000.0);
-                                       break;
+                       case DTK_MILLISEC:
+                               if (retnumeric)
+                                       /*---
+                                        * tm->tm_sec * 1000 + fsec / 1000
+                                        * = (tm->tm_sec * 1'000'000 + fsec) / 1000
+                                        */
+                                       PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 3));
+                               else
+                                       PG_RETURN_FLOAT8(tm->tm_sec * 1000.0 + tm->tm_usec / 1000.0);
+                               break;
 
-                               case DTK_SECOND:
-                                       if (retnumeric)
-                                               /*---
-                                                * tm->tm_sec + fsec / 1'000'000
-                                                * = (tm->tm_sec * 1'000'000 + fsec) / 1'000'000
-                                                */
-                                               PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 6));
-                                       else
-                                               PG_RETURN_FLOAT8(tm->tm_sec + tm->tm_usec / 1000000.0);
-                                       break;
+                       case DTK_SECOND:
+                               if (retnumeric)
+                                       /*---
+                                        * tm->tm_sec + fsec / 1'000'000
+                                        * = (tm->tm_sec * 1'000'000 + fsec) / 1'000'000
+                                        */
+                                       PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 6));
+                               else
+                                       PG_RETURN_FLOAT8(tm->tm_sec + tm->tm_usec / 1000000.0);
+                               break;
 
-                               case DTK_MINUTE:
-                                       intresult = tm->tm_min;
-                                       break;
+                       case DTK_MINUTE:
+                               intresult = tm->tm_min;
+                               break;
 
-                               case DTK_HOUR:
-                                       intresult = tm->tm_hour;
-                                       break;
+                       case DTK_HOUR:
+                               intresult = tm->tm_hour;
+                               break;
 
-                               case DTK_DAY:
-                                       intresult = tm->tm_mday;
-                                       break;
+                       case DTK_DAY:
+                               intresult = tm->tm_mday;
+                               break;
 
-                               case DTK_MONTH:
-                                       intresult = tm->tm_mon;
-                                       break;
+                       case DTK_MONTH:
+                               intresult = tm->tm_mon;
+                               break;
 
-                               case DTK_QUARTER:
-                                       intresult = (tm->tm_mon / 3) + 1;
-                                       break;
+                       case DTK_QUARTER:
+                               intresult = (tm->tm_mon / 3) + 1;
+                               break;
 
-                               case DTK_YEAR:
-                                       intresult = tm->tm_year;
-                                       break;
+                       case DTK_YEAR:
+                               intresult = tm->tm_year;
+                               break;
 
-                               case DTK_DECADE:
-                                       /* caution: C division may have negative remainder */
-                                       intresult = tm->tm_year / 10;
-                                       break;
+                       case DTK_DECADE:
+                               /* caution: C division may have negative remainder */
+                               intresult = tm->tm_year / 10;
+                               break;
 
-                               case DTK_CENTURY:
-                                       /* caution: C division may have negative remainder */
-                                       intresult = tm->tm_year / 100;
-                                       break;
+                       case DTK_CENTURY:
+                               /* caution: C division may have negative remainder */
+                               intresult = tm->tm_year / 100;
+                               break;
 
-                               case DTK_MILLENNIUM:
-                                       /* caution: C division may have negative remainder */
-                                       intresult = tm->tm_year / 1000;
-                                       break;
+                       case DTK_MILLENNIUM:
+                               /* caution: C division may have negative remainder */
+                               intresult = tm->tm_year / 1000;
+                               break;
 
-                               default:
-                                       ereport(ERROR,
-                                                       (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                        errmsg("unit \"%s\" not supported for type %s",
-                                                                       lowunits, format_type_be(INTERVALOID))));
-                                       intresult = 0;
-                       }
+                       default:
+                               ereport(ERROR,
+                                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                                errmsg("unit \"%s\" not supported for type %s",
+                                                               lowunits, format_type_be(INTERVALOID))));
+                               intresult = 0;
+               }
        }
        else if (type == RESERV && val == DTK_EPOCH)
        {
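
A note on the hunk above: the DTK_MILLISEC and DTK_SECOND numeric branches avoid
float rounding by folding seconds and microseconds into one 64-bit microsecond
count and then shifting the decimal point by three or six digits. A small
standalone sketch of that arithmetic (int64_div_fast_to_numeric itself is not
reproduced; the float prints stand in for it):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int64_t		sec = 12;
	int64_t		usec = 345678;
	/* fold both fields into a single microsecond count */
	int64_t		total_usec = sec * INT64_C(1000000) + usec;

	/* milliseconds = total_usec / 10^3, seconds = total_usec / 10^6 */
	printf("%.3f ms\n", total_usec / 1000.0);	/* 12345.678 ms */
	printf("%.6f s\n", total_usec / 1000000.0); /* 12.345678 s */
	return 0;
}
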
index a157f864e129ef90e2c6fbeb4f37e654a4fadbd0..7cec9372485f9e54adb75bf85600bed0d769e8ee 100644 (file)
@@ -377,8 +377,8 @@ uuid_abbrev_convert(Datum original, SortSupport ssup)
         *
         * This is needed so that ssup_datum_unsigned_cmp() (an unsigned integer
         * 3-way comparator) works correctly on all platforms.  If we didn't do
-        * this, the comparator would have to call memcmp() with a pair of pointers
-        * to the first byte of each abbreviated key, which is slower.
+        * this, the comparator would have to call memcmp() with a pair of
+        * pointers to the first byte of each abbreviated key, which is slower.
         */
        res = DatumBigEndianToNative(res);
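
A note on the rewrapped comment above: once the abbreviated key's leading bytes
are loaded in big-endian order, a single unsigned integer comparison sorts
identically to memcmp() on those bytes, which is the whole point of the
DatumBigEndianToNative() call. A self-contained sketch of the equivalence, with
a hypothetical load_be64() helper in place of the backend's conversion:

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: big-endian load, endian-neutral by construction. */
static uint64_t
load_be64(const unsigned char *p)
{
	uint64_t	v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* Same order as memcmp(a, b, 8), but a single integer comparison. */
static int
abbrev_cmp(const unsigned char *a, const unsigned char *b)
{
	uint64_t	va = load_be64(a);
	uint64_t	vb = load_be64(b);

	return (va < vb) ? -1 : (va > vb) ? 1 : 0;
}

int
main(void)
{
	const unsigned char a[8] = {0, 0, 0, 0, 0, 0, 1, 0};
	const unsigned char b[8] = {0, 0, 0, 0, 0, 0, 0, 255};

	/* both orders agree that a sorts after b */
	return !(abbrev_cmp(a, b) > 0 && memcmp(a, b, 8) > 0);
}
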
 
index 8b5b30ed714812886e98af74cc2a6e4fbe7d978c..bbeb0a2653a89dccc916268f12c6c2e81585a273 100644 (file)
@@ -744,7 +744,7 @@ bpchareq(PG_FUNCTION_ARGS)
        bool            result;
        Oid                     collid = PG_GET_COLLATION();
        bool            locale_is_c = false;
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
 
        check_collation_set(collid);
 
@@ -789,7 +789,7 @@ bpcharne(PG_FUNCTION_ARGS)
        bool            result;
        Oid                     collid = PG_GET_COLLATION();
        bool            locale_is_c = false;
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
 
        check_collation_set(collid);
 
index cfc135c7beb1e2e4f17cef9b4c1579e196613eb5..919138eaf329b42de1d4e7ca54b6c5d06a56f316 100644 (file)
@@ -1758,7 +1758,7 @@ texteq(PG_FUNCTION_ARGS)
 {
        Oid                     collid = PG_GET_COLLATION();
        bool            locale_is_c = false;
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        bool            result;
 
        check_collation_set(collid);
@@ -1817,7 +1817,7 @@ textne(PG_FUNCTION_ARGS)
 {
        Oid                     collid = PG_GET_COLLATION();
        bool            locale_is_c = false;
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        bool            result;
 
        check_collation_set(collid);
@@ -2674,8 +2674,8 @@ done:
         *
         * This is needed so that ssup_datum_unsigned_cmp() (an unsigned integer
         * 3-way comparator) works correctly on all platforms.  If we didn't do
-        * this, the comparator would have to call memcmp() with a pair of pointers
-        * to the first byte of each abbreviated key, which is slower.
+        * this, the comparator would have to call memcmp() with a pair of
+        * pointers to the first byte of each abbreviated key, which is slower.
         */
        res = DatumBigEndianToNative(res);
 
index 4cf6db504ff7d8d8e4eab89507ed3823a1e738fb..0d6a295674873c06d0c86bca02bcab45fac4d166 100644 (file)
@@ -689,10 +689,10 @@ RevalidateCachedQuery(CachedPlanSource *plansource,
                                                                                          queryEnv);
        else
                tlist = pg_analyze_and_rewrite_fixedparams(rawtree,
-                                                                          plansource->query_string,
-                                                                          plansource->param_types,
-                                                                          plansource->num_params,
-                                                                          queryEnv);
+                                                                                                  plansource->query_string,
+                                                                                                  plansource->param_types,
+                                                                                                  plansource->num_params,
+                                                                                                  queryEnv);
 
        /* Release snapshot if we got one */
        if (snapshot_set)
index 43f14c233d6af7a661c105fb205240d6a3d95667..60e72f9e8bf435b26e2ef1c4b0b4b0a119f819d1 100644 (file)
@@ -5107,7 +5107,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
        Bitmapset  *uindexattrs;        /* columns in unique indexes */
        Bitmapset  *pkindexattrs;       /* columns in the primary index */
        Bitmapset  *idindexattrs;       /* columns in the replica identity */
-       Bitmapset  *hotblockingattrs;   /* columns with HOT blocking indexes */
+       Bitmapset  *hotblockingattrs;   /* columns with HOT blocking indexes */
        List       *indexoidlist;
        List       *newindexoidlist;
        Oid                     relpkindex;
@@ -5237,7 +5237,7 @@ restart:
                        {
                                if (indexDesc->rd_indam->amhotblocking)
                                        hotblockingattrs = bms_add_member(hotblockingattrs,
-                                                                                                attrnum - FirstLowInvalidHeapAttributeNumber);
+                                                                                                         attrnum - FirstLowInvalidHeapAttributeNumber);
 
                                if (isKey && i < indexDesc->rd_index->indnkeyatts)
                                        uindexattrs = bms_add_member(uindexattrs,
@@ -5258,9 +5258,9 @@ restart:
                        pull_varattnos(indexExpressions, 1, &hotblockingattrs);
 
                /*
-                * Collect all attributes in the index predicate, too. We have to ignore
-                * amhotblocking flag, because the row might become indexable, in which
-                * case we have to add it to the index.
+                * Collect all attributes in the index predicate, too. We have to
+                * ignore the amhotblocking flag, because the row might become indexable,
+                * in which case we have to add it to the index.
                 */
                pull_varattnos(indexPredicate, 1, &hotblockingattrs);
 
@@ -5308,9 +5308,8 @@ restart:
        /*
         * Now save copies of the bitmaps in the relcache entry.  We intentionally
         * set rd_attrsvalid last, because that's what signals validity of the
-        * values; if we run out of memory before making that copy, we won't
-        * leave the relcache entry looking like the other ones are valid but
-        * empty.
+        * values; if we run out of memory before making that copy, we won't leave
+        * the relcache entry looking like the other ones are valid but empty.
         */
        oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
        relation->rd_keyattr = bms_copy(uindexattrs);
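
A note on the comment above: it describes an ordering rule, not just wording.
Populate every cached field first and set the one validity flag last, so that a
failure partway through leaves the entry visibly invalid rather than plausible
but empty. A minimal sketch of the pattern, with hypothetical names in place of
the relcache fields:

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-in for a relcache-style entry. */
typedef struct
{
	char	   *keys;
	char	   *ids;
	bool		attrs_valid;	/* set last: this alone signals validity */
} CacheEntry;

/* Returns false on allocation failure; the entry then stays invalid. */
static bool
fill_entry(CacheEntry *e, const char *keys, const char *ids)
{
	e->attrs_valid = false;
	e->keys = strdup(keys);
	e->ids = strdup(ids);
	if (e->keys == NULL || e->ids == NULL)
		return false;			/* attrs_valid was never set */
	e->attrs_valid = true;		/* only now is the entry trustworthy */
	return true;
}

int
main(void)
{
	CacheEntry	e;

	return fill_entry(&e, "k", "i") ? 0 : 1;
}
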
@@ -5636,8 +5635,8 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
                pubdesc->pubactions.pubtruncate |= pubform->pubtruncate;
 
                /*
-                * Check if all columns referenced in the filter expression are part of
-                * the REPLICA IDENTITY index or not.
+                * Check if all columns referenced in the filter expression are part
+                * of the REPLICA IDENTITY index or not.
                 *
                 * If the publication is FOR ALL TABLES then it means the table has no
                 * row filters and we can skip the validation.
@@ -5645,7 +5644,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
                if (!pubform->puballtables &&
                        (pubform->pubupdate || pubform->pubdelete) &&
                        pub_rf_contains_invalid_column(pubid, relation, ancestors,
-                                                                        pubform->pubviaroot))
+                                                                                  pubform->pubviaroot))
                {
                        if (pubform->pubupdate)
                                pubdesc->rf_valid_for_update = false;
@@ -5662,7 +5661,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
                if (!pubform->puballtables &&
                        (pubform->pubupdate || pubform->pubdelete) &&
                        pub_collist_contains_invalid_column(pubid, relation, ancestors,
-                                                                        pubform->pubviaroot))
+                                                                                               pubform->pubviaroot))
                {
                        if (pubform->pubupdate)
                                pubdesc->cols_valid_for_update = false;
index 75a3aedc5af79e7d8f4d665a84cf07cbd468993c..2a330cf3ba40e094d55c155b07fc6b8e6e8b83c2 100644 (file)
@@ -287,7 +287,7 @@ RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId)
 void
 RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath, char *dstdbpath)
 {
-       RelMapFile map;
+       RelMapFile      map;
 
        /*
         * Read the relmap file from the source database.
@@ -302,8 +302,8 @@ RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath, char *dstdbpath)
         * RelationMappingLock.
         *
         * There's no point in trying to preserve files here. The new database
-        * isn't usable yet anyway, and won't ever be if we can't install a
-        * relmap file.
+        * isn't usable yet anyway, and won't ever be if we can't install a relmap
+        * file.
         */
        write_relmap_file(&map, true, false, false, dbid, tsid, dstdbpath);
 }
@@ -1089,11 +1089,11 @@ relmap_redo(XLogReaderState *record)
                 * There shouldn't be anyone else updating relmaps during WAL replay,
                 * but grab the lock to interlock against load_relmap_file().
                 *
-                * Note that we use the same WAL record for updating the relmap of
-                * an existing database as we do for creating a new database. In
-                * the latter case, taking the relmap log and sending sinval messages
-                * is unnecessary, but harmless. If we wanted to avoid it, we could
-                * add a flag to the WAL record to indicate which operation is being
+                * Note that we use the same WAL record for updating the relmap of an
+                * existing database as we do for creating a new database. In the
+                * latter case, taking the relmap lock and sending sinval messages is
+                * unnecessary, but harmless. If we wanted to avoid it, we could add a
+                * flag to the WAL record to indicate which operation is being
                 * performed.
                 */
                LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);
index d297ba082951870de11614a9063853b82ef48fed..fa701daa26f9326329adda3d36ada3fe3ac628e2 100644 (file)
@@ -429,6 +429,7 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect
                iculocale = NULL;
 
        default_locale.provider = dbform->datlocprovider;
+
        /*
         * Default locale is currently always deterministic.  Nondeterministic
         * locales currently don't support pattern matching, which would break a
@@ -604,8 +605,8 @@ BaseInit(void)
        InitTemporaryFileAccess();
 
        /*
-        * Initialize local buffers for WAL record construction, in case we
-        * ever try to insert XLOG.
+        * Initialize local buffers for WAL record construction, in case we ever
+        * try to insert XLOG.
         */
        InitXLogInsert();
 
@@ -693,10 +694,10 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
        }
 
        /*
-        * If this is either a bootstrap process or a standalone backend, start
-        * up the XLOG machinery, and register to have it closed down at exit.
-        * In other cases, the startup process is responsible for starting up
-        * the XLOG machinery, and the checkpointer for closing it down.
+        * If this is either a bootstrap process or a standalone backend, start up
+        * the XLOG machinery, and register to have it closed down at exit. In
+        * other cases, the startup process is responsible for starting up the
+        * XLOG machinery, and the checkpointer for closing it down.
         */
        if (!IsUnderPostmaster)
        {
@@ -1241,7 +1242,8 @@ ShutdownPostgres(int code, Datum arg)
         */
 #ifdef USE_ASSERT_CHECKING
        {
-               int held_lwlocks = LWLockHeldCount();
+               int                     held_lwlocks = LWLockHeldCount();
+
                if (held_lwlocks)
                        elog(WARNING, "holding %d lwlocks at the end of ShutdownPostgres()",
                                 held_lwlocks);
index 2ffa014618f344bbc3852b770ec68afe09d0435f..d35027275f173086c297bcb21458d5f6e4d79c74 100644 (file)
@@ -787,7 +787,7 @@ JumbleExpr(JumbleState *jstate, Node *node)
                        break;
                case T_JsonExpr:
                        {
-                               JsonExpr    *jexpr = (JsonExpr *) node;
+                               JsonExpr   *jexpr = (JsonExpr *) node;
 
                                APP_JUMB(jexpr->op);
                                JumbleExpr(jstate, jexpr->formatted_expr);
index a4c3b736678295fa4ada18384e7de9beb81c70e5..8340a66052614ac949e83bab6c1dce7eb643fe6c 100644 (file)
@@ -469,7 +469,7 @@ struct Tuplesortstate
 
        /* These are specific to the index_btree subcase: */
        bool            enforceUnique;  /* complain if we find duplicate tuples */
-       bool            uniqueNullsNotDistinct; /* unique constraint null treatment */
+       bool            uniqueNullsNotDistinct; /* unique constraint null treatment */
 
        /* These are specific to the index_hash subcase: */
        uint32          high_mask;              /* masks for sortable part of hash code */
@@ -706,8 +706,8 @@ qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
                return compare;
 
        /*
-        * No need to waste effort calling the tiebreak function when there are
-        * no other keys to sort on.
+        * No need to waste effort calling the tiebreak function when there are no
+        * other keys to sort on.
         */
        if (state->onlyKey != NULL)
                return 0;
@@ -730,8 +730,8 @@ qsort_tuple_signed_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
                return compare;
 
        /*
-        * No need to waste effort calling the tiebreak function when there are
-        * no other keys to sort on.
+        * No need to waste effort calling the tiebreak function when there are no
+        * other keys to sort on.
         */
        if (state->onlyKey != NULL)
                return 0;
@@ -747,15 +747,15 @@ qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
        int                     compare;
 
        compare = ApplyInt32SortComparator(a->datum1, a->isnull1,
-                                                                               b->datum1, b->isnull1,
-                                                                               &state->sortKeys[0]);
+                                                                          b->datum1, b->isnull1,
+                                                                          &state->sortKeys[0]);
 
        if (compare != 0)
                return compare;
 
        /*
-        * No need to waste effort calling the tiebreak function when there are
-        * no other keys to sort on.
+        * No need to waste effort calling the tiebreak function when there are no
+        * other keys to sort on.
         */
        if (state->onlyKey != NULL)
                return 0;
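
A note on the three comparators above: they share one shape. Compare the cheap
leading key first, and when the sort has no other keys (state->onlyKey is set),
treat equality on it as final instead of paying for the tiebreak. A sketch of
that shape with hypothetical stand-in types:

/* Hypothetical stand-ins for SortTuple and the tiebreak machinery. */
typedef struct
{
	int			key1;			/* cheap leading key */
	int			key2;			/* only consulted by the tiebreak */
} SortItem;

static int
tiebreak(const SortItem *a, const SortItem *b)
{
	return (a->key2 > b->key2) - (a->key2 < b->key2);
}

static int
item_cmp(const SortItem *a, const SortItem *b, int nkeys)
{
	int			compare = (a->key1 > b->key1) - (a->key1 < b->key1);

	if (compare != 0)
		return compare;

	/* no other keys to sort on: skip the tiebreak entirely */
	if (nkeys == 1)
		return 0;

	return tiebreak(a, b);
}

int
main(void)
{
	SortItem	a = {1, 2};
	SortItem	b = {1, 3};

	return item_cmp(&a, &b, 2) < 0 ? 0 : 1;	/* tiebreak decides */
}
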
index fcef651c2fc6e13c18eb993ea676be7e2c717036..ed6de7ca941346be6cd8db347601198961e68d30 100644 (file)
@@ -1808,8 +1808,8 @@ make_template0(FILE *cmdfd)
         * the new cluster should be the result of a fresh initdb.)
         *
         * We use "STRATEGY = file_copy" here because checkpoints during initdb
-        * are cheap. "STRATEGY = wal_log" would generate more WAL, which would
-        * be a little bit slower and make the new cluster a little bit bigger.
+        * are cheap. "STRATEGY = wal_log" would generate more WAL, which would be
+        * a little bit slower and make the new cluster a little bit bigger.
         */
        static const char *const template0_setup[] = {
                "CREATE DATABASE template0 IS_TEMPLATE = true ALLOW_CONNECTIONS = false"
index a3397777cf256cd67166c608f20efe070d37f764..a37f6dd9b334b6ee22d9fdd4d51422795cb54a39 100644 (file)
@@ -97,27 +97,45 @@ SKIP:
 
 if ($ENV{with_icu} eq 'yes')
 {
-       command_fails_like(['initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2"],
+       command_fails_like(
+               [ 'initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2" ],
                qr/initdb: error: ICU locale must be specified/,
                'locale provider ICU requires --icu-locale');
 
-       command_ok(['initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=en', "$tempdir/data3"],
+       command_ok(
+               [
+                       'initdb',                '--no-sync',
+                       '--locale-provider=icu', '--icu-locale=en',
+                       "$tempdir/data3"
+               ],
                'option --icu-locale');
 
-       command_fails_like(['initdb', '--no-sync', '--locale-provider=icu', '--icu-locale=@colNumeric=lower', "$tempdir/dataX"],
+       command_fails_like(
+               [
+                       'initdb',                '--no-sync',
+                       '--locale-provider=icu', '--icu-locale=@colNumeric=lower',
+                       "$tempdir/dataX"
+               ],
                qr/FATAL:  could not open collator for locale/,
                'fails for invalid ICU locale');
 }
 else
 {
-       command_fails(['initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2"],
-                                 'locale provider ICU fails since no ICU support');
+       command_fails(
+               [ 'initdb', '--no-sync', '--locale-provider=icu', "$tempdir/data2" ],
+               'locale provider ICU fails since no ICU support');
 }
 
-command_fails(['initdb', '--no-sync', '--locale-provider=xyz', "$tempdir/dataX"],
-                         'fails for invalid locale provider');
+command_fails(
+       [ 'initdb', '--no-sync', '--locale-provider=xyz', "$tempdir/dataX" ],
+       'fails for invalid locale provider');
 
-command_fails(['initdb', '--no-sync', '--locale-provider=libc', '--icu-locale=en', "$tempdir/dataX"],
-                         'fails for invalid option combination');
+command_fails(
+       [
+               'initdb',                 '--no-sync',
+               '--locale-provider=libc', '--icu-locale=en',
+               "$tempdir/dataX"
+       ],
+       'fails for invalid option combination');
 
 done_testing();
index 48cee8c1c4e8f25eeb4fb6d33265b28acd3cea9c..f0b818e987ae33ac90043ee964048eb1fed92d74 100644 (file)
@@ -1074,17 +1074,17 @@ verify_btree_slot_handler(PGresult *res, PGconn *conn, void *context)
 
        if (PQresultStatus(res) == PGRES_TUPLES_OK)
        {
-               int                     ntups = PQntuples(res);
+               int                     ntups = PQntuples(res);
 
                if (ntups > 1)
                {
                        /*
                         * We expect the btree checking functions to return one void row
                         * each, or zero rows if the check was skipped due to the object
-                        * being in the wrong state to be checked, so we should output some
-                        * sort of warning if we get anything more, not because it
-                        * indicates corruption, but because it suggests a mismatch between
-                        * amcheck and pg_amcheck versions.
+                        * being in the wrong state to be checked, so we should output
+                        * some sort of warning if we get anything more, not because it
+                        * indicates corruption, but because it suggests a mismatch
+                        * between amcheck and pg_amcheck versions.
                         *
                         * In conjunction with --progress, anything written to stderr at
                         * this time would present strangely to the user without an extra
index 6c0f97027ddb78cd46ca40c884ce7d188f8e295e..0c07016aa0c55053d8f2534cb3207480bdba28cd 100644 (file)
@@ -155,8 +155,7 @@ $node->command_checks_all(
        [
                qr/pg_amcheck: error: improper qualified name \(too many dotted names\): localhost\.postgres/
        ],
-       'multipart database patterns are rejected'
-);
+       'multipart database patterns are rejected');
 
 # Check that a three-part schema name is rejected
 $node->command_checks_all(
@@ -166,8 +165,7 @@ $node->command_checks_all(
        [
                qr/pg_amcheck: error: improper qualified name \(too many dotted names\): localhost\.postgres\.pg_catalog/
        ],
-       'three part schema patterns are rejected'
-);
+       'three part schema patterns are rejected');
 
 # Check that a four-part table name is rejected
 $node->command_checks_all(
@@ -177,39 +175,44 @@ $node->command_checks_all(
        [
                qr/pg_amcheck: error: improper relation name \(too many dotted names\): localhost\.postgres\.pg_catalog\.pg_class/
        ],
-       'four part table patterns are rejected'
-);
+       'four part table patterns are rejected');
 
 # Check that too many dotted names still draws an error under --no-strict-names
 # That flag means that it is ok for the object to be missing, not that it is ok
 # for the object name to be ungrammatical
 $node->command_checks_all(
-       [ 'pg_amcheck', '--no-strict-names', '-t', 'this.is.a.really.long.dotted.string' ],
+       [
+               'pg_amcheck', '--no-strict-names',
+               '-t',         'this.is.a.really.long.dotted.string'
+       ],
        2,
        [qr/^$/],
        [
                qr/pg_amcheck: error: improper relation name \(too many dotted names\): this\.is\.a\.really\.long\.dotted\.string/
        ],
-       'ungrammatical table names still draw errors under --no-strict-names'
-);
+       'ungrammatical table names still draw errors under --no-strict-names');
 $node->command_checks_all(
-       [ 'pg_amcheck', '--no-strict-names', '-s', 'postgres.long.dotted.string' ],
+       [
+               'pg_amcheck', '--no-strict-names', '-s',
+               'postgres.long.dotted.string'
+       ],
        2,
        [qr/^$/],
        [
                qr/pg_amcheck: error: improper qualified name \(too many dotted names\): postgres\.long\.dotted\.string/
        ],
-       'ungrammatical schema names still draw errors under --no-strict-names'
-);
+       'ungrammatical schema names still draw errors under --no-strict-names');
 $node->command_checks_all(
-       [ 'pg_amcheck', '--no-strict-names', '-d', 'postgres.long.dotted.string' ],
+       [
+               'pg_amcheck', '--no-strict-names', '-d',
+               'postgres.long.dotted.string'
+       ],
        2,
        [qr/^$/],
        [
                qr/pg_amcheck: error: improper qualified name \(too many dotted names\): postgres\.long\.dotted\.string/
        ],
-       'ungrammatical database names still draw errors under --no-strict-names'
-);
+       'ungrammatical database names still draw errors under --no-strict-names');
 
 # Likewise for exclusion patterns
 $node->command_checks_all(
@@ -262,7 +265,7 @@ $node->command_checks_all(
                '-r',         'postgres.none.none',
                '-r',         'postgres.pg_catalog.none',
                '-r',         'postgres.none.pg_class',
-               '-t',         'postgres.pg_catalog.pg_class',          # This exists
+               '-t',         'postgres.pg_catalog.pg_class',    # This exists
        ],
        0,
        [qr/^$/],
index a5e820827007111da0194682939f3552cd43e670..ce376f239cf3696369ef3c3c766a5121c7e31468 100644 (file)
@@ -33,8 +33,7 @@ $node->safe_psql(
 ));
 
 # We have not yet broken the index, so we should get no corruption
-$node->command_like(
-       [ 'pg_amcheck', '-p', $node->port, 'postgres' ],
+$node->command_like([ 'pg_amcheck', '-p', $node->port, 'postgres' ],
        qr/^$/,
        'pg_amcheck all schemas, tables and indexes reports no corruption');
 
index 393e9f340ced603e279fb926d03df60a02c1f762..1a94fb2796ceb30c750104459db9238734af79ba 100644 (file)
@@ -34,7 +34,7 @@ typedef struct bbstreamer_extractor
        void            (*report_output_file) (const char *);
        char            filename[MAXPGPATH];
        FILE       *file;
-}                      bbstreamer_extractor;
+} bbstreamer_extractor;
 
 static void bbstreamer_plain_writer_content(bbstreamer *streamer,
                                                                                        bbstreamer_member *member,
@@ -356,7 +356,7 @@ static void
 bbstreamer_extractor_finalize(bbstreamer *streamer)
 {
        bbstreamer_extractor *mystreamer PG_USED_FOR_ASSERTS_ONLY
-               = (bbstreamer_extractor *) streamer;
+       = (bbstreamer_extractor *) streamer;
 
        Assert(mystreamer->file == NULL);
 }
index b3bfcd62ac30ad13234ed02a123546456570cad9..e7261910d8109131ff8af580a5f322a41e93b765 100644 (file)
@@ -28,7 +28,7 @@ typedef struct bbstreamer_gzip_writer
        bbstreamer      base;
        char       *pathname;
        gzFile          gzfile;
-}                      bbstreamer_gzip_writer;
+} bbstreamer_gzip_writer;
 
 typedef struct bbstreamer_gzip_decompressor
 {
@@ -52,9 +52,9 @@ const bbstreamer_ops bbstreamer_gzip_writer_ops = {
 };
 
 static void bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
-                                                                                         bbstreamer_member *member,
-                                                                                         const char *data, int len,
-                                                                                         bbstreamer_archive_context context);
+                                                                                                bbstreamer_member *member,
+                                                                                                const char *data, int len,
+                                                                                                bbstreamer_archive_context context);
 static void bbstreamer_gzip_decompressor_finalize(bbstreamer *streamer);
 static void bbstreamer_gzip_decompressor_free(bbstreamer *streamer);
 static void *gzip_palloc(void *opaque, unsigned items, unsigned size);
@@ -214,8 +214,8 @@ bbstreamer *
 bbstreamer_gzip_decompressor_new(bbstreamer *next)
 {
 #ifdef HAVE_LIBZ
-       bbstreamer_gzip_decompressor    *streamer;
-       z_stream *zs;
+       bbstreamer_gzip_decompressor *streamer;
+       z_stream   *zs;
 
        Assert(next != NULL);
 
@@ -261,12 +261,12 @@ bbstreamer_gzip_decompressor_new(bbstreamer *next)
  */
 static void
 bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
-                                                                 bbstreamer_member *member,
-                                                                 const char *data, int len,
-                                                                 bbstreamer_archive_context context)
+                                                                        bbstreamer_member *member,
+                                                                        const char *data, int len,
+                                                                        bbstreamer_archive_context context)
 {
        bbstreamer_gzip_decompressor *mystreamer;
-       z_stream *zs;
+       z_stream   *zs;
 
        mystreamer = (bbstreamer_gzip_decompressor *) streamer;
 
@@ -277,7 +277,7 @@ bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
        /* Process the current chunk */
        while (zs->avail_in > 0)
        {
-               int res;
+               int                     res;
 
                Assert(mystreamer->bytes_written < mystreamer->base.bbs_buffer.maxlen);
 
@@ -288,8 +288,9 @@ bbstreamer_gzip_decompressor_content(bbstreamer *streamer,
 
                /*
                 * This call decompresses data starting at zs->next_in and updates
-                * zs->next_in * and zs->avail_in. It generates output data starting at
-                * zs->next_out and updates zs->next_out and zs->avail_out accordingly.
+                * zs->next_in and zs->avail_in. It generates output data starting
+                * at zs->next_out and updates zs->next_out and zs->avail_out
+                * accordingly.
                 */
                res = inflate(zs, Z_NO_FLUSH);
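
A note on the loop above: it is zlib's standard streaming pattern, pointing
next_in/avail_in at the incoming chunk and giving inflate() a fresh output
window until the chunk is consumed. A minimal sketch of that loop, assuming the
caller has already run inflateInit() on zs; error handling and the downstream
hand-off are trimmed to comments:

#include <stddef.h>
#include <zlib.h>

int
inflate_chunk(z_stream *zs, const unsigned char *in, size_t inlen,
			  unsigned char *out, size_t outlen)
{
	zs->next_in = (Bytef *) in;
	zs->avail_in = (uInt) inlen;

	while (zs->avail_in > 0)
	{
		int			res;

		zs->next_out = out;
		zs->avail_out = (uInt) outlen;

		/* consumes from next_in/avail_in, produces into
		 * next_out/avail_out, updating all four fields */
		res = inflate(zs, Z_NO_FLUSH);
		if (res != Z_OK && res != Z_STREAM_END)
			return -1;

		/* ... forward (outlen - zs->avail_out) bytes downstream ... */

		if (res == Z_STREAM_END)
			break;
	}
	return 0;
}
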
 
index 6070a72cdb53e6b8740f8df751b84b64cadf681f..b9752354c910325f606e7ae93e72a8f603127ffa 100644 (file)
@@ -27,9 +27,9 @@ typedef struct bbstreamer_lz4_frame
 {
        bbstreamer      base;
 
-       LZ4F_compressionContext_t       cctx;
-       LZ4F_decompressionContext_t     dctx;
-       LZ4F_preferences_t                      prefs;
+       LZ4F_compressionContext_t cctx;
+       LZ4F_decompressionContext_t dctx;
+       LZ4F_preferences_t prefs;
 
        size_t          bytes_written;
        bool            header_written;
@@ -70,9 +70,9 @@ bbstreamer *
 bbstreamer_lz4_compressor_new(bbstreamer *next, pg_compress_specification *compress)
 {
 #ifdef USE_LZ4
-       bbstreamer_lz4_frame   *streamer;
-       LZ4F_errorCode_t                ctxError;
-       LZ4F_preferences_t         *prefs;
+       bbstreamer_lz4_frame *streamer;
+       LZ4F_errorCode_t ctxError;
+       LZ4F_preferences_t *prefs;
 
        Assert(next != NULL);
 
@@ -119,12 +119,12 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
                                                                  const char *data, int len,
                                                                  bbstreamer_archive_context context)
 {
-       bbstreamer_lz4_frame   *mystreamer;
-       uint8                              *next_in,
-                                                  *next_out;
-       size_t                                  out_bound,
-                                                       compressed_size,
-                                                       avail_out;
+       bbstreamer_lz4_frame *mystreamer;
+       uint8      *next_in,
+                          *next_out;
+       size_t          out_bound,
+                               compressed_size,
+                               avail_out;
 
        mystreamer = (bbstreamer_lz4_frame *) streamer;
        next_in = (uint8 *) data;
@@ -146,8 +146,8 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
        }
 
        /*
-        * Update the offset and capacity of output buffer based on number of bytes
-        * written to output buffer.
+        * Update the offset and capacity of output buffer based on number of
+        * bytes written to output buffer.
         */
        next_out = (uint8 *) mystreamer->base.bbs_buffer.data + mystreamer->bytes_written;
        avail_out = mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written;
@@ -160,18 +160,18 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
        out_bound = LZ4F_compressBound(len, &mystreamer->prefs);
        if (avail_out < out_bound)
        {
-                       bbstreamer_content(mystreamer->base.bbs_next, member,
-                                                          mystreamer->base.bbs_buffer.data,
-                                                          mystreamer->bytes_written,
-                                                          context);
-
-                       /* Enlarge buffer if it falls short of out bound. */
-                       if (mystreamer->base.bbs_buffer.maxlen < out_bound)
-                               enlargeStringInfo(&mystreamer->base.bbs_buffer, out_bound);
-
-                       avail_out = mystreamer->base.bbs_buffer.maxlen;
-                       mystreamer->bytes_written = 0;
-                       next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+               bbstreamer_content(mystreamer->base.bbs_next, member,
+                                                  mystreamer->base.bbs_buffer.data,
+                                                  mystreamer->bytes_written,
+                                                  context);
+
+               /* Enlarge buffer if it falls short of out bound. */
+               if (mystreamer->base.bbs_buffer.maxlen < out_bound)
+                       enlargeStringInfo(&mystreamer->base.bbs_buffer, out_bound);
+
+               avail_out = mystreamer->base.bbs_buffer.maxlen;
+               mystreamer->bytes_written = 0;
+               next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
        }
 
        /*
@@ -199,11 +199,11 @@ bbstreamer_lz4_compressor_content(bbstreamer *streamer,
 static void
 bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
 {
-       bbstreamer_lz4_frame   *mystreamer;
-       uint8                              *next_out;
-       size_t                                  footer_bound,
-                                                       compressed_size,
-                                                       avail_out;
+       bbstreamer_lz4_frame *mystreamer;
+       uint8      *next_out;
+       size_t          footer_bound,
+                               compressed_size,
+                               avail_out;
 
        mystreamer = (bbstreamer_lz4_frame *) streamer;
 
@@ -212,18 +212,18 @@ bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
        if ((mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written) <
                footer_bound)
        {
-                       bbstreamer_content(mystreamer->base.bbs_next, NULL,
-                                                          mystreamer->base.bbs_buffer.data,
-                                                          mystreamer->bytes_written,
-                                                          BBSTREAMER_UNKNOWN);
-
-                       /* Enlarge buffer if it falls short of footer bound. */
-                       if (mystreamer->base.bbs_buffer.maxlen < footer_bound)
-                               enlargeStringInfo(&mystreamer->base.bbs_buffer, footer_bound);
-
-                       avail_out = mystreamer->base.bbs_buffer.maxlen;
-                       mystreamer->bytes_written = 0;
-                       next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
+               bbstreamer_content(mystreamer->base.bbs_next, NULL,
+                                                  mystreamer->base.bbs_buffer.data,
+                                                  mystreamer->bytes_written,
+                                                  BBSTREAMER_UNKNOWN);
+
+               /* Enlarge buffer if it falls short of footer bound. */
+               if (mystreamer->base.bbs_buffer.maxlen < footer_bound)
+                       enlargeStringInfo(&mystreamer->base.bbs_buffer, footer_bound);
+
+               avail_out = mystreamer->base.bbs_buffer.maxlen;
+               mystreamer->bytes_written = 0;
+               next_out = (uint8 *) mystreamer->base.bbs_buffer.data;
        }
        else
        {
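
A note on the two re-indented blocks above (and their twin in the content
callback): both implement the same buffer discipline. When the worst-case
output bound might not fit in the space remaining, flush what is already
buffered downstream, and only grow the now-empty buffer if it is still smaller
than the bound. A compact sketch, with flush() and grow() as stand-ins for
bbstreamer_content() and enlargeStringInfo():

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	char	   *data;
	size_t		maxlen;
	size_t		used;
} OutBuf;

/* Stand-in for bbstreamer_content(): hand bytes to the next stage. */
static void
flush(const char *data, size_t len)
{
	fwrite(data, 1, len, stdout);
}

/* Stand-in for enlargeStringInfo(); error checks omitted. */
static void
grow(OutBuf *buf, size_t need)
{
	buf->data = realloc(buf->data, need);
	buf->maxlen = need;
}

/* Ensure at least out_bound free bytes, flushing buffered output first. */
void
reserve(OutBuf *buf, size_t out_bound)
{
	if (buf->maxlen - buf->used < out_bound)
	{
		flush(buf->data, buf->used);
		buf->used = 0;
		if (buf->maxlen < out_bound)
			grow(buf, out_bound);
	}
}
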
@@ -258,7 +258,7 @@ bbstreamer_lz4_compressor_finalize(bbstreamer *streamer)
 static void
 bbstreamer_lz4_compressor_free(bbstreamer *streamer)
 {
-       bbstreamer_lz4_frame    *mystreamer;
+       bbstreamer_lz4_frame *mystreamer;
 
        mystreamer = (bbstreamer_lz4_frame *) streamer;
        bbstreamer_free(streamer->bbs_next);
@@ -276,8 +276,8 @@ bbstreamer *
 bbstreamer_lz4_decompressor_new(bbstreamer *next)
 {
 #ifdef USE_LZ4
-       bbstreamer_lz4_frame    *streamer;
-       LZ4F_errorCode_t                ctxError;
+       bbstreamer_lz4_frame *streamer;
+       LZ4F_errorCode_t ctxError;
 
        Assert(next != NULL);
 
@@ -313,11 +313,11 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
                                                                        const char *data, int len,
                                                                        bbstreamer_archive_context context)
 {
-       bbstreamer_lz4_frame   *mystreamer;
-       uint8                              *next_in,
-                                                  *next_out;
-       size_t                                  avail_in,
-                                                       avail_out;
+       bbstreamer_lz4_frame *mystreamer;
+       uint8      *next_in,
+                          *next_out;
+       size_t          avail_in,
+                               avail_out;
 
        mystreamer = (bbstreamer_lz4_frame *) streamer;
        next_in = (uint8 *) data;
@@ -327,9 +327,9 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
 
        while (avail_in > 0)
        {
-               size_t  ret,
-                               read_size,
-                               out_size;
+               size_t          ret,
+                                       read_size,
+                                       out_size;
 
                read_size = avail_in;
                out_size = avail_out;
@@ -362,8 +362,8 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
                mystreamer->bytes_written += out_size;
 
                /*
-                * If output buffer is full then forward the content to next streamer and
-                * update the output buffer.
+                * If output buffer is full then forward the content to next streamer
+                * and update the output buffer.
                 */
                if (mystreamer->bytes_written >= mystreamer->base.bbs_buffer.maxlen)
                {
@@ -390,7 +390,7 @@ bbstreamer_lz4_decompressor_content(bbstreamer *streamer,
 static void
 bbstreamer_lz4_decompressor_finalize(bbstreamer *streamer)
 {
-       bbstreamer_lz4_frame    *mystreamer;
+       bbstreamer_lz4_frame *mystreamer;
 
        mystreamer = (bbstreamer_lz4_frame *) streamer;
 
@@ -412,7 +412,7 @@ bbstreamer_lz4_decompressor_finalize(bbstreamer *streamer)
 static void
 bbstreamer_lz4_decompressor_free(bbstreamer *streamer)
 {
-       bbstreamer_lz4_frame    *mystreamer;
+       bbstreamer_lz4_frame *mystreamer;
 
        mystreamer = (bbstreamer_lz4_frame *) streamer;
        bbstreamer_free(streamer->bbs_next);
index 6be04544763d12b238266aa369da8aa4d56750b6..4adb170d464ea12ef252de4cadc87f70917aa3bd 100644 (file)
@@ -58,7 +58,7 @@ typedef struct TablespaceList
 typedef struct ArchiveStreamState
 {
        int                     tablespacenum;
-       pg_compress_specification   *compress;
+       pg_compress_specification *compress;
        bbstreamer *streamer;
        bbstreamer *manifest_inject_streamer;
        PQExpBuffer manifest_buffer;
@@ -173,6 +173,7 @@ static int  bgpipe[2] = {-1, -1};
 /* Handle to child process */
 static pid_t bgchild = -1;
 static bool in_log_streamer = false;
+
 /* Flag to indicate if child process exited unexpectedly */
 static volatile sig_atomic_t bgchild_exited = false;
 
@@ -567,8 +568,8 @@ LogStreamerMain(logstreamer_param *param)
                 */
 #ifdef WIN32
                /*
-                * In order to signal the main thread of an ungraceful exit we
-                * set the same flag that we use on Unix to signal SIGCHLD.
+                * In order to signal the main thread of an ungraceful exit we set the
+                * same flag that we use on Unix to signal SIGCHLD.
                 */
                bgchild_exited = true;
 #endif
@@ -1010,7 +1011,7 @@ parse_compress_options(char *option, char **algorithm, char **detail,
        }
        else
        {
-               char   *alg;
+               char       *alg;
 
                alg = palloc((sep - option) + 1);
                memcpy(alg, option, sep - option);
@@ -1133,11 +1134,11 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
 
        /*
        * We have to parse the archive if (1) we're supposed to extract it, or if
-        * (2) we need to inject backup_manifest or recovery configuration into it.
-        * However, we only know how to parse tar archives.
+        * (2) we need to inject backup_manifest or recovery configuration into
+        * it. However, we only know how to parse tar archives.
         */
        must_parse_archive = (format == 'p' || inject_manifest ||
-               (spclocation == NULL && writerecoveryconf));
+                                                 (spclocation == NULL && writerecoveryconf));
 
        /* At present, we only know how to parse tar archives. */
        if (must_parse_archive && !is_tar && !is_compressed_tar)
@@ -1178,8 +1179,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
                /*
                 * In tar format, we just write the archive without extracting it.
                 * Normally, we write it to the archive name provided by the caller,
-                * but when the base directory is "-" that means we need to write
-                * to standard output.
+                * but when the base directory is "-" that means we need to write to
+                * standard output.
                 */
                if (strcmp(basedir, "-") == 0)
                {
@@ -1233,16 +1234,16 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
        }
 
        /*
-        * If we're supposed to inject the backup manifest into the results,
-        * it should be done here, so that the file content can be injected
-        * directly, without worrying about the details of the tar format.
+        * If we're supposed to inject the backup manifest into the results, it
+        * should be done here, so that the file content can be injected directly,
+        * without worrying about the details of the tar format.
         */
        if (inject_manifest)
                manifest_inject_streamer = streamer;
 
        /*
-        * If this is the main tablespace and we're supposed to write
-        * recovery information, arrange to do that.
+        * If this is the main tablespace and we're supposed to write recovery
+        * information, arrange to do that.
         */
        if (spclocation == NULL && writerecoveryconf)
        {
@@ -1253,11 +1254,10 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
        }
 
        /*
-        * If we're doing anything that involves understanding the contents of
-        * the archive, we'll need to parse it. If not, we can skip parsing it,
-        * but old versions of the server send improperly terminated tarfiles,
-        * so if we're talking to such a server we'll need to add the terminator
-        * here.
+        * If we're doing anything that involves understanding the contents of the
+        * archive, we'll need to parse it. If not, we can skip parsing it, but
+        * old versions of the server send improperly terminated tarfiles, so if
+        * we're talking to such a server we'll need to add the terminator here.
         */
        if (must_parse_archive)
                streamer = bbstreamer_tar_parser_new(streamer);
@@ -1265,8 +1265,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation,
                streamer = bbstreamer_tar_terminator_new(streamer);
 
        /*
-        * If the user has requested a server compressed archive along with archive
-        * extraction at client then we need to decompress it.
+        * If the user has requested a server compressed archive along with
+        * archive extraction at client then we need to decompress it.
         */
        if (format == 'p')
        {
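
The comments in this function describe when the archive must be run through a tar parser (extraction, or injecting the manifest or recovery settings) versus merely having a terminator appended. A compilable restatement of that predicate, matching the must_parse_archive assignment above (the function and parameter names here are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    /* Parse when extracting plain-format output, or when content must be
     * injected into the main tablespace's archive. */
    static bool
    needs_tar_parse(char format, bool inject_manifest,
                    bool is_main_tablespace, bool writerecoveryconf)
    {
        return format == 'p' || inject_manifest ||
            (is_main_tablespace && writerecoveryconf);
    }

    int
    main(void)
    {
        /* Tar format, nothing injected: pass the archive through unparsed. */
        printf("%d\n", needs_tar_parse('t', false, true, false));  /* 0 */
        /* Plain format always requires parsing so members can be extracted. */
        printf("%d\n", needs_tar_parse('p', false, false, false)); /* 1 */
        return 0;
    }
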
@@ -1848,17 +1848,17 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
        }
        if (maxrate > 0)
                AppendIntegerCommandOption(&buf, use_new_option_syntax, "MAX_RATE",
-                                                                         maxrate);
+                                                                  maxrate);
        if (format == 't')
                AppendPlainCommandOption(&buf, use_new_option_syntax, "TABLESPACE_MAP");
        if (!verify_checksums)
        {
                if (use_new_option_syntax)
                        AppendIntegerCommandOption(&buf, use_new_option_syntax,
-                                                                                 "VERIFY_CHECKSUMS", 0);
+                                                                          "VERIFY_CHECKSUMS", 0);
                else
                        AppendPlainCommandOption(&buf, use_new_option_syntax,
-                                                                               "NOVERIFY_CHECKSUMS");
+                                                                        "NOVERIFY_CHECKSUMS");
        }
 
        if (manifest)
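
The AppendIntegerCommandOption()/AppendPlainCommandOption() calls above format BASE_BACKUP options for two wire syntaxes: newer servers take a parenthesized, comma-separated option list, while older ones take bare space-separated keywords. A hedged sketch of that difference (the helper name and buffer handling are assumptions, not the real pg_basebackup helpers):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static void
    append_int_option(char *buf, size_t bufsize, bool use_new_syntax,
                      const char *name, int value)
    {
        size_t      len = strlen(buf);

        if (use_new_syntax)     /* e.g. BASE_BACKUP (MAX_RATE 32, ...) */
            snprintf(buf + len, bufsize - len, "%s%s %d",
                     len > 0 ? ", " : "", name, value);
        else                    /* e.g. BASE_BACKUP MAX_RATE 32 */
            snprintf(buf + len, bufsize - len, " %s %d", name, value);
    }
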
@@ -1992,8 +1992,8 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
                 * we do anything anyway.
                 *
                 * Note that this is skipped for tar format backups and backups that
-                * the server is storing to a target location, since in that case
-                * we won't be storing anything into these directories and thus should
+                * the server is storing to a target location, since in that case we
+                * won't be storing anything into these directories and thus should
                 * not create them.
                 */
                if (backup_target == NULL && format == 'p' && !PQgetisnull(res, i, 1))
@@ -2019,8 +2019,8 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
         */
        if (includewal == STREAM_WAL)
        {
-               pg_compress_algorithm   wal_compress_algorithm;
-               int             wal_compress_level;
+               pg_compress_algorithm wal_compress_algorithm;
+               int                     wal_compress_level;
 
                if (verbose)
                        pg_log_info("starting background WAL receiver");
@@ -2315,8 +2315,8 @@ main(int argc, char **argv)
        int                     option_index;
        char       *compression_algorithm = "none";
        char       *compression_detail = NULL;
-       CompressionLocation     compressloc = COMPRESS_LOCATION_UNSPECIFIED;
-       pg_compress_specification       client_compress;
+       CompressionLocation compressloc = COMPRESS_LOCATION_UNSPECIFIED;
+       pg_compress_specification client_compress;
 
        pg_logging_init(argv[0]);
        progname = get_progname(argv[0]);
@@ -2539,8 +2539,8 @@ main(int argc, char **argv)
 
        /*
         * If the user has not specified where to perform backup compression,
-        * default to the client, unless the user specified --target, in which case
-        * the server is the only choice.
+        * default to the client, unless the user specified --target, in which
+        * case the server is the only choice.
         */
        if (compressloc == COMPRESS_LOCATION_UNSPECIFIED)
        {
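
The comment above states the defaulting rule for where compression happens. Restated as a small self-contained function (the enum mirrors the one in the diff; default_compress_location itself is hypothetical):

    #include <stdbool.h>

    typedef enum
    {
        COMPRESS_LOCATION_UNSPECIFIED,
        COMPRESS_LOCATION_CLIENT,
        COMPRESS_LOCATION_SERVER
    } CompressionLocation;

    static CompressionLocation
    default_compress_location(CompressionLocation loc, bool has_backup_target)
    {
        if (loc != COMPRESS_LOCATION_UNSPECIFIED)
            return loc;             /* user already chose a side */
        /* --target sends the backup elsewhere, so only the server can compress */
        return has_backup_target ? COMPRESS_LOCATION_SERVER
            : COMPRESS_LOCATION_CLIENT;
    }
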
@@ -2551,14 +2551,14 @@ main(int argc, char **argv)
        }
 
        /*
-        * If any compression that we're doing is happening on the client side,
-        * we must try to parse the compression algorithm and detail, but if it's
-        * all on the server side, then we're just going to pass through whatever
-        * was requested and let the server decide what to do.
+        * If any compression that we're doing is happening on the client side, we
+        * must try to parse the compression algorithm and detail, but if it's all
+        * on the server side, then we're just going to pass through whatever was
+        * requested and let the server decide what to do.
         */
        if (compressloc == COMPRESS_LOCATION_CLIENT)
        {
-               pg_compress_algorithm   alg;
+               pg_compress_algorithm alg;
                char       *error_detail;
 
                if (!parse_compress_algorithm(compression_algorithm, &alg))
@@ -2579,8 +2579,8 @@ main(int argc, char **argv)
        }
 
        /*
-        * Can't perform client-side compression if the backup is not being
-        * sent to the client.
+        * Can't perform client-side compression if the backup is not being sent
+        * to the client.
         */
        if (backup_target != NULL && compressloc == COMPRESS_LOCATION_CLIENT)
        {
@@ -2724,13 +2724,14 @@ main(int argc, char **argv)
        atexit(disconnect_atexit);
 
 #ifndef WIN32
+
        /*
         * Trap SIGCHLD to be able to handle the WAL stream process exiting. There
-        * is no SIGCHLD on Windows, there we rely on the background thread setting
-        * the signal variable on unexpected but graceful exit. If the WAL stream
-        * thread crashes on Windows it will bring down the entire process as it's
-        * a thread, so there is nothing to catch should that happen. A crash on
-        * UNIX will be caught by the signal handler.
+        * is no SIGCHLD on Windows, there we rely on the background thread
+        * setting the signal variable on unexpected but graceful exit. If the WAL
+        * stream thread crashes on Windows it will bring down the entire process
+        * as it's a thread, so there is nothing to catch should that happen. A
+        * crash on UNIX will be caught by the signal handler.
         */
        pqsignal(SIGCHLD, sigchld_handler);
 #endif
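
The comment above describes the Unix/Windows split: on Unix a SIGCHLD handler sets a flag, while on Windows the background WAL-stream thread sets the same flag itself on a graceful exit. The pattern in miniature (a sketch only; PostgreSQL installs the handler with pqsignal() rather than raw signal()):

    #include <signal.h>

    static volatile sig_atomic_t bgchild_exited = 0;

    static void
    sigchld_handler(int signo)
    {
        (void) signo;
        bgchild_exited = 1;     /* async-signal-safe: only set the flag */
    }

    static void
    install_child_exit_watch(void)
    {
    #ifndef WIN32
        signal(SIGCHLD, sigchld_handler);
    #else
        /* no SIGCHLD; the WAL-stream thread sets bgchild_exited directly */
    #endif
    }

The main loop then polls bgchild_exited at convenient points instead of doing any real work inside the handler.
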
index 86c0493a949166f281073c547e5d7bb71d0ef913..299b9b76213a4ba778b86acc218014a1b31328d7 100644 (file)
@@ -619,7 +619,7 @@ CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin,
                        /* pg_recvlogical doesn't use an exported snapshot, so suppress */
                        if (use_new_option_syntax)
                                AppendStringCommandOption(query, use_new_option_syntax,
-                                                                                  "SNAPSHOT", "nothing");
+                                                                                 "SNAPSHOT", "nothing");
                        else
                                AppendPlainCommandOption(query, use_new_option_syntax,
                                                                                 "NOEXPORT_SNAPSHOT");
index 056fcf3597648a14b88dbd5da8a07b19c7086fa3..87a211315f05e38705a92812f46104939e8f975d 100644 (file)
@@ -28,8 +28,9 @@ my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
 umask(0077);
 
 # Initialize node without replication settings
-$node->init(extra => ['--data-checksums'],
-                       auth_extra => [ '--create-role', 'backupuser' ]);
+$node->init(
+       extra      => ['--data-checksums'],
+       auth_extra => [ '--create-role', 'backupuser' ]);
 $node->start;
 my $pgdata = $node->data_dir;
 
@@ -85,10 +86,9 @@ $node->restart;
 # Now that we have a server that supports replication commands, test whether
 # certain invalid compression commands fail on the client side with client-side
 # compression and on the server side with server-side compression.
-my $client_fails =
-       'pg_basebackup: error: ';
+my $client_fails = 'pg_basebackup: error: ';
 my $server_fails =
-       'pg_basebackup: error: could not initiate base backup: ERROR:  ';
+  'pg_basebackup: error: could not initiate base backup: ERROR:  ';
 my @compression_failure_tests = (
        [
                'extrasquishy',
@@ -134,8 +134,7 @@ my @compression_failure_tests = (
                'gzip:workers=3',
                'invalid compression specification: compression algorithm "gzip" does not accept a worker count',
                'failure on worker count for gzip'
-       ],
-);
+       ],);
 for my $cft (@compression_failure_tests)
 {
        my $cfail = quotemeta($client_fails . $cft->[1]);
@@ -143,10 +142,13 @@ for my $cft (@compression_failure_tests)
        $node->command_fails_like(
                [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', $cft->[0] ],
                qr/$cfail/,
-               'client '. $cft->[2]);
+               'client ' . $cft->[2]);
        $node->command_fails_like(
-               [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress',
-                  'server-' . $cft->[0] ],
+               [
+                       'pg_basebackup',   '-D',
+                       "$tempdir/backup", '--compress',
+                       'server-' . $cft->[0]
+               ],
                qr/$sfail/,
                'server ' . $cft->[2]);
 }
@@ -189,7 +191,8 @@ foreach my $filename (@tempRelationFiles)
 }
 
 # Run base backup.
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
+$node->command_ok(
+       [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
        'pg_basebackup runs');
 ok(-f "$tempdir/backup/PG_VERSION",      'backup was created');
 ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
@@ -326,12 +329,12 @@ $node->start;
 # to our physical temp location.  That way we can use shorter names
 # for the tablespace directories, which hopefully won't run afoul of
 # the 99 character length limit.
-my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
+my $sys_tempdir      = PostgreSQL::Test::Utils::tempdir_short;
 my $real_sys_tempdir = "$sys_tempdir/tempdir";
 dir_symlink "$tempdir", $real_sys_tempdir;
 
 mkdir "$tempdir/tblspc1";
-my $realTsDir    = "$real_sys_tempdir/tblspc1";
+my $realTsDir = "$real_sys_tempdir/tblspc1";
 $node->safe_psql('postgres',
        "CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';");
 $node->safe_psql('postgres',
@@ -368,7 +371,8 @@ SKIP:
        my $repTsDir     = "$tempdir/tblspc1replica";
        my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
        mkdir $repTsDir;
-       PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0], '-C', $repTsDir);
+       PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0],
+               '-C', $repTsDir);
 
        # Update tablespace map to point to new directory.
        # XXX Ideally pg_basebackup would handle this.
@@ -503,7 +507,8 @@ mkdir "$tempdir/$superlongname";
 $realTsDir = "$real_sys_tempdir/$superlongname";
 $node->safe_psql('postgres',
        "CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
+$node->command_ok(
+       [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
        'pg_basebackup tar with long symlink target');
 $node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
 rmtree("$tempdir/tarbackup_l3");
@@ -541,7 +546,10 @@ ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
        'WAL files copied');
 rmtree("$tempdir/backupxs");
 $node->command_ok(
-       [ @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream', '-Ft' ],
+       [
+               @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream',
+               '-Ft'
+       ],
        'pg_basebackup -X stream runs in tar mode');
 ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
 rmtree("$tempdir/backupxst");
@@ -570,7 +578,10 @@ $node->command_fails_like(
        qr/unrecognized target/,
        'backup target unrecognized');
 $node->command_fails_like(
-       [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none', '-D', "$tempdir/blackhole" ],
+       [
+               @pg_basebackup_defs, '--target', 'blackhole', '-X',
+               'none',              '-D',       "$tempdir/blackhole"
+       ],
        qr/cannot specify both output directory and backup target/,
        'backup target and output directory');
 $node->command_fails_like(
@@ -581,7 +592,11 @@ $node->command_ok(
        [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ],
        'backup target blackhole');
 $node->command_ok(
-       [ @pg_basebackup_defs, '--target', "server:$tempdir/backuponserver", '-X', 'none' ],
+       [
+               @pg_basebackup_defs,              '--target',
+               "server:$tempdir/backuponserver", '-X',
+               'none'
+       ],
        'backup target server');
 ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created');
 rmtree("$tempdir/backuponserver");
@@ -590,9 +605,14 @@ $node->command_ok(
        [qw(createuser --replication --role=pg_write_server_files backupuser)],
        'create backup user');
 $node->command_ok(
-       [ @pg_basebackup_defs, '-U', 'backupuser', '--target', "server:$tempdir/backuponserver", '-X', 'none' ],
+       [
+               @pg_basebackup_defs, '-U', 'backupuser', '--target',
+               "server:$tempdir/backuponserver",
+               '-X', 'none'
+       ],
        'backup target server');
-ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created as non-superuser');
+ok( -f "$tempdir/backuponserver/base.tar",
+       'backup tar was created as non-superuser');
 rmtree("$tempdir/backuponserver");
 
 $node->command_fails(
@@ -617,7 +637,10 @@ $node->command_fails(
        ],
        'pg_basebackup fails with -C -S --no-slot');
 $node->command_fails_like(
-       [ @pg_basebackup_defs, '--target', 'blackhole', '-D', "$tempdir/blackhole" ],
+       [
+               @pg_basebackup_defs, '--target', 'blackhole', '-D',
+               "$tempdir/blackhole"
+       ],
        qr/cannot specify both output directory and backup target/,
        'backup target and output directory');
 
@@ -648,7 +671,11 @@ $node->command_fails(
        'pg_basebackup fails with -C -S --no-slot');
 
 $node->command_ok(
-       [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0' ],
+       [
+               @pg_basebackup_defs,      '-D',
+               "$tempdir/backupxs_slot", '-C',
+               '-S',                     'slot0'
+       ],
        'pg_basebackup -C runs');
 rmtree("$tempdir/backupxs_slot");
 
@@ -667,7 +694,11 @@ isnt(
        'restart LSN of new slot is not null');
 
 $node->command_fails(
-       [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot1", '-C', '-S', 'slot0' ],
+       [
+               @pg_basebackup_defs,       '-D',
+               "$tempdir/backupxs_slot1", '-C',
+               '-S',                      'slot0'
+       ],
        'pg_basebackup fails with -C -S and a previously existing slot');
 
 $node->safe_psql('postgres',
@@ -677,7 +708,10 @@ my $lsn = $node->safe_psql('postgres',
 );
 is($lsn, '', 'restart LSN of new slot is null');
 $node->command_fails(
-       [ @pg_basebackup_defs, '-D', "$tempdir/fail", '-S', 'slot1', '-X', 'none' ],
+       [
+               @pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
+               'slot1',             '-X', 'none'
+       ],
        'pg_basebackup with replication slot fails without WAL streaming');
 $node->command_ok(
        [
@@ -843,8 +877,10 @@ my $sigchld_bb_timeout =
 my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', '');
 my $sigchld_bb = IPC::Run::start(
        [
-               @pg_basebackup_defs, '--wal-method=stream', '-D', "$tempdir/sigchld",
-               '--max-rate=32', '-d', $node->connstr('postgres')
+               @pg_basebackup_defs, '--wal-method=stream',
+               '-D',                "$tempdir/sigchld",
+               '--max-rate=32',     '-d',
+               $node->connstr('postgres')
        ],
        '<',
        \$sigchld_bb_stdin,
@@ -854,16 +890,18 @@ my $sigchld_bb = IPC::Run::start(
        \$sigchld_bb_stderr,
        $sigchld_bb_timeout);
 
-is($node->poll_query_until('postgres',
-       "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE " .
-       "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' " .
-       "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'"),
+is( $node->poll_query_until(
+               'postgres',
+               "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE "
+                 . "application_name = '010_pg_basebackup.pl' AND wait_event = 'WalSenderMain' "
+                 . "AND backend_type = 'walsender' AND query ~ 'START_REPLICATION'"),
        "1",
        "Walsender killed");
 
-ok(pump_until($sigchld_bb, $sigchld_bb_timeout, \$sigchld_bb_stderr,
-  qr/background process terminated unexpectedly/),
-  'background process exit message');
+ok( pump_until(
+               $sigchld_bb,         $sigchld_bb_timeout,
+               \$sigchld_bb_stderr, qr/background process terminated unexpectedly/),
+       'background process exit message');
 $sigchld_bb->finish();
 
 done_testing();
index 465394404fd0d9dba5eb6784c404469ae40ed8c4..4f07bb890780a60b3dee026e2e23dde95a10c460 100644 (file)
@@ -45,7 +45,7 @@ $primary->command_ok(
        'creating a replication slot');
 my $slot = $primary->slot($slot_name);
 is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
-is($slot->{'restart_lsn'}, '',       'restart LSN of new slot is null');
+is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
 $primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ],
        'dropping a replication slot');
 is($primary->slot($slot_name)->{'slot_type'},
@@ -281,7 +281,7 @@ $standby->psql(
 $primary->wait_for_catchup($standby);
 # Get a walfilename from before the promotion to make sure it is archived
 # after promotion
-my $standby_slot = $standby->slot($archive_slot);
+my $standby_slot         = $standby->slot($archive_slot);
 my $replication_slot_lsn = $standby_slot->{'restart_lsn'};
 
 # pg_walfile_name() is not supported while in recovery, so use the primary
index 201196f95732b7730cde3743ea33dfcc231e6274..38576c2e0082830c931d3587773da3a86effabac 100644 (file)
@@ -78,7 +78,8 @@ $node->command_ok(
        [
                'pg_recvlogical',           '-S',
                'test',                     '-d',
-               $node->connstr('postgres'), '--create-slot', '--two-phase'
+               $node->connstr('postgres'), '--create-slot',
+               '--two-phase'
        ],
        'slot with two-phase created');
 
@@ -87,16 +88,18 @@ isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
 
 $node->safe_psql('postgres',
        "BEGIN; INSERT INTO test_table values (11); PREPARE TRANSACTION 'test'");
-$node->safe_psql('postgres',
-       "COMMIT PREPARED 'test'");
-$nextlsn =
-  $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
+$node->safe_psql('postgres', "COMMIT PREPARED 'test'");
+$nextlsn = $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
 chomp($nextlsn);
 
 $node->command_fails(
        [
-               'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
-               '--start', '--endpos', "$nextlsn", '--two-phase', '--no-loop', '-f', '-'
+               'pg_recvlogical',           '-S',
+               'test',                     '-d',
+               $node->connstr('postgres'), '--start',
+               '--endpos',                 "$nextlsn",
+               '--two-phase',              '--no-loop',
+               '-f',                       '-'
        ],
        'incorrect usage');
 
index f605e02da887297ce879b305d4c4f72ff6dc0cd9..dd78e5bc660de28ac6c65d36fa662d3901543b1f 100644 (file)
@@ -1750,7 +1750,7 @@ typedef BOOL (WINAPI * __QueryInformationJobObject) (HANDLE, JOBOBJECTINFOCLASS,
  * achieves the goal of postmaster running in a similar environment as pg_ctl.
  */
 static void
-InheritStdHandles(STARTUPINFO* si)
+InheritStdHandles(STARTUPINFO *si)
 {
        si->dwFlags |= STARTF_USESTDHANDLES;
        si->hStdInput = GetStdHandle(STD_INPUT_HANDLE);
@@ -1802,8 +1802,8 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, bool as_ser
        si.cb = sizeof(si);
 
        /*
-        * Set stdin/stdout/stderr handles to be inherited in the child
-        * process. That allows postmaster and the processes it starts to perform
+        * Set stdin/stdout/stderr handles to be inherited in the child process.
+        * That allows postmaster and the processes it starts to perform
         * additional checks to see if running in a service (otherwise they get
         * the default console handles - which point to "somewhere").
         */
index 2503d74a76d46ea8f25c40b8a162cdf04ead4698..ab26ee686ca1c038edc5a24c2cae1b6950566653 100644 (file)
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::Utils;
 use Test::More;
 
-my $tempdir       = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
 
 command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ],
        4, 'pg_ctl status with nonexistent directory');
index 24e42fa5d7d79fcd6e19a1ba5ad9a9ca1d8f4e9c..77fe51a3a5348f81ec185a5796bd60a7a76501d2 100644 (file)
@@ -2580,12 +2580,12 @@ ReadToc(ArchiveHandle *AH)
                        is_supported = false;
                else
                {
-                               tmp = ReadStr(AH);
+                       tmp = ReadStr(AH);
 
-                               if (strcmp(tmp, "true") == 0)
-                                       is_supported = false;
+                       if (strcmp(tmp, "true") == 0)
+                               is_supported = false;
 
-                               free(tmp);
+                       free(tmp);
                }
 
                if (!is_supported)
index c3b9c365d5c57d01a75f6e391fc067812f2ea7f1..3443eef6b0e13e53a8042be2b889fdd8dfb4b1ac 100644 (file)
@@ -956,11 +956,11 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id)
        int                     byt;
 
        /*
-        * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal() inside
-        * ReadInt rather than returning EOF.  It doesn't seem worth jumping
-        * through hoops to deal with that case better, because no such files are
-        * likely to exist in the wild: only some 7.1 development versions of
-        * pg_dump ever generated such files.
+        * Note: if we are at EOF with a pre-1.3 input file, we'll pg_fatal()
+        * inside ReadInt rather than returning EOF.  It doesn't seem worth
+        * jumping through hoops to deal with that case better, because no such
+        * files are likely to exist in the wild: only some 7.1 development
+        * versions of pg_dump ever generated such files.
         */
        if (AH->version < K_VERS_1_3)
                *type = BLK_DATA;
index 786d592e2ba0a8ea1e2940e11432741c5a5931f9..7cc9c72e4922a6b3b554df7e08b4f795a668ccf7 100644 (file)
@@ -1318,8 +1318,8 @@ expand_schema_name_patterns(Archive *fout,
 
        for (cell = patterns->head; cell; cell = cell->next)
        {
-               PQExpBufferData dbbuf;
-               int             dotcnt;
+               PQExpBufferData dbbuf;
+               int                     dotcnt;
 
                appendPQExpBufferStr(query,
                                                         "SELECT oid FROM pg_catalog.pg_namespace n\n");
@@ -1376,7 +1376,7 @@ expand_extension_name_patterns(Archive *fout,
         */
        for (cell = patterns->head; cell; cell = cell->next)
        {
-               int             dotcnt;
+               int                     dotcnt;
 
                appendPQExpBufferStr(query,
                                                         "SELECT oid FROM pg_catalog.pg_extension e\n");
@@ -1429,7 +1429,7 @@ expand_foreign_server_name_patterns(Archive *fout,
 
        for (cell = patterns->head; cell; cell = cell->next)
        {
-               int             dotcnt;
+               int                     dotcnt;
 
                appendPQExpBufferStr(query,
                                                         "SELECT oid FROM pg_catalog.pg_foreign_server s\n");
@@ -1481,8 +1481,8 @@ expand_table_name_patterns(Archive *fout,
 
        for (cell = patterns->head; cell; cell = cell->next)
        {
-               PQExpBufferData dbbuf;
-               int             dotcnt;
+               PQExpBufferData dbbuf;
+               int                     dotcnt;
 
                /*
                 * Query must remain ABSOLUTELY devoid of unqualified names.  This
@@ -4342,7 +4342,8 @@ dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo)
        {
                /*
                 * It's necessary to add parentheses around the expression because
-                * pg_get_expr won't supply the parentheses for things like WHERE TRUE.
+                * pg_get_expr won't supply the parentheses for things like WHERE
+                * TRUE.
                 */
                appendPQExpBuffer(query, " WHERE (%s)", pubrinfo->pubrelqual);
        }
@@ -4858,8 +4859,8 @@ binary_upgrade_set_pg_class_oids(Archive *fout,
 
                /*
                 * Not every relation has storage. Also, in a pre-v12 database,
-                * partitioned tables have a relfilenode, which should not be preserved
-                * when upgrading.
+                * partitioned tables have a relfilenode, which should not be
+                * preserved when upgrading.
                 */
                if (OidIsValid(relfilenode) && relkind != RELKIND_PARTITIONED_TABLE)
                        appendPQExpBuffer(upgrade_buffer,
index 52f9f7c4d6673c59c10f26c8780d828137311cb0..ae41a652d799e6337cff1eb79e0c168a0d1e0804 100644 (file)
@@ -1269,7 +1269,7 @@ expand_dbname_patterns(PGconn *conn,
 
        for (SimpleStringListCell *cell = patterns->head; cell; cell = cell->next)
        {
-               int             dotcnt;
+               int                     dotcnt;
 
                appendPQExpBufferStr(query,
                                                         "SELECT datname FROM pg_catalog.pg_database n\n");
index 65e6c01fed7762077d41f7756b88fcd59191c177..a583c8a6d246566366820ad7906bb3324ac13822 100644 (file)
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::Utils;
 use Test::More;
 
-my $tempdir       = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
 
 #########################################
 # Basic checks
index 3b31e13f62be5a335d1b217062215bd69f79e08e..1f08716f690a14e1caa3b642759e8ecd629ffa69 100644 (file)
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::Utils;
 use Test::More;
 
-my $tempdir       = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
 
 ###############################################################
 # Definition of the pg_dump runs to make.
@@ -2439,7 +2439,7 @@ my %tests = (
        'CREATE PUBLICATION pub3' => {
                create_order => 50,
                create_sql   => 'CREATE PUBLICATION pub3;',
-               regexp => qr/^
+               regexp       => qr/^
                        \QCREATE PUBLICATION pub3 WITH (publish = 'insert, update, delete, truncate');\E
                        /xm,
                like => { %full_runs, section_post_data => 1, },
@@ -2448,7 +2448,7 @@ my %tests = (
        'CREATE PUBLICATION pub4' => {
                create_order => 50,
                create_sql   => 'CREATE PUBLICATION pub4;',
-               regexp => qr/^
+               regexp       => qr/^
                        \QCREATE PUBLICATION pub4 WITH (publish = 'insert, update, delete, truncate');\E
                        /xm,
                like => { %full_runs, section_post_data => 1, },
@@ -2501,7 +2501,8 @@ my %tests = (
                unlike => { exclude_dump_test_schema => 1, },
        },
 
-       'ALTER PUBLICATION pub1 ADD TABLE test_seventh_table (col3, col2) WHERE (col1 = 1)' => {
+       'ALTER PUBLICATION pub1 ADD TABLE test_seventh_table (col3, col2) WHERE (col1 = 1)'
+         => {
                create_order => 52,
                create_sql =>
                  'ALTER PUBLICATION pub1 ADD TABLE dump_test.test_seventh_table (col3, col2) WHERE (col1 = 1);',
@@ -2510,7 +2511,7 @@ my %tests = (
                        /xm,
                like => { %full_runs, section_post_data => 1, },
                unlike => { exclude_dump_test_schema => 1, },
-       },
+         },
 
        'ALTER PUBLICATION pub3 ADD ALL TABLES IN SCHEMA dump_test' => {
                create_order => 51,
@@ -2519,7 +2520,7 @@ my %tests = (
                regexp => qr/^
                        \QALTER PUBLICATION pub3 ADD ALL TABLES IN SCHEMA dump_test;\E
                        /xm,
-               like   => { %full_runs, section_post_data => 1, },
+               like => { %full_runs, section_post_data => 1, },
                unlike => { exclude_dump_test_schema => 1, },
        },
 
@@ -2540,14 +2541,15 @@ my %tests = (
                regexp => qr/^
                        \QALTER PUBLICATION pub4 ADD TABLE ONLY dump_test.test_table WHERE ((col1 > 0));\E
                        /xm,
-               like => { %full_runs, section_post_data => 1, },
+               like   => { %full_runs, section_post_data => 1, },
                unlike => {
                        exclude_dump_test_schema => 1,
                        exclude_test_table       => 1,
                },
        },
 
-       'ALTER PUBLICATION pub4 ADD TABLE test_second_table WHERE (col2 = \'test\');' => {
+       'ALTER PUBLICATION pub4 ADD TABLE test_second_table WHERE (col2 = \'test\');'
+         => {
                create_order => 52,
                create_sql =>
                  'ALTER PUBLICATION pub4 ADD TABLE dump_test.test_second_table WHERE (col2 = \'test\');',
@@ -2556,7 +2558,7 @@ my %tests = (
                        /xm,
                like => { %full_runs, section_post_data => 1, },
                unlike => { exclude_dump_test_schema => 1, },
-       },
+         },
 
        'CREATE SCHEMA public' => {
                regexp => qr/^CREATE SCHEMA public;/m,
@@ -3979,14 +3981,12 @@ command_fails_like(
 $node->command_fails_like(
        [ 'pg_dumpall', '--exclude-database', '.' ],
        qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./,
-       'pg_dumpall: option --exclude-database rejects multipart pattern "."'
-);
+       'pg_dumpall: option --exclude-database rejects multipart pattern "."');
 
 $node->command_fails_like(
        [ 'pg_dumpall', '--exclude-database', 'myhost.mydb' ],
        qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/,
-       'pg_dumpall: option --exclude-database rejects multipart database names'
-);
+       'pg_dumpall: option --exclude-database rejects multipart database names');
 
 #########################################
 # Test valid database exclusion patterns
@@ -4002,20 +4002,17 @@ $node->command_ok(
 $node->command_fails_like(
        [ 'pg_dump', '--schema', 'myhost.mydb.myschema' ],
        qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/,
-       'pg_dump: option --schema rejects three-part schema names'
-);
+       'pg_dump: option --schema rejects three-part schema names');
 
 $node->command_fails_like(
        [ 'pg_dump', '--schema', 'otherdb.myschema' ],
        qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/,
-       'pg_dump: option --schema rejects cross-database multipart schema names'
-);
+       'pg_dump: option --schema rejects cross-database multipart schema names');
 
 $node->command_fails_like(
        [ 'pg_dump', '--schema', '.' ],
        qr/pg_dump: error: cross-database references are not implemented: \./,
-       'pg_dump: option --schema rejects degenerate two-part schema name: "."'
-);
+       'pg_dump: option --schema rejects degenerate two-part schema name: "."');
 
 $node->command_fails_like(
        [ 'pg_dump', '--schema', '"some.other.db".myschema' ],
@@ -4035,17 +4032,18 @@ $node->command_fails_like(
 $node->command_fails_like(
        [ 'pg_dump', '--table', 'myhost.mydb.myschema.mytable' ],
        qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/,
-       'pg_dump: option --table rejects four-part table names'
-);
+       'pg_dump: option --table rejects four-part table names');
 
 $node->command_fails_like(
        [ 'pg_dump', '--table', 'otherdb.pg_catalog.pg_class' ],
        qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/,
-       'pg_dump: option --table rejects cross-database three part table names'
-);
+       'pg_dump: option --table rejects cross-database three part table names');
 
 command_fails_like(
-       [ 'pg_dump', '-p', "$port", '--table', '"some.other.db".pg_catalog.pg_class' ],
+       [
+               'pg_dump', '-p', "$port", '--table',
+               '"some.other.db".pg_catalog.pg_class'
+       ],
        qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/,
        'pg_dump: option --table rejects cross-database three part table names with embedded dots'
 );
index c28486632642086d8e15deb28e05c35fc4916da0..a0b23aae0fb6c9ba7c907f7721e02fc92f1a176d 100644 (file)
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::Utils;
 use Test::More;
 
-my $tempdir       = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
 
 my $node = PostgreSQL::Test::Cluster->new('main');
 my $port = $node->port;
index 7a745ade0fbc14edc1838fc38c85bb6bd4dd06c3..6e497447c35333af442b739a304202bae32f98a0 100644 (file)
@@ -30,8 +30,10 @@ my $dbname1 =
   . generate_ascii_string(1,  9)
   . generate_ascii_string(11, 12)
   . generate_ascii_string(14, 33)
-  . ($PostgreSQL::Test::Utils::windows_os ? '' : '"x"')   # IPC::Run mishandles '"' on Windows
-  . generate_ascii_string(35, 43)         # skip ','
+  . ($PostgreSQL::Test::Utils::windows_os
+       ? ''
+       : '"x"')    # IPC::Run mishandles '"' on Windows
+  . generate_ascii_string(35, 43)    # skip ','
   . generate_ascii_string(45, 54);
 my $dbname2 = 'regression' . generate_ascii_string(55, 65)    # skip 'B'-'W'
   . generate_ascii_string(88,  99)                            # skip 'd'-'w'
@@ -171,7 +173,8 @@ system_log('cat', $plain);
 my ($stderr, $result);
 my $restore_super = qq{regress_a'b\\c=d\\ne"f};
 $restore_super =~ s/"//g
-  if $PostgreSQL::Test::Utils::windows_os;    # IPC::Run mishandles '"' on Windows
+  if
+  $PostgreSQL::Test::Utils::windows_os;   # IPC::Run mishandles '"' on Windows
 
 
 # Restore full dump through psql using environment variables for
index d61067f6b2eed6e9b0f33b89e3de9e4974d35eb8..62529310415504a92468fd37817ea52280826942 100644 (file)
@@ -139,9 +139,9 @@ static const struct exclude_list_item excludeFiles[] =
        {"pg_internal.init", true}, /* defined as RELCACHE_INIT_FILENAME */
 
        /*
-        * If there is a backup_label or tablespace_map file, it indicates that
-        * a recovery failed and this cluster probably can't be rewound, but
-        * exclude them anyway if they are found.
+        * If there is a backup_label or tablespace_map file, it indicates that a
+        * recovery failed and this cluster probably can't be rewound, but exclude
+        * them anyway if they are found.
         */
        {"backup_label", false},        /* defined as BACKUP_LABEL_FILE */
        {"tablespace_map", false},      /* defined as TABLESPACE_MAP */
index 805935c6fd5111672ca699e159d2bf09a0b20de0..5aafe586e14cc43143dfb7b1a43f3daf0699db3a 100644 (file)
@@ -20,7 +20,8 @@ sub run_test
 {
        my $test_mode = shift;
 
-       my $primary_xlogdir = "${PostgreSQL::Test::Utils::tmp_check}/xlog_primary";
+       my $primary_xlogdir =
+         "${PostgreSQL::Test::Utils::tmp_check}/xlog_primary";
 
        rmtree($primary_xlogdir);
        RewindTest::setup_cluster($test_mode);
index a5a58dbe060c5a38de78a012cffea26fb7488982..9422828712a3bd06fb29f0a4edca0b21be80ddf2 100644 (file)
@@ -51,12 +51,13 @@ append_to_file "$standby_pgdata/tst_both_dir/file1", 'a';
 # copy operation and the result will be an error.
 my $ret = run_log(
        [
-               'pg_rewind', '--debug',
+               'pg_rewind',       '--debug',
                '--source-pgdata', $standby_pgdata,
                '--target-pgdata', $primary_pgdata,
                '--no-sync',
        ],
-       '2>>', "$standby_pgdata/tst_both_dir/file1");
+       '2>>',
+       "$standby_pgdata/tst_both_dir/file1");
 ok(!$ret, 'Error out on copying growing file');
 
 # Ensure that the files are of different size, the final error message should
index 8fd1f4b9de4114e2eaa21f93e7772d626fdeed86..98b66b01f82b24b55505da50e1beb8ef4f6305f5 100644 (file)
@@ -101,8 +101,8 @@ sub check_query
          ],
          '>', \$stdout, '2>', \$stderr;
 
-       is($result, 1, "$test_name: psql exit code");
-       is($stderr, '', "$test_name: psql no stderr");
+       is($result, 1,                "$test_name: psql exit code");
+       is($stderr, '',               "$test_name: psql no stderr");
        is($stdout, $expected_stdout, "$test_name: query result matches");
 
        return;
@@ -115,7 +115,8 @@ sub setup_cluster
 
        # Initialize primary, data checksums are mandatory
        $node_primary =
-         PostgreSQL::Test::Cluster->new('primary' . ($extra_name ? "_${extra_name}" : ''));
+         PostgreSQL::Test::Cluster->new(
+               'primary' . ($extra_name ? "_${extra_name}" : ''));
 
        # Set up pg_hba.conf and pg_ident.conf for the role running
        # pg_rewind.  This role is used for all the tests, and has
@@ -163,7 +164,8 @@ sub create_standby
        my $extra_name = shift;
 
        $node_standby =
-         PostgreSQL::Test::Cluster->new('standby' . ($extra_name ? "_${extra_name}" : ''));
+         PostgreSQL::Test::Cluster->new(
+               'standby' . ($extra_name ? "_${extra_name}" : ''));
        $node_primary->backup('my_backup');
        $node_standby->init_from_backup($node_primary, 'my_backup');
        my $connstr_primary = $node_primary->connstr();
@@ -305,7 +307,8 @@ sub run_pg_rewind
                # segments from the old primary to the archives.  These
                # will be used by pg_rewind.
                rmtree($node_primary->archive_dir);
-               PostgreSQL::Test::RecursiveCopy::copypath($node_primary->data_dir . "/pg_wal",
+               PostgreSQL::Test::RecursiveCopy::copypath(
+                       $node_primary->data_dir . "/pg_wal",
                        $node_primary->archive_dir);
 
                # Fast way to remove entire directory content
index 76b8dab4b73cd2c7215d28ff29fe7a546e2f9ff0..8372a85e6ef9d06c797ddf785a287d9dafb44958 100644 (file)
@@ -51,7 +51,8 @@ if (   (defined($ENV{olddump}) && !defined($ENV{oldinstall}))
 my $tempdir = PostgreSQL::Test::Utils::tempdir;
 
 # Initialize node to upgrade
-my $oldnode = PostgreSQL::Test::Cluster->new('old_node',
+my $oldnode =
+  PostgreSQL::Test::Cluster->new('old_node',
        install_path => $ENV{oldinstall});
 
 # To increase coverage of non-standard segment size and group access without
@@ -132,7 +133,7 @@ if (defined($ENV{oldinstall}))
        $oldnode->command_ok(
                [
                        'psql', '-X',
-                       '-f',   "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
+                       '-f', "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
                        'regression'
                ]);
 }
index 414de063496b1d4d303b2105c18795ce5bff9f5b..9edfe7c3605502231b990372055c0bd0d2bcd90c 100644 (file)
@@ -143,6 +143,7 @@ pg_log_v(eLogType type, const char *fmt, va_list ap)
                        break;
 
                case PG_STATUS:
+
                        /*
                         * For output to a display, do leading truncation. Append \r so
                         * that the next message is output at the start of the line.
index 843016ad80c549bce0a645a0d523d04fa6b54424..3dba7d8a698927b83418da025ac28c0921792b70 100644 (file)
@@ -16,7 +16,7 @@ $primary->start;
 
 # Include a user-defined tablespace in the hopes of detecting problems in that
 # area.
-my $source_ts_path   =PostgreSQL::Test::Utils::tempdir_short();
+my $source_ts_path   = PostgreSQL::Test::Utils::tempdir_short();
 my $source_ts_prefix = $source_ts_path;
 $source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!;
 
index 6fdd74e5eeacf62483d574a0c63615f830c8450d..8cda66ca001b1ea141dff70f8b15b103ccdb655b 100644 (file)
@@ -15,7 +15,8 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;
 my $backup_path = $primary->backup_dir . '/test_options';
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+$primary->command_ok(
+       [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
        "base backup ok");
 
 # Verify that pg_verifybackup -q succeeds and produces no output.
index 48fecfa31529218a2925c5dfdac1a3e6a3fdae80..b9573c57426fc743e3b766a29f7bf568deb0dc63 100644 (file)
@@ -12,10 +12,8 @@ use Test::More;
 
 my $tempdir = PostgreSQL::Test::Utils::tempdir;
 
-test_bad_manifest(
-       'input string ended unexpectedly',
-       qr/could not parse backup manifest: parsing failed/,
-       <<EOM);
+test_bad_manifest('input string ended unexpectedly',
+       qr/could not parse backup manifest: parsing failed/, <<EOM);
 {
 EOM
 
index bef2701ef75741f20c1c111af00eb212729e0c43..6e9fafcd55a7ff4ed56322a81b8ac7ebac23a101 100644 (file)
@@ -15,7 +15,8 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;
 my $backup_path = $primary->backup_dir . '/test_wal';
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
+$primary->command_ok(
+       [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ],
        "base backup ok");
 
 # Rename pg_wal.
@@ -69,7 +70,8 @@ $primary->safe_psql('postgres', 'SELECT pg_switch_wal()');
 my $backup_path2 = $primary->backup_dir . '/test_tli';
 # The base backup run below does a checkpoint, which removes the first segment
 # of the current timeline.
-$primary->command_ok([ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
+$primary->command_ok(
+       [ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ],
        "base backup 2 ok");
 command_ok(
        [ 'pg_verifybackup', $backup_path2 ],
index 915249a19def7d60ee1e0dbb47e6ffba0d7d0acf..4c4959516dd750af1faa574379db7f1ae55be842 100644 (file)
@@ -16,89 +16,90 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;
 
-my $backup_path = $primary->backup_dir . '/server-backup';
+my $backup_path  = $primary->backup_dir . '/server-backup';
 my $extract_path = $primary->backup_dir . '/extracted-backup';
 
 my @test_configuration = (
        {
                'compression_method' => 'none',
-               'backup_flags' => [],
-               'backup_archive' => 'base.tar',
-               'enabled' => 1
+               'backup_flags'       => [],
+               'backup_archive'     => 'base.tar',
+               'enabled'            => 1
        },
        {
                'compression_method' => 'gzip',
-               'backup_flags' => ['--compress', 'server-gzip'],
-               'backup_archive' => 'base.tar.gz',
+               'backup_flags'       => [ '--compress', 'server-gzip' ],
+               'backup_archive'     => 'base.tar.gz',
                'decompress_program' => $ENV{'GZIP_PROGRAM'},
-               'decompress_flags' => [ '-d' ],
-               'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+               'decompress_flags'   => ['-d'],
+               'enabled'            => check_pg_config("#define HAVE_LIBZ 1")
        },
        {
                'compression_method' => 'lz4',
-               'backup_flags' => ['--compress', 'server-lz4'],
-               'backup_archive' => 'base.tar.lz4',
+               'backup_flags'       => [ '--compress', 'server-lz4' ],
+               'backup_archive'     => 'base.tar.lz4',
                'decompress_program' => $ENV{'LZ4'},
-               'decompress_flags' => [ '-d', '-m'],
-               'enabled' => check_pg_config("#define USE_LZ4 1")
+               'decompress_flags'   => [ '-d', '-m' ],
+               'enabled'            => check_pg_config("#define USE_LZ4 1")
        },
        {
                'compression_method' => 'zstd',
-               'backup_flags' => ['--compress', 'server-zstd'],
-               'backup_archive' => 'base.tar.zst',
+               'backup_flags'       => [ '--compress', 'server-zstd' ],
+               'backup_archive'     => 'base.tar.zst',
                'decompress_program' => $ENV{'ZSTD'},
-               'decompress_flags' => [ '-d' ],
-               'enabled' => check_pg_config("#define USE_ZSTD 1")
-       }
-);
+               'decompress_flags'   => ['-d'],
+               'enabled'            => check_pg_config("#define USE_ZSTD 1")
+       });
 
 for my $tc (@test_configuration)
 {
        my $method = $tc->{'compression_method'};
 
-       SKIP: {
+  SKIP:
+       {
                skip "$method compression not supported by this build", 3
-                       if ! $tc->{'enabled'};
+                 if !$tc->{'enabled'};
                skip "no decompressor available for $method", 3
                  if exists $tc->{'decompress_program'}
                  && (!defined $tc->{'decompress_program'}
-                   || $tc->{'decompress_program'} eq '');
+                       || $tc->{'decompress_program'} eq '');
 
                # Take a server-side backup.
                my @backup = (
-                       'pg_basebackup', '--no-sync', '-cfast', '--target',
-                       "server:$backup_path", '-Xfetch'
-               );
-               push @backup, @{$tc->{'backup_flags'}};
+                       'pg_basebackup',       '--no-sync',
+                       '-cfast',              '--target',
+                       "server:$backup_path", '-Xfetch');
+               push @backup, @{ $tc->{'backup_flags'} };
                $primary->command_ok(\@backup,
-                                                        "server side backup, compression $method");
+                       "server side backup, compression $method");
 
 
                # Verify that we got the files we expected.
                my $backup_files = join(',',
                        sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
-               my $expected_backup_files = join(',',
-                       sort ('backup_manifest', $tc->{'backup_archive'}));
-               is($backup_files,$expected_backup_files,
+               my $expected_backup_files =
+                 join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+               is($backup_files, $expected_backup_files,
                        "found expected backup files, compression $method");
 
                # Decompress.
                if (exists $tc->{'decompress_program'})
                {
                        my @decompress = ($tc->{'decompress_program'});
-                       push @decompress, @{$tc->{'decompress_flags'}}
-                               if $tc->{'decompress_flags'};
+                       push @decompress, @{ $tc->{'decompress_flags'} }
+                         if $tc->{'decompress_flags'};
                        push @decompress, $backup_path . '/' . $tc->{'backup_archive'};
                        system_or_bail(@decompress);
                }
 
-               SKIP: {
+         SKIP:
+               {
                        my $tar = $ENV{TAR};
                        # don't check for a working tar here, to accommodate various odd
                        # cases such as AIX. If tar doesn't work the init_from_backup below
                        # will fail.
                        skip "no tar program available", 1
-                               if (!defined $tar || $tar eq '');
+                         if (!defined $tar || $tar eq '');
 
                        # Untar.
                        mkdir($extract_path);
@@ -106,8 +107,12 @@ for my $tc (@test_configuration)
                                '-C', $extract_path);
 
                        # Verify.
-                       $primary->command_ok([ 'pg_verifybackup', '-n',
-                               '-m', "$backup_path/backup_manifest", '-e', $extract_path ],
+                       $primary->command_ok(
+                               [
+                                       'pg_verifybackup', '-n',
+                                       '-m', "$backup_path/backup_manifest",
+                                       '-e', $extract_path
+                               ],
                                "verify backup, compression $method");
                }
 
index d6f11b95535d88cbca8b8b868b05ddf0191990e5..56889e1ece97765049aed94bb5299deddd526e98 100644 (file)
@@ -17,46 +17,47 @@ $primary->start;
 my @test_configuration = (
        {
                'compression_method' => 'none',
-               'backup_flags' => [],
-               'enabled' => 1
+               'backup_flags'       => [],
+               'enabled'            => 1
        },
        {
                'compression_method' => 'gzip',
-               'backup_flags' => ['--compress', 'server-gzip:5'],
-               'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+               'backup_flags'       => [ '--compress', 'server-gzip:5' ],
+               'enabled'            => check_pg_config("#define HAVE_LIBZ 1")
        },
        {
                'compression_method' => 'lz4',
-               'backup_flags' => ['--compress', 'server-lz4:5'],
-               'enabled' => check_pg_config("#define USE_LZ4 1")
+               'backup_flags'       => [ '--compress', 'server-lz4:5' ],
+               'enabled'            => check_pg_config("#define USE_LZ4 1")
        },
        {
                'compression_method' => 'zstd',
-               'backup_flags' => ['--compress', 'server-zstd:5'],
-               'enabled' => check_pg_config("#define USE_ZSTD 1")
+               'backup_flags'       => [ '--compress', 'server-zstd:5' ],
+               'enabled'            => check_pg_config("#define USE_ZSTD 1")
        },
        {
                'compression_method' => 'parallel zstd',
-               'backup_flags' => ['--compress', 'server-zstd:workers=3'],
-               'enabled' => check_pg_config("#define USE_ZSTD 1"),
-               'possibly_unsupported' => qr/could not set compression worker count to 3: Unsupported parameter/
-       }
-);
+               'backup_flags'       => [ '--compress', 'server-zstd:workers=3' ],
+               'enabled'            => check_pg_config("#define USE_ZSTD 1"),
+               'possibly_unsupported' =>
+                 qr/could not set compression worker count to 3: Unsupported parameter/
+       });
 
 for my $tc (@test_configuration)
 {
        my $backup_path = $primary->backup_dir . '/' . 'extract_backup';
-       my $method = $tc->{'compression_method'};
+       my $method      = $tc->{'compression_method'};
 
-       SKIP: {
+  SKIP:
+       {
                skip "$method compression not supported by this build", 2
-                       if ! $tc->{'enabled'};
+                 if !$tc->{'enabled'};
 
                # Take backup with server compression enabled.
-               my @backup      = (
+               my @backup = (
                        'pg_basebackup', '-D', $backup_path,
                        '-Xfetch', '--no-sync', '-cfast', '-Fp');
-               push @backup, @{$tc->{'backup_flags'}};
+               push @backup, @{ $tc->{'backup_flags'} };
 
                my @verify = ('pg_verifybackup', '-e', $backup_path);
 
@@ -64,7 +65,7 @@ for my $tc (@test_configuration)
                my $backup_stdout = '';
                my $backup_stderr = '';
                my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
-                                                                                         '2>', \$backup_stderr);
+                       '2>', \$backup_stderr);
                if ($backup_stdout ne '')
                {
                        print "# standard output was:\n$backup_stdout";
@@ -73,8 +74,9 @@ for my $tc (@test_configuration)
                {
                        print "# standard error was:\n$backup_stderr";
                }
-               if (! $backup_result && $tc->{'possibly_unsupported'} &&
-                       $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+               if (  !$backup_result
+                       && $tc->{'possibly_unsupported'}
+                       && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
                {
                        skip "compression with $method not supported by this build", 2;
                }
@@ -85,7 +87,7 @@ for my $tc (@test_configuration)
 
                # Make sure that it verifies OK.
                $primary->command_ok(\@verify,
-                                                       "backup verified, compression method \"$method\"");
+                       "backup verified, compression method \"$method\"");
        }
 
        # Remove backup immediately to save disk space.
index c1cd12cb065f17facc6415c9af9c570d3e8b8655..77cb503784c189996fff429d18054e9c36c1d677 100644 (file)
@@ -15,73 +15,74 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
 $primary->init(allows_streaming => 1);
 $primary->start;
 
-my $backup_path = $primary->backup_dir . '/client-backup';
+my $backup_path  = $primary->backup_dir . '/client-backup';
 my $extract_path = $primary->backup_dir . '/extracted-backup';
 
 my @test_configuration = (
        {
                'compression_method' => 'none',
-               'backup_flags' => [],
-               'backup_archive' => 'base.tar',
-               'enabled' => 1
+               'backup_flags'       => [],
+               'backup_archive'     => 'base.tar',
+               'enabled'            => 1
        },
        {
                'compression_method' => 'gzip',
-               'backup_flags' => ['--compress', 'client-gzip:5'],
-               'backup_archive' => 'base.tar.gz',
+               'backup_flags'       => [ '--compress', 'client-gzip:5' ],
+               'backup_archive'     => 'base.tar.gz',
                'decompress_program' => $ENV{'GZIP_PROGRAM'},
-               'decompress_flags' => [ '-d' ],
-               'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+               'decompress_flags'   => ['-d'],
+               'enabled'            => check_pg_config("#define HAVE_LIBZ 1")
        },
        {
                'compression_method' => 'lz4',
-               'backup_flags' => ['--compress', 'client-lz4:5'],
-               'backup_archive' => 'base.tar.lz4',
+               'backup_flags'       => [ '--compress', 'client-lz4:5' ],
+               'backup_archive'     => 'base.tar.lz4',
                'decompress_program' => $ENV{'LZ4'},
-               'decompress_flags' => [ '-d' ],
-               'output_file' => 'base.tar',
-               'enabled' => check_pg_config("#define USE_LZ4 1")
+               'decompress_flags'   => ['-d'],
+               'output_file'        => 'base.tar',
+               'enabled'            => check_pg_config("#define USE_LZ4 1")
        },
        {
                'compression_method' => 'zstd',
-               'backup_flags' => ['--compress', 'client-zstd:5'],
-               'backup_archive' => 'base.tar.zst',
+               'backup_flags'       => [ '--compress', 'client-zstd:5' ],
+               'backup_archive'     => 'base.tar.zst',
                'decompress_program' => $ENV{'ZSTD'},
-               'decompress_flags' => [ '-d' ],
-               'enabled' => check_pg_config("#define USE_ZSTD 1")
+               'decompress_flags'   => ['-d'],
+               'enabled'            => check_pg_config("#define USE_ZSTD 1")
        },
        {
                'compression_method' => 'parallel zstd',
-               'backup_flags' => ['--compress', 'client-zstd:workers=3'],
-               'backup_archive' => 'base.tar.zst',
+               'backup_flags'       => [ '--compress', 'client-zstd:workers=3' ],
+               'backup_archive'     => 'base.tar.zst',
                'decompress_program' => $ENV{'ZSTD'},
-               'decompress_flags' => [ '-d' ],
-               'enabled' => check_pg_config("#define USE_ZSTD 1"),
-               'possibly_unsupported' => qr/could not set compression worker count to 3: Unsupported parameter/
-       }
-);
+               'decompress_flags'   => ['-d'],
+               'enabled'            => check_pg_config("#define USE_ZSTD 1"),
+               'possibly_unsupported' =>
+                 qr/could not set compression worker count to 3: Unsupported parameter/
+       });
 
 for my $tc (@test_configuration)
 {
        my $method = $tc->{'compression_method'};
 
-       SKIP: {
+  SKIP:
+       {
                skip "$method compression not supported by this build", 3
-                       if ! $tc->{'enabled'};
+                 if !$tc->{'enabled'};
                skip "no decompressor available for $method", 3
                  if exists $tc->{'decompress_program'}
                  && (!defined $tc->{'decompress_program'}
-                   || $tc->{'decompress_program'} eq '');
+                       || $tc->{'decompress_program'} eq '');
 
                # Take a client-side backup.
-               my @backup      = (
+               my @backup = (
                        'pg_basebackup', '-D', $backup_path,
                        '-Xfetch', '--no-sync', '-cfast', '-Ft');
-               push @backup, @{$tc->{'backup_flags'}};
+               push @backup, @{ $tc->{'backup_flags'} };
                my $backup_stdout = '';
                my $backup_stderr = '';
                my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
-                                                                                         '2>', \$backup_stderr);
+                       '2>', \$backup_stderr);
                if ($backup_stdout ne '')
                {
                        print "# standard output was:\n$backup_stdout";
@@ -90,8 +91,9 @@ for my $tc (@test_configuration)
                {
                        print "# standard error was:\n$backup_stderr";
                }
-               if (! $backup_result && $tc->{'possibly_unsupported'} &&
-                       $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+               if (  !$backup_result
+                       && $tc->{'possibly_unsupported'}
+                       && $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
                {
                        skip "compression with $method not supported by this build", 3;
                }
@@ -103,30 +105,31 @@ for my $tc (@test_configuration)
                # Verify that we got the files we expected.
                my $backup_files = join(',',
                        sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
-               my $expected_backup_files = join(',',
-                       sort ('backup_manifest', $tc->{'backup_archive'}));
-               is($backup_files,$expected_backup_files,
+               my $expected_backup_files =
+                 join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+               is($backup_files, $expected_backup_files,
                        "found expected backup files, compression $method");
 
                # Decompress.
                if (exists $tc->{'decompress_program'})
                {
                        my @decompress = ($tc->{'decompress_program'});
-                       push @decompress, @{$tc->{'decompress_flags'}}
-                               if $tc->{'decompress_flags'};
+                       push @decompress, @{ $tc->{'decompress_flags'} }
+                         if $tc->{'decompress_flags'};
                        push @decompress, $backup_path . '/' . $tc->{'backup_archive'};
                        push @decompress, $backup_path . '/' . $tc->{'output_file'}
-                               if $tc->{'output_file'};
+                         if $tc->{'output_file'};
                        system_or_bail(@decompress);
                }
 
-               SKIP: {
+         SKIP:
+               {
                        my $tar = $ENV{TAR};
                        # don't check for a working tar here, to accommodate various odd
                        # cases such as AIX. If tar doesn't work the init_from_backup below
                        # will fail.
                        skip "no tar program available", 1
-                               if (!defined $tar || $tar eq '');
+                         if (!defined $tar || $tar eq '');
 
                        # Untar.
                        mkdir($extract_path);
@@ -134,8 +137,12 @@ for my $tc (@test_configuration)
                                '-C', $extract_path);
 
                        # Verify.
-                       $primary->command_ok([ 'pg_verifybackup', '-n',
-                               '-m', "$backup_path/backup_manifest", '-e', $extract_path ],
+                       $primary->command_ok(
+                               [
+                                       'pg_verifybackup', '-n',
+                                       '-m', "$backup_path/backup_manifest",
+                                       '-e', $extract_path
+                               ],
                                "verify backup, compression $method");
                }
 
index 4f265ef54605820dbf5019f157350b8784c077e3..3151cb5562b8524effea31b000cca7d8fa80c37c 100644 (file)
@@ -695,7 +695,7 @@ main(int argc, char **argv)
        XLogReaderState *xlogreader_state;
        XLogDumpPrivate private;
        XLogDumpConfig config;
-       XLogStats stats;
+       XLogStats       stats;
        XLogRecord *record;
        XLogRecPtr      first_record;
        char       *waldir = NULL;
index 02f250f5119649360c07ec56a01e3ab3e0df8b4e..79c0cd374d35a2ef359bd2ce6b2d94e45c85cd02 100644 (file)
@@ -277,9 +277,9 @@ bool                progress_timestamp = false; /* progress report with Unix time */
 int                    nclients = 1;           /* number of clients */
 int                    nthreads = 1;           /* number of threads */
 bool           is_connect;                     /* establish connection for each transaction */
-bool           report_per_command = false;     /* report per-command latencies, retries
-                                                                                * after errors and failures (errors
-                                                                                * without retrying) */
+bool           report_per_command = false; /* report per-command latencies,
+                                                                                * retries after errors and failures
+                                                                                * (errors without retrying) */
 int                    main_pid;                       /* main process id used in log filename */
 
 /*
@@ -302,8 +302,8 @@ int                 main_pid;                       /* main process id used in log filename */
  */
 uint32         max_tries = 1;
 
-bool           failures_detailed = false;      /* whether to group failures in reports
-                                                                                * or logs by basic types */
+bool           failures_detailed = false;      /* whether to group failures in
+                                                                                * reports or logs by basic types */
 
 const char *pghost = NULL;
 const char *pgport = NULL;
@@ -349,8 +349,8 @@ typedef struct
 
        /*
         * The maximum number of variables that we can currently store in 'vars'
-        * without having to reallocate more space. We must always have max_vars >=
-        * nvars.
+        * without having to reallocate more space. We must always have max_vars
+        * >= nvars.
         */
        int                     max_vars;
 
@@ -390,17 +390,17 @@ typedef struct StatsData
 {
        pg_time_usec_t start_time;      /* interval start time, for aggregates */
 
-       /*
-        * Transactions are counted depending on their execution and outcome. First
-        * a transaction may have started or not: skipped transactions occur under
-        * --rate and --latency-limit when the client is too late to execute them.
-        * Secondly, a started transaction may ultimately succeed or fail, possibly
-        * after some retries when --max-tries is not one. Thus
+       /*----------
+        * Transactions are counted depending on their execution and outcome.
+        * First a transaction may have started or not: skipped transactions occur
+        * under --rate and --latency-limit when the client is too late to execute
+        * them. Secondly, a started transaction may ultimately succeed or fail,
+        * possibly after some retries when --max-tries is not one. Thus
         *
         * the number of all transactions =
         *   'skipped' (it was too late to execute them) +
         *   'cnt' (the number of successful transactions) +
-        *   failed (the number of failed transactions).
+        *   'failed' (the number of failed transactions).
         *
         * A successful transaction can have several unsuccessful tries before a
         * successful run. Thus
@@ -419,11 +419,11 @@ typedef struct StatsData
         * failed (the number of failed transactions) =
         *   'serialization_failures' (they got a serialization error and were not
         *                             successfully retried) +
-        *   'deadlock_failures' (they got a deadlock error and were not successfully
-        *                        retried).
+        *   'deadlock_failures' (they got a deadlock error and were not
+        *                        successfully retried).
         *
-        * If the transaction was retried after a serialization or a deadlock error
-        * this does not guarantee that this retry was successful. Thus
+        * If the transaction was retried after a serialization or a deadlock
+        * error, this does not guarantee that the retry was successful. Thus
         *
         * 'retries' (number of retries) =
         *   number of retries in all retried transactions =
@@ -433,18 +433,20 @@ typedef struct StatsData
         * 'retried' (number of all retried transactions) =
         *   successfully retried transactions +
         *   failed transactions.
+        *----------
         */
        int64           cnt;                    /* number of successful transactions, not
                                                                 * including 'skipped' */
        int64           skipped;                /* number of transactions skipped under --rate
                                                                 * and --latency-limit */
-       int64           retries;                /* number of retries after a serialization or a
-                                                                * deadlock error in all the transactions */
-       int64           retried;                /* number of all transactions that were retried
-                                                                * after a serialization or a deadlock error
-                                                                * (perhaps the last try was unsuccessful) */
-       int64           serialization_failures; /* number of transactions that were not
-                                                                                * successfully retried after a
+       int64           retries;                /* number of retries after a serialization or
+                                                                * a deadlock error in all the transactions */
+       int64           retried;                /* number of all transactions that were
+                                                                * retried after a serialization or a deadlock
+                                                                * error (perhaps the last try was
+                                                                * unsuccessful) */
+       int64           serialization_failures; /* number of transactions that were
+                                                                                * not successfully retried after a
                                                                                 * serialization error */
        int64           deadlock_failures;      /* number of transactions that were not
                                                                         * successfully retried after a deadlock
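
The identities spelled out in that comment reduce to plain integer arithmetic. A minimal standalone sketch, using a hypothetical SimpleStats type rather than pgbench's actual StatsData:

    #include <stdint.h>

    /* Hypothetical, simplified mirror of the counters discussed above. */
    typedef struct SimpleStats
    {
        int64_t     cnt;            /* successful transactions */
        int64_t     skipped;        /* too late under --rate/--latency-limit */
        int64_t     serialization_failures;
        int64_t     deadlock_failures;
    } SimpleStats;

    /* number of all transactions = 'skipped' + 'cnt' + 'failed' */
    static int64_t
    total_transactions(const SimpleStats *s)
    {
        int64_t     failed = s->serialization_failures + s->deadlock_failures;

        return s->skipped + s->cnt + failed;
    }
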
@@ -559,16 +561,15 @@ typedef enum
         * States for failed commands.
         *
         * If the SQL/meta command fails, in CSTATE_ERROR clean up after an error:
-        * - clear the conditional stack;
-        * - if we have an unterminated (possibly failed) transaction block, send
-        * the rollback command to the server and wait for the result in
-        * CSTATE_WAIT_ROLLBACK_RESULT. If something goes wrong with rolling back,
-        * go to CSTATE_ABORTED.
+        * (1) clear the conditional stack; (2) if we have an unterminated
+        * (possibly failed) transaction block, send the rollback command to the
+        * server and wait for the result in CSTATE_WAIT_ROLLBACK_RESULT.  If
+        * something goes wrong with rolling back, go to CSTATE_ABORTED.
         *
-        * But if everything is ok we are ready for future transactions: if this is
-        * a serialization or deadlock error and we can re-execute the transaction
-        * from the very beginning, go to CSTATE_RETRY; otherwise go to
-        * CSTATE_FAILURE.
+        * But if everything is ok we are ready for future transactions: if this
+        * is a serialization or deadlock error and we can re-execute the
+        * transaction from the very beginning, go to CSTATE_RETRY; otherwise go
+        * to CSTATE_FAILURE.
         *
         * In CSTATE_RETRY report an error, set the same parameters for the
         * transaction execution as in the previous tries and process the first
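
The transitions that comment describes condense into a small decision function. A sketch under assumed names; the real logic is spread through pgbench's advanceConnectionState():

    #include <stdbool.h>

    /* Sketch only; values mirror the CSTATE_* states named above. */
    typedef enum
    {
        SK_WAIT_ROLLBACK_RESULT,
        SK_RETRY,
        SK_FAILURE,
        SK_ABORTED
    } SketchState;

    static SketchState
    next_state_after_error(bool in_tx_block, bool rollback_sent, bool can_retry)
    {
        /* unterminated block: roll it back first, or abort if that failed */
        if (in_tx_block)
            return rollback_sent ? SK_WAIT_ROLLBACK_RESULT : SK_ABORTED;
        /* otherwise retry serialization/deadlock errors when allowed */
        return can_retry ? SK_RETRY : SK_FAILURE;
    }
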
@@ -622,7 +623,7 @@ typedef struct
        int                     command;                /* command number in script */
 
        /* client variables */
-       Variables   variables;
+       Variables       variables;
 
        /* various times about current transaction in microseconds */
        pg_time_usec_t txn_scheduled;   /* scheduled start time of transaction */
@@ -633,19 +634,20 @@ typedef struct
        bool            prepared[MAX_SCRIPTS];  /* whether client prepared the script */
 
        /*
-        * For processing failures and repeating transactions with serialization or
-        * deadlock errors:
+        * For processing failures and repeating transactions with serialization
+        * or deadlock errors:
         */
-       EStatus         estatus;        /* the error status of the current transaction
-                                                        * execution; this is ESTATUS_NO_ERROR if there were
-                                                        * no errors */
-       pg_prng_state   random_state;   /* random state */
-       uint32                  tries;          /* how many times have we already tried the
+       EStatus         estatus;                /* the error status of the current transaction
+                                                                * execution; this is ESTATUS_NO_ERROR if
+                                                                * there were no errors */
+       pg_prng_state random_state; /* random state */
+       uint32          tries;                  /* how many times have we already tried the
                                                                 * current transaction? */
 
        /* per client collected stats */
-       int64           cnt;                    /* client transaction count, for -t; skipped and
-                                                                * failed transactions are also counted here */
+       int64           cnt;                    /* client transaction count, for -t; skipped
+                                                                * and failed transactions are also counted
+                                                                * here */
 } CState;
 
 /*
@@ -771,7 +773,7 @@ static ParsedScript sql_script[MAX_SCRIPTS];        /* SQL script files */
 static int     num_scripts;            /* number of scripts in sql_script[] */
 static int64 total_weight = 0;
 
-static bool    verbose_errors = false; /* print verbose messages of all errors */
+static bool verbose_errors = false; /* print verbose messages of all errors */
 
 /* Builtin test scripts */
 typedef struct BuiltinScript
@@ -3050,7 +3052,7 @@ commandError(CState *st, const char *message)
 {
        Assert(sql_script[st->use_file].commands[st->command]->type == SQL_COMMAND);
        pg_log_info("client %d got an error in command %d (SQL) of script %d; %s",
-                                st->id, st->command, st->use_file, message);
+                               st->id, st->command, st->use_file, message);
 }
 
 /* return a script number with a weighted choice. */
@@ -3289,8 +3291,8 @@ readCommandResponse(CState *st, MetaCommand meta, char *varprefix)
 
                        case PGRES_NONFATAL_ERROR:
                        case PGRES_FATAL_ERROR:
-                               st->estatus = getSQLErrorStatus(
-                                       PQresultErrorField(res, PG_DIAG_SQLSTATE));
+                               st->estatus = getSQLErrorStatus(PQresultErrorField(res,
+                                                                                                                                  PG_DIAG_SQLSTATE));
                                if (canRetryError(st->estatus))
                                {
                                        if (verbose_errors)
@@ -3397,13 +3399,15 @@ doRetry(CState *st, pg_time_usec_t *now)
        Assert(max_tries || latency_limit || duration > 0);
 
        /*
-        * We cannot retry the error if we have reached the maximum number of tries.
+        * We cannot retry the error if we have reached the maximum number of
+        * tries.
         */
        if (max_tries && st->tries >= max_tries)
                return false;
 
        /*
-        * We cannot retry the error if we spent too much time on this transaction.
+        * We cannot retry the error if we spent too much time on this
+        * transaction.
         */
        if (latency_limit)
        {
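
The two guards in doRetry() amount to the following check. A self-contained sketch with illustrative parameter names; pgbench keeps these values in globals and per-client state:

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    can_retry_sketch(uint32_t tries, uint32_t max_tries,
                     int64_t now_us, int64_t scheduled_us,
                     int64_t latency_limit_us)
    {
        /* reached the maximum number of tries (0 means unlimited) */
        if (max_tries && tries >= max_tries)
            return false;
        /* spent too much time on this transaction */
        if (latency_limit_us && now_us - scheduled_us > latency_limit_us)
            return false;
        return true;
    }
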
@@ -3432,14 +3436,15 @@ discardUntilSync(CState *st)
        if (!PQpipelineSync(st->con))
        {
                pg_log_error("client %d aborted: failed to send a pipeline sync",
-                                       st->id);
+                                        st->id);
                return 0;
        }
 
        /* receive PGRES_PIPELINE_SYNC and null following it */
-       for(;;)
+       for (;;)
        {
-               PGresult *res = PQgetResult(st->con);
+               PGresult   *res = PQgetResult(st->con);
+
                if (PQresultStatus(res) == PGRES_PIPELINE_SYNC)
                {
                        PQclear(res);
@@ -3484,9 +3489,10 @@ getTransactionStatus(PGconn *con)
                        /* fall through */
                case PQTRANS_ACTIVE:
                default:
+
                        /*
-                        * We cannot find out whether we are in a transaction block or not.
-                        * Internal error which should never occur.
+                        * We cannot find out whether we are in a transaction block or
+                        * not. Internal error which should never occur.
                         */
                        pg_log_error("unexpected transaction status %d", tx_status);
                        return TSTATUS_OTHER_ERROR;
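
For reference, this switch classifies libpq's PQtransactionStatus() into pgbench's internal TSTATUS_* values. A hedged standalone sketch with renamed types:

    #include <libpq-fe.h>

    typedef enum
    {
        TS_IDLE, TS_IN_BLOCK, TS_CONN_ERROR, TS_OTHER_ERROR
    } TxStatusSketch;

    static TxStatusSketch
    transaction_status_sketch(PGconn *con)
    {
        switch (PQtransactionStatus(con))
        {
            case PQTRANS_IDLE:
                return TS_IDLE;
            case PQTRANS_INTRANS:
            case PQTRANS_INERROR:
                return TS_IN_BLOCK;
            case PQTRANS_UNKNOWN:
                if (PQstatus(con) == CONNECTION_BAD)
                    return TS_CONN_ERROR;
                /* fall through */
            case PQTRANS_ACTIVE:
            default:
                /* cannot tell whether we are in a block: internal error */
                return TS_OTHER_ERROR;
        }
    }
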
@@ -3513,8 +3519,8 @@ printVerboseErrorMessages(CState *st, pg_time_usec_t *now, bool is_retry)
        printfPQExpBuffer(buf, "client %d ", st->id);
        appendPQExpBuffer(buf, "%s",
                                          (is_retry ?
-                                               "repeats the transaction after the error" :
-                                               "ends the failed transaction"));
+                                          "repeats the transaction after the error" :
+                                          "ends the failed transaction"));
        appendPQExpBuffer(buf, " (try %u", st->tries);
 
        /* Print max_tries if it is not unlimited. */
@@ -3522,8 +3528,8 @@ printVerboseErrorMessages(CState *st, pg_time_usec_t *now, bool is_retry)
                appendPQExpBuffer(buf, "/%u", max_tries);
 
        /*
-        * If the latency limit is used, print a percentage of the current transaction
-        * latency from the latency limit.
+        * If the latency limit is used, print a percentage of the current
+        * transaction latency from the latency limit.
         */
        if (latency_limit)
        {
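
The percentage mentioned above is just elapsed transaction time over the limit, for example:

    #include <stdint.h>

    /* Names are illustrative; pgbench tracks these times in microseconds. */
    static double
    latency_used_percent(int64_t now_us, int64_t txn_scheduled_us,
                         int64_t latency_limit_us)
    {
        return 100.0 * (double) (now_us - txn_scheduled_us) / latency_limit_us;
    }
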
@@ -3619,8 +3625,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
 
                                /*
                                 * It is the first try to run this transaction. Remember the
-                                * random state: maybe it will get an error and we will need to
-                                * run it again.
+                                * random state: maybe it will get an error and we will need
+                                * to run it again.
                                 */
                                st->random_state = st->cs_func_rs;
 
@@ -3998,8 +4004,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
                                        }
 
                                        /*
-                                        * Check if we have a (failed) transaction block or not, and
-                                        * roll it back if any.
+                                        * Check if we have a (failed) transaction block or not,
+                                        * and roll it back if any.
                                         */
                                        tstatus = getTransactionStatus(st->con);
                                        if (tstatus == TSTATUS_IN_BLOCK)
@@ -4017,9 +4023,9 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
                                        else if (tstatus == TSTATUS_IDLE)
                                        {
                                                /*
-                                               * If time is over, we're done;
-                                               * otherwise, check if we can retry the error.
-                                               */
+                                                * If time is over, we're done; otherwise, check if we
+                                                * can retry the error.
+                                                */
                                                st->state = timer_exceeded ? CSTATE_FINISHED :
                                                        doRetry(st, &now) ? CSTATE_RETRY : CSTATE_FAILURE;
                                        }
@@ -4039,7 +4045,7 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
                                 */
                        case CSTATE_WAIT_ROLLBACK_RESULT:
                                {
-                                       PGresult *res;
+                                       PGresult   *res;
 
                                        pg_log_debug("client %d receiving", st->id);
                                        if (!PQconsumeInput(st->con))
@@ -4050,7 +4056,7 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
                                                break;
                                        }
                                        if (PQisBusy(st->con))
-                                               return;         /* don't have the whole result yet */
+                                               return; /* don't have the whole result yet */
 
                                        /*
                                         * Read and discard the query result;
@@ -4066,8 +4072,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
                                                        Assert(res == NULL);
 
                                                        /*
-                                                        * If time is over, we're done;
-                                                        * otherwise, check if we can retry the error.
+                                                        * If time is over, we're done; otherwise, check
+                                                        * if we can retry the error.
                                                         */
                                                        st->state = timer_exceeded ? CSTATE_FINISHED :
                                                                doRetry(st, &now) ? CSTATE_RETRY : CSTATE_FAILURE;
@@ -4089,7 +4095,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
                                command = sql_script[st->use_file].commands[st->command];
 
                                /*
-                                * Inform that the transaction will be retried after the error.
+                                * Inform that the transaction will be retried after the
+                                * error.
                                 */
                                if (verbose_errors)
                                        printVerboseErrorMessages(st, &now, true);
@@ -4099,8 +4106,8 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
                                command->retries++;
 
                                /*
-                                * Reset the random state as they were at the beginning
-                                * of the transaction.
+                                * Reset the random state to what it was at the beginning of
+                                * the transaction.
                                 */
                                st->cs_func_rs = st->random_state;
 
@@ -4188,8 +4195,9 @@ advanceConnectionState(TState *thread, CState *st, StatsData *agg)
                                        st->state = CSTATE_CHOOSE_SCRIPT;
 
                                        /*
-                                        * Ensure that we always return on this point, so as to avoid
-                                        * an infinite loop if the script only contains meta commands.
+                                        * Ensure that we always return at this point, so as to
+                                        * avoid an infinite loop if the script only contains meta
+                                        * commands.
                                         */
                                        return;
                                }
@@ -4518,10 +4526,10 @@ doLog(TState *thread, CState *st,
                                lag_max = agg->lag.max;
                        }
                        fprintf(logfile, " %.0f %.0f %.0f %.0f",
-                                               lag_sum,
-                                               lag_sum2,
-                                               lag_min,
-                                               lag_max);
+                                       lag_sum,
+                                       lag_sum2,
+                                       lag_min,
+                                       lag_max);
 
                        if (latency_limit)
                                skipped = agg->skipped;
@@ -4588,7 +4596,7 @@ processXactStats(TState *thread, CState *st, pg_time_usec_t *now,
        double          latency = 0.0,
                                lag = 0.0;
        bool            detailed = progress || throttle_delay || latency_limit ||
-                                                  use_log || per_script_stats;
+       use_log || per_script_stats;
 
        if (detailed && !skipped && st->estatus == ESTATUS_NO_ERROR)
        {
@@ -4838,7 +4846,7 @@ initGenerateDataClientSide(PGconn *con)
        PGresult   *res;
        int                     i;
        int64           k;
-       char            *copy_statement;
+       char       *copy_statement;
 
        /* used to track elapsed time and estimate of the remaining time */
        pg_time_usec_t start;
@@ -6365,7 +6373,7 @@ printResults(StatsData *total,
                                StatsData  *sstats = &sql_script[i].stats;
                                int64           script_failures = getFailures(sstats);
                                int64           script_total_cnt =
-                                       sstats->cnt + sstats->skipped + script_failures;
+                               sstats->cnt + sstats->skipped + script_failures;
 
                                printf("SQL script %d: %s\n"
                                           " - weight: %d (targets %.1f%% of total)\n"
index ca71f968dc4db5e17965e67bb9cbaf3be121df11..2c0dc369652d67c7b46406ee26953e9f412ef265 100644 (file)
@@ -1202,17 +1202,21 @@ check_pgbench_logs($bdir, '001_pgbench_log_3', 1, 10, 10,
 
 # abort of the client if the script contains an incomplete transaction block
 $node->pgbench(
-       '--no-vacuum', 2, [ qr{processed: 1/10} ],
-       [ qr{client 0 aborted: end of script reached without completing the last transaction} ],
+       '--no-vacuum',
+       2,
+       [qr{processed: 1/10}],
+       [
+               qr{client 0 aborted: end of script reached without completing the last transaction}
+       ],
        'incomplete transaction block',
        { '001_pgbench_incomplete_transaction_block' => q{BEGIN;SELECT 1;} });
 
 # Test the concurrent update in the table row and deadlocks.
 
 $node->safe_psql('postgres',
-       'CREATE UNLOGGED TABLE first_client_table (value integer); '
-  . 'CREATE UNLOGGED TABLE xy (x integer, y integer); '
-  . 'INSERT INTO xy VALUES (1, 2);');
+           'CREATE UNLOGGED TABLE first_client_table (value integer); '
+         . 'CREATE UNLOGGED TABLE xy (x integer, y integer); '
+         . 'INSERT INTO xy VALUES (1, 2);');
 
 # Serialization error and retry
 
@@ -1221,7 +1225,7 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=repeatable\\ read";
 # Check that we have a serialization error and the same random value of the
 # delta variable in the next try
 my $err_pattern =
-       "(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
+    "(client (0|1) sending UPDATE xy SET y = y \\+ -?\\d+\\b).*"
   . "client \\2 got an error in command 3 \\(SQL\\) of script 0; "
   . "ERROR:  could not serialize access due to concurrent update\\b.*"
   . "\\1";
@@ -1229,9 +1233,12 @@ my $err_pattern =
 $node->pgbench(
        "-n -c 2 -t 1 -d --verbose-errors --max-tries 2",
        0,
-       [ qr{processed: 2/2\b}, qr{number of transactions retried: 1\b},
-         qr{total number of retries: 1\b} ],
-       [ qr/$err_pattern/s ],
+       [
+               qr{processed: 2/2\b},
+               qr{number of transactions retried: 1\b},
+               qr{total number of retries: 1\b}
+       ],
+       [qr/$err_pattern/s],
        'concurrent update with retrying',
        {
                '001_pgbench_serialization' => q{
@@ -1304,15 +1311,18 @@ local $ENV{PGOPTIONS} = "-c default_transaction_isolation=read\\ committed";
 
 # Check that we have a deadlock error
 $err_pattern =
-       "client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
+    "client (0|1) got an error in command (3|5) \\(SQL\\) of script 0; "
   . "ERROR:  deadlock detected\\b";
 
 $node->pgbench(
        "-n -c 2 -t 1 --max-tries 2 --verbose-errors",
        0,
-       [ qr{processed: 2/2\b}, qr{number of transactions retried: 1\b},
-         qr{total number of retries: 1\b} ],
-       [ qr{$err_pattern} ],
+       [
+               qr{processed: 2/2\b},
+               qr{number of transactions retried: 1\b},
+               qr{total number of retries: 1\b}
+       ],
+       [qr{$err_pattern}],
        'deadlock with retrying',
        {
                '001_pgbench_deadlock' => q{
index a5074c70d9dc895944d7e3e24eb582d44aef31d0..50bde7dd0fc12e50d74f6b970466bf4ef2fe05c6 100644 (file)
@@ -37,7 +37,7 @@ sub pgbench_scripts
        local $Test::Builder::Level = $Test::Builder::Level + 1;
 
        my ($opts, $stat, $out, $err, $name, $files) = @_;
-       my @cmd       = ('pgbench', split /\s+/, $opts);
+       my @cmd = ('pgbench', split /\s+/, $opts);
        my @filenames = ();
        if (defined $files)
        {
@@ -196,7 +196,9 @@ my @options = (
        [
                'an infinite number of tries',
                '--max-tries 0',
-               [qr{an unlimited number of transaction tries can only be used with --latency-limit or a duration}]
+               [
+                       qr{an unlimited number of transaction tries can only be used with --latency-limit or a duration}
+               ]
        ],
 
        # logging sub-options
index feb1d547d4d81dfe4bf41080685c904c9c4948af..9b140badeb9f8380da58d5114355957158615a97 100644 (file)
@@ -32,8 +32,8 @@
 
 static bool DescribeQuery(const char *query, double *elapsed_msec);
 static bool ExecQueryUsingCursor(const char *query, double *elapsed_msec);
-static int ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_gone_p,
-                                                                         bool is_watch, const printQueryOpt *opt, FILE *printQueryFout);
+static int     ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_gone_p,
+                                                                          bool is_watch, const printQueryOpt *opt, FILE *printQueryFout);
 static bool command_no_begin(const char *query);
 static bool is_select_command(const char *query);
 
@@ -482,7 +482,7 @@ ClearOrSaveResult(PGresult *result)
 static void
 ClearOrSaveAllResults(void)
 {
-       PGresult        *result;
+       PGresult   *result;
 
        while ((result = PQgetResult(pset.db)) != NULL)
                ClearOrSaveResult(result);
@@ -697,7 +697,8 @@ PrintQueryTuples(const PGresult *result, const printQueryOpt *opt, FILE *printQu
        }
        else
        {
-               FILE *fout = printQueryFout ? printQueryFout : pset.queryFout;
+               FILE       *fout = printQueryFout ? printQueryFout : pset.queryFout;
+
                printQuery(result, opt ? opt : &pset.popt, fout, false, pset.logfile);
                if (ferror(fout))
                {
@@ -907,9 +908,9 @@ HandleCopyResult(PGresult **resultp)
                        && (copystream != NULL);
 
                /*
-                * Suppress status printing if the report would go to the same
-                * place as the COPY data just went.  Note this doesn't
-                * prevent error reporting, since handleCopyOut did that.
+                * Suppress status printing if the report would go to the same place
+                * as the COPY data just went.  Note this doesn't prevent error
+                * reporting, since handleCopyOut did that.
                 */
                if (copystream == pset.queryFout)
                {
@@ -943,8 +944,8 @@ HandleCopyResult(PGresult **resultp)
        ResetCancelConn();
 
        /*
-        * Replace the PGRES_COPY_OUT/IN result with COPY command's exit
-        * status, or with NULL if we want to suppress printing anything.
+        * Replace the PGRES_COPY_OUT/IN result with COPY command's exit status,
+        * or with NULL if we want to suppress printing anything.
         */
        PQclear(*resultp);
        *resultp = copy_result;
@@ -1069,7 +1070,7 @@ PrintQueryResult(PGresult *result, bool last, bool is_watch, const printQueryOpt
  */
 struct t_notice_messages
 {
-       PQExpBufferData messages[2];
+       PQExpBufferData messages[2];
        int                     current;
 };
 
@@ -1080,6 +1081,7 @@ static void
 AppendNoticeMessage(void *arg, const char *msg)
 {
        struct t_notice_messages *notices = arg;
+
        appendPQExpBufferStr(&notices->messages[notices->current], msg);
 }
 
@@ -1089,7 +1091,8 @@ AppendNoticeMessage(void *arg, const char *msg)
 static void
 ShowNoticeMessage(struct t_notice_messages *notices)
 {
-       PQExpBufferData *current = &notices->messages[notices->current];
+       PQExpBufferData *current = &notices->messages[notices->current];
+
        if (*current->data != '\0')
                pg_log_info("%s", current->data);
        resetPQExpBuffer(current);
@@ -1234,6 +1237,7 @@ SendQuery(const char *query)
                                break;
 
                        case PQTRANS_INTRANS:
+
                                /*
                                 * Release our savepoint, but do nothing if they are messing
                                 * with savepoints themselves
@@ -1472,7 +1476,7 @@ DescribeQuery(const char *query, double *elapsed_msec)
  */
 static int
 ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_gone_p,
-       bool is_watch, const printQueryOpt *opt, FILE *printQueryFout)
+                                                  bool is_watch, const printQueryOpt *opt, FILE *printQueryFout)
 {
        bool            timing = pset.timing;
        bool            success;
@@ -1527,8 +1531,8 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
                if (!AcceptResult(result, false))
                {
                        /*
-                        * Some error occured, either a server-side failure or
-                        * a failure to submit the command string.  Record that.
+                        * Some error occurred, either a server-side failure or a failure
+                        * to submit the command string.  Record that.
                         */
                        const char *error = PQresultErrorMessage(result);
 
@@ -1551,10 +1555,12 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
                        if (result_status == PGRES_COPY_BOTH ||
                                result_status == PGRES_COPY_OUT ||
                                result_status == PGRES_COPY_IN)
+
                                /*
-                                * For some obscure reason PQgetResult does *not* return a NULL in copy
-                                * cases despite the result having been cleared, but keeps returning an
-                                * "empty" result that we have to ignore manually.
+                                * For some obscure reason PQgetResult does *not* return a
+                                * NULL in copy cases despite the result having been cleared,
+                                * but keeps returning an "empty" result that we have to
+                                * ignore manually.
                                 */
                                result = NULL;
                        else
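
A hedged sketch of the workaround that comment describes, pulled out into a helper; the actual code simply assigns result = NULL inline:

    #include <libpq-fe.h>

    /*
     * After COPY, PQgetResult() may keep returning a cleared, "empty"
     * result instead of NULL; treat it as end-of-results explicitly.
     */
    static PGresult *
    ignore_copy_residue(PGresult *result)
    {
        ExecStatusType status = PQresultStatus(result);

        if (status == PGRES_COPY_IN ||
            status == PGRES_COPY_OUT ||
            status == PGRES_COPY_BOTH)
            return NULL;
        return result;
    }
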
@@ -1565,12 +1571,13 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
                else if (svpt_gone_p && !*svpt_gone_p)
                {
                        /*
-                        * Check if the user ran any command that would destroy our internal
-                        * savepoint: If the user did COMMIT AND CHAIN, RELEASE or ROLLBACK, our
-                        * savepoint is gone. If they issued a SAVEPOINT, releasing ours would
-                        * remove theirs.
+                        * Check if the user ran any command that would destroy our
+                        * internal savepoint: If the user did COMMIT AND CHAIN, RELEASE
+                        * or ROLLBACK, our savepoint is gone. If they issued a SAVEPOINT,
+                        * releasing ours would remove theirs.
                         */
                        const char *cmd = PQcmdStatus(result);
+
                        *svpt_gone_p = (strcmp(cmd, "COMMIT") == 0 ||
                                                        strcmp(cmd, "SAVEPOINT") == 0 ||
                                                        strcmp(cmd, "RELEASE") == 0 ||
@@ -1614,11 +1621,11 @@ ExecQueryAndProcessResults(const char *query, double *elapsed_msec, bool *svpt_g
                /*
                 * Get timing measure before printing the last result.
                 *
-                * It will include the display of previous results, if any.
-                * This cannot be helped because the server goes on processing
-                * further queries anyway while the previous ones are being displayed.
-                * The parallel execution of the client display hides the server time
-                * when it is shorter.
+                * It will include the display of previous results, if any. This
+                * cannot be helped because the server goes on processing further
+                * queries anyway while the previous ones are being displayed. The
+                * parallel execution of the client display hides the server time when
+                * it is shorter.
                 *
                 * With combined queries, timing must be understood as an upper bound
                 * of the time spent processing them.
index 31df8b759cdd2a06325db14b89f5bd95724bface..1a5d924a23fa3564d55a13ef889dd1e3978c8a1c 100644 (file)
@@ -109,9 +109,9 @@ describeAggregates(const char *pattern, bool verbose, bool showSystem)
                                                         "      AND n.nspname <> 'information_schema'\n");
 
        if (!validateSQLNamePattern(&buf, pattern, true, false,
-                                                         "n.nspname", "p.proname", NULL,
-                                                         "pg_catalog.pg_function_is_visible(p.oid)",
-                                                         NULL, 3))
+                                                               "n.nspname", "p.proname", NULL,
+                                                               "pg_catalog.pg_function_is_visible(p.oid)",
+                                                               NULL, 3))
                return false;
 
        appendPQExpBufferStr(&buf, "ORDER BY 1, 2, 4;");
@@ -6002,7 +6002,7 @@ validateSQLNamePattern(PQExpBuffer buf, const char *pattern, bool have_where,
                                           const char *visibilityrule, bool *added_clause,
                                           int maxparts)
 {
-       PQExpBufferData dbbuf;
+       PQExpBufferData dbbuf;
        int                     dotcnt;
        bool            added;
 
@@ -6021,7 +6021,7 @@ validateSQLNamePattern(PQExpBuffer buf, const char *pattern, bool have_where,
                return false;
        }
 
-       if (maxparts > 1 && dotcnt == maxparts-1)
+       if (maxparts > 1 && dotcnt == maxparts - 1)
        {
                if (PQdb(pset.db) == NULL)
                {
index 98996d9a3792b08655bcb6839f369e9a94abd4f9..90e69d7cdba368fca5a32cfc18bd8828f293c3e0 100644 (file)
@@ -36,9 +36,8 @@ sub psql_fails_like
        my ($node, $sql, $expected_stderr, $test_name) = @_;
 
        # Use the context of a WAL sender, some of the tests rely on that.
-       my ($ret, $stdout, $stderr) = $node->psql(
-               'postgres', $sql,
-               replication  => 'database');
+       my ($ret, $stdout, $stderr) =
+         $node->psql('postgres', $sql, replication => 'database');
 
        isnt($ret, 0, "$test_name: exit code not 0");
        like($stderr, $expected_stderr, "$test_name: matches");
@@ -69,9 +68,9 @@ max_wal_senders = 4
 });
 $node->start;
 
-psql_like($node, '\copyright', qr/Copyright/, '\copyright');
-psql_like($node, '\help', qr/ALTER/, '\help without arguments');
-psql_like($node, '\help SELECT', qr/SELECT/, '\help with argument');
+psql_like($node, '\copyright',   qr/Copyright/, '\copyright');
+psql_like($node, '\help',        qr/ALTER/,     '\help without arguments');
+psql_like($node, '\help SELECT', qr/SELECT/,    '\help with argument');
 
 # Test clean handling of unsupported replication command responses
 psql_fails_like(
@@ -116,16 +115,16 @@ NOTIFY foo, 'bar';",
        'notification with payload');
 
 # test behavior and output on server crash
-my ($ret, $out, $err) = $node->psql(
-       'postgres',
-       "SELECT 'before' AS running;\n" .
-       "SELECT pg_terminate_backend(pg_backend_pid());\n" .
-       "SELECT 'AFTER' AS not_running;\n");
+my ($ret, $out, $err) = $node->psql('postgres',
+           "SELECT 'before' AS running;\n"
+         . "SELECT pg_terminate_backend(pg_backend_pid());\n"
+         . "SELECT 'AFTER' AS not_running;\n");
 
 is($ret, 2, 'server crash: psql exit code');
 like($out, qr/before/, 'server crash: output before crash');
 ok($out !~ qr/AFTER/, 'server crash: no output after crash');
-is($err, 'psql:<stdin>:2: FATAL:  terminating connection due to administrator command
+is( $err,
+       'psql:<stdin>:2: FATAL:  terminating connection due to administrator command
 psql:<stdin>:2: server closed the connection unexpectedly
        This probably means the server terminated abnormally
        before or while processing the request.
@@ -149,34 +148,46 @@ psql_like(
 # \errverbose: The normal way, using a cursor by setting FETCH_COUNT,
 # and using \gdesc.  Test them all.
 
-like(($node->psql('postgres', "SELECT error;\n\\errverbose", on_error_stop => 0))[2],
-  qr/\A^psql:<stdin>:1: ERROR:  .*$
+like(
+       (   $node->psql(
+                       'postgres',
+                       "SELECT error;\n\\errverbose",
+                       on_error_stop => 0))[2],
+       qr/\A^psql:<stdin>:1: ERROR:  .*$
 ^LINE 1: SELECT error;$
 ^ *^.*$
 ^psql:<stdin>:2: error: ERROR:  [0-9A-Z]{5}: .*$
 ^LINE 1: SELECT error;$
 ^ *^.*$
 ^LOCATION: .*$/m,
-  '\errverbose after normal query with error');
-
-like(($node->psql('postgres', "\\set FETCH_COUNT 1\nSELECT error;\n\\errverbose", on_error_stop => 0))[2],
-  qr/\A^psql:<stdin>:2: ERROR:  .*$
+       '\errverbose after normal query with error');
+
+like(
+       (   $node->psql(
+                       'postgres',
+                       "\\set FETCH_COUNT 1\nSELECT error;\n\\errverbose",
+                       on_error_stop => 0))[2],
+       qr/\A^psql:<stdin>:2: ERROR:  .*$
 ^LINE 2: SELECT error;$
 ^ *^.*$
 ^psql:<stdin>:3: error: ERROR:  [0-9A-Z]{5}: .*$
 ^LINE 2: SELECT error;$
 ^ *^.*$
 ^LOCATION: .*$/m,
-  '\errverbose after FETCH_COUNT query with error');
-
-like(($node->psql('postgres', "SELECT error\\gdesc\n\\errverbose", on_error_stop => 0))[2],
-  qr/\A^psql:<stdin>:1: ERROR:  .*$
+       '\errverbose after FETCH_COUNT query with error');
+
+like(
+       (   $node->psql(
+                       'postgres',
+                       "SELECT error\\gdesc\n\\errverbose",
+                       on_error_stop => 0))[2],
+       qr/\A^psql:<stdin>:1: ERROR:  .*$
 ^LINE 1: SELECT error$
 ^ *^.*$
 ^psql:<stdin>:2: error: ERROR:  [0-9A-Z]{5}: .*$
 ^LINE 1: SELECT error$
 ^ *^.*$
 ^LOCATION: .*$/m,
-  '\errverbose after \gdesc with error');
+       '\errverbose after \gdesc with error');
 
 done_testing();
index 2711935a2cc9dd127154681aa2702ba0dcd02fc9..2eea515e8713f08d04f532ed30720714a98cab50 100644 (file)
@@ -212,10 +212,7 @@ check_completion(
 clear_line();
 
 # check case folding
-check_completion(
-       "select * from TAB\t",
-       qr/tab1 /,
-       "automatically fold case");
+check_completion("select * from TAB\t", qr/tab1 /, "automatically fold case");
 
 clear_query();
 
@@ -228,15 +225,10 @@ check_completion("\\DRD\t", qr/drds /, "complete \\DRD<tab> to \\drds");
 clear_line();
 
 # check completion of a schema-qualified name
-check_completion(
-       "select * from pub\t",
-       qr/public\./,
-       "complete schema when relevant");
+check_completion("select * from pub\t",
+       qr/public\./, "complete schema when relevant");
 
-check_completion(
-       "tab\t",
-       qr/tab1 /,
-       "complete schema-qualified name");
+check_completion("tab\t", qr/tab1 /, "complete schema-qualified name");
 
 clear_query();
 
@@ -339,15 +331,10 @@ check_completion(
 clear_line();
 
 # check timezone name completion
-check_completion(
-       "SET timezone TO am\t",
-       qr|'America/|,
-       "offer partial timezone name");
+check_completion("SET timezone TO am\t",
+       qr|'America/|, "offer partial timezone name");
 
-check_completion(
-       "new_\t",
-       qr|New_York|,
-       "complete partial timezone name");
+check_completion("new_\t", qr|New_York|, "complete partial timezone name");
 
 clear_line();
 
index d57d342952172ced5f9c2d256c789bb338188bd7..f4dbd36c39113f8e894cc62b0484dadeb7517669 100644 (file)
@@ -21,7 +21,8 @@ $node->start;
 # the process from IPC::Run.  As a workaround, we have psql print its
 # own PID (which is the parent of the shell launched by psql) to a
 # file.
-SKIP: {
+SKIP:
+{
        skip "cancel test requires a Unix shell", 2 if $windows_os;
 
        local %ENV = $node->_get_env();
@@ -31,31 +32,38 @@ SKIP: {
        # Test whether shell supports $PPID.  It's part of POSIX, but some
        # pre-/non-POSIX shells don't support it (e.g., NetBSD).
        $stdin = "\\! echo \$PPID";
-       IPC::Run::run(['psql', '-X', '-v', 'ON_ERROR_STOP=1'], '<', \$stdin, '>', \$stdout, '2>', \$stderr);
+       IPC::Run::run([ 'psql', '-X', '-v', 'ON_ERROR_STOP=1' ],
+               '<', \$stdin, '>', \$stdout, '2>', \$stderr);
        $stdout =~ /^\d+$/ or skip "shell apparently does not support \$PPID", 2;
 
        # Now start the real test
-       my $h = IPC::Run::start(['psql', '-X', '-v', 'ON_ERROR_STOP=1'], \$stdin, \$stdout, \$stderr);
+       my $h = IPC::Run::start([ 'psql', '-X', '-v', 'ON_ERROR_STOP=1' ],
+               \$stdin, \$stdout, \$stderr);
 
        # Get the PID
        $stdout = '';
        $stderr = '';
-       $stdin = "\\! echo \$PPID >$tempdir/psql.pid\n";
+       $stdin  = "\\! echo \$PPID >$tempdir/psql.pid\n";
        pump $h while length $stdin;
        my $count;
        my $psql_pid;
-       until (-s "$tempdir/psql.pid" and ($psql_pid = PostgreSQL::Test::Utils::slurp_file("$tempdir/psql.pid")) =~ /^\d+\n/s)
+       until (
+               -s "$tempdir/psql.pid"
+                 and ($psql_pid =
+                       PostgreSQL::Test::Utils::slurp_file("$tempdir/psql.pid")) =~
+                 /^\d+\n/s)
        {
                ($count++ < 100 * $PostgreSQL::Test::Utils::timeout_default)
                  or die "pid file did not appear";
-               usleep(10_000)
+               usleep(10_000);
        }
 
        # Send sleep command and wait until the server has registered it
        $stdin = "select pg_sleep($PostgreSQL::Test::Utils::timeout_default);\n";
        pump $h while length $stdin;
-       $node->poll_query_until('postgres', q{SELECT (SELECT count(*) FROM pg_stat_activity WHERE query ~ '^select pg_sleep') > 0;})
-         or die "timed out";
+       $node->poll_query_until('postgres',
+               q{SELECT (SELECT count(*) FROM pg_stat_activity WHERE query ~ '^select pg_sleep') > 0;}
+       ) or die "timed out";
 
        # Send cancel request
        kill 'INT', $psql_pid;
@@ -63,7 +71,10 @@ SKIP: {
        my $result = finish $h;
 
        ok(!$result, 'query failed as expected');
-       like($stderr, qr/canceling statement due to user request/, 'query was canceled');
+       like(
+               $stderr,
+               qr/canceling statement due to user request/,
+               'query was canceled');
 }
 
 done_testing();
index 588c0841fee1554d0861cea94f45a5d14bf1f046..55af9eb04e472e307056b594474dfef08a6d7284 100644 (file)
@@ -826,7 +826,7 @@ static const SchemaQuery Query_for_list_of_mergetargets = {
        .selcondition =
        "c.relkind IN (" CppAsString2(RELKIND_RELATION) ", "
        CppAsString2(RELKIND_PARTITIONED_TABLE) ") ",
-       .viscondition = "pg_catalog.pg_table_is_visible(c.oid)",
+       .viscondition = "pg_catalog.pg_table_is_visible(c.oid)",
        .namespace = "c.relnamespace",
        .result = "c.relname",
 };
@@ -1827,6 +1827,7 @@ psql_completion(const char *text, int start, int end)
                         (HeadMatches("ALTER", "PUBLICATION", MatchAny, "ADD|SET", "TABLE") &&
                          ends_with(prev_wd, ',')))
                COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables);
+
        /*
         * "ALTER PUBLICATION <name> SET TABLE <name> WHERE (" - complete with
         * table attributes
index 18f6e313d57223a5dcd36f2cbea07a12e152b7de..78733f64d2517f42aff25ffe0ccdacd81ddb4eb2 100644 (file)
@@ -35,12 +35,19 @@ if ($ENV{with_icu} eq 'yes')
                'create database with ICU fails without ICU locale specified');
 
        $node->issues_sql_like(
-               [ 'createdb', '-T', 'template0', '--locale-provider=icu', '--icu-locale=en', 'foobar5' ],
+               [
+                       'createdb',        '-T',
+                       'template0',       '--locale-provider=icu',
+                       '--icu-locale=en', 'foobar5'
+               ],
                qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/,
                'create database with ICU locale specified');
 
        $node->command_fails(
-               [ 'createdb', '-T', 'template0', '--locale-provider=icu', '--icu-locale=@colNumeric=lower', 'foobarX' ],
+               [
+                       'createdb', '-T', 'template0', '--locale-provider=icu',
+                       '--icu-locale=@colNumeric=lower', 'foobarX'
+               ],
                'fails for invalid ICU locale');
 }
 else
@@ -53,7 +60,8 @@ else
 $node->command_fails([ 'createdb', 'foobar1' ],
        'fails if database already exists');
 
-$node->command_fails([ 'createdb', '-T', 'template0', '--locale-provider=xyz', 'foobarX' ],
+$node->command_fails(
+       [ 'createdb', '-T', 'template0', '--locale-provider=xyz', 'foobarX' ],
        'fails for invalid locale provider');
 
 # Check use of templates with shared dependencies copied from the template.
index 632fae61442770b38bc54150a35f44db5ba51b50..da3c291c0ff39f73966aee3d353a7d75546c4e13 100644 (file)
@@ -72,7 +72,7 @@ get_compress_algorithm_name(pg_compress_algorithm algorithm)
                        /* no default, to provoke compiler warnings if values are added */
        }
        Assert(false);
-       return "???";   /* placate compiler */
+       return "???";                           /* placate compiler */
 }
 
 /*
@@ -93,7 +93,7 @@ get_compress_algorithm_name(pg_compress_algorithm algorithm)
  */
 void
 parse_compress_specification(pg_compress_algorithm algorithm, char *specification,
-                                          pg_compress_specification *result)
+                                                        pg_compress_specification *result)
 {
        int                     bare_level;
        char       *bare_level_endp;
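
A hedged usage sketch for this parser; PG_COMPRESSION_GZIP and the parse_error field are taken from common/compression.h in this tree, but treat their exact names here as assumptions:

    #include "common/compression.h"
    #include "common/logging.h"

    static void
    check_spec_sketch(void)
    {
        char        spec_str[] = "level=5"; /* e.g. from "client-gzip:level=5" */
        pg_compress_specification spec;

        parse_compress_specification(PG_COMPRESSION_GZIP, spec_str, &spec);
        if (spec.parse_error != NULL)
            pg_fatal("invalid compression specification: %s", spec.parse_error);
    }
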
index 8e76ffdee99015e5fee44fb82b3fcccdd8b5e0e3..2fb29b1f855820e231476b6c637a7ee1e923e320 100644 (file)
@@ -185,6 +185,7 @@ pg_cryptohash_init(pg_cryptohash_ctx *ctx)
        {
                ctx->errreason = SSLerrmessage(ERR_get_error());
                ctx->error = PG_CRYPTOHASH_ERROR_OPENSSL;
+
                /*
                 * The OpenSSL error queue should normally be empty since we've
                 * consumed an error, but cipher initialization can in FIPS-enabled
index 289b1f26b869ad4955c1a6bdb0517cce4696f101..9da588daf9104f0e0f495e7591e4c16c0136b193 100644 (file)
@@ -35,7 +35,7 @@
 
 /* Inhibit mingw CRT's auto-globbing of command line arguments */
 #if defined(WIN32) && !defined(_MSC_VER)
-extern int _CRT_glob = 0; /* 0 turns off globbing; 1 turns it on */
+extern int     _CRT_glob = 0;          /* 0 turns off globbing; 1 turns it on */
 #endif
 
 /*
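
The `extern int _CRT_glob` line above shows pgindent's declaration layout: the declared name is padded out to a fixed column after the type, and the trailing comment is re-aligned to match. A hedged sketch with hypothetical variables:

    extern int  demo_glob;          /* name padded to the declaration column */
    extern char *demo_label;        /* the '*' binds to the name, not the type */
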
index a382551a981cb8dfec97d916f6676df078b74a2a..0b89f399f08040941d48f714f35acd68fc0044db 100644
@@ -245,7 +245,7 @@ typedef struct IndexAmRoutine
        /* does AM use maintenance_work_mem? */
        bool            amusemaintenanceworkmem;
        /* does AM block HOT update? */
-       bool        amhotblocking;
+       bool            amhotblocking;
        /* OR of parallel vacuum flags.  See vacuum.h for flags. */
        uint8           amparallelvacuumoptions;
        /* type of data stored in index, or InvalidOid if variable */
index 5d7f7fd800eeebe74501187f52697be8e49b3287..abf62d9df79140b1e557f9c59e0b1fa8f73ca9d6 100644
@@ -188,7 +188,7 @@ extern int  heap_page_prune(Relation relation, Buffer buffer,
                                                        struct GlobalVisState *vistest,
                                                        TransactionId old_snap_xmin,
                                                        TimestampTz old_snap_ts_ts,
-                                                       int     *nnewlpdead,
+                                                       int *nnewlpdead,
                                                        OffsetNumber *off_loc);
 extern void heap_page_prune_execute(Buffer buffer,
                                                                        OffsetNumber *redirected, int nredirected,
index e465800e445ef0754c4df9a283687c147ff72933..3b6a497e1b40558eac70d6578e4898da92afe8a2 100644
@@ -26,7 +26,7 @@ typedef enum RmgrIds
 {
 #include "access/rmgrlist.h"
        RM_NEXT_ID
-} RmgrIds;
+}                      RmgrIds;
 
 #undef PG_RMGR
 
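The heavy padding that `} RmgrIds;` picked up above is the classic sign of a typedef name pgindent does not find in typedefs.list: the unknown identifier is treated as a variable being declared and pushed to the declaration column. (The archive-module header near the end of this patch, the one guarded by _PGARCH_H, shows the reverse once a name is added to the list.) A hypothetical reproduction:

    /* If "DemoIds" were missing from typedefs.list, pgindent would emit: */
    typedef enum DemoIds
    {
        DEMO_FIRST,
        DEMO_LAST
    }           DemoIds;

    /* With the name known, it emits "} DemoIds;" instead. */
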
index 837fe7de0b96592348de5056857f40ab9749471c..4794941df31b4456a948094061cc5bd51e2b9c89 100644
@@ -231,9 +231,10 @@ typedef struct xl_xact_assignment
 typedef struct xl_xact_xinfo
 {
        /*
-        * Even though we right now only require two bytes of space in xinfo we use
-        * four so following records don't have to care about alignment. Commit
-        * records can be large, so copying large portions isn't attractive.
+        * Even though we right now only require two bytes of space in xinfo we
+        * use four so following records don't have to care about alignment.
+        * Commit records can be large, so copying large portions isn't
+        * attractive.
         */
        uint32          xinfo;
 } xl_xact_xinfo;
@@ -274,7 +275,7 @@ typedef struct xl_xact_stats_item
 
 typedef struct xl_xact_stats_items
 {
-       int             nitems;
+       int                     nitems;
        xl_xact_stats_item items[FLEXIBLE_ARRAY_MEMBER];
 } xl_xact_stats_items;
 #define MinSizeOfXactStatsItems offsetof(xl_xact_stats_items, items)
@@ -378,7 +379,7 @@ typedef struct xl_xact_parsed_commit
        char            twophase_gid[GIDSIZE];  /* only for 2PC */
        int                     nabortrels;             /* only for 2PC */
        RelFileNode *abortnodes;        /* only for 2PC */
-       int                     nabortstats;            /* only for 2PC */
+       int                     nabortstats;    /* only for 2PC */
        xl_xact_stats_item *abortstats; /* only for 2PC */
 
        XLogRecPtr      origin_lsn;
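
pgindent also re-wraps block comments to the project's line-width limit, moving words between lines without changing the text, which is all the xl_xact_xinfo hunk above does. A sketch of the effect:

    /*
     * A comment that previously ran a word or two past the width limit is
     * re-flowed like this: the wording is untouched, only the line breaks
     * move.
     */
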
index be59eece2254f0ad55352f3ada93f25ad4248ad8..7eb4370f2d5308c5744915cd3082b7641edc35fa 100644
 
 typedef struct XLogRecStats
 {
-       uint64  count;
-       uint64  rec_len;
-       uint64  fpi_len;
+       uint64          count;
+       uint64          rec_len;
+       uint64          fpi_len;
 } XLogRecStats;
 
 typedef struct XLogStats
 {
-       uint64  count;
+       uint64          count;
 #ifdef FRONTEND
        XLogRecPtr      startptr;
        XLogRecPtr      endptr;
 #endif
-       XLogRecStats    rmgr_stats[RM_MAX_ID + 1];
-       XLogRecStats    record_stats[RM_MAX_ID + 1][MAX_XLINFO_TYPES];
+       XLogRecStats rmgr_stats[RM_MAX_ID + 1];
+       XLogRecStats record_stats[RM_MAX_ID + 1][MAX_XLINFO_TYPES];
 } XLogStats;
 
 extern void XLogRecGetLen(XLogReaderState *record, uint32 *rec_len,
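
Inside structs, pgindent tabs member names out to a fixed column; a type name wide enough to reach that column (here XLogRecStats) gets a single space instead. A compilable sketch with stand-in types:

    typedef unsigned long long demo64;
    typedef struct DemoRecStats { demo64 n; } DemoRecStats;

    typedef struct DemoStats
    {
        demo64      count;          /* short type: name at the member column */
        DemoRecStats rmgr_stats[4]; /* wide type: a single space instead */
    } DemoStats;
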
index 5fcbbc136f9ba66e18b05fcf7d117f4d7e158b25..c9d0b75a01b4cc8bf4cc95df402d402989935bb6 100644
@@ -78,7 +78,7 @@ typedef enum
 /* Private data of the read_local_xlog_page_no_wait callback. */
 typedef struct ReadLocalXLogPageNoWaitPrivate
 {
-       bool end_of_wal;        /* true, when end of WAL is reached */
+       bool            end_of_wal;             /* true, when end of WAL is reached */
 } ReadLocalXLogPageNoWaitPrivate;
 
 extern XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record,
index ac6adcb7300b8538ea2bec8592785d5a76a68dca..567ab63e855d363c74ab2bbead67a7208b9a7e4e 100644
@@ -129,10 +129,10 @@ typedef void (*object_access_hook_type) (ObjectAccessType access,
                                                                                 void *arg);
 
 typedef void (*object_access_hook_type_str) (ObjectAccessType access,
-                                                                                Oid classId,
-                                                                                const char *objectStr,
-                                                                                int subId,
-                                                                                void *arg);
+                                                                                        Oid classId,
+                                                                                        const char *objectStr,
+                                                                                        int subId,
+                                                                                        void *arg);
 
 /* Plugin sets this variable to a suitable hook function. */
 extern PGDLLIMPORT object_access_hook_type object_access_hook;
@@ -152,12 +152,12 @@ extern void RunFunctionExecuteHook(Oid objectId);
 
 /* String versions */
 extern void RunObjectPostCreateHookStr(Oid classId, const char *objectStr, int subId,
-                                                                       bool is_internal);
+                                                                          bool is_internal);
 extern void RunObjectDropHookStr(Oid classId, const char *objectStr, int subId,
-                                                         int dropflags);
+                                                                int dropflags);
 extern void RunObjectTruncateHookStr(const char *objectStr);
 extern void RunObjectPostAlterHookStr(Oid classId, const char *objectStr, int subId,
-                                                                  Oid auxiliaryId, bool is_internal);
+                                                                         Oid auxiliaryId, bool is_internal);
 extern bool RunNamespaceSearchHookStr(const char *objectStr, bool ereport_on_violation);
 extern void RunFunctionExecuteHookStr(const char *objectStr);
 
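For prototypes, pgindent re-tabs wrapped arguments so each continuation line starts under the first argument of the opening line, which is the entire content of the hook-declaration hunks above. A sketch with a hypothetical hook:

    extern void demo_post_alter_hook(int classId, const char *objectStr,
                                     int subId, int auxiliaryId,
                                     int is_internal);
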
index 62156346cf4f6f084928b05230f53e18bd487c1b..86cc6507983bca46b85260a863cbe1fb76f3a775 100644
 { aggfnoid => 'range_agg(anyrange)', aggtransfn => 'range_agg_transfn',
   aggfinalfn => 'range_agg_finalfn', aggfinalextra => 't',
   aggtranstype => 'internal' },
-{ aggfnoid => 'range_agg(anymultirange)', aggtransfn => 'multirange_agg_transfn',
+{ aggfnoid => 'range_agg(anymultirange)',
+  aggtransfn => 'multirange_agg_transfn',
   aggfinalfn => 'multirange_agg_finalfn', aggfinalextra => 't',
   aggtranstype => 'internal' },
 
index 304e8c18d52aaf66ede796e285a6dd29652fa8e9..e1f4eefa22053155a1c3fa194e2b7cabcf0cb97b 100644
@@ -223,7 +223,7 @@ DECLARE_INDEX(pg_class_tblspc_relfilenode_index, 3455, ClassTblspcRelfilenodeInd
         (relkind) == RELKIND_TOASTVALUE || \
         (relkind) == RELKIND_MATVIEW)
 
-extern int errdetail_relkind_not_supported(char relkind);
+extern int     errdetail_relkind_not_supported(char relkind);
 
 #endif                                                 /* EXPOSE_TO_CLIENT_CODE */
 
index c642c3bb9520d6dc498b160b170fa8ee64fb13ed..2190ccb5b88aae9d5aa2e488dea0af6f2f502e5f 100644
@@ -41,7 +41,7 @@ CATALOG(pg_collation,3456,CollationRelationId)
        int32           collencoding;   /* encoding for this collation; -1 = "all" */
 #ifdef CATALOG_VARLEN                  /* variable-length fields start here */
        text            collcollate BKI_DEFAULT(_null_);        /* LC_COLLATE setting */
-       text            collctype BKI_DEFAULT(_null_);          /* LC_CTYPE setting */
+       text            collctype BKI_DEFAULT(_null_);  /* LC_CTYPE setting */
        text            colliculocale BKI_DEFAULT(_null_);      /* ICU locale ID */
        text            collversion BKI_DEFAULT(_null_);        /* provider-dependent
                                                                                                         * version of collation
index 05873f74f68446c122e4ee3aa607d39f6e9c571b..47dcbfb343bae39b7127b6839d694445c64f536d 100644
@@ -14,7 +14,8 @@
 
 { oid => '1', oid_symbol => 'Template1DbOid',
   descr => 'default template for new databases',
-  datname => 'template1', encoding => 'ENCODING', datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't',
+  datname => 'template1', encoding => 'ENCODING',
+  datlocprovider => 'LOCALE_PROVIDER', datistemplate => 't',
   datallowconn => 't', datconnlimit => '-1', datfrozenxid => '0',
   datminmxid => '1', dattablespace => 'pg_default', datcollate => 'LC_COLLATE',
   datctype => 'LC_CTYPE', daticulocale => 'ICU_LOCALE', datacl => '_null_' },
index 263079c9e1ded4918f30452a8ce18f510480d54d..aa7264a4bbb482443d4202b06f9162aa8414b978 100644
@@ -46,7 +46,7 @@ CATALOG(pg_parameter_acl,8924,ParameterAclRelationId) BKI_SHARED_RELATION
  *             the format of pg_parameter_acl relation.
  * ----------------
  */
-typedef FormData_pg_parameter_acl *Form_pg_parameter_acl;
+typedef FormData_pg_parameter_acl * Form_pg_parameter_acl;
 
 DECLARE_TOAST_WITH_MACRO(pg_parameter_acl, 8925, 8926, PgParameterAclToastTable, PgParameterAclToastIndex);
 
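The space that appeared in `FormData_pg_parameter_acl * Form_pg_parameter_acl` above is the pointer-flavored version of the unknown-typedef symptom: pgindent detaches the `*` from both sides when it cannot classify the left-hand name. The pg_statistic_ext_data hunk further below shows the corrected form once the name is recognized. A hypothetical pair:

    typedef struct DemoFormData { int f1; } DemoFormData;

    /* name known to pgindent: */
    typedef DemoFormData *Form_demo_known;
    /* name unknown: pgindent would instead emit "DemoFormData * Form_demo;" */
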
index 6d378ff7859485e5bcf86c68c86647056c51813e..c1bc1c3cce1294caf8711ddb8a6ff41cda12ba1c 100644
   prosrc => 'pg_stat_have_stats' },
 
 { oid => '8523', descr => 'statistics: information about subscription stats',
-  proname => 'pg_stat_get_subscription_stats',
-  provolatile => 's', proparallel => 'r',
-  prorettype => 'record', proargtypes => 'oid',
+  proname => 'pg_stat_get_subscription_stats', provolatile => 's',
+  proparallel => 'r', prorettype => 'record', proargtypes => 'oid',
   proallargtypes => '{oid,oid,int8,int8,timestamptz}',
   proargmodes => '{i,o,o,o,o}',
   proargnames => '{subid,subid,apply_error_count,sync_error_count,stats_reset}',
   proargnames => '{wal_records,wal_fpi,wal_bytes,wal_buffers_full,wal_write,wal_sync,wal_write_time,wal_sync_time,stats_reset}',
   prosrc => 'pg_stat_get_wal' },
 { oid => '9085', descr => 'statistics: information about WAL prefetching',
-  proname => 'pg_stat_get_recovery_prefetch', prorows => '1', provolatile => 'v',
-  proretset => 't', prorettype => 'record', proargtypes => '',
+  proname => 'pg_stat_get_recovery_prefetch', prorows => '1', proretset => 't',
+  provolatile => 'v', prorettype => 'record', proargtypes => '',
   proallargtypes => '{timestamptz,int8,int8,int8,int8,int8,int8,int4,int4,int4}',
   proargmodes => '{o,o,o,o,o,o,o,o,o,o}',
   proargnames => '{stats_reset,prefetch,hit,skip_init,skip_new,skip_fpw,skip_rep,wal_distance,block_distance,io_depth}',
 { oid => '2739', descr => 'finish taking an online backup',
   proname => 'pg_backup_stop', provolatile => 'v', proparallel => 'r',
   prorettype => 'record', proargtypes => 'bool',
-  proallargtypes => '{bool,pg_lsn,text,text}',
-  proargmodes => '{i,o,o,o}',
+  proallargtypes => '{bool,pg_lsn,text,text}', proargmodes => '{i,o,o,o}',
   proargnames => '{wait_for_archive,lsn,labelfile,spcmapfile}',
   prosrc => 'pg_backup_stop' },
 { oid => '3436', descr => 'promote standby server',
   prosrc => 'json_object_agg_transfn' },
 { oid => '8175', descr => 'json object aggregate transition function',
   proname => 'json_object_agg_strict_transfn', proisstrict => 'f',
-  provolatile => 's', prorettype => 'internal', proargtypes => 'internal any any',
+  provolatile => 's', prorettype => 'internal',
+  proargtypes => 'internal any any',
   prosrc => 'json_object_agg_strict_transfn' },
 { oid => '8176', descr => 'json object aggregate transition function',
   proname => 'json_object_agg_unique_transfn', proisstrict => 'f',
-  provolatile => 's', prorettype => 'internal', proargtypes => 'internal any any',
+  provolatile => 's', prorettype => 'internal',
+  proargtypes => 'internal any any',
   prosrc => 'json_object_agg_unique_transfn' },
 { oid => '8177', descr => 'json object aggregate transition function',
   proname => 'json_object_agg_unique_strict_transfn', proisstrict => 'f',
-  provolatile => 's', prorettype => 'internal', proargtypes => 'internal any any',
+  provolatile => 's', prorettype => 'internal',
+  proargtypes => 'internal any any',
   prosrc => 'json_object_agg_unique_strict_transfn' },
 { oid => '3196', descr => 'json object aggregate final function',
   proname => 'json_object_agg_finalfn', proisstrict => 'f',
   proname => 'json_object_agg_strict', prokind => 'a', proisstrict => 'f',
   provolatile => 's', prorettype => 'json', proargtypes => 'any any',
   prosrc => 'aggregate_dummy' },
-{ oid => '8179', descr => 'aggregate input into a json object with unique keys',
+{ oid => '8179',
+  descr => 'aggregate input into a json object with unique keys',
   proname => 'json_object_agg_unique', prokind => 'a', proisstrict => 'f',
   provolatile => 's', prorettype => 'json', proargtypes => 'any any',
   prosrc => 'aggregate_dummy' },
 # SQL-spec window functions
 { oid => '3100', descr => 'row number within partition',
   proname => 'row_number', prosupport => 'window_row_number_support',
-  prokind => 'w', proisstrict => 'f',  prorettype => 'int8',
-  proargtypes => '', prosrc => 'window_row_number' },
+  prokind => 'w', proisstrict => 'f', prorettype => 'int8', proargtypes => '',
+  prosrc => 'window_row_number' },
 { oid => '8799', descr => 'planner support for row_number run condition',
   proname => 'window_row_number_support', prorettype => 'internal',
   proargtypes => 'internal', prosrc => 'window_row_number_support' },
 { oid => '3101', descr => 'integer rank with gaps',
-  proname => 'rank', prosupport => 'window_rank_support',
-  prokind => 'w', proisstrict => 'f', prorettype => 'int8',
-  proargtypes => '', prosrc => 'window_rank' },
+  proname => 'rank', prosupport => 'window_rank_support', prokind => 'w',
+  proisstrict => 'f', prorettype => 'int8', proargtypes => '',
+  prosrc => 'window_rank' },
 { oid => '8800', descr => 'planner support for rank run condition',
   proname => 'window_rank_support', prorettype => 'internal',
   proargtypes => 'internal', prosrc => 'window_rank_support' },
   prorettype => 'anymultirange', proargtypes => 'anyrange',
   prosrc => 'aggregate_dummy' },
 { oid => '8205', descr => 'aggregate transition function',
-  proname => 'multirange_agg_transfn', proisstrict => 'f', prorettype => 'internal',
-  proargtypes => 'internal anymultirange', prosrc => 'multirange_agg_transfn' },
+  proname => 'multirange_agg_transfn', proisstrict => 'f',
+  prorettype => 'internal', proargtypes => 'internal anymultirange',
+  prosrc => 'multirange_agg_transfn' },
 { oid => '8206', descr => 'aggregate final function',
   proname => 'multirange_agg_finalfn', proisstrict => 'f',
   prorettype => 'anymultirange', proargtypes => 'internal anymultirange',
   prorettype => 'anymultirange', proargtypes => 'anymultirange',
   prosrc => 'aggregate_dummy' },
 { oid => '1293', descr => 'expand multirange to set of ranges',
-  proname => 'unnest', prorows => '100',
-  proretset => 't', prorettype => 'anyrange', proargtypes => 'anymultirange',
+  proname => 'unnest', prorows => '100', proretset => 't',
+  prorettype => 'anyrange', proargtypes => 'anymultirange',
   prosrc => 'multirange_unnest' },
 
 # date, time, timestamp constructors
index 29b18566657ea1e033c5d1147086db9227e7a7dc..48205ba42930f513c866369847cb5993a0f722cf 100644
@@ -87,8 +87,8 @@ typedef struct PublicationDesc
        bool            rf_valid_for_delete;
 
        /*
-        * true if the columns are part of the replica identity or the publication actions
-        * do not include UPDATE or DELETE.
+        * true if the columns are part of the replica identity or the publication
+        * actions do not include UPDATE or DELETE.
         */
        bool            cols_valid_for_update;
        bool            cols_valid_for_delete;
index b01e620597425beaabdaa3a69835d70ad4922ffb..0ea3c41b5b014794abcf72dfb893090e2e14568a 100644
@@ -50,7 +50,7 @@ CATALOG(pg_statistic_ext_data,3429,StatisticExtDataRelationId)
  *             the format of pg_statistic_ext_data relation.
  * ----------------
  */
-typedef FormData_pg_statistic_ext_data * Form_pg_statistic_ext_data;
+typedef FormData_pg_statistic_ext_data *Form_pg_statistic_ext_data;
 
 DECLARE_TOAST(pg_statistic_ext_data, 3430, 3431);
 
index ba8c69c87e9af0ca294485717f6856fd1b041660..1e33450b444d1445f00ce2f535690082b7ec1056 100755
@@ -140,7 +140,9 @@ foreach my $input_file (@header_files)
                                $changed = 1;
                        }
                }
-               elsif ($line =~ m/^(DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*)(\d+)(,\s*)(\d+)(,\s*\w+,\s*\w+)\)/)
+               elsif ($line =~
+                       m/^(DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*)(\d+)(,\s*)(\d+)(,\s*\w+,\s*\w+)\)/
+                 )
                {
                        my $oid2 = $2;
                        my $oid4 = $4;
@@ -148,19 +150,21 @@ foreach my $input_file (@header_files)
                        {
                                $oid2 = $maphash{$oid2};
                                my $repl = $1 . $oid2 . $3 . $oid4 . $5 . ")";
-                               $line =~ s/^DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*\d+,\s*\d+,\s*\w+,\s*\w+\)/$repl/;
+                               $line =~
+                                 s/^DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*\d+,\s*\d+,\s*\w+,\s*\w+\)/$repl/;
                                $changed = 1;
                        }
                        if (exists $maphash{$oid4})
                        {
                                $oid4 = $maphash{$oid4};
                                my $repl = $1 . $oid2 . $3 . $oid4 . $5 . ")";
-                               $line =~ s/^DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*\d+,\s*\d+,\s*\w+,\s*\w+\)/$repl/;
+                               $line =~
+                                 s/^DECLARE_TOAST_WITH_MACRO\(\s*\w+,\s*\d+,\s*\d+,\s*\w+,\s*\w+\)/$repl/;
                                $changed = 1;
                        }
                }
-               elsif (
-                       $line =~ m/^(DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*\w+,\s*)(\d+)(,\s*.+)\)/)
+               elsif ($line =~
+                       m/^(DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*\w+,\s*)(\d+)(,\s*.+)\)/)
                {
                        if (exists $maphash{$4})
                        {
index ae87caf089d5ab796ecf74b07c7db9b7ff388fec..57df3fc1e3d2102e658789ef2250cafb75982a72 100644
@@ -32,8 +32,8 @@ extern ObjectAddress AlterPublicationOwner(const char *name, Oid newOwnerId);
 extern void AlterPublicationOwner_oid(Oid pubid, Oid newOwnerId);
 extern void InvalidatePublicationRels(List *relids);
 extern bool pub_rf_contains_invalid_column(Oid pubid, Relation relation,
-                                                                        List *ancestors, bool pubviaroot);
+                                                                                  List *ancestors, bool pubviaroot);
 extern bool pub_collist_contains_invalid_column(Oid pubid, Relation relation,
-                                                                        List *ancestors, bool pubviaroot);
+                                                                                               List *ancestors, bool pubviaroot);
 
 #endif                                                 /* PUBLICATIONCMDS_H */
index 9df70e6f06ff75752e7d8cb1e09cb95d761f5da2..e34db8c93cb1bec1b3a70a2cc010c390aa551abc 100644
@@ -684,49 +684,49 @@ typedef struct ExprEvalStep
                        {
                                int                     category;
                                Oid                     outfuncid;
-                       }                  *arg_type_cache;             /* cache for datum_to_json[b]() */
+                       }                  *arg_type_cache; /* cache for datum_to_json[b]() */
                        int                     nargs;
                }                       json_constructor;
 
                /* for EEOP_IS_JSON */
                struct
                {
-                                       JsonIsPredicate *pred;  /* original expression node */
+                       JsonIsPredicate *pred;  /* original expression node */
                }                       is_json;
 
                /* for EEOP_JSONEXPR */
                struct
                {
-                       JsonExpr   *jsexpr;                     /* original expression node */
+                       JsonExpr   *jsexpr; /* original expression node */
 
                        struct
                        {
-                               FmgrInfo        func;           /* typinput function for output type */
+                               FmgrInfo        func;   /* typinput function for output type */
                                Oid                     typioparam;
-                       } input;                                        /* I/O info for output type */
+                       }                       input;  /* I/O info for output type */
 
                        NullableDatum
-                                          *formatted_expr,             /* formatted context item value */
-                                          *res_expr,                   /* result item */
-                                          *coercion_expr,              /* input for JSON item coercion */
-                                          *pathspec;                   /* path specification value */
+                                          *formatted_expr, /* formatted context item value */
+                                          *res_expr,   /* result item */
+                                          *coercion_expr,      /* input for JSON item coercion */
+                                          *pathspec;   /* path specification value */
 
-                       ExprState  *result_expr;                /* coerced to output type */
+                       ExprState  *result_expr;        /* coerced to output type */
                        ExprState  *default_on_empty;   /* ON EMPTY DEFAULT expression */
                        ExprState  *default_on_error;   /* ON ERROR DEFAULT expression */
-                       List       *args;                               /* passing arguments */
+                       List       *args;       /* passing arguments */
 
-                       void       *cache;                              /* cache for json_populate_type() */
+                       void       *cache;      /* cache for json_populate_type() */
 
                        struct JsonCoercionsState
                        {
                                struct JsonCoercionState
                                {
-                                       JsonCoercion *coercion;         /* coercion expression */
-                                       ExprState  *estate;     /* coercion expression state */
-                               }                       null,
+                                       JsonCoercion *coercion; /* coercion expression */
+                                       ExprState  *estate; /* coercion expression state */
+                               }                       null,
                                                        string,
-                                                       numeric,
+                               numeric    ,
                                                        boolean,
                                                        date,
                                                        time,
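
The startling `numeric    ,` line above is faithful pgindent output, not a transcription error: `numeric` is itself a typedef name (the SQL numeric type), so where it appears as a declarator pgindent pads it as though a type were starting a new declaration. Any declarator that shadows a known typedef triggers the same mis-alignment; a hypothetical reproduction:

    typedef int demo_t;         /* imagine this name is in typedefs.list */

    struct DemoShadow
    {
        int         first,
                    second,
        demo_t     ,            /* declarator shadows the typedef: mis-padded */
                    last;
    };
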
index 873772f188374b0de4eeddcb3850e4346abc2574..d68a6b9d28cd13992ebfc97ae0611425c7b11b5a 100644
@@ -266,7 +266,7 @@ ExecProcNode(PlanState *node)
 extern ExprState *ExecInitExpr(Expr *node, PlanState *parent);
 extern ExprState *ExecInitExprWithParams(Expr *node, ParamListInfo ext_params);
 extern ExprState *ExecInitExprWithCaseValue(Expr *node, PlanState *parent,
-                                                 Datum *caseval, bool *casenull);
+                                                                                       Datum *caseval, bool *casenull);
 extern ExprState *ExecInitQual(List *qual, PlanState *parent);
 extern ExprState *ExecInitCheck(List *qual, PlanState *parent);
 extern List *ExecInitExprList(List *nodes, PlanState *parent);
index d55abc5414d40a81b0ec8e37552191f9027c927a..5314b737052cdc42dc26e86399732fb9d61edc81 100644
@@ -473,7 +473,7 @@ typedef struct
        FMGR_ABI_EXTRA, \
 }
 
-StaticAssertDecl(sizeof(FMGR_ABI_EXTRA) <= sizeof(((Pg_magic_struct*)0)->abi_extra),
+StaticAssertDecl(sizeof(FMGR_ABI_EXTRA) <= sizeof(((Pg_magic_struct *) 0)->abi_extra),
                                 "FMGR_ABI_EXTRA too long");
 
 /*
index 94b191f8ae05df814d6c7596ddd3ece22a52b0a7..5728801379522c4ee486ddd239d29c7dcf71889e 100644
@@ -2158,8 +2158,8 @@ typedef struct MemoizeState
                                                                 * by bit, false when using hash equality ops */
        MemoizeInstrumentation stats;   /* execution statistics */
        SharedMemoizeInfo *shared_info; /* statistics for parallel workers */
-       Bitmapset          *keyparamids; /* Param->paramids of expressions belonging to
-                                                                 * param_exprs */
+       Bitmapset  *keyparamids;        /* Param->paramids of expressions belonging to
+                                                                * param_exprs */
 } MemoizeState;
 
 /* ----------------
index 340d28f4e1a9800bce78194fc22136cb692af1fe..b3b407579b06b8f2be2abcdc01083a44fb904f8d 100644
@@ -705,7 +705,8 @@ extern bool equal(const void *a, const void *b);
  */
 typedef double Selectivity;            /* fraction of tuples a qualifier will pass */
 typedef double Cost;                   /* execution cost (in page-access units) */
-typedef double Cardinality;            /* (estimated) number of rows or other integer count */
+typedef double Cardinality;            /* (estimated) number of rows or other integer
+                                                                * count */
 
 
 /*
index 9a716f3794fd0ca4344db74da8fcd7ee7dd0aa96..73f635b4553b04a4da9c6fd309e9574cb3298e23 100644
@@ -301,6 +301,7 @@ typedef struct A_Expr
 typedef struct A_Const
 {
        NodeTag         type;
+
        /*
         * Value nodes are inline for performance.  You can treat 'val' as a node,
         * as in IsA(&val, Integer).  'val' is not valid if isnull is true.
@@ -763,7 +764,8 @@ typedef struct DefElem
        NodeTag         type;
        char       *defnamespace;       /* NULL if unqualified name */
        char       *defname;
-       Node       *arg;                        /* typically Integer, Float, String, or TypeName */
+       Node       *arg;                        /* typically Integer, Float, String, or
+                                                                * TypeName */
        DefElemAction defaction;        /* unspecified action, or SET/ADD/DROP */
        int                     location;               /* token location, or -1 if unknown */
 } DefElem;
@@ -1151,7 +1153,7 @@ typedef struct RangeTblEntry
         * Fields valid for ENR RTEs (else NULL/zero):
         */
        char       *enrname;            /* name of ephemeral named relation */
-       Cardinality     enrtuples;              /* estimated or actual from caller */
+       Cardinality enrtuples;          /* estimated or actual from caller */
 
        /*
         * Fields valid in all RTEs:
@@ -1667,7 +1669,7 @@ typedef struct JsonFuncExpr
        JsonOutput *output;                     /* output clause, if specified */
        JsonBehavior *on_empty;         /* ON EMPTY behavior, if specified */
        JsonBehavior *on_error;         /* ON ERROR behavior, if specified */
-       JsonWrapper     wrapper;                /* array wrapper behavior (JSON_QUERY only) */
+       JsonWrapper wrapper;            /* array wrapper behavior (JSON_QUERY only) */
        bool            omit_quotes;    /* omit or keep quotes? (JSON_QUERY only) */
        int                     location;               /* token location, or -1 if unknown */
 } JsonFuncExpr;
@@ -1680,17 +1682,17 @@ typedef struct JsonTableColumn
 {
        NodeTag         type;
        JsonTableColumnType coltype;    /* column type */
-       char       *name;                               /* column name */
-       TypeName   *typeName;                   /* column type name */
-       char       *pathspec;                   /* path specification, if any */
-       char       *pathname;                   /* path name, if any */
-       JsonFormat *format;                             /* JSON format clause, if specified */
-       JsonWrapper     wrapper;                        /* WRAPPER behavior for formatted columns */
-       bool            omit_quotes;            /* omit or keep quotes on scalar strings? */
-       List       *columns;                    /* nested columns */
-       JsonBehavior *on_empty;                 /* ON EMPTY behavior */
-       JsonBehavior *on_error;                 /* ON ERROR behavior */
-       int                     location;                       /* token location, or -1 if unknown */
+       char       *name;                       /* column name */
+       TypeName   *typeName;           /* column type name */
+       char       *pathspec;           /* path specification, if any */
+       char       *pathname;           /* path name, if any */
+       JsonFormat *format;                     /* JSON format clause, if specified */
+       JsonWrapper wrapper;            /* WRAPPER behavior for formatted columns */
+       bool            omit_quotes;    /* omit or keep quotes on scalar strings? */
+       List       *columns;            /* nested columns */
+       JsonBehavior *on_empty;         /* ON EMPTY behavior */
+       JsonBehavior *on_error;         /* ON ERROR behavior */
+       int                     location;               /* token location, or -1 if unknown */
 } JsonTableColumn;
 
 /*
@@ -1725,12 +1727,12 @@ typedef struct JsonTablePlan JsonTablePlan;
 struct JsonTablePlan
 {
        NodeTag         type;
-       JsonTablePlanType plan_type;            /* plan type */
+       JsonTablePlanType plan_type;    /* plan type */
        JsonTablePlanJoinType join_type;        /* join type (for joined plan only) */
-       JsonTablePlan *plan1;                           /* first joined plan */
-       JsonTablePlan *plan2;                           /* second joined plan */
-       char       *pathname;                           /* path name (for simple plan only) */
-       int                     location;                               /* token location, or -1 if unknown */
+       JsonTablePlan *plan1;           /* first joined plan */
+       JsonTablePlan *plan2;           /* second joined plan */
+       char       *pathname;           /* path name (for simple plan only) */
+       int                     location;               /* token location, or -1 if unknown */
 };
 
 /*
@@ -1740,13 +1742,13 @@ struct JsonTablePlan
 typedef struct JsonTable
 {
        NodeTag         type;
-       JsonCommon *common;                                     /* common JSON path syntax fields */
-       List       *columns;                            /* list of JsonTableColumn */
-       JsonTablePlan *plan;                            /* join plan, if specified */
-       JsonBehavior *on_error;                         /* ON ERROR behavior, if specified */
-       Alias      *alias;                                      /* table alias in FROM clause */
-       bool            lateral;                                /* does it have LATERAL prefix? */
-       int                     location;                               /* token location, or -1 if unknown */
+       JsonCommon *common;                     /* common JSON path syntax fields */
+       List       *columns;            /* list of JsonTableColumn */
+       JsonTablePlan *plan;            /* join plan, if specified */
+       JsonBehavior *on_error;         /* ON ERROR behavior, if specified */
+       Alias      *alias;                      /* table alias in FROM clause */
+       bool            lateral;                /* does it have LATERAL prefix? */
+       int                     location;               /* token location, or -1 if unknown */
 } JsonTable;
 
 /*
@@ -1807,7 +1809,7 @@ typedef struct JsonObjectConstructor
        NodeTag         type;
        List       *exprs;                      /* list of JsonKeyValue pairs */
        JsonOutput *output;                     /* RETURNING clause, if specified  */
-       bool            absent_on_null; /* skip NULL values? */
+       bool            absent_on_null; /* skip NULL values? */
        bool            unique;                 /* check key uniqueness? */
        int                     location;               /* token location, or -1 if unknown */
 } JsonObjectConstructor;
@@ -1821,7 +1823,7 @@ typedef struct JsonArrayConstructor
        NodeTag         type;
        List       *exprs;                      /* list of JsonValueExpr elements */
        JsonOutput *output;                     /* RETURNING clause, if specified  */
-       bool            absent_on_null; /* skip NULL elements? */
+       bool            absent_on_null; /* skip NULL elements? */
        int                     location;               /* token location, or -1 if unknown */
 } JsonArrayConstructor;
 
@@ -1835,7 +1837,7 @@ typedef struct JsonArrayQueryConstructor
        Node       *query;                      /* subquery */
        JsonOutput *output;                     /* RETURNING clause, if specified  */
        JsonFormat *format;                     /* FORMAT clause for subquery, if specified */
-       bool            absent_on_null; /* skip NULL elements? */
+       bool            absent_on_null; /* skip NULL elements? */
        int                     location;               /* token location, or -1 if unknown */
 } JsonArrayQueryConstructor;
 
@@ -1861,9 +1863,9 @@ typedef struct JsonAggConstructor
 typedef struct JsonObjectAgg
 {
        NodeTag         type;
-       JsonAggConstructor *constructor; /* common fields */
+       JsonAggConstructor *constructor;        /* common fields */
        JsonKeyValue *arg;                      /* object key-value pair */
-       bool            absent_on_null; /* skip NULL values? */
+       bool            absent_on_null; /* skip NULL values? */
        bool            unique;                 /* check key uniqueness? */
 } JsonObjectAgg;
 
@@ -1874,9 +1876,9 @@ typedef struct JsonObjectAgg
 typedef struct JsonArrayAgg
 {
        NodeTag         type;
-       JsonAggConstructor *constructor; /* common fields */
+       JsonAggConstructor *constructor;        /* common fields */
        JsonValueExpr *arg;                     /* array element expression */
-       bool            absent_on_null; /* skip NULL elements? */
+       bool            absent_on_null; /* skip NULL elements? */
 } JsonArrayAgg;
 
 
@@ -2621,7 +2623,7 @@ typedef struct Constraint
        char            generated_when; /* ALWAYS or BY DEFAULT */
 
        /* Fields used for unique constraints (UNIQUE and PRIMARY KEY): */
-       bool            nulls_not_distinct;     /* null treatment for UNIQUE constraints */
+       bool            nulls_not_distinct; /* null treatment for UNIQUE constraints */
        List       *keys;                       /* String nodes naming referenced key
                                                                 * column(s) */
        List       *including;          /* String nodes naming referenced nonkey
@@ -3250,7 +3252,7 @@ typedef struct IndexStmt
        SubTransactionId oldFirstRelfilenodeSubid;      /* rd_firstRelfilenodeSubid of
                                                                                                 * oldNode */
        bool            unique;                 /* is index unique? */
-       bool            nulls_not_distinct;     /* null treatment for UNIQUE constraints */
+       bool            nulls_not_distinct; /* null treatment for UNIQUE constraints */
        bool            primary;                /* is index a primary key? */
        bool            isconstraint;   /* is it for a pkey/unique constraint? */
        bool            deferrable;             /* is the constraint DEFERRABLE? */
index 244d1e11974a65e4249b7533daf7a8a2839bf7cd..a6e5db4eecc29e6d07280929aa439c17ba700382 100644
@@ -335,11 +335,11 @@ struct PlannerInfo
 
        MemoryContext planner_cxt;      /* context holding PlannerInfo */
 
-       Cardinality     total_table_pages;      /* # of pages in all non-dummy tables of
+       Cardinality total_table_pages;  /* # of pages in all non-dummy tables of
                                                                         * query */
 
-       Selectivity     tuple_fraction; /* tuple_fraction passed to query_planner */
-       Cardinality     limit_tuples;   /* limit_tuples passed to query_planner */
+       Selectivity tuple_fraction; /* tuple_fraction passed to query_planner */
+       Cardinality limit_tuples;       /* limit_tuples passed to query_planner */
 
        Index           qual_security_level;    /* minimum security_level for quals */
        /* Note: qual_security_level is zero if there are no securityQuals */
@@ -682,7 +682,7 @@ typedef struct RelOptInfo
        Relids          relids;                 /* set of base relids (rangetable indexes) */
 
        /* size estimates generated by planner */
-       Cardinality     rows;                   /* estimated number of result tuples */
+       Cardinality rows;                       /* estimated number of result tuples */
 
        /* per-relation planner control flags */
        bool            consider_startup;       /* keep cheap-startup-cost paths? */
@@ -719,7 +719,7 @@ typedef struct RelOptInfo
        List       *indexlist;          /* list of IndexOptInfo */
        List       *statlist;           /* list of StatisticExtInfo */
        BlockNumber pages;                      /* size estimates derived from pg_class */
-       Cardinality     tuples;
+       Cardinality tuples;
        double          allvisfrac;
        Bitmapset  *eclass_indexes; /* Indexes in PlannerInfo's eq_classes list of
                                                                 * ECs that mention this rel */
@@ -842,7 +842,7 @@ struct IndexOptInfo
 
        /* index-size statistics (from pg_class and elsewhere) */
        BlockNumber pages;                      /* number of disk pages in index */
-       Cardinality     tuples;                 /* number of index tuples in index */
+       Cardinality tuples;                     /* number of index tuples in index */
        int                     tree_height;    /* index tree height, or -1 if unknown */
 
        /* index descriptor information */
@@ -1151,7 +1151,7 @@ typedef struct ParamPathInfo
        NodeTag         type;
 
        Relids          ppi_req_outer;  /* rels supplying parameters used by path */
-       Cardinality     ppi_rows;               /* estimated number of result tuples */
+       Cardinality ppi_rows;           /* estimated number of result tuples */
        List       *ppi_clauses;        /* join clauses available from outer rels */
 } ParamPathInfo;
 
@@ -1201,7 +1201,7 @@ typedef struct Path
        int                     parallel_workers;       /* desired # of workers; 0 = not parallel */
 
        /* estimated size/costs for path (see costsize.c for more info) */
-       Cardinality     rows;                   /* estimated number of result tuples */
+       Cardinality rows;                       /* estimated number of result tuples */
        Cost            startup_cost;   /* cost expended before fetching any tuples */
        Cost            total_cost;             /* total cost (assuming all tuples fetched) */
 
@@ -1464,7 +1464,7 @@ typedef struct AppendPath
        List       *subpaths;           /* list of component Paths */
        /* Index of first partial path in subpaths; list_length(subpaths) if none */
        int                     first_partial_path;
-       Cardinality     limit_tuples;   /* hard limit on output tuples, or -1 */
+       Cardinality limit_tuples;       /* hard limit on output tuples, or -1 */
 } AppendPath;
 
 #define IS_DUMMY_APPEND(p) \
@@ -1486,7 +1486,7 @@ typedef struct MergeAppendPath
 {
        Path            path;
        List       *subpaths;           /* list of component Paths */
-       Cardinality     limit_tuples;   /* hard limit on output tuples, or -1 */
+       Cardinality limit_tuples;       /* hard limit on output tuples, or -1 */
 } MergeAppendPath;
 
 /*
@@ -1529,7 +1529,7 @@ typedef struct MemoizePath
                                                                 * complete after caching the first record. */
        bool            binary_mode;    /* true when cache key should be compared bit
                                                                 * by bit, false when using hash equality ops */
-       Cardinality     calls;                  /* expected number of rescans */
+       Cardinality calls;                      /* expected number of rescans */
        uint32          est_entries;    /* The maximum number of entries that the
                                                                 * planner expects will fit in the cache, or 0
                                                                 * if unknown */
@@ -1681,7 +1681,7 @@ typedef struct HashPath
        JoinPath        jpath;
        List       *path_hashclauses;   /* join clauses used for hashing */
        int                     num_batches;    /* number of batches expected */
-       Cardinality     inner_rows_total;       /* total inner rows expected */
+       Cardinality inner_rows_total;   /* total inner rows expected */
 } HashPath;
 
 /*
@@ -1784,7 +1784,7 @@ typedef struct AggPath
        Path       *subpath;            /* path representing input source */
        AggStrategy aggstrategy;        /* basic strategy, see nodes.h */
        AggSplit        aggsplit;               /* agg-splitting mode, see nodes.h */
-       Cardinality     numGroups;              /* estimated number of groups in input */
+       Cardinality numGroups;          /* estimated number of groups in input */
        uint64          transitionSpace;        /* for pass-by-ref transition data */
        List       *groupClause;        /* a list of SortGroupClause's */
        List       *qual;                       /* quals (HAVING quals), if any */
@@ -1798,7 +1798,7 @@ typedef struct GroupingSetData
 {
        NodeTag         type;
        List       *set;                        /* grouping set as list of sortgrouprefs */
-       Cardinality     numGroups;              /* est. number of result groups */
+       Cardinality numGroups;          /* est. number of result groups */
 } GroupingSetData;
 
 typedef struct RollupData
@@ -1807,7 +1807,7 @@ typedef struct RollupData
        List       *groupClause;        /* applicable subset of parse->groupClause */
        List       *gsets;                      /* lists of integer indexes into groupClause */
        List       *gsets_data;         /* list of GroupingSetData */
-       Cardinality     numGroups;              /* est. number of result groups */
+       Cardinality numGroups;          /* est. number of result groups */
        bool            hashable;               /* can be hashed */
        bool            is_hashed;              /* to be implemented as a hashagg */
 } RollupData;
@@ -1861,7 +1861,7 @@ typedef struct SetOpPath
        List       *distinctList;       /* SortGroupClauses identifying target cols */
        AttrNumber      flagColIdx;             /* where is the flag column, if any */
        int                     firstFlag;              /* flag value for first input relation */
-       Cardinality     numGroups;              /* estimated number of groups in input */
+       Cardinality numGroups;          /* estimated number of groups in input */
 } SetOpPath;
 
 /*
@@ -1874,7 +1874,7 @@ typedef struct RecursiveUnionPath
        Path       *rightpath;
        List       *distinctList;       /* SortGroupClauses identifying target cols */
        int                     wtParam;                /* ID of Param representing work table */
-       Cardinality     numGroups;              /* estimated number of groups in input */
+       Cardinality numGroups;          /* estimated number of groups in input */
 } RecursiveUnionPath;
 
 /*
@@ -2632,7 +2632,7 @@ typedef struct
 typedef struct
 {
        bool            limit_needed;
-       Cardinality     limit_tuples;
+       Cardinality limit_tuples;
        int64           count_est;
        int64           offset_est;
 } FinalPathExtraData;
@@ -2663,15 +2663,15 @@ typedef struct JoinCostWorkspace
        Cost            inner_rescan_run_cost;
 
        /* private for cost_mergejoin code */
-       Cardinality     outer_rows;
-       Cardinality     inner_rows;
-       Cardinality     outer_skip_rows;
-       Cardinality     inner_skip_rows;
+       Cardinality outer_rows;
+       Cardinality inner_rows;
+       Cardinality outer_skip_rows;
+       Cardinality inner_skip_rows;
 
        /* private for cost_hashjoin code */
        int                     numbuckets;
        int                     numbatches;
-       Cardinality     inner_rows_total;
+       Cardinality inner_rows_total;
 } JoinCostWorkspace;
 
 /*
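
The long run of `Cardinality` hunks above is one rule applied many times: the type name is apparently just wide enough to reach pgindent's member-name column, so the separator canonicalizes from a tab to a single space while the visual layout stays put. A sketch of the boundary with stand-in types:

    typedef double DemoCardinal;    /* wide enough to reach the name column */

    typedef struct DemoPath
    {
        double      rows;           /* short type: tabbed to the column */
        DemoCardinal total_rows;    /* wide type: one space, as in the hunks */
    } DemoPath;
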
index e43e360d9be1d0781af29314b45e5ce7b5db14e9..e319e83bd826e1d68287dd81118087dacf5a4959 100644
@@ -121,7 +121,7 @@ typedef struct Plan
        /*
         * planner's estimate of result size of this plan step
         */
-       Cardinality     plan_rows;              /* number of rows plan is expected to emit */
+       Cardinality plan_rows;          /* number of rows plan is expected to emit */
        int                     plan_width;             /* average row width in bytes */
 
        /*
@@ -834,7 +834,7 @@ typedef struct Memoize
        uint32          est_entries;    /* The maximum number of entries that the
                                                                 * planner expects will fit in the cache, or 0
                                                                 * if unknown */
-       Bitmapset   *keyparamids;       /* paramids from param_exprs */
+       Bitmapset  *keyparamids;        /* paramids from param_exprs */
 } Memoize;
 
 /* ----------------
@@ -1013,7 +1013,7 @@ typedef struct Hash
        AttrNumber      skewColumn;             /* outer join key's column #, or zero */
        bool            skewInherit;    /* is outer join rel an inheritance tree? */
        /* all other info is in the parent HashJoin node */
-       Cardinality     rows_total;             /* estimate total rows if parallel_aware */
+       Cardinality rows_total;         /* estimate total rows if parallel_aware */
 } Hash;
 
 /* ----------------
index 66d32fc006259d919fd3f749e007d9975e8be454..66e179c4356766d5c8c2d17e4c5217baa2eaa3c1 100644
@@ -1251,7 +1251,7 @@ typedef enum JsonExprOp
        JSON_VALUE_OP,                          /* JSON_VALUE() */
        JSON_QUERY_OP,                          /* JSON_QUERY() */
        JSON_EXISTS_OP,                         /* JSON_EXISTS() */
-       JSON_TABLE_OP               /* JSON_TABLE() */
+       JSON_TABLE_OP                           /* JSON_TABLE() */
 } JsonExprOp;
 
 /*
@@ -1274,7 +1274,8 @@ typedef enum JsonFormatType
 {
        JS_FORMAT_DEFAULT,                      /* unspecified */
        JS_FORMAT_JSON,                         /* FORMAT JSON [ENCODING ...] */
-       JS_FORMAT_JSONB                         /* implicit internal format for RETURNING jsonb */
+       JS_FORMAT_JSONB                         /* implicit internal format for RETURNING
+                                                                * jsonb */
 } JsonFormatType;
 
 /*
@@ -1315,7 +1316,7 @@ typedef enum JsonWrapper
 typedef struct JsonFormat
 {
        NodeTag         type;
-       JsonFormatType format_type;     /* format type */
+       JsonFormatType format_type; /* format type */
        JsonEncoding encoding;          /* JSON encoding */
        int                     location;               /* token location, or -1 if unknown */
 } JsonFormat;
@@ -1340,7 +1341,7 @@ typedef struct JsonValueExpr
 {
        NodeTag         type;
        Expr       *raw_expr;           /* raw expression */
-       Expr       *formatted_expr;     /* formatted expression or NULL */
+       Expr       *formatted_expr; /* formatted expression or NULL */
        JsonFormat *format;                     /* FORMAT clause, if specified */
 } JsonValueExpr;
 
@@ -1367,7 +1368,7 @@ typedef struct JsonConstructorExpr
        Expr       *func;                       /* underlying json[b]_xxx() function call */
        Expr       *coercion;           /* coercion to RETURNING type */
        JsonReturning *returning;       /* RETURNING clause */
-       bool            absent_on_null; /* ABSENT ON NULL? */
+       bool            absent_on_null; /* ABSENT ON NULL? */
        bool            unique;                 /* WITH UNIQUE KEYS? (JSON_OBJECT[AGG] only) */
        int                     location;
 } JsonConstructorExpr;
@@ -1380,7 +1381,7 @@ typedef enum JsonValueType
 {
        JS_TYPE_ANY,                            /* IS JSON [VALUE] */
        JS_TYPE_OBJECT,                         /* IS JSON OBJECT */
-       JS_TYPE_ARRAY,                          /* IS JSON ARRAY*/
+       JS_TYPE_ARRAY,                          /* IS JSON ARRAY */
        JS_TYPE_SCALAR                          /* IS JSON SCALAR */
 } JsonValueType;
 
@@ -1450,17 +1451,17 @@ typedef struct JsonExpr
 {
        Expr            xpr;
        JsonExprOp      op;                             /* json function ID */
-       Node       *formatted_expr;     /* formatted context item expression */
+       Node       *formatted_expr; /* formatted context item expression */
        JsonCoercion *result_coercion;  /* resulting coercion to RETURNING type */
        JsonFormat *format;                     /* context item format (JSON/JSONB) */
        Node       *path_spec;          /* JSON path specification expression */
        List       *passing_names;      /* PASSING argument names */
-       List       *passing_values;     /* PASSING argument values */
+       List       *passing_values; /* PASSING argument values */
        JsonReturning *returning;       /* RETURNING clause type/format info */
        JsonBehavior *on_empty;         /* ON EMPTY behavior */
        JsonBehavior *on_error;         /* ON ERROR behavior */
-       JsonItemCoercions *coercions; /* coercions for JSON_VALUE */
-       JsonWrapper     wrapper;                /* WRAPPER for JSON_QUERY */
+       JsonItemCoercions *coercions;   /* coercions for JSON_VALUE */
+       JsonWrapper wrapper;            /* WRAPPER for JSON_QUERY */
        bool            omit_quotes;    /* KEEP/OMIT QUOTES for JSON_QUERY */
        int                     location;               /* token location, or -1 if unknown */
 } JsonExpr;
@@ -1472,13 +1473,15 @@ typedef struct JsonExpr
 typedef struct JsonTableParent
 {
        NodeTag         type;
-       Const      *path;               /* jsonpath constant */
-       char       *name;               /* path name */
-       Node       *child;              /* nested columns, if any */
-       bool            outerJoin;      /* outer or inner join for nested columns? */
-       int                     colMin;         /* min column index in the resulting column list */
-       int                     colMax;         /* max column index in the resulting column list */
-       bool            errorOnError; /* ERROR/EMPTY ON ERROR behavior */
+       Const      *path;                       /* jsonpath constant */
+       char       *name;                       /* path name */
+       Node       *child;                      /* nested columns, if any */
+       bool            outerJoin;              /* outer or inner join for nested columns? */
+       int                     colMin;                 /* min column index in the resulting column
+                                                                * list */
+       int                     colMax;                 /* max column index in the resulting column
+                                                                * list */
+       bool            errorOnError;   /* ERROR/EMPTY ON ERROR behavior */
 } JsonTableParent;
 
 /*
@@ -1488,9 +1491,9 @@ typedef struct JsonTableParent
 typedef struct JsonTableSibling
 {
        NodeTag         type;
-       Node       *larg;               /* left join node */
-       Node       *rarg;               /* right join node */
-       bool            cross;          /* cross or union join? */
+       Node       *larg;                       /* left join node */
+       Node       *rarg;                       /* right join node */
+       bool            cross;                  /* cross or union join? */
 } JsonTableSibling;
 
 /* ----------------
index 3d95e6bfc8875556b531730c8dd1cae74cab0601..b6e137cf83fe90bcf3a9bb78d8822015e18eab44 100644
@@ -203,9 +203,9 @@ typedef enum
 extern PathKeysComparison compare_pathkeys(List *keys1, List *keys2);
 extern bool pathkeys_contained_in(List *keys1, List *keys2);
 extern bool pathkeys_count_contained_in(List *keys1, List *keys2, int *n_common);
-extern int group_keys_reorder_by_pathkeys(List *pathkeys,
-                                                                                 List **group_pathkeys,
-                                                                                 List **group_clauses);
+extern int     group_keys_reorder_by_pathkeys(List *pathkeys,
+                                                                                  List **group_pathkeys,
+                                                                                  List **group_clauses);
 extern List *get_useful_group_keys_orderings(PlannerInfo *root, double nrows,
                                                                                         List *path_pathkeys,
                                                                                         List *pathkeys, List *clauses);
index c69920d10870ee5f62091966dd8ad5799cbee5e4..dc379547c70e39eefe2825a1e1f129b27ed6e295 100644
@@ -26,7 +26,7 @@ extern PGDLLIMPORT post_parse_analyze_hook_type post_parse_analyze_hook;
 
 
 extern Query *parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText,
-                                                       const Oid *paramTypes, int numParams, QueryEnvironment *queryEnv);
+                                                                               const Oid *paramTypes, int numParams, QueryEnvironment *queryEnv);
 extern Query *parse_analyze_varparams(RawStmt *parseTree, const char *sourceText,
                                                                          Oid **paramTypes, int *numParams, QueryEnvironment *queryEnv);
 extern Query *parse_analyze_withcb(RawStmt *parseTree, const char *sourceText,
index d6f0b656495ab908de8f0eeca9b9b999976668e9..df1ee660d83102cd4bb277f008fa88d252e3e81f 100644
@@ -16,9 +16,9 @@
 #include "parser/parse_node.h"
 
 extern void setup_parse_fixed_parameters(ParseState *pstate,
-                                                                  const Oid *paramTypes, int numParams);
+                                                                                const Oid *paramTypes, int numParams);
 extern void setup_parse_variable_parameters(ParseState *pstate,
-                                                                         Oid **paramTypes, int *numParams);
+                                                                                       Oid **paramTypes, int *numParams);
 extern void check_variable_parameters(ParseState *pstate, Query *query);
 extern bool query_contains_extern_params(Query *query);
 
index 3d103a2b31bce4a81380941d04d779c165d291fb..12c05b5d9f904d7f967e1a9f133ca9af84ccf2c7 100644
@@ -140,7 +140,7 @@ extern char *pipe_read_line(char *cmd, char *line, int maxsize);
 
 #ifdef EXEC_BACKEND
 /* Disable ASLR before exec, for developer builds only (in exec.c) */
-extern int pg_disable_aslr(void);
+extern int     pg_disable_aslr(void);
 #endif
 
 
index 38cb1c64774b78507b4dbef98da26b35646e2c49..f366a159a8efa156f7b0fe6d2a6c3033e84921bf 100644
@@ -55,19 +55,19 @@ typedef struct ArchiveModuleCallbacks
        ArchiveCheckConfiguredCB check_configured_cb;
        ArchiveFileCB archive_file_cb;
        ArchiveShutdownCB shutdown_cb;
-}                      ArchiveModuleCallbacks;
+} ArchiveModuleCallbacks;
 
 /*
  * Type of the shared library symbol _PG_archive_module_init that is looked
  * up when loading an archive library.
  */
-typedef void (*ArchiveModuleInit) (ArchiveModuleCallbacks * cb);
+typedef void (*ArchiveModuleInit) (ArchiveModuleCallbacks *cb);
 
 /*
  * Since the logic for archiving via a shell command is in the core server
  * and does not need to be loaded via a shared library, it has a special
  * initialization function.
  */
-extern void shell_archive_init(ArchiveModuleCallbacks * cb);
+extern void shell_archive_init(ArchiveModuleCallbacks *cb);
 
 #endif                                                 /* _PGARCH_H */
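
Both fixes in the pgarch.h hunk above have one cause: pgindent works from a list of known typedef names, and while ArchiveModuleCallbacks was missing from that list, pg_bsd_indent treated it as an ordinary identifier, which is why the closing brace line was over-padded and the parameters were written "ArchiveModuleCallbacks * cb". Once the name is recognized as a type, the asterisk attaches to the parameter. A compilable sketch of the corrected shape (DemoCallbacks, DemoModuleInit, and demo_init are stand-ins, not PostgreSQL symbols):

typedef struct DemoCallbacks
{
	void		(*startup_cb) (void);	/* function-pointer member */
} DemoCallbacks;				/* brace and name joined by one space */

/* with the typedef known, the '*' binds to the parameter name */
typedef void (*DemoModuleInit) (DemoCallbacks *cb);

extern void demo_init(DemoCallbacks *cb);
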
index e23ac29a89d7b5d7754028fadd39663d15cdc94f..1cf3c0777dc3c294d7cd1d3edad220762b195985 100644
@@ -42,7 +42,7 @@ typedef struct BaseBackupTargetHandle BaseBackupTargetHandle;
  */
 extern void BaseBackupAddTarget(char *name,
                                                                void *(*check_detail) (char *, char *),
-                                                               bbsink * (*get_sink) (bbsink *, void *));
+                                                               bbsink *(*get_sink) (bbsink *, void *));
 
 /*
  * These functions are used by the core code to access base backup targets
index a33c2a718a7802787e72d06e27c4302973c8e1d6..741bf65cf7afe33e656f65952c40f56330614350 100644
@@ -28,7 +28,7 @@ extern void xact_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
 extern void standby_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
 extern void logicalmsg_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf);
 
-extern void    LogicalDecodingProcessRecord(LogicalDecodingContext *ctx,
+extern void LogicalDecodingProcessRecord(LogicalDecodingContext *ctx,
                                                                                 XLogReaderState *record);
 
 #endif
index 1ee63c4cf44e925a5c795fa50578069289a320d1..8c9f3321d50d625f3532600683d1211b87ce0b6b 100644
@@ -216,7 +216,7 @@ extern bool ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive);
 extern void ReplicationSlotsDropDBSlots(Oid dboid);
 extern bool InvalidateObsoleteReplicationSlots(XLogSegNo oldestSegno);
 extern ReplicationSlot *SearchNamedReplicationSlot(const char *name, bool need_lock);
-extern int ReplicationSlotIndex(ReplicationSlot *slot);
+extern int     ReplicationSlotIndex(ReplicationSlot *slot);
 extern void ReplicationSlotNameForTablesync(Oid suboid, Oid relid, char *syncslotname, int szslot);
 extern void ReplicationSlotDropAtPubNode(WalReceiverConn *wrconn, char *slotname, bool missing_ok);
 
index 0dd79d73fa2e18a8d8ee396ff7ad563ee9d45e5d..68ab740f1618fd86a527143289793ea5a6782cea 100644
@@ -181,6 +181,6 @@ extern int  WaitLatchOrSocket(Latch *latch, int wakeEvents,
                                                          pgsocket sock, long timeout, uint32 wait_event_info);
 extern void InitializeLatchWaitSet(void);
 extern int     GetNumRegisteredWaitEvents(WaitEventSet *set);
-extern bool    WaitEventSetCanReportClosed(void);
+extern bool WaitEventSetCanReportClosed(void);
 
 #endif                                                 /* LATCH_H */
index 87e408b7199ef716fa3d2d961f677c0029140e07..70d9dab25b8d2144527584f7974a452f7c78fda1 100644
@@ -46,9 +46,9 @@ extern PGDLLIMPORT int log_statement;
 extern List *pg_parse_query(const char *query_string);
 extern List *pg_rewrite_query(Query *query);
 extern List *pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree,
-                                                                       const char *query_string,
-                                                                       const Oid *paramTypes, int numParams,
-                                                                       QueryEnvironment *queryEnv);
+                                                                                               const char *query_string,
+                                                                                               const Oid *paramTypes, int numParams,
+                                                                                               QueryEnvironment *queryEnv);
 extern List *pg_analyze_and_rewrite_varparams(RawStmt *parsetree,
                                                                                          const char *query_string,
                                                                                          Oid **paramTypes,
index 0a22af80a217686190c27dc68d9deb15daf0c87f..f048eb0869d93e17df0392e86f0b5431c821c236 100644
@@ -32,6 +32,6 @@ extern char *asc_initcap(const char *buff, size_t nbytes);
 extern Datum parse_datetime(text *date_txt, text *fmt, Oid collid, bool strict,
                                                        Oid *typid, int32 *typmod, int *tz,
                                                        bool *have_error);
-extern int datetime_format_flags(const char *fmt_str, bool *have_error);
+extern int     datetime_format_flags(const char *fmt_str, bool *have_error);
 
 #endif
index 358b9eb61102ec2ab4d080191bc9f4341ccac654..8e79b8dc9f07d241030d778e4fcf92da11bde469 100644
@@ -263,8 +263,8 @@ typedef struct JsonPathVariableEvalContext
        Oid                     typid;
        int32           typmod;
        struct ExprContext *econtext;
-       struct ExprState  *estate;
-       MemoryContext mcxt;             /* memory context for cached value */
+       struct ExprState *estate;
+       MemoryContext mcxt;                     /* memory context for cached value */
        Datum           value;
        bool            isnull;
        bool            evaluated;
@@ -274,14 +274,14 @@ typedef struct JsonPathVariableEvalContext
 extern void JsonItemFromDatum(Datum val, Oid typid, int32 typmod,
                                                          JsonbValue *res);
 
-extern bool  JsonPathExists(Datum jb, JsonPath *path, List *vars, bool *error);
+extern bool JsonPathExists(Datum jb, JsonPath *path, List *vars, bool *error);
 extern Datum JsonPathQuery(Datum jb, JsonPath *jp, JsonWrapper wrapper,
                                                   bool *empty, bool *error, List *vars);
 extern JsonbValue *JsonPathValue(Datum jb, JsonPath *jp, bool *empty,
                                                                 bool *error, List *vars);
 
-extern int EvalJsonPathVar(void *vars, char *varName, int varNameLen,
-                                                  JsonbValue *val, JsonbValue *baseObject);
+extern int     EvalJsonPathVar(void *vars, char *varName, int varNameLen,
+                                                       JsonbValue *val, JsonbValue *baseObject);
 
 extern PGDLLIMPORT const TableFuncRoutine JsonbTableRoutine;
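
Struct bodies get the same column discipline: member names start at a fixed tab stop after the type, a pointer member's asterisk is pulled against the name, and trailing comments are pushed out to a shared comment column, which is all that changed for the estate and mcxt members above. A hypothetical struct showing the layout pgindent settles on:

#include <stdbool.h>

typedef struct DemoEvalContext
{
	int			typid;			/* name at the declaration column */
	struct DemoState *state;	/* incomplete type; '*' hugs the name */
	void	   *value;			/* '*' right-aligned against the name */
	bool		isnull;			/* trailing comments share one column */
} DemoEvalContext;
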
 
index eadbd009045c21a9fdfab800a2f0e2737a0212bb..90b3c49bc12393625edc1ee2acfd33b3d5a7e9ca 100644
@@ -159,7 +159,7 @@ typedef struct RelationData
        Bitmapset  *rd_keyattr;         /* cols that can be ref'd by foreign keys */
        Bitmapset  *rd_pkattr;          /* cols included in primary key */
        Bitmapset  *rd_idattr;          /* included in replica identity index */
-       Bitmapset  *rd_hotblockingattr; /* cols blocking HOT update */
+       Bitmapset  *rd_hotblockingattr; /* cols blocking HOT update */
 
        PublicationDesc *rd_pubdesc;    /* publication descriptor, or NULL */
 
@@ -246,7 +246,7 @@ typedef struct RelationData
         */
        Oid                     rd_toastoid;    /* Real TOAST table's OID, or InvalidOid */
 
-       bool            pgstat_enabled; /* should relation stats be counted */
+       bool            pgstat_enabled; /* should relation stats be counted */
        /* use "struct" here to avoid needing to include pgstat.h: */
        struct PgStat_TableStatus *pgstat_info; /* statistics collection area */
 } RelationData;
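
The rd_hotblockingattr and pgstat_enabled lines above look identical on both sides of the diff; the change is invisible. pgindent normalizes the whitespace run between a declaration and its trailing comment to tabs, so a line previously padded with spaces is rewritten byte-for-byte differently while rendering the same. A sketch under that assumption (DemoRel is hypothetical):

#include <stdbool.h>

typedef struct DemoRel
{
	bool		pgstat_enabled; /* the gap before this comment is a tab */
} DemoRel;
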
index f10353e1390e79ad564378033099de59aa3fb750..557f77e35a9f9f506d02c2e644b1e099f89ba868 100644
@@ -38,7 +38,7 @@ typedef struct xl_relmap_update
 extern Oid     RelationMapOidToFilenode(Oid relationId, bool shared);
 
 extern Oid     RelationMapFilenodeToOid(Oid relationId, bool shared);
-extern Oid RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId);
+extern Oid     RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId);
 extern void RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath,
                                                        char *dstdbpath);
 extern void RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared,
index c313a08d541fee87e0bd6ebefe4dc23ff18869ce..d485b9bfcd9cfcfaad216c6bb00daa2c36f4bf45 100644
@@ -215,9 +215,9 @@ extern double estimate_num_groups(PlannerInfo *root, List *groupExprs,
                                                                  EstimationInfo *estinfo);
 
 extern double estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs,
-                                       double input_rows, List **pgset,
-                                       EstimationInfo *estinfo,
-                                       List **cache_varinfos, int prevNExprs);
+                                                                                         double input_rows, List **pgset,
+                                                                                         EstimationInfo *estinfo,
+                                                                                         List **cache_varinfos, int prevNExprs);
 
 extern void estimate_hash_bucket_stats(PlannerInfo *root,
                                                                           Node *hashkey, double nbuckets,
index 140a9f9ffc4062ac3e75850f4f7b68e0706f6221..8c36cf8d82c6028b93c0b5a23671da6b93aa4ffe 100644
@@ -375,11 +375,11 @@ ApplySortAbbrevFullComparator(Datum datum1, bool isNull1,
  * Datatypes that install these as their comparator or abbreviated comparator
  * are eligible for faster sorting.
  */
-extern int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup);
+extern int     ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup);
 #if SIZEOF_DATUM >= 8
-extern int ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup);
+extern int     ssup_datum_signed_cmp(Datum x, Datum y, SortSupport ssup);
 #endif
-extern int ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup);
+extern int     ssup_datum_int32_cmp(Datum x, Datum y, SortSupport ssup);
 
 /* Other functions in utils/sort/sortsupport.c */
 extern void PrepareSortSupportComparisonShim(Oid cmpFunc, SortSupport ssup);
index 6fceff561b94fa55e8fc75f9ef6a92e62de38277..0a072a36dc22bcc5c867a1135343fcf65cbd98e0 100644
@@ -1123,10 +1123,10 @@ pg_fe_getusername(uid_t user_id, PQExpBuffer errorMessage)
 
        /*
         * Some users are using configure --enable-thread-safety-force, so we
-        * might as well do the locking within our library to protect
-        * getpwuid(). In fact, application developers can use getpwuid() in
-        * their application if they use the locking call we provide, or install
-        * their own locking function using PQregisterThreadLock().
+        * might as well do the locking within our library to protect getpwuid().
+        * In fact, application developers can use getpwuid() in their application
+        * if they use the locking call we provide, or install their own locking
+        * function using PQregisterThreadLock().
         */
        pglock_thread();
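
The getpwuid() hunk above changes no words at all: pgindent re-fills multi-line comment blocks toward its 79-column limit, pulling words across line breaks until each line is as full as possible. The same refill explains the comment-only hunk in the test module further down. A sketch of the rule applied to an illustrative comment:

/*
 * Before the refill this block might have had ragged short lines; pgindent
 * rewraps it so each line runs as close to the column limit as the words
 * allow, leaving the wording itself untouched.
 */
extern int	demo_refill_anchor;	/* hypothetical; keeps the unit non-empty */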
 
index 165a6ed9b7bf1defcd6ff74df890978e0d9044d0..8046fcd884aa084aad1cca744fdcfe0e53d4923f 100644
@@ -203,6 +203,7 @@ pq_verify_peer_name_matches_certificate_ip(PGconn *conn,
                                match = 1;
                }
        }
+
        /*
         * If they don't have inet_pton(), skip this.  Then, an IPv6 address in a
         * certificate will cause an error.
index 8b3355e6dd62e6762a9c175cd90d46b885f3fb7c..fa00221ae292aea9e3349f68b4a1eedbf8d164b6 100644
@@ -6,7 +6,7 @@ use PostgreSQL::Test::Utils;
 use Test::More;
 
 # Test PQsslAttribute(NULL, "library")
-my ($out, $err) = run_command(['libpq_testclient', '--ssl']);
+my ($out, $err) = run_command([ 'libpq_testclient', '--ssl' ]);
 
 if ($ENV{with_ssl} eq 'openssl')
 {
@@ -14,7 +14,9 @@ if ($ENV{with_ssl} eq 'openssl')
 }
 else
 {
-       is($err, 'SSL is not enabled', 'PQsslAttribute(NULL, "library") returns NULL');
+       is( $err,
+               'SSL is not enabled',
+               'PQsslAttribute(NULL, "library") returns NULL');
 }
 
 done_testing();
index 07a1084b09d00d68a190386d7d560faef75a3630..7035ff3c209e16f697642cb75bbbcea06f1c11cd 100644
@@ -16,30 +16,34 @@ $node1->init;
 $node1->start;
 
 $node1->safe_psql('postgres',
-       q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' ICU_LOCALE 'en@colCaseFirst=upper' ENCODING 'UTF8' TEMPLATE template0});
+       q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' ICU_LOCALE 'en@colCaseFirst=upper' ENCODING 'UTF8' TEMPLATE template0}
+);
 
-$node1->safe_psql('dbicu',
-q{
+$node1->safe_psql(
+       'dbicu',
+       q{
 CREATE COLLATION upperfirst (provider = icu, locale = 'en@colCaseFirst=upper');
 CREATE TABLE icu (def text, en text COLLATE "en-x-icu", upfirst text COLLATE upperfirst);
 INSERT INTO icu VALUES ('a', 'a', 'a'), ('b', 'b', 'b'), ('A', 'A', 'A'), ('B', 'B', 'B');
 });
 
-is($node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY def}),
+is( $node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY def}),
        qq(A
 a
 B
 b),
        'sort by database default locale');
 
-is($node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY def COLLATE "en-x-icu"}),
+is( $node1->safe_psql(
+               'dbicu', q{SELECT def FROM icu ORDER BY def COLLATE "en-x-icu"}),
        qq(a
 A
 b
 B),
        'sort by explicit collation standard');
 
-is($node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY en COLLATE upperfirst}),
+is( $node1->safe_psql(
+               'dbicu', q{SELECT def FROM icu ORDER BY en COLLATE upperfirst}),
        qq(A
 a
 B
@@ -51,8 +55,12 @@ b),
 
 my ($ret, $stdout, $stderr) = $node1->psql('postgres',
        q{CREATE DATABASE dbicu LOCALE_PROVIDER icu TEMPLATE template0});
-isnt($ret, 0, "ICU locale must be specified for ICU provider: exit code not 0");
-like($stderr, qr/ERROR:  ICU locale must be specified/, "ICU locale must be specified for ICU provider: error message");
+isnt($ret, 0,
+       "ICU locale must be specified for ICU provider: exit code not 0");
+like(
+       $stderr,
+       qr/ERROR:  ICU locale must be specified/,
+       "ICU locale must be specified for ICU provider: error message");
 
 
 done_testing();
index b342146e556afcf874e8902528bdfb4a6c681418..86dff8bd1f3b5ac27665d6f4f74ca37d1cae5519 100644
@@ -46,7 +46,8 @@ elsif ($^O eq 'openbsd')
 }
 else
 {
-       plan skip_all => "ldap tests not supported on $^O or dependencies not installed";
+       plan skip_all =>
+         "ldap tests not supported on $^O or dependencies not installed";
 }
 
 # make your own edits here
index cc79d96d473561512c96389b6fc8f4ac0f2d1b35..4cb1170438aef42967ce20282c6e00e282877941 100644
@@ -28,7 +28,8 @@ for my $testname (@tests)
                  pipeline_abort transaction disallowed_in_pipeline)) > 0;
 
        # For a bunch of tests, generate a libpq trace file too.
-       my $traceout = "$PostgreSQL::Test::Utils::tmp_check/traces/$testname.trace";
+       my $traceout =
+         "$PostgreSQL::Test::Utils::tmp_check/traces/$testname.trace";
        if ($cmptrace)
        {
                push @extraargs, "-t", $traceout;
index 0429861b16a7bb12b67aa6fbb9a6e1d7ee70cc20..5be5ac39eb69d4bc920a853ee5dbafa5b8f56bd6 100644
@@ -63,7 +63,8 @@ like(
 $node->append_conf('postgresql.conf', "ssl_passphrase.passphrase = 'blurfl'");
 
 # try to start the server again
-my $ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', $node->data_dir, '-l',
+my $ret =
+  PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', $node->data_dir, '-l',
        $node->logfile, 'start');
 
 
index 04e54394c126413c4f6c5ca5782d5141ab134822..95cd2b7b65f5da3cb5b07a71e76a2f200a055db7 100644
@@ -13,9 +13,9 @@ $node->init;
 $node->start;
 
 # Create a couple of directories to use as tablespaces.
-my $basedir = $node->basedir();
+my $basedir      = $node->basedir();
 my $TS1_LOCATION = "$basedir/ts1";
-$TS1_LOCATION =~ s/\/\.\//\//g; # collapse foo/./bar to foo/bar
+$TS1_LOCATION =~ s/\/\.\//\//g;    # collapse foo/./bar to foo/bar
 mkdir($TS1_LOCATION);
 my $TS2_LOCATION = "$basedir/ts2";
 $TS2_LOCATION =~ s/\/\.\//\//g;
@@ -34,13 +34,11 @@ $result = $node->psql('postgres',
 ok($result != 0, 'clobber tablespace with absolute path');
 
 # Create table in it
-$result = $node->psql('postgres',
-       "CREATE TABLE t () TABLESPACE regress_ts1");
+$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
 ok($result == 0, 'create table in tablespace with absolute path');
 
 # Can't drop a tablespace that still has a table in it
-$result = $node->psql('postgres',
-       "DROP TABLESPACE regress_ts1");
+$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
 ok($result != 0, 'drop tablespace with absolute path');
 
 # Drop the table
@@ -60,32 +58,28 @@ $result = $node->psql('postgres',
        "CREATE TABLESPACE regress_ts2 LOCATION '$TS2_LOCATION'");
 ok($result == 0, 'create tablespace 2 with absolute path');
 $result = $node->psql('postgres',
-       "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts3 LOCATION ''");
+       "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts3 LOCATION ''"
+);
 ok($result == 0, 'create tablespace 3 with in-place directory');
 $result = $node->psql('postgres',
-       "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts4 LOCATION ''");
+       "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts4 LOCATION ''"
+);
 ok($result == 0, 'create tablespace 4 with in-place directory');
 
 # Create a table and test moving between absolute and in-place tablespaces
-$result = $node->psql('postgres',
-       "CREATE TABLE t () TABLESPACE regress_ts1");
+$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
 ok($result == 0, 'create table in tablespace 1');
-$result = $node->psql('postgres',
-       "ALTER TABLE t SET tablespace regress_ts2");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts2");
 ok($result == 0, 'move table abs->abs');
-$result = $node->psql('postgres',
-       "ALTER TABLE t SET tablespace regress_ts3");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts3");
 ok($result == 0, 'move table abs->in-place');
-$result = $node->psql('postgres',
-       "ALTER TABLE t SET tablespace regress_ts4");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts4");
 ok($result == 0, 'move table in-place->in-place');
-$result = $node->psql('postgres',
-       "ALTER TABLE t SET tablespace regress_ts1");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts1");
 ok($result == 0, 'move table in-place->abs');
 
 # Drop everything
-$result = $node->psql('postgres',
-       "DROP TABLE t");
+$result = $node->psql('postgres', "DROP TABLE t");
 ok($result == 0, 'drop table');
 $result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
 ok($result == 0, 'drop tablespace 1');
index 6f9838f93b593bf0bdfbcbaa114bfea0d4ef948e..7ef272cc7ae08d1c43c3f9eaa1c1b76537159e25 100644
@@ -234,9 +234,9 @@ static void
 emit_audit_message(const char *type, const char *hook, char *action, char *objName)
 {
        /*
-        * Ensure that audit messages are not duplicated by only emitting them from
-        * a leader process, not a worker process. This makes the test results
-        * deterministic even if run with force_parallel_mode = regress.
+        * Ensure that audit messages are not duplicated by only emitting them
+        * from a leader process, not a worker process. This makes the test
+        * results deterministic even if run with force_parallel_mode = regress.
         */
        if (REGRESS_audit && !IsParallelWorker())
        {
@@ -285,7 +285,7 @@ REGRESS_object_access_hook_str(ObjectAccessType access, Oid classId, const char
 
        if (next_object_access_hook_str)
        {
-               (*next_object_access_hook_str)(access, classId, objName, subId, arg);
+               (*next_object_access_hook_str) (access, classId, objName, subId, arg);
        }
 
        switch (access)
@@ -325,7 +325,7 @@ REGRESS_object_access_hook_str(ObjectAccessType access, Oid classId, const char
 }
 
 static void
-REGRESS_object_access_hook (ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg)
+REGRESS_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg)
 {
        audit_attempt("object access",
                                  accesstype_to_string(access, 0),
@@ -340,7 +340,7 @@ REGRESS_object_access_hook (ObjectAccessType access, Oid classId, Oid objectId,
 
        /* Forward to next hook in the chain */
        if (next_object_access_hook)
-               (*next_object_access_hook)(access, classId, objectId, subId, arg);
+               (*next_object_access_hook) (access, classId, objectId, subId, arg);
 
        audit_success("object access",
                                  accesstype_to_string(access, 0),
@@ -381,18 +381,18 @@ REGRESS_exec_check_perms(List *rangeTabls, bool do_abort)
 
 static void
 REGRESS_utility_command(PlannedStmt *pstmt,
-                                         const char *queryString,
-                                         bool readOnlyTree,
-                                         ProcessUtilityContext context,
-                                         ParamListInfo params,
-                                         QueryEnvironment *queryEnv,
-                                         DestReceiver *dest,
-                                         QueryCompletion *qc)
+                                               const char *queryString,
+                                               bool readOnlyTree,
+                                               ProcessUtilityContext context,
+                                               ParamListInfo params,
+                                               QueryEnvironment *queryEnv,
+                                               DestReceiver *dest,
+                                               QueryCompletion *qc)
 {
        Node       *parsetree = pstmt->utilityStmt;
 
        const char *action;
-       NodeTag tag = nodeTag(parsetree);
+       NodeTag         tag = nodeTag(parsetree);
 
        switch (tag)
        {
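
The very large hunk that follows is equally mechanical: pg_bsd_indent never leaves a statement on the same line as a case label, so each one-line "case T_Foo: return "Foo"; break;" arm is unfolded onto three lines, which is how the function grows from 438 to 1302 lines (per the hunk header) with identical behavior. A compilable sketch of the same transformation on a hypothetical enum:

typedef enum DemoTag
{
	T_DemoOne,
	T_DemoTwo
} DemoTag;

static const char *
demo_tag_to_string(DemoTag tag)
{
	switch (tag)
	{
		case T_DemoOne:
			return "DemoOne";
			break;				/* unreachable, kept as in the source */
		case T_DemoTwo:
			return "DemoTwo";
			break;
	}
	return "Unknown";
}

The same pass accounts for the new space in "(*next_object_access_hook) (...)" earlier in this file: indent formats a call through a parenthesized function-pointer expression the way it formats a cast, with a space before the argument list.
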
@@ -441,438 +441,1302 @@ nodetag_to_string(NodeTag tag)
 {
        switch (tag)
        {
-               case T_Invalid: return "Invalid"; break;
-               case T_IndexInfo: return "IndexInfo"; break;
-               case T_ExprContext: return "ExprContext"; break;
-               case T_ProjectionInfo: return "ProjectionInfo"; break;
-               case T_JunkFilter: return "JunkFilter"; break;
-               case T_OnConflictSetState: return "OnConflictSetState"; break;
-               case T_ResultRelInfo: return "ResultRelInfo"; break;
-               case T_EState: return "EState"; break;
-               case T_TupleTableSlot: return "TupleTableSlot"; break;
-               case T_Plan: return "Plan"; break;
-               case T_Result: return "Result"; break;
-               case T_ProjectSet: return "ProjectSet"; break;
-               case T_ModifyTable: return "ModifyTable"; break;
-               case T_Append: return "Append"; break;
-               case T_MergeAppend: return "MergeAppend"; break;
-               case T_RecursiveUnion: return "RecursiveUnion"; break;
-               case T_BitmapAnd: return "BitmapAnd"; break;
-               case T_BitmapOr: return "BitmapOr"; break;
-               case T_Scan: return "Scan"; break;
-               case T_SeqScan: return "SeqScan"; break;
-               case T_SampleScan: return "SampleScan"; break;
-               case T_IndexScan: return "IndexScan"; break;
-               case T_IndexOnlyScan: return "IndexOnlyScan"; break;
-               case T_BitmapIndexScan: return "BitmapIndexScan"; break;
-               case T_BitmapHeapScan: return "BitmapHeapScan"; break;
-               case T_TidScan: return "TidScan"; break;
-               case T_TidRangeScan: return "TidRangeScan"; break;
-               case T_SubqueryScan: return "SubqueryScan"; break;
-               case T_FunctionScan: return "FunctionScan"; break;
-               case T_ValuesScan: return "ValuesScan"; break;
-               case T_TableFuncScan: return "TableFuncScan"; break;
-               case T_CteScan: return "CteScan"; break;
-               case T_NamedTuplestoreScan: return "NamedTuplestoreScan"; break;
-               case T_WorkTableScan: return "WorkTableScan"; break;
-               case T_ForeignScan: return "ForeignScan"; break;
-               case T_CustomScan: return "CustomScan"; break;
-               case T_Join: return "Join"; break;
-               case T_NestLoop: return "NestLoop"; break;
-               case T_MergeJoin: return "MergeJoin"; break;
-               case T_HashJoin: return "HashJoin"; break;
-               case T_Material: return "Material"; break;
-               case T_Memoize: return "Memoize"; break;
-               case T_Sort: return "Sort"; break;
-               case T_IncrementalSort: return "IncrementalSort"; break;
-               case T_Group: return "Group"; break;
-               case T_Agg: return "Agg"; break;
-               case T_WindowAgg: return "WindowAgg"; break;
-               case T_Unique: return "Unique"; break;
-               case T_Gather: return "Gather"; break;
-               case T_GatherMerge: return "GatherMerge"; break;
-               case T_Hash: return "Hash"; break;
-               case T_SetOp: return "SetOp"; break;
-               case T_LockRows: return "LockRows"; break;
-               case T_Limit: return "Limit"; break;
-               case T_NestLoopParam: return "NestLoopParam"; break;
-               case T_PlanRowMark: return "PlanRowMark"; break;
-               case T_PartitionPruneInfo: return "PartitionPruneInfo"; break;
-               case T_PartitionedRelPruneInfo: return "PartitionedRelPruneInfo"; break;
-               case T_PartitionPruneStepOp: return "PartitionPruneStepOp"; break;
-               case T_PartitionPruneStepCombine: return "PartitionPruneStepCombine"; break;
-               case T_PlanInvalItem: return "PlanInvalItem"; break;
-               case T_PlanState: return "PlanState"; break;
-               case T_ResultState: return "ResultState"; break;
-               case T_ProjectSetState: return "ProjectSetState"; break;
-               case T_ModifyTableState: return "ModifyTableState"; break;
-               case T_AppendState: return "AppendState"; break;
-               case T_MergeAppendState: return "MergeAppendState"; break;
-               case T_RecursiveUnionState: return "RecursiveUnionState"; break;
-               case T_BitmapAndState: return "BitmapAndState"; break;
-               case T_BitmapOrState: return "BitmapOrState"; break;
-               case T_ScanState: return "ScanState"; break;
-               case T_SeqScanState: return "SeqScanState"; break;
-               case T_SampleScanState: return "SampleScanState"; break;
-               case T_IndexScanState: return "IndexScanState"; break;
-               case T_IndexOnlyScanState: return "IndexOnlyScanState"; break;
-               case T_BitmapIndexScanState: return "BitmapIndexScanState"; break;
-               case T_BitmapHeapScanState: return "BitmapHeapScanState"; break;
-               case T_TidScanState: return "TidScanState"; break;
-               case T_TidRangeScanState: return "TidRangeScanState"; break;
-               case T_SubqueryScanState: return "SubqueryScanState"; break;
-               case T_FunctionScanState: return "FunctionScanState"; break;
-               case T_TableFuncScanState: return "TableFuncScanState"; break;
-               case T_ValuesScanState: return "ValuesScanState"; break;
-               case T_CteScanState: return "CteScanState"; break;
-               case T_NamedTuplestoreScanState: return "NamedTuplestoreScanState"; break;
-               case T_WorkTableScanState: return "WorkTableScanState"; break;
-               case T_ForeignScanState: return "ForeignScanState"; break;
-               case T_CustomScanState: return "CustomScanState"; break;
-               case T_JoinState: return "JoinState"; break;
-               case T_NestLoopState: return "NestLoopState"; break;
-               case T_MergeJoinState: return "MergeJoinState"; break;
-               case T_HashJoinState: return "HashJoinState"; break;
-               case T_MaterialState: return "MaterialState"; break;
-               case T_MemoizeState: return "MemoizeState"; break;
-               case T_SortState: return "SortState"; break;
-               case T_IncrementalSortState: return "IncrementalSortState"; break;
-               case T_GroupState: return "GroupState"; break;
-               case T_AggState: return "AggState"; break;
-               case T_WindowAggState: return "WindowAggState"; break;
-               case T_UniqueState: return "UniqueState"; break;
-               case T_GatherState: return "GatherState"; break;
-               case T_GatherMergeState: return "GatherMergeState"; break;
-               case T_HashState: return "HashState"; break;
-               case T_SetOpState: return "SetOpState"; break;
-               case T_LockRowsState: return "LockRowsState"; break;
-               case T_LimitState: return "LimitState"; break;
-               case T_Alias: return "Alias"; break;
-               case T_RangeVar: return "RangeVar"; break;
-               case T_TableFunc: return "TableFunc"; break;
-               case T_Var: return "Var"; break;
-               case T_Const: return "Const"; break;
-               case T_Param: return "Param"; break;
-               case T_Aggref: return "Aggref"; break;
-               case T_GroupingFunc: return "GroupingFunc"; break;
-               case T_WindowFunc: return "WindowFunc"; break;
-               case T_SubscriptingRef: return "SubscriptingRef"; break;
-               case T_FuncExpr: return "FuncExpr"; break;
-               case T_NamedArgExpr: return "NamedArgExpr"; break;
-               case T_OpExpr: return "OpExpr"; break;
-               case T_DistinctExpr: return "DistinctExpr"; break;
-               case T_NullIfExpr: return "NullIfExpr"; break;
-               case T_ScalarArrayOpExpr: return "ScalarArrayOpExpr"; break;
-               case T_BoolExpr: return "BoolExpr"; break;
-               case T_SubLink: return "SubLink"; break;
-               case T_SubPlan: return "SubPlan"; break;
-               case T_AlternativeSubPlan: return "AlternativeSubPlan"; break;
-               case T_FieldSelect: return "FieldSelect"; break;
-               case T_FieldStore: return "FieldStore"; break;
-               case T_RelabelType: return "RelabelType"; break;
-               case T_CoerceViaIO: return "CoerceViaIO"; break;
-               case T_ArrayCoerceExpr: return "ArrayCoerceExpr"; break;
-               case T_ConvertRowtypeExpr: return "ConvertRowtypeExpr"; break;
-               case T_CollateExpr: return "CollateExpr"; break;
-               case T_CaseExpr: return "CaseExpr"; break;
-               case T_CaseWhen: return "CaseWhen"; break;
-               case T_CaseTestExpr: return "CaseTestExpr"; break;
-               case T_ArrayExpr: return "ArrayExpr"; break;
-               case T_RowExpr: return "RowExpr"; break;
-               case T_RowCompareExpr: return "RowCompareExpr"; break;
-               case T_CoalesceExpr: return "CoalesceExpr"; break;
-               case T_MinMaxExpr: return "MinMaxExpr"; break;
-               case T_SQLValueFunction: return "SQLValueFunction"; break;
-               case T_XmlExpr: return "XmlExpr"; break;
-               case T_NullTest: return "NullTest"; break;
-               case T_BooleanTest: return "BooleanTest"; break;
-               case T_CoerceToDomain: return "CoerceToDomain"; break;
-               case T_CoerceToDomainValue: return "CoerceToDomainValue"; break;
-               case T_SetToDefault: return "SetToDefault"; break;
-               case T_CurrentOfExpr: return "CurrentOfExpr"; break;
-               case T_NextValueExpr: return "NextValueExpr"; break;
-               case T_InferenceElem: return "InferenceElem"; break;
-               case T_TargetEntry: return "TargetEntry"; break;
-               case T_RangeTblRef: return "RangeTblRef"; break;
-               case T_JoinExpr: return "JoinExpr"; break;
-               case T_FromExpr: return "FromExpr"; break;
-               case T_OnConflictExpr: return "OnConflictExpr"; break;
-               case T_IntoClause: return "IntoClause"; break;
-               case T_ExprState: return "ExprState"; break;
-               case T_WindowFuncExprState: return "WindowFuncExprState"; break;
-               case T_SetExprState: return "SetExprState"; break;
-               case T_SubPlanState: return "SubPlanState"; break;
-               case T_DomainConstraintState: return "DomainConstraintState"; break;
-               case T_PlannerInfo: return "PlannerInfo"; break;
-               case T_PlannerGlobal: return "PlannerGlobal"; break;
-               case T_RelOptInfo: return "RelOptInfo"; break;
-               case T_IndexOptInfo: return "IndexOptInfo"; break;
-               case T_ForeignKeyOptInfo: return "ForeignKeyOptInfo"; break;
-               case T_ParamPathInfo: return "ParamPathInfo"; break;
-               case T_Path: return "Path"; break;
-               case T_IndexPath: return "IndexPath"; break;
-               case T_BitmapHeapPath: return "BitmapHeapPath"; break;
-               case T_BitmapAndPath: return "BitmapAndPath"; break;
-               case T_BitmapOrPath: return "BitmapOrPath"; break;
-               case T_TidPath: return "TidPath"; break;
-               case T_TidRangePath: return "TidRangePath"; break;
-               case T_SubqueryScanPath: return "SubqueryScanPath"; break;
-               case T_ForeignPath: return "ForeignPath"; break;
-               case T_CustomPath: return "CustomPath"; break;
-               case T_NestPath: return "NestPath"; break;
-               case T_MergePath: return "MergePath"; break;
-               case T_HashPath: return "HashPath"; break;
-               case T_AppendPath: return "AppendPath"; break;
-               case T_MergeAppendPath: return "MergeAppendPath"; break;
-               case T_GroupResultPath: return "GroupResultPath"; break;
-               case T_MaterialPath: return "MaterialPath"; break;
-               case T_MemoizePath: return "MemoizePath"; break;
-               case T_UniquePath: return "UniquePath"; break;
-               case T_GatherPath: return "GatherPath"; break;
-               case T_GatherMergePath: return "GatherMergePath"; break;
-               case T_ProjectionPath: return "ProjectionPath"; break;
-               case T_ProjectSetPath: return "ProjectSetPath"; break;
-               case T_SortPath: return "SortPath"; break;
-               case T_IncrementalSortPath: return "IncrementalSortPath"; break;
-               case T_GroupPath: return "GroupPath"; break;
-               case T_UpperUniquePath: return "UpperUniquePath"; break;
-               case T_AggPath: return "AggPath"; break;
-               case T_GroupingSetsPath: return "GroupingSetsPath"; break;
-               case T_MinMaxAggPath: return "MinMaxAggPath"; break;
-               case T_WindowAggPath: return "WindowAggPath"; break;
-               case T_SetOpPath: return "SetOpPath"; break;
-               case T_RecursiveUnionPath: return "RecursiveUnionPath"; break;
-               case T_LockRowsPath: return "LockRowsPath"; break;
-               case T_ModifyTablePath: return "ModifyTablePath"; break;
-               case T_LimitPath: return "LimitPath"; break;
-               case T_EquivalenceClass: return "EquivalenceClass"; break;
-               case T_EquivalenceMember: return "EquivalenceMember"; break;
-               case T_PathKey: return "PathKey"; break;
-               case T_PathTarget: return "PathTarget"; break;
-               case T_RestrictInfo: return "RestrictInfo"; break;
-               case T_IndexClause: return "IndexClause"; break;
-               case T_PlaceHolderVar: return "PlaceHolderVar"; break;
-               case T_SpecialJoinInfo: return "SpecialJoinInfo"; break;
-               case T_AppendRelInfo: return "AppendRelInfo"; break;
-               case T_RowIdentityVarInfo: return "RowIdentityVarInfo"; break;
-               case T_PlaceHolderInfo: return "PlaceHolderInfo"; break;
-               case T_MinMaxAggInfo: return "MinMaxAggInfo"; break;
-               case T_PlannerParamItem: return "PlannerParamItem"; break;
-               case T_RollupData: return "RollupData"; break;
-               case T_GroupingSetData: return "GroupingSetData"; break;
-               case T_StatisticExtInfo: return "StatisticExtInfo"; break;
-               case T_AllocSetContext: return "AllocSetContext"; break;
-               case T_SlabContext: return "SlabContext"; break;
-               case T_GenerationContext: return "GenerationContext"; break;
-               case T_Integer: return "Integer"; break;
-               case T_Float: return "Float"; break;
-               case T_Boolean: return "Boolean"; break;
-               case T_String: return "String"; break;
-               case T_BitString: return "BitString"; break;
-               case T_List: return "List"; break;
-               case T_IntList: return "IntList"; break;
-               case T_OidList: return "OidList"; break;
-               case T_ExtensibleNode: return "ExtensibleNode"; break;
-               case T_RawStmt: return "RawStmt"; break;
-               case T_Query: return "Query"; break;
-               case T_PlannedStmt: return "PlannedStmt"; break;
-               case T_InsertStmt: return "InsertStmt"; break;
-               case T_DeleteStmt: return "DeleteStmt"; break;
-               case T_UpdateStmt: return "UpdateStmt"; break;
-               case T_SelectStmt: return "SelectStmt"; break;
-               case T_ReturnStmt: return "ReturnStmt"; break;
-               case T_PLAssignStmt: return "PLAssignStmt"; break;
-               case T_AlterTableStmt: return "AlterTableStmt"; break;
-               case T_AlterTableCmd: return "AlterTableCmd"; break;
-               case T_AlterDomainStmt: return "AlterDomainStmt"; break;
-               case T_SetOperationStmt: return "SetOperationStmt"; break;
-               case T_GrantStmt: return "GrantStmt"; break;
-               case T_GrantRoleStmt: return "GrantRoleStmt"; break;
-               case T_AlterDefaultPrivilegesStmt: return "AlterDefaultPrivilegesStmt"; break;
-               case T_ClosePortalStmt: return "ClosePortalStmt"; break;
-               case T_ClusterStmt: return "ClusterStmt"; break;
-               case T_CopyStmt: return "CopyStmt"; break;
-               case T_CreateStmt: return "CreateStmt"; break;
-               case T_DefineStmt: return "DefineStmt"; break;
-               case T_DropStmt: return "DropStmt"; break;
-               case T_TruncateStmt: return "TruncateStmt"; break;
-               case T_CommentStmt: return "CommentStmt"; break;
-               case T_FetchStmt: return "FetchStmt"; break;
-               case T_IndexStmt: return "IndexStmt"; break;
-               case T_CreateFunctionStmt: return "CreateFunctionStmt"; break;
-               case T_AlterFunctionStmt: return "AlterFunctionStmt"; break;
-               case T_DoStmt: return "DoStmt"; break;
-               case T_RenameStmt: return "RenameStmt"; break;
-               case T_RuleStmt: return "RuleStmt"; break;
-               case T_NotifyStmt: return "NotifyStmt"; break;
-               case T_ListenStmt: return "ListenStmt"; break;
-               case T_UnlistenStmt: return "UnlistenStmt"; break;
-               case T_TransactionStmt: return "TransactionStmt"; break;
-               case T_ViewStmt: return "ViewStmt"; break;
-               case T_LoadStmt: return "LoadStmt"; break;
-               case T_CreateDomainStmt: return "CreateDomainStmt"; break;
-               case T_CreatedbStmt: return "CreatedbStmt"; break;
-               case T_DropdbStmt: return "DropdbStmt"; break;
-               case T_VacuumStmt: return "VacuumStmt"; break;
-               case T_ExplainStmt: return "ExplainStmt"; break;
-               case T_CreateTableAsStmt: return "CreateTableAsStmt"; break;
-               case T_CreateSeqStmt: return "CreateSeqStmt"; break;
-               case T_AlterSeqStmt: return "AlterSeqStmt"; break;
-               case T_VariableSetStmt: return "VariableSetStmt"; break;
-               case T_VariableShowStmt: return "VariableShowStmt"; break;
-               case T_DiscardStmt: return "DiscardStmt"; break;
-               case T_CreateTrigStmt: return "CreateTrigStmt"; break;
-               case T_CreatePLangStmt: return "CreatePLangStmt"; break;
-               case T_CreateRoleStmt: return "CreateRoleStmt"; break;
-               case T_AlterRoleStmt: return "AlterRoleStmt"; break;
-               case T_DropRoleStmt: return "DropRoleStmt"; break;
-               case T_LockStmt: return "LockStmt"; break;
-               case T_ConstraintsSetStmt: return "ConstraintsSetStmt"; break;
-               case T_ReindexStmt: return "ReindexStmt"; break;
-               case T_CheckPointStmt: return "CheckPointStmt"; break;
-               case T_CreateSchemaStmt: return "CreateSchemaStmt"; break;
-               case T_AlterDatabaseStmt: return "AlterDatabaseStmt"; break;
-               case T_AlterDatabaseRefreshCollStmt: return "AlterDatabaseRefreshCollStmt"; break;
-               case T_AlterDatabaseSetStmt: return "AlterDatabaseSetStmt"; break;
-               case T_AlterRoleSetStmt: return "AlterRoleSetStmt"; break;
-               case T_CreateConversionStmt: return "CreateConversionStmt"; break;
-               case T_CreateCastStmt: return "CreateCastStmt"; break;
-               case T_CreateOpClassStmt: return "CreateOpClassStmt"; break;
-               case T_CreateOpFamilyStmt: return "CreateOpFamilyStmt"; break;
-               case T_AlterOpFamilyStmt: return "AlterOpFamilyStmt"; break;
-               case T_PrepareStmt: return "PrepareStmt"; break;
-               case T_ExecuteStmt: return "ExecuteStmt"; break;
-               case T_DeallocateStmt: return "DeallocateStmt"; break;
-               case T_DeclareCursorStmt: return "DeclareCursorStmt"; break;
-               case T_CreateTableSpaceStmt: return "CreateTableSpaceStmt"; break;
-               case T_DropTableSpaceStmt: return "DropTableSpaceStmt"; break;
-               case T_AlterObjectDependsStmt: return "AlterObjectDependsStmt"; break;
-               case T_AlterObjectSchemaStmt: return "AlterObjectSchemaStmt"; break;
-               case T_AlterOwnerStmt: return "AlterOwnerStmt"; break;
-               case T_AlterOperatorStmt: return "AlterOperatorStmt"; break;
-               case T_AlterTypeStmt: return "AlterTypeStmt"; break;
-               case T_DropOwnedStmt: return "DropOwnedStmt"; break;
-               case T_ReassignOwnedStmt: return "ReassignOwnedStmt"; break;
-               case T_CompositeTypeStmt: return "CompositeTypeStmt"; break;
-               case T_CreateEnumStmt: return "CreateEnumStmt"; break;
-               case T_CreateRangeStmt: return "CreateRangeStmt"; break;
-               case T_AlterEnumStmt: return "AlterEnumStmt"; break;
-               case T_AlterTSDictionaryStmt: return "AlterTSDictionaryStmt"; break;
-               case T_AlterTSConfigurationStmt: return "AlterTSConfigurationStmt"; break;
-               case T_CreateFdwStmt: return "CreateFdwStmt"; break;
-               case T_AlterFdwStmt: return "AlterFdwStmt"; break;
-               case T_CreateForeignServerStmt: return "CreateForeignServerStmt"; break;
-               case T_AlterForeignServerStmt: return "AlterForeignServerStmt"; break;
-               case T_CreateUserMappingStmt: return "CreateUserMappingStmt"; break;
-               case T_AlterUserMappingStmt: return "AlterUserMappingStmt"; break;
-               case T_DropUserMappingStmt: return "DropUserMappingStmt"; break;
-               case T_AlterTableSpaceOptionsStmt: return "AlterTableSpaceOptionsStmt"; break;
-               case T_AlterTableMoveAllStmt: return "AlterTableMoveAllStmt"; break;
-               case T_SecLabelStmt: return "SecLabelStmt"; break;
-               case T_CreateForeignTableStmt: return "CreateForeignTableStmt"; break;
-               case T_ImportForeignSchemaStmt: return "ImportForeignSchemaStmt"; break;
-               case T_CreateExtensionStmt: return "CreateExtensionStmt"; break;
-               case T_AlterExtensionStmt: return "AlterExtensionStmt"; break;
-               case T_AlterExtensionContentsStmt: return "AlterExtensionContentsStmt"; break;
-               case T_CreateEventTrigStmt: return "CreateEventTrigStmt"; break;
-               case T_AlterEventTrigStmt: return "AlterEventTrigStmt"; break;
-               case T_RefreshMatViewStmt: return "RefreshMatViewStmt"; break;
-               case T_ReplicaIdentityStmt: return "ReplicaIdentityStmt"; break;
-               case T_AlterSystemStmt: return "AlterSystemStmt"; break;
-               case T_CreatePolicyStmt: return "CreatePolicyStmt"; break;
-               case T_AlterPolicyStmt: return "AlterPolicyStmt"; break;
-               case T_CreateTransformStmt: return "CreateTransformStmt"; break;
-               case T_CreateAmStmt: return "CreateAmStmt"; break;
-               case T_CreatePublicationStmt: return "CreatePublicationStmt"; break;
-               case T_AlterPublicationStmt: return "AlterPublicationStmt"; break;
-               case T_CreateSubscriptionStmt: return "CreateSubscriptionStmt"; break;
-               case T_AlterSubscriptionStmt: return "AlterSubscriptionStmt"; break;
-               case T_DropSubscriptionStmt: return "DropSubscriptionStmt"; break;
-               case T_CreateStatsStmt: return "CreateStatsStmt"; break;
-               case T_AlterCollationStmt: return "AlterCollationStmt"; break;
-               case T_CallStmt: return "CallStmt"; break;
-               case T_AlterStatsStmt: return "AlterStatsStmt"; break;
-               case T_A_Expr: return "A_Expr"; break;
-               case T_ColumnRef: return "ColumnRef"; break;
-               case T_ParamRef: return "ParamRef"; break;
-               case T_A_Const: return "A_Const"; break;
-               case T_FuncCall: return "FuncCall"; break;
-               case T_A_Star: return "A_Star"; break;
-               case T_A_Indices: return "A_Indices"; break;
-               case T_A_Indirection: return "A_Indirection"; break;
-               case T_A_ArrayExpr: return "A_ArrayExpr"; break;
-               case T_ResTarget: return "ResTarget"; break;
-               case T_MultiAssignRef: return "MultiAssignRef"; break;
-               case T_TypeCast: return "TypeCast"; break;
-               case T_CollateClause: return "CollateClause"; break;
-               case T_SortBy: return "SortBy"; break;
-               case T_WindowDef: return "WindowDef"; break;
-               case T_RangeSubselect: return "RangeSubselect"; break;
-               case T_RangeFunction: return "RangeFunction"; break;
-               case T_RangeTableSample: return "RangeTableSample"; break;
-               case T_RangeTableFunc: return "RangeTableFunc"; break;
-               case T_RangeTableFuncCol: return "RangeTableFuncCol"; break;
-               case T_TypeName: return "TypeName"; break;
-               case T_ColumnDef: return "ColumnDef"; break;
-               case T_IndexElem: return "IndexElem"; break;
-               case T_StatsElem: return "StatsElem"; break;
-               case T_Constraint: return "Constraint"; break;
-               case T_DefElem: return "DefElem"; break;
-               case T_RangeTblEntry: return "RangeTblEntry"; break;
-               case T_RangeTblFunction: return "RangeTblFunction"; break;
-               case T_TableSampleClause: return "TableSampleClause"; break;
-               case T_WithCheckOption: return "WithCheckOption"; break;
-               case T_SortGroupClause: return "SortGroupClause"; break;
-               case T_GroupingSet: return "GroupingSet"; break;
-               case T_WindowClause: return "WindowClause"; break;
-               case T_ObjectWithArgs: return "ObjectWithArgs"; break;
-               case T_AccessPriv: return "AccessPriv"; break;
-               case T_CreateOpClassItem: return "CreateOpClassItem"; break;
-               case T_TableLikeClause: return "TableLikeClause"; break;
-               case T_FunctionParameter: return "FunctionParameter"; break;
-               case T_LockingClause: return "LockingClause"; break;
-               case T_RowMarkClause: return "RowMarkClause"; break;
-               case T_XmlSerialize: return "XmlSerialize"; break;
-               case T_WithClause: return "WithClause"; break;
-               case T_InferClause: return "InferClause"; break;
-               case T_OnConflictClause: return "OnConflictClause"; break;
-               case T_CTESearchClause: return "CTESearchClause"; break;
-               case T_CTECycleClause: return "CTECycleClause"; break;
-               case T_CommonTableExpr: return "CommonTableExpr"; break;
-               case T_RoleSpec: return "RoleSpec"; break;
-               case T_TriggerTransition: return "TriggerTransition"; break;
-               case T_PartitionElem: return "PartitionElem"; break;
-               case T_PartitionSpec: return "PartitionSpec"; break;
-               case T_PartitionBoundSpec: return "PartitionBoundSpec"; break;
-               case T_PartitionRangeDatum: return "PartitionRangeDatum"; break;
-               case T_PartitionCmd: return "PartitionCmd"; break;
-               case T_VacuumRelation: return "VacuumRelation"; break;
-               case T_PublicationObjSpec: return "PublicationObjSpec"; break;
-               case T_PublicationTable: return "PublicationTable"; break;
-               case T_IdentifySystemCmd: return "IdentifySystemCmd"; break;
-               case T_BaseBackupCmd: return "BaseBackupCmd"; break;
-               case T_CreateReplicationSlotCmd: return "CreateReplicationSlotCmd"; break;
-               case T_DropReplicationSlotCmd: return "DropReplicationSlotCmd"; break;
-               case T_ReadReplicationSlotCmd: return "ReadReplicationSlotCmd"; break;
-               case T_StartReplicationCmd: return "StartReplicationCmd"; break;
-               case T_TimeLineHistoryCmd: return "TimeLineHistoryCmd"; break;
-               case T_TriggerData: return "TriggerData"; break;
-               case T_EventTriggerData: return "EventTriggerData"; break;
-               case T_ReturnSetInfo: return "ReturnSetInfo"; break;
-               case T_WindowObjectData: return "WindowObjectData"; break;
-               case T_TIDBitmap: return "TIDBitmap"; break;
-               case T_InlineCodeBlock: return "InlineCodeBlock"; break;
-               case T_FdwRoutine: return "FdwRoutine"; break;
-               case T_IndexAmRoutine: return "IndexAmRoutine"; break;
-               case T_TableAmRoutine: return "TableAmRoutine"; break;
-               case T_TsmRoutine: return "TsmRoutine"; break;
-               case T_ForeignKeyCacheInfo: return "ForeignKeyCacheInfo"; break;
-               case T_CallContext: return "CallContext"; break;
-               case T_SupportRequestSimplify: return "SupportRequestSimplify"; break;
-               case T_SupportRequestSelectivity: return "SupportRequestSelectivity"; break;
-               case T_SupportRequestCost: return "SupportRequestCost"; break;
-               case T_SupportRequestRows: return "SupportRequestRows"; break;
-               case T_SupportRequestIndexCondition: return "SupportRequestIndexCondition"; break;
+               case T_Invalid:
+                       return "Invalid";
+                       break;
+               case T_IndexInfo:
+                       return "IndexInfo";
+                       break;
+               case T_ExprContext:
+                       return "ExprContext";
+                       break;
+               case T_ProjectionInfo:
+                       return "ProjectionInfo";
+                       break;
+               case T_JunkFilter:
+                       return "JunkFilter";
+                       break;
+               case T_OnConflictSetState:
+                       return "OnConflictSetState";
+                       break;
+               case T_ResultRelInfo:
+                       return "ResultRelInfo";
+                       break;
+               case T_EState:
+                       return "EState";
+                       break;
+               case T_TupleTableSlot:
+                       return "TupleTableSlot";
+                       break;
+               case T_Plan:
+                       return "Plan";
+                       break;
+               case T_Result:
+                       return "Result";
+                       break;
+               case T_ProjectSet:
+                       return "ProjectSet";
+                       break;
+               case T_ModifyTable:
+                       return "ModifyTable";
+                       break;
+               case T_Append:
+                       return "Append";
+                       break;
+               case T_MergeAppend:
+                       return "MergeAppend";
+                       break;
+               case T_RecursiveUnion:
+                       return "RecursiveUnion";
+                       break;
+               case T_BitmapAnd:
+                       return "BitmapAnd";
+                       break;
+               case T_BitmapOr:
+                       return "BitmapOr";
+                       break;
+               case T_Scan:
+                       return "Scan";
+                       break;
+               case T_SeqScan:
+                       return "SeqScan";
+                       break;
+               case T_SampleScan:
+                       return "SampleScan";
+                       break;
+               case T_IndexScan:
+                       return "IndexScan";
+                       break;
+               case T_IndexOnlyScan:
+                       return "IndexOnlyScan";
+                       break;
+               case T_BitmapIndexScan:
+                       return "BitmapIndexScan";
+                       break;
+               case T_BitmapHeapScan:
+                       return "BitmapHeapScan";
+                       break;
+               case T_TidScan:
+                       return "TidScan";
+                       break;
+               case T_TidRangeScan:
+                       return "TidRangeScan";
+                       break;
+               case T_SubqueryScan:
+                       return "SubqueryScan";
+                       break;
+               case T_FunctionScan:
+                       return "FunctionScan";
+                       break;
+               case T_ValuesScan:
+                       return "ValuesScan";
+                       break;
+               case T_TableFuncScan:
+                       return "TableFuncScan";
+                       break;
+               case T_CteScan:
+                       return "CteScan";
+                       break;
+               case T_NamedTuplestoreScan:
+                       return "NamedTuplestoreScan";
+                       break;
+               case T_WorkTableScan:
+                       return "WorkTableScan";
+                       break;
+               case T_ForeignScan:
+                       return "ForeignScan";
+                       break;
+               case T_CustomScan:
+                       return "CustomScan";
+                       break;
+               case T_Join:
+                       return "Join";
+                       break;
+               case T_NestLoop:
+                       return "NestLoop";
+                       break;
+               case T_MergeJoin:
+                       return "MergeJoin";
+                       break;
+               case T_HashJoin:
+                       return "HashJoin";
+                       break;
+               case T_Material:
+                       return "Material";
+                       break;
+               case T_Memoize:
+                       return "Memoize";
+                       break;
+               case T_Sort:
+                       return "Sort";
+                       break;
+               case T_IncrementalSort:
+                       return "IncrementalSort";
+                       break;
+               case T_Group:
+                       return "Group";
+                       break;
+               case T_Agg:
+                       return "Agg";
+                       break;
+               case T_WindowAgg:
+                       return "WindowAgg";
+                       break;
+               case T_Unique:
+                       return "Unique";
+                       break;
+               case T_Gather:
+                       return "Gather";
+                       break;
+               case T_GatherMerge:
+                       return "GatherMerge";
+                       break;
+               case T_Hash:
+                       return "Hash";
+                       break;
+               case T_SetOp:
+                       return "SetOp";
+                       break;
+               case T_LockRows:
+                       return "LockRows";
+                       break;
+               case T_Limit:
+                       return "Limit";
+                       break;
+               case T_NestLoopParam:
+                       return "NestLoopParam";
+                       break;
+               case T_PlanRowMark:
+                       return "PlanRowMark";
+                       break;
+               case T_PartitionPruneInfo:
+                       return "PartitionPruneInfo";
+                       break;
+               case T_PartitionedRelPruneInfo:
+                       return "PartitionedRelPruneInfo";
+                       break;
+               case T_PartitionPruneStepOp:
+                       return "PartitionPruneStepOp";
+                       break;
+               case T_PartitionPruneStepCombine:
+                       return "PartitionPruneStepCombine";
+                       break;
+               case T_PlanInvalItem:
+                       return "PlanInvalItem";
+                       break;
+               case T_PlanState:
+                       return "PlanState";
+                       break;
+               case T_ResultState:
+                       return "ResultState";
+                       break;
+               case T_ProjectSetState:
+                       return "ProjectSetState";
+                       break;
+               case T_ModifyTableState:
+                       return "ModifyTableState";
+                       break;
+               case T_AppendState:
+                       return "AppendState";
+                       break;
+               case T_MergeAppendState:
+                       return "MergeAppendState";
+                       break;
+               case T_RecursiveUnionState:
+                       return "RecursiveUnionState";
+                       break;
+               case T_BitmapAndState:
+                       return "BitmapAndState";
+                       break;
+               case T_BitmapOrState:
+                       return "BitmapOrState";
+                       break;
+               case T_ScanState:
+                       return "ScanState";
+                       break;
+               case T_SeqScanState:
+                       return "SeqScanState";
+                       break;
+               case T_SampleScanState:
+                       return "SampleScanState";
+                       break;
+               case T_IndexScanState:
+                       return "IndexScanState";
+                       break;
+               case T_IndexOnlyScanState:
+                       return "IndexOnlyScanState";
+                       break;
+               case T_BitmapIndexScanState:
+                       return "BitmapIndexScanState";
+                       break;
+               case T_BitmapHeapScanState:
+                       return "BitmapHeapScanState";
+                       break;
+               case T_TidScanState:
+                       return "TidScanState";
+                       break;
+               case T_TidRangeScanState:
+                       return "TidRangeScanState";
+                       break;
+               case T_SubqueryScanState:
+                       return "SubqueryScanState";
+                       break;
+               case T_FunctionScanState:
+                       return "FunctionScanState";
+                       break;
+               case T_TableFuncScanState:
+                       return "TableFuncScanState";
+                       break;
+               case T_ValuesScanState:
+                       return "ValuesScanState";
+                       break;
+               case T_CteScanState:
+                       return "CteScanState";
+                       break;
+               case T_NamedTuplestoreScanState:
+                       return "NamedTuplestoreScanState";
+                       break;
+               case T_WorkTableScanState:
+                       return "WorkTableScanState";
+                       break;
+               case T_ForeignScanState:
+                       return "ForeignScanState";
+                       break;
+               case T_CustomScanState:
+                       return "CustomScanState";
+                       break;
+               case T_JoinState:
+                       return "JoinState";
+                       break;
+               case T_NestLoopState:
+                       return "NestLoopState";
+                       break;
+               case T_MergeJoinState:
+                       return "MergeJoinState";
+                       break;
+               case T_HashJoinState:
+                       return "HashJoinState";
+                       break;
+               case T_MaterialState:
+                       return "MaterialState";
+                       break;
+               case T_MemoizeState:
+                       return "MemoizeState";
+                       break;
+               case T_SortState:
+                       return "SortState";
+                       break;
+               case T_IncrementalSortState:
+                       return "IncrementalSortState";
+                       break;
+               case T_GroupState:
+                       return "GroupState";
+                       break;
+               case T_AggState:
+                       return "AggState";
+                       break;
+               case T_WindowAggState:
+                       return "WindowAggState";
+                       break;
+               case T_UniqueState:
+                       return "UniqueState";
+                       break;
+               case T_GatherState:
+                       return "GatherState";
+                       break;
+               case T_GatherMergeState:
+                       return "GatherMergeState";
+                       break;
+               case T_HashState:
+                       return "HashState";
+                       break;
+               case T_SetOpState:
+                       return "SetOpState";
+                       break;
+               case T_LockRowsState:
+                       return "LockRowsState";
+                       break;
+               case T_LimitState:
+                       return "LimitState";
+                       break;
+               case T_Alias:
+                       return "Alias";
+                       break;
+               case T_RangeVar:
+                       return "RangeVar";
+                       break;
+               case T_TableFunc:
+                       return "TableFunc";
+                       break;
+               case T_Var:
+                       return "Var";
+                       break;
+               case T_Const:
+                       return "Const";
+                       break;
+               case T_Param:
+                       return "Param";
+                       break;
+               case T_Aggref:
+                       return "Aggref";
+                       break;
+               case T_GroupingFunc:
+                       return "GroupingFunc";
+                       break;
+               case T_WindowFunc:
+                       return "WindowFunc";
+                       break;
+               case T_SubscriptingRef:
+                       return "SubscriptingRef";
+                       break;
+               case T_FuncExpr:
+                       return "FuncExpr";
+                       break;
+               case T_NamedArgExpr:
+                       return "NamedArgExpr";
+                       break;
+               case T_OpExpr:
+                       return "OpExpr";
+                       break;
+               case T_DistinctExpr:
+                       return "DistinctExpr";
+                       break;
+               case T_NullIfExpr:
+                       return "NullIfExpr";
+                       break;
+               case T_ScalarArrayOpExpr:
+                       return "ScalarArrayOpExpr";
+                       break;
+               case T_BoolExpr:
+                       return "BoolExpr";
+                       break;
+               case T_SubLink:
+                       return "SubLink";
+                       break;
+               case T_SubPlan:
+                       return "SubPlan";
+                       break;
+               case T_AlternativeSubPlan:
+                       return "AlternativeSubPlan";
+                       break;
+               case T_FieldSelect:
+                       return "FieldSelect";
+                       break;
+               case T_FieldStore:
+                       return "FieldStore";
+                       break;
+               case T_RelabelType:
+                       return "RelabelType";
+                       break;
+               case T_CoerceViaIO:
+                       return "CoerceViaIO";
+                       break;
+               case T_ArrayCoerceExpr:
+                       return "ArrayCoerceExpr";
+                       break;
+               case T_ConvertRowtypeExpr:
+                       return "ConvertRowtypeExpr";
+                       break;
+               case T_CollateExpr:
+                       return "CollateExpr";
+                       break;
+               case T_CaseExpr:
+                       return "CaseExpr";
+                       break;
+               case T_CaseWhen:
+                       return "CaseWhen";
+                       break;
+               case T_CaseTestExpr:
+                       return "CaseTestExpr";
+                       break;
+               case T_ArrayExpr:
+                       return "ArrayExpr";
+                       break;
+               case T_RowExpr:
+                       return "RowExpr";
+                       break;
+               case T_RowCompareExpr:
+                       return "RowCompareExpr";
+                       break;
+               case T_CoalesceExpr:
+                       return "CoalesceExpr";
+                       break;
+               case T_MinMaxExpr:
+                       return "MinMaxExpr";
+                       break;
+               case T_SQLValueFunction:
+                       return "SQLValueFunction";
+                       break;
+               case T_XmlExpr:
+                       return "XmlExpr";
+                       break;
+               case T_NullTest:
+                       return "NullTest";
+                       break;
+               case T_BooleanTest:
+                       return "BooleanTest";
+                       break;
+               case T_CoerceToDomain:
+                       return "CoerceToDomain";
+                       break;
+               case T_CoerceToDomainValue:
+                       return "CoerceToDomainValue";
+                       break;
+               case T_SetToDefault:
+                       return "SetToDefault";
+                       break;
+               case T_CurrentOfExpr:
+                       return "CurrentOfExpr";
+                       break;
+               case T_NextValueExpr:
+                       return "NextValueExpr";
+                       break;
+               case T_InferenceElem:
+                       return "InferenceElem";
+                       break;
+               case T_TargetEntry:
+                       return "TargetEntry";
+                       break;
+               case T_RangeTblRef:
+                       return "RangeTblRef";
+                       break;
+               case T_JoinExpr:
+                       return "JoinExpr";
+                       break;
+               case T_FromExpr:
+                       return "FromExpr";
+                       break;
+               case T_OnConflictExpr:
+                       return "OnConflictExpr";
+                       break;
+               case T_IntoClause:
+                       return "IntoClause";
+                       break;
+               case T_ExprState:
+                       return "ExprState";
+                       break;
+               case T_WindowFuncExprState:
+                       return "WindowFuncExprState";
+                       break;
+               case T_SetExprState:
+                       return "SetExprState";
+                       break;
+               case T_SubPlanState:
+                       return "SubPlanState";
+                       break;
+               case T_DomainConstraintState:
+                       return "DomainConstraintState";
+                       break;
+               case T_PlannerInfo:
+                       return "PlannerInfo";
+                       break;
+               case T_PlannerGlobal:
+                       return "PlannerGlobal";
+                       break;
+               case T_RelOptInfo:
+                       return "RelOptInfo";
+                       break;
+               case T_IndexOptInfo:
+                       return "IndexOptInfo";
+                       break;
+               case T_ForeignKeyOptInfo:
+                       return "ForeignKeyOptInfo";
+                       break;
+               case T_ParamPathInfo:
+                       return "ParamPathInfo";
+                       break;
+               case T_Path:
+                       return "Path";
+                       break;
+               case T_IndexPath:
+                       return "IndexPath";
+                       break;
+               case T_BitmapHeapPath:
+                       return "BitmapHeapPath";
+                       break;
+               case T_BitmapAndPath:
+                       return "BitmapAndPath";
+                       break;
+               case T_BitmapOrPath:
+                       return "BitmapOrPath";
+                       break;
+               case T_TidPath:
+                       return "TidPath";
+                       break;
+               case T_TidRangePath:
+                       return "TidRangePath";
+                       break;
+               case T_SubqueryScanPath:
+                       return "SubqueryScanPath";
+                       break;
+               case T_ForeignPath:
+                       return "ForeignPath";
+                       break;
+               case T_CustomPath:
+                       return "CustomPath";
+                       break;
+               case T_NestPath:
+                       return "NestPath";
+                       break;
+               case T_MergePath:
+                       return "MergePath";
+                       break;
+               case T_HashPath:
+                       return "HashPath";
+                       break;
+               case T_AppendPath:
+                       return "AppendPath";
+                       break;
+               case T_MergeAppendPath:
+                       return "MergeAppendPath";
+                       break;
+               case T_GroupResultPath:
+                       return "GroupResultPath";
+                       break;
+               case T_MaterialPath:
+                       return "MaterialPath";
+                       break;
+               case T_MemoizePath:
+                       return "MemoizePath";
+                       break;
+               case T_UniquePath:
+                       return "UniquePath";
+                       break;
+               case T_GatherPath:
+                       return "GatherPath";
+                       break;
+               case T_GatherMergePath:
+                       return "GatherMergePath";
+                       break;
+               case T_ProjectionPath:
+                       return "ProjectionPath";
+                       break;
+               case T_ProjectSetPath:
+                       return "ProjectSetPath";
+                       break;
+               case T_SortPath:
+                       return "SortPath";
+                       break;
+               case T_IncrementalSortPath:
+                       return "IncrementalSortPath";
+                       break;
+               case T_GroupPath:
+                       return "GroupPath";
+                       break;
+               case T_UpperUniquePath:
+                       return "UpperUniquePath";
+                       break;
+               case T_AggPath:
+                       return "AggPath";
+                       break;
+               case T_GroupingSetsPath:
+                       return "GroupingSetsPath";
+                       break;
+               case T_MinMaxAggPath:
+                       return "MinMaxAggPath";
+                       break;
+               case T_WindowAggPath:
+                       return "WindowAggPath";
+                       break;
+               case T_SetOpPath:
+                       return "SetOpPath";
+                       break;
+               case T_RecursiveUnionPath:
+                       return "RecursiveUnionPath";
+                       break;
+               case T_LockRowsPath:
+                       return "LockRowsPath";
+                       break;
+               case T_ModifyTablePath:
+                       return "ModifyTablePath";
+                       break;
+               case T_LimitPath:
+                       return "LimitPath";
+                       break;
+               case T_EquivalenceClass:
+                       return "EquivalenceClass";
+                       break;
+               case T_EquivalenceMember:
+                       return "EquivalenceMember";
+                       break;
+               case T_PathKey:
+                       return "PathKey";
+                       break;
+               case T_PathTarget:
+                       return "PathTarget";
+                       break;
+               case T_RestrictInfo:
+                       return "RestrictInfo";
+                       break;
+               case T_IndexClause:
+                       return "IndexClause";
+                       break;
+               case T_PlaceHolderVar:
+                       return "PlaceHolderVar";
+                       break;
+               case T_SpecialJoinInfo:
+                       return "SpecialJoinInfo";
+                       break;
+               case T_AppendRelInfo:
+                       return "AppendRelInfo";
+                       break;
+               case T_RowIdentityVarInfo:
+                       return "RowIdentityVarInfo";
+                       break;
+               case T_PlaceHolderInfo:
+                       return "PlaceHolderInfo";
+                       break;
+               case T_MinMaxAggInfo:
+                       return "MinMaxAggInfo";
+                       break;
+               case T_PlannerParamItem:
+                       return "PlannerParamItem";
+                       break;
+               case T_RollupData:
+                       return "RollupData";
+                       break;
+               case T_GroupingSetData:
+                       return "GroupingSetData";
+                       break;
+               case T_StatisticExtInfo:
+                       return "StatisticExtInfo";
+                       break;
+               case T_AllocSetContext:
+                       return "AllocSetContext";
+                       break;
+               case T_SlabContext:
+                       return "SlabContext";
+                       break;
+               case T_GenerationContext:
+                       return "GenerationContext";
+                       break;
+               case T_Integer:
+                       return "Integer";
+                       break;
+               case T_Float:
+                       return "Float";
+                       break;
+               case T_Boolean:
+                       return "Boolean";
+                       break;
+               case T_String:
+                       return "String";
+                       break;
+               case T_BitString:
+                       return "BitString";
+                       break;
+               case T_List:
+                       return "List";
+                       break;
+               case T_IntList:
+                       return "IntList";
+                       break;
+               case T_OidList:
+                       return "OidList";
+                       break;
+               case T_ExtensibleNode:
+                       return "ExtensibleNode";
+                       break;
+               case T_RawStmt:
+                       return "RawStmt";
+                       break;
+               case T_Query:
+                       return "Query";
+                       break;
+               case T_PlannedStmt:
+                       return "PlannedStmt";
+                       break;
+               case T_InsertStmt:
+                       return "InsertStmt";
+                       break;
+               case T_DeleteStmt:
+                       return "DeleteStmt";
+                       break;
+               case T_UpdateStmt:
+                       return "UpdateStmt";
+                       break;
+               case T_SelectStmt:
+                       return "SelectStmt";
+                       break;
+               case T_ReturnStmt:
+                       return "ReturnStmt";
+                       break;
+               case T_PLAssignStmt:
+                       return "PLAssignStmt";
+                       break;
+               case T_AlterTableStmt:
+                       return "AlterTableStmt";
+                       break;
+               case T_AlterTableCmd:
+                       return "AlterTableCmd";
+                       break;
+               case T_AlterDomainStmt:
+                       return "AlterDomainStmt";
+                       break;
+               case T_SetOperationStmt:
+                       return "SetOperationStmt";
+                       break;
+               case T_GrantStmt:
+                       return "GrantStmt";
+                       break;
+               case T_GrantRoleStmt:
+                       return "GrantRoleStmt";
+                       break;
+               case T_AlterDefaultPrivilegesStmt:
+                       return "AlterDefaultPrivilegesStmt";
+                       break;
+               case T_ClosePortalStmt:
+                       return "ClosePortalStmt";
+                       break;
+               case T_ClusterStmt:
+                       return "ClusterStmt";
+                       break;
+               case T_CopyStmt:
+                       return "CopyStmt";
+                       break;
+               case T_CreateStmt:
+                       return "CreateStmt";
+                       break;
+               case T_DefineStmt:
+                       return "DefineStmt";
+                       break;
+               case T_DropStmt:
+                       return "DropStmt";
+                       break;
+               case T_TruncateStmt:
+                       return "TruncateStmt";
+                       break;
+               case T_CommentStmt:
+                       return "CommentStmt";
+                       break;
+               case T_FetchStmt:
+                       return "FetchStmt";
+                       break;
+               case T_IndexStmt:
+                       return "IndexStmt";
+                       break;
+               case T_CreateFunctionStmt:
+                       return "CreateFunctionStmt";
+                       break;
+               case T_AlterFunctionStmt:
+                       return "AlterFunctionStmt";
+                       break;
+               case T_DoStmt:
+                       return "DoStmt";
+                       break;
+               case T_RenameStmt:
+                       return "RenameStmt";
+                       break;
+               case T_RuleStmt:
+                       return "RuleStmt";
+                       break;
+               case T_NotifyStmt:
+                       return "NotifyStmt";
+                       break;
+               case T_ListenStmt:
+                       return "ListenStmt";
+                       break;
+               case T_UnlistenStmt:
+                       return "UnlistenStmt";
+                       break;
+               case T_TransactionStmt:
+                       return "TransactionStmt";
+                       break;
+               case T_ViewStmt:
+                       return "ViewStmt";
+                       break;
+               case T_LoadStmt:
+                       return "LoadStmt";
+                       break;
+               case T_CreateDomainStmt:
+                       return "CreateDomainStmt";
+                       break;
+               case T_CreatedbStmt:
+                       return "CreatedbStmt";
+                       break;
+               case T_DropdbStmt:
+                       return "DropdbStmt";
+                       break;
+               case T_VacuumStmt:
+                       return "VacuumStmt";
+                       break;
+               case T_ExplainStmt:
+                       return "ExplainStmt";
+                       break;
+               case T_CreateTableAsStmt:
+                       return "CreateTableAsStmt";
+                       break;
+               case T_CreateSeqStmt:
+                       return "CreateSeqStmt";
+                       break;
+               case T_AlterSeqStmt:
+                       return "AlterSeqStmt";
+                       break;
+               case T_VariableSetStmt:
+                       return "VariableSetStmt";
+                       break;
+               case T_VariableShowStmt:
+                       return "VariableShowStmt";
+                       break;
+               case T_DiscardStmt:
+                       return "DiscardStmt";
+                       break;
+               case T_CreateTrigStmt:
+                       return "CreateTrigStmt";
+                       break;
+               case T_CreatePLangStmt:
+                       return "CreatePLangStmt";
+                       break;
+               case T_CreateRoleStmt:
+                       return "CreateRoleStmt";
+                       break;
+               case T_AlterRoleStmt:
+                       return "AlterRoleStmt";
+                       break;
+               case T_DropRoleStmt:
+                       return "DropRoleStmt";
+                       break;
+               case T_LockStmt:
+                       return "LockStmt";
+                       break;
+               case T_ConstraintsSetStmt:
+                       return "ConstraintsSetStmt";
+                       break;
+               case T_ReindexStmt:
+                       return "ReindexStmt";
+                       break;
+               case T_CheckPointStmt:
+                       return "CheckPointStmt";
+                       break;
+               case T_CreateSchemaStmt:
+                       return "CreateSchemaStmt";
+                       break;
+               case T_AlterDatabaseStmt:
+                       return "AlterDatabaseStmt";
+                       break;
+               case T_AlterDatabaseRefreshCollStmt:
+                       return "AlterDatabaseRefreshCollStmt";
+                       break;
+               case T_AlterDatabaseSetStmt:
+                       return "AlterDatabaseSetStmt";
+                       break;
+               case T_AlterRoleSetStmt:
+                       return "AlterRoleSetStmt";
+                       break;
+               case T_CreateConversionStmt:
+                       return "CreateConversionStmt";
+                       break;
+               case T_CreateCastStmt:
+                       return "CreateCastStmt";
+                       break;
+               case T_CreateOpClassStmt:
+                       return "CreateOpClassStmt";
+                       break;
+               case T_CreateOpFamilyStmt:
+                       return "CreateOpFamilyStmt";
+                       break;
+               case T_AlterOpFamilyStmt:
+                       return "AlterOpFamilyStmt";
+                       break;
+               case T_PrepareStmt:
+                       return "PrepareStmt";
+                       break;
+               case T_ExecuteStmt:
+                       return "ExecuteStmt";
+                       break;
+               case T_DeallocateStmt:
+                       return "DeallocateStmt";
+                       break;
+               case T_DeclareCursorStmt:
+                       return "DeclareCursorStmt";
+                       break;
+               case T_CreateTableSpaceStmt:
+                       return "CreateTableSpaceStmt";
+                       break;
+               case T_DropTableSpaceStmt:
+                       return "DropTableSpaceStmt";
+                       break;
+               case T_AlterObjectDependsStmt:
+                       return "AlterObjectDependsStmt";
+                       break;
+               case T_AlterObjectSchemaStmt:
+                       return "AlterObjectSchemaStmt";
+                       break;
+               case T_AlterOwnerStmt:
+                       return "AlterOwnerStmt";
+                       break;
+               case T_AlterOperatorStmt:
+                       return "AlterOperatorStmt";
+                       break;
+               case T_AlterTypeStmt:
+                       return "AlterTypeStmt";
+                       break;
+               case T_DropOwnedStmt:
+                       return "DropOwnedStmt";
+                       break;
+               case T_ReassignOwnedStmt:
+                       return "ReassignOwnedStmt";
+                       break;
+               case T_CompositeTypeStmt:
+                       return "CompositeTypeStmt";
+                       break;
+               case T_CreateEnumStmt:
+                       return "CreateEnumStmt";
+                       break;
+               case T_CreateRangeStmt:
+                       return "CreateRangeStmt";
+                       break;
+               case T_AlterEnumStmt:
+                       return "AlterEnumStmt";
+                       break;
+               case T_AlterTSDictionaryStmt:
+                       return "AlterTSDictionaryStmt";
+                       break;
+               case T_AlterTSConfigurationStmt:
+                       return "AlterTSConfigurationStmt";
+                       break;
+               case T_CreateFdwStmt:
+                       return "CreateFdwStmt";
+                       break;
+               case T_AlterFdwStmt:
+                       return "AlterFdwStmt";
+                       break;
+               case T_CreateForeignServerStmt:
+                       return "CreateForeignServerStmt";
+                       break;
+               case T_AlterForeignServerStmt:
+                       return "AlterForeignServerStmt";
+                       break;
+               case T_CreateUserMappingStmt:
+                       return "CreateUserMappingStmt";
+                       break;
+               case T_AlterUserMappingStmt:
+                       return "AlterUserMappingStmt";
+                       break;
+               case T_DropUserMappingStmt:
+                       return "DropUserMappingStmt";
+                       break;
+               case T_AlterTableSpaceOptionsStmt:
+                       return "AlterTableSpaceOptionsStmt";
+                       break;
+               case T_AlterTableMoveAllStmt:
+                       return "AlterTableMoveAllStmt";
+                       break;
+               case T_SecLabelStmt:
+                       return "SecLabelStmt";
+                       break;
+               case T_CreateForeignTableStmt:
+                       return "CreateForeignTableStmt";
+                       break;
+               case T_ImportForeignSchemaStmt:
+                       return "ImportForeignSchemaStmt";
+                       break;
+               case T_CreateExtensionStmt:
+                       return "CreateExtensionStmt";
+                       break;
+               case T_AlterExtensionStmt:
+                       return "AlterExtensionStmt";
+                       break;
+               case T_AlterExtensionContentsStmt:
+                       return "AlterExtensionContentsStmt";
+                       break;
+               case T_CreateEventTrigStmt:
+                       return "CreateEventTrigStmt";
+                       break;
+               case T_AlterEventTrigStmt:
+                       return "AlterEventTrigStmt";
+                       break;
+               case T_RefreshMatViewStmt:
+                       return "RefreshMatViewStmt";
+                       break;
+               case T_ReplicaIdentityStmt:
+                       return "ReplicaIdentityStmt";
+                       break;
+               case T_AlterSystemStmt:
+                       return "AlterSystemStmt";
+                       break;
+               case T_CreatePolicyStmt:
+                       return "CreatePolicyStmt";
+                       break;
+               case T_AlterPolicyStmt:
+                       return "AlterPolicyStmt";
+                       break;
+               case T_CreateTransformStmt:
+                       return "CreateTransformStmt";
+                       break;
+               case T_CreateAmStmt:
+                       return "CreateAmStmt";
+                       break;
+               case T_CreatePublicationStmt:
+                       return "CreatePublicationStmt";
+                       break;
+               case T_AlterPublicationStmt:
+                       return "AlterPublicationStmt";
+                       break;
+               case T_CreateSubscriptionStmt:
+                       return "CreateSubscriptionStmt";
+                       break;
+               case T_AlterSubscriptionStmt:
+                       return "AlterSubscriptionStmt";
+                       break;
+               case T_DropSubscriptionStmt:
+                       return "DropSubscriptionStmt";
+                       break;
+               case T_CreateStatsStmt:
+                       return "CreateStatsStmt";
+                       break;
+               case T_AlterCollationStmt:
+                       return "AlterCollationStmt";
+                       break;
+               case T_CallStmt:
+                       return "CallStmt";
+                       break;
+               case T_AlterStatsStmt:
+                       return "AlterStatsStmt";
+                       break;
+               case T_A_Expr:
+                       return "A_Expr";
+                       break;
+               case T_ColumnRef:
+                       return "ColumnRef";
+                       break;
+               case T_ParamRef:
+                       return "ParamRef";
+                       break;
+               case T_A_Const:
+                       return "A_Const";
+                       break;
+               case T_FuncCall:
+                       return "FuncCall";
+                       break;
+               case T_A_Star:
+                       return "A_Star";
+                       break;
+               case T_A_Indices:
+                       return "A_Indices";
+                       break;
+               case T_A_Indirection:
+                       return "A_Indirection";
+                       break;
+               case T_A_ArrayExpr:
+                       return "A_ArrayExpr";
+                       break;
+               case T_ResTarget:
+                       return "ResTarget";
+                       break;
+               case T_MultiAssignRef:
+                       return "MultiAssignRef";
+                       break;
+               case T_TypeCast:
+                       return "TypeCast";
+                       break;
+               case T_CollateClause:
+                       return "CollateClause";
+                       break;
+               case T_SortBy:
+                       return "SortBy";
+                       break;
+               case T_WindowDef:
+                       return "WindowDef";
+                       break;
+               case T_RangeSubselect:
+                       return "RangeSubselect";
+                       break;
+               case T_RangeFunction:
+                       return "RangeFunction";
+                       break;
+               case T_RangeTableSample:
+                       return "RangeTableSample";
+                       break;
+               case T_RangeTableFunc:
+                       return "RangeTableFunc";
+                       break;
+               case T_RangeTableFuncCol:
+                       return "RangeTableFuncCol";
+                       break;
+               case T_TypeName:
+                       return "TypeName";
+                       break;
+               case T_ColumnDef:
+                       return "ColumnDef";
+                       break;
+               case T_IndexElem:
+                       return "IndexElem";
+                       break;
+               case T_StatsElem:
+                       return "StatsElem";
+                       break;
+               case T_Constraint:
+                       return "Constraint";
+                       break;
+               case T_DefElem:
+                       return "DefElem";
+                       break;
+               case T_RangeTblEntry:
+                       return "RangeTblEntry";
+                       break;
+               case T_RangeTblFunction:
+                       return "RangeTblFunction";
+                       break;
+               case T_TableSampleClause:
+                       return "TableSampleClause";
+                       break;
+               case T_WithCheckOption:
+                       return "WithCheckOption";
+                       break;
+               case T_SortGroupClause:
+                       return "SortGroupClause";
+                       break;
+               case T_GroupingSet:
+                       return "GroupingSet";
+                       break;
+               case T_WindowClause:
+                       return "WindowClause";
+                       break;
+               case T_ObjectWithArgs:
+                       return "ObjectWithArgs";
+                       break;
+               case T_AccessPriv:
+                       return "AccessPriv";
+                       break;
+               case T_CreateOpClassItem:
+                       return "CreateOpClassItem";
+                       break;
+               case T_TableLikeClause:
+                       return "TableLikeClause";
+                       break;
+               case T_FunctionParameter:
+                       return "FunctionParameter";
+                       break;
+               case T_LockingClause:
+                       return "LockingClause";
+                       break;
+               case T_RowMarkClause:
+                       return "RowMarkClause";
+                       break;
+               case T_XmlSerialize:
+                       return "XmlSerialize";
+                       break;
+               case T_WithClause:
+                       return "WithClause";
+                       break;
+               case T_InferClause:
+                       return "InferClause";
+                       break;
+               case T_OnConflictClause:
+                       return "OnConflictClause";
+                       break;
+               case T_CTESearchClause:
+                       return "CTESearchClause";
+                       break;
+               case T_CTECycleClause:
+                       return "CTECycleClause";
+                       break;
+               case T_CommonTableExpr:
+                       return "CommonTableExpr";
+                       break;
+               case T_RoleSpec:
+                       return "RoleSpec";
+                       break;
+               case T_TriggerTransition:
+                       return "TriggerTransition";
+                       break;
+               case T_PartitionElem:
+                       return "PartitionElem";
+                       break;
+               case T_PartitionSpec:
+                       return "PartitionSpec";
+                       break;
+               case T_PartitionBoundSpec:
+                       return "PartitionBoundSpec";
+                       break;
+               case T_PartitionRangeDatum:
+                       return "PartitionRangeDatum";
+                       break;
+               case T_PartitionCmd:
+                       return "PartitionCmd";
+                       break;
+               case T_VacuumRelation:
+                       return "VacuumRelation";
+                       break;
+               case T_PublicationObjSpec:
+                       return "PublicationObjSpec";
+                       break;
+               case T_PublicationTable:
+                       return "PublicationTable";
+                       break;
+               case T_IdentifySystemCmd:
+                       return "IdentifySystemCmd";
+                       break;
+               case T_BaseBackupCmd:
+                       return "BaseBackupCmd";
+                       break;
+               case T_CreateReplicationSlotCmd:
+                       return "CreateReplicationSlotCmd";
+                       break;
+               case T_DropReplicationSlotCmd:
+                       return "DropReplicationSlotCmd";
+                       break;
+               case T_ReadReplicationSlotCmd:
+                       return "ReadReplicationSlotCmd";
+                       break;
+               case T_StartReplicationCmd:
+                       return "StartReplicationCmd";
+                       break;
+               case T_TimeLineHistoryCmd:
+                       return "TimeLineHistoryCmd";
+                       break;
+               case T_TriggerData:
+                       return "TriggerData";
+                       break;
+               case T_EventTriggerData:
+                       return "EventTriggerData";
+                       break;
+               case T_ReturnSetInfo:
+                       return "ReturnSetInfo";
+                       break;
+               case T_WindowObjectData:
+                       return "WindowObjectData";
+                       break;
+               case T_TIDBitmap:
+                       return "TIDBitmap";
+                       break;
+               case T_InlineCodeBlock:
+                       return "InlineCodeBlock";
+                       break;
+               case T_FdwRoutine:
+                       return "FdwRoutine";
+                       break;
+               case T_IndexAmRoutine:
+                       return "IndexAmRoutine";
+                       break;
+               case T_TableAmRoutine:
+                       return "TableAmRoutine";
+                       break;
+               case T_TsmRoutine:
+                       return "TsmRoutine";
+                       break;
+               case T_ForeignKeyCacheInfo:
+                       return "ForeignKeyCacheInfo";
+                       break;
+               case T_CallContext:
+                       return "CallContext";
+                       break;
+               case T_SupportRequestSimplify:
+                       return "SupportRequestSimplify";
+                       break;
+               case T_SupportRequestSelectivity:
+                       return "SupportRequestSelectivity";
+                       break;
+               case T_SupportRequestCost:
+                       return "SupportRequestCost";
+                       break;
+               case T_SupportRequestRows:
+                       return "SupportRequestRows";
+                       break;
+               case T_SupportRequestIndexCondition:
+                       return "SupportRequestIndexCondition";
+                       break;
                default:
                        break;
        }
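
For context: this hunk is the body of the node-tag-to-name helper in what is evidently the test_oat_hooks module (the same file whose accesstype_arg_to_string hunk follows). pgindent changes nothing semantic here; it only splits each one-line case onto three lines, keeping the unreachable break; after every return. A sketch of the enclosing function's shape — the name and the fallback behavior are assumptions, since the hunk shows only the switch body:

    static const char *
    nodetag_to_string(NodeTag tag)
    {
        switch (tag)
        {
            case T_Invalid:
                return "Invalid";
                break;          /* unreachable, but kept by project style */
            /* ... one case per NodeTag, exactly as listed above ... */
            default:
                break;
        }
        return "UNRECOGNIZED NODE TYPE";    /* assumed fallback */
    }
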
@@ -928,45 +1792,46 @@ accesstype_arg_to_string(ObjectAccessType access, void *arg)
        {
                case OAT_POST_CREATE:
                        {
-                               ObjectAccessPostCreate *pc_arg = (ObjectAccessPostCreate *)arg;
+                               ObjectAccessPostCreate *pc_arg = (ObjectAccessPostCreate *) arg;
+
                                return pstrdup(pc_arg->is_internal ? "internal" : "explicit");
                        }
                        break;
                case OAT_DROP:
                        {
-                               ObjectAccessDrop *drop_arg = (ObjectAccessDrop *)arg;
+                               ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg;
 
                                return psprintf("%s%s%s%s%s%s",
-                                       ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
-                                               ? "internal action," : ""),
-                                       ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
-                                               ? "concurrent drop," : ""),
-                                       ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
-                                               ? "suppress notices," : ""),
-                                       ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
-                                               ? "keep original object," : ""),
-                                       ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
-                                               ? "keep extensions," : ""),
-                                       ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
-                                               ? "normal concurrent drop," : ""));
+                                                               ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+                                                                ? "internal action," : ""),
+                                                               ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+                                                                ? "concurrent drop," : ""),
+                                                               ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+                                                                ? "suppress notices," : ""),
+                                                               ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+                                                                ? "keep original object," : ""),
+                                                               ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+                                                                ? "keep extensions," : ""),
+                                                               ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+                                                                ? "normal concurrent drop," : ""));
                        }
                        break;
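
A note on the OAT_DROP hunk just above: all six ternaries test the same bit, PERFORM_DELETION_INTERNAL, although the six labels plainly describe six distinct PERFORM_DELETION_* flags from catalog/dependency.h. A mechanical reindent cannot fix that copy-and-paste slip, so it survives on both sides of the diff. The presumably intended decoding would be the following sketch (not part of this commit; the label-to-flag mapping is inferred from the flag names):

    return psprintf("%s%s%s%s%s%s",
                    ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
                     ? "internal action," : ""),
                    ((drop_arg->dropflags & PERFORM_DELETION_CONCURRENTLY)
                     ? "concurrent drop," : ""),
                    ((drop_arg->dropflags & PERFORM_DELETION_QUIETLY)
                     ? "suppress notices," : ""),
                    ((drop_arg->dropflags & PERFORM_DELETION_SKIP_ORIGINAL)
                     ? "keep original object," : ""),
                    ((drop_arg->dropflags & PERFORM_DELETION_SKIP_EXTENSIONS)
                     ? "keep extensions," : ""),
                    ((drop_arg->dropflags & PERFORM_DELETION_CONCURRENT_LOCK)
                     ? "normal concurrent drop," : ""));
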
                case OAT_POST_ALTER:
                        {
-                               ObjectAccessPostAlter *pa_arg = (ObjectAccessPostAlter*)arg;
+                               ObjectAccessPostAlter *pa_arg = (ObjectAccessPostAlter *) arg;
 
                                return psprintf("%s %s auxiliary object",
-                                       (pa_arg->is_internal ? "internal" : "explicit"),
-                                       (OidIsValid(pa_arg->auxiliary_id) ? "with" : "without"));
+                                                               (pa_arg->is_internal ? "internal" : "explicit"),
+                                                               (OidIsValid(pa_arg->auxiliary_id) ? "with" : "without"));
                        }
                        break;
                case OAT_NAMESPACE_SEARCH:
                        {
-                               ObjectAccessNamespaceSearch *ns_arg = (ObjectAccessNamespaceSearch *)arg;
+                               ObjectAccessNamespaceSearch *ns_arg = (ObjectAccessNamespaceSearch *) arg;
 
                                return psprintf("%s, %s",
-                                       (ns_arg->ereport_on_violation ? "report on violation" : "no report on violation"),
-                                       (ns_arg->result ? "allowed" : "denied"));
+                                                               (ns_arg->ereport_on_violation ? "report on violation" : "no report on violation"),
+                                                               (ns_arg->result ? "allowed" : "denied"));
                        }
                        break;
                case OAT_TRUNCATE:
index d842f934a3a36e5fc80c9222dc5debfbad62bc63..f5da6bf46d65e52936b57520ba91b11dfdd2ca07 100644
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
 use PostgreSQL::Test::Utils;
 use Test::More;
 
-my $tempdir       = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
 
 ###############################################################
 # This structure is based off of the src/bin/pg_dump/t test
index 9a2ada0a103ab48245162fcc5d5350a3c9dc7e76..f842be1a72bd5f4a92aaac2abc7e42e33ed321d1 100644
@@ -135,8 +135,8 @@ INIT
                $test_pghost = PostgreSQL::Test::Utils::tempdir_short;
                $test_pghost =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os;
        }
-       $ENV{PGHOST}        = $test_pghost;
-       $ENV{PGDATABASE}    = 'postgres';
+       $ENV{PGHOST}     = $test_pghost;
+       $ENV{PGDATABASE} = 'postgres';
 
        # Tracking of last port value assigned to accelerate free port lookup.
        $last_port_assigned = int(rand() * 16384) + 49152;
@@ -409,8 +409,10 @@ sub set_replication_conf
          or croak "set_replication_conf only works with the default host";
 
        open my $hba, '>>', "$pgdata/pg_hba.conf";
-       print $hba "\n# Allow replication (set up by PostgreSQL::Test::Cluster.pm)\n";
-       if ($PostgreSQL::Test::Utils::windows_os && !$PostgreSQL::Test::Utils::use_unix_sockets)
+       print $hba
+         "\n# Allow replication (set up by PostgreSQL::Test::Cluster.pm)\n";
+       if ($PostgreSQL::Test::Utils::windows_os
+               && !$PostgreSQL::Test::Utils::use_unix_sockets)
        {
                print $hba
                  "host replication all $test_localhost/32 sspi include_realm=1 map=regress\n";
@@ -459,10 +461,10 @@ sub init
        mkdir $self->backup_dir;
        mkdir $self->archive_dir;
 
-       PostgreSQL::Test::Utils::system_or_bail('initdb', '-D', $pgdata, '-A', 'trust', '-N',
-               @{ $params{extra} });
-       PostgreSQL::Test::Utils::system_or_bail($ENV{PG_REGRESS}, '--config-auth', $pgdata,
-               @{ $params{auth_extra} });
+       PostgreSQL::Test::Utils::system_or_bail('initdb', '-D', $pgdata, '-A',
+               'trust', '-N', @{ $params{extra} });
+       PostgreSQL::Test::Utils::system_or_bail($ENV{PG_REGRESS},
+               '--config-auth', $pgdata, @{ $params{auth_extra} });
 
        open my $conf, '>>', "$pgdata/postgresql.conf";
        print $conf "\n# Added by PostgreSQL::Test::Cluster.pm\n";
@@ -575,7 +577,7 @@ sub adjust_conf
        my $conffile = $self->data_dir . '/' . $filename;
 
        my $contents = PostgreSQL::Test::Utils::slurp_file($conffile);
-       my @lines    = split(/\n/, $contents);
+       my @lines = split(/\n/, $contents);
        my @result;
        my $eq = $skip_equals ? '' : '= ';
        foreach my $line (@lines)
@@ -809,8 +811,10 @@ sub start
        # sub init) so that it does not get copied to standbys.
        # -w is now the default but having it here does no harm and helps
        # compatibility with older versions.
-       $ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-w', '-D', $self->data_dir, '-l',
-               $self->logfile, '-o', "--cluster-name=$name", 'start');
+       $ret = PostgreSQL::Test::Utils::system_log(
+               'pg_ctl', '-w',           '-D', $self->data_dir,
+               '-l',     $self->logfile, '-o', "--cluster-name=$name",
+               'start');
 
        if ($ret != 0)
        {
@@ -919,7 +923,8 @@ sub reload
        local %ENV = $self->_get_env();
 
        print "### Reloading node \"$name\"\n";
-       PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, 'reload');
+       PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata,
+               'reload');
        return;
 }
 
@@ -945,8 +950,8 @@ sub restart
 
        # -w is now the default but having it here does no harm and helps
        # compatibility with older versions.
-       PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-w', '-D', $pgdata, '-l', $logfile,
-               'restart');
+       PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-w', '-D', $pgdata,
+               '-l', $logfile, 'restart');
 
        $self->_update_pid(1);
        return;
@@ -971,8 +976,8 @@ sub promote
        local %ENV = $self->_get_env();
 
        print "### Promoting node \"$name\"\n";
-       PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
-               'promote');
+       PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
+               $logfile, 'promote');
        return;
 }
 
@@ -995,8 +1000,8 @@ sub logrotate
        local %ENV = $self->_get_env();
 
        print "### Rotating log in node \"$name\"\n";
-       PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
-               'logrotate');
+       PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
+               $logfile, 'logrotate');
        return;
 }
 
@@ -1232,13 +1237,16 @@ sub new
        my $testname = basename($0);
        $testname =~ s/\.[^.]+$//;
        my $node = {
-               _port    => $port,
-               _host    => $host,
-               _basedir => "$PostgreSQL::Test::Utils::tmp_check/t_${testname}_${name}_data",
-               _name    => $name,
+               _port => $port,
+               _host => $host,
+               _basedir =>
+                 "$PostgreSQL::Test::Utils::tmp_check/t_${testname}_${name}_data",
+               _name               => $name,
                _logfile_generation => 0,
-               _logfile_base       => "$PostgreSQL::Test::Utils::log_path/${testname}_${name}",
-               _logfile            => "$PostgreSQL::Test::Utils::log_path/${testname}_${name}.log"
+               _logfile_base =>
+                 "$PostgreSQL::Test::Utils::log_path/${testname}_${name}",
+               _logfile =>
+                 "$PostgreSQL::Test::Utils::log_path/${testname}_${name}.log"
        };
 
        if ($params{install_path})
@@ -1261,8 +1269,8 @@ sub new
        # isn't fully compatible. Warn if the version is too old and thus we don't
        # have a subclass of this class.
        if (ref $ver && $ver < $min_compat)
-    {
-               my $maj      = $ver->major(separator => '_');
+       {
+               my $maj = $ver->major(separator => '_');
                my $subclass = $class . "::V_$maj";
                if ($subclass->isa($class))
                {
@@ -1270,9 +1278,10 @@ sub new
                }
                else
                {
-                       carp "PostgreSQL::Test::Cluster isn't fully compatible with version $ver";
+                       carp
+                         "PostgreSQL::Test::Cluster isn't fully compatible with version $ver";
                }
-    }
+       }
 
        # Add node to list of nodes
        push(@all_nodes, $node);
@@ -1528,7 +1537,8 @@ END
                next if defined $ENV{'PG_TEST_NOCLEAN'};
 
                # clean basedir on clean test invocation
-               $node->clean_node if $exit_code == 0 && PostgreSQL::Test::Utils::all_tests_passing();
+               $node->clean_node
+                 if $exit_code == 0 && PostgreSQL::Test::Utils::all_tests_passing();
        }
 
        $? = $exit_code;
@@ -2178,7 +2188,8 @@ sub connect_ok
 
        if (@log_like or @log_unlike)
        {
-               my $log_contents = PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+               my $log_contents =
+                 PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
 
                while (my $regex = shift @log_like)
                {
@@ -2248,7 +2259,8 @@ sub connect_fails
 
        if (@log_like or @log_unlike)
        {
-               my $log_contents = PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+               my $log_contents =
+                 PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
 
                while (my $regex = shift @log_like)
                {
@@ -2444,7 +2456,8 @@ sub issues_sql_like
 
        my $result = PostgreSQL::Test::Utils::run_log($cmd);
        ok($result, "@$cmd exit code 0");
-       my $log = PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+       my $log =
+         PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
        like($log, $expected_sql, "$test_name: SQL found in server log");
        return;
 }
@@ -2550,7 +2563,8 @@ sub wait_for_catchup
          unless exists($valid_modes{$mode});
 
        # Allow passing of a PostgreSQL::Test::Cluster instance as shorthand
-       if (blessed($standby_name) && $standby_name->isa("PostgreSQL::Test::Cluster"))
+       if (blessed($standby_name)
+               && $standby_name->isa("PostgreSQL::Test::Cluster"))
        {
                $standby_name = $standby_name->name;
        }
@@ -2566,8 +2580,7 @@ sub wait_for_catchup
          . $self->name . "\n";
        # Before release 12 walreceiver just set the application name to
        # "walreceiver"
-       my $query =
-         qq[SELECT '$target_lsn' <= ${mode}_lsn AND state = 'streaming'
+       my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn AND state = 'streaming'
          FROM pg_catalog.pg_stat_replication
          WHERE application_name IN ('$standby_name', 'walreceiver')];
        $self->poll_query_until('postgres', $query)
@@ -2641,9 +2654,10 @@ sub wait_for_log
 
        while ($attempts < $max_attempts)
        {
-               my $log = PostgreSQL::Test::Utils::slurp_file($self->logfile, $offset);
+               my $log =
+                 PostgreSQL::Test::Utils::slurp_file($self->logfile, $offset);
 
-               return $offset+length($log) if ($log =~ m/$regexp/);
+               return $offset + length($log) if ($log =~ m/$regexp/);
 
                # Wait 0.1 second before retrying.
                usleep(100_000);
@@ -2858,7 +2872,8 @@ sub corrupt_page_checksum
 
 ##########################################################################
 
-package PostgreSQL::Test::Cluster::V_11; ## no critic (ProhibitMultiplePackages)
+package PostgreSQL::Test::Cluster::V_11
+  ;    ## no critic (ProhibitMultiplePackages)
 
 # parent.pm is not present in all perl versions before 5.10.1, so instead
 # do directly what it would do for this:
@@ -2874,21 +2889,22 @@ sub _recovery_file { return "recovery.conf"; }
 
 sub set_standby_mode
 {
-    my $self = shift;
-    $self->append_conf("recovery.conf", "standby_mode = on\n");
+       my $self = shift;
+       $self->append_conf("recovery.conf", "standby_mode = on\n");
 }
 
 sub init
 {
-    my ($self, %params) = @_;
-    $self->SUPER::init(%params);
-    $self->adjust_conf('postgresql.conf', 'max_wal_senders',
-                      $params{allows_streaming} ? 5 : 0);
+       my ($self, %params) = @_;
+       $self->SUPER::init(%params);
+       $self->adjust_conf('postgresql.conf', 'max_wal_senders',
+               $params{allows_streaming} ? 5 : 0);
 }
 
 ##########################################################################
 
-package PostgreSQL::Test::Cluster::V_10; ## no critic (ProhibitMultiplePackages)
+package PostgreSQL::Test::Cluster::V_10
+  ;    ## no critic (ProhibitMultiplePackages)
 
 # use parent -norequire, qw(PostgreSQL::Test::Cluster::V_11);
 push @PostgreSQL::Test::Cluster::V_10::ISA, 'PostgreSQL::Test::Cluster::V_11';
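The Cluster.pm hunks above are typical pgperltidy output: calls that overflow the line-length limit are re-wrapped so the trailing arguments move to a continuation line indented one level deeper. A minimal, runnable sketch of the resulting layout (the helper name and arguments are hypothetical, chosen only to show the wrapping):

    use strict;
    use warnings;

    # Hypothetical helper; only the call layout matters here.
    sub run_cluster_command { return join(' ', @_); }

    # Arguments past the line-length limit drop to a continuation line:
    my $out = run_cluster_command('pg_ctl', '-w', '-D', '/tmp/pgdata',
        '-l', '/tmp/server.log', 'start');
    print "$out\n";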
index 7cb8591fed2e339fc700f260571ff7c9a6e87677..ec13714c3311c32625624a9af10d95bf91885431 100644
@@ -27,13 +27,13 @@ BEGIN { $last_time = time; }
 
 sub _time_str
 {
-       my $tm = time;
+       my $tm   = time;
        my $diff = $tm - $last_time;
        $last_time = $tm;
        my ($sec, $min, $hour) = localtime($tm);
        my $msec = int(1000 * ($tm - int($tm)));
        return sprintf("[%.2d:%.2d:%.2d.%.3d](%.3fs) ",
-                                  $hour, $min, $sec, $msec, $diff);
+               $hour, $min, $sec, $msec, $diff);
 }
 
 sub TIEHANDLE
@@ -50,11 +50,11 @@ sub PRINT
        # the original stdout, which is what PROVE sees. Additional decorations
        # confuse it, so only put out the time string on files after the first.
        my $skip = 1;
-       my $ts = _time_str;
+       my $ts   = _time_str;
        for my $fh (@$self)
        {
                print $fh ($skip ? "" : $ts), @_ or $ok = 0;
-               $fh->flush   or $ok = 0;
+               $fh->flush or $ok = 0;
                $skip = 0;
        }
        return $ok;
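The hunk above shows perltidy's vertical alignment working in both directions: the '=' signs of adjacent simple assignments are padded into a common column, while padding that no longer lines up with a neighbour (the extra spaces after $fh->flush) is collapsed. A small sketch of the effect:

    use strict;
    use warnings;

    # perltidy pads '=' across a run of adjacent assignments...
    my $tm   = time;
    my $diff = $tm - 0;

    # ...and removes padding that has nothing left to align with.
    print "elapsed: $diff\n" or die "cannot print: $!";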
index dca1b3b17c4e95c630d8e52331916f9aa25446ac..1ca2cc591708be41c79f02de542e7e64feeba9c5 100644
@@ -142,14 +142,15 @@ BEGIN
        # Must be set early
        $windows_os = $Config{osname} eq 'MSWin32' || $Config{osname} eq 'msys';
        # Check if this environment is MSYS2.
-       $is_msys2 = $windows_os && -x '/usr/bin/uname'  &&
-         `uname -or` =~ /^[2-9].*Msys/;
+       $is_msys2 =
+            $windows_os
+         && -x '/usr/bin/uname'
+         && `uname -or` =~ /^[2-9].*Msys/;
 
        if ($windows_os)
        {
                require Win32API::File;
-               Win32API::File->import(
-                       qw(createFile OsFHandleOpen CloseHandle));
+               Win32API::File->import(qw(createFile OsFHandleOpen CloseHandle));
        }
 
        # Specifies whether to use Unix sockets for test setups.  On
@@ -428,12 +429,16 @@ sub pump_until
                last if $$stream =~ /$until/;
                if ($timeout->is_expired)
                {
-                       diag("pump_until: timeout expired when searching for \"$until\" with stream: \"$$stream\"");
+                       diag(
+                               "pump_until: timeout expired when searching for \"$until\" with stream: \"$$stream\""
+                       );
                        return 0;
                }
                if (not $proc->pumpable())
                {
-                       diag("pump_until: process terminated unexpectedly when searching for \"$until\" with stream: \"$$stream\"");
+                       diag(
+                               "pump_until: process terminated unexpectedly when searching for \"$until\" with stream: \"$$stream\""
+                       );
                        return 0;
                }
                $proc->pump();
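In the pump_until hunk above, a string argument too long to fit even on its own continuation line forces the call fully open: the opening parenthesis ends the first line, the argument stands alone, and the closing parenthesis gets its own line. A sketch of that layout, using a hypothetical stand-in for Test::More's diag():

    use strict;
    use warnings;

    sub diag_sketch { print STDERR @_, "\n"; }    # hypothetical stand-in

    # One oversized argument produces the open/argument/close layout:
    diag_sketch(
        "pump_until: timeout expired when searching for a pattern in a long stream dump"
    );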
index 30d328103b524972bd3f3c04eb0d3715ffeacf98..8f704911895f348a93ceb301e5f7ebc818c00d47 100644
@@ -151,14 +151,14 @@ a dot unless the separator argument is given.
 
 sub major
 {
-    my ($self, %params) = @_;
-    my $result = $self->{num}->[0];
-    if ($result + 0 < 10)
-    {
-        my $sep = $params{separator} || '.';
-        $result .= "$sep$self->{num}->[1]";
-    }
-    return $result;
+       my ($self, %params) = @_;
+       my $result = $self->{num}->[0];
+       if ($result + 0 < 10)
+       {
+               my $sep = $params{separator} || '.';
+               $result .= "$sep$self->{num}->[1]";
+       }
+       return $result;
 }
 
 1;
index 583ee87da829816536574454ecbc5a825214640d..86864098f9e27655f64acb41687a947206bdfdf0 100644
@@ -374,7 +374,8 @@ sub replay_check
        );
        my $primary_lsn = $node_primary->lsn('write');
        $node_primary->wait_for_catchup($node_standby_1, 'replay', $primary_lsn);
-       $node_standby_1->wait_for_catchup($node_standby_2, 'replay', $primary_lsn);
+       $node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+               $primary_lsn);
 
        $node_standby_1->safe_psql('postgres',
                qq[SELECT 1 FROM replayed WHERE val = $newval])
index 01c52d8e7f76b123655a4e3bb5697712246cca92..d69da4e5efd30f01f5e7af23b806f8254c2fc65c 100644
@@ -125,7 +125,7 @@ my $log_location = -s $node_standby2->logfile;
 $node_standby2->promote;
 
 # Check the logs of the standby to see that the commands have failed.
-my $log_contents       = slurp_file($node_standby2->logfile, $log_location);
+my $log_contents = slurp_file($node_standby2->logfile, $log_location);
 my $node_standby2_data = $node_standby2->data_dir;
 
 like(
index 3ccced2ea24036f95523f3a130436f38a677fdcc..0cd0467fbb83872ce086db3838b66e1cf1b25390 100644
@@ -206,62 +206,68 @@ my $stats_test_slot2 = 'logical_slot';
 # Test that reset works for pg_stat_replication_slots
 
 # Stats exist for stats test slot 1
-is($node_primary->safe_psql(
-       'postgres',
-       qq(SELECT total_bytes > 0, stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t|t), qq(Total bytes is > 0 and stats_reset is NULL for slot '$stats_test_slot1'.));
+is( $node_primary->safe_psql(
+               'postgres',
+               qq(SELECT total_bytes > 0, stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+       ),
+       qq(t|t),
+       qq(Total bytes is > 0 and stats_reset is NULL for slot '$stats_test_slot1'.)
+);
 
 # Do reset of stats for stats test slot 1
-$node_primary->safe_psql(
-       'postgres',
-       qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1'))
-);
+$node_primary->safe_psql('postgres',
+       qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1')));
 
 # Get reset value after reset
-my $reset1 = $node_primary->safe_psql(
-       'postgres',
+my $reset1 = $node_primary->safe_psql('postgres',
        qq(SELECT stats_reset FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
 );
 
 # Do reset again
-$node_primary->safe_psql(
-       'postgres',
-       qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1'))
-);
+$node_primary->safe_psql('postgres',
+       qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1')));
 
-is($node_primary->safe_psql(
-       'postgres',
-       qq(SELECT stats_reset > '$reset1'::timestamptz, total_bytes = 0 FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t|t), qq(Check that reset timestamp is later after the second reset of stats for slot '$stats_test_slot1' and confirm total_bytes was set to 0.));
+is( $node_primary->safe_psql(
+               'postgres',
+               qq(SELECT stats_reset > '$reset1'::timestamptz, total_bytes = 0 FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+       ),
+       qq(t|t),
+       qq(Check that reset timestamp is later after the second reset of stats for slot '$stats_test_slot1' and confirm total_bytes was set to 0.)
+);
 
 # Check that test slot 2 has NULL in reset timestamp
-is($node_primary->safe_psql(
-       'postgres',
-       qq(SELECT stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
-), qq(t), qq(Stats_reset is NULL for slot '$stats_test_slot2' before reset.));
+is( $node_primary->safe_psql(
+               'postgres',
+               qq(SELECT stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
+       ),
+       qq(t),
+       qq(Stats_reset is NULL for slot '$stats_test_slot2' before reset.));
 
 # Get reset value again for test slot 1
-$reset1 = $node_primary->safe_psql(
-       'postgres',
+$reset1 = $node_primary->safe_psql('postgres',
        qq(SELECT stats_reset FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
 );
 
 # Reset stats for all replication slots
-$node_primary->safe_psql(
-       'postgres',
-       qq(SELECT pg_stat_reset_replication_slot(NULL))
-);
+$node_primary->safe_psql('postgres',
+       qq(SELECT pg_stat_reset_replication_slot(NULL)));
 
 # Check that test slot 2 reset timestamp is no longer NULL after reset
-is($node_primary->safe_psql(
-       'postgres',
-       qq(SELECT stats_reset IS NOT NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
-), qq(t), qq(Stats_reset is not NULL for slot '$stats_test_slot2' after reset all.));
-
-is($node_primary->safe_psql(
-       'postgres',
-       qq(SELECT stats_reset > '$reset1'::timestamptz FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t), qq(Check that reset timestamp is later after resetting stats for slot '$stats_test_slot1' again.));
+is( $node_primary->safe_psql(
+               'postgres',
+               qq(SELECT stats_reset IS NOT NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
+       ),
+       qq(t),
+       qq(Stats_reset is not NULL for slot '$stats_test_slot2' after reset all.)
+);
+
+is( $node_primary->safe_psql(
+               'postgres',
+               qq(SELECT stats_reset > '$reset1'::timestamptz FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+       ),
+       qq(t),
+       qq(Check that reset timestamp is later after resetting stats for slot '$stats_test_slot1' again.)
+);
 
 # done with the node
 $node_primary->stop;
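The is() rewrites above show how pgperltidy lays out a test call whose first argument is itself a multi-line call: a space follows the opening 'is(', the nested call is indented a further level, and the expected value and test name each take their own line. A runnable sketch, with a hypothetical stand-in for safe_psql():

    use strict;
    use warnings;
    use Test::More tests => 1;

    sub fake_psql { return 't|t'; }    # hypothetical stand-in for safe_psql

    is( fake_psql(
            'postgres',
            qq(SELECT total_bytes > 0, stats_reset IS NULL FROM some_view)),
        qq(t|t),
        qq(both columns report true));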
index 10da6cb0c1c1df3859775d535e66c0501fb57b6d..c22844d39c04b88114c81b70003a032a8ed7bf5e 100644
@@ -66,7 +66,8 @@ CREATE TABLE alive(status text);
 INSERT INTO alive VALUES($$committed-before-sigquit$$);
 SELECT pg_backend_pid();
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+               $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
        'acquired pid for SIGQUIT');
 my $pid = $killme_stdout;
 chomp($pid);
@@ -78,7 +79,9 @@ $killme_stdin .= q[
 BEGIN;
 INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status;
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigquit/m),
+ok( pump_until(
+               $killme,         $psql_timeout,
+               \$killme_stdout, qr/in-progress-before-sigquit/m),
        'inserted in-progress-before-sigquit');
 $killme_stdout = '';
 $killme_stderr = '';
@@ -91,7 +94,8 @@ $monitor_stdin .= q[
 SELECT $$psql-connected$$;
 SELECT pg_sleep(3600);
 ];
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
+ok( pump_until(
+               $monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
        'monitor connected');
 $monitor_stdout = '';
 $monitor_stderr = '';
@@ -145,7 +149,8 @@ $monitor->run();
 $killme_stdin .= q[
 SELECT pg_backend_pid();
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+               $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
        "acquired pid for SIGKILL");
 $pid = $killme_stdout;
 chomp($pid);
@@ -158,7 +163,9 @@ INSERT INTO alive VALUES($$committed-before-sigkill$$) RETURNING status;
 BEGIN;
 INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status;
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+               $killme,         $psql_timeout,
+               \$killme_stdout, qr/in-progress-before-sigkill/m),
        'inserted in-progress-before-sigkill');
 $killme_stdout = '';
 $killme_stderr = '';
@@ -170,7 +177,8 @@ $monitor_stdin .= q[
 SELECT $$psql-connected$$;
 SELECT pg_sleep(3600);
 ];
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
+ok( pump_until(
+               $monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
        'monitor connected');
 $monitor_stdout = '';
 $monitor_stderr = '';
index 0dca3f69fe3d3aadfc22605851a302c6408e271f..72895104ed93ce025f71cc103d2493afc937e599 100644
@@ -44,7 +44,8 @@ is($node->safe_psql('postgres', "SELECT nextval('seq_unlogged')"),
 
 my $tablespaceDir = PostgreSQL::Test::Utils::tempdir;
 
-$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'");
+$node->safe_psql('postgres',
+       "CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'");
 $node->safe_psql('postgres',
        'CREATE UNLOGGED TABLE ts1_unlogged (id int) TABLESPACE ts1');
 
index 5654f3b545537e5d96f1664b72f1061213c0f50b..6bbf55c3ee127303e5f511953776439f86cd8075 100644
@@ -347,16 +347,18 @@ while (1)
        my ($stdout, $stderr);
 
        $senderpid = $node_primary3->safe_psql('postgres',
-           "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walsender'");
+               "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walsender'");
 
        last if $senderpid =~ qr/^[0-9]+$/;
 
        diag "multiple walsenders active in iteration $i";
 
        # show information about all active connections
-       $node_primary3->psql('postgres',
-                                                "\\a\\t\nSELECT * FROM pg_stat_activity",
-                                                stdout => \$stdout, stderr => \$stderr);
+       $node_primary3->psql(
+               'postgres',
+               "\\a\\t\nSELECT * FROM pg_stat_activity",
+               stdout => \$stdout,
+               stderr => \$stderr);
        diag $stdout, $stderr;
 
        # unlikely that the problem would resolve after 15s, so give up at point
index 24fb141785da5ad3db4d83a09273afaad3378e0b..53a55c7a8acac7f804e9914bb779b0c2d542ce3c 100644
@@ -53,7 +53,8 @@ my $killme = IPC::Run::start(
 $killme_stdin .= q[
 SELECT pg_backend_pid();
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+               $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
        'acquired pid for SIGKILL');
 my $pid = $killme_stdout;
 chomp($pid);
@@ -82,7 +83,8 @@ BEGIN;
 INSERT INTO tab_crash (a) VALUES(1);
 SELECT $$insert-tuple-to-lock-next-insert$$;
 ];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+       qr/insert-tuple-to-lock-next-insert/m);
 $killme_stdout2 = '';
 $killme_stderr2 = '';
 
@@ -95,7 +97,9 @@ BEGIN;
 SELECT $$in-progress-before-sigkill$$;
 INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+               $killme,         $psql_timeout,
+               \$killme_stdout, qr/in-progress-before-sigkill/m),
        'insert in-progress-before-sigkill');
 $killme_stdout = '';
 $killme_stderr = '';
@@ -117,7 +121,8 @@ END; $c$;
 SELECT $$insert-tuple-lock-waiting$$;
 ];
 
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-lock-waiting/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+       qr/insert-tuple-lock-waiting/m);
 $killme_stdout2 = '';
 $killme_stderr2 = '';
 
@@ -167,7 +172,8 @@ $killme->run();
 $killme_stdin .= q[
 SELECT pg_backend_pid();
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+               $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
        'acquired pid for SIGKILL');
 $pid = $killme_stdout;
 chomp($pid);
@@ -184,7 +190,8 @@ BEGIN;
 INSERT INTO tab_crash (a) VALUES(1);
 SELECT $$insert-tuple-to-lock-next-insert$$;
 ];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+       qr/insert-tuple-to-lock-next-insert/m);
 $killme_stdout2 = '';
 $killme_stderr2 = '';
 
@@ -197,7 +204,9 @@ BEGIN;
 SELECT $$in-progress-before-sigkill$$;
 INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+               $killme,         $psql_timeout,
+               \$killme_stdout, qr/in-progress-before-sigkill/m),
        'insert in-progress-before-sigkill');
 $killme_stdout = '';
 $killme_stderr = '';
@@ -219,7 +228,8 @@ END; $c$;
 SELECT $$insert-tuple-lock-waiting$$;
 ];
 
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-lock-waiting/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+       qr/insert-tuple-lock-waiting/m);
 $killme_stdout2 = '';
 $killme_stderr2 = '';
 
index be9799c0a4686c7f5cbd7670d3853a6c00ae60b0..fdb4ea0bf50e1766ae9782c91a33d27ab6a8de0e 100644
@@ -19,7 +19,8 @@ $node_primary->init(allows_streaming => 1);
 
 # Increase some settings that Cluster->new makes too low by default.
 $node_primary->adjust_conf('postgresql.conf', 'max_connections', '25');
-$node_primary->append_conf('postgresql.conf', 'max_prepared_transactions = 10');
+$node_primary->append_conf('postgresql.conf',
+       'max_prepared_transactions = 10');
 # We'll stick with Cluster->new's small default shared_buffers, but since that
 # makes synchronized seqscans more probable, it risks changing the results of
 # some test queries.  Disable synchronized seqscans to prevent that.
@@ -27,18 +28,19 @@ $node_primary->append_conf('postgresql.conf', 'synchronize_seqscans = off');
 
 # WAL consistency checking is resource intensive so require opt-in with the
 # PG_TEST_EXTRA environment variable.
-if ($ENV{PG_TEST_EXTRA} &&
-       $ENV{PG_TEST_EXTRA} =~ m/\bwal_consistency_checking\b/) {
+if (   $ENV{PG_TEST_EXTRA}
+       && $ENV{PG_TEST_EXTRA} =~ m/\bwal_consistency_checking\b/)
+{
        $node_primary->append_conf('postgresql.conf',
                'wal_consistency_checking = all');
 }
 
 $node_primary->start;
 is( $node_primary->psql(
-        'postgres',
-        qq[SELECT pg_create_physical_replication_slot('standby_1');]),
-    0,
-    'physical slot created on primary');
+               'postgres',
+               qq[SELECT pg_create_physical_replication_slot('standby_1');]),
+       0,
+       'physical slot created on primary');
 my $backup_name = 'my_backup';
 
 # Take backup
@@ -49,25 +51,29 @@ my $node_standby_1 = PostgreSQL::Test::Cluster->new('standby_1');
 $node_standby_1->init_from_backup($node_primary, $backup_name,
        has_streaming => 1);
 $node_standby_1->append_conf('postgresql.conf',
-    "primary_slot_name = standby_1");
+       "primary_slot_name = standby_1");
 $node_standby_1->append_conf('postgresql.conf',
        'max_standby_streaming_delay = 600s');
 $node_standby_1->start;
 
-my $dlpath = dirname($ENV{REGRESS_SHLIB});
+my $dlpath    = dirname($ENV{REGRESS_SHLIB});
 my $outputdir = $PostgreSQL::Test::Utils::tmp_check;
 
 # Run the regression tests against the primary.
 my $extra_opts = $ENV{EXTRA_REGRESS_OPTS} || "";
-my $rc = system($ENV{PG_REGRESS} . " $extra_opts " .
-                           "--dlpath=\"$dlpath\" " .
-                           "--bindir= " .
-                           "--host=" . $node_primary->host . " " .
-                           "--port=" . $node_primary->port . " " .
-                           "--schedule=../regress/parallel_schedule " .
-                           "--max-concurrent-tests=20 " .
-                           "--inputdir=../regress " .
-                           "--outputdir=\"$outputdir\"");
+my $rc =
+  system($ENV{PG_REGRESS}
+         . " $extra_opts "
+         . "--dlpath=\"$dlpath\" "
+         . "--bindir= "
+         . "--host="
+         . $node_primary->host . " "
+         . "--port="
+         . $node_primary->port . " "
+         . "--schedule=../regress/parallel_schedule "
+         . "--max-concurrent-tests=20 "
+         . "--inputdir=../regress "
+         . "--outputdir=\"$outputdir\"");
 if ($rc != 0)
 {
        # Dump out the regression diffs file, if there is one
@@ -92,12 +98,16 @@ $node_primary->wait_for_catchup($node_standby_1, 'replay',
 
 # Perform a logical dump of primary and standby, and check that they match
 command_ok(
-       [ 'pg_dumpall', '-f', $outputdir . '/primary.dump', '--no-sync',
-         '-p', $node_primary->port ],
+       [
+               'pg_dumpall', '-f', $outputdir . '/primary.dump',
+               '--no-sync', '-p', $node_primary->port
+       ],
        'dump primary server');
 command_ok(
-       [ 'pg_dumpall', '-f', $outputdir . '/standby.dump', '--no-sync',
-         '-p', $node_standby_1->port ],
+       [
+               'pg_dumpall', '-f', $outputdir . '/standby.dump',
+               '--no-sync', '-p', $node_standby_1->port
+       ],
        'dump standby server');
 command_ok(
        [ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump' ],
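The system() hunk above shows perltidy's treatment of long string concatenations: the expression breaks before each '.', so every continuation line leads with the operator and the operands stack vertically. A small runnable sketch of the same shape:

    use strict;
    use warnings;

    my $host = 'localhost';
    my $port = 5432;

    # Long concatenations break before the '.', one operand per line:
    my $cmd =
        "pg_regress"
      . " --host="
      . $host
      . " --port="
      . $port
      . " --schedule=parallel_schedule";
    print "$cmd\n";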
index 2fe8db88079e629fc6a314ab7008fd6e68b93c3c..1bf7b568ccb3eceef863fad0f6a06631417865d9 100644
@@ -273,7 +273,8 @@ $sect = "post immediate restart";
 my $wal_restart_immediate = wal_stats();
 
 cmp_ok(
-       $wal_reset_restart->{reset}, 'lt',
+       $wal_reset_restart->{reset},
+       'lt',
        $wal_restart_immediate->{reset},
        "$sect: reset timestamp is new");
 
index 8dcb3da0de9e4c2b45317e76723b53287b68cfaf..545d523edff0f4f6af509fa7c1a88e4a4de59171 100644
@@ -229,8 +229,10 @@ $expected_conflicts++;
 # Want to test recovery deadlock conflicts, not buffer pin conflicts. Without
 # changing max_standby_streaming_delay it'd be timing dependent what we hit
 # first
-$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay',
-                                                  "${PostgreSQL::Test::Utils::timeout_default}s");
+$node_standby->adjust_conf(
+       'postgresql.conf',
+       'max_standby_streaming_delay',
+       "${PostgreSQL::Test::Utils::timeout_default}s");
 $node_standby->restart();
 reconnect_and_clear();
 
@@ -289,7 +291,8 @@ check_conflict_stat("deadlock");
 
 # clean up for next tests
 $node_primary->safe_psql($test_db, qq[ROLLBACK PREPARED 'lock';]);
-$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay', '50ms');
+$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay',
+       '50ms');
 $node_standby->restart();
 reconnect_and_clear();
 
index ac9340b7dd1f413465f12b323e82a8018ab4ecb2..ae7e32763fb3c78e886091f01e3ca873343eecb2 100644
@@ -8,7 +8,8 @@ use File::Basename;
 
 my $node_primary = PostgreSQL::Test::Cluster->new('primary');
 $node_primary->init(allows_streaming => 1);
-$node_primary->append_conf('postgresql.conf', q[
+$node_primary->append_conf(
+       'postgresql.conf', q[
 allow_in_place_tablespaces = true
 log_connections=on
 # to avoid "repairing" corruption
@@ -61,28 +62,28 @@ $psql_standby{run} = IPC::Run::start(
 # rows. Using a template database + preexisting rows makes it a bit easier to
 # reproduce, because there's no cache invalidations generated.
 
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db_template OID = 50000;");
-$node_primary->safe_psql('conflict_db_template', q[
+$node_primary->safe_psql('postgres',
+       "CREATE DATABASE conflict_db_template OID = 50000;");
+$node_primary->safe_psql(
+       'conflict_db_template', q[
     CREATE TABLE large(id serial primary key, dataa text, datab text);
-    INSERT INTO large(dataa, datab) SELECT g.i::text, 1 FROM generate_series(1, 4000) g(i);]);
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
+    INSERT INTO large(dataa, datab) SELECT g.i::text, 1 FROM generate_series(1, 4000) g(i);]
+);
+$node_primary->safe_psql('postgres',
+       "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
 
-$node_primary->safe_psql('postgres', q[
+$node_primary->safe_psql(
+       'postgres', q[
     CREATE EXTENSION pg_prewarm;
     CREATE TABLE replace_sb(data text);
-    INSERT INTO replace_sb(data) SELECT random()::text FROM generate_series(1, 15000);]);
+    INSERT INTO replace_sb(data) SELECT random()::text FROM generate_series(1, 15000);]
+);
 
 $node_primary->wait_for_catchup($node_standby);
 
 # Use longrunning transactions, so that AtEOXact_SMgr doesn't close files
-send_query_and_wait(
-       \%psql_primary,
-       q[BEGIN;],
-       qr/BEGIN/m);
-send_query_and_wait(
-       \%psql_standby,
-       q[BEGIN;],
-       qr/BEGIN/m);
+send_query_and_wait(\%psql_primary, q[BEGIN;], qr/BEGIN/m);
+send_query_and_wait(\%psql_standby, q[BEGIN;], qr/BEGIN/m);
 
 # Cause lots of dirty rows in shared_buffers
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 1;");
@@ -94,10 +95,10 @@ cause_eviction(\%psql_primary, \%psql_standby);
 
 # drop and recreate database
 $node_primary->safe_psql('postgres', "DROP DATABASE conflict_db;");
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
+$node_primary->safe_psql('postgres',
+       "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
 
-verify($node_primary, $node_standby, 1,
-          "initial contents as expected");
+verify($node_primary, $node_standby, 1, "initial contents as expected");
 
 # Again cause lots of dirty rows in shared_buffers, but use a different update
 # value so we can check everything is OK
@@ -109,17 +110,17 @@ $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 2;");
 cause_eviction(\%psql_primary, \%psql_standby);
 
 verify($node_primary, $node_standby, 2,
-          "update to reused relfilenode (due to DB oid conflict) is not lost");
+       "update to reused relfilenode (due to DB oid conflict) is not lost");
 
 
 $node_primary->safe_psql('conflict_db', "VACUUM FULL large;");
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 3;");
 
-verify($node_primary, $node_standby, 3,
-          "restored contents as expected");
+verify($node_primary, $node_standby, 3, "restored contents as expected");
 
 # Test for old filehandles after moving a database in / out of tablespace
-$node_primary->safe_psql('postgres', q[CREATE TABLESPACE test_tablespace LOCATION '']);
+$node_primary->safe_psql('postgres',
+       q[CREATE TABLESPACE test_tablespace LOCATION '']);
 
 # cause dirty buffers
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 4;");
@@ -127,23 +128,25 @@ $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 4;");
 cause_eviction(\%psql_primary, \%psql_standby);
 
 # move database back / forth
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE pg_default');
+$node_primary->safe_psql('postgres',
+       'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres',
+       'ALTER DATABASE conflict_db SET TABLESPACE pg_default');
 
 # cause dirty buffers
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 5;");
 cause_eviction(\%psql_primary, \%psql_standby);
 
-verify($node_primary, $node_standby, 5,
-          "post move contents as expected");
+verify($node_primary, $node_standby, 5, "post move contents as expected");
 
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres',
+       'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
 
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 7;");
 cause_eviction(\%psql_primary, \%psql_standby);
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 8;");
-$node_primary->safe_psql('postgres', 'DROP DATABASE conflict_db');
-$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres',    'DROP DATABASE conflict_db');
+$node_primary->safe_psql('postgres',    'DROP TABLESPACE test_tablespace');
 
 $node_primary->safe_psql('postgres', 'REINDEX TABLE pg_database');
 
@@ -160,25 +163,28 @@ $node_standby->stop();
 
 # Make sure that there weren't crashes during shutdown
 
-command_like([ 'pg_controldata', $node_primary->data_dir ],
-       qr/Database cluster state:\s+shut down\n/, 'primary shut down ok');
-command_like([ 'pg_controldata', $node_standby->data_dir ],
-       qr/Database cluster state:\s+shut down in recovery\n/, 'standby shut down ok');
+command_like(
+       [ 'pg_controldata', $node_primary->data_dir ],
+       qr/Database cluster state:\s+shut down\n/,
+       'primary shut down ok');
+command_like(
+       [ 'pg_controldata', $node_standby->data_dir ],
+       qr/Database cluster state:\s+shut down in recovery\n/,
+       'standby shut down ok');
 done_testing();
 
 sub verify
 {
        my ($primary, $standby, $counter, $message) = @_;
 
-       my $query = "SELECT datab, count(*) FROM large GROUP BY 1 ORDER BY 1 LIMIT 10";
+       my $query =
+         "SELECT datab, count(*) FROM large GROUP BY 1 ORDER BY 1 LIMIT 10";
        is($primary->safe_psql('conflict_db', $query),
-          "$counter|4000",
-          "primary: $message");
+               "$counter|4000", "primary: $message");
 
        $primary->wait_for_catchup($standby);
        is($standby->safe_psql('conflict_db', $query),
-          "$counter|4000",
-          "standby: $message");
+               "$counter|4000", "standby: $message");
 }
 
 sub cause_eviction
index ade4b51fb8dec782a9f1703c6a4439810225087b..ba3532a51e852b99d1ad8e6c520283474c3cbd71 100644
@@ -1226,8 +1226,8 @@ PG_FUNCTION_INFO_V1(get_columns_length);
 Datum
 get_columns_length(PG_FUNCTION_ARGS)
 {
-       ArrayType       *ta = PG_GETARG_ARRAYTYPE_P(0);
-       Oid                     *type_oids;
+       ArrayType  *ta = PG_GETARG_ARRAYTYPE_P(0);
+       Oid                *type_oids;
        int                     ntypes;
        int                     column_offset = 0;
 
@@ -1241,7 +1241,7 @@ get_columns_length(PG_FUNCTION_ARGS)
        ntypes = ArrayGetNItems(ARR_NDIM(ta), ARR_DIMS(ta));
        for (int i = 0; i < ntypes; i++)
        {
-               Oid typeoid = type_oids[i];
+               Oid                     typeoid = type_oids[i];
                int16           typlen;
                bool            typbyval;
                char            typalign;
index 58d2bc336f5b27db5e46247c970cec20eb9febc5..c0b4a5739cee39cefad77bd6d436eaa94bc2a76e 100644
@@ -19,10 +19,12 @@ if ($ENV{with_ssl} ne 'openssl')
 }
 
 my $ssl_server = SSL::Server->new();
+
 sub sslkey
 {
        return $ssl_server->sslkey(@_);
 }
+
 sub switch_server_cert
 {
        $ssl_server->switch_server_cert(@_);
@@ -56,28 +58,30 @@ my $result = $node->safe_psql('postgres', "SHOW ssl_library");
 is($result, $ssl_server->ssl_library(), 'ssl_library parameter');
 
 $ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR,
-                                                                                  $SERVERHOSTCIDR,     'trust');
+       $SERVERHOSTCIDR, 'trust');
 
 note "testing password-protected keys";
 
-switch_server_cert($node,
-       certfile => 'server-cn-only',
-       cafile => 'root+client_ca',
-       keyfile => 'server-password',
+switch_server_cert(
+       $node,
+       certfile       => 'server-cn-only',
+       cafile         => 'root+client_ca',
+       keyfile        => 'server-password',
        passphrase_cmd => 'echo wrongpassword',
-       restart => 'no' );
+       restart        => 'no');
 
 command_fails(
        [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
        'restart fails with password-protected key file with wrong password');
 $node->_update_pid(0);
 
-switch_server_cert($node,
-       certfile => 'server-cn-only',
-       cafile => 'root+client_ca',
-       keyfile => 'server-password',
+switch_server_cert(
+       $node,
+       certfile       => 'server-cn-only',
+       cafile         => 'root+client_ca',
+       keyfile        => 'server-password',
        passphrase_cmd => 'echo secret1',
-       restart => 'no');
+       restart        => 'no');
 
 command_ok(
        [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
@@ -115,7 +119,8 @@ switch_server_cert($node, certfile => 'server-cn-only');
 # Set of default settings for SSL parameters in connection string.  This
 # makes the tests protected against any defaults the environment may have
 # in ~/.postgresql/.
-my $default_ssl_connstr = "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
+my $default_ssl_connstr =
+  "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
 
 $common_connstr =
   "$default_ssl_connstr user=ssltestuser dbname=trustdb hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
@@ -416,9 +421,11 @@ switch_server_cert($node, certfile => 'server-ip-cn-and-dns-alt-names');
 
 $node->connect_ok("$common_connstr host=192.0.2.1",
        "certificate with both an IP CN and DNS SANs matches CN");
-$node->connect_ok("$common_connstr host=dns1.alt-name.pg-ssltest.test",
+$node->connect_ok(
+       "$common_connstr host=dns1.alt-name.pg-ssltest.test",
        "certificate with both an IP CN and DNS SANs matches SAN 1");
-$node->connect_ok("$common_connstr host=dns2.alt-name.pg-ssltest.test",
+$node->connect_ok(
+       "$common_connstr host=dns2.alt-name.pg-ssltest.test",
        "certificate with both an IP CN and DNS SANs matches SAN 2");
 
 # Finally, test a server certificate that has no CN or SANs. Of course, that's
@@ -506,42 +513,50 @@ $node->connect_fails(
 
 # correct client cert in unencrypted PEM
 $node->connect_ok(
-       "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client.key'),
+       "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+         . sslkey('client.key'),
        "certificate authorization succeeds with correct client cert in PEM format"
 );
 
 # correct client cert in unencrypted DER
 $node->connect_ok(
-       "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-der.key'),
+       "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+         . sslkey('client-der.key'),
        "certificate authorization succeeds with correct client cert in DER format"
 );
 
 # correct client cert in encrypted PEM
 $node->connect_ok(
-       "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key') . " sslpassword='dUmmyP^#+'",
+       "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+         . sslkey('client-encrypted-pem.key')
+         . " sslpassword='dUmmyP^#+'",
        "certificate authorization succeeds with correct client cert in encrypted PEM format"
 );
 
 # correct client cert in encrypted DER
 $node->connect_ok(
-       "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-der.key') . " sslpassword='dUmmyP^#+'",
+       "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+         . sslkey('client-encrypted-der.key')
+         . " sslpassword='dUmmyP^#+'",
        "certificate authorization succeeds with correct client cert in encrypted DER format"
 );
 
 # correct client cert in encrypted PEM with wrong password
 $node->connect_fails(
-       "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key') . " sslpassword='wrong'",
+       "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+         . sslkey('client-encrypted-pem.key')
+         . " sslpassword='wrong'",
        "certificate authorization fails with correct client cert and wrong password in encrypted PEM format",
        expected_stderr =>
-         qr!private key file \".*client-encrypted-pem\.key\": bad decrypt!,
-);
+         qr!private key file \".*client-encrypted-pem\.key\": bad decrypt!,);
 
 
 # correct client cert using whole DN
 my $dn_connstr = "$common_connstr dbname=certdb_dn";
 
 $node->connect_ok(
-       "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt " . sslkey('client-dn.key'),
+       "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt "
+         . sslkey('client-dn.key'),
        "certificate authorization succeeds with DN mapping",
        log_like => [
                qr/connection authenticated: identity="CN=ssltestuser-dn,OU=Testing,OU=Engineering,O=PGDG" method=cert/
@@ -551,14 +566,16 @@ $node->connect_ok(
 $dn_connstr = "$common_connstr dbname=certdb_dn_re";
 
 $node->connect_ok(
-       "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt " . sslkey('client-dn.key'),
+       "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt "
+         . sslkey('client-dn.key'),
        "certificate authorization succeeds with DN regex mapping");
 
 # same thing but using explicit CN
 $dn_connstr = "$common_connstr dbname=certdb_cn";
 
 $node->connect_ok(
-       "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt " . sslkey('client-dn.key'),
+       "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt "
+         . sslkey('client-dn.key'),
        "certificate authorization succeeds with CN mapping",
        # the full DN should still be used as the authenticated identity
        log_like => [
@@ -576,7 +593,9 @@ TODO:
 
        # correct client cert in encrypted PEM with empty password
        $node->connect_fails(
-               "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key') . " sslpassword=''",
+               "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+                 . sslkey('client-encrypted-pem.key')
+                 . " sslpassword=''",
                "certificate authorization fails with correct client cert and empty password in encrypted PEM format",
                expected_stderr =>
                  qr!private key file \".*client-encrypted-pem\.key\": processing error!
@@ -584,7 +603,8 @@ TODO:
 
        # correct client cert in encrypted PEM with no password
        $node->connect_fails(
-               "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key'),
+               "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+                 . sslkey('client-encrypted-pem.key'),
                "certificate authorization fails with correct client cert and no password in encrypted PEM format",
                expected_stderr =>
                  qr!private key file \".*client-encrypted-pem\.key\": processing error!
@@ -630,7 +650,8 @@ command_like(
                '-P',
                'null=_null_',
                '-d',
-               "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client.key'),
+               "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+                 . sslkey('client.key'),
                '-c',
                "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
        ],
@@ -644,7 +665,8 @@ SKIP:
        skip "Permissions check not enforced on Windows", 2 if ($windows_os);
 
        $node->connect_fails(
-               "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client_wrongperms.key'),
+               "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+                 . sslkey('client_wrongperms.key'),
                "certificate authorization fails because of file permissions",
                expected_stderr =>
                  qr!private key file \".*client_wrongperms\.key\" has group or world access!
@@ -653,7 +675,8 @@ SKIP:
 
 # client cert belonging to another user
 $node->connect_fails(
-       "$common_connstr user=anotheruser sslcert=ssl/client.crt " . sslkey('client.key'),
+       "$common_connstr user=anotheruser sslcert=ssl/client.crt "
+         . sslkey('client.key'),
        "certificate authorization fails with client cert belonging to another user",
        expected_stderr =>
          qr/certificate authentication failed for user "anotheruser"/,
@@ -663,7 +686,8 @@ $node->connect_fails(
 
 # revoked client cert
 $node->connect_fails(
-       "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt " . sslkey('client-revoked.key'),
+       "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt "
+         . sslkey('client-revoked.key'),
        "certificate authorization fails with revoked client cert",
        expected_stderr => qr/SSL error: sslv3 alert certificate revoked/,
        # revoked certificates should not authenticate the user
@@ -676,13 +700,15 @@ $common_connstr =
   "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=verifydb hostaddr=$SERVERHOSTADDR host=localhost";
 
 $node->connect_ok(
-       "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client.key'),
+       "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+         . sslkey('client.key'),
        "auth_option clientcert=verify-full succeeds with matching username and Common Name",
        # verify-full does not provide authentication
        log_unlike => [qr/connection authenticated:/],);
 
 $node->connect_fails(
-       "$common_connstr user=anotheruser sslcert=ssl/client.crt " . sslkey('client.key'),
+       "$common_connstr user=anotheruser sslcert=ssl/client.crt "
+         . sslkey('client.key'),
        "auth_option clientcert=verify-full fails with mismatching username and Common Name",
        expected_stderr =>
          qr/FATAL: .* "trust" authentication failed for user "anotheruser"/,
@@ -692,7 +718,8 @@ $node->connect_fails(
 # Check that connecting with auth-option verify-ca in pg_hba :
 # works, when username doesn't match Common Name
 $node->connect_ok(
-       "$common_connstr user=yetanotheruser sslcert=ssl/client.crt " . sslkey('client.key'),
+       "$common_connstr user=yetanotheruser sslcert=ssl/client.crt "
+         . sslkey('client.key'),
        "auth_option clientcert=verify-ca succeeds with mismatching username and Common Name",
        # verify-full does not provide authentication
        log_unlike => [qr/connection authenticated:/],);
@@ -700,7 +727,9 @@ $node->connect_ok(
 # intermediate client_ca.crt is provided by client, and isn't in server's ssl_ca_file
 switch_server_cert($node, certfile => 'server-cn-only', cafile => 'root_ca');
 $common_connstr =
-  "$default_ssl_connstr user=ssltestuser dbname=certdb " . sslkey('client.key') . " sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR host=localhost";
+    "$default_ssl_connstr user=ssltestuser dbname=certdb "
+  . sslkey('client.key')
+  . " sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR host=localhost";
 
 $node->connect_ok(
        "$common_connstr sslmode=require sslcert=ssl/client+client_ca.crt",
@@ -711,11 +740,15 @@ $node->connect_fails(
        expected_stderr => qr/SSL error: tlsv1 alert unknown ca/);
 
 # test server-side CRL directory
-switch_server_cert($node, certfile => 'server-cn-only', crldir => 'root+client-crldir');
+switch_server_cert(
+       $node,
+       certfile => 'server-cn-only',
+       crldir   => 'root+client-crldir');
 
 # revoked client cert
 $node->connect_fails(
-       "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt " . sslkey('client-revoked.key'),
+       "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt "
+         . sslkey('client-revoked.key'),
        "certificate authorization fails with revoked client cert with server-side CRL directory",
        expected_stderr => qr/SSL error: sslv3 alert certificate revoked/);
 
index 4354901f539e940c405cd60c237b0c4a3c227e11..588f47a39b990dd271074c09eb6f54d53eecc060 100644
@@ -22,10 +22,12 @@ if ($ENV{with_ssl} ne 'openssl')
 }
 
 my $ssl_server = SSL::Server->new();
+
 sub sslkey
 {
        return $ssl_server->sslkey(@_);
 }
+
 sub switch_server_cert
 {
        $ssl_server->switch_server_cert(@_);
@@ -57,8 +59,11 @@ $ENV{PGPORT} = $node->port;
 $node->start;
 
 # Configure server for SSL connections, with password handling.
-$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
-       "scram-sha-256", 'password' => "pass", 'password_enc' => "scram-sha-256");
+$ssl_server->configure_test_server_for_ssl(
+       $node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
+       "scram-sha-256",
+       'password'     => "pass",
+       'password_enc' => "scram-sha-256");
 switch_server_cert($node, certfile => 'server-cn-only');
 $ENV{PGPASSWORD} = "pass";
 $common_connstr =
@@ -104,7 +109,7 @@ $node->connect_fails(
 # because channel binding is not performed.  Note that ssl/client.key may
 # be used in a different test, so the name of this temporary client key
 # is chosen here to be unique.
-my $cert_tempdir = PostgreSQL::Test::Utils::tempdir();
+my $cert_tempdir   = PostgreSQL::Test::Utils::tempdir();
 my $client_tmp_key = "$cert_tempdir/client_scram.key";
 copy("ssl/client.key", "$cert_tempdir/client_scram.key")
   or die
index 96a5db86721fa0d9495211481b4de7688a90e6fc..87fb18a31e00f1cb2d7fb778dad383e415ea3a0e 100644
@@ -21,10 +21,12 @@ if ($ENV{with_ssl} ne 'openssl')
 
 #### Some configuration
 my $ssl_server = SSL::Server->new();
+
 sub sslkey
 {
        return $ssl_server->sslkey(@_);
 }
+
 sub switch_server_cert
 {
        $ssl_server->switch_server_cert(@_);
@@ -52,8 +54,8 @@ $ENV{PGHOST} = $node->host;
 $ENV{PGPORT} = $node->port;
 $node->start;
 
-$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
-       'trust', extensions => [ qw(sslinfo) ]);
+$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR,
+       $SERVERHOSTCIDR, 'trust', extensions => [qw(sslinfo)]);
 
 # We aren't using any CRL's in this suite so we can keep using server-revoked
 # as server certificate for simple client.crt connection much like how the
@@ -63,11 +65,13 @@ switch_server_cert($node, certfile => 'server-revoked');
 # Set of default settings for SSL parameters in connection string.  This
 # makes the tests protected against any defaults the environment may have
 # in ~/.postgresql/.
-my $default_ssl_connstr = "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
+my $default_ssl_connstr =
+  "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
 
 $common_connstr =
-  "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR host=localhost " .
-  "user=ssltestuser sslcert=ssl/client_ext.crt " . sslkey('client_ext.key');
+  "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR host=localhost "
+  . "user=ssltestuser sslcert=ssl/client_ext.crt "
+  . sslkey('client_ext.key');
 
 # Make sure we can connect even though previous test suites have established this
 $node->connect_ok(
@@ -77,62 +81,85 @@ $node->connect_ok(
 
 my $result;
 
-$result = $node->safe_psql("certdb", "SELECT ssl_is_used();",
-  connstr => $common_connstr);
+$result = $node->safe_psql(
+       "certdb",
+       "SELECT ssl_is_used();",
+       connstr => $common_connstr);
 is($result, 't', "ssl_is_used() for TLS connection");
 
-$result = $node->safe_psql("certdb", "SELECT ssl_version();",
-  connstr => $common_connstr . " ssl_min_protocol_version=TLSv1.2 " .
-  "ssl_max_protocol_version=TLSv1.2");
+$result = $node->safe_psql(
+       "certdb",
+       "SELECT ssl_version();",
+       connstr => $common_connstr
+         . " ssl_min_protocol_version=TLSv1.2 "
+         . "ssl_max_protocol_version=TLSv1.2");
 is($result, 'TLSv1.2', "ssl_version() correctly returning TLS protocol");
 
-$result = $node->safe_psql("certdb",
-  "SELECT ssl_cipher() = cipher FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
-  connstr => $common_connstr);
+$result = $node->safe_psql(
+       "certdb",
+       "SELECT ssl_cipher() = cipher FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+       connstr => $common_connstr);
 is($result, 't', "ssl_cipher() compared with pg_stat_ssl");
 
-$result = $node->safe_psql("certdb", "SELECT ssl_client_cert_present();",
-  connstr => $common_connstr);
+$result = $node->safe_psql(
+       "certdb",
+       "SELECT ssl_client_cert_present();",
+       connstr => $common_connstr);
 is($result, 't', "ssl_client_cert_present() for connection with cert");
 
-$result = $node->safe_psql("trustdb", "SELECT ssl_client_cert_present();",
-  connstr => "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require " .
-  "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost");
+$result = $node->safe_psql(
+       "trustdb",
+       "SELECT ssl_client_cert_present();",
+       connstr =>
+         "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require "
+         . "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost"
+);
 is($result, 'f', "ssl_client_cert_present() for connection without cert");
 
-$result = $node->safe_psql("certdb",
-  "SELECT ssl_client_serial() = client_serial FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
-  connstr => $common_connstr);
+$result = $node->safe_psql(
+       "certdb",
+       "SELECT ssl_client_serial() = client_serial FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+       connstr => $common_connstr);
 is($result, 't', "ssl_client_serial() compared with pg_stat_ssl");
 
 # Must not use safe_psql since we expect an error here
-$result = $node->psql("certdb", "SELECT ssl_client_dn_field('invalid');",
-  connstr => $common_connstr);
+$result = $node->psql(
+       "certdb",
+       "SELECT ssl_client_dn_field('invalid');",
+       connstr => $common_connstr);
 is($result, '3', "ssl_client_dn_field() for an invalid field");
 
-$result = $node->safe_psql("trustdb", "SELECT ssl_client_dn_field('commonName');",
-  connstr => "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require " .
-  "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost");
+$result = $node->safe_psql(
+       "trustdb",
+       "SELECT ssl_client_dn_field('commonName');",
+       connstr =>
+         "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require "
+         . "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost"
+);
 is($result, '', "ssl_client_dn_field() for connection without cert");
 
-$result = $node->safe_psql("certdb",
-  "SELECT '/CN=' || ssl_client_dn_field('commonName') = client_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
-  connstr => $common_connstr);
+$result = $node->safe_psql(
+       "certdb",
+       "SELECT '/CN=' || ssl_client_dn_field('commonName') = client_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+       connstr => $common_connstr);
 is($result, 't', "ssl_client_dn_field() for commonName");
 
-$result = $node->safe_psql("certdb",
-  "SELECT ssl_issuer_dn() = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
-  connstr => $common_connstr);
+$result = $node->safe_psql(
+       "certdb",
+       "SELECT ssl_issuer_dn() = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+       connstr => $common_connstr);
 is($result, 't', "ssl_issuer_dn() for connection with cert");
 
-$result = $node->safe_psql("certdb",
-  "SELECT '/CN=' || ssl_issuer_field('commonName') = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
-  connstr => $common_connstr);
+$result = $node->safe_psql(
+       "certdb",
+       "SELECT '/CN=' || ssl_issuer_field('commonName') = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+       connstr => $common_connstr);
 is($result, 't', "ssl_issuer_field() for commonName");
 
-$result = $node->safe_psql("certdb",
-  "SELECT value, critical FROM ssl_extension_info() WHERE name = 'basicConstraints';",
-  connstr => $common_connstr);
+$result = $node->safe_psql(
+       "certdb",
+       "SELECT value, critical FROM ssl_extension_info() WHERE name = 'basicConstraints';",
+       connstr => $common_connstr);
 is($result, 'CA:FALSE|t', 'extract extension from cert');
 
 done_testing();
index d6d99fa636a9163a130156fa43d54e1750ecb9ed..aed6005b4327adc98cc5e060280558aeae2b183f 100644
@@ -84,7 +84,7 @@ sub init
        # the tests. To get the full path for inclusion in connection strings, the
        # %key hash can be interrogated.
        my $cert_tempdir = PostgreSQL::Test::Utils::tempdir();
-       my @keys = (
+       my @keys         = (
                "client.key",               "client-revoked.key",
                "client-der.key",           "client-encrypted-pem.key",
                "client-encrypted-der.key", "client-dn.key",
@@ -108,8 +108,10 @@ sub init
          or die
          "couldn't copy ssl/client_key to $cert_tempdir/client_wrongperms.key for permission change: $!";
        chmod 0644, "$cert_tempdir/client_wrongperms.key"
-         or die "failed to change permissions on $cert_tempdir/client_wrongperms.key: $!";
-       $self->{key}->{'client_wrongperms.key'} = "$cert_tempdir/client_wrongperms.key";
+         or die
+         "failed to change permissions on $cert_tempdir/client_wrongperms.key: $!";
+       $self->{key}->{'client_wrongperms.key'} =
+         "$cert_tempdir/client_wrongperms.key";
        $self->{key}->{'client_wrongperms.key'} =~ s!\\!/!g
          if $PostgreSQL::Test::Utils::windows_os;
 }
@@ -171,9 +173,10 @@ sub set_server_cert
 {
        my ($self, $params) = @_;
 
-       $params->{cafile} = 'root+client_ca' unless defined $params->{cafile};
+       $params->{cafile}  = 'root+client_ca'  unless defined $params->{cafile};
        $params->{crlfile} = 'root+client.crl' unless defined $params->{crlfile};
-       $params->{keyfile} = $params->{certfile} unless defined $params->{keyfile};
+       $params->{keyfile} = $params->{certfile}
+         unless defined $params->{keyfile};
 
        my $sslconf =
            "ssl_ca_file='$params->{cafile}.crt'\n"
index de460c2d96f431cf3bda4671d7a1be7c6403ce33..62f54dcbf1698b8f0b560199911a81f468e07c31 100644
@@ -94,7 +94,7 @@ sub new
        bless $self, $class;
        if ($flavor =~ /\Aopenssl\z/i)
        {
-               $self->{flavor} = 'openssl';
+               $self->{flavor}  = 'openssl';
                $self->{backend} = SSL::Backend::OpenSSL->new();
        }
        else
@@ -115,7 +115,7 @@ string.
 
 sub sslkey
 {
-       my $self = shift;
+       my $self    = shift;
        my $keyfile = shift;
        my $backend = $self->{backend};
 
@@ -140,12 +140,14 @@ C<listen_addresses> and B<cidr> for configuring C<pg_hba.conf>.
 
 sub configure_test_server_for_ssl
 {
-       my $self=shift;
+       my $self = shift;
        my ($node, $serverhost, $servercidr, $authmethod, %params) = @_;
        my $backend = $self->{backend};
-       my $pgdata = $node->data_dir;
+       my $pgdata  = $node->data_dir;
 
-       my @databases = ( 'trustdb', 'certdb', 'certdb_dn', 'certdb_dn_re', 'certdb_cn', 'verifydb' );
+       my @databases = (
+               'trustdb',   'certdb', 'certdb_dn', 'certdb_dn_re',
+               'certdb_cn', 'verifydb');
 
        # Create test users and databases
        $node->psql('postgres', "CREATE USER ssltestuser");
@@ -162,7 +164,7 @@ sub configure_test_server_for_ssl
        if (defined($params{password}))
        {
                die "Password encryption must be specified when password is set"
-                       unless defined($params{password_enc});
+                 unless defined($params{password_enc});
 
                $node->psql('postgres',
                        "SET password_encryption='$params{password_enc}'; ALTER USER ssltestuser PASSWORD '$params{password}';"
@@ -179,7 +181,7 @@ sub configure_test_server_for_ssl
        # Create any extensions requested in the setup
        if (defined($params{extensions}))
        {
-               foreach my $extension (@{$params{extensions}})
+               foreach my $extension (@{ $params{extensions} })
                {
                        foreach my $db (@databases)
                        {
@@ -227,7 +229,7 @@ Get the name of the currently used SSL backend.
 
 sub ssl_library
 {
-       my $self = shift;
+       my $self    = shift;
        my $backend = $self->{backend};
 
        return $backend->get_library();
@@ -282,16 +284,17 @@ returning.
 
 sub switch_server_cert
 {
-       my $self = shift;
-       my $node   = shift;
+       my $self    = shift;
+       my $node    = shift;
        my $backend = $self->{backend};
-       my %params = @_;
-       my $pgdata = $node->data_dir;
+       my %params  = @_;
+       my $pgdata  = $node->data_dir;
 
        open my $sslconf, '>', "$pgdata/sslconfig.conf";
        print $sslconf "ssl=on\n";
        print $sslconf $backend->set_server_cert(\%params);
-       print $sslconf "ssl_passphrase_command='" . $params{passphrase_cmd} . "'\n"
+       print $sslconf "ssl_passphrase_command='"
+         . $params{passphrase_cmd} . "'\n"
          if defined $params{passphrase_cmd};
        close $sslconf;
 
index d35a133f1541de4cdd4e757bca9c68e12e13d98e..f53b3b7db0c5f60fe16c455a5489a6169c0fc92b 100644
@@ -427,7 +427,9 @@ $node_subscriber->safe_psql('postgres',
 );
 $node_publisher->poll_query_until('postgres',
        "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
-) or die "Timed out while waiting for apply to restart after changing CONNECTION";
+  )
+  or die
+  "Timed out while waiting for apply to restart after changing CONNECTION";
 
 $oldpid = $node_publisher->safe_psql('postgres',
        "SELECT pid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
@@ -437,7 +439,9 @@ $node_subscriber->safe_psql('postgres',
 );
 $node_publisher->poll_query_until('postgres',
        "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
-) or die "Timed out while waiting for apply to restart after changing PUBLICATION";
+  )
+  or die
+  "Timed out while waiting for apply to restart after changing PUBLICATION";
 
 $node_publisher->safe_psql('postgres',
        "INSERT INTO tab_ins SELECT generate_series(1001,1100)");
@@ -489,16 +493,14 @@ $node_publisher->safe_psql('postgres', "INSERT INTO tab_notrep VALUES (11)");
 $node_publisher->wait_for_catchup('tap_sub');
 
 $logfile = slurp_file($node_publisher->logfile, $log_location);
-ok( $logfile =~
-         qr/skipped replication of an empty transaction with XID/,
+ok($logfile =~ qr/skipped replication of an empty transaction with XID/,
        'empty transaction is skipped');
 
-$result = $node_subscriber->safe_psql('postgres',
-       "SELECT count(*) FROM tab_notrep");
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_notrep");
 is($result, qq(0), 'check non-replicated table is empty on subscriber');
 
-$node_publisher->append_conf('postgresql.conf',
-       "log_min_messages = warning");
+$node_publisher->append_conf('postgresql.conf', "log_min_messages = warning");
 $node_publisher->reload;
 
 # note that data are different on provider and subscriber
@@ -519,7 +521,9 @@ $node_subscriber->safe_psql('postgres',
        "ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed");
 $node_publisher->poll_query_until('postgres',
        "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub_renamed' AND state = 'streaming';"
-) or die "Timed out while waiting for apply to restart after renaming SUBSCRIPTION";
+  )
+  or die
+  "Timed out while waiting for apply to restart after renaming SUBSCRIPTION";
 
 # check all the cleanup
 $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_renamed");
index 39c32eda44d82bdf879d0c2353f207286374423b..cdd6b119ffb7828005c6b1db19752f531b44ba23 100644
@@ -62,21 +62,21 @@ $node_subscriber->poll_query_until('postgres', $synced_query)
   or die "Timed out while waiting for subscriber to synchronize data";
 
 # Specifying non-existent publication along with add publication.
-($ret, $stdout, $stderr) = $node_subscriber->psql(
-       'postgres',
+($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
        "ALTER SUBSCRIPTION mysub1 ADD PUBLICATION non_existent_pub1, non_existent_pub2"
 );
 ok( $stderr =~
          m/WARNING:  publications "non_existent_pub1", "non_existent_pub2" do not exist in the publisher/,
-       "Alter subscription add publication throws warning for non-existent publications");
+       "Alter subscription add publication throws warning for non-existent publications"
+);
 
 # Specifying non-existent publication along with set publication.
 ($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
-       "ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub"
-);
+       "ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub");
 ok( $stderr =~
          m/WARNING:  publication "non_existent_pub" does not exist in the publisher/,
-       "Alter subscription set publication throws warning for non-existent publication");
+       "Alter subscription set publication throws warning for non-existent publication"
+);
 
 $node_subscriber->stop;
 $node_publisher->stop;
index 66e63e755ef0febec77ac3227e9c6a2ee8fce961..e7f4a94f197323bbfc6e2b6c2dd81205afed36f6 100644
@@ -413,7 +413,8 @@ $node_publisher->safe_psql('postgres',
 $node_publisher->safe_psql('postgres',
        "CREATE TABLE tab4 (a int PRIMARY KEY) PARTITION BY LIST (a)");
 $node_publisher->safe_psql('postgres',
-       "CREATE TABLE tab4_1 PARTITION OF tab4 FOR VALUES IN (0, 1) PARTITION BY LIST (a)");
+       "CREATE TABLE tab4_1 PARTITION OF tab4 FOR VALUES IN (0, 1) PARTITION BY LIST (a)"
+);
 $node_publisher->safe_psql('postgres',
        "CREATE TABLE tab4_1_1 PARTITION OF tab4_1 FOR VALUES IN (0, 1)");
 
@@ -479,11 +480,9 @@ $node_subscriber2->safe_psql('postgres',
 # Note: We create two separate tables, not a partitioned one, so that we can
 # easily identify through which relation the changes were replicated.
 $node_subscriber2->safe_psql('postgres',
-       "CREATE TABLE tab4 (a int PRIMARY KEY)"
-);
+       "CREATE TABLE tab4 (a int PRIMARY KEY)");
 $node_subscriber2->safe_psql('postgres',
-       "CREATE TABLE tab4_1 (a int PRIMARY KEY)"
-);
+       "CREATE TABLE tab4_1 (a int PRIMARY KEY)");
 # Publication that sub2 points to now publishes via root, so must update
 # subscription target relations. We set the list of publications so that
 # the FOR ALL TABLES publication is second (the list order matters).
@@ -497,9 +496,8 @@ $node_subscriber2->poll_query_until('postgres', $synced_query)
   or die "Timed out while waiting for subscriber to synchronize data";
 
 # check that data is synced correctly
-$result = $node_subscriber1->safe_psql('postgres',
-       "SELECT c, a FROM tab2");
-is( $result, qq(sub1_tab2|1), 'initial data synced for pub_viaroot');
+$result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab2");
+is($result, qq(sub1_tab2|1), 'initial data synced for pub_viaroot');
 
 # insert
 $node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (0)");
@@ -512,8 +510,7 @@ $node_publisher->safe_psql('postgres',
 
 # Insert a row into the leaf partition, should be replicated through the
 # partition root (thanks to the FOR ALL TABLES partition).
-$node_publisher->safe_psql('postgres',
-       "INSERT INTO tab4 VALUES (0)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab4 VALUES (0)");
 
 $node_publisher->wait_for_catchup('sub_viaroot');
 $node_publisher->wait_for_catchup('sub2');
@@ -555,13 +552,13 @@ sub2_tab3|5), 'inserts into tab3 replicated');
 
 # tab4 change should be replicated through the root partition, which
 # maps to the tab4 relation on subscriber.
-$result = $node_subscriber2->safe_psql('postgres',
-       "SELECT a FROM tab4 ORDER BY 1");
-is( $result, qq(0), 'inserts into tab4 replicated');
+$result =
+  $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4 ORDER BY 1");
+is($result, qq(0), 'inserts into tab4 replicated');
 
-$result = $node_subscriber2->safe_psql('postgres',
-       "SELECT a FROM tab4_1 ORDER BY 1");
-is( $result, qq(), 'inserts into tab4_1 replicated');
+$result =
+  $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4_1 ORDER BY 1");
+is($result, qq(), 'inserts into tab4_1 replicated');
 
 
 # now switch the order of publications in the list, try again, the result
@@ -576,21 +573,20 @@ $node_subscriber2->poll_query_until('postgres', $synced_query)
 
 # Insert a change into the leaf partition, should be replicated through
 # the partition root (thanks to the FOR ALL TABLES partition).
-$node_publisher->safe_psql('postgres',
-       "INSERT INTO tab4 VALUES (1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab4 VALUES (1)");
 
 $node_publisher->wait_for_catchup('sub2');
 
 # tab4 change should be replicated through the root partition, which
 # maps to the tab4 relation on subscriber.
-$result = $node_subscriber2->safe_psql('postgres',
-       "SELECT a FROM tab4 ORDER BY 1");
+$result =
+  $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4 ORDER BY 1");
 is( $result, qq(0
 1), 'inserts into tab4 replicated');
 
-$result = $node_subscriber2->safe_psql('postgres',
-       "SELECT a FROM tab4_1 ORDER BY 1");
-is( $result, qq(), 'inserts into tab4_1 replicated');
+$result =
+  $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4_1 ORDER BY 1");
+is($result, qq(), 'inserts into tab4_1 replicated');
 
 
 # update (replicated as update)
index aacc0fcf46275b5e76e85e0a655a9ce58444c7c1..c3e9857f7ce88100d4716d2859f6ad17d4371e90 100644
@@ -29,7 +29,8 @@ $node_subscriber->start;
 # Create some pre-existing content on publisher
 $node_publisher->safe_psql('postgres',
        "CREATE TABLE tab_full (a int PRIMARY KEY)");
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
        BEGIN;
        INSERT INTO tab_full SELECT generate_series(1,10);
        PREPARE TRANSACTION 'some_initial_data';
@@ -45,7 +46,8 @@ $node_publisher->safe_psql('postgres',
        "CREATE PUBLICATION tap_pub FOR TABLE tab_full");
 
 my $appname = 'tap_sub';
-$node_subscriber->safe_psql('postgres',        "
+$node_subscriber->safe_psql(
+       'postgres', "
        CREATE SUBSCRIPTION tap_sub
        CONNECTION '$publisher_connstr application_name=$appname'
        PUBLICATION tap_pub
@@ -56,13 +58,13 @@ $node_publisher->wait_for_catchup($appname);
 
 # Also wait for initial table sync to finish
 my $synced_query =
-       "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+  "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
 $node_subscriber->poll_query_until('postgres', $synced_query)
   or die "Timed out while waiting for subscriber to synchronize data";
 
 # Also wait for two-phase to be enabled
 my $twophase_query =
-       "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+  "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
 $node_subscriber->poll_query_until('postgres', $twophase_query)
   or die "Timed out while waiting for subscriber to enable twophase";
 
@@ -71,7 +73,8 @@ $node_subscriber->poll_query_until('postgres', $twophase_query)
 # then COMMIT PREPARED
 ###############################
 
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
        BEGIN;
        INSERT INTO tab_full VALUES (11);
        PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -79,19 +82,23 @@ $node_publisher->safe_psql('postgres', "
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is in prepared state on subscriber
-my $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+my $result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber');
 
 # check that 2PC gets committed on subscriber
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab_full';");
+$node_publisher->safe_psql('postgres',
+       "COMMIT PREPARED 'test_prepared_tab_full';");
 
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is committed on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 11;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a = 11;");
 is($result, qq(1), 'Row inserted via 2PC has committed on subscriber');
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is committed on subscriber');
 
 ###############################
@@ -99,7 +106,8 @@ is($result, qq(0), 'transaction is committed on subscriber');
 # then ROLLBACK PREPARED
 ###############################
 
-$node_publisher->safe_psql('postgres',"
+$node_publisher->safe_psql(
+       'postgres', "
        BEGIN;
        INSERT INTO tab_full VALUES (12);
        PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -107,19 +115,23 @@ $node_publisher->safe_psql('postgres',"
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber');
 
 # check that 2PC gets aborted on subscriber
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab_full';");
+$node_publisher->safe_psql('postgres',
+       "ROLLBACK PREPARED 'test_prepared_tab_full';");
 
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is aborted on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 12;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a = 12;");
 is($result, qq(0), 'Row inserted via 2PC is not present on subscriber');
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is aborted on subscriber');
 
 ###############################
@@ -127,7 +139,8 @@ is($result, qq(0), 'transaction is aborted on subscriber');
 # (publisher and subscriber crash)
 ###############################
 
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
     BEGIN;
     INSERT INTO tab_full VALUES (12);
     INSERT INTO tab_full VALUES (13);
@@ -140,11 +153,13 @@ $node_publisher->start;
 $node_subscriber->start;
 
 # rollback after the restart
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+       "ROLLBACK PREPARED 'test_prepared_tab';");
 $node_publisher->wait_for_catchup($appname);
 
 # check inserts are rolled back
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (12,13);");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a IN (12,13);");
 is($result, qq(0), 'Rows rolled back are not on the subscriber');
 
 ###############################
@@ -152,7 +167,8 @@ is($result, qq(0), 'Rows rolled back are not on the subscriber');
 # (publisher and subscriber crash)
 ###############################
 
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
     BEGIN;
     INSERT INTO tab_full VALUES (12);
     INSERT INTO tab_full VALUES (13);
@@ -165,11 +181,13 @@ $node_publisher->start;
 $node_subscriber->start;
 
 # commit after the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+       "COMMIT PREPARED 'test_prepared_tab';");
 $node_publisher->wait_for_catchup($appname);
 
 # check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (12,13);");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a IN (12,13);");
 is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
 
 ###############################
@@ -177,7 +195,8 @@ is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
 # (subscriber only crash)
 ###############################
 
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
     BEGIN;
     INSERT INTO tab_full VALUES (14);
     INSERT INTO tab_full VALUES (15);
@@ -187,11 +206,13 @@ $node_subscriber->stop('immediate');
 $node_subscriber->start;
 
 # commit after the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+       "COMMIT PREPARED 'test_prepared_tab';");
 $node_publisher->wait_for_catchup($appname);
 
 # check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (14,15);");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a IN (14,15);");
 is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
 
 ###############################
@@ -199,7 +220,8 @@ is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
 # (publisher only crash)
 ###############################
 
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
     BEGIN;
     INSERT INTO tab_full VALUES (16);
     INSERT INTO tab_full VALUES (17);
@@ -209,11 +231,13 @@ $node_publisher->stop('immediate');
 $node_publisher->start;
 
 # commit after the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+       "COMMIT PREPARED 'test_prepared_tab';");
 $node_publisher->wait_for_catchup($appname);
 
 # check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (16,17);");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a IN (16,17);");
 is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
 
 ###############################
@@ -221,7 +245,8 @@ is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
 ###############################
 
 # check that 2PC gets replicated to subscriber
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
        BEGIN;
        INSERT INTO tab_full VALUES (21);
        SAVEPOINT sp_inner;
@@ -232,7 +257,8 @@ $node_publisher->safe_psql('postgres', "
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber');
 
 # COMMIT
@@ -241,11 +267,13 @@ $node_publisher->safe_psql('postgres', "COMMIT PREPARED 'outer';");
 $node_publisher->wait_for_catchup($appname);
 
 # check the transaction state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is ended on subscriber');
 
 # check inserts are visible. 22 should be rolled back. 21 should be committed.
-$result = $node_subscriber->safe_psql('postgres', "SELECT a FROM tab_full where a IN (21,22);");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT a FROM tab_full where a IN (21,22);");
 is($result, qq(21), 'Rows committed are on the subscriber');
 
 ###############################
@@ -253,14 +281,16 @@ is($result, qq(21), 'Rows committed are on the subscriber');
 ###############################
 
 # check that 2PC gets replicated to subscriber
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
        BEGIN;
        INSERT INTO tab_full VALUES (51);
        PREPARE TRANSACTION '';");
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber');
 
 # ROLLBACK
@@ -269,7 +299,8 @@ $node_publisher->safe_psql('postgres', "ROLLBACK PREPARED '';");
 # check that 2PC gets aborted on subscriber
 $node_publisher->wait_for_catchup($appname);
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is aborted on subscriber');
 
 ###############################
@@ -277,11 +308,15 @@ is($result, qq(0), 'transaction is aborted on subscriber');
 ###############################
 
 # Create some test tables for copy tests
-$node_publisher->safe_psql('postgres', "CREATE TABLE tab_copy (a int PRIMARY KEY)");
-$node_publisher->safe_psql('postgres', "INSERT INTO tab_copy SELECT generate_series(1,5);");
-$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_copy (a int PRIMARY KEY)");
+$node_publisher->safe_psql('postgres',
+       "CREATE TABLE tab_copy (a int PRIMARY KEY)");
+$node_publisher->safe_psql('postgres',
+       "INSERT INTO tab_copy SELECT generate_series(1,5);");
+$node_subscriber->safe_psql('postgres',
+       "CREATE TABLE tab_copy (a int PRIMARY KEY)");
 $node_subscriber->safe_psql('postgres', "INSERT INTO tab_copy VALUES (88);");
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
 is($result, qq(1), 'initial data in subscriber table');
 
 # Setup logical replication
@@ -289,7 +324,8 @@ $node_publisher->safe_psql('postgres',
        "CREATE PUBLICATION tap_pub_copy FOR TABLE tab_copy;");
 
 my $appname_copy = 'appname_copy';
-$node_subscriber->safe_psql('postgres',        "
+$node_subscriber->safe_psql(
+       'postgres', "
        CREATE SUBSCRIPTION tap_sub_copy
        CONNECTION '$publisher_connstr application_name=$appname_copy'
        PUBLICATION tap_pub_copy
@@ -307,11 +343,13 @@ $node_subscriber->poll_query_until('postgres', $twophase_query)
   or die "Timed out while waiting for subscriber to enable twophase";
 
 # Check that the initial table data was NOT replicated (because we said copy_data=false)
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
 is($result, qq(1), 'initial data in subscriber table');
 
 # Now do a prepare on publisher and check that it IS replicated
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
     BEGIN;
     INSERT INTO tab_copy VALUES (99);
     PREPARE TRANSACTION 'mygid';");
@@ -322,18 +360,21 @@ $node_publisher->wait_for_catchup($appname);
 
 # Check that the transaction has been prepared on the subscriber; there will be 2
 # prepared transactions for the 2 subscriptions.
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(2), 'transaction is prepared on subscriber');
 
 # Now commit the insert and verify that it IS replicated
 $node_publisher->safe_psql('postgres', "COMMIT PREPARED 'mygid';");
 
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+  $node_publisher->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
 is($result, qq(6), 'publisher inserted data');
 
 $node_publisher->wait_for_catchup($appname_copy);
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
 is($result, qq(2), 'replicated data in subscriber table');
 
 $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_copy;");
@@ -345,16 +386,21 @@ $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_copy;");
 
 $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_subscription");
 is($result, qq(0), 'check subscription was dropped on subscriber');
 
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+       "SELECT count(*) FROM pg_replication_slots");
 is($result, qq(0), 'check replication slot was dropped on publisher');
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber');
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+       'check subscription relation status was dropped on subscriber');
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_replication_origin");
 is($result, qq(0), 'check replication origin was dropped on subscriber');
 
 $node_subscriber->stop('fast');
index 900c25d5ce257266bd275ec0ef55381eaa5c474d..7a797f37bad2dee31bb84cbb230f874a1ecb41fa 100644
@@ -20,7 +20,8 @@ use Test::More;
 # node_A
 my $node_A = PostgreSQL::Test::Cluster->new('node_A');
 $node_A->init(allows_streaming => 'logical');
-$node_A->append_conf('postgresql.conf', qq(
+$node_A->append_conf(
+       'postgresql.conf', qq(
 max_prepared_transactions = 10
 logical_decoding_work_mem = 64kB
 ));
@@ -28,7 +29,8 @@ $node_A->start;
 # node_B
 my $node_B = PostgreSQL::Test::Cluster->new('node_B');
 $node_B->init(allows_streaming => 'logical');
-$node_B->append_conf('postgresql.conf', qq(
+$node_B->append_conf(
+       'postgresql.conf', qq(
 max_prepared_transactions = 10
 logical_decoding_work_mem = 64kB
 ));
@@ -36,23 +38,22 @@ $node_B->start;
 # node_C
 my $node_C = PostgreSQL::Test::Cluster->new('node_C');
 $node_C->init(allows_streaming => 'logical');
-$node_C->append_conf('postgresql.conf', qq(
+$node_C->append_conf(
+       'postgresql.conf', qq(
 max_prepared_transactions = 10
 logical_decoding_work_mem = 64kB
 ));
 $node_C->start;
 
 # Create some pre-existing content on node_A
-$node_A->safe_psql('postgres',
-       "CREATE TABLE tab_full (a int PRIMARY KEY)");
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_A->safe_psql(
+       'postgres', "
        INSERT INTO tab_full SELECT generate_series(1,10);");
 
 # Create the same tables on node_B and node_C
-$node_B->safe_psql('postgres',
-       "CREATE TABLE tab_full (a int PRIMARY KEY)");
-$node_C->safe_psql('postgres',
-       "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_B->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_C->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
 
 # Create some pre-existing content on node_A (for streaming tests)
 $node_A->safe_psql('postgres',
@@ -63,9 +64,11 @@ $node_A->safe_psql('postgres',
 # Create the same tables on node_B and node_C
 # columns a and b are compatible with the same table name on node_A
 $node_B->safe_psql('postgres',
-       "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+       "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
 $node_C->safe_psql('postgres',
-       "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+       "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
 
 # Setup logical replication
 
@@ -78,7 +81,8 @@ my $node_A_connstr = $node_A->connstr . ' dbname=postgres';
 $node_A->safe_psql('postgres',
        "CREATE PUBLICATION tap_pub_A FOR TABLE tab_full, test_tab");
 my $appname_B = 'tap_sub_B';
-$node_B->safe_psql('postgres', "
+$node_B->safe_psql(
+       'postgres', "
        CREATE SUBSCRIPTION tap_sub_B
        CONNECTION '$node_A_connstr application_name=$appname_B'
        PUBLICATION tap_pub_A
@@ -89,7 +93,8 @@ my $node_B_connstr = $node_B->connstr . ' dbname=postgres';
 $node_B->safe_psql('postgres',
        "CREATE PUBLICATION tap_pub_B FOR TABLE tab_full, test_tab");
 my $appname_C = 'tap_sub_C';
-$node_C->safe_psql('postgres', "
+$node_C->safe_psql(
+       'postgres', "
        CREATE SUBSCRIPTION tap_sub_C
        CONNECTION '$node_B_connstr application_name=$appname_C'
        PUBLICATION tap_pub_B
@@ -100,13 +105,14 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # Also wait for two-phase to be enabled
-my $twophase_query = "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+my $twophase_query =
+  "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
 $node_B->poll_query_until('postgres', $twophase_query)
-       or die "Timed out while waiting for subscriber to enable twophase";
+  or die "Timed out while waiting for subscriber to enable twophase";
 $node_C->poll_query_until('postgres', $twophase_query)
-       or die "Timed out while waiting for subscriber to enable twophase";
+  or die "Timed out while waiting for subscriber to enable twophase";
 
-is(1,1, "Cascade setup is complete");
+is(1, 1, "Cascade setup is complete");
 
 my $result;
 
@@ -116,7 +122,8 @@ my $result;
 ###############################
 
 # 2PC PREPARE
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+       'postgres', "
        BEGIN;
        INSERT INTO tab_full VALUES (11);
        PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -125,9 +132,11 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber C');
 
 # 2PC COMMIT
@@ -137,15 +146,19 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check that transaction was committed on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 11;");
+$result = $node_B->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a = 11;");
 is($result, qq(1), 'Row inserted via 2PC has committed on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 11;");
+$result = $node_C->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a = 11;");
 is($result, qq(1), 'Row inserted via 2PC has committed on subscriber C');
 
 # check the transaction state is ended on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is committed on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is committed on subscriber C');
 
 ###############################
@@ -154,7 +167,8 @@ is($result, qq(0), 'transaction is committed on subscriber C');
 ###############################
 
 # 2PC PREPARE
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+       'postgres', "
        BEGIN;
        INSERT INTO tab_full VALUES (12);
        PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -163,9 +177,11 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber C');
 
 # 2PC ROLLBACK
@@ -175,15 +191,19 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check that transaction is aborted on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 12;");
+$result = $node_B->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a = 12;");
 is($result, qq(0), 'Row inserted via 2PC is not present on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 12;");
+$result = $node_C->safe_psql('postgres',
+       "SELECT count(*) FROM tab_full where a = 12;");
 is($result, qq(0), 'Row inserted via 2PC is not present on subscriber C');
 
 # check the transaction state is ended on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is ended on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is ended on subscriber C');
 
 ###############################
@@ -191,7 +211,8 @@ is($result, qq(0), 'transaction is ended on subscriber C');
 ###############################
 
 # 2PC PREPARE with a nested ROLLBACK TO SAVEPOINT
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+       'postgres', "
        BEGIN;
        INSERT INTO tab_full VALUES (21);
        SAVEPOINT sp_inner;
@@ -204,9 +225,11 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber C');
 
 # 2PC COMMIT
@@ -216,46 +239,56 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check the transaction state is ended on subscriber
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is ended on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is ended on subscriber C');
 
 # check inserts are visible at subscriber(s).
 # 22 should be rolled back.
 # 21 should be committed.
-$result = $node_B->safe_psql('postgres', "SELECT a FROM tab_full where a IN (21,22);");
+$result = $node_B->safe_psql('postgres',
+       "SELECT a FROM tab_full where a IN (21,22);");
 is($result, qq(21), 'Rows committed are present on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT a FROM tab_full where a IN (21,22);");
+$result = $node_C->safe_psql('postgres',
+       "SELECT a FROM tab_full where a IN (21,22);");
 is($result, qq(21), 'Rows committed are present on subscriber C');
 
 # ---------------------
 # 2PC + STREAMING TESTS
 # ---------------------
 
-my $oldpid_B = $node_A->safe_psql('postgres', "
+my $oldpid_B = $node_A->safe_psql(
+       'postgres', "
        SELECT pid FROM pg_stat_replication
        WHERE application_name = '$appname_B' AND state = 'streaming';");
-my $oldpid_C = $node_B->safe_psql('postgres', "
+my $oldpid_C = $node_B->safe_psql(
+       'postgres', "
        SELECT pid FROM pg_stat_replication
        WHERE application_name = '$appname_C' AND state = 'streaming';");
 
 # Setup logical replication (streaming = on)
 
-$node_B->safe_psql('postgres', "
+$node_B->safe_psql(
+       'postgres', "
        ALTER SUBSCRIPTION tap_sub_B
        SET (streaming = on);");
-$node_C->safe_psql('postgres', "
+$node_C->safe_psql(
+       'postgres', "
        ALTER SUBSCRIPTION tap_sub_C
        SET (streaming = on)");
 
 # Wait for subscribers to finish initialization
 
-$node_A->poll_query_until('postgres', "
+$node_A->poll_query_until(
+       'postgres', "
        SELECT pid != $oldpid_B FROM pg_stat_replication
        WHERE application_name = '$appname_B' AND state = 'streaming';"
 ) or die "Timed out while waiting for apply to restart";
-$node_B->poll_query_until('postgres', "
+$node_B->poll_query_until(
+       'postgres', "
        SELECT pid != $oldpid_C FROM pg_stat_replication
        WHERE application_name = '$appname_C' AND state = 'streaming';"
 ) or die "Timed out while waiting for apply to restart";
@@ -270,7 +303,8 @@ $node_B->poll_query_until('postgres', "
 
 # Insert, update and delete enough rows to exceed the 64kB limit.
 # Then 2PC PREPARE
-$node_A->safe_psql('postgres', q{
+$node_A->safe_psql(
+       'postgres', q{
        BEGIN;
        INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
        UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -281,9 +315,11 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber C');
 
 # 2PC COMMIT
@@ -293,15 +329,23 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check that transaction was committed on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber B, and extra columns have local defaults');
-$result = $node_C->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber C, and extra columns have local defaults');
+$result = $node_B->safe_psql('postgres',
+       "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+       'Rows inserted by 2PC have committed on subscriber B, and extra columns have local defaults'
+);
+$result = $node_C->safe_psql('postgres',
+       "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+       'Rows inserted by 2PC have committed on subscriber C, and extra columns have local defaults'
+);
 
 # check the transaction state is ended on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is committed on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is committed on subscriber C');
 
 ###############################
@@ -320,7 +364,8 @@ is($result, qq(0), 'transaction is committed on subscriber C');
 $node_A->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
 
 # 2PC PREPARE with a nested ROLLBACK TO SAVEPOINT
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+       'postgres', "
        BEGIN;
        INSERT INTO test_tab VALUES (9999, 'foobar');
        SAVEPOINT sp_inner;
@@ -335,9 +380,11 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber C');
 
 # 2PC COMMIT
@@ -347,19 +394,23 @@ $node_A->wait_for_catchup($appname_B);
 $node_B->wait_for_catchup($appname_C);
 
 # check the transaction state is ended on subscriber
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is ended on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is ended on subscriber C');
 
 # check inserts are visible at subscriber(s).
 # All the streamed data (prior to the SAVEPOINT) should be rolled back.
 # (9999, 'foobar') should be committed.
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM test_tab where b = 'foobar';");
+$result = $node_B->safe_psql('postgres',
+       "SELECT count(*) FROM test_tab where b = 'foobar';");
 is($result, qq(1), 'Rows committed are present on subscriber B');
 $result = $node_B->safe_psql('postgres', "SELECT count(*) FROM test_tab;");
 is($result, qq(3), 'Rows committed are present on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM test_tab where b = 'foobar';");
+$result = $node_C->safe_psql('postgres',
+       "SELECT count(*) FROM test_tab where b = 'foobar';");
 is($result, qq(1), 'Rows committed are present on subscriber C');
 $result = $node_C->safe_psql('postgres', "SELECT count(*) FROM test_tab;");
 is($result, qq(3), 'Rows committed are present on subscriber C');
@@ -370,24 +421,36 @@ is($result, qq(3), 'Rows committed are present on subscriber C');
 
 # cleanup the node_B => node_C pub/sub
 $node_C->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_C");
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
 is($result, qq(0), 'check subscription was dropped on subscriber node C');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber node C');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
-is($result, qq(0), 'check replication origin was dropped on subscriber node C');
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result =
+  $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+       'check subscription relation status was dropped on subscriber node C');
+$result = $node_C->safe_psql('postgres',
+       "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0),
+       'check replication origin was dropped on subscriber node C');
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
 is($result, qq(0), 'check replication slot was dropped on publisher node B');
 
 # cleanup the node_A => node_B pub/sub
 $node_B->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_B");
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
 is($result, qq(0), 'check subscription was dropped on subscriber node B');
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber node B');
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
-is($result, qq(0), 'check replication origin was dropped on subscriber node B');
-$result = $node_A->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result =
+  $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+       'check subscription relation status was dropped on subscriber node B');
+$result = $node_B->safe_psql('postgres',
+       "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0),
+       'check replication origin was dropped on subscriber node B');
+$result =
+  $node_A->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
 is($result, qq(0), 'check replication slot was dropped on publisher node A');
 
 # shutdown
index 93ce3ef132d32915d995e7ea5a8625c8f9d1e1f9..d8475d25a497bac2c6928dd49ecc3eb1d47b8ca7 100644
@@ -15,7 +15,8 @@ use Test::More;
 # Initialize publisher node
 my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
 $node_publisher->init(allows_streaming => 'logical');
-$node_publisher->append_conf('postgresql.conf', qq(
+$node_publisher->append_conf(
+       'postgresql.conf', qq(
 max_prepared_transactions = 10
 logical_decoding_work_mem = 64kB
 ));
@@ -24,25 +25,31 @@ $node_publisher->start;
 # Create subscriber node
 my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
 $node_subscriber->init(allows_streaming => 'logical');
-$node_subscriber->append_conf('postgresql.conf', qq(
+$node_subscriber->append_conf(
+       'postgresql.conf', qq(
 max_prepared_transactions = 10
 ));
 $node_subscriber->start;
 
 # Create some pre-existing content on publisher
-$node_publisher->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b varchar)");
-$node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+$node_publisher->safe_psql('postgres',
+       "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+       "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
 
 # Setup structure on subscriber (columns a and b are compatible with the same table name on publisher)
 $node_subscriber->safe_psql('postgres',
-       "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+       "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
 
 # Setup logical replication (streaming = on)
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+$node_publisher->safe_psql('postgres',
+       "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
 
 my $appname = 'tap_sub';
-$node_subscriber->safe_psql('postgres', "
+$node_subscriber->safe_psql(
+       'postgres', "
        CREATE SUBSCRIPTION tap_sub
        CONNECTION '$publisher_connstr application_name=$appname'
        PUBLICATION tap_pub
@@ -53,20 +60,21 @@ $node_publisher->wait_for_catchup($appname);
 
 # Also wait for initial table sync to finish
 my $synced_query =
-       "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+  "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
 $node_subscriber->poll_query_until('postgres', $synced_query)
   or die "Timed out while waiting for subscriber to synchronize data";
 
 # Also wait for two-phase to be enabled
 my $twophase_query =
-       "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+  "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
 $node_subscriber->poll_query_until('postgres', $twophase_query)
   or die "Timed out while waiting for subscriber to enable twophase";
 
 ###############################
 # Check initial data was copied to subscriber
 ###############################
-my $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+my $result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*), count(c), count(d = 999) FROM test_tab");
 is($result, qq(2|2|2), 'check initial data was copied to subscriber');
 
 ###############################
@@ -79,7 +87,8 @@ is($result, qq(2|2|2), 'check initial data was copied to subscriber');
 
 # check that 2PC gets replicated to subscriber
 # Insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+       'postgres', q{
        BEGIN;
        INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
        UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -89,18 +98,24 @@ $node_publisher->safe_psql('postgres', q{
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber');
 
 # 2PC transaction gets committed
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+       "COMMIT PREPARED 'test_prepared_tab';");
 
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is committed on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+       'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults'
+);
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is committed on subscriber');
 
 ###############################
@@ -113,10 +128,11 @@ is($result, qq(0), 'transaction is committed on subscriber');
 ###############################
 
 # First, delete the data except for 2 rows (will be replicated)
-$node_publisher->safe_psql('postgres',  "DELETE FROM test_tab WHERE a > 2;");
+$node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
 
 # Then insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+       'postgres', q{
        BEGIN;
        INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
        UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -126,19 +142,24 @@ $node_publisher->safe_psql('postgres', q{
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber');
 
 # 2PC transaction gets aborted
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+       "ROLLBACK PREPARED 'test_prepared_tab';");
 
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is aborted on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(2|2|2), 'Rows inserted by 2PC are rolled back, leaving only the original 2 rows');
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(2|2|2),
+       'Rows inserted by 2PC are rolled back, leaving only the original 2 rows');
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is aborted on subscriber');
 
 ###############################
@@ -151,7 +172,8 @@ is($result, qq(0), 'transaction is aborted on subscriber');
 # Note: both publisher and subscriber do crash/restart.
 ###############################
 
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+       'postgres', q{
        BEGIN;
        INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
        UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -165,12 +187,16 @@ $node_publisher->start;
 $node_subscriber->start;
 
 # commit after the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+       "COMMIT PREPARED 'test_prepared_tab';");
 $node_publisher->wait_for_catchup($appname);
 
 # check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults');
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+       'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults'
+);
 
 ###############################
 # Do INSERT after the PREPARE but before ROLLBACK PREPARED.
@@ -187,7 +213,8 @@ is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscrib
 $node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
 
 # Then insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+       'postgres', q{
        BEGIN;
        INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
        UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -197,24 +224,29 @@ $node_publisher->safe_psql('postgres', q{
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber');
 
 # Insert a different record (now we are outside of the 2PC transaction)
 # Note: the 2PC transaction still holds row locks, so make sure this insert is for a separate primary key
-$node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (99999, 'foobar')");
+$node_publisher->safe_psql('postgres',
+       "INSERT INTO test_tab VALUES (99999, 'foobar')");
 
 # 2PC transaction gets aborted
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+       "ROLLBACK PREPARED 'test_prepared_tab';");
 
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is aborted on subscriber,
 # but the extra INSERT outside of the 2PC was still replicated
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*), count(c), count(d = 999) FROM test_tab");
 is($result, qq(3|3|3), 'check the outside insert was copied to subscriber');
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is aborted on subscriber');
 
 ###############################
@@ -232,7 +264,8 @@ is($result, qq(0), 'transaction is aborted on subscriber');
 $node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
 
 # Then insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+       'postgres', q{
        BEGIN;
        INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
        UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -242,24 +275,30 @@ $node_publisher->safe_psql('postgres', q{
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(1), 'transaction is prepared on subscriber');
 
 # Insert a different record (now we are outside of the 2PC transaction)
 # Note: the 2PC transaction still holds row locks, so make sure this insert is for a separate primary key
-$node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (99999, 'foobar')");
+$node_publisher->safe_psql('postgres',
+       "INSERT INTO test_tab VALUES (99999, 'foobar')");
 
 # 2PC transaction gets committed
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+       "COMMIT PREPARED 'test_prepared_tab';");
 
 $node_publisher->wait_for_catchup($appname);
 
 # check that transaction is committed on subscriber
 $result = $node_subscriber->safe_psql('postgres',
        "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3335|3335|3335), 'Rows inserted by 2PC (as well as outside insert) have committed on subscriber, and extra columns contain local defaults');
+is($result, qq(3335|3335|3335),
+       'Rows inserted by 2PC (as well as outside insert) have committed on subscriber, and extra columns contain local defaults'
+);
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_prepared_xacts;");
 is($result, qq(0), 'transaction is committed on subscriber');
 
 ###############################
@@ -268,16 +307,21 @@ is($result, qq(0), 'transaction is committed on subscriber');
 
 $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_subscription");
 is($result, qq(0), 'check subscription was dropped on subscriber');
 
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+       "SELECT count(*) FROM pg_replication_slots");
 is($result, qq(0), 'check replication slot was dropped on publisher');
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber');
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+       'check subscription relation status was dropped on subscriber');
 
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
+$result = $node_subscriber->safe_psql('postgres',
+       "SELECT count(*) FROM pg_replication_origin");
 is($result, qq(0), 'check replication origin was dropped on subscriber');
 
 $node_subscriber->stop('fast');
index 561ddde42160e33c18da71e543067178c926b6cb..246f8c923724cfbbacc21e6ac47a619f4265228c 100644
@@ -30,8 +30,7 @@ $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_1 (a int)");
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
 $node_publisher->safe_psql('postgres',
        "CREATE PUBLICATION tap_pub_1 FOR TABLE tab_1");
-$node_publisher->safe_psql('postgres',
-       "CREATE PUBLICATION tap_pub_2");
+$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub_2");
 
 $node_subscriber->safe_psql('postgres',
        "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub_1, tap_pub_2"
index 2a6ba5403dafaac502374586cc3c73d2b1d88930..5ce275cf72545f6996f0717473a50c0f91eef0a9 100644
@@ -27,11 +27,14 @@ $node_publisher->safe_psql('postgres',
 $node_publisher->safe_psql('postgres',
        "CREATE TABLE sch1.tab2 AS SELECT generate_series(1,10) AS a");
 $node_publisher->safe_psql('postgres',
-        "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+       "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
 $node_publisher->safe_psql('postgres',
-        "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)");
+       "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)"
+);
 $node_publisher->safe_psql('postgres',
-        "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)");
+       "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)"
+);
 
 $node_publisher->safe_psql('postgres',
        "INSERT INTO sch1.tab1_parent values (1),(4)");
@@ -41,11 +44,14 @@ $node_subscriber->safe_psql('postgres', "CREATE SCHEMA sch1");
 $node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.tab1 (a int)");
 $node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.tab2 (a int)");
 $node_subscriber->safe_psql('postgres',
-        "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+       "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
 $node_subscriber->safe_psql('postgres',
-        "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)");
+       "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)"
+);
 $node_subscriber->safe_psql('postgres',
-        "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)");
+       "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)"
+);
 
 # Setup logical replication
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
@@ -75,7 +81,7 @@ is($result, qq(10|1|10), 'check rows on subscriber catchup');
 
 $result = $node_subscriber->safe_psql('postgres',
        "SELECT * FROM sch1.tab1_parent order by 1");
-is($result, qq(1|
+is( $result, qq(1|
 4|), 'check rows on subscriber catchup');
 
 # Insert some data into few tables and verify that inserted data is replicated
@@ -93,7 +99,7 @@ is($result, qq(20|1|20), 'check replicated inserts on subscriber');
 
 $result = $node_subscriber->safe_psql('postgres',
        "SELECT * FROM sch1.tab1_parent order by 1");
-is($result, qq(1|
+is( $result, qq(1|
 2|
 4|
 5|), 'check replicated inserts on subscriber');
@@ -189,7 +195,8 @@ is($result, qq(3),
 # Drop the schema from the publication and verify that inserts are no longer
 # published after the drop. Here the second insert should not be
 # published.
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+       'postgres', "
        INSERT INTO sch1.tab1 VALUES(21);
        ALTER PUBLICATION tap_pub_schema DROP ALL TABLES IN SCHEMA sch1;
        INSERT INTO sch1.tab1 values(22);"
index 4815e6ccffe8acfbf1159e3806b9e39b3fdae21b..350bc40efcbe08e5f7018acf2df5bf12fd480708 100644
@@ -12,8 +12,9 @@ $offset = 0;
 
 sub publish_insert
 {
-  my ($tbl, $new_i) = @_;
-  $node_publisher->safe_psql('postgres', qq(
+       my ($tbl, $new_i) = @_;
+       $node_publisher->safe_psql(
+               'postgres', qq(
   SET SESSION AUTHORIZATION regress_alice;
   INSERT INTO $tbl (i) VALUES ($new_i);
   ));
@@ -21,8 +22,9 @@ sub publish_insert
 
 sub publish_update
 {
-  my ($tbl, $old_i, $new_i) = @_;
-  $node_publisher->safe_psql('postgres', qq(
+       my ($tbl, $old_i, $new_i) = @_;
+       $node_publisher->safe_psql(
+               'postgres', qq(
   SET SESSION AUTHORIZATION regress_alice;
   UPDATE $tbl SET i = $new_i WHERE i = $old_i;
   ));
@@ -30,8 +32,9 @@ sub publish_update
 
 sub publish_delete
 {
-  my ($tbl, $old_i) = @_;
-  $node_publisher->safe_psql('postgres', qq(
+       my ($tbl, $old_i) = @_;
+       $node_publisher->safe_psql(
+               'postgres', qq(
   SET SESSION AUTHORIZATION regress_alice;
   DELETE FROM $tbl WHERE i = $old_i;
   ));
@@ -39,47 +42,53 @@ sub publish_delete
 
 sub expect_replication
 {
-  my ($tbl, $cnt, $min, $max, $testname) = @_;
-  $node_publisher->wait_for_catchup('admin_sub');
-  $result = $node_subscriber->safe_psql('postgres', qq(
+       my ($tbl, $cnt, $min, $max, $testname) = @_;
+       $node_publisher->wait_for_catchup('admin_sub');
+       $result = $node_subscriber->safe_psql(
+               'postgres', qq(
   SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
-  is ($result, "$cnt|$min|$max", $testname);
+       is($result, "$cnt|$min|$max", $testname);
 }
 
 sub expect_failure
 {
-  my ($tbl, $cnt, $min, $max, $re, $testname) = @_;
-  $offset = $node_subscriber->wait_for_log($re, $offset);
-  $result = $node_subscriber->safe_psql('postgres', qq(
+       my ($tbl, $cnt, $min, $max, $re, $testname) = @_;
+       $offset = $node_subscriber->wait_for_log($re, $offset);
+       $result = $node_subscriber->safe_psql(
+               'postgres', qq(
   SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
-  is ($result, "$cnt|$min|$max", $testname);
+       is($result, "$cnt|$min|$max", $testname);
 }
 
 sub revoke_superuser
 {
-  my ($role) = @_;
-  $node_subscriber->safe_psql('postgres', qq(
+       my ($role) = @_;
+       $node_subscriber->safe_psql(
+               'postgres', qq(
   ALTER ROLE $role NOSUPERUSER));
 }
 
 sub grant_superuser
 {
-  my ($role) = @_;
-  $node_subscriber->safe_psql('postgres', qq(
+       my ($role) = @_;
+       $node_subscriber->safe_psql(
+               'postgres', qq(
   ALTER ROLE $role SUPERUSER));
 }
 
 sub revoke_bypassrls
 {
-  my ($role) = @_;
-  $node_subscriber->safe_psql('postgres', qq(
+       my ($role) = @_;
+       $node_subscriber->safe_psql(
+               'postgres', qq(
   ALTER ROLE $role NOBYPASSRLS));
 }
 
 sub grant_bypassrls
 {
-  my ($role) = @_;
-  $node_subscriber->safe_psql('postgres', qq(
+       my ($role) = @_;
+       $node_subscriber->safe_psql(
+               'postgres', qq(
   ALTER ROLE $role BYPASSRLS));
 }
 
@@ -88,7 +97,7 @@ sub grant_bypassrls
 # "regress_admin".  For partitioned tables, layout the partitions differently
 # on the publisher than on the subscriber.
 #
-$node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher  = PostgreSQL::Test::Cluster->new('publisher');
 $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
 $node_publisher->init(allows_streaming => 'logical');
 $node_subscriber->init;
@@ -96,17 +105,18 @@ $node_publisher->start;
 $node_subscriber->start;
 $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
 my %remainder_a = (
-  publisher => 0,
-  subscriber => 1);
+       publisher  => 0,
+       subscriber => 1);
 my %remainder_b = (
-  publisher => 1,
-  subscriber => 0);
+       publisher  => 1,
+       subscriber => 0);
 
 for my $node ($node_publisher, $node_subscriber)
 {
-  my $remainder_a = $remainder_a{$node->name};
-  my $remainder_b = $remainder_b{$node->name};
-  $node->safe_psql('postgres', qq(
+       my $remainder_a = $remainder_a{ $node->name };
+       my $remainder_b = $remainder_b{ $node->name };
+       $node->safe_psql(
+               'postgres', qq(
   CREATE ROLE regress_admin SUPERUSER LOGIN;
   CREATE ROLE regress_alice NOSUPERUSER LOGIN;
   GRANT CREATE ON DATABASE postgres TO regress_alice;
@@ -129,14 +139,16 @@ for my $node ($node_publisher, $node_subscriber)
   ALTER TABLE alice.hashpart_b REPLICA IDENTITY FULL;
   ));
 }
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
 SET SESSION AUTHORIZATION regress_alice;
 
 CREATE PUBLICATION alice
   FOR TABLE alice.unpartitioned, alice.hashpart
   WITH (publish_via_partition_root = true);
 ));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
 SET SESSION AUTHORIZATION regress_admin;
 CREATE SUBSCRIPTION admin_sub CONNECTION '$publisher_connstr' PUBLICATION alice;
 ));
@@ -156,9 +168,8 @@ publish_insert("alice.unpartitioned", 3);
 publish_insert("alice.unpartitioned", 5);
 publish_update("alice.unpartitioned", 1 => 7);
 publish_delete("alice.unpartitioned", 3);
-expect_replication(
-    "alice.unpartitioned", 2, 5, 7,
-    "superuser admin replicates into unpartitioned");
+expect_replication("alice.unpartitioned", 2, 5, 7,
+       "superuser admin replicates into unpartitioned");
 
 # Revoke and restore superuser privilege for "regress_admin",
 # verifying that replication fails while superuser privilege is
@@ -166,12 +177,13 @@ expect_replication(
 #
 revoke_superuser("regress_admin");
 publish_update("alice.unpartitioned", 5 => 9);
-expect_failure("alice.unpartitioned", 2, 5, 7,
-                          qr/ERROR:  permission denied for table unpartitioned/msi,
-                          "non-superuser admin fails to replicate update");
+expect_failure(
+       "alice.unpartitioned", 2, 5, 7,
+       qr/ERROR:  permission denied for table unpartitioned/msi,
+       "non-superuser admin fails to replicate update");
 grant_superuser("regress_admin");
 expect_replication("alice.unpartitioned", 2, 7, 9,
-                                  "admin with restored superuser privilege replicates update");
+       "admin with restored superuser privilege replicates update");
 
 # Grant INSERT, UPDATE, DELETE privileges on the target tables to
 # "regress_admin" so that superuser privileges are not necessary for
@@ -180,7 +192,8 @@ expect_replication("alice.unpartitioned", 2, 7, 9,
 # Note that UPDATE and DELETE also require SELECT privileges, which
 # will be granted in a subsequent test.
 #
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
 ALTER ROLE regress_admin NOSUPERUSER;
 SET SESSION AUTHORIZATION regress_alice;
 GRANT INSERT,UPDATE,DELETE ON
@@ -192,16 +205,23 @@ REVOKE SELECT ON alice.unpartitioned FROM regress_admin;
 
 publish_insert("alice.unpartitioned", 11);
 expect_replication("alice.unpartitioned", 3, 7, 11,
-                                  "nosuperuser admin with INSERT privileges can replicate into unpartitioned");
+       "nosuperuser admin with INSERT privileges can replicate into unpartitioned"
+);
 
 publish_update("alice.unpartitioned", 7 => 13);
-expect_failure("alice.unpartitioned", 3, 7, 11,
-                          qr/ERROR:  permission denied for table unpartitioned/msi,
-                          "non-superuser admin without SELECT privileges fails to replicate update");
+expect_failure(
+       "alice.unpartitioned",
+       3,
+       7,
+       11,
+       qr/ERROR:  permission denied for table unpartitioned/msi,
+       "non-superuser admin without SELECT privileges fails to replicate update"
+);
 
 # Now grant SELECT
 #
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
 SET SESSION AUTHORIZATION regress_alice;
 GRANT SELECT ON
   alice.unpartitioned,
@@ -211,7 +231,8 @@ GRANT SELECT ON
 
 publish_delete("alice.unpartitioned", 9);
 expect_replication("alice.unpartitioned", 2, 11, 13,
-                                  "nosuperuser admin with all table privileges can replicate into unpartitioned");
+       "nosuperuser admin with all table privileges can replicate into unpartitioned"
+);
 
 # Test partitioning
 #
@@ -221,50 +242,68 @@ publish_insert("alice.hashpart", 103);
 publish_update("alice.hashpart", 102 => 120);
 publish_delete("alice.hashpart", 101);
 expect_replication("alice.hashpart", 2, 103, 120,
-                                  "nosuperuser admin with all table privileges can replicate into hashpart");
+       "nosuperuser admin with all table privileges can replicate into hashpart"
+);
 
 
 # Enable RLS on the target table and check that "regress_admin" can
 # only replicate into it when superuser or bypassrls.
 #
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
 SET SESSION AUTHORIZATION regress_alice;
 ALTER TABLE alice.unpartitioned ENABLE ROW LEVEL SECURITY;
 ));
 
 revoke_superuser("regress_admin");
 publish_insert("alice.unpartitioned", 15);
-expect_failure("alice.unpartitioned", 2, 11, 13,
-                          qr/ERROR:  "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
-                          "non-superuser admin fails to replicate insert into rls enabled table");
+expect_failure(
+       "alice.unpartitioned",
+       2,
+       11,
+       13,
+       qr/ERROR:  "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+       "non-superuser admin fails to replicate insert into rls enabled table");
 grant_superuser("regress_admin");
 expect_replication("alice.unpartitioned", 3, 11, 15,
-                                  "admin with restored superuser privilege replicates insert into rls enabled unpartitioned");
+       "admin with restored superuser privilege replicates insert into rls enabled unpartitioned"
+);
 
 revoke_superuser("regress_admin");
 publish_update("alice.unpartitioned", 11 => 17);
-expect_failure("alice.unpartitioned", 3, 11, 15,
-                          qr/ERROR:  "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
-                          "non-superuser admin fails to replicate update into rls enabled unpartitioned");
+expect_failure(
+       "alice.unpartitioned",
+       3,
+       11,
+       15,
+       qr/ERROR:  "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+       "non-superuser admin fails to replicate update into rls enabled unpartitioned"
+);
 
 grant_bypassrls("regress_admin");
 expect_replication("alice.unpartitioned", 3, 13, 17,
-                                  "admin with bypassrls replicates update into rls enabled unpartitioned");
+       "admin with bypassrls replicates update into rls enabled unpartitioned");
 
 revoke_bypassrls("regress_admin");
 publish_delete("alice.unpartitioned", 13);
-expect_failure("alice.unpartitioned", 3, 13, 17,
-                          qr/ERROR:  "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
-                          "non-superuser admin without bypassrls fails to replicate delete into rls enabled unpartitioned");
+expect_failure(
+       "alice.unpartitioned",
+       3,
+       13,
+       17,
+       qr/ERROR:  "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+       "non-superuser admin without bypassrls fails to replicate delete into rls enabled unpartitioned"
+);
 grant_bypassrls("regress_admin");
 expect_replication("alice.unpartitioned", 2, 15, 17,
-                                  "admin with bypassrls replicates delete into rls enabled unpartitioned");
+       "admin with bypassrls replicates delete into rls enabled unpartitioned");
 grant_superuser("regress_admin");
 
 # Alter the subscription owner to "regress_alice".  She has neither superuser
 # nor bypassrls, but as the table owner she should be able to replicate.
 #
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
 ALTER SUBSCRIPTION admin_sub DISABLE;
 ALTER ROLE regress_alice SUPERUSER;
 ALTER SUBSCRIPTION admin_sub OWNER TO regress_alice;
@@ -275,8 +314,8 @@ ALTER SUBSCRIPTION admin_sub ENABLE;
 publish_insert("alice.unpartitioned", 23);
 publish_update("alice.unpartitioned", 15 => 25);
 publish_delete("alice.unpartitioned", 17);
-expect_replication(
-    "alice.unpartitioned", 2, 23, 25,
-    "nosuperuser nobypassrls table owner can replicate delete into unpartitioned despite rls");
+expect_replication("alice.unpartitioned", 2, 23, 25,
+       "nosuperuser nobypassrls table owner can replicate delete into unpartitioned despite rls"
+);
 
 done_testing();
index 82c4eb6ef627c969481a6a05af1a8d271f83de78..0dc0a6d10f5ed835e934831d40a2c196b1b388d0 100644
@@ -291,8 +291,7 @@ $node_subscriber->safe_psql('postgres',
 $node_subscriber->safe_psql('postgres',
        "CREATE TABLE tab_rowfilter_viaroot_part (a int)");
 $node_subscriber->safe_psql('postgres',
-       "CREATE TABLE tab_rowfilter_viaroot_part_1 (a int)"
-);
+       "CREATE TABLE tab_rowfilter_viaroot_part_1 (a int)");
 
 # setup logical replication
 $node_publisher->safe_psql('postgres',
@@ -720,18 +719,14 @@ is($result, qq(t|1), 'check replicated rows to tab_rowfilter_toast');
 $result =
   $node_subscriber->safe_psql('postgres',
        "SELECT a FROM tab_rowfilter_viaroot_part");
-is( $result, qq(16),
-       'check replicated rows to tab_rowfilter_viaroot_part'
-);
+is($result, qq(16), 'check replicated rows to tab_rowfilter_viaroot_part');
 
 # Check there is no data in tab_rowfilter_viaroot_part_1 because rows are
 # replicated via the topmost parent table tab_rowfilter_viaroot_part
 $result =
   $node_subscriber->safe_psql('postgres',
        "SELECT a FROM tab_rowfilter_viaroot_part_1");
-is( $result, qq(),
-       'check replicated rows to tab_rowfilter_viaroot_part_1'
-);
+is($result, qq(), 'check replicated rows to tab_rowfilter_viaroot_part_1');
 
 # Testcase end: FOR TABLE with row filter publications
 # ======================================================
index bdcf3e4a2483ba1e0c3bd0f0eb7bfe109f501bc0..19812e11f3197d99f2150d3cd45f5ee059266681 100644
@@ -26,51 +26,60 @@ sub wait_for_subscription_sync
        my ($node) = @_;
 
        # Also wait for initial table sync to finish
-       my $synced_query = "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+       my $synced_query =
+         "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
 
        $node->poll_query_until('postgres', $synced_query)
-               or die "Timed out while waiting for subscriber to synchronize data";
+         or die "Timed out while waiting for subscriber to synchronize data";
 }
 
 # setup tables on both nodes
 
 # tab1: simple 1:1 replication
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab1 (a int PRIMARY KEY, "B" int, c int)
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab1 (a int PRIMARY KEY, "B" int, c int)
 ));
 
 # tab2: replication from a regular table to one with fewer columns
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab2 (a int PRIMARY KEY, b varchar, c int);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab2 (a int PRIMARY KEY, b varchar)
 ));
 
 # tab3: simple 1:1 replication with weird column names
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab3 ("a'" int PRIMARY KEY, "B" varchar, "c'" int)
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab3 ("a'" int PRIMARY KEY, "c'" int)
 ));
 
 # test_part: partitioned tables (including multi-level partitioning, and
 # fewer columns on the subscriber)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part (a int PRIMARY KEY, b text, c timestamptz) PARTITION BY LIST (a);
        CREATE TABLE test_part_1_1 PARTITION OF test_part FOR VALUES IN (1,2,3,4,5,6);
        CREATE TABLE test_part_2_1 PARTITION OF test_part FOR VALUES IN (7,8,9,10,11,12) PARTITION BY LIST (a);
        CREATE TABLE test_part_2_2 PARTITION OF test_part_2_1 FOR VALUES IN (7,8,9,10);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part (a int PRIMARY KEY, b text) PARTITION BY LIST (a);
        CREATE TABLE test_part_1_1 PARTITION OF test_part FOR VALUES IN (1,2,3,4,5,6);
        CREATE TABLE test_part_2_1 PARTITION OF test_part FOR VALUES IN (7,8,9,10,11,12) PARTITION BY LIST (a);
@@ -78,12 +87,14 @@ $node_subscriber->safe_psql('postgres', qq(
 ));
 
 # tab4: table with user-defined enum types
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TYPE test_typ AS ENUM ('blue', 'red');
        CREATE TABLE tab4 (a INT PRIMARY KEY, b test_typ, c int, d text);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TYPE test_typ AS ENUM ('blue', 'red');
        CREATE TABLE tab4 (a INT PRIMARY KEY, b test_typ, d text);
 ));
@@ -91,7 +102,8 @@ $node_subscriber->safe_psql('postgres', qq(
 
 # TEST: create publication and subscription for some of the tables with
 # column lists
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE PUBLICATION pub1
           FOR TABLE tab1 (a, "B"), tab3 ("a'", "c'"), test_part (a, b), tab4 (a, b, d)
          WITH (publish_via_partition_root = 'true');
@@ -99,36 +111,41 @@ $node_publisher->safe_psql('postgres', qq(
 
 # check that we got the right prattrs values for the publication in the
 # pg_publication_rel catalog (order by relname, to get stable ordering)
-my $result = $node_publisher->safe_psql('postgres', qq(
+my $result = $node_publisher->safe_psql(
+       'postgres', qq(
        SELECT relname, prattrs
        FROM pg_publication_rel pb JOIN pg_class pc ON(pb.prrelid = pc.oid)
        ORDER BY relname
 ));
 
-is($result, qq(tab1|1 2
+is( $result, qq(tab1|1 2
 tab3|1 3
 tab4|1 2 4
 test_part|1 2), 'publication relation updated');
 
 # TEST: insert data into the tables, create subscription and see if sync
 # replicates the right columns
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab1 VALUES (1, 2, 3);
        INSERT INTO tab1 VALUES (4, 5, 6);
 ));
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab3 VALUES (1, 2, 3);
        INSERT INTO tab3 VALUES (4, 5, 6);
 ));
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab4 VALUES (1, 'red', 3, 'oh my');
        INSERT INTO tab4 VALUES (2, 'blue', 4, 'hello');
 ));
 
 # replication of partitioned table
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_part VALUES (1, 'abc', '2021-07-04 12:00:00');
        INSERT INTO test_part VALUES (2, 'bcd', '2021-07-03 11:12:13');
        INSERT INTO test_part VALUES (7, 'abc', '2021-07-04 12:00:00');
@@ -137,34 +154,35 @@ $node_publisher->safe_psql('postgres', qq(
 
 # create subscription for the publication, wait for sync to complete,
 # then check the sync results
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
 # tab1: only (a,b) is replicated
-$result = $node_subscriber->safe_psql('postgres',
-       "SELECT * FROM tab1 ORDER BY a");
-is($result, qq(1|2|
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1 ORDER BY a");
+is( $result, qq(1|2|
 4|5|), 'insert on column tab1.c is not replicated');
 
 # tab3: only (a,c) is replicated
 $result = $node_subscriber->safe_psql('postgres',
        qq(SELECT * FROM tab3 ORDER BY "a'"));
-is($result, qq(1|3
+is( $result, qq(1|3
 4|6), 'insert on column tab3.b is not replicated');
 
 # tab4: only (a,b,d) is replicated
-$result = $node_subscriber->safe_psql('postgres',
-       "SELECT * FROM tab4 ORDER BY a");
-is($result, qq(1|red|oh my
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT * FROM tab4 ORDER BY a");
+is( $result, qq(1|red|oh my
 2|blue|hello), 'insert on column tab4.c is not replicated');
 
 # test_part: (a,b) is replicated
 $result = $node_subscriber->safe_psql('postgres',
        "SELECT * FROM test_part ORDER BY a");
-is($result, qq(1|abc
+is( $result, qq(1|abc
 2|bcd
 7|abc
 8|bcd), 'insert on column test_part.c columns is not replicated');
@@ -173,23 +191,27 @@ is($result, qq(1|abc
 # TEST: now insert more data into the tables, and wait until we replicate
 # them (not by tablesync, but by regular decoding and replication)
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab1 VALUES (2, 3, 4);
        INSERT INTO tab1 VALUES (5, 6, 7);
 ));
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab3 VALUES (2, 3, 4);
        INSERT INTO tab3 VALUES (5, 6, 7);
 ));
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab4 VALUES (3, 'red', 5, 'foo');
        INSERT INTO tab4 VALUES (4, 'blue', 6, 'bar');
 ));
 
 # replication of partitioned table
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_part VALUES (3, 'xxx', '2022-02-01 10:00:00');
        INSERT INTO test_part VALUES (4, 'yyy', '2022-03-02 15:12:13');
        INSERT INTO test_part VALUES (9, 'zzz', '2022-04-03 21:00:00');
@@ -200,9 +222,9 @@ $node_publisher->safe_psql('postgres', qq(
 $node_publisher->wait_for_catchup('sub1');
 
 # tab1: only (a,b) is replicated
-$result = $node_subscriber->safe_psql('postgres',
-       "SELECT * FROM tab1 ORDER BY a");
-is($result, qq(1|2|
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1 ORDER BY a");
+is( $result, qq(1|2|
 2|3|
 4|5|
 5|6|), 'insert on column tab1.c is not replicated');
@@ -210,15 +232,15 @@ is($result, qq(1|2|
 # tab3: only (a,c) is replicated
 $result = $node_subscriber->safe_psql('postgres',
        qq(SELECT * FROM tab3 ORDER BY "a'"));
-is($result, qq(1|3
+is( $result, qq(1|3
 2|4
 4|6
 5|7), 'insert on column tab3.b is not replicated');
 
 # tab4: only (a,b,d) is replicated
-$result = $node_subscriber->safe_psql('postgres',
-       "SELECT * FROM tab4 ORDER BY a");
-is($result, qq(1|red|oh my
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT * FROM tab4 ORDER BY a");
+is( $result, qq(1|red|oh my
 2|blue|hello
 3|red|foo
 4|blue|bar), 'insert on column tab4.c is not replicated');
@@ -226,7 +248,7 @@ is($result, qq(1|red|oh my
 # test_part: (a,b) is replicated
 $result = $node_subscriber->safe_psql('postgres',
        "SELECT * FROM test_part ORDER BY a");
-is($result, qq(1|abc
+is( $result, qq(1|abc
 2|bcd
 3|xxx
 4|yyy
@@ -257,36 +279,38 @@ $node_publisher->safe_psql('postgres',
 
 # tab4
 $node_publisher->safe_psql('postgres',
-       qq(UPDATE tab4 SET b = 'blue', c = c * 2, d = d || ' updated' where a = 1));
+       qq(UPDATE tab4 SET b = 'blue', c = c * 2, d = d || ' updated' where a = 1)
+);
 
 # tab4
 $node_publisher->safe_psql('postgres',
-       qq(UPDATE tab4 SET b = 'red', c = c * 2, d = d || ' updated' where a = 2));
+       qq(UPDATE tab4 SET b = 'red', c = c * 2, d = d || ' updated' where a = 2)
+);
 
 # wait for the replication to catch up, and check the UPDATE results got
 # replicated correctly, with the right column list
 $node_publisher->wait_for_catchup('sub1');
 
-$result = $node_subscriber->safe_psql('postgres',
-       qq(SELECT * FROM tab1 ORDER BY a));
-is($result,
-qq(1|4|
+$result =
+  $node_subscriber->safe_psql('postgres', qq(SELECT * FROM tab1 ORDER BY a));
+is( $result,
+       qq(1|4|
 2|3|
 4|5|
 5|6|), 'only update on column tab1.b is replicated');
 
 $result = $node_subscriber->safe_psql('postgres',
        qq(SELECT * FROM tab3 ORDER BY "a'"));
-is($result,
-qq(1|6
+is( $result,
+       qq(1|6
 2|4
 4|6
 5|7), 'only update on column tab3.c is replicated');
 
-$result = $node_subscriber->safe_psql('postgres',
-       qq(SELECT * FROM tab4 ORDER BY a));
+$result =
+  $node_subscriber->safe_psql('postgres', qq(SELECT * FROM tab4 ORDER BY a));
 
-is($result, qq(1|blue|oh my updated
+is( $result, qq(1|blue|oh my updated
 2|red|hello updated
 3|red|foo
 4|blue|bar), 'update on column tab4.c is not replicated');
@@ -295,7 +319,8 @@ is($result, qq(1|blue|oh my updated
 # TEST: add table with a column list, insert data, replicate
 
 # insert some data before adding it to the publication
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab2 VALUES (1, 'abc', 3);
 ));
 
@@ -309,34 +334,37 @@ $node_subscriber->safe_psql('postgres',
 # the results of the replication
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab2 VALUES (2, 'def', 6);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-$result = $node_subscriber->safe_psql('postgres',
-       "SELECT * FROM tab2 ORDER BY a");
-is($result, qq(1|abc
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT * FROM tab2 ORDER BY a");
+is( $result, qq(1|abc
 2|def), 'insert on column tab2.c is not replicated');
 
 # do a couple of updates, check that the correct columns get replicated
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        UPDATE tab2 SET c = 5 where a = 1;
        UPDATE tab2 SET b = 'xyz' where a = 2;
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-$result = $node_subscriber->safe_psql('postgres',
-       "SELECT * FROM tab2 ORDER BY a");
-is($result, qq(1|abc
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT * FROM tab2 ORDER BY a");
+is( $result, qq(1|abc
 2|xyz), 'update on column tab2.c is not replicated');
 
 
 # TEST: add a table to two publications with different column lists, and
 # create a single subscription replicating both publications
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab5 (a int PRIMARY KEY, b int, c int, d int);
        CREATE PUBLICATION pub2 FOR TABLE tab5 (a, b);
        CREATE PUBLICATION pub3 FOR TABLE tab5 (a, d);
@@ -346,11 +374,13 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO tab5 VALUES (2, 22, 222, 2222);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab5 (a int PRIMARY KEY, b int, d int);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER SUBSCRIPTION sub1 SET PUBLICATION pub2, pub3
 ));
 
@@ -360,52 +390,57 @@ $node_publisher->wait_for_catchup('sub1');
 
 # insert data and make sure all the columns (union of the column lists)
 # get fully replicated
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab5 VALUES (3, 33, 333, 3333);
        INSERT INTO tab5 VALUES (4, 44, 444, 4444);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab5 ORDER BY a"),
-   qq(1|11|1111
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab5 ORDER BY a"),
+       qq(1|11|1111
 2|22|2222
 3|33|3333
 4|44|4444),
-   'overlapping publications with overlapping column lists');
+       'overlapping publications with overlapping column lists');
 
 # and finally, remove the column list for one of the publications, which
 # means replicating all columns; but first add the missing column to the
 # table on the subscriber
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        ALTER PUBLICATION pub3 SET TABLE tab5;
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
        ALTER TABLE tab5 ADD COLUMN c INT;
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab5 VALUES (5, 55, 555, 5555);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab5 ORDER BY a"),
-   qq(1|11|1111|
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab5 ORDER BY a"),
+       qq(1|11|1111|
 2|22|2222|
 3|33|3333|
 4|44|4444|
 5|55|5555|555),
-   'overlapping publications with overlapping column lists');
+       'overlapping publications with overlapping column lists');
 
 # TEST: create a table with a column list, then change the replica
 # identity by replacing a primary key (but use a different column in
 # the column list)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab6 (a int PRIMARY KEY, b int, c int, d int);
        CREATE PUBLICATION pub4 FOR TABLE tab6 (a, b);
 
@@ -413,31 +448,35 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO tab6 VALUES (1, 22, 333, 4444);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab6 (a int PRIMARY KEY, b int, c int, d int);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER SUBSCRIPTION sub1 SET PUBLICATION pub4
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab6 VALUES (2, 33, 444, 5555);
        UPDATE tab6 SET b = b * 2, c = c * 3, d = d * 4;
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab6 ORDER BY a"),
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab6 ORDER BY a"),
        qq(1|44||
 2|66||), 'replication with the original primary key');
 
 # now redefine the constraint - move the primary key to a different column
 # (which is still covered by the column list, though)
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        ALTER TABLE tab6 DROP CONSTRAINT tab6_pkey;
        ALTER TABLE tab6 ADD PRIMARY KEY (b);
 ));
@@ -445,35 +484,39 @@ $node_publisher->safe_psql('postgres', qq(
 # we need to do the same thing on the subscriber
 # XXX What would happen if this happens before the publisher ALTER? Or
 # interleaved, somehow? But that seems unrelated to column lists.
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER TABLE tab6 DROP CONSTRAINT tab6_pkey;
        ALTER TABLE tab6 ADD PRIMARY KEY (b);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab6 VALUES (3, 55, 666, 8888);
        UPDATE tab6 SET b = b * 2, c = c * 3, d = d * 4;
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab6 ORDER BY a"),
-   qq(1|88||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab6 ORDER BY a"),
+       qq(1|88||
 2|132||
 3|110||),
-   'replication with the modified primary key');
+       'replication with the modified primary key');
 
 
 # TEST: create a table with a column list, then change the replica
 # identity by replacing a primary key with a key on multiple columns
 # (all of them covered by the column list)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab7 (a int PRIMARY KEY, b int, c int, d int);
        CREATE PUBLICATION pub5 FOR TABLE tab7 (a, b);
 
@@ -481,52 +524,58 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO tab7 VALUES (1, 22, 333, 4444);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE tab7 (a int PRIMARY KEY, b int, c int, d int);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER SUBSCRIPTION sub1 SET PUBLICATION pub5
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab7 VALUES (2, 33, 444, 5555);
        UPDATE tab7 SET b = b * 2, c = c * 3, d = d * 4;
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
-   qq(1|44||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+       qq(1|44||
 2|66||), 'replication with the original primary key');
 
 # now redefine the constraint - move the primary key to a different column
 # (which is not covered by the column list)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        ALTER TABLE tab7 DROP CONSTRAINT tab7_pkey;
        ALTER TABLE tab7 ADD PRIMARY KEY (a, b);
 ));
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO tab7 VALUES (3, 55, 666, 7777);
        UPDATE tab7 SET b = b * 2, c = c * 3, d = d * 4;
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
-   qq(1|88||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+       qq(1|88||
 2|132||
 3|110||),
-   'replication with the modified primary key');
+       'replication with the modified primary key');
 
 # now switch the primary key again to other columns not covered by the
 # column list, but also generate writes between the drop and creation
 # of the new constraint
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        ALTER TABLE tab7 DROP CONSTRAINT tab7_pkey;
        INSERT INTO tab7 VALUES (4, 77, 888, 9999);
        -- update/delete is not allowed for tables without RI
@@ -535,16 +584,17 @@ $node_publisher->safe_psql('postgres', qq(
        DELETE FROM tab7 WHERE a = 1;
 ));
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
-   qq(2|264||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+       qq(2|264||
 3|220||
 4|154||),
-   'replication with the modified primary key');
+       'replication with the modified primary key');
 
 
 # TEST: partitioned tables (with publish_via_partition_root = false)
@@ -555,7 +605,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
 # First, let's create a partitioned table with two partitions, each with
 # a different RI, but a column list not covering all of those RIs.
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part_a (a int, b int, c int) PARTITION BY LIST (a);
 
        CREATE TABLE test_part_a_1 PARTITION OF test_part_a FOR VALUES IN (1,2,3,4,5);
@@ -572,7 +623,8 @@ $node_publisher->safe_psql('postgres', qq(
 ));
 
 # do the same thing on the subscriber (with the opposite column order)
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part_a (b int, a int) PARTITION BY LIST (a);
 
        CREATE TABLE test_part_a_1 PARTITION OF test_part_a FOR VALUES IN (1,2,3,4,5);
@@ -586,38 +638,43 @@ $node_subscriber->safe_psql('postgres', qq(
 
 # create a publication replicating just the column "a", which is not enough
 # for the second partition
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE PUBLICATION pub6 FOR TABLE test_part_a (b, a) WITH (publish_via_partition_root = true);
        ALTER PUBLICATION pub6 ADD TABLE test_part_a_1 (a);
        ALTER PUBLICATION pub6 ADD TABLE test_part_a_2 (b);
 ));
 
 # add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER SUBSCRIPTION sub1 SET PUBLICATION pub6
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_part_a VALUES (2, 5);
        INSERT INTO test_part_a VALUES (7, 6);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT a, b FROM test_part_a ORDER BY a, b"),
-   qq(1|3
+is( $node_subscriber->safe_psql(
+               'postgres', "SELECT a, b FROM test_part_a ORDER BY a, b"),
+       qq(1|3
 2|5
 6|4
 7|6),
-   'partitions with different replica identities not replicated correctly');
+       'partitions with different replica identities not replicated correctly');
 
 # This time start with a column list covering RI for all partitions, but
 # then update the column list to not cover column "b" (needed by the
 # second partition)
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part_b (a int, b int) PARTITION BY LIST (a);
 
        CREATE TABLE test_part_b_1 PARTITION OF test_part_b FOR VALUES IN (1,2,3,4,5);
@@ -634,7 +691,8 @@ $node_publisher->safe_psql('postgres', qq(
 ));
 
 # do the same thing on the subscriber
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part_b (a int, b int) PARTITION BY LIST (a);
 
        CREATE TABLE test_part_b_1 PARTITION OF test_part_b FOR VALUES IN (1,2,3,4,5);
@@ -648,37 +706,42 @@ $node_subscriber->safe_psql('postgres', qq(
 
 # create a publication replicating both columns, which is sufficient for
 # both partitions
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE PUBLICATION pub7 FOR TABLE test_part_b (a, b) WITH (publish_via_partition_root = true);
 ));
 
 # add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER SUBSCRIPTION sub1 SET PUBLICATION pub7
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_part_b VALUES (2, 3);
        INSERT INTO test_part_b VALUES (7, 4);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_b ORDER BY a, b"),
-   qq(1|1
+is( $node_subscriber->safe_psql(
+               'postgres', "SELECT * FROM test_part_b ORDER BY a, b"),
+       qq(1|1
 2|3
 6|2
 7|4),
-   'partitions with different replica identities not replicated correctly');
+       'partitions with different replica identities not replicated correctly');
 
 
 # TEST: This time start with a column list covering RI for all partitions,
 # but then update RI for one of the partitions to not be covered by the
 # column list anymore.
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part_c (a int, b int, c int) PARTITION BY LIST (a);
 
        CREATE TABLE test_part_c_1 PARTITION OF test_part_c FOR VALUES IN (1,3);
@@ -695,7 +758,8 @@ $node_publisher->safe_psql('postgres', qq(
 ));
 
 # do the same thing on the subscriber
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part_c (a int, b int, c int) PARTITION BY LIST (a);
 
        CREATE TABLE test_part_c_1 PARTITION OF test_part_c FOR VALUES IN (1,3);
@@ -710,39 +774,44 @@ $node_subscriber->safe_psql('postgres', qq(
 # create a publication replicating data through partition root, with a column
 # list on the root, and then add the partitions one by one with separate
 # column lists (but those are not applied)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE PUBLICATION pub8 FOR TABLE test_part_c WITH (publish_via_partition_root = false);
        ALTER PUBLICATION pub8 ADD TABLE test_part_c_1 (a,c);
        ALTER PUBLICATION pub8 ADD TABLE test_part_c_2 (a,b);
 ));
 
 # add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        DROP SUBSCRIPTION sub1;
        CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub8;
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_part_c VALUES (3, 7, 8);
        INSERT INTO test_part_c VALUES (4, 9, 10);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_c ORDER BY a, b"),
-   qq(1||5
+is( $node_subscriber->safe_psql(
+               'postgres', "SELECT * FROM test_part_c ORDER BY a, b"),
+       qq(1||5
 2|4|
 3||8
 4|9|),
-   'partitions with different replica identities not replicated correctly');
+       'partitions with different replica identities not replicated correctly');
 
 
 # create a publication not replicating data through partition root, without
 # a column list on the root, and then add the partitions one by one with
 # separate column lists
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        DROP PUBLICATION pub8;
        CREATE PUBLICATION pub8 FOR TABLE test_part_c WITH (publish_via_partition_root = false);
        ALTER PUBLICATION pub8 ADD TABLE test_part_c_1 (a);
@@ -750,14 +819,16 @@ $node_publisher->safe_psql('postgres', qq(
 ));
 
 # add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
        TRUNCATE test_part_c;
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        TRUNCATE test_part_c;
        INSERT INTO test_part_c VALUES (1, 3, 5);
        INSERT INTO test_part_c VALUES (2, 4, 6);
@@ -765,16 +836,18 @@ $node_publisher->safe_psql('postgres', qq(
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_c ORDER BY a, b"),
-   qq(1||
+is( $node_subscriber->safe_psql(
+               'postgres', "SELECT * FROM test_part_c ORDER BY a, b"),
+       qq(1||
 2|4|),
-   'partitions with different replica identities not replicated correctly');
+       'partitions with different replica identities not replicated correctly');
 
 
 # TEST: Start with a single partition, with RI compatible with the column
 # list, and then attach a partition with incompatible RI.
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part_d (a int, b int) PARTITION BY LIST (a);
 
        CREATE TABLE test_part_d_1 PARTITION OF test_part_d FOR VALUES IN (1,3);
@@ -786,7 +859,8 @@ $node_publisher->safe_psql('postgres', qq(
 
 # do the same thing on the subscriber (in fact, create both partitions right
 # away, no need to delay that)
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_part_d (a int, b int) PARTITION BY LIST (a);
 
        CREATE TABLE test_part_d_1 PARTITION OF test_part_d FOR VALUES IN (1,3);
@@ -800,33 +874,38 @@ $node_subscriber->safe_psql('postgres', qq(
 
 # create a publication replicating only column "a", which is compatible
 # with the replica identity of the first partition
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE PUBLICATION pub9 FOR TABLE test_part_d (a) WITH (publish_via_partition_root = true);
 ));
 
 # add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        ALTER SUBSCRIPTION sub1 SET PUBLICATION pub9
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_part_d VALUES (3, 4);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_d ORDER BY a, b"),
-   qq(1|
+is( $node_subscriber->safe_psql(
+               'postgres', "SELECT * FROM test_part_d ORDER BY a, b"),
+       qq(1|
 3|),
-   'partitions with different replica identities not replicated correctly');
+       'partitions with different replica identities not replicated correctly');
 
 # TEST: With a table included in multiple publications, we should use a
 # union of the column lists. So with column lists (a,b) and (a,c) we
 # should replicate (a,b,c).
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_mix_1 (a int PRIMARY KEY, b int, c int);
        CREATE PUBLICATION pub_mix_1 FOR TABLE test_mix_1 (a, b);
        CREATE PUBLICATION pub_mix_2 FOR TABLE test_mix_1 (a, c);
@@ -835,23 +914,26 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO test_mix_1 VALUES (1, 2, 3);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_mix_1 (a int PRIMARY KEY, b int, c int);
        ALTER SUBSCRIPTION sub1 SET PUBLICATION pub_mix_1, pub_mix_2;
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_mix_1 VALUES (4, 5, 6);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_1 ORDER BY a"),
-   qq(1|2|3
+is( $node_subscriber->safe_psql(
+               'postgres', "SELECT * FROM test_mix_1 ORDER BY a"),
+       qq(1|2|3
 4|5|6),
-   'a mix of publications should use a union of column list');
+       'a mix of publications should use a union of column list');
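The union semantics asserted above can also be observed on the publisher side: since PostgreSQL 15, pg_publication_tables exposes each publication's effective column list in its attnames column. A hedged sketch of such a cross-check (not part of this patch; it reuses the surrounding test's $node_publisher):

	my $attlists = $node_publisher->safe_psql(
		'postgres', qq(
		SELECT pubname, attnames FROM pg_publication_tables
		WHERE tablename = 'test_mix_1' ORDER BY pubname;
	));
	# Expected shape: pub_mix_1 lists {a,b} and pub_mix_2 lists {a,c};
	# the subscriber then applies their union (a,b,c), which is what the
	# is() above verifies through the replicated rows.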
 
 
 # TEST: With a table included in multiple publications, we should use a
@@ -859,12 +941,14 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_1 ORDER BY a")
 # TABLES, we should replicate all columns.
 
 # drop unnecessary tables, so that they do not interfere with the FOR ALL
 # TABLES publication
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        DROP TABLE tab1, tab2, tab3, tab4, tab5, tab6, tab7, test_mix_1,
                           test_part, test_part_a, test_part_b, test_part_c, test_part_d;
 ));
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_mix_2 (a int PRIMARY KEY, b int, c int);
        CREATE PUBLICATION pub_mix_3 FOR TABLE test_mix_2 (a, b);
        CREATE PUBLICATION pub_mix_4 FOR ALL TABLES;
@@ -873,7 +957,8 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO test_mix_2 VALUES (1, 2, 3);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_mix_2 (a int PRIMARY KEY, b int, c int);
        ALTER SUBSCRIPTION sub1 SET PUBLICATION pub_mix_3, pub_mix_4;
        ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
@@ -881,28 +966,31 @@ $node_subscriber->safe_psql('postgres', qq(
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_mix_2 VALUES (4, 5, 6);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_2"),
-   qq(1|2|3
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM test_mix_2"),
+       qq(1|2|3
 4|5|6),
-   'a mix of publications should use a union of column list');
+       'a mix of publications should use a union of column list');
 
 
 # TEST: With a table included in multiple publications, we should use a
 # union of the column lists. If any of the publications is FOR ALL
 # TABLES IN SCHEMA, we should replicate all columns.
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        DROP SUBSCRIPTION sub1;
        CREATE TABLE test_mix_3 (a int PRIMARY KEY, b int, c int);
 ));
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        DROP TABLE test_mix_2;
        CREATE TABLE test_mix_3 (a int PRIMARY KEY, b int, c int);
        CREATE PUBLICATION pub_mix_5 FOR TABLE test_mix_3 (a, b);
@@ -912,22 +1000,24 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO test_mix_3 VALUES (1, 2, 3);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_5, pub_mix_6;
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_mix_3 VALUES (4, 5, 6);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_3"),
-   qq(1|2|3
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM test_mix_3"),
+       qq(1|2|3
 4|5|6),
-   'a mix of publications should use a union of column list');
+       'a mix of publications should use a union of column list');
 
 
 # TEST: Check handling of publish_via_partition_root - if a partition is
@@ -935,7 +1025,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_3"),
 # defined for the whole table (not the partitions) - both during the initial
 # sync and when replicating changes. This is what we do for row filters.
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        DROP SUBSCRIPTION sub1;
 
        CREATE TABLE test_root (a int PRIMARY KEY, b int, c int) PARTITION BY RANGE (a);
@@ -943,7 +1034,8 @@ $node_subscriber->safe_psql('postgres', qq(
        CREATE TABLE test_root_2 PARTITION OF test_root FOR VALUES FROM (10) TO (20);
 ));
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        CREATE TABLE test_root (a int PRIMARY KEY, b int, c int) PARTITION BY RANGE (a);
        CREATE TABLE test_root_1 PARTITION OF test_root FOR VALUES FROM (1) TO (10);
        CREATE TABLE test_root_2 PARTITION OF test_root FOR VALUES FROM (10) TO (20);
@@ -955,25 +1047,28 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO test_root VALUES (10, 20, 30);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_root_true;
 ));
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO test_root VALUES (2, 3, 4);
        INSERT INTO test_root VALUES (11, 21, 31);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_root ORDER BY a, b, c"),
-   qq(1||
+is( $node_subscriber->safe_psql(
+               'postgres', "SELECT * FROM test_root ORDER BY a, b, c"),
+       qq(1||
 2||
 10||
 11||),
-   'publication via partition root applies column list');
+       'publication via partition root applies column list');
 
 
 # TEST: Multiple publications which publish schema of parent table and
@@ -982,7 +1077,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_root ORDER BY a, b
 # also directly (with a column list). The expected outcome is that there
 # is no column list.
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        DROP PUBLICATION pub1, pub2, pub3, pub4, pub5, pub6, pub7, pub8;
 
        CREATE SCHEMA s1;
@@ -996,7 +1092,8 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO s1.t VALUES (1, 2, 3);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        CREATE SCHEMA s1;
        CREATE TABLE s1.t (a int, b int, c int) PARTITION BY RANGE (a);
        CREATE TABLE t_1 PARTITION OF s1.t FOR VALUES FROM (1) TO (10);
@@ -1006,21 +1103,23 @@ $node_subscriber->safe_psql('postgres', qq(
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO s1.t VALUES (4, 5, 6);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM s1.t ORDER BY a"),
-   qq(1|2|3
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM s1.t ORDER BY a"),
+       qq(1|2|3
 4|5|6),
-   'two publications, publishing the same relation');
+       'two publications, publishing the same relation');
 
 # Now resync the subscription, but with publications in the opposite order.
 # The result should be the same.
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        TRUNCATE s1.t;
 
        ALTER SUBSCRIPTION sub1 SET PUBLICATION pub2, pub1;
@@ -1028,22 +1127,24 @@ $node_subscriber->safe_psql('postgres', qq(
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO s1.t VALUES (7, 8, 9);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM s1.t ORDER BY a"),
-   qq(7|8|9),
-   'two publications, publishing the same relation');
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM s1.t ORDER BY a"),
+       qq(7|8|9),
+       'two publications, publishing the same relation');
 
 
 # TEST: One publication, containing both the parent and child relations.
 # The expected outcome is list "a", because that's the column list defined
 # for the top-most ancestor added to the publication.
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        DROP SCHEMA s1 CASCADE;
        CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
        CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1057,7 +1158,8 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO t VALUES (1, 2, 3);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        DROP SCHEMA s1 CASCADE;
        CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
        CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1069,16 +1171,18 @@ $node_subscriber->safe_psql('postgres', qq(
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO t VALUES (4, 5, 6);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM t ORDER BY a, b, c"),
-   qq(1||
+is( $node_subscriber->safe_psql(
+               'postgres', "SELECT * FROM t ORDER BY a, b, c"),
+       qq(1||
 4||),
-   'publication containing both parent and child relation');
+       'publication containing both parent and child relation');
 
 
 # TEST: One publication, containing both the parent and child relations.
@@ -1087,7 +1191,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM t ORDER BY a, b, c"),
 # Note: The difference from the preceding test is that in this case both
 # relations have a column list defined.
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        DROP TABLE t;
        CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
        CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1101,7 +1206,8 @@ $node_publisher->safe_psql('postgres', qq(
        INSERT INTO t VALUES (1, 2, 3);
 ));
 
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+       'postgres', qq(
        DROP TABLE t;
        CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
        CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1113,16 +1219,18 @@ $node_subscriber->safe_psql('postgres', qq(
 
 wait_for_subscription_sync($node_subscriber);
 
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+       'postgres', qq(
        INSERT INTO t VALUES (4, 5, 6);
 ));
 
 $node_publisher->wait_for_catchup('sub1');
 
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM t ORDER BY a, b, c"),
-   qq(1||
+is( $node_subscriber->safe_psql(
+               'postgres', "SELECT * FROM t ORDER BY a, b, c"),
+       qq(1||
 4||),
-   'publication containing both parent and child relation');
+       'publication containing both parent and child relation');
 
 
 $node_subscriber->stop('fast');
index 0803698d5797eac8310971857dd138b6f062b812..4ffb6bd54747a217ef26e9334d2f7d9d5137c29a 100644
@@ -89,6 +89,7 @@ sub generate_hash_function
   FIND_PARAMS:
        for ($hash_seed1 = 0; $hash_seed1 < 10; $hash_seed1++)
        {
+
                for ($hash_seed2 = 0; $hash_seed2 < 10; $hash_seed2++)
                {
                        foreach (17, 31, 127, 8191)
index b0d4360c7483ab35fdc21c74b0360903c77c75bd..59268a0bb60960758ffca1452b4de77108d1644b 100644
@@ -4,10 +4,10 @@ use warnings;
 our $config;
 
 $config->{"tap_tests"} = 1;
-$config->{"asserts"} = 1;
+$config->{"asserts"}   = 1;
 
 $config->{"openssl"} = "c:/openssl/1.1/";
-$config->{"perl"} = "c:/strawberry/$ENV{DEFAULT_PERL_VERSION}/perl/";
-$config->{"python"} = "c:/python/";
+$config->{"perl"}    = "c:/strawberry/$ENV{DEFAULT_PERL_VERSION}/perl/";
+$config->{"python"}  = "c:/python/";
 
 1;
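The other recurring perltidy effect, visible in the hunk above, is vertical alignment: '=' signs in a run of consecutive assignments are padded out to a common column. A tiny illustration with invented names (not part of the patch):

	# before perltidy:
	#   my $short = 1;
	#   my $much_longer_name = 2;
	# after perltidy pads the '=' into a common column:
	my $short            = 1;
	my $much_longer_name = 2;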
index 834fcac5b54314d4286508de60e60d6d6f912c13..7b51ae3c2013c970fb24e9e10def1ab3cbe4feaf 100755
@@ -28,17 +28,18 @@ use warnings;
 for my $include_file (@ARGV)
 {
        open(my $rfh, '<', $include_file) || die "$include_file: $!";
-       my $buffer = '';
+       my $buffer                = '';
        my $num_pgdllimport_added = 0;
 
        while (my $raw_line = <$rfh>)
        {
-               my      $needs_pgdllimport = 1;
+               my $needs_pgdllimport = 1;
 
                # By convention we declare global variables explicitly extern. We're
                # looking for those not already marked with PGDLLIMPORT.
-               $needs_pgdllimport = 0 if $raw_line !~ /^extern\s+/
-                       || $raw_line =~ /PGDLLIMPORT/;
+               $needs_pgdllimport = 0
+                 if $raw_line !~ /^extern\s+/
+                 || $raw_line =~ /PGDLLIMPORT/;
 
                # Make a copy of the line and perform a simple-minded comment strip.
                # Also strip trailing whitespace.
@@ -48,8 +49,9 @@ for my $include_file (@ARGV)
 
                # Variable declarations should end in a semicolon. If we see an
                # opening parenthesis, it's probably a function declaration.
-               $needs_pgdllimport = 0 if $stripped_line !~ /;$/
-                       || $stripped_line =~ /\(/;
+               $needs_pgdllimport = 0
+                 if $stripped_line !~ /;$/
+                 || $stripped_line =~ /\(/;
 
                # Add PGDLLIMPORT marker, if required.
                if ($needs_pgdllimport)
@@ -68,7 +70,7 @@ for my $include_file (@ARGV)
        if ($num_pgdllimport_added > 0)
        {
                printf "%s: adding %d PGDLLIMPORT markers\n",
-                       $include_file, $num_pgdllimport_added;
+                 $include_file, $num_pgdllimport_added;
                open(my $wfh, '>', $include_file) || die "$include_file: $!";
                print $wfh $buffer;
                close($wfh);
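The loop above implements a small heuristic: a header line needs a PGDLLIMPORT marker when it is an extern declaration that is not already marked, ends in a semicolon, and does not look like a function prototype. A self-contained restatement of that predicate, as a sketch (not part of the patch):

	use strict;
	use warnings;

	sub needs_pgdllimport
	{
		my ($line) = @_;

		# extern declarations only, and skip ones already marked
		return 0 if $line !~ /^extern\s+/ || $line =~ /PGDLLIMPORT/;

		# simple-minded comment strip, then drop trailing whitespace
		$line =~ s{/\*.*?\*/}{}g;
		$line =~ s/\s+$//;

		# variables end in ';'; a '(' suggests a function prototype
		return 0 if $line !~ /;$/ || $line =~ /\(/;
		return 1;
	}

	print needs_pgdllimport('extern int max_connections;'), "\n";    # 1
	print needs_pgdllimport('extern void proc_exit(int code);'), "\n";    # 0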
index 5e312d232e932868ea6d61f1ccacae6119bdab4e..f24d9e534820ab9d3a167b8000b28712f5e5d1c3 100644
@@ -313,7 +313,7 @@ sub WriteItemDefinitionGroup
        my $targetmachine =
          $self->{platform} eq 'Win32' ? 'MachineX86' : 'MachineX64';
 
-       my $includes = join ';', @{$self->{includes}}, "";
+       my $includes = join ';', @{ $self->{includes} }, "";
 
        print $f <<EOF;
   <ItemDefinitionGroup Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">
index c3058399d4904e3f46f7db2c159143d418d14167..e4feda10fd81f991e3e815e7b6002cf3b406c7f9 100644
@@ -35,22 +35,22 @@ my $libpq;
 my @unlink_on_exit;
 
 # Set of variables for modules in contrib/ and src/test/modules/
-my $contrib_defines = {};
-my @contrib_uselibpq = ();
+my $contrib_defines        = {};
+my @contrib_uselibpq       = ();
 my @contrib_uselibpgport   = ();
 my @contrib_uselibpgcommon = ();
-my $contrib_extralibs     = { 'libpq_pipeline' => ['ws2_32.lib'] };
+my $contrib_extralibs      = { 'libpq_pipeline' => ['ws2_32.lib'] };
 my $contrib_extraincludes  = {};
 my $contrib_extrasource    = {};
-my @contrib_excludes = (
-       'bool_plperl',      'commit_ts',
-       'hstore_plperl',    'hstore_plpython',
-       'intagg',           'jsonb_plperl',
-       'jsonb_plpython',   'ltree_plpython',
-       'sepgsql',
-       'brin',             'test_extensions',
-       'test_misc',        'test_pg_dump',
-       'snapshot_too_old', 'unsafe_tests');
+my @contrib_excludes       = (
+       'bool_plperl',     'commit_ts',
+       'hstore_plperl',   'hstore_plpython',
+       'intagg',          'jsonb_plperl',
+       'jsonb_plpython',  'ltree_plpython',
+       'sepgsql',         'brin',
+       'test_extensions', 'test_misc',
+       'test_pg_dump',    'snapshot_too_old',
+       'unsafe_tests');
 
 # Set of variables for frontend modules
 my $frontend_defines = { 'initdb' => 'FRONTEND' };
@@ -286,16 +286,18 @@ sub mkvcbuild
 
        my $libpq_testclient =
          $solution->AddProject('libpq_testclient', 'exe', 'misc',
-                                                       'src/interfaces/libpq/test');
-       $libpq_testclient->AddFile('src/interfaces/libpq/test/libpq_testclient.c');
+               'src/interfaces/libpq/test');
+       $libpq_testclient->AddFile(
+               'src/interfaces/libpq/test/libpq_testclient.c');
        $libpq_testclient->AddIncludeDir('src/interfaces/libpq');
        $libpq_testclient->AddReference($libpgport, $libpq);
        $libpq_testclient->AddLibrary('ws2_32.lib');
 
        my $libpq_uri_regress =
          $solution->AddProject('libpq_uri_regress', 'exe', 'misc',
-                                                       'src/interfaces/libpq/test');
-       $libpq_uri_regress->AddFile('src/interfaces/libpq/test/libpq_uri_regress.c');
+               'src/interfaces/libpq/test');
+       $libpq_uri_regress->AddFile(
+               'src/interfaces/libpq/test/libpq_uri_regress.c');
        $libpq_uri_regress->AddIncludeDir('src/interfaces/libpq');
        $libpq_uri_regress->AddReference($libpgport, $libpq);
        $libpq_uri_regress->AddLibrary('ws2_32.lib');
@@ -464,7 +466,8 @@ sub mkvcbuild
 
        if (!$solution->{options}->{openssl})
        {
-               push @contrib_excludes, 'sslinfo', 'ssl_passphrase_callback', 'pgcrypto';
+               push @contrib_excludes, 'sslinfo', 'ssl_passphrase_callback',
+                 'pgcrypto';
        }
 
        if (!$solution->{options}->{uuid})
@@ -508,7 +511,8 @@ sub mkvcbuild
 
                my $pymajorver = substr($pyver, 0, 1);
 
-               die "Python version $pyver is too old (version 3 or later is required)"
+               die
+                 "Python version $pyver is too old (version 3 or later is required)"
                  if int($pymajorver) < 3;
 
                my $plpython = $solution->AddProject('plpython' . $pymajorver,
@@ -926,7 +930,7 @@ sub AddTransformModule
        # Add PL dependencies
        $p->AddIncludeDir($pl_src);
        $p->AddReference($pl_proj);
-       $p->AddIncludeDir($_) for @{$pl_proj->{includes}};
+       $p->AddIncludeDir($_) for @{ $pl_proj->{includes} };
        foreach my $pl_lib (@{ $pl_proj->{libraries} })
        {
                $p->AddLibrary($pl_lib);
@@ -936,7 +940,7 @@ sub AddTransformModule
        if ($type_proj)
        {
                $p->AddIncludeDir($type_src);
-               $p->AddIncludeDir($_) for @{$type_proj->{includes}};
+               $p->AddIncludeDir($_) for @{ $type_proj->{includes} };
                foreach my $type_lib (@{ $type_proj->{libraries} })
                {
                        $p->AddLibrary($type_lib);
@@ -950,9 +954,9 @@ sub AddTransformModule
 # Add a simple contrib project
 sub AddContrib
 {
-       my $subdir = shift;
-       my $n      = shift;
-       my $mf     = Project::read_file("$subdir/$n/Makefile");
+       my $subdir   = shift;
+       my $n        = shift;
+       my $mf       = Project::read_file("$subdir/$n/Makefile");
        my @projects = ();
 
        if ($mf =~ /^MODULE_big\s*=\s*(.*)$/mg)
@@ -988,7 +992,8 @@ sub AddContrib
        }
 
        # Process custom compiler flags
-       if ($mf =~ /^PG_CPPFLAGS\s*=\s*(.*)$/mg || $mf =~ /^override\s*CPPFLAGS\s*[+:]?=\s*(.*)$/mg)
+       if (   $mf =~ /^PG_CPPFLAGS\s*=\s*(.*)$/mg
+               || $mf =~ /^override\s*CPPFLAGS\s*[+:]?=\s*(.*)$/mg)
        {
                foreach my $flag (split /\s+/, $1)
                {
index d39c502f30afcf313c6d0c4520f4afb1cb20cf34..570bab563a71eeaaff20306511799b09bdfe6c4a 100644
@@ -76,11 +76,11 @@ sub AddFiles
 # name but a different file extension and add those files too.
 sub FindAndAddAdditionalFiles
 {
-       my $self = shift;
+       my $self  = shift;
        my $fname = shift;
        $fname =~ /(.*)(\.[^.]+)$/;
        my $filenoext = $1;
-       my $fileext = $2;
+       my $fileext   = $2;
 
        # For .c files, check if either a .l or .y file of the same name
        # exists and add that too.
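The pattern match above relies on greediness to split a path at its last dot: (.*) consumes as much as it can, leaving (\.[^.]+)$ the final extension. For example (illustration only, not in the patch):

	my ($filenoext, $fileext) =
	  'src/backend/parser/scan.c' =~ /(.*)(\.[^.]+)$/;
	# $filenoext is 'src/backend/parser/scan' and $fileext is '.c', so the
	# build can probe for a companion 'scan.l' with the same stem.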
@@ -161,7 +161,7 @@ sub AddReference
 
        while (my $ref = shift)
        {
-               if (! grep { $_ eq $ref} @{ $self->{references} })
+               if (!grep { $_ eq $ref } @{ $self->{references} })
                {
                        push @{ $self->{references} }, $ref;
                }
@@ -181,7 +181,7 @@ sub AddLibrary
                $lib = '&quot;' . $lib . "&quot;";
        }
 
-       if (! grep { $_ eq $lib} @{ $self->{libraries} })
+       if (!grep { $_ eq $lib } @{ $self->{libraries} })
        {
                push @{ $self->{libraries} }, $lib;
        }
@@ -199,7 +199,7 @@ sub AddIncludeDir
 
        foreach my $inc (split(/;/, $incstr))
        {
-               if (! grep { $_ eq $inc} @{ $self->{includes} })
+               if (!grep { $_ eq $inc } @{ $self->{includes} })
                {
                        push @{ $self->{includes} }, $inc;
                }
index 03357095b20cd5fdc1948d2fbc5f7d34c136c3a0..d30e8fcb117be3167d3ce55c85d2218b8d973cb3 100644
@@ -349,7 +349,7 @@ sub GenerateFiles
                HAVE_READLINE_READLINE_H    => undef,
                HAVE_READLINK               => undef,
                HAVE_READV                  => undef,
-               HAVE_RL_COMPLETION_MATCHES               => undef,
+               HAVE_RL_COMPLETION_MATCHES  => undef,
                HAVE_RL_COMPLETION_SUPPRESS_QUOTE        => undef,
                HAVE_RL_FILENAME_COMPLETION_FUNCTION     => undef,
                HAVE_RL_FILENAME_QUOTE_CHARACTERS        => undef,
index 65b7be795c3af723890a87c59cdf0395fc48b2bd..c3729f6be5e82e3de4c95b9d3ed762cd906c8af6 100644
@@ -70,10 +70,10 @@ copy("$Config/regress/regress.dll",               "src/test/regress");
 copy("$Config/dummy_seclabel/dummy_seclabel.dll", "src/test/regress");
 
 # Configuration settings used by TAP tests
-$ENV{with_ssl} = $config->{openssl} ? 'openssl' : 'no';
-$ENV{with_ldap} = $config->{ldap} ? 'yes' : 'no';
-$ENV{with_icu} = $config->{icu} ? 'yes' : 'no';
-$ENV{with_gssapi} = $config->{gss} ? 'yes' : 'no';
+$ENV{with_ssl}    = $config->{openssl} ? 'openssl' : 'no';
+$ENV{with_ldap}   = $config->{ldap}    ? 'yes'     : 'no';
+$ENV{with_icu}    = $config->{icu}     ? 'yes'     : 'no';
+$ENV{with_gssapi} = $config->{gss}     ? 'yes'     : 'no';
 $ENV{with_krb_srvnam} = $config->{krb_srvnam} || 'postgres';
 $ENV{with_readline} = 'no';
 
index 87ee7bf86628102ebb5301a4141e8cfb0aff9967..dd1214977a87df410f41549b4162c44a14f44304 100644
@@ -4,6 +4,7 @@ ACL_SIZE_INFORMATION
 AFFIX
 ASN1_INTEGER
 ASN1_OBJECT
+ASN1_OCTET_STRING
 ASN1_STRING
 AV
 A_ArrayExpr
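The typedefs.list hunks that follow feed pgindent, which needs an up-to-date inventory of type names to indent declarations and casts correctly; by convention the file is kept in C-locale (byte-wise) sort order, which is why the PvIndVacStatus to PVIndVacStatus case fix further down also moves that entry. A quick order check, as a sketch (not part of the patch; assumes the usual in-tree path):

	use strict;
	use warnings;

	open my $fh, '<', 'src/tools/pgindent/typedefs.list' or die $!;
	my @typedefs = <$fh>;
	close $fh;

	# Perl's default sort is byte-wise string comparison, matching the
	# C-locale order the list is maintained in.
	print join('', @typedefs) eq join('', sort @typedefs)
	  ? "typedefs.list is sorted\n"
	  : "typedefs.list is NOT sorted\n";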
@@ -63,6 +64,7 @@ AllocSetFreeList
 AllocateDesc
 AllocateDescKind
 AlterCollationStmt
+AlterDatabaseRefreshCollStmt
 AlterDatabaseSetStmt
 AlterDatabaseStmt
 AlterDefaultPrivilegesStmt
@@ -80,6 +82,7 @@ AlterOpFamilyStmt
 AlterOperatorStmt
 AlterOwnerStmt
 AlterPolicyStmt
+AlterPublicationAction
 AlterPublicationStmt
 AlterRoleSetStmt
 AlterRoleStmt
@@ -117,11 +120,17 @@ ApplyErrorCallbackArg
 ApplyExecutionData
 ApplySubXactData
 Archive
+ArchiveCheckConfiguredCB
 ArchiveEntryPtrType
+ArchiveFileCB
 ArchiveFormat
 ArchiveHandle
 ArchiveMode
+ArchiveModuleCallbacks
+ArchiveModuleInit
 ArchiveOpts
+ArchiveShutdownCB
+ArchiveStreamState
 ArchiverOutput
 ArchiverStage
 ArrayAnalyzeExtraData
@@ -226,6 +235,8 @@ BackgroundWorkerHandle
 BackgroundWorkerSlot
 Barrier
 BaseBackupCmd
+BaseBackupTargetHandle
+BaseBackupTargetType
 BeginDirectModify_function
 BeginForeignInsert_function
 BeginForeignModify_function
@@ -237,6 +248,7 @@ BgwHandleStatus
 BinaryArithmFunc
 BindParamCbData
 BipartiteMatchState
+BitString
 BitmapAnd
 BitmapAndPath
 BitmapAndState
@@ -271,11 +283,11 @@ BloomScanOpaqueData
 BloomSignatureWord
 BloomState
 BloomTuple
-BlowfishContext
 BoolAggState
 BoolExpr
 BoolExprType
 BoolTestType
+Boolean
 BooleanTest
 BpChar
 BrinBuildState
@@ -336,6 +348,7 @@ CachedPlanSource
 CallContext
 CallStmt
 CancelRequestPacket
+Cardinality
 CaseExpr
 CaseTestExpr
 CaseWhen
@@ -346,6 +359,7 @@ CatCTup
 CatCache
 CatCacheHeader
 CatalogId
+CatalogIdMapEntry
 CatalogIndexState
 ChangeVarNodes_context
 CheckPoint
@@ -413,6 +427,7 @@ CompositeIOData
 CompositeTypeStmt
 CompoundAffixFlag
 CompressionAlgorithm
+CompressionLocation
 CompressorState
 ComputeXidHorizonsResult
 ConditionVariable
@@ -426,7 +441,6 @@ ConnParams
 ConnStatusType
 ConnType
 ConnectionStateEnum
-ConnsAllowedState
 ConsiderSplitContext
 Const
 ConstrCheck
@@ -446,6 +460,7 @@ CopyDest
 CopyFormatOptions
 CopyFromState
 CopyFromStateData
+CopyHeaderChoice
 CopyInsertMethod
 CopyMultiInsertBuffer
 CopyMultiInsertInfo
@@ -582,8 +597,10 @@ DumpComponents
 DumpId
 DumpOptions
 DumpSignalInformation
+DumpableAcl
 DumpableObject
 DumpableObjectType
+DumpableObjectWithAcl
 DynamicFileList
 DynamicZoneAbbrev
 EC_KEY
@@ -594,6 +611,7 @@ EOM_get_flat_size_method
 EPQState
 EPlan
 EState
+EStatus
 EVP_CIPHER
 EVP_CIPHER_CTX
 EVP_MD
@@ -635,9 +653,9 @@ EventTriggerInfo
 EventTriggerQueryState
 ExceptionLabelMap
 ExceptionMap
-ExclusiveBackupState
 ExecAuxRowMark
 ExecEvalBoolSubroutine
+ExecEvalJsonExprContext
 ExecEvalSubroutine
 ExecForeignBatchInsert_function
 ExecForeignDelete_function
@@ -690,14 +708,12 @@ ExtensibleNodeEntry
 ExtensibleNodeMethods
 ExtensionControlFile
 ExtensionInfo
-ExtensionMemberId
 ExtensionVersionInfo
 FDWCollateState
 FD_SET
 FILE
 FILETIME
-FILE_INFORMATION_CLASS
-FILE_STANDARD_INFORMATION
+FPI
 FSMAddress
 FSMPage
 FSMPageData
@@ -724,6 +740,7 @@ FixedParallelExecutorState
 FixedParallelState
 FixedParamState
 FlagMode
+Float
 FlushPosition
 FmgrBuiltin
 FmgrHookEventType
@@ -793,6 +810,7 @@ FormData_pg_sequence_data
 FormData_pg_shdepend
 FormData_pg_statistic
 FormData_pg_statistic_ext
+FormData_pg_statistic_ext_data
 FormData_pg_subscription
 FormData_pg_subscription_rel
 FormData_pg_tablespace
@@ -850,6 +868,7 @@ Form_pg_sequence_data
 Form_pg_shdepend
 Form_pg_statistic
 Form_pg_statistic_ext
+Form_pg_statistic_ext_data
 Form_pg_subscription
 Form_pg_subscription_rel
 Form_pg_tablespace
@@ -916,6 +935,7 @@ GISTSearchItem
 GISTTYPE
 GIST_SPLITVEC
 GMReaderTupleBuffer
+GROUP
 GV
 Gather
 GatherMerge
@@ -980,6 +1000,7 @@ GistSplitVector
 GistTsVectorOptions
 GistVacState
 GlobalTransaction
+GlobalVisHorizonKind
 GlobalVisState
 GrantRoleStmt
 GrantStmt
@@ -1020,7 +1041,6 @@ HASHELEMENT
 HASHHDR
 HASHSEGMENT
 HASH_SEQ_STATUS
-HCRYPTPROV
 HE
 HEntry
 HIST_ENTRY
@@ -1063,7 +1083,6 @@ HashScanPosData
 HashScanPosItem
 HashSkewBucket
 HashState
-HashTapeInfo
 HashValueFunc
 HbaLine
 HeadlineJsonState
@@ -1087,7 +1106,6 @@ INFIX
 INT128
 INTERFACE_INFO
 IOFuncSelector
-IO_STATUS_BLOCK
 IPCompareMethod
 ITEM
 IV
@@ -1158,13 +1176,14 @@ Instrumentation
 Int128AggState
 Int8TransTypeData
 IntRBTreeNode
+Integer
 IntegerSet
 InternalDefaultACL
 InternalGrant
 Interval
 IntoClause
-InvalidationChunk
-InvalidationListHeader
+InvalMessageArray
+InvalidationMsgsGroup
 IpcMemoryId
 IpcMemoryKey
 IpcMemoryState
@@ -1206,10 +1225,31 @@ JoinState
 JoinType
 JsObject
 JsValue
+JsonAggConstructor
 JsonAggState
+JsonArgument
+JsonArrayAgg
+JsonArrayConstructor
+JsonArrayQueryConstructor
 JsonBaseObjectInfo
+JsonBehavior
+JsonBehaviorType
+JsonCoercion
+JsonCommon
+JsonConstructorExpr
+JsonConstructorType
+JsonEncoding
+JsonExpr
+JsonExprOp
+JsonFormat
+JsonFormatType
+JsonFunc
+JsonFuncExpr
 JsonHashEntry
+JsonIsPredicate
+JsonItemCoercions
 JsonIterateStringValuesAction
+JsonKeyValue
 JsonLexContext
 JsonLikeRegexContext
 JsonManifestFileField
@@ -1217,10 +1257,15 @@ JsonManifestParseContext
 JsonManifestParseState
 JsonManifestSemanticState
 JsonManifestWALRangeField
+JsonObjectAgg
+JsonObjectConstructor
+JsonOutput
 JsonParseContext
 JsonParseErrorType
+JsonParseExpr
 JsonPath
 JsonPathBool
+JsonPathDatatypeStatus
 JsonPathExecContext
 JsonPathExecResult
 JsonPathGinAddPathItemFunc
@@ -1233,11 +1278,18 @@ JsonPathGinPathItem
 JsonPathItem
 JsonPathItemType
 JsonPathKeyword
+JsonPathMutableContext
 JsonPathParseItem
 JsonPathParseResult
 JsonPathPredicateCallback
 JsonPathString
+JsonPathVarCallback
+JsonPathVariableEvalContext
+JsonQuotes
+JsonReturning
+JsonScalarExpr
 JsonSemAction
+JsonSerializeExpr
 JsonTable
 JsonTableColumn
 JsonTableColumnType
@@ -1252,8 +1304,16 @@ JsonTableSibling
 JsonTokenType
 JsonTransformStringValuesAction
 JsonTypeCategory
+JsonUniqueBuilderState
+JsonUniqueCheckState
+JsonUniqueHashEntry
+JsonUniqueParsingState
+JsonUniqueStackEntry
+JsonValueExpr
 JsonValueList
 JsonValueListIterator
+JsonValueType
+JsonWrapper
 Jsonb
 JsonbAggState
 JsonbContainer
@@ -1268,6 +1328,8 @@ JsonbTypeCategory
 JsonbValue
 JumbleState
 JunkFilter
+KeyAction
+KeyActions
 KeyArray
 KeySuffix
 KeyWord
@@ -1311,6 +1373,7 @@ LPBYTE
 LPCTSTR
 LPCWSTR
 LPDWORD
+LPFILETIME
 LPSECURITY_ATTRIBUTES
 LPSERVICE_STATUS
 LPSTR
@@ -1327,6 +1390,11 @@ LWLock
 LWLockHandle
 LWLockMode
 LWLockPadded
+LZ4F_compressionContext_t
+LZ4F_decompressOptions_t
+LZ4F_decompressionContext_t
+LZ4F_errorCode_t
+LZ4F_preferences_t
 LabelProvider
 LagTracker
 LargeObjectDesc
@@ -1431,6 +1499,7 @@ MBuf
 MCVItem
 MCVList
 MEMORY_BASIC_INFORMATION
+MGVTBL
 MINIDUMPWRITEDUMP
 MINIDUMP_TYPE
 MJEvalResult
@@ -1484,6 +1553,7 @@ ModifyTable
 ModifyTableContext
 ModifyTablePath
 ModifyTableState
+MonotonicFunction
 MorphOpaque
 MsgType
 MultiAssignRef
@@ -1530,6 +1600,7 @@ NotificationHash
 NotificationList
 NotifyStmt
 Nsrt
+NtDllRoutine
 NullIfExpr
 NullTest
 NullTestType
@@ -1604,8 +1675,9 @@ PACL
 PATH
 PBOOL
 PCtxtHandle
+PERL_CONTEXT
+PERL_SI
 PFN
-PFN_NTQUERYINFORMATIONFILE
 PGAlignedBlock
 PGAlignedXLogBlock
 PGAsyncStatusType
@@ -1663,7 +1735,6 @@ PGresParamDesc
 PGresult
 PGresult_data
 PHANDLE
-PIO_STATUS_BLOCK
 PLAINTREE
 PLAssignStmt
 PLUID_AND_ATTRIBUTES
@@ -1793,9 +1864,10 @@ PTEntryArray
 PTIterationArray
 PTOKEN_PRIVILEGES
 PTOKEN_USER
+PULONG
 PUTENVPROC
 PVIndStats
-PvIndVacStatus
+PVIndVacStatus
 PVOID
 PVShared
 PX_Alias
@@ -1897,8 +1969,11 @@ PathClauseUsage
 PathCostComparison
 PathHashStack
 PathKey
+PathKeyInfo
 PathKeysComparison
 PathTarget
+PathkeyMutatorState
+PathkeySortCost
 PatternInfo
 PatternInfoArray
 Pattern_Prefix_Status
@@ -1908,8 +1983,8 @@ PendingRelDelete
 PendingRelSync
 PendingUnlinkEntry
 PendingWriteback
+PerLockTagEntry
 PerlInterpreter
-Perl_check_t
 Perl_ppaddr_t
 Permutation
 PermutationStep
@@ -1964,7 +2039,6 @@ PgStat_Kind
 PgStat_KindInfo
 PgStat_LocalState
 PgStat_PendingDroppedStatsItem
-PgStat_ReplSlotStats
 PgStat_SLRUStats
 PgStat_ShmemControl
 PgStat_Snapshot
@@ -2054,6 +2128,7 @@ ProjectSetPath
 ProjectSetState
 ProjectionInfo
 ProjectionPath
+PromptInterruptContext
 ProtocolVersion
 PrsStorage
 PruneState
@@ -2080,7 +2155,6 @@ PushFilter
 PushFilterOps
 PushFunction
 PyCFunction
-PyCodeObject
 PyMappingMethods
 PyMethodDef
 PyModuleDef
@@ -2125,7 +2199,6 @@ RI_QueryKey
 RTEKind
 RWConflict
 RWConflictPoolHeader
-RandomState
 Range
 RangeBound
 RangeBox
@@ -2153,6 +2226,7 @@ ReadBufferMode
 ReadBytePtrType
 ReadExtraTocPtrType
 ReadFunc
+ReadLocalXLogPageNoWaitPrivate
 ReadReplicationSlotCmd
 ReassignOwnedStmt
 RecheckForeignScan_function
@@ -2199,6 +2273,7 @@ RelationInfo
 RelationPtr
 RelationSyncEntry
 RelcacheCallbackFunction
+ReleaseMatchCB
 RelfilenodeMapEntry
 RelfilenodeMapKey
 Relids
@@ -2275,7 +2350,7 @@ RewriteState
 RmgrData
 RmgrDescData
 RmgrId
-RmgrIds
+RoleNameItem
 RoleSpec
 RoleSpecType
 RoleStmtType
@@ -2288,6 +2363,7 @@ RowMarkClause
 RowMarkType
 RowSecurityDesc
 RowSecurityPolicy
+RtlGetLastNtStatus_t
 RuleInfo
 RuleLock
 RuleStmt
@@ -2325,7 +2401,6 @@ SPLITCOST
 SPNode
 SPNodeData
 SPPageDesc
-SQLCmd
 SQLDropObject
 SQLFunctionCache
 SQLFunctionCachePtr
@@ -2343,7 +2418,7 @@ SYNCHRONIZATION_BARRIER
 SampleScan
 SampleScanGetSampleSize_function
 SampleScanState
-SamplerRandomState
+SavedTransactionCharacteristics
 ScalarArrayOpExpr
 ScalarArrayOpExprHashEntry
 ScalarArrayOpExprHashTable
@@ -2525,7 +2600,6 @@ StatEntry
 StatExtEntry
 StateFileChunk
 StatisticExtInfo
-Stats
 StatsBuildData
 StatsData
 StatsElem
@@ -2537,7 +2611,7 @@ Step
 StopList
 StrategyNumber
 StreamCtl
-StreamXidHash
+String
 StringInfo
 StringInfoData
 StripnullState
@@ -2555,6 +2629,7 @@ SubXactInfo
 SubqueryScan
 SubqueryScanPath
 SubqueryScanState
+SubqueryScanStatus
 SubscriptExecSetup
 SubscriptExecSteps
 SubscriptRoutines
@@ -2569,6 +2644,7 @@ SupportRequestIndexCondition
 SupportRequestRows
 SupportRequestSelectivity
 SupportRequestSimplify
+SupportRequestWFuncMonotonic
 Syn
 SyncOps
 SyncRepConfigData
@@ -2631,6 +2707,7 @@ TSVectorData
 TSVectorParseState
 TSVectorStat
 TState
+TStatus
 TStoreState
 TXNEntryFile
 TYPCATEGORY
@@ -2801,7 +2878,6 @@ UniquePath
 UniquePathMethod
 UniqueState
 UnlistenStmt
-UnpackTarState
 UnresolvedTup
 UnresolvedTupData
 UpdateContext
@@ -2820,7 +2896,6 @@ VacuumParams
 VacuumRelation
 VacuumStmt
 ValidateIndexState
-Value
 ValuesScan
 ValuesScanState
 Var
@@ -2839,6 +2914,7 @@ VariableShowStmt
 VariableSpace
 VariableStatData
 VariableSubstituteHook
+Variables
 VersionedQuery
 Vfd
 ViewCheckOption
@@ -2878,7 +2954,6 @@ WaitEventTimeout
 WaitPMResult
 WalCloseMethod
 WalCompression
-WalCompressionMethod
 WalLevel
 WalRcvData
 WalRcvExecResult
@@ -2898,6 +2973,7 @@ Walfile
 WindowAgg
 WindowAggPath
 WindowAggState
+WindowAggStatus
 WindowClause
 WindowClauseSortData
 WindowDef
@@ -2944,7 +3020,6 @@ XLogCtlData
 XLogCtlInsert
 XLogDumpConfig
 XLogDumpPrivate
-XLogDumpStats
 XLogLongPageHeader
 XLogLongPageHeaderData
 XLogPageHeader
@@ -2952,13 +3027,14 @@ XLogPageHeaderData
 XLogPageReadCB
 XLogPageReadPrivate
 XLogPageReadResult
+XLogPrefetchStats
 XLogPrefetcher
 XLogPrefetcherFilter
-XLogPrefetchStats
 XLogReaderRoutine
 XLogReaderState
 XLogRecData
 XLogRecPtr
+XLogRecStats
 XLogRecord
 XLogRecordBlockCompressHeader
 XLogRecordBlockHeader
@@ -2968,8 +3044,10 @@ XLogRecoveryCtlData
 XLogRedoAction
 XLogSegNo
 XLogSource
+XLogStats
 XLogwrtResult
 XLogwrtRqst
+XPV
 XPVIV
 XPVMG
 XactCallback
@@ -2988,6 +3066,10 @@ XmlTableBuilderData
 YYLTYPE
 YYSTYPE
 YY_BUFFER_STATE
+ZSTD_CCtx
+ZSTD_DCtx
+ZSTD_inBuffer
+ZSTD_outBuffer
 _SPI_connection
 _SPI_plan
 __AssignProcessToJobObject
@@ -2999,6 +3081,7 @@ __SetInformationJobObject
 __time64_t
 _dev_t
 _ino_t
+_locale_t
 _resultmap
 _stringlist
 acquireLocksOnSubLinks_context
@@ -3041,6 +3124,29 @@ backup_manifest_info
 backup_manifest_option
 base_yy_extra_type
 basebackup_options
+bbsink
+bbsink_copystream
+bbsink_gzip
+bbsink_lz4
+bbsink_ops
+bbsink_server
+bbsink_shell
+bbsink_state
+bbsink_throttle
+bbsink_zstd
+bbstreamer
+bbstreamer_archive_context
+bbstreamer_extractor
+bbstreamer_gzip_decompressor
+bbstreamer_gzip_writer
+bbstreamer_lz4_frame
+bbstreamer_member
+bbstreamer_ops
+bbstreamer_plain_writer
+bbstreamer_recovery_injector
+bbstreamer_tar_archiver
+bbstreamer_tar_parser
+bbstreamer_zstd_frame
 bgworker_main_type
 binaryheap
 binaryheap_comparator
@@ -3049,11 +3155,14 @@ bits16
 bits32
 bits8
 bloom_filter
+boolKEY
 brin_column_state
 brin_serialize_callback_type
 bytea
 cached_re_str
+canonicalize_state
 cashKEY
+catalogid_hash
 cfp
 check_agg_arguments_context
 check_function_callback
@@ -3064,9 +3173,7 @@ check_ungrouped_columns_context
 chr
 clock_t
 cmpEntriesArg
-cmpfunc
 codes_t
-coercion
 collation_cache_entry
 color
 colormaprange
@@ -3159,6 +3266,7 @@ find_expr_references_context
 fix_join_expr_context
 fix_scan_expr_context
 fix_upper_expr_context
+fix_windowagg_cond_context
 flatten_join_alias_vars_context
 float4
 float4KEY
@@ -3239,7 +3347,6 @@ init_function
 inline_cte_walker_context
 inline_error_callback_arg
 ino_t
-inquiry
 instr_time
 int128
 int16
@@ -3319,16 +3426,8 @@ mix_data_t
 mixedStruct
 mode_t
 movedb_failure_params
-mp_digit
-mp_int
-mp_result
-mp_sign
-mp_size
-mp_small
-mp_usmall
-mp_word
-mpz_t
 multirange_bsearch_comparison
+multirange_unnest_fctx
 mxact
 mxtruncinfo
 needs_fmgr_hook_type
@@ -3338,6 +3437,7 @@ normal_rand_fctx
 ntile_context
 numeric
 object_access_hook_type
+object_access_hook_type_str
 off_t
 oidKEY
 oidvector
@@ -3391,6 +3491,7 @@ pg_locale_t
 pg_mb_radix_tree
 pg_md5_ctx
 pg_on_exit_callback
+pg_prng_state
 pg_re_flags
 pg_saslprep_rc
 pg_sha1_ctx
@@ -3426,7 +3527,9 @@ pgssSharedState
 pgssStoreKind
 pgssVersion
 pgstat_entry_ref_hash_hash
+pgstat_entry_ref_hash_iterator
 pgstat_page
+pgstat_snapshot_hash
 pgstattuple_type
 pgthreadlock_t
 pid_t
@@ -3529,7 +3632,6 @@ ret_type
 rewind_source
 rewrite_event
 rf_context
-rijndael_ctx
 rm_detail_t
 role_auth_extra
 row_security_policy_hook_type
@@ -3561,6 +3663,7 @@ slist_mutable_iter
 slist_node
 slock_t
 socket_set
+socklen_t
 spgBulkDeleteState
 spgChooseIn
 spgChooseOut
@@ -3603,7 +3706,6 @@ string
 substitute_actual_parameters_context
 substitute_actual_srf_parameters_context
 substitute_phv_relids_context
-svtype
 symbol
 tablespaceinfo
 teSection
@@ -3635,8 +3737,6 @@ tuplehash_hash
 tuplehash_iterator
 type
 tzEntry
-u1byte
-u4byte
 u_char
 u_int
 uchr