diff options
| field | value | date |
|---|---|---|
| author | Tom Lane | 2023-05-19 21:24:48 +0000 |
| committer | Tom Lane | 2023-05-19 21:24:48 +0000 |
| commit | 0245f8db36f375326c2bae0c3420d3c77714e72d (patch) | |
| tree | 7ce91f23658a05ea24be4703fb06cdc6b56248f7 /src/test | |
| parent | df6b19fbbc20d830de91d9bea68715a39635b568 (diff) | |
Pre-beta mechanical code beautification.
Run pgindent, pgperltidy, and reformat-dat-files.
This set of diffs is a bit larger than typical. We've updated to
pg_bsd_indent 2.1.2, which properly indents variable declarations that
have multi-line initialization expressions (the continuation lines are
now indented one tab stop). We've also updated to perltidy version
20230309 and changed some of its settings, which reduces its desire to
add whitespace to lines to make assignments etc. line up. Going
forward, that should make for fewer random-seeming changes to existing
code.
Discussion: https://postgr.es/m/20230428092545.qfb3y5wcu4cm75ur@alvherre.pgsql
Diffstat (limited to 'src/test')
78 files changed, 1014 insertions, 803 deletions
diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl index 0680f8b07c5..12552837a8e 100644 --- a/src/test/authentication/t/001_password.pl +++ b/src/test/authentication/t/001_password.pl @@ -23,9 +23,9 @@ if (!$use_unix_sockets) # and then execute a reload to refresh it. sub reset_pg_hba { - my $node = shift; - my $database = shift; - my $role = shift; + my $node = shift; + my $database = shift; + my $role = shift; my $hba_method = shift; unlink($node->data_dir . '/pg_hba.conf'); @@ -95,7 +95,8 @@ $node->safe_psql( RESET scram_iterations;" ); -my $res = $node->safe_psql('postgres', +my $res = $node->safe_psql( + 'postgres', "SELECT substr(rolpassword,1,19) FROM pg_authid WHERE rolname = 'scram_role_iter'"); @@ -106,8 +107,8 @@ is($res, 'SCRAM-SHA-256$1024:', 'scram_iterations in server side ROLE'); # as earlier version cause the session to time out. SKIP: { - skip "IO::Pty and IPC::Run >= 0.98 required", 1 unless - eval { require IO::Pty; IPC::Run->VERSION('0.98'); }; + skip "IO::Pty and IPC::Run >= 0.98 required", 1 + unless eval { require IO::Pty; IPC::Run->VERSION('0.98'); }; # Alter the password on the created role using \password in psql to ensure # that clientside password changes use the scram_iterations value when @@ -117,16 +118,19 @@ SKIP: $session->set_query_timer_restart(); $session->query("SET password_encryption='scram-sha-256';"); $session->query("SET scram_iterations=42;"); - $session->query_until(qr/Enter new password/, "\\password scram_role_iter\n"); + $session->query_until(qr/Enter new password/, + "\\password scram_role_iter\n"); $session->query_until(qr/Enter it again/, "pass\n"); $session->query_until(qr/postgres=# /, "pass\n"); $session->quit; - $res = $node->safe_psql('postgres', + $res = $node->safe_psql( + 'postgres', "SELECT substr(rolpassword,1,17) FROM pg_authid WHERE rolname = 'scram_role_iter'"); - is($res, 'SCRAM-SHA-256$42:', 'scram_iterations in psql \password command'); + is($res, 
'SCRAM-SHA-256$42:', + 'scram_iterations in psql \password command'); } # Create a database to test regular expression. @@ -482,7 +486,7 @@ chmod 0600, $pgpassfile or die; reset_pg_hba($node, 'all', 'all', 'password'); test_conn($node, 'user=scram_role', 'password from pgpass', 0); -test_conn($node, 'user=md5_role', 'password from pgpass', 2); +test_conn($node, 'user=md5_role', 'password from pgpass', 2); append_to_file( $pgpassfile, qq! diff --git a/src/test/authentication/t/002_saslprep.pl b/src/test/authentication/t/002_saslprep.pl index c00f4e1b322..ef158311668 100644 --- a/src/test/authentication/t/002_saslprep.pl +++ b/src/test/authentication/t/002_saslprep.pl @@ -20,7 +20,7 @@ if (!$use_unix_sockets) # and then execute a reload to refresh it. sub reset_pg_hba { - my $node = shift; + my $node = shift; my $hba_method = shift; unlink($node->data_dir . '/pg_hba.conf'); @@ -34,10 +34,10 @@ sub test_login { local $Test::Builder::Level = $Test::Builder::Level + 1; - my $node = shift; - my $role = shift; - my $password = shift; - my $expected_res = shift; + my $node = shift; + my $role = shift; + my $password = shift; + my $expected_res = shift; my $status_string = 'failed'; $status_string = 'success' if ($expected_res eq 0); @@ -93,25 +93,25 @@ CREATE ROLE saslpreptest7_role LOGIN PASSWORD E'foo\\u0627\\u0031bar'; reset_pg_hba($node, 'scram-sha-256'); # Check that #1 and #5 are treated the same as just 'IX' -test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0); +test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0); test_login($node, 'saslpreptest1_role', "\xe2\x85\xa8", 0); # but different from lower case 'ix' test_login($node, 'saslpreptest1_role', "ix", 2); # Check #4 -test_login($node, 'saslpreptest4a_role', "a", 0); +test_login($node, 'saslpreptest4a_role', "a", 0); test_login($node, 'saslpreptest4a_role', "\xc2\xaa", 0); -test_login($node, 'saslpreptest4b_role', "a", 0); +test_login($node, 'saslpreptest4b_role', "a", 0); test_login($node, 
'saslpreptest4b_role', "\xc2\xaa", 0); # Check #6 and #7 - In PostgreSQL, contrary to the spec, if the password # contains prohibited characters, we use it as is, without normalization. test_login($node, 'saslpreptest6_role', "foo\x07bar", 0); -test_login($node, 'saslpreptest6_role', "foobar", 2); +test_login($node, 'saslpreptest6_role', "foobar", 2); test_login($node, 'saslpreptest7_role', "foo\xd8\xa71bar", 0); test_login($node, 'saslpreptest7_role', "foo1\xd8\xa7bar", 2); -test_login($node, 'saslpreptest7_role', "foobar", 2); +test_login($node, 'saslpreptest7_role', "foobar", 2); done_testing(); diff --git a/src/test/authentication/t/003_peer.pl b/src/test/authentication/t/003_peer.pl index a6be651ea7f..3272e52cae8 100644 --- a/src/test/authentication/t/003_peer.pl +++ b/src/test/authentication/t/003_peer.pl @@ -20,7 +20,7 @@ if (!$use_unix_sockets) # and then execute a reload to refresh it. sub reset_pg_hba { - my $node = shift; + my $node = shift; my $hba_method = shift; unlink($node->data_dir . '/pg_hba.conf'); @@ -33,10 +33,10 @@ sub reset_pg_hba # and then execute a reload to refresh it. sub reset_pg_ident { - my $node = shift; - my $map_name = shift; + my $node = shift; + my $map_name = shift; my $system_user = shift; - my $pg_user = shift; + my $pg_user = shift; unlink($node->data_dir . '/pg_ident.conf'); $node->append_conf('pg_ident.conf', "$map_name $system_user $pg_user"); diff --git a/src/test/authentication/t/004_file_inclusion.pl b/src/test/authentication/t/004_file_inclusion.pl index 8cd2a8dae4a..55d28ad5864 100644 --- a/src/test/authentication/t/004_file_inclusion.pl +++ b/src/test/authentication/t/004_file_inclusion.pl @@ -37,9 +37,9 @@ my %line_counters = ('hba_rule' => 0, 'ident_rule' => 0); # is loaded by the backend. 
sub add_hba_line { - my $node = shift; + my $node = shift; my $filename = shift; - my $entry = shift; + my $entry = shift; my $globline; my $fileline; my @tokens; @@ -64,7 +64,7 @@ sub add_hba_line $globline = ++$line_counters{'hba_rule'}; # Generate the expected pg_hba_file_rules line - @tokens = split(/ /, $entry); + @tokens = split(/ /, $entry); $tokens[1] = '{' . $tokens[1] . '}'; # database $tokens[2] = '{' . $tokens[2] . '}'; # user_name @@ -95,9 +95,9 @@ sub add_hba_line # returns an entry to match with pg_ident_file_mappings. sub add_ident_line { - my $node = shift; + my $node = shift; my $filename = shift; - my $entry = shift; + my $entry = shift; my $globline; my $fileline; my @tokens; @@ -136,7 +136,7 @@ sub add_ident_line } # Locations for the entry points of the HBA and ident files. -my $hba_file = 'subdir1/pg_hba_custom.conf'; +my $hba_file = 'subdir1/pg_hba_custom.conf'; my $ident_file = 'subdir2/pg_ident_custom.conf'; my $node = PostgreSQL::Test::Cluster->new('primary'); @@ -147,7 +147,7 @@ my $data_dir = $node->data_dir; note "Generating HBA structure with include directives"; -my $hba_expected = ''; +my $hba_expected = ''; my $ident_expected = ''; # customise main auth file names @@ -230,7 +230,7 @@ mkdir("$data_dir/ident_pos"); $ident_expected .= add_ident_line($node, "$ident_file", "include ../pg_ident_pre.conf"); $ident_expected .= add_ident_line($node, 'pg_ident_pre.conf', "pre foo bar"); -$ident_expected .= add_ident_line($node, "$ident_file", "test a b"); +$ident_expected .= add_ident_line($node, "$ident_file", "test a b"); $ident_expected .= add_ident_line($node, "$ident_file", "include ../ident_pos/pg_ident_pos.conf"); $ident_expected .= diff --git a/src/test/icu/t/010_database.pl b/src/test/icu/t/010_database.pl index 715b1bffd66..d3901f5d3f6 100644 --- a/src/test/icu/t/010_database.pl +++ b/src/test/icu/t/010_database.pl @@ -54,7 +54,8 @@ b), # Test error cases in CREATE DATABASE involving locale-related options my ($ret, $stdout, 
$stderr) = $node1->psql('postgres', - q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' TEMPLATE template0 ENCODING UTF8}); + q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' TEMPLATE template0 ENCODING UTF8} +); isnt($ret, 0, "ICU locale must be specified for ICU provider: exit code not 0"); like( diff --git a/src/test/kerberos/t/001_auth.pl b/src/test/kerberos/t/001_auth.pl index e2c928349ff..39c035de32d 100644 --- a/src/test/kerberos/t/001_auth.pl +++ b/src/test/kerberos/t/001_auth.pl @@ -30,26 +30,27 @@ if ($ENV{with_gssapi} ne 'yes') } elsif ($ENV{PG_TEST_EXTRA} !~ /\bkerberos\b/) { - plan skip_all => 'Potentially unsafe test GSSAPI/Kerberos not enabled in PG_TEST_EXTRA'; + plan skip_all => + 'Potentially unsafe test GSSAPI/Kerberos not enabled in PG_TEST_EXTRA'; } my ($krb5_bin_dir, $krb5_sbin_dir); -if ($^O eq 'darwin' && -d "/opt/homebrew" ) +if ($^O eq 'darwin' && -d "/opt/homebrew") { # typical paths for Homebrew on ARM - $krb5_bin_dir = '/opt/homebrew/opt/krb5/bin'; + $krb5_bin_dir = '/opt/homebrew/opt/krb5/bin'; $krb5_sbin_dir = '/opt/homebrew/opt/krb5/sbin'; } elsif ($^O eq 'darwin') { # typical paths for Homebrew on Intel - $krb5_bin_dir = '/usr/local/opt/krb5/bin'; + $krb5_bin_dir = '/usr/local/opt/krb5/bin'; $krb5_sbin_dir = '/usr/local/opt/krb5/sbin'; } elsif ($^O eq 'freebsd') { - $krb5_bin_dir = '/usr/local/bin'; + $krb5_bin_dir = '/usr/local/bin'; $krb5_sbin_dir = '/usr/local/sbin'; } elsif ($^O eq 'linux') @@ -57,44 +58,44 @@ elsif ($^O eq 'linux') $krb5_sbin_dir = '/usr/sbin'; } -my $krb5_config = 'krb5-config'; -my $kinit = 'kinit'; -my $klist = 'klist'; -my $kdb5_util = 'kdb5_util'; +my $krb5_config = 'krb5-config'; +my $kinit = 'kinit'; +my $klist = 'klist'; +my $kdb5_util = 'kdb5_util'; my $kadmin_local = 'kadmin.local'; -my $krb5kdc = 'krb5kdc'; +my $krb5kdc = 'krb5kdc'; if ($krb5_bin_dir && -d $krb5_bin_dir) { $krb5_config = $krb5_bin_dir . '/' . $krb5_config; - $kinit = $krb5_bin_dir . '/' . $kinit; - $klist = $krb5_bin_dir . 
'/' . $klist; + $kinit = $krb5_bin_dir . '/' . $kinit; + $klist = $krb5_bin_dir . '/' . $klist; } if ($krb5_sbin_dir && -d $krb5_sbin_dir) { - $kdb5_util = $krb5_sbin_dir . '/' . $kdb5_util; + $kdb5_util = $krb5_sbin_dir . '/' . $kdb5_util; $kadmin_local = $krb5_sbin_dir . '/' . $kadmin_local; - $krb5kdc = $krb5_sbin_dir . '/' . $krb5kdc; + $krb5kdc = $krb5_sbin_dir . '/' . $krb5kdc; } -my $host = 'auth-test-localhost.postgresql.example.com'; +my $host = 'auth-test-localhost.postgresql.example.com'; my $hostaddr = '127.0.0.1'; -my $realm = 'EXAMPLE.COM'; - -my $krb5_conf = "${PostgreSQL::Test::Utils::tmp_check}/krb5.conf"; -my $kdc_conf = "${PostgreSQL::Test::Utils::tmp_check}/kdc.conf"; -my $krb5_cache = "${PostgreSQL::Test::Utils::tmp_check}/krb5cc"; -my $krb5_log = "${PostgreSQL::Test::Utils::log_path}/krb5libs.log"; -my $kdc_log = "${PostgreSQL::Test::Utils::log_path}/krb5kdc.log"; -my $kdc_port = PostgreSQL::Test::Cluster::get_free_port(); +my $realm = 'EXAMPLE.COM'; + +my $krb5_conf = "${PostgreSQL::Test::Utils::tmp_check}/krb5.conf"; +my $kdc_conf = "${PostgreSQL::Test::Utils::tmp_check}/kdc.conf"; +my $krb5_cache = "${PostgreSQL::Test::Utils::tmp_check}/krb5cc"; +my $krb5_log = "${PostgreSQL::Test::Utils::log_path}/krb5libs.log"; +my $kdc_log = "${PostgreSQL::Test::Utils::log_path}/krb5kdc.log"; +my $kdc_port = PostgreSQL::Test::Cluster::get_free_port(); my $kdc_datadir = "${PostgreSQL::Test::Utils::tmp_check}/krb5kdc"; my $kdc_pidfile = "${PostgreSQL::Test::Utils::tmp_check}/krb5kdc.pid"; -my $keytab = "${PostgreSQL::Test::Utils::tmp_check}/krb5.keytab"; +my $keytab = "${PostgreSQL::Test::Utils::tmp_check}/krb5.keytab"; -my $pgpass = "${PostgreSQL::Test::Utils::tmp_check}/.pgpass"; +my $pgpass = "${PostgreSQL::Test::Utils::tmp_check}/.pgpass"; -my $dbname = 'postgres'; -my $username = 'test1'; +my $dbname = 'postgres'; +my $username = 'test1'; my $application = '001_auth.pl'; note "setting up Kerberos"; @@ -108,10 +109,7 @@ $stdout =~ m/Kerberos 5 release 
([0-9]+\.[0-9]+)/ $krb5_version = $1; # Construct a pgpass file to make sure we don't use it -append_to_file( - $pgpass, - '*:*:*:*:abc123' -); +append_to_file($pgpass, '*:*:*:*:abc123'); chmod 0600, $pgpass; @@ -187,9 +185,9 @@ $realm = { mkdir $kdc_datadir or die; # Ensure that we use test's config and cache files, not global ones. -$ENV{'KRB5_CONFIG'} = $krb5_conf; +$ENV{'KRB5_CONFIG'} = $krb5_conf; $ENV{'KRB5_KDC_PROFILE'} = $kdc_conf; -$ENV{'KRB5CCNAME'} = $krb5_cache; +$ENV{'KRB5CCNAME'} = $krb5_cache; my $service_principal = "$ENV{with_krb_srvnam}/$host"; @@ -224,24 +222,35 @@ $node->start; my $port = $node->port(); $node->safe_psql('postgres', 'CREATE USER test1;'); -$node->safe_psql('postgres', "CREATE USER test2 WITH ENCRYPTED PASSWORD 'abc123';"); +$node->safe_psql('postgres', + "CREATE USER test2 WITH ENCRYPTED PASSWORD 'abc123';"); $node->safe_psql('postgres', 'CREATE EXTENSION postgres_fdw;'); $node->safe_psql('postgres', 'CREATE EXTENSION dblink;'); -$node->safe_psql('postgres', "CREATE SERVER s1 FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host '$host', hostaddr '$hostaddr', port '$port', dbname 'postgres');"); -$node->safe_psql('postgres', "CREATE SERVER s2 FOREIGN DATA WRAPPER postgres_fdw OPTIONS (port '$port', dbname 'postgres', passfile '$pgpass');"); +$node->safe_psql('postgres', + "CREATE SERVER s1 FOREIGN DATA WRAPPER postgres_fdw OPTIONS (host '$host', hostaddr '$hostaddr', port '$port', dbname 'postgres');" +); +$node->safe_psql('postgres', + "CREATE SERVER s2 FOREIGN DATA WRAPPER postgres_fdw OPTIONS (port '$port', dbname 'postgres', passfile '$pgpass');" +); $node->safe_psql('postgres', 'GRANT USAGE ON FOREIGN SERVER s1 TO test1;'); -$node->safe_psql('postgres', "CREATE USER MAPPING FOR test1 SERVER s1 OPTIONS (user 'test1');"); -$node->safe_psql('postgres', "CREATE USER MAPPING FOR test1 SERVER s2 OPTIONS (user 'test2');"); +$node->safe_psql('postgres', + "CREATE USER MAPPING FOR test1 SERVER s1 OPTIONS (user 'test1');"); 
+$node->safe_psql('postgres', + "CREATE USER MAPPING FOR test1 SERVER s2 OPTIONS (user 'test2');"); $node->safe_psql('postgres', "CREATE TABLE t1 (c1 int);"); $node->safe_psql('postgres', "INSERT INTO t1 VALUES (1);"); -$node->safe_psql('postgres', "CREATE FOREIGN TABLE tf1 (c1 int) SERVER s1 OPTIONS (schema_name 'public', table_name 't1');"); +$node->safe_psql('postgres', + "CREATE FOREIGN TABLE tf1 (c1 int) SERVER s1 OPTIONS (schema_name 'public', table_name 't1');" +); $node->safe_psql('postgres', "GRANT SELECT ON t1 TO test1;"); $node->safe_psql('postgres', "GRANT SELECT ON tf1 TO test1;"); -$node->safe_psql('postgres', "CREATE FOREIGN TABLE tf2 (c1 int) SERVER s2 OPTIONS (schema_name 'public', table_name 't1');"); +$node->safe_psql('postgres', + "CREATE FOREIGN TABLE tf2 (c1 int) SERVER s2 OPTIONS (schema_name 'public', table_name 't1');" +); $node->safe_psql('postgres', "GRANT SELECT ON tf2 TO test1;"); # Set up a table for SYSTEM_USER parallel worker testing. @@ -302,13 +311,14 @@ sub test_query $node->connect_ok( $connstr, $test_name, - sql => $query, + sql => $query, expected_stdout => $expected); return; } unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 host all all $hostaddr/32 gss map=mymap @@ -453,7 +463,8 @@ test_query( 'testing system_user with parallel workers'); unlink($node->data_dir . 
'/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 hostgssenc all all $hostaddr/32 gss map=mymap @@ -485,8 +496,7 @@ test_access( "connection authorized: user=$username database=$dbname application_name=$application GSS (authenticated=yes, encrypted=yes, deleg_credentials=no, principal=test1\@$realm)" ); -$node->append_conf('postgresql.conf', - qq{gss_accept_deleg=off}); +$node->append_conf('postgresql.conf', qq{gss_accept_deleg=off}); $node->restart; test_access( @@ -510,8 +520,7 @@ test_access( "connection authorized: user=$username database=$dbname application_name=$application GSS (authenticated=yes, encrypted=yes, deleg_credentials=no, principal=test1\@$realm)" ); -$node->append_conf('postgresql.conf', - qq{gss_accept_deleg=on}); +$node->append_conf('postgresql.conf', qq{gss_accept_deleg=on}); $node->restart; test_access( @@ -560,57 +569,77 @@ my $psql_stderr = ''; my $psql_rc = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "SELECT * FROM dblink('user=test1 dbname=$dbname host=$host hostaddr=$hostaddr port=$port','select 1') as t1(c1 int);", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", stdout => \$psql_out, - stderr => \$psql_stderr -); -is($psql_rc,'3','dblink attempt fails without delegated credentials'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'dblink does not work without delegated credentials'); -like($psql_out, qr/^$/,'dblink does not work without delegated credentials'); + stderr => \$psql_stderr); +is($psql_rc, '3', 'dblink attempt fails without delegated credentials'); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'dblink does not work without delegated credentials'); +like($psql_out, qr/^$/, 'dblink does not work without delegated credentials'); $psql_out = 
''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "SELECT * FROM dblink('user=test2 dbname=$dbname port=$port passfile=$pgpass','select 1') as t1(c1 int);", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", stdout => \$psql_out, - stderr => \$psql_stderr -); -is($psql_rc,'3','dblink does not work without delegated credentials and with passfile'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'dblink does not work without delegated credentials and with passfile'); -like($psql_out, qr/^$/,'dblink does not work without delegated credentials and with passfile'); + stderr => \$psql_stderr); +is($psql_rc, '3', + 'dblink does not work without delegated credentials and with passfile'); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'dblink does not work without delegated credentials and with passfile'); +like($psql_out, qr/^$/, + 'dblink does not work without delegated credentials and with passfile'); $psql_out = ''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "TABLE tf1;", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", stdout => \$psql_out, - stderr => \$psql_stderr -); -is($psql_rc,'3','postgres_fdw does not work without delegated credentials'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'postgres_fdw does not work without delegated credentials'); -like($psql_out, qr/^$/,'postgres_fdw does not work without delegated credentials'); + stderr => \$psql_stderr); +is($psql_rc, '3', 'postgres_fdw does not work without delegated credentials'); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'postgres_fdw does not work without 
delegated credentials'); +like($psql_out, qr/^$/, + 'postgres_fdw does not work without delegated credentials'); $psql_out = ''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "TABLE tf2;", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=require gssdeleg=disable", stdout => \$psql_out, - stderr => \$psql_stderr + stderr => \$psql_stderr); +is($psql_rc, '3', + 'postgres_fdw does not work without delegated credentials and with passfile' +); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'postgres_fdw does not work without delegated credentials and with passfile' +); +like($psql_out, qr/^$/, + 'postgres_fdw does not work without delegated credentials and with passfile' ); -is($psql_rc,'3','postgres_fdw does not work without delegated credentials and with passfile'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'postgres_fdw does not work without delegated credentials and with passfile'); -like($psql_out, qr/^$/,'postgres_fdw does not work without delegated credentials and with passfile'); test_access($node, 'test1', 'SELECT true', 2, 'gssencmode=disable', 'fails with GSS encryption disabled and hostgssenc hba'); @@ -626,7 +655,8 @@ $node->connect_ok( "multiple authentication types requested, works with GSS encryption"); unlink($node->data_dir . 
'/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 hostnogssenc all all $hostaddr/32 gss map=mymap @@ -662,7 +692,8 @@ test_query( "SELECT * FROM dblink('user=test1 dbname=$dbname host=$host hostaddr=$hostaddr port=$port','select 1') as t1(c1 int);", qr/^1$/s, 'gssencmode=prefer gssdeleg=enable', - 'dblink works not-encrypted (server not configured to accept encrypted GSSAPI connections)'); + 'dblink works not-encrypted (server not configured to accept encrypted GSSAPI connections)' +); test_query( $node, @@ -670,39 +701,54 @@ test_query( "TABLE tf1;", qr/^1$/s, 'gssencmode=prefer gssdeleg=enable', - 'postgres_fdw works not-encrypted (server not configured to accept encrypted GSSAPI connections)'); + 'postgres_fdw works not-encrypted (server not configured to accept encrypted GSSAPI connections)' +); $psql_out = ''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "SELECT * FROM dblink('user=test2 dbname=$dbname port=$port passfile=$pgpass','select 1') as t1(c1 int);", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=prefer gssdeleg=enable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=prefer gssdeleg=enable", stdout => \$psql_out, - stderr => \$psql_stderr -); -is($psql_rc,'3','dblink does not work with delegated credentials and with passfile'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'dblink does not work with delegated credentials and with passfile'); -like($psql_out, qr/^$/,'dblink does not work with delegated credentials and with passfile'); + stderr => \$psql_stderr); +is($psql_rc, '3', + 'dblink does not work with delegated credentials and with passfile'); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'dblink does not work with delegated credentials and with passfile'); +like($psql_out, qr/^$/, + 'dblink does not work with delegated credentials and with 
passfile'); $psql_out = ''; $psql_stderr = ''; $psql_rc = $node->psql( - 'postgres', + 'postgres', "TABLE tf2;", - connstr => "user=test1 host=$host hostaddr=$hostaddr gssencmode=prefer gssdeleg=enable", + connstr => + "user=test1 host=$host hostaddr=$hostaddr gssencmode=prefer gssdeleg=enable", stdout => \$psql_out, - stderr => \$psql_stderr + stderr => \$psql_stderr); +is($psql_rc, '3', + 'postgres_fdw does not work with delegated credentials and with passfile' +); +like( + $psql_stderr, + qr/password or GSSAPI delegated credentials required/, + 'postgres_fdw does not work with delegated credentials and with passfile' +); +like($psql_out, qr/^$/, + 'postgres_fdw does not work with delegated credentials and with passfile' ); -is($psql_rc,'3','postgres_fdw does not work with delegated credentials and with passfile'); -like($psql_stderr, qr/password or GSSAPI delegated credentials required/,'postgres_fdw does not work with delegated credentials and with passfile'); -like($psql_out, qr/^$/,'postgres_fdw does not work with delegated credentials and with passfile'); truncate($node->data_dir . '/pg_ident.conf', 0); unlink($node->data_dir . '/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 host all all $hostaddr/32 gss include_realm=0 @@ -729,17 +775,15 @@ test_query( 'dblink works encrypted'); test_query( - $node, - 'test1', - "TABLE tf1;", - qr/^1$/s, + $node, 'test1', "TABLE tf1;", qr/^1$/s, 'gssencmode=require gssdeleg=enable', 'postgres_fdw works encrypted'); # Reset pg_hba.conf, and cause a usermap failure with an authentication # that has passed. unlink($node->data_dir . 
'/pg_hba.conf'); -$node->append_conf('pg_hba.conf', +$node->append_conf( + 'pg_hba.conf', qq{ local all test2 scram-sha-256 host all all $hostaddr/32 gss include_realm=0 krb_realm=EXAMPLE.ORG diff --git a/src/test/ldap/LdapServer.pm b/src/test/ldap/LdapServer.pm index 3cc05b82550..a4c1a1843c8 100644 --- a/src/test/ldap/LdapServer.pm +++ b/src/test/ldap/LdapServer.pm @@ -66,36 +66,36 @@ INIT if ($^O eq 'darwin' && -d '/opt/homebrew/opt/openldap') { # typical paths for Homebrew on ARM - $slapd = '/opt/homebrew/opt/openldap/libexec/slapd'; + $slapd = '/opt/homebrew/opt/openldap/libexec/slapd'; $ldap_schema_dir = '/opt/homebrew/etc/openldap/schema'; } elsif ($^O eq 'darwin' && -d '/usr/local/opt/openldap') { # typical paths for Homebrew on Intel - $slapd = '/usr/local/opt/openldap/libexec/slapd'; + $slapd = '/usr/local/opt/openldap/libexec/slapd'; $ldap_schema_dir = '/usr/local/etc/openldap/schema'; } elsif ($^O eq 'darwin' && -d '/opt/local/etc/openldap') { # typical paths for MacPorts - $slapd = '/opt/local/libexec/slapd'; + $slapd = '/opt/local/libexec/slapd'; $ldap_schema_dir = '/opt/local/etc/openldap/schema'; } elsif ($^O eq 'linux') { - $slapd = '/usr/sbin/slapd'; + $slapd = '/usr/sbin/slapd'; $ldap_schema_dir = '/etc/ldap/schema' if -d '/etc/ldap/schema'; $ldap_schema_dir = '/etc/openldap/schema' if -d '/etc/openldap/schema'; } elsif ($^O eq 'freebsd') { - $slapd = '/usr/local/libexec/slapd'; + $slapd = '/usr/local/libexec/slapd'; $ldap_schema_dir = '/usr/local/etc/openldap/schema'; } elsif ($^O eq 'openbsd') { - $slapd = '/usr/local/libexec/slapd'; + $slapd = '/usr/local/libexec/slapd'; $ldap_schema_dir = '/usr/local/share/examples/openldap/schema'; } else @@ -137,25 +137,25 @@ sub new { die "no suitable binaries found" unless $setup; - my $class = shift; - my $rootpw = shift; - my $authtype = shift; # 'users' or 'anonymous' + my $class = shift; + my $rootpw = shift; + my $authtype = shift; # 'users' or 'anonymous' my $testname = basename((caller)[1], '.pl'); 
- my $self = {}; + my $self = {}; my $test_temp = PostgreSQL::Test::Utils::tempdir("ldap-$testname"); - my $ldap_datadir = "$test_temp/openldap-data"; - my $slapd_certs = "$test_temp/slapd-certs"; + my $ldap_datadir = "$test_temp/openldap-data"; + my $slapd_certs = "$test_temp/slapd-certs"; my $slapd_pidfile = "$test_temp/slapd.pid"; - my $slapd_conf = "$test_temp/slapd.conf"; + my $slapd_conf = "$test_temp/slapd.conf"; my $slapd_logfile = "${PostgreSQL::Test::Utils::log_path}/slapd-$testname.log"; my $ldap_server = 'localhost'; - my $ldap_port = PostgreSQL::Test::Cluster::get_free_port(); - my $ldaps_port = PostgreSQL::Test::Cluster::get_free_port(); - my $ldap_url = "ldap://$ldap_server:$ldap_port"; - my $ldaps_url = "ldaps://$ldap_server:$ldaps_port"; + my $ldap_port = PostgreSQL::Test::Cluster::get_free_port(); + my $ldaps_port = PostgreSQL::Test::Cluster::get_free_port(); + my $ldap_url = "ldap://$ldap_server:$ldap_port"; + my $ldaps_url = "ldaps://$ldap_server:$ldaps_port"; my $ldap_basedn = 'dc=example,dc=net'; my $ldap_rootdn = 'cn=Manager,dc=example,dc=net'; my $ldap_rootpw = $rootpw; @@ -188,7 +188,7 @@ EOC append_to_file($slapd_conf, $conf); mkdir $ldap_datadir or die "making $ldap_datadir: $!"; - mkdir $slapd_certs or die "making $slapd_certs: $!"; + mkdir $slapd_certs or die "making $slapd_certs: $!"; my $certdir = dirname(__FILE__) . 
"/../ssl/ssl"; @@ -205,7 +205,8 @@ EOC chmod 0600, $ldap_pwfile or die "chmod on $ldap_pwfile"; # -s0 prevents log messages ending up in syslog - system_or_bail $slapd, '-f', $slapd_conf, '-s0', '-h', "$ldap_url $ldaps_url"; + system_or_bail $slapd, '-f', $slapd_conf, '-s0', '-h', + "$ldap_url $ldaps_url"; # wait until slapd accepts requests my $retries = 0; @@ -215,25 +216,25 @@ EOC if ( system_log( "ldapsearch", "-sbase", - "-H", $ldap_url, - "-b", $ldap_basedn, - "-D", $ldap_rootdn, - "-y", $ldap_pwfile, - "-n", "'objectclass=*'") == 0); + "-H", $ldap_url, + "-b", $ldap_basedn, + "-D", $ldap_rootdn, + "-y", $ldap_pwfile, + "-n", "'objectclass=*'") == 0); die "cannot connect to slapd" if ++$retries >= 300; note "waiting for slapd to accept requests..."; Time::HiRes::usleep(1000000); } $self->{pidfile} = $slapd_pidfile; - $self->{pwfile} = $ldap_pwfile; - $self->{url} = $ldap_url; - $self->{s_url} = $ldaps_url; - $self->{server} = $ldap_server; - $self->{port} = $ldap_port; - $self->{s_port} = $ldaps_port; - $self->{basedn} = $ldap_basedn; - $self->{rootdn} = $ldap_rootdn; + $self->{pwfile} = $ldap_pwfile; + $self->{url} = $ldap_url; + $self->{s_url} = $ldaps_url; + $self->{server} = $ldap_server; + $self->{port} = $ldap_port; + $self->{s_port} = $ldaps_port; + $self->{basedn} = $ldap_basedn; + $self->{rootdn} = $ldap_rootdn; bless $self, $class; push @servers, $self; @@ -244,8 +245,8 @@ EOC sub _ldapenv { my $self = shift; - my %env = %ENV; - $env{'LDAPURI'} = $self->{url}; + my %env = %ENV; + $env{'LDAPURI'} = $self->{url}; $env{'LDAPBINDDN'} = $self->{rootdn}; return %env; } @@ -287,8 +288,8 @@ Set the user's password in the LDAP server sub ldapsetpw { - my $self = shift; - my $user = shift; + my $self = shift; + my $user = shift; my $password = shift; local %ENV = $self->_ldapenv; diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl index 1e027ced011..3e113fd6ebb 100644 --- a/src/test/ldap/t/001_auth.pl +++ b/src/test/ldap/t/001_auth.pl @@ 
-37,8 +37,8 @@ $ldap->ldapadd_file('authdata.ldif'); $ldap->ldapsetpw('uid=test1,dc=example,dc=net', 'secret1'); $ldap->ldapsetpw('uid=test2,dc=example,dc=net', 'secret2'); -my ($ldap_server, $ldap_port, $ldaps_port, $ldap_url, - $ldaps_url, $ldap_basedn, $ldap_rootdn +my ($ldap_server, $ldap_port, $ldaps_port, $ldap_url, + $ldaps_url, $ldap_basedn, $ldap_rootdn ) = $ldap->prop(qw(server port s_port url s_url basedn rootdn)); # don't bother to check the server's cert (though perhaps we should) diff --git a/src/test/modules/commit_ts/t/002_standby.pl b/src/test/modules/commit_ts/t/002_standby.pl index 8a3a6b40e6a..59cc2b1244b 100644 --- a/src/test/modules/commit_ts/t/002_standby.pl +++ b/src/test/modules/commit_ts/t/002_standby.pl @@ -11,7 +11,7 @@ use Test::More; use PostgreSQL::Test::Cluster; my $bkplabel = 'backup'; -my $primary = PostgreSQL::Test::Cluster->new('primary'); +my $primary = PostgreSQL::Test::Cluster->new('primary'); $primary->init(allows_streaming => 1); $primary->append_conf( diff --git a/src/test/modules/commit_ts/t/003_standby_2.pl b/src/test/modules/commit_ts/t/003_standby_2.pl index f750a8896a9..5af511e369c 100644 --- a/src/test/modules/commit_ts/t/003_standby_2.pl +++ b/src/test/modules/commit_ts/t/003_standby_2.pl @@ -11,7 +11,7 @@ use Test::More; use PostgreSQL::Test::Cluster; my $bkplabel = 'backup'; -my $primary = PostgreSQL::Test::Cluster->new('primary'); +my $primary = PostgreSQL::Test::Cluster->new('primary'); $primary->init(allows_streaming => 1); $primary->append_conf( 'postgresql.conf', qq{ diff --git a/src/test/modules/commit_ts/t/004_restart.pl b/src/test/modules/commit_ts/t/004_restart.pl index 20865217d94..8fe4bedb140 100644 --- a/src/test/modules/commit_ts/t/004_restart.pl +++ b/src/test/modules/commit_ts/t/004_restart.pl @@ -25,12 +25,12 @@ like( ($ret, $stdout, $stderr) = $node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]); -is($ret, 0, 'getting ts of BootstrapTransactionId succeeds'); +is($ret, 0, 
'getting ts of BootstrapTransactionId succeeds'); is($stdout, '', 'timestamp of BootstrapTransactionId is null'); ($ret, $stdout, $stderr) = $node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]); -is($ret, 0, 'getting ts of FrozenTransactionId succeeds'); +is($ret, 0, 'getting ts of FrozenTransactionId succeeds'); is($stdout, '', 'timestamp of FrozenTransactionId is null'); # Since FirstNormalTransactionId will've occurred during initdb, long before we diff --git a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl index 4174292d2df..c96c8d7a4de 100644 --- a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl +++ b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl @@ -34,7 +34,7 @@ elsif (!$LdapServer::setup) my $clear_ldap_rootpw = "FooBaR1"; my $rot13_ldap_rootpw = "SbbOnE1"; -my $ldap = LdapServer->new($clear_ldap_rootpw, 'users'); # no anonymous auth +my $ldap = LdapServer->new($clear_ldap_rootpw, 'users'); # no anonymous auth $ldap->ldapadd_file("$FindBin::RealBin/../../../ldap/authdata.ldif"); $ldap->ldapsetpw('uid=test1,dc=example,dc=net', 'secret1'); @@ -47,7 +47,8 @@ note "setting up PostgreSQL instance"; my $node = PostgreSQL::Test::Cluster->new('node'); $node->init; $node->append_conf('postgresql.conf', "log_connections = on\n"); -$node->append_conf('postgresql.conf', "shared_preload_libraries = 'ldap_password_func'"); +$node->append_conf('postgresql.conf', + "shared_preload_libraries = 'ldap_password_func'"); $node->start; $node->safe_psql('postgres', 'CREATE USER test1;'); @@ -82,7 +83,8 @@ $node->append_conf('pg_hba.conf', ); $node->restart; -test_access($node, 'test1', 2, 'search+bind authentication fails with wrong ldapbindpasswd'); +test_access($node, 'test1', 2, + 'search+bind authentication fails with wrong ldapbindpasswd'); unlink($node->data_dir . 
'/pg_hba.conf'); $node->append_conf('pg_hba.conf', @@ -90,7 +92,8 @@ $node->append_conf('pg_hba.conf', ); $node->restart; -test_access($node, 'test1', 2, 'search+bind authentication fails with clear password'); +test_access($node, 'test1', 2, + 'search+bind authentication fails with clear password'); unlink($node->data_dir . '/pg_hba.conf'); $node->append_conf('pg_hba.conf', @@ -98,6 +101,7 @@ $node->append_conf('pg_hba.conf', ); $node->restart; -test_access($node, 'test1', 0, 'search+bind authentication succeeds with rot13ed password'); +test_access($node, 'test1', 0, + 'search+bind authentication succeeds with rot13ed password'); done_testing(); diff --git a/src/test/modules/libpq_pipeline/libpq_pipeline.c b/src/test/modules/libpq_pipeline/libpq_pipeline.c index f48da7d963e..f5b4d4d1ff2 100644 --- a/src/test/modules/libpq_pipeline/libpq_pipeline.c +++ b/src/test/modules/libpq_pipeline/libpq_pipeline.c @@ -985,7 +985,7 @@ test_prepared(PGconn *conn) static void notice_processor(void *arg, const char *message) { - int *n_notices = (int *) arg; + int *n_notices = (int *) arg; (*n_notices)++; fprintf(stderr, "NOTICE %d: %s", *n_notices, message); diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl index 7560439fec1..056fa5c6d2b 100644 --- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl +++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl @@ -53,7 +53,7 @@ for my $testname (@tests) $node->command_ok( [ 'libpq_pipeline', @extraargs, - $testname, $node->connstr('postgres') + $testname, $node->connstr('postgres') ], "libpq_pipeline $testname"); diff --git a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl index 378d7b4fc77..2b2c144ee28 100644 --- a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl +++ b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl @@ -69,7 +69,7 @@ my $ret = # with a 
bad passphrase the server should not start -ok($ret, "pg_ctl fails with bad passphrase"); +ok($ret, "pg_ctl fails with bad passphrase"); ok(!-e "$ddir/postmaster.pid", "postgres not started with bad passphrase"); # just in case diff --git a/src/test/modules/test_custom_rmgrs/t/001_basic.pl b/src/test/modules/test_custom_rmgrs/t/001_basic.pl index 347a001823e..50655d3788a 100644 --- a/src/test/modules/test_custom_rmgrs/t/001_basic.pl +++ b/src/test/modules/test_custom_rmgrs/t/001_basic.pl @@ -27,7 +27,8 @@ $node->safe_psql('postgres', 'CREATE EXTENSION pg_walinspect'); # make sure checkpoints don't interfere with the test. my $start_lsn = $node->safe_psql('postgres', - qq[SELECT lsn FROM pg_create_physical_replication_slot('regress_test_slot1', true, false);]); + qq[SELECT lsn FROM pg_create_physical_replication_slot('regress_test_slot1', true, false);] +); # write and save the WAL record's returned end LSN for verifying it later my $record_end_lsn = $node->safe_psql('postgres', @@ -36,11 +37,12 @@ my $record_end_lsn = $node->safe_psql('postgres', # ensure the WAL is written and flushed to disk $node->safe_psql('postgres', 'SELECT pg_switch_wal()'); -my $end_lsn = $node->safe_psql('postgres', 'SELECT pg_current_wal_flush_lsn()'); +my $end_lsn = + $node->safe_psql('postgres', 'SELECT pg_current_wal_flush_lsn()'); # check if our custom WAL resource manager has successfully registered with the server -my $row_count = - $node->safe_psql('postgres', +my $row_count = $node->safe_psql( + 'postgres', qq[SELECT count(*) FROM pg_get_wal_resource_managers() WHERE rm_name = 'test_custom_rmgrs';]); is($row_count, '1', @@ -48,14 +50,14 @@ is($row_count, '1', ); # check if our custom WAL resource manager has successfully written a WAL record -my $expected = qq($record_end_lsn|test_custom_rmgrs|TEST_CUSTOM_RMGRS_MESSAGE|0|payload (10 bytes): payload123); -my $result = - $node->safe_psql('postgres', +my $expected = + 
qq($record_end_lsn|test_custom_rmgrs|TEST_CUSTOM_RMGRS_MESSAGE|0|payload (10 bytes): payload123); +my $result = $node->safe_psql( + 'postgres', qq[SELECT end_lsn, resource_manager, record_type, fpi_length, description FROM pg_get_wal_records_info('$start_lsn', '$end_lsn') WHERE resource_manager = 'test_custom_rmgrs';]); is($result, $expected, - 'custom WAL resource manager has successfully written a WAL record' -); + 'custom WAL resource manager has successfully written a WAL record'); $node->stop; done_testing(); diff --git a/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c b/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c index 1727910ce7e..a304ba54bb9 100644 --- a/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c +++ b/src/test/modules/test_custom_rmgrs/test_custom_rmgrs.c @@ -31,7 +31,7 @@ PG_MODULE_MAGIC; */ typedef struct xl_testcustomrmgrs_message { - Size message_size; /* size of the message */ + Size message_size; /* size of the message */ char message[FLEXIBLE_ARRAY_MEMBER]; /* payload */ } xl_testcustomrmgrs_message; diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c index b7c6f98577c..82f937fca4f 100644 --- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c +++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c @@ -318,6 +318,7 @@ get_altertable_subcmdinfo(PG_FUNCTION_ARGS) if (OidIsValid(sub->address.objectId)) { char *objdesc; + objdesc = getObjectDescription((const ObjectAddress *) &sub->address, false); values[1] = CStringGetTextDatum(objdesc); } diff --git a/src/test/modules/test_misc/t/001_constraint_validation.pl b/src/test/modules/test_misc/t/001_constraint_validation.pl index 64766c1e33f..5a07a5d36df 100644 --- a/src/test/modules/test_misc/t/001_constraint_validation.pl +++ b/src/test/modules/test_misc/t/001_constraint_validation.pl @@ -25,8 +25,8 @@ sub run_sql_command $node->psql( 'postgres', $sql, - stderr => \$stderr, - on_error_die => 1, + 
stderr => \$stderr, + on_error_die => 1, on_error_stop => 1); return $stderr; } diff --git a/src/test/modules/test_misc/t/002_tablespace.pl b/src/test/modules/test_misc/t/002_tablespace.pl index 95cd2b7b65f..f774a021a8a 100644 --- a/src/test/modules/test_misc/t/002_tablespace.pl +++ b/src/test/modules/test_misc/t/002_tablespace.pl @@ -13,7 +13,7 @@ $node->init; $node->start; # Create a couple of directories to use as tablespaces. -my $basedir = $node->basedir(); +my $basedir = $node->basedir(); my $TS1_LOCATION = "$basedir/ts1"; $TS1_LOCATION =~ s/\/\.\//\//g; # collapse foo/./bar to foo/bar mkdir($TS1_LOCATION); diff --git a/src/test/modules/test_misc/t/003_check_guc.pl b/src/test/modules/test_misc/t/003_check_guc.pl index e9f33f3c775..4fd6d03b9e6 100644 --- a/src/test/modules/test_misc/t/003_check_guc.pl +++ b/src/test/modules/test_misc/t/003_check_guc.pl @@ -73,8 +73,8 @@ close $contents; # Cross-check that all the GUCs found in the sample file match the ones # fetched above. This maps the arrays to a hash, making the creation of # each exclude and intersection list easier. -my %gucs_in_file_hash = map { $_ => 1 } @gucs_in_file; -my %all_params_hash = map { $_ => 1 } @all_params_array; +my %gucs_in_file_hash = map { $_ => 1 } @gucs_in_file; +my %all_params_hash = map { $_ => 1 } @all_params_array; my %not_in_sample_hash = map { $_ => 1 } @not_in_sample_array; my @missing_from_file = grep(!$gucs_in_file_hash{$_}, @all_params_array); @@ -91,7 +91,9 @@ is(scalar(@sample_intersect), # These would log some information only on errors. 
foreach my $param (@missing_from_file) { - print("found GUC $param in guc_tables.c, missing from postgresql.conf.sample\n"); + print( + "found GUC $param in guc_tables.c, missing from postgresql.conf.sample\n" + ); } foreach my $param (@missing_from_list) { diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl index 78e76774238..d00c3544e91 100644 --- a/src/test/modules/test_pg_dump/t/001_base.pl +++ b/src/test/modules/test_pg_dump/t/001_base.pl @@ -46,15 +46,15 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir; my %pgdump_runs = ( binary_upgrade => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/binary_upgrade.sql", '--schema-only', - '--binary-upgrade', '--dbname=postgres', + '--binary-upgrade', '--dbname=postgres', ], }, clean => { dump_cmd => [ 'pg_dump', "--file=$tempdir/clean.sql", - '-c', '--no-sync', + '-c', '--no-sync', '--dbname=postgres', ], }, @@ -151,26 +151,26 @@ my %pgdump_runs = ( }, extension_schema => { dump_cmd => [ - 'pg_dump', '--schema=public', + 'pg_dump', '--schema=public', "--file=$tempdir/extension_schema.sql", 'postgres', ], }, pg_dumpall_globals => { dump_cmd => [ - 'pg_dumpall', '--no-sync', + 'pg_dumpall', '--no-sync', "--file=$tempdir/pg_dumpall_globals.sql", '-g', ], }, no_privs => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/no_privs.sql", '-x', 'postgres', ], }, no_owner => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/no_owner.sql", '-O', 'postgres', ], @@ -183,14 +183,14 @@ my %pgdump_runs = ( }, section_pre_data => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/section_pre_data.sql", '--section=pre-data', 'postgres', ], }, section_data => { dump_cmd => [ - 'pg_dump', '--no-sync', + 'pg_dump', '--no-sync', "--file=$tempdir/section_data.sql", '--section=data', 'postgres', ], @@ -276,15 +276,15 @@ my %pgdump_runs = ( # Tests which 
are considered 'full' dumps by pg_dump, but there # are flags used to exclude specific items (ACLs, LOs, etc). my %full_runs = ( - binary_upgrade => 1, - clean => 1, - clean_if_exists => 1, - createdb => 1, - defaults => 1, - exclude_table => 1, - no_privs => 1, - no_owner => 1, - with_extension => 1, + binary_upgrade => 1, + clean => 1, + clean_if_exists => 1, + createdb => 1, + defaults => 1, + exclude_table => 1, + no_privs => 1, + no_owner => 1, + with_extension => 1, without_extension => 1); my %tests = ( @@ -302,13 +302,13 @@ my %tests = ( 'CREATE EXTENSION test_pg_dump' => { create_order => 2, - create_sql => 'CREATE EXTENSION test_pg_dump;', - regexp => qr/^ + create_sql => 'CREATE EXTENSION test_pg_dump;', + regexp => qr/^ \QCREATE EXTENSION IF NOT EXISTS test_pg_dump WITH SCHEMA public;\E \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { binary_upgrade => 1, without_extension => 1 }, @@ -316,9 +316,9 @@ my %tests = ( 'CREATE ROLE regress_dump_test_role' => { create_order => 1, - create_sql => 'CREATE ROLE regress_dump_test_role;', - regexp => qr/^CREATE ROLE regress_dump_test_role;\n/m, - like => { pg_dumpall_globals => 1, }, + create_sql => 'CREATE ROLE regress_dump_test_role;', + regexp => qr/^CREATE ROLE regress_dump_test_role;\n/m, + like => { pg_dumpall_globals => 1, }, }, 'GRANT ALTER SYSTEM ON PARAMETER full_page_writes TO regress_dump_test_role' @@ -355,8 +355,8 @@ my %tests = ( 'CREATE SCHEMA public' => { regexp => qr/^CREATE SCHEMA public;/m, - like => { - extension_schema => 1, + like => { + extension_schema => 1, without_extension_explicit_schema => 1, }, }, @@ -400,14 +400,14 @@ my %tests = ( 'SETVAL SEQUENCE regress_seq_dumpable' => { create_order => 6, - create_sql => qq{SELECT nextval('regress_seq_dumpable');}, - regexp => qr/^ + create_sql => qq{SELECT nextval('regress_seq_dumpable');}, + regexp => qr/^ \QSELECT pg_catalog.setval('public.regress_seq_dumpable', 1, true);\E \n/xm, 
like => { %full_runs, - data_only => 1, - section_data => 1, + data_only => 1, + section_data => 1, extension_schema => 1, }, unlike => { without_extension => 1, }, @@ -429,20 +429,20 @@ my %tests = ( \n/xm, like => { %full_runs, - data_only => 1, - section_data => 1, + data_only => 1, + section_data => 1, extension_schema => 1, }, unlike => { - binary_upgrade => 1, - exclude_table => 1, + binary_upgrade => 1, + exclude_table => 1, without_extension => 1, }, }, 'REVOKE ALL ON FUNCTION wgo_then_no_access' => { create_order => 3, - create_sql => q{ + create_sql => q{ DO $$BEGIN EXECUTE format( 'REVOKE ALL ON FUNCTION wgo_then_no_access() FROM pg_signal_backend, public, %I', @@ -456,7 +456,7 @@ my %tests = ( /xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -464,7 +464,7 @@ my %tests = ( 'REVOKE GRANT OPTION FOR UPDATE ON SEQUENCE wgo_then_regular' => { create_order => 3, - create_sql => 'REVOKE GRANT OPTION FOR UPDATE ON SEQUENCE + create_sql => 'REVOKE GRANT OPTION FOR UPDATE ON SEQUENCE wgo_then_regular FROM pg_signal_backend;', regexp => qr/^ \QREVOKE ALL ON SEQUENCE public.wgo_then_regular FROM pg_signal_backend;\E @@ -473,7 +473,7 @@ my %tests = ( /xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -493,7 +493,7 @@ my %tests = ( \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { without_extension => 1, }, @@ -518,7 +518,7 @@ my %tests = ( \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -545,14 +545,14 @@ my %tests = ( 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' => { create_order => 4, - create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table + create_sql => 'GRANT SELECT(col2) ON 
regress_pg_dump_table TO regress_dump_test_role;', regexp => qr/^ \QGRANT SELECT(col2) ON TABLE public.regress_pg_dump_table TO regress_dump_test_role;\E \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1 }, @@ -568,7 +568,7 @@ my %tests = ( \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -583,14 +583,14 @@ my %tests = ( 'REVOKE SELECT(col1) ON regress_pg_dump_table' => { create_order => 3, - create_sql => 'REVOKE SELECT(col1) ON regress_pg_dump_table + create_sql => 'REVOKE SELECT(col1) ON regress_pg_dump_table FROM PUBLIC;', regexp => qr/^ \QREVOKE SELECT(col1) ON TABLE public.regress_pg_dump_table FROM PUBLIC;\E \n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, }, unlike => { no_privs => 1, without_extension => 1, }, @@ -699,13 +699,13 @@ my %tests = ( regexp => qr/^ \QALTER INDEX regress_pg_dump_schema.extdependtab_pkey DEPENDS ON EXTENSION test_pg_dump;\E\n /xms, - like => {%pgdump_runs}, + like => {%pgdump_runs}, unlike => { - data_only => 1, - extension_schema => 1, + data_only => 1, + extension_schema => 1, pg_dumpall_globals => 1, - section_data => 1, - section_pre_data => 1, + section_data => 1, + section_pre_data => 1, # Excludes this schema as extension is not listed. without_extension_explicit_schema => 1, }, @@ -715,13 +715,13 @@ my %tests = ( regexp => qr/^ \QALTER INDEX regress_pg_dump_schema.extdependtab_col2_idx DEPENDS ON EXTENSION test_pg_dump;\E\n /xms, - like => {%pgdump_runs}, + like => {%pgdump_runs}, unlike => { - data_only => 1, - extension_schema => 1, + data_only => 1, + extension_schema => 1, pg_dumpall_globals => 1, - section_data => 1, - section_pre_data => 1, + section_data => 1, + section_pre_data => 1, # Excludes this schema as extension is not listed. 
without_extension_explicit_schema => 1, }, @@ -730,7 +730,7 @@ my %tests = ( # Objects not included in extension, part of schema created by extension 'CREATE TABLE regress_pg_dump_schema.external_tab' => { create_order => 4, - create_sql => 'CREATE TABLE regress_pg_dump_schema.external_tab + create_sql => 'CREATE TABLE regress_pg_dump_schema.external_tab (col1 int);', regexp => qr/^ \QCREATE TABLE regress_pg_dump_schema.external_tab (\E @@ -738,7 +738,7 @@ my %tests = ( \n\);\n/xm, like => { %full_runs, - schema_only => 1, + schema_only => 1, section_pre_data => 1, # Excludes the extension and keeps the schema's data. without_extension_internal_schema => 1, diff --git a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm index 349bebeaeaf..843f65b448b 100644 --- a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm +++ b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm @@ -329,13 +329,13 @@ sub adjust_old_dumpfile # adjust some places where we don't print so many parens anymore my $prefix = "CONSTRAINT (?:sequence|copy)_con CHECK [(][(]"; - my $orig = "((x > 3) AND (y <> 'check failed'::text))"; - my $repl = "(x > 3) AND (y <> 'check failed'::text)"; + my $orig = "((x > 3) AND (y <> 'check failed'::text))"; + my $repl = "(x > 3) AND (y <> 'check failed'::text)"; $dump =~ s/($prefix)\Q$orig\E/$1$repl/mg; $prefix = "CONSTRAINT insert_con CHECK [(][(]"; - $orig = "((x >= 3) AND (y <> 'check failed'::text))"; - $repl = "(x >= 3) AND (y <> 'check failed'::text)"; + $orig = "((x >= 3) AND (y <> 'check failed'::text))"; + $repl = "(x >= 3) AND (y <> 'check failed'::text)"; $dump =~ s/($prefix)\Q$orig\E/$1$repl/mg; $orig = "DEFAULT ((-1) * currval('public.insert_seq'::regclass))"; @@ -406,78 +406,78 @@ sub adjust_old_dumpfile # Data for _mash_view_qualifiers my @_unused_view_qualifiers = ( # Present at least since 9.2 - { obj => 'VIEW public.trigger_test_view', qual => 'trigger_test' }, - { obj => 'VIEW public.domview', qual => 'domtab' 
}, + { obj => 'VIEW public.trigger_test_view', qual => 'trigger_test' }, + { obj => 'VIEW public.domview', qual => 'domtab' }, { obj => 'VIEW public.my_property_normal', qual => 'customer' }, { obj => 'VIEW public.my_property_secure', qual => 'customer' }, - { obj => 'VIEW public.pfield_v1', qual => 'pf' }, - { obj => 'VIEW public.rtest_v1', qual => 'rtest_t1' }, - { obj => 'VIEW public.rtest_vview1', qual => 'x' }, - { obj => 'VIEW public.rtest_vview2', qual => 'rtest_view1' }, - { obj => 'VIEW public.rtest_vview3', qual => 'x' }, - { obj => 'VIEW public.rtest_vview5', qual => 'rtest_view1' }, - { obj => 'VIEW public.shoelace_obsolete', qual => 'shoelace' }, + { obj => 'VIEW public.pfield_v1', qual => 'pf' }, + { obj => 'VIEW public.rtest_v1', qual => 'rtest_t1' }, + { obj => 'VIEW public.rtest_vview1', qual => 'x' }, + { obj => 'VIEW public.rtest_vview2', qual => 'rtest_view1' }, + { obj => 'VIEW public.rtest_vview3', qual => 'x' }, + { obj => 'VIEW public.rtest_vview5', qual => 'rtest_view1' }, + { obj => 'VIEW public.shoelace_obsolete', qual => 'shoelace' }, { obj => 'VIEW public.shoelace_candelete', qual => 'shoelace_obsolete' }, - { obj => 'VIEW public.toyemp', qual => 'emp' }, - { obj => 'VIEW public.xmlview4', qual => 'emp' }, + { obj => 'VIEW public.toyemp', qual => 'emp' }, + { obj => 'VIEW public.xmlview4', qual => 'emp' }, # Since 9.3 (some of these were removed in 9.6) - { obj => 'VIEW public.tv', qual => 't' }, + { obj => 'VIEW public.tv', qual => 't' }, { obj => 'MATERIALIZED VIEW mvschema.tvm', qual => 'tv' }, - { obj => 'VIEW public.tvv', qual => 'tv' }, - { obj => 'MATERIALIZED VIEW public.tvvm', qual => 'tvv' }, - { obj => 'VIEW public.tvvmv', qual => 'tvvm' }, - { obj => 'MATERIALIZED VIEW public.bb', qual => 'tvvmv' }, - { obj => 'VIEW public.nums', qual => 'nums' }, - { obj => 'VIEW public.sums_1_100', qual => 't' }, - { obj => 'MATERIALIZED VIEW public.tm', qual => 't' }, - { obj => 'MATERIALIZED VIEW public.tmm', qual => 'tm' }, - { obj => 
'MATERIALIZED VIEW public.tvmm', qual => 'tvm' }, + { obj => 'VIEW public.tvv', qual => 'tv' }, + { obj => 'MATERIALIZED VIEW public.tvvm', qual => 'tvv' }, + { obj => 'VIEW public.tvvmv', qual => 'tvvm' }, + { obj => 'MATERIALIZED VIEW public.bb', qual => 'tvvmv' }, + { obj => 'VIEW public.nums', qual => 'nums' }, + { obj => 'VIEW public.sums_1_100', qual => 't' }, + { obj => 'MATERIALIZED VIEW public.tm', qual => 't' }, + { obj => 'MATERIALIZED VIEW public.tmm', qual => 'tm' }, + { obj => 'MATERIALIZED VIEW public.tvmm', qual => 'tvm' }, # Since 9.4 { - obj => 'MATERIALIZED VIEW public.citext_matview', + obj => 'MATERIALIZED VIEW public.citext_matview', qual => 'citext_table' }, { - obj => 'OR REPLACE VIEW public.key_dependent_view', + obj => 'OR REPLACE VIEW public.key_dependent_view', qual => 'view_base_table' }, { - obj => 'OR REPLACE VIEW public.key_dependent_view_no_cols', + obj => 'OR REPLACE VIEW public.key_dependent_view_no_cols', qual => 'view_base_table' }, # Since 9.5 { - obj => 'VIEW public.dummy_seclabel_view1', + obj => 'VIEW public.dummy_seclabel_view1', qual => 'dummy_seclabel_tbl2' }, - { obj => 'VIEW public.vv', qual => 'test_tablesample' }, + { obj => 'VIEW public.vv', qual => 'test_tablesample' }, { obj => 'VIEW public.test_tablesample_v1', qual => 'test_tablesample' }, { obj => 'VIEW public.test_tablesample_v2', qual => 'test_tablesample' }, # Since 9.6 { - obj => 'MATERIALIZED VIEW public.test_pg_dump_mv1', + obj => 'MATERIALIZED VIEW public.test_pg_dump_mv1', qual => 'test_pg_dump_t1' }, { obj => 'VIEW public.test_pg_dump_v1', qual => 'test_pg_dump_t1' }, - { obj => 'VIEW public.mvtest_tv', qual => 'mvtest_t' }, + { obj => 'VIEW public.mvtest_tv', qual => 'mvtest_t' }, { - obj => 'MATERIALIZED VIEW mvtest_mvschema.mvtest_tvm', + obj => 'MATERIALIZED VIEW mvtest_mvschema.mvtest_tvm', qual => 'mvtest_tv' }, - { obj => 'VIEW public.mvtest_tvv', qual => 'mvtest_tv' }, + { obj => 'VIEW public.mvtest_tvv', qual => 'mvtest_tv' }, { obj => 
'MATERIALIZED VIEW public.mvtest_tvvm', qual => 'mvtest_tvv' }, - { obj => 'VIEW public.mvtest_tvvmv', qual => 'mvtest_tvvm' }, - { obj => 'MATERIALIZED VIEW public.mvtest_bb', qual => 'mvtest_tvvmv' }, - { obj => 'MATERIALIZED VIEW public.mvtest_tm', qual => 'mvtest_t' }, - { obj => 'MATERIALIZED VIEW public.mvtest_tmm', qual => 'mvtest_tm' }, + { obj => 'VIEW public.mvtest_tvvmv', qual => 'mvtest_tvvm' }, + { obj => 'MATERIALIZED VIEW public.mvtest_bb', qual => 'mvtest_tvvmv' }, + { obj => 'MATERIALIZED VIEW public.mvtest_tm', qual => 'mvtest_t' }, + { obj => 'MATERIALIZED VIEW public.mvtest_tmm', qual => 'mvtest_tm' }, { obj => 'MATERIALIZED VIEW public.mvtest_tvmm', qual => 'mvtest_tvm' }, # Since 10 (some removed in 12) - { obj => 'VIEW public.itestv10', qual => 'itest10' }, - { obj => 'VIEW public.itestv11', qual => 'itest11' }, + { obj => 'VIEW public.itestv10', qual => 'itest10' }, + { obj => 'VIEW public.itestv11', qual => 'itest11' }, { obj => 'VIEW public.xmltableview2', qual => '"xmltable"' }, # Since 12 { - obj => 'MATERIALIZED VIEW public.tableam_tblmv_heap2', + obj => 'MATERIALIZED VIEW public.tableam_tblmv_heap2', qual => 'tableam_tbl_heap2' }, # Since 13 @@ -496,7 +496,7 @@ sub _mash_view_qualifiers for my $uvq (@_unused_view_qualifiers) { - my $leader = "CREATE $uvq->{obj} "; + my $leader = "CREATE $uvq->{obj} "; my $qualifier = $uvq->{qual}; # Note: we loop because there are presently some cases where the same # view name appears in multiple databases. 
Fortunately, the same @@ -507,8 +507,8 @@ sub _mash_view_qualifiers foreach my $chunk (@splitchunks) { my @thischunks = split /;/, $chunk, 2; - my $stmt = shift(@thischunks); - my $ostmt = $stmt; + my $stmt = shift(@thischunks); + my $ostmt = $stmt; # now $stmt is just the body of the CREATE [MATERIALIZED] VIEW $stmt =~ s/$qualifier\.//g; diff --git a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm index a9c102949f8..924b57ab218 100644 --- a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm +++ b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm @@ -82,20 +82,28 @@ sub new { my $class = shift; my ($interactive, $psql_params) = @_; - my $psql = {'stdin' => '', 'stdout' => '', 'stderr' => '', 'query_timer_restart' => undef}; + my $psql = { + 'stdin' => '', + 'stdout' => '', + 'stderr' => '', + 'query_timer_restart' => undef + }; my $run; # This constructor should only be called from PostgreSQL::Test::Cluster - my ($package, $file, $line) = caller; - die "Forbidden caller of constructor: package: $package, file: $file:$line" + my ($package, $file, $line) = caller; + die + "Forbidden caller of constructor: package: $package, file: $file:$line" unless $package->isa('PostgreSQL::Test::Cluster'); - $psql->{timeout} = IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default); + $psql->{timeout} = + IPC::Run::timeout($PostgreSQL::Test::Utils::timeout_default); if ($interactive) { $run = IPC::Run::start $psql_params, - '<pty<', \$psql->{stdin}, '>pty>', \$psql->{stdout}, '2>', \$psql->{stderr}, + '<pty<', \$psql->{stdin}, '>pty>', \$psql->{stdout}, '2>', + \$psql->{stderr}, $psql->{timeout}; } else @@ -126,8 +134,9 @@ sub _wait_connect # errors anyway, but that might be added later.) 
my $banner = "background_psql: ready"; $self->{stdin} .= "\\echo $banner\n"; - $self->{run}->pump() until $self->{stdout} =~ /$banner/ || $self->{timeout}->is_expired; - $self->{stdout} = ''; # clear out banner + $self->{run}->pump() + until $self->{stdout} =~ /$banner/ || $self->{timeout}->is_expired; + $self->{stdout} = ''; # clear out banner die "psql startup timed out" if $self->{timeout}->is_expired; } @@ -173,10 +182,10 @@ sub reconnect_and_clear # restart $self->{run}->run(); - $self->{stdin} = ''; + $self->{stdin} = ''; $self->{stdout} = ''; - $self->_wait_connect() + $self->_wait_connect(); } =pod @@ -219,7 +228,7 @@ sub query $ret = $self->{stderr} eq "" ? 0 : 1; - return wantarray ? ( $output, $ret ) : $output; + return wantarray ? ($output, $ret) : $output; } =pod diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index bc9b5dc6444..baea0fcd1c2 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -112,10 +112,10 @@ use PostgreSQL::Version; use PostgreSQL::Test::RecursiveCopy; use Socket; use Test::More; -use PostgreSQL::Test::Utils (); +use PostgreSQL::Test::Utils (); use PostgreSQL::Test::BackgroundPsql (); -use Time::HiRes qw(usleep); -use Scalar::Util qw(blessed); +use Time::HiRes qw(usleep); +use Scalar::Util qw(blessed); our ($use_tcp, $test_localhost, $test_pghost, $last_host_assigned, $last_port_assigned, @all_nodes, $died, $portdir); @@ -132,8 +132,8 @@ INIT # Set PGHOST for backward compatibility. This doesn't work for own_host # nodes, so prefer to not rely on this when writing new tests. 
- $use_tcp = !$PostgreSQL::Test::Utils::use_unix_sockets; - $test_localhost = "127.0.0.1"; + $use_tcp = !$PostgreSQL::Test::Utils::use_unix_sockets; + $test_localhost = "127.0.0.1"; $last_host_assigned = 1; if ($use_tcp) { @@ -147,7 +147,7 @@ INIT $test_pghost = PostgreSQL::Test::Utils::tempdir_short; $test_pghost =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os; } - $ENV{PGHOST} = $test_pghost; + $ENV{PGHOST} = $test_pghost; $ENV{PGDATABASE} = 'postgres'; # Tracking of last port value assigned to accelerate free port lookup. @@ -160,9 +160,10 @@ INIT $portdir = $ENV{PG_TEST_PORT_DIR}; # Otherwise, try to use a directory at the top of the build tree # or as a last resort use the tmp_check directory - my $build_dir = $ENV{MESON_BUILD_ROOT} + my $build_dir = + $ENV{MESON_BUILD_ROOT} || $ENV{top_builddir} - || $PostgreSQL::Test::Utils::tmp_check ; + || $PostgreSQL::Test::Utils::tmp_check; $portdir ||= "$build_dir/portlock"; $portdir =~ s!\\!/!g; # Make sure the directory exists @@ -408,7 +409,7 @@ sub config_data my @map; foreach my $line (@lines) { - my ($k,$v) = split (/ = /,$line,2); + my ($k, $v) = split(/ = /, $line, 2); push(@map, $k, $v); } return @map; @@ -509,14 +510,14 @@ disabled. 
sub init { my ($self, %params) = @_; - my $port = $self->port; + my $port = $self->port; my $pgdata = $self->data_dir; - my $host = $self->host; + my $host = $self->host; local %ENV = $self->_get_env(); $params{allows_streaming} = 0 unless defined $params{allows_streaming}; - $params{has_archiving} = 0 unless defined $params{has_archiving}; + $params{has_archiving} = 0 unless defined $params{has_archiving}; mkdir $self->backup_dir; mkdir $self->archive_dir; @@ -585,7 +586,7 @@ sub init or die("unable to set permissions for $pgdata/postgresql.conf"); $self->set_replication_conf if $params{allows_streaming}; - $self->enable_archiving if $params{has_archiving}; + $self->enable_archiving if $params{has_archiving}; return; } @@ -680,17 +681,17 @@ sub backup { my ($self, $backup_name, %params) = @_; my $backup_path = $self->backup_dir . '/' . $backup_name; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(); print "# Taking pg_basebackup $backup_name from node \"$name\"\n"; PostgreSQL::Test::Utils::system_or_bail( 'pg_basebackup', '-D', - $backup_path, '-h', - $self->host, '-p', - $self->port, '--checkpoint', - 'fast', '--no-sync', + $backup_path, '-h', + $self->host, '-p', + $self->port, '--checkpoint', + 'fast', '--no-sync', @{ $params{backup_options} }); print "# Backup finished\n"; return; @@ -755,14 +756,14 @@ sub init_from_backup { my ($self, $root_node, $backup_name, %params) = @_; my $backup_path = $root_node->backup_dir . '/' . 
$backup_name; - my $host = $self->host; - my $port = $self->port; - my $node_name = $self->name; - my $root_name = $root_node->name; + my $host = $self->host; + my $port = $self->port; + my $node_name = $self->name; + my $root_name = $root_node->name; $params{has_streaming} = 0 unless defined $params{has_streaming}; $params{has_restoring} = 0 unless defined $params{has_restoring}; - $params{standby} = 1 unless defined $params{standby}; + $params{standby} = 1 unless defined $params{standby}; print "# Initializing node \"$node_name\" from backup \"$backup_name\" of node \"$root_name\"\n"; @@ -780,7 +781,7 @@ sub init_from_backup $backup_path . '/base.tar', '-C', $data_path); PostgreSQL::Test::Utils::system_or_bail( - $params{tar_program}, 'xf', + $params{tar_program}, 'xf', $backup_path . '/pg_wal.tar', '-C', $data_path . '/pg_wal'); } @@ -853,9 +854,9 @@ instead return a true or false value to indicate success or failure. sub start { my ($self, %params) = @_; - my $port = $self->port; + my $port = $self->port; my $pgdata = $self->data_dir; - my $name = $self->name; + my $name = $self->name; my $ret; BAIL_OUT("node \"$name\" is already running") if defined $self->{_pid}; @@ -872,8 +873,8 @@ sub start # -w is now the default but having it here does no harm and helps # compatibility with older versions. $ret = PostgreSQL::Test::Utils::system_log( - 'pg_ctl', '-w', '-D', $self->data_dir, - '-l', $self->logfile, '-o', "--cluster-name=$name", + 'pg_ctl', '-w', '-D', $self->data_dir, + '-l', $self->logfile, '-o', "--cluster-name=$name", 'start'); if ($ret != 0) @@ -938,7 +939,7 @@ sub stop { my ($self, $mode, %params) = @_; my $pgdata = $self->data_dir; - my $name = $self->name; + my $name = $self->name; my $ret; local %ENV = $self->_get_env(); @@ -976,9 +977,9 @@ Reload configuration parameters on the node. 
sub reload { my ($self) = @_; - my $port = $self->port; + my $port = $self->port; my $pgdata = $self->data_dir; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(); @@ -998,11 +999,11 @@ Wrapper for pg_ctl restart sub restart { - my ($self) = @_; - my $port = $self->port; - my $pgdata = $self->data_dir; + my ($self) = @_; + my $port = $self->port; + my $pgdata = $self->data_dir; my $logfile = $self->logfile; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(PGAPPNAME => undef); @@ -1027,11 +1028,11 @@ Wrapper for pg_ctl promote sub promote { - my ($self) = @_; - my $port = $self->port; - my $pgdata = $self->data_dir; + my ($self) = @_; + my $port = $self->port; + my $pgdata = $self->data_dir; my $logfile = $self->logfile; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(); @@ -1051,11 +1052,11 @@ Wrapper for pg_ctl logrotate sub logrotate { - my ($self) = @_; - my $port = $self->port; - my $pgdata = $self->data_dir; + my ($self) = @_; + my $port = $self->port; + my $pgdata = $self->data_dir; my $logfile = $self->logfile; - my $name = $self->name; + my $name = $self->name; local %ENV = $self->_get_env(); @@ -1070,7 +1071,7 @@ sub enable_streaming { my ($self, $root_node) = @_; my $root_connstr = $root_node->connstr; - my $name = $self->name; + my $name = $self->name; print "### Enabling streaming replication for node \"$name\"\n"; $self->append_conf( @@ -1155,8 +1156,8 @@ sub set_standby_mode sub enable_archiving { my ($self) = @_; - my $path = $self->archive_dir; - my $name = $self->name; + my $path = $self->archive_dir; + my $name = $self->name; print "### Enabling WAL archiving for node \"$name\"\n"; @@ -1301,7 +1302,7 @@ sub new _host => $host, _basedir => "$PostgreSQL::Test::Utils::tmp_check/t_${testname}_${name}_data", - _name => $name, + _name => $name, _logfile_generation => 0, _logfile_base => "$PostgreSQL::Test::Utils::log_path/${testname}_${name}", @@ -1354,8 
+1355,8 @@ sub new # sub _set_pg_version { - my ($self) = @_; - my $inst = $self->{_install_path}; + my ($self) = @_; + my $inst = $self->{_install_path}; my $pg_config = "pg_config"; if (defined $inst) @@ -1509,7 +1510,7 @@ called from outside the module as C<PostgreSQL::Test::Cluster::get_free_port()>. sub get_free_port { my $found = 0; - my $port = $last_port_assigned; + my $port = $last_port_assigned; while ($found == 0) { @@ -1589,14 +1590,14 @@ sub _reserve_port my $port = shift; # open in rw mode so we don't have to reopen it and lose the lock my $filename = "$portdir/$port.rsv"; - sysopen(my $portfile, $filename, O_RDWR|O_CREAT) + sysopen(my $portfile, $filename, O_RDWR | O_CREAT) || die "opening port file $filename: $!"; # take an exclusive lock to avoid concurrent access flock($portfile, LOCK_EX) || die "locking port file $filename: $!"; # see if someone else has or had a reservation of this port my $pid = <$portfile> || "0"; chomp $pid; - if ($pid +0 > 0) + if ($pid + 0 > 0) { if (kill 0, $pid) { @@ -1609,7 +1610,7 @@ sub _reserve_port # All good, go ahead and reserve the port seek($portfile, 0, SEEK_SET); # print the pid with a fixed width so we don't leave any trailing junk - print $portfile sprintf("%10d\n",$$); + print $portfile sprintf("%10d\n", $$); flock($portfile, LOCK_UN); close($portfile); push(@port_reservation_files, $filename); @@ -1705,9 +1706,9 @@ sub safe_psql my $ret = $self->psql( $dbname, $sql, %params, - stdout => \$stdout, - stderr => \$stderr, - on_error_die => 1, + stdout => \$stdout, + stderr => \$stderr, + on_error_die => 1, on_error_stop => 1); # psql can emit stderr from NOTICEs etc @@ -1819,10 +1820,10 @@ sub psql local %ENV = $self->_get_env(); - my $stdout = $params{stdout}; - my $stderr = $params{stderr}; - my $replication = $params{replication}; - my $timeout = undef; + my $stdout = $params{stdout}; + my $stderr = $params{stderr}; + my $replication = $params{replication}; + my $timeout = undef; my $timeout_exception = 
'psql timed out'; # Build the connection string. @@ -1859,7 +1860,7 @@ sub psql } $params{on_error_stop} = 1 unless defined $params{on_error_stop}; - $params{on_error_die} = 0 unless defined $params{on_error_die}; + $params{on_error_die} = 0 unless defined $params{on_error_die}; push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop}; push @psql_params, @{ $params{extra_params} } @@ -1888,7 +1889,7 @@ sub psql local $@; eval { my @ipcrun_opts = (\@psql_params, '<', \$sql); - push @ipcrun_opts, '>', $stdout if defined $stdout; + push @ipcrun_opts, '>', $stdout if defined $stdout; push @ipcrun_opts, '2>', $stderr if defined $stderr; push @ipcrun_opts, $timeout if defined $timeout; @@ -2231,8 +2232,8 @@ sub connect_ok my ($ret, $stdout, $stderr) = $self->psql( 'postgres', $sql, - extra_params => ['-w'], - connstr => "$connstr", + extra_params => ['-w'], + connstr => "$connstr", on_error_stop => 0); is($ret, 0, $test_name); @@ -2306,7 +2307,7 @@ sub connect_fails 'postgres', undef, extra_params => ['-w'], - connstr => "$connstr"); + connstr => "$connstr"); isnt($ret, 0, $test_name); @@ -2353,11 +2354,11 @@ sub poll_query_until my $cmd = [ $self->installed_command('psql'), '-XAt', - '-d', $self->connstr($dbname) + '-d', $self->connstr($dbname) ]; my ($stdout, $stderr); my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; - my $attempts = 0; + my $attempts = 0; while ($attempts < $max_attempts) { @@ -2531,8 +2532,7 @@ Returns the contents of log of the node sub log_content { my ($self) = @_; - return - PostgreSQL::Test::Utils::slurp_file($self->logfile); + return PostgreSQL::Test::Utils::slurp_file($self->logfile); } @@ -2574,11 +2574,11 @@ sub lsn { my ($self, $mode) = @_; my %modes = ( - 'insert' => 'pg_current_wal_insert_lsn()', - 'flush' => 'pg_current_wal_flush_lsn()', - 'write' => 'pg_current_wal_lsn()', + 'insert' => 'pg_current_wal_insert_lsn()', + 'flush' => 'pg_current_wal_flush_lsn()', + 'write' => 'pg_current_wal_lsn()', 'receive' 
=> 'pg_last_wal_receive_lsn()', - 'replay' => 'pg_last_wal_replay_lsn()'); + 'replay' => 'pg_last_wal_replay_lsn()'); $mode = '<undef>' if !defined($mode); croak "unknown mode for 'lsn': '$mode', valid modes are " @@ -2650,7 +2650,8 @@ sub wait_for_catchup } if (!defined($target_lsn)) { - my $isrecovery = $self->safe_psql('postgres', "SELECT pg_is_in_recovery()"); + my $isrecovery = + $self->safe_psql('postgres', "SELECT pg_is_in_recovery()"); chomp($isrecovery); if ($isrecovery eq 't') { @@ -2788,7 +2789,7 @@ sub wait_for_subscription_sync # Wait for all tables to finish initial sync. print "Waiting for all subscriptions in \"$name\" to synchronize data\n"; my $query = - qq[SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');]; + qq[SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');]; $self->poll_query_until($dbname, $query) or croak "timed out waiting for subscriber to synchronize data"; @@ -2821,7 +2822,7 @@ sub wait_for_log $offset = 0 unless defined $offset; my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; - my $attempts = 0; + my $attempts = 0; while ($attempts < $max_attempts) { @@ -2903,8 +2904,8 @@ sub slot { my ($self, $slot_name) = @_; my @columns = ( - 'plugin', 'slot_type', 'datoid', 'database', - 'active', 'active_pid', 'xmin', 'catalog_xmin', + 'plugin', 'slot_type', 'datoid', 'database', + 'active', 'active_pid', 'xmin', 'catalog_xmin', 'restart_lsn'); return $self->query_hash( 'postgres', @@ -2943,7 +2944,7 @@ sub pg_recvlogical_upto my $timeout_exception = 'pg_recvlogical timed out'; croak 'slot name must be specified' unless defined($slot_name); - croak 'endpos must be specified' unless defined($endpos); + croak 'endpos must be specified' unless defined($endpos); my @cmd = ( $self->installed_command('pg_recvlogical'), @@ -3057,7 +3058,17 @@ sub create_logical_slot_on_standby my $handle; - $handle = IPC::Run::start(['pg_recvlogical', '-d', $self->connstr($dbname), '-P', 
'test_decoding', '-S', $slot_name, '--create-slot'], '>', \$stdout, '2>', \$stderr); + $handle = IPC::Run::start( + [ + 'pg_recvlogical', '-d', + $self->connstr($dbname), '-P', + 'test_decoding', '-S', + $slot_name, '--create-slot' + ], + '>', + \$stdout, + '2>', + \$stderr); # Once the slot's restart_lsn is determined, the standby looks for # xl_running_xacts WAL record from the restart_lsn onwards. First wait @@ -3067,7 +3078,9 @@ sub create_logical_slot_on_standby 'postgres', qq[ SELECT restart_lsn IS NOT NULL FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name' - ]) or die "timed out waiting for logical slot to calculate its restart_lsn"; + ]) + or die + "timed out waiting for logical slot to calculate its restart_lsn"; # Then arrange for the xl_running_xacts record for which pg_recvlogical is # waiting. @@ -3075,8 +3088,9 @@ sub create_logical_slot_on_standby $handle->finish(); - is($self->slot($slot_name)->{'slot_type'}, 'logical', $slot_name . ' on standby created') - or die "could not create slot" . $slot_name; + is($self->slot($slot_name)->{'slot_type'}, + 'logical', $slot_name . ' on standby created') + or die "could not create slot" . 
$slot_name; } =pod diff --git a/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm b/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm index b4a441d6f45..15964e62173 100644 --- a/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm +++ b/src/test/perl/PostgreSQL/Test/RecursiveCopy.pm @@ -93,7 +93,7 @@ sub copypath sub _copypath_recurse { my ($base_src_dir, $base_dest_dir, $curr_path, $filterfn) = @_; - my $srcpath = "$base_src_dir/$curr_path"; + my $srcpath = "$base_src_dir/$curr_path"; my $destpath = "$base_dest_dir/$curr_path"; # invoke the filter and skip all further operation if it returns false diff --git a/src/test/perl/PostgreSQL/Test/SimpleTee.pm b/src/test/perl/PostgreSQL/Test/SimpleTee.pm index 029a8880232..82099bf5036 100644 --- a/src/test/perl/PostgreSQL/Test/SimpleTee.pm +++ b/src/test/perl/PostgreSQL/Test/SimpleTee.pm @@ -27,7 +27,7 @@ BEGIN { $last_time = time; } sub _time_str { - my $tm = time; + my $tm = time; my $diff = $tm - $last_time; $last_time = $tm; my ($sec, $min, $hour) = localtime($tm); @@ -45,12 +45,12 @@ sub TIEHANDLE sub PRINT { my $self = shift; - my $ok = 1; + my $ok = 1; # The first file argument passed to tiehandle in PostgreSQL::Test::Utils is # the original stdout, which is what PROVE sees. Additional decorations # confuse it, so only put out the time string on files after the first. my $skip = 1; - my $ts = _time_str; + my $ts = _time_str; for my $fh (@$self) { print $fh ($skip ? "" : $ts), @_ or $ok = 0; diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm index f03d29309d9..38cd7d830d8 100644 --- a/src/test/perl/PostgreSQL/Test/Utils.pm +++ b/src/test/perl/PostgreSQL/Test/Utils.pm @@ -146,7 +146,7 @@ BEGIN $windows_os = $Config{osname} eq 'MSWin32' || $Config{osname} eq 'msys'; # Check if this environment is MSYS2. 
$is_msys2 = - $windows_os + $windows_os && -x '/usr/bin/uname' && `uname -or` =~ /^[2-9].*Msys/; @@ -211,15 +211,15 @@ INIT # Hijack STDOUT and STDERR to the log file open(my $orig_stdout, '>&', \*STDOUT); open(my $orig_stderr, '>&', \*STDERR); - open(STDOUT, '>&', $testlog); - open(STDERR, '>&', $testlog); + open(STDOUT, '>&', $testlog); + open(STDERR, '>&', $testlog); # The test output (ok ...) needs to be printed to the original STDOUT so # that the 'prove' program can parse it, and display it to the user in # real time. But also copy it to the log file, to provide more context # in the log. my $builder = Test::More->builder; - my $fh = $builder->output; + my $fh = $builder->output; tie *$fh, "PostgreSQL::Test::SimpleTee", $orig_stdout, $testlog; $fh = $builder->failure_output; tie *$fh, "PostgreSQL::Test::SimpleTee", $orig_stderr, $testlog; @@ -284,7 +284,7 @@ sub tempdir $prefix = "tmp_test" unless defined $prefix; return File::Temp::tempdir( $prefix . '_XXXX', - DIR => $tmp_check, + DIR => $tmp_check, CLEANUP => 1); } @@ -321,7 +321,7 @@ https://postgr.es/m/20220116210241.GC756210@rfd.leadboat.com for details. sub has_wal_read_bug { return - $Config{osname} eq 'linux' + $Config{osname} eq 'linux' && $Config{archname} =~ /^sparc/ && !run_log([ qw(df -x ext4), $tmp_check ], '>', '/dev/null', '2>&1'); } @@ -563,10 +563,10 @@ sub string_replace_file my ($filename, $find, $replace) = @_; open(my $in, '<', $filename); my $content; - while(<$in>) + while (<$in>) { $_ =~ s/$find/$replace/; - $content = $content.$_; + $content = $content . $_; } close $in; open(my $out, '>', $filename); @@ -595,7 +595,7 @@ sub check_mode_recursive find( { follow_fast => 1, - wanted => sub { + wanted => sub { # Is file in the ignore list? foreach my $ignore ($ignore_list ? 
@{$ignore_list} : []) { @@ -611,7 +611,7 @@ sub check_mode_recursive unless (defined($file_stat)) { my $is_ENOENT = $!{ENOENT}; - my $msg = "unable to stat $File::Find::name: $!"; + my $msg = "unable to stat $File::Find::name: $!"; if ($is_ENOENT) { warn $msg; @@ -682,7 +682,7 @@ sub chmod_recursive find( { follow_fast => 1, - wanted => sub { + wanted => sub { my $file_stat = stat($File::Find::name); if (defined($file_stat)) diff --git a/src/test/perl/PostgreSQL/Version.pm b/src/test/perl/PostgreSQL/Version.pm index 7e5f5faba56..3705c1bdafc 100644 --- a/src/test/perl/PostgreSQL/Version.pm +++ b/src/test/perl/PostgreSQL/Version.pm @@ -52,7 +52,7 @@ use Scalar::Util qw(blessed); use overload '<=>' => \&_version_cmp, 'cmp' => \&_version_cmp, - '""' => \&_stringify; + '""' => \&_stringify; =pod @@ -74,7 +74,7 @@ of a Postgres command like `psql --version` or `pg_config --version`; sub new { my $class = shift; - my $arg = shift; + my $arg = shift; chomp $arg; diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl index 76846905a71..0c72ba09441 100644 --- a/src/test/recovery/t/001_stream_rep.pl +++ b/src/test/recovery/t/001_stream_rep.pl @@ -14,7 +14,7 @@ my $node_primary = PostgreSQL::Test::Cluster->new('primary'); # and it needs proper authentication configuration. 
$node_primary->init( allows_streaming => 1, - auth_extra => [ '--create-role', 'repl_role' ]); + auth_extra => [ '--create-role', 'repl_role' ]); $node_primary->start; my $backup_name = 'my_backup'; @@ -91,18 +91,18 @@ sub test_target_session_attrs { local $Test::Builder::Level = $Test::Builder::Level + 1; - my $node1 = shift; - my $node2 = shift; + my $node1 = shift; + my $node2 = shift; my $target_node = shift; - my $mode = shift; - my $status = shift; - - my $node1_host = $node1->host; - my $node1_port = $node1->port; - my $node1_name = $node1->name; - my $node2_host = $node2->host; - my $node2_port = $node2->port; - my $node2_name = $node2->name; + my $mode = shift; + my $status = shift; + + my $node1_host = $node1->host; + my $node1_port = $node1->port; + my $node1_name = $node1->name; + my $node2_host = $node2->host; + my $node2_port = $node2->port; + my $node2_name = $node2->name; my $target_port = undef; $target_port = $target_node->port if (defined $target_node); my $target_name = undef; @@ -218,11 +218,11 @@ $node_primary->psql( 'postgres', " CREATE ROLE repl_role REPLICATION LOGIN; GRANT pg_read_all_settings TO repl_role;"); -my $primary_host = $node_primary->host; -my $primary_port = $node_primary->port; +my $primary_host = $node_primary->host; +my $primary_port = $node_primary->port; my $connstr_common = "host=$primary_host port=$primary_port user=repl_role"; -my $connstr_rep = "$connstr_common replication=1"; -my $connstr_db = "$connstr_common replication=database dbname=postgres"; +my $connstr_rep = "$connstr_common replication=1"; +my $connstr_db = "$connstr_common replication=database dbname=postgres"; # Test SHOW ALL my ($ret, $stdout, $stderr) = $node_primary->psql( @@ -534,8 +534,8 @@ my $connstr = $node_primary->connstr('postgres') . " replication=database"; # a replication command and a SQL command. 
$node_primary->command_fails_like( [ - 'psql', '-X', '-c', "SELECT pg_backup_start('backup', true)", - '-c', 'BASE_BACKUP', '-d', $connstr + 'psql', '-X', '-c', "SELECT pg_backup_start('backup', true)", + '-c', 'BASE_BACKUP', '-d', $connstr ], qr/a backup is already in progress in this session/, 'BASE_BACKUP cannot run in session already running backup'); @@ -553,8 +553,8 @@ my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', ''); my $sigchld_bb = IPC::Run::start( [ 'psql', '-X', '-c', "BASE_BACKUP (CHECKPOINT 'fast', MAX_RATE 32);", - '-c', 'SELECT pg_backup_stop()', - '-d', $connstr + '-c', 'SELECT pg_backup_stop()', + '-d', $connstr ], '<', \$sigchld_bb_stdin, @@ -577,7 +577,7 @@ is( $node_primary->poll_query_until( # The psql command should fail on pg_backup_stop(). ok( pump_until( - $sigchld_bb, $sigchld_bb_timeout, + $sigchld_bb, $sigchld_bb_timeout, \$sigchld_bb_stderr, qr/backup is not in progress/), 'base backup cleanly cancelled'); $sigchld_bb->finish(); diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl index cccf2677e3e..48e00f9e296 100644 --- a/src/test/recovery/t/002_archiving.pl +++ b/src/test/recovery/t/002_archiving.pl @@ -12,7 +12,7 @@ use File::Copy; # Initialize primary node, doing archives my $node_primary = PostgreSQL::Test::Cluster->new('primary'); $node_primary->init( - has_archiving => 1, + has_archiving => 1, allows_streaming => 1); my $backup_name = 'my_backup'; @@ -33,9 +33,9 @@ $node_standby->append_conf('postgresql.conf', # Set archive_cleanup_command and recovery_end_command, checking their # execution by the backend with dummy commands. 
-my $data_dir = $node_standby->data_dir; +my $data_dir = $node_standby->data_dir; my $archive_cleanup_command_file = "archive_cleanup_command.done"; -my $recovery_end_command_file = "recovery_end_command.done"; +my $recovery_end_command_file = "recovery_end_command.done"; $node_standby->append_conf( 'postgresql.conf', qq( archive_cleanup_command = 'echo archive_cleanup_done > $archive_cleanup_command_file' diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl index 84d06096f6a..e882ce20773 100644 --- a/src/test/recovery/t/003_recovery_targets.pl +++ b/src/test/recovery/t/003_recovery_targets.pl @@ -16,12 +16,12 @@ sub test_recovery_standby { local $Test::Builder::Level = $Test::Builder::Level + 1; - my $test_name = shift; - my $node_name = shift; - my $node_primary = shift; + my $test_name = shift; + my $node_name = shift; + my $node_primary = shift; my $recovery_params = shift; - my $num_rows = shift; - my $until_lsn = shift; + my $num_rows = shift; + my $until_lsn = shift; my $node_standby = PostgreSQL::Test::Cluster->new($node_name); $node_standby->init_from_backup($node_primary, 'my_backup', @@ -147,7 +147,7 @@ recovery_target_time = '$recovery_time'"); my $res = run_log( [ - 'pg_ctl', '-D', $node_standby->data_dir, '-l', + 'pg_ctl', '-D', $node_standby->data_dir, '-l', $node_standby->logfile, 'start' ]); ok(!$res, 'invalid recovery startup fails'); @@ -162,13 +162,13 @@ $node_standby = PostgreSQL::Test::Cluster->new('standby_8'); $node_standby->init_from_backup( $node_primary, 'my_backup', has_restoring => 1, - standby => 0); + standby => 0); $node_standby->append_conf('postgresql.conf', "recovery_target_name = 'does_not_exist'"); run_log( [ - 'pg_ctl', '-D', $node_standby->data_dir, '-l', + 'pg_ctl', '-D', $node_standby->data_dir, '-l', $node_standby->logfile, 'start' ]); diff --git a/src/test/recovery/t/005_replay_delay.pl b/src/test/recovery/t/005_replay_delay.pl index bc1793ca94f..8fadca42045 100644 --- 
a/src/test/recovery/t/005_replay_delay.pl +++ b/src/test/recovery/t/005_replay_delay.pl @@ -24,7 +24,7 @@ $node_primary->backup($backup_name); # Create streaming standby from backup my $node_standby = PostgreSQL::Test::Cluster->new('standby'); -my $delay = 3; +my $delay = 3; $node_standby->init_from_backup($node_primary, $backup_name, has_streaming => 1); $node_standby->append_conf( diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl index fe0319009b6..5025d65b1b4 100644 --- a/src/test/recovery/t/006_logical_decoding.pl +++ b/src/test/recovery/t/006_logical_decoding.pl @@ -109,7 +109,7 @@ $node_primary->safe_psql('postgres', my $stdout_recv = $node_primary->pg_recvlogical_upto( 'postgres', 'test_slot', $endpos, $PostgreSQL::Test::Utils::timeout_default, - 'include-xids' => '0', + 'include-xids' => '0', 'skip-empty-xacts' => '1'); chomp($stdout_recv); is($stdout_recv, $expected, @@ -122,7 +122,7 @@ $node_primary->poll_query_until('postgres', $stdout_recv = $node_primary->pg_recvlogical_upto( 'postgres', 'test_slot', $endpos, $PostgreSQL::Test::Utils::timeout_default, - 'include-xids' => '0', + 'include-xids' => '0', 'skip-empty-xacts' => '1'); chomp($stdout_recv); is($stdout_recv, '', 'pg_recvlogical acknowledged changes'); diff --git a/src/test/recovery/t/009_twophase.pl b/src/test/recovery/t/009_twophase.pl index 900b03421f1..e1273fd0f12 100644 --- a/src/test/recovery/t/009_twophase.pl +++ b/src/test/recovery/t/009_twophase.pl @@ -10,7 +10,7 @@ use PostgreSQL::Test::Utils; use Test::More; my $psql_out = ''; -my $psql_rc = ''; +my $psql_rc = ''; sub configure_and_reload { @@ -49,7 +49,7 @@ $node_paris->start; # Switch to synchronous replication in both directions configure_and_reload($node_london, "synchronous_standby_names = 'paris'"); -configure_and_reload($node_paris, "synchronous_standby_names = 'london'"); +configure_and_reload($node_paris, "synchronous_standby_names = 'london'"); # Set up nonce names for 
current primary and standby nodes note "Initially, london is primary and paris is standby"; diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl b/src/test/recovery/t/010_logical_decoding_timelines.pl index 993f654a9b0..6fbbeedde3b 100644 --- a/src/test/recovery/t/010_logical_decoding_timelines.pl +++ b/src/test/recovery/t/010_logical_decoding_timelines.pl @@ -187,8 +187,8 @@ my $endpos = $node_replica->safe_psql('postgres', $stdout = $node_replica->pg_recvlogical_upto( 'postgres', 'before_basebackup', - $endpos, $PostgreSQL::Test::Utils::timeout_default, - 'include-xids' => '0', + $endpos, $PostgreSQL::Test::Utils::timeout_default, + 'include-xids' => '0', 'skip-empty-xacts' => '1'); # walsender likes to add a newline diff --git a/src/test/recovery/t/012_subtransactions.pl b/src/test/recovery/t/012_subtransactions.pl index 177ab9bc584..91ae79dd514 100644 --- a/src/test/recovery/t/012_subtransactions.pl +++ b/src/test/recovery/t/012_subtransactions.pl @@ -35,7 +35,7 @@ $node_primary->append_conf( $node_primary->psql('postgres', "SELECT pg_reload_conf()"); my $psql_out = ''; -my $psql_rc = ''; +my $psql_rc = ''; ############################################################################### # Check that replay will correctly set SUBTRANS and properly advance nextXid diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl index 92e7b367df2..ce57792f312 100644 --- a/src/test/recovery/t/013_crash_restart.pl +++ b/src/test/recovery/t/013_crash_restart.pl @@ -80,7 +80,7 @@ BEGIN; INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status; ]; ok( pump_until( - $killme, $psql_timeout, + $killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigquit/m), 'inserted in-progress-before-sigquit'); $killme_stdout = ''; @@ -164,7 +164,7 @@ BEGIN; INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status; ]; ok( pump_until( - $killme, $psql_timeout, + $killme, $psql_timeout, 
\$killme_stdout, qr/in-progress-before-sigkill/m), 'inserted in-progress-before-sigkill'); $killme_stdout = ''; diff --git a/src/test/recovery/t/014_unlogged_reinit.pl b/src/test/recovery/t/014_unlogged_reinit.pl index d1e1811fe1b..3591b3309e6 100644 --- a/src/test/recovery/t/014_unlogged_reinit.pl +++ b/src/test/recovery/t/014_unlogged_reinit.pl @@ -30,9 +30,9 @@ my $seqUnloggedPath = $node->safe_psql('postgres', # Test that main and init forks exist. ok(-f "$pgdata/${baseUnloggedPath}_init", 'table init fork exists'); -ok(-f "$pgdata/$baseUnloggedPath", 'table main fork exists'); -ok(-f "$pgdata/${seqUnloggedPath}_init", 'sequence init fork exists'); -ok(-f "$pgdata/$seqUnloggedPath", 'sequence main fork exists'); +ok(-f "$pgdata/$baseUnloggedPath", 'table main fork exists'); +ok(-f "$pgdata/${seqUnloggedPath}_init", 'sequence init fork exists'); +ok(-f "$pgdata/$seqUnloggedPath", 'sequence main fork exists'); # Test the sequence is($node->safe_psql('postgres', "SELECT nextval('seq_unlogged')"), @@ -54,7 +54,7 @@ my $ts1UnloggedPath = $node->safe_psql('postgres', # Test that main and init forks exist. ok(-f "$pgdata/${ts1UnloggedPath}_init", 'init fork in tablespace exists'); -ok(-f "$pgdata/$ts1UnloggedPath", 'main fork in tablespace exists'); +ok(-f "$pgdata/$ts1UnloggedPath", 'main fork in tablespace exists'); # Create more unlogged sequences for testing. $node->safe_psql('postgres', 'CREATE UNLOGGED SEQUENCE seq_unlogged2'); @@ -73,7 +73,7 @@ $node->safe_psql('postgres', 'INSERT INTO tab_seq_unlogged3 DEFAULT VALUES'); $node->stop('immediate'); # Write fake forks to test that they are removed during recovery. -append_to_file("$pgdata/${baseUnloggedPath}_vm", 'TEST_VM'); +append_to_file("$pgdata/${baseUnloggedPath}_vm", 'TEST_VM'); append_to_file("$pgdata/${baseUnloggedPath}_fsm", 'TEST_FSM'); # Remove main fork to test that it is recopied from init. 
@@ -83,7 +83,7 @@ unlink("$pgdata/${seqUnloggedPath}") or BAIL_OUT("could not remove \"${seqUnloggedPath}\": $!"); # the same for the tablespace -append_to_file("$pgdata/${ts1UnloggedPath}_vm", 'TEST_VM'); +append_to_file("$pgdata/${ts1UnloggedPath}_vm", 'TEST_VM'); append_to_file("$pgdata/${ts1UnloggedPath}_fsm", 'TEST_FSM'); unlink("$pgdata/${ts1UnloggedPath}") or BAIL_OUT("could not remove \"${ts1UnloggedPath}\": $!"); diff --git a/src/test/recovery/t/016_min_consistency.pl b/src/test/recovery/t/016_min_consistency.pl index a7e709315fb..81f7a43c079 100644 --- a/src/test/recovery/t/016_min_consistency.pl +++ b/src/test/recovery/t/016_min_consistency.pl @@ -20,7 +20,7 @@ use Test::More; sub find_largest_lsn { my $blocksize = int(shift); - my $filename = shift; + my $filename = shift; my ($max_hi, $max_lo) = (0, 0); open(my $fh, "<:raw", $filename) or die "failed to open $filename: $!"; diff --git a/src/test/recovery/t/017_shm.pl b/src/test/recovery/t/017_shm.pl index 13ee7e194c4..74359e0e388 100644 --- a/src/test/recovery/t/017_shm.pl +++ b/src/test/recovery/t/017_shm.pl @@ -147,7 +147,7 @@ log_ipcs(); my $pre_existing_msg = qr/pre-existing shared memory block/; { my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; - my $attempts = 0; + my $attempts = 0; while ($attempts < $max_attempts) { last @@ -194,7 +194,7 @@ sub poll_start my ($node) = @_; my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default; - my $attempts = 0; + my $attempts = 0; while ($attempts < $max_attempts) { diff --git a/src/test/recovery/t/018_wal_optimize.pl b/src/test/recovery/t/018_wal_optimize.pl index 866259580a5..1d613eaede4 100644 --- a/src/test/recovery/t/018_wal_optimize.pl +++ b/src/test/recovery/t/018_wal_optimize.pl @@ -24,7 +24,7 @@ sub check_orphan_relfilenodes my $db_oid = $node->safe_psql('postgres', "SELECT oid FROM pg_database WHERE datname = 'postgres'"); - my $prefix = "base/$db_oid/"; + my $prefix = "base/$db_oid/"; my $filepaths_referenced = 
$node->safe_psql( 'postgres', " SELECT pg_relation_filepath(oid) FROM pg_class @@ -145,7 +145,7 @@ wal_skip_threshold = 0 is($result, qq(20000), "wal_level = $wal_level, end-of-xact WAL"); # Data file for COPY query in subsequent tests - my $basedir = $node->basedir; + my $basedir = $node->basedir; my $copy_file = "$basedir/copy_data.txt"; PostgreSQL::Test::Utils::append_to_file( $copy_file, qq(20000,30000 diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl index cb047bf77d9..a1aba16e145 100644 --- a/src/test/recovery/t/019_replslot_limit.pl +++ b/src/test/recovery/t/019_replslot_limit.pl @@ -377,7 +377,7 @@ $logstart = get_log_size($node_primary3); kill 'STOP', $senderpid, $receiverpid; advance_wal($node_primary3, 2); -my $msg_logged = 0; +my $msg_logged = 0; my $max_attempts = $PostgreSQL::Test::Utils::timeout_default; while ($max_attempts-- >= 0) { @@ -402,7 +402,7 @@ $node_primary3->poll_query_until('postgres', "lost") or die "timed out waiting for slot to be lost"; -$msg_logged = 0; +$msg_logged = 0; $max_attempts = $PostgreSQL::Test::Utils::timeout_default; while ($max_attempts-- >= 0) { diff --git a/src/test/recovery/t/020_archive_status.pl b/src/test/recovery/t/020_archive_status.pl index 13ada994dbb..fa24153d4b9 100644 --- a/src/test/recovery/t/020_archive_status.pl +++ b/src/test/recovery/t/020_archive_status.pl @@ -12,7 +12,7 @@ use Test::More; my $primary = PostgreSQL::Test::Cluster->new('primary'); $primary->init( - has_archiving => 1, + has_archiving => 1, allows_streaming => 1); $primary->append_conf('postgresql.conf', 'autovacuum = off'); $primary->start; @@ -39,9 +39,9 @@ $primary->safe_psql( # This will be used to track the activity of the archiver. 
my $segment_name_1 = $primary->safe_psql('postgres', q{SELECT pg_walfile_name(pg_current_wal_lsn())}); -my $segment_path_1 = "pg_wal/archive_status/$segment_name_1"; +my $segment_path_1 = "pg_wal/archive_status/$segment_name_1"; my $segment_path_1_ready = "$segment_path_1.ready"; -my $segment_path_1_done = "$segment_path_1.done"; +my $segment_path_1_done = "$segment_path_1.done"; $primary->safe_psql( 'postgres', q{ CREATE TABLE mine AS SELECT generate_series(1,10) AS x; @@ -115,9 +115,9 @@ is( $primary->safe_psql( # with existing status files. my $segment_name_2 = $primary->safe_psql('postgres', q{SELECT pg_walfile_name(pg_current_wal_lsn())}); -my $segment_path_2 = "pg_wal/archive_status/$segment_name_2"; +my $segment_path_2 = "pg_wal/archive_status/$segment_name_2"; my $segment_path_2_ready = "$segment_path_2.ready"; -my $segment_path_2_done = "$segment_path_2.done"; +my $segment_path_2_done = "$segment_path_2.done"; $primary->safe_psql( 'postgres', q{ INSERT INTO mine SELECT generate_series(10,20) AS x; diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl index 03c8efdfb5c..14fd8bfc7fc 100644 --- a/src/test/recovery/t/022_crash_temp_files.pl +++ b/src/test/recovery/t/022_crash_temp_files.pl @@ -98,7 +98,7 @@ SELECT $$in-progress-before-sigkill$$; INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i); ]; ok( pump_until( - $killme, $psql_timeout, + $killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m), 'insert in-progress-before-sigkill'); $killme_stdout = ''; @@ -205,7 +205,7 @@ SELECT $$in-progress-before-sigkill$$; INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i); ]; ok( pump_until( - $killme, $psql_timeout, + $killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m), 'insert in-progress-before-sigkill'); $killme_stdout = ''; diff --git a/src/test/recovery/t/023_pitr_prepared_xact.pl b/src/test/recovery/t/023_pitr_prepared_xact.pl index 
e55098ef7fe..a8cdf4efdd4 100644 --- a/src/test/recovery/t/023_pitr_prepared_xact.pl +++ b/src/test/recovery/t/023_pitr_prepared_xact.pl @@ -27,7 +27,7 @@ $node_primary->backup($backup_name); my $node_pitr = PostgreSQL::Test::Cluster->new('node_pitr'); $node_pitr->init_from_backup( $node_primary, $backup_name, - standby => 0, + standby => 0, has_restoring => 1); $node_pitr->append_conf( 'postgresql.conf', qq{ diff --git a/src/test/recovery/t/024_archive_recovery.pl b/src/test/recovery/t/024_archive_recovery.pl index 43eb4213210..d594332b18d 100644 --- a/src/test/recovery/t/024_archive_recovery.pl +++ b/src/test/recovery/t/024_archive_recovery.pl @@ -70,15 +70,15 @@ sub test_recovery_wal_level_minimal $recovery_node->init_from_backup( $node, $backup_name, has_restoring => 1, - standby => $standby_setting); + standby => $standby_setting); # Use run_log instead of recovery_node->start because this test expects # that the server ends with an error during recovery. run_log( [ - 'pg_ctl', '-D', + 'pg_ctl', '-D', $recovery_node->data_dir, '-l', - $recovery_node->logfile, 'start' + $recovery_node->logfile, 'start' ]); # wait for postgres to terminate diff --git a/src/test/recovery/t/025_stuck_on_old_timeline.pl b/src/test/recovery/t/025_stuck_on_old_timeline.pl index fc88ceff9d7..91309030df9 100644 --- a/src/test/recovery/t/025_stuck_on_old_timeline.pl +++ b/src/test/recovery/t/025_stuck_on_old_timeline.pl @@ -51,8 +51,8 @@ my $node_standby = PostgreSQL::Test::Cluster->new('standby'); $node_standby->init_from_backup( $node_primary, $backup_name, allows_streaming => 1, - has_streaming => 1, - has_archiving => 1); + has_streaming => 1, + has_archiving => 1); $node_standby->start; # Take backup of standby, use -Xnone so that pg_wal is empty. 
diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl index 255c45a4ff1..f2f4e77626f 100644 --- a/src/test/recovery/t/027_stream_regress.pl +++ b/src/test/recovery/t/027_stream_regress.pl @@ -60,7 +60,7 @@ $node_standby_1->append_conf('postgresql.conf', 'max_standby_streaming_delay = 600s'); $node_standby_1->start; -my $dlpath = dirname($ENV{REGRESS_SHLIB}); +my $dlpath = dirname($ENV{REGRESS_SHLIB}); my $outputdir = $PostgreSQL::Test::Utils::tmp_check; # Run the regression tests against the primary. @@ -103,7 +103,7 @@ $node_primary->wait_for_replay_catchup($node_standby_1); command_ok( [ 'pg_dumpall', '-f', $outputdir . '/primary.dump', - '--no-sync', '-p', $node_primary->port, + '--no-sync', '-p', $node_primary->port, '--no-unlogged-table-data' # if unlogged, standby has schema only ], 'dump primary server'); diff --git a/src/test/recovery/t/028_pitr_timelines.pl b/src/test/recovery/t/028_pitr_timelines.pl index b32a12968aa..bb29a2d378c 100644 --- a/src/test/recovery/t/028_pitr_timelines.pl +++ b/src/test/recovery/t/028_pitr_timelines.pl @@ -64,7 +64,7 @@ INSERT INTO foo VALUES(2); my $node_standby = PostgreSQL::Test::Cluster->new('standby'); $node_standby->init_from_backup( $node_primary, $backup_name, - standby => 1, + standby => 1, has_streaming => 1, has_archiving => 1, has_restoring => 0); @@ -118,7 +118,7 @@ $node_standby->stop; my $node_pitr = PostgreSQL::Test::Cluster->new('node_pitr'); $node_pitr->init_from_backup( $node_primary, $backup_name, - standby => 0, + standby => 0, has_restoring => 1); $node_pitr->append_conf( 'postgresql.conf', qq{ @@ -156,7 +156,7 @@ $node_pitr->stop(); my $node_pitr2 = PostgreSQL::Test::Cluster->new('node_pitr2'); $node_pitr2->init_from_backup( $node_primary, $backup_name, - standby => 0, + standby => 0, has_restoring => 1); $node_pitr2->append_conf( 'postgresql.conf', qq{ diff --git a/src/test/recovery/t/029_stats_restart.pl b/src/test/recovery/t/029_stats_restart.pl index 
83d6647d32c..742bd57e289 100644 --- a/src/test/recovery/t/029_stats_restart.pl +++ b/src/test/recovery/t/029_stats_restart.pl @@ -15,7 +15,7 @@ $node->init(allows_streaming => 1); $node->append_conf('postgresql.conf', "track_functions = 'all'"); $node->start; -my $connect_db = 'postgres'; +my $connect_db = 'postgres'; my $db_under_test = 'test'; # create test objects @@ -53,7 +53,7 @@ $node->stop(); my $statsfile = $PostgreSQL::Test::Utils::tmp_check . '/' . "discard_stats1"; ok(!-f "$statsfile", "backup statsfile cannot already exist"); -my $datadir = $node->data_dir(); +my $datadir = $node->data_dir(); my $og_stats = "$datadir/pg_stat/pgstat.stat"; ok(-f "$og_stats", "origin stats file must exist"); copy($og_stats, $statsfile) or die "Copy failed: $!"; @@ -147,12 +147,12 @@ $node->safe_psql($connect_db, "CHECKPOINT; CHECKPOINT;"); ## check checkpoint and wal stats are incremented due to restart my $ckpt_start = checkpoint_stats(); -my $wal_start = wal_stats(); +my $wal_start = wal_stats(); $node->restart; $sect = "post restart"; my $ckpt_restart = checkpoint_stats(); -my $wal_restart = wal_stats(); +my $wal_restart = wal_stats(); cmp_ok( $ckpt_start->{count}, '<', @@ -176,7 +176,7 @@ is($wal_start->{reset}, $wal_restart->{reset}, $node->safe_psql($connect_db, "SELECT pg_stat_reset_shared('bgwriter')"); $sect = "post ckpt reset"; -my $ckpt_reset = checkpoint_stats(); +my $ckpt_reset = checkpoint_stats(); my $wal_ckpt_reset = wal_stats(); cmp_ok($ckpt_restart->{count}, @@ -200,7 +200,7 @@ $node->restart; $sect = "post ckpt reset & restart"; my $ckpt_restart_reset = checkpoint_stats(); -my $wal_restart2 = wal_stats(); +my $wal_restart2 = wal_stats(); # made sure above there's enough checkpoints that this will be stable even on slow machines cmp_ok( diff --git a/src/test/recovery/t/031_recovery_conflict.pl b/src/test/recovery/t/031_recovery_conflict.pl index e29bc6c181c..05e83fa854f 100644 --- a/src/test/recovery/t/031_recovery_conflict.pl +++ 
b/src/test/recovery/t/031_recovery_conflict.pl @@ -67,8 +67,8 @@ $node_primary->wait_for_replay_catchup($node_standby); # a longrunning psql that we can use to trigger conflicts -my $psql_standby = $node_standby->background_psql($test_db, - on_error_stop => 0); +my $psql_standby = + $node_standby->background_psql($test_db, on_error_stop => 0); my $expected_conflicts = 0; @@ -96,7 +96,8 @@ my $cursor1 = "test_recovery_conflict_cursor"; # DECLARE and use a cursor on standby, causing buffer with the only block of # the relation to be pinned on the standby -my $res = $psql_standby->query_safe(qq[ +my $res = $psql_standby->query_safe( + qq[ BEGIN; DECLARE $cursor1 CURSOR FOR SELECT b FROM $table1; FETCH FORWARD FROM $cursor1; @@ -131,7 +132,8 @@ $node_primary->safe_psql($test_db, $node_primary->wait_for_replay_catchup($node_standby); # DECLARE and FETCH from cursor on the standby -$res = $psql_standby->query_safe(qq[ +$res = $psql_standby->query_safe( + qq[ BEGIN; DECLARE $cursor1 CURSOR FOR SELECT b FROM $table1; FETCH FORWARD FROM $cursor1; @@ -159,7 +161,8 @@ $sect = "lock conflict"; $expected_conflicts++; # acquire lock to conflict with -$res = $psql_standby->query_safe(qq[ +$res = $psql_standby->query_safe( + qq[ BEGIN; LOCK TABLE $table1 IN ACCESS SHARE MODE; SELECT 1; @@ -183,7 +186,8 @@ $expected_conflicts++; # DECLARE a cursor for a query which, with sufficiently low work_mem, will # spill tuples into temp files in the temporary tablespace created during # setup. 
-$res = $psql_standby->query_safe(qq[ +$res = $psql_standby->query_safe( + qq[ BEGIN; SET work_mem = '64kB'; DECLARE $cursor1 CURSOR FOR @@ -240,7 +244,8 @@ SELECT txid_current(); $node_primary->wait_for_replay_catchup($node_standby); -$res = $psql_standby->query_until(qr/^1$/m, qq[ +$res = $psql_standby->query_until( + qr/^1$/m, qq[ BEGIN; -- hold pin DECLARE $cursor1 CURSOR FOR SELECT a FROM $table1; @@ -248,7 +253,9 @@ $res = $psql_standby->query_until(qr/^1$/m, qq[ -- wait for lock held by prepared transaction SELECT * FROM $table2; ]); -ok( 1, "$sect: cursor holding conflicting pin, also waiting for lock, established"); +ok(1, + "$sect: cursor holding conflicting pin, also waiting for lock, established" +); # just to make sure we're waiting for lock already ok( $node_standby->poll_query_until( @@ -305,7 +312,7 @@ done_testing(); sub check_conflict_log { - my $message = shift; + my $message = shift; my $old_log_location = $log_location; $log_location = $node_standby->wait_for_log(qr/$message/, $log_location); @@ -318,7 +325,7 @@ sub check_conflict_log sub check_conflict_stat { my $conflict_type = shift; - my $count = $node_standby->safe_psql($test_db, + my $count = $node_standby->safe_psql($test_db, qq[SELECT confl_$conflict_type FROM pg_stat_database_conflicts WHERE datname='$test_db';] ); diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl index 92ec510037a..3bc2db1a4f3 100644 --- a/src/test/recovery/t/032_relfilenode_reuse.pl +++ b/src/test/recovery/t/032_relfilenode_reuse.pl @@ -141,8 +141,8 @@ $node_primary->safe_psql('postgres', $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 7;"); cause_eviction(\%psql_primary, \%psql_standby); $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 8;"); -$node_primary->safe_psql('postgres', 'DROP DATABASE conflict_db'); -$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace'); +$node_primary->safe_psql('postgres', 'DROP 
DATABASE conflict_db'); +$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace'); $node_primary->safe_psql('postgres', 'REINDEX TABLE pg_database'); diff --git a/src/test/recovery/t/033_replay_tsp_drops.pl b/src/test/recovery/t/033_replay_tsp_drops.pl index 42a6e693328..0a35a7bda69 100644 --- a/src/test/recovery/t/033_replay_tsp_drops.pl +++ b/src/test/recovery/t/033_replay_tsp_drops.pl @@ -37,8 +37,7 @@ sub test_tablespace has_streaming => 1); $node_standby->append_conf('postgresql.conf', "allow_in_place_tablespaces = on"); - $node_standby->append_conf('postgresql.conf', - "primary_slot_name = slot"); + $node_standby->append_conf('postgresql.conf', "primary_slot_name = slot"); $node_standby->start; # Make sure the connection is made @@ -137,7 +136,8 @@ while ($max_attempts-- >= 0) last if ( find_in_log( - $node_standby, qr!WARNING: ( [A-Z0-9]+:)? creating missing directory: pg_tblspc/!, + $node_standby, + qr!WARNING: ( [A-Z0-9]+:)? creating missing directory: pg_tblspc/!, $logstart)); usleep(100_000); } diff --git a/src/test/recovery/t/034_create_database.pl b/src/test/recovery/t/034_create_database.pl index 4698cbc3915..ed562bba251 100644 --- a/src/test/recovery/t/034_create_database.pl +++ b/src/test/recovery/t/034_create_database.pl @@ -17,7 +17,7 @@ $node->start; # are persisted after creating a database from it using the WAL_LOG strategy, # as a direct copy of the template database's pg_class is used in this case. my $db_template = "template1"; -my $db_new = "test_db_1"; +my $db_new = "test_db_1"; # Create table. It should persist on the template database. 
$node->safe_psql("postgres", diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl index 2b4a6883302..64beec4bd34 100644 --- a/src/test/recovery/t/035_standby_logical_decoding.pl +++ b/src/test/recovery/t/035_standby_logical_decoding.pl @@ -10,17 +10,18 @@ use PostgreSQL::Test::Cluster; use PostgreSQL::Test::Utils; use Test::More; -my ($stdin, $stdout, $stderr, - $cascading_stdout, $cascading_stderr, $subscriber_stdin, +my ($stdin, $stdout, $stderr, + $cascading_stdout, $cascading_stderr, $subscriber_stdin, $subscriber_stdout, $subscriber_stderr, $ret, - $handle, $slot); + $handle, $slot); my $node_primary = PostgreSQL::Test::Cluster->new('primary'); my $node_standby = PostgreSQL::Test::Cluster->new('standby'); -my $node_cascading_standby = PostgreSQL::Test::Cluster->new('cascading_standby'); +my $node_cascading_standby = + PostgreSQL::Test::Cluster->new('cascading_standby'); my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber'); my $default_timeout = $PostgreSQL::Test::Utils::timeout_default; -my $psql_timeout = IPC::Run::timer($default_timeout); +my $psql_timeout = IPC::Run::timer($default_timeout); my $res; # Name for the physical slot on primary @@ -62,8 +63,10 @@ sub create_logical_slots my $active_slot = $slot_prefix . 'activeslot'; my $inactive_slot = $slot_prefix . 'inactiveslot'; - $node->create_logical_slot_on_standby($node_primary, qq($inactive_slot), 'testdb'); - $node->create_logical_slot_on_standby($node_primary, qq($active_slot), 'testdb'); + $node->create_logical_slot_on_standby($node_primary, qq($inactive_slot), + 'testdb'); + $node->create_logical_slot_on_standby($node_primary, qq($active_slot), + 'testdb'); } # Drop the logical slots on standby. @@ -73,8 +76,10 @@ sub drop_logical_slots my $active_slot = $slot_prefix . 'activeslot'; my $inactive_slot = $slot_prefix . 
'inactiveslot'; - $node_standby->psql('postgres', qq[SELECT pg_drop_replication_slot('$inactive_slot')]); - $node_standby->psql('postgres', qq[SELECT pg_drop_replication_slot('$active_slot')]); + $node_standby->psql('postgres', + qq[SELECT pg_drop_replication_slot('$inactive_slot')]); + $node_standby->psql('postgres', + qq[SELECT pg_drop_replication_slot('$active_slot')]); } # Acquire one of the standby logical slots created by create_logical_slots(). @@ -86,7 +91,20 @@ sub make_slot_active my $slot_user_handle; my $active_slot = $slot_prefix . 'activeslot'; - $slot_user_handle = IPC::Run::start(['pg_recvlogical', '-d', $node->connstr('testdb'), '-S', qq($active_slot), '-o', 'include-xids=0', '-o', 'skip-empty-xacts=1', '--no-loop', '--start', '-f', '-'], '>', $to_stdout, '2>', $to_stderr); + $slot_user_handle = IPC::Run::start( + [ + 'pg_recvlogical', '-d', + $node->connstr('testdb'), '-S', + qq($active_slot), '-o', + 'include-xids=0', '-o', + 'skip-empty-xacts=1', '--no-loop', + '--start', '-f', + '-' + ], + '>', + $to_stdout, + '2>', + $to_stderr); if ($wait) { @@ -108,7 +126,8 @@ sub check_pg_recvlogical_stderr $slot_user_handle->finish; $return = $?; cmp_ok($return, "!=", 0, "pg_recvlogical exited non-zero"); - if ($return) { + if ($return) + { like($stderr, qr/$check_stderr/, 'slot has been invalidated'); } @@ -121,8 +140,10 @@ sub check_slots_dropped { my ($slot_prefix, $slot_user_handle) = @_; - is($node_standby->slot($slot_prefix . 'inactiveslot')->{'slot_type'}, '', 'inactiveslot on standby dropped'); - is($node_standby->slot($slot_prefix . 'activeslot')->{'slot_type'}, '', 'activeslot on standby dropped'); + is($node_standby->slot($slot_prefix . 'inactiveslot')->{'slot_type'}, + '', 'inactiveslot on standby dropped'); + is($node_standby->slot($slot_prefix . 
'activeslot')->{'slot_type'}, + '', 'activeslot on standby dropped'); check_pg_recvlogical_stderr($slot_user_handle, "conflict with recovery"); } @@ -132,7 +153,8 @@ sub change_hot_standby_feedback_and_wait_for_xmins { my ($hsf, $invalidated) = @_; - $node_standby->append_conf('postgresql.conf',qq[ + $node_standby->append_conf( + 'postgresql.conf', qq[ hot_standby_feedback = $hsf ]); @@ -143,19 +165,19 @@ sub change_hot_standby_feedback_and_wait_for_xmins # With hot_standby_feedback on, xmin should advance, # but catalog_xmin should still remain NULL since there is no logical slot. wait_for_xmins($node_primary, $primary_slotname, - "xmin IS NOT NULL AND catalog_xmin IS NULL"); + "xmin IS NOT NULL AND catalog_xmin IS NULL"); } elsif ($hsf) { # With hot_standby_feedback on, xmin and catalog_xmin should advance. wait_for_xmins($node_primary, $primary_slotname, - "xmin IS NOT NULL AND catalog_xmin IS NOT NULL"); + "xmin IS NOT NULL AND catalog_xmin IS NOT NULL"); } else { # Both should be NULL since hs_feedback is off wait_for_xmins($node_primary, $primary_slotname, - "xmin IS NULL AND catalog_xmin IS NULL"); + "xmin IS NULL AND catalog_xmin IS NULL"); } } @@ -168,20 +190,18 @@ sub check_slots_conflicting_status if ($conflicting) { $res = $node_standby->safe_psql( - 'postgres', qq( + 'postgres', qq( select bool_and(conflicting) from pg_replication_slots;)); - is($res, 't', - "Logical slots are reported as conflicting"); + is($res, 't', "Logical slots are reported as conflicting"); } else { $res = $node_standby->safe_psql( - 'postgres', qq( + 'postgres', qq( select bool_or(conflicting) from pg_replication_slots;)); - is($res, 'f', - "Logical slots are reported as non conflicting"); + is($res, 'f', "Logical slots are reported as non conflicting"); } } @@ -199,7 +219,8 @@ sub reactive_slots_change_hfs_and_wait_for_xmins change_hot_standby_feedback_and_wait_for_xmins($hsf, $invalidated); - $handle = make_slot_active($node_standby, $slot_prefix, 1, \$stdout, \$stderr); + 
$handle = + make_slot_active($node_standby, $slot_prefix, 1, \$stdout, \$stderr); # reset stat: easier to check for confl_active_logicalslot in pg_stat_database_conflicts $node_standby->psql('testdb', q[select pg_stat_reset();]); @@ -215,20 +236,24 @@ sub check_for_invalidation # message should be issued ok( find_in_log( - $node_standby, - "invalidating obsolete replication slot \"$inactive_slot\"", $log_start), + $node_standby, + "invalidating obsolete replication slot \"$inactive_slot\"", + $log_start), "inactiveslot slot invalidation is logged $test_name"); ok( find_in_log( - $node_standby, - "invalidating obsolete replication slot \"$active_slot\"", $log_start), + $node_standby, + "invalidating obsolete replication slot \"$active_slot\"", + $log_start), "activeslot slot invalidation is logged $test_name"); # Verify that pg_stat_database_conflicts.confl_active_logicalslot has been updated ok( $node_standby->poll_query_until( - 'postgres', - "select (confl_active_logicalslot = 1) from pg_stat_database_conflicts where datname = 'testdb'", 't'), - 'confl_active_logicalslot updated') or die "Timed out waiting confl_active_logicalslot to be updated"; + 'postgres', + "select (confl_active_logicalslot = 1) from pg_stat_database_conflicts where datname = 'testdb'", + 't'), + 'confl_active_logicalslot updated' + ) or die "Timed out waiting confl_active_logicalslot to be updated"; } ######################## @@ -236,7 +261,8 @@ sub check_for_invalidation ######################## $node_primary->init(allows_streaming => 1, has_archiving => 1); -$node_primary->append_conf('postgresql.conf', q{ +$node_primary->append_conf( + 'postgresql.conf', q{ wal_level = 'logical' max_replication_slots = 4 max_wal_senders = 4 @@ -246,15 +272,17 @@ $node_primary->start; $node_primary->psql('postgres', q[CREATE DATABASE testdb]); -$node_primary->safe_psql('testdb', qq[SELECT * FROM pg_create_physical_replication_slot('$primary_slotname');]); +$node_primary->safe_psql('testdb', + qq[SELECT * 
FROM pg_create_physical_replication_slot('$primary_slotname');] +); # Check conflicting is NULL for physical slot $res = $node_primary->safe_psql( - 'postgres', qq[ - SELECT conflicting is null FROM pg_replication_slots where slot_name = '$primary_slotname';]); + 'postgres', qq[ + SELECT conflicting is null FROM pg_replication_slots where slot_name = '$primary_slotname';] +); -is($res, 't', - "Physical slot reports conflicting as NULL"); +is($res, 't', "Physical slot reports conflicting as NULL"); my $backup_name = 'b1'; $node_primary->backup($backup_name); @@ -271,7 +299,8 @@ $node_standby->init_from_backup( $node_primary, $backup_name, has_streaming => 1, has_restoring => 1); -$node_standby->append_conf('postgresql.conf', +$node_standby->append_conf( + 'postgresql.conf', qq[primary_slot_name = '$primary_slotname' max_replication_slots = 5]); $node_standby->start; @@ -284,7 +313,7 @@ $node_subscriber->init(allows_streaming => 'logical'); $node_subscriber->start; my %psql_subscriber = ( - 'subscriber_stdin' => '', + 'subscriber_stdin' => '', 'subscriber_stdout' => '', 'subscriber_stderr' => ''); $psql_subscriber{run} = IPC::Run::start( @@ -305,13 +334,17 @@ $psql_subscriber{run} = IPC::Run::start( # create the logical slots create_logical_slots($node_standby, 'behaves_ok_'); -$node_primary->safe_psql('testdb', qq[CREATE TABLE decoding_test(x integer, y text);]); -$node_primary->safe_psql('testdb', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]); +$node_primary->safe_psql('testdb', + qq[CREATE TABLE decoding_test(x integer, y text);]); +$node_primary->safe_psql('testdb', + qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;] +); $node_primary->wait_for_replay_catchup($node_standby); my $result = $node_standby->safe_psql('testdb', - qq[SELECT pg_logical_slot_get_changes('behaves_ok_activeslot', NULL, NULL);]); + qq[SELECT pg_logical_slot_get_changes('behaves_ok_activeslot', NULL, NULL);] +); # test if 
basic decoding works is(scalar(my @foobar = split /^/m, $result), @@ -350,21 +383,21 @@ $node_primary->safe_psql('testdb', $node_primary->wait_for_replay_catchup($node_standby); my $stdout_recv = $node_standby->pg_recvlogical_upto( - 'testdb', 'behaves_ok_activeslot', $endpos, $default_timeout, - 'include-xids' => '0', - 'skip-empty-xacts' => '1'); + 'testdb', 'behaves_ok_activeslot', $endpos, $default_timeout, + 'include-xids' => '0', + 'skip-empty-xacts' => '1'); chomp($stdout_recv); is($stdout_recv, $expected, - 'got same expected output from pg_recvlogical decoding session'); + 'got same expected output from pg_recvlogical decoding session'); $node_standby->poll_query_until('testdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'behaves_ok_activeslot' AND active_pid IS NULL)" ) or die "slot never became inactive"; $stdout_recv = $node_standby->pg_recvlogical_upto( - 'testdb', 'behaves_ok_activeslot', $endpos, $default_timeout, - 'include-xids' => '0', - 'skip-empty-xacts' => '1'); + 'testdb', 'behaves_ok_activeslot', $endpos, $default_timeout, + 'include-xids' => '0', + 'skip-empty-xacts' => '1'); chomp($stdout_recv); is($stdout_recv, '', 'pg_recvlogical acknowledged changes'); @@ -374,10 +407,9 @@ $node_primary->safe_psql('postgres', 'CREATE DATABASE otherdb'); # on the standby. $node_primary->wait_for_replay_catchup($node_standby); -($result, $stdout, $stderr) = $node_standby->psql( - 'otherdb', - "SELECT lsn FROM pg_logical_slot_peek_changes('behaves_ok_activeslot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" - ); +($result, $stdout, $stderr) = $node_standby->psql('otherdb', + "SELECT lsn FROM pg_logical_slot_peek_changes('behaves_ok_activeslot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" +); ok( $stderr =~ m/replication slot "behaves_ok_activeslot" was not created in this database/, "replaying logical slot from another database fails"); @@ -408,8 +440,7 @@ my $standby_connstr = $node_standby->connstr . 
' dbname=postgres'; # and we wouldn't be able to launch pg_log_standby_snapshot() on the primary # while waiting. # psql_subscriber() allows to not wait synchronously. -$psql_subscriber{subscriber_stdin} .= - qq[CREATE SUBSCRIPTION tap_sub +$psql_subscriber{subscriber_stdin} .= qq[CREATE SUBSCRIPTION tap_sub CONNECTION '$standby_connstr' PUBLICATION tap_pub WITH (copy_data = off);]; @@ -451,10 +482,12 @@ $node_subscriber->stop; # One way to produce recovery conflict is to create/drop a relation and # launch a vacuum full on pg_class with hot_standby_feedback turned off on # the standby. -reactive_slots_change_hfs_and_wait_for_xmins('behaves_ok_', 'vacuum_full_', 0, 1); +reactive_slots_change_hfs_and_wait_for_xmins('behaves_ok_', 'vacuum_full_', + 0, 1); # This should trigger the conflict -$node_primary->safe_psql('testdb', qq[ +$node_primary->safe_psql( + 'testdb', qq[ CREATE TABLE conflict_test(x integer, y text); DROP TABLE conflict_test; VACUUM full pg_class; @@ -469,13 +502,16 @@ check_for_invalidation('vacuum_full_', 1, 'with vacuum FULL on pg_class'); # Verify slots are reported as conflicting in pg_replication_slots check_slots_conflicting_status(1); -$handle = make_slot_active($node_standby, 'vacuum_full_', 0, \$stdout, \$stderr); +$handle = + make_slot_active($node_standby, 'vacuum_full_', 0, \$stdout, \$stderr); # We are not able to read from the slot as it has been invalidated -check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"vacuum_full_activeslot\""); +check_pg_recvlogical_stderr($handle, + "can no longer get changes from replication slot \"vacuum_full_activeslot\"" +); # Turn hot_standby_feedback back on -change_hot_standby_feedback_and_wait_for_xmins(1,1); +change_hot_standby_feedback_and_wait_for_xmins(1, 1); ################################################## # Verify that invalidated logical slots stay invalidated across a restart. 
@@ -531,10 +567,12 @@ my $logstart = -s $node_standby->logfile; # One way to produce recovery conflict is to create/drop a relation and # launch a vacuum on pg_class with hot_standby_feedback turned off on the standby. -reactive_slots_change_hfs_and_wait_for_xmins('vacuum_full_', 'row_removal_', 0, 1); +reactive_slots_change_hfs_and_wait_for_xmins('vacuum_full_', 'row_removal_', + 0, 1); # This should trigger the conflict -$node_primary->safe_psql('testdb', qq[ +$node_primary->safe_psql( + 'testdb', qq[ CREATE TABLE conflict_test(x integer, y text); DROP TABLE conflict_test; VACUUM pg_class; @@ -549,10 +587,13 @@ check_for_invalidation('row_removal_', $logstart, 'with vacuum on pg_class'); # Verify slots are reported as conflicting in pg_replication_slots check_slots_conflicting_status(1); -$handle = make_slot_active($node_standby, 'row_removal_', 0, \$stdout, \$stderr); +$handle = + make_slot_active($node_standby, 'row_removal_', 0, \$stdout, \$stderr); # We are not able to read from the slot as it has been invalidated -check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"row_removal_activeslot\""); +check_pg_recvlogical_stderr($handle, + "can no longer get changes from replication slot \"row_removal_activeslot\"" +); ################################################## # Recovery conflict: Same as Scenario 2 but on a shared catalog table @@ -564,10 +605,12 @@ $logstart = -s $node_standby->logfile; # One way to produce recovery conflict is to create/drop a relation and # launch a vacuum on pg_class with hot_standby_feedback turned off on the standby. 
-reactive_slots_change_hfs_and_wait_for_xmins('row_removal_', 'shared_row_removal_', 0, 1); +reactive_slots_change_hfs_and_wait_for_xmins('row_removal_', + 'shared_row_removal_', 0, 1); # Trigger the conflict -$node_primary->safe_psql('testdb', qq[ +$node_primary->safe_psql( + 'testdb', qq[ CREATE ROLE create_trash; DROP ROLE create_trash; VACUUM pg_authid; @@ -577,15 +620,19 @@ $node_primary->safe_psql('testdb', qq[ $node_primary->wait_for_replay_catchup($node_standby); # Check invalidation in the logfile and in pg_stat_database_conflicts -check_for_invalidation('shared_row_removal_', $logstart, 'with vacuum on pg_authid'); +check_for_invalidation('shared_row_removal_', $logstart, + 'with vacuum on pg_authid'); # Verify slots are reported as conflicting in pg_replication_slots check_slots_conflicting_status(1); -$handle = make_slot_active($node_standby, 'shared_row_removal_', 0, \$stdout, \$stderr); +$handle = make_slot_active($node_standby, 'shared_row_removal_', 0, \$stdout, + \$stderr); # We are not able to read from the slot as it has been invalidated -check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"shared_row_removal_activeslot\""); +check_pg_recvlogical_stderr($handle, + "can no longer get changes from replication slot \"shared_row_removal_activeslot\"" +); ################################################## # Recovery conflict: Same as Scenario 2 but on a non catalog table @@ -595,10 +642,12 @@ check_pg_recvlogical_stderr($handle, "can no longer get changes from replication # get the position to search from in the standby logfile $logstart = -s $node_standby->logfile; -reactive_slots_change_hfs_and_wait_for_xmins('shared_row_removal_', 'no_conflict_', 0, 1); +reactive_slots_change_hfs_and_wait_for_xmins('shared_row_removal_', + 'no_conflict_', 0, 1); # This should not trigger a conflict -$node_primary->safe_psql('testdb', qq[ +$node_primary->safe_psql( + 'testdb', qq[ CREATE TABLE conflict_test(x integer, y text); 
INSERT INTO conflict_test(x,y) SELECT s, s::text FROM generate_series(1,4) s; UPDATE conflict_test set x=1, y=1; @@ -609,20 +658,24 @@ $node_primary->wait_for_replay_catchup($node_standby); # message should not be issued ok( !find_in_log( - $node_standby, - "invalidating obsolete slot \"no_conflict_inactiveslot\"", $logstart), - 'inactiveslot slot invalidation is not logged with vacuum on conflict_test'); + $node_standby, + "invalidating obsolete slot \"no_conflict_inactiveslot\"", $logstart), + 'inactiveslot slot invalidation is not logged with vacuum on conflict_test' +); ok( !find_in_log( - $node_standby, - "invalidating obsolete slot \"no_conflict_activeslot\"", $logstart), - 'activeslot slot invalidation is not logged with vacuum on conflict_test'); + $node_standby, + "invalidating obsolete slot \"no_conflict_activeslot\"", $logstart), + 'activeslot slot invalidation is not logged with vacuum on conflict_test' +); # Verify that pg_stat_database_conflicts.confl_active_logicalslot has not been updated ok( $node_standby->poll_query_until( - 'postgres', - "select (confl_active_logicalslot = 0) from pg_stat_database_conflicts where datname = 'testdb'", 't'), - 'confl_active_logicalslot not updated') or die "Timed out waiting confl_active_logicalslot to be updated"; + 'postgres', + "select (confl_active_logicalslot = 0) from pg_stat_database_conflicts where datname = 'testdb'", + 't'), + 'confl_active_logicalslot not updated' +) or die "Timed out waiting confl_active_logicalslot to be updated"; # Verify slots are reported as non conflicting in pg_replication_slots check_slots_conflicting_status(0); @@ -643,10 +696,13 @@ $logstart = -s $node_standby->logfile; # One way to produce recovery conflict is to trigger an on-access pruning # on a relation marked as user_catalog_table. 
-reactive_slots_change_hfs_and_wait_for_xmins('no_conflict_', 'pruning_', 0, 0); +reactive_slots_change_hfs_and_wait_for_xmins('no_conflict_', 'pruning_', 0, + 0); # This should trigger the conflict -$node_primary->safe_psql('testdb', qq[CREATE TABLE prun(id integer, s char(2000)) WITH (fillfactor = 75, user_catalog_table = true);]); +$node_primary->safe_psql('testdb', + qq[CREATE TABLE prun(id integer, s char(2000)) WITH (fillfactor = 75, user_catalog_table = true);] +); $node_primary->safe_psql('testdb', qq[INSERT INTO prun VALUES (1, 'A');]); $node_primary->safe_psql('testdb', qq[UPDATE prun SET s = 'B';]); $node_primary->safe_psql('testdb', qq[UPDATE prun SET s = 'C';]); @@ -664,7 +720,8 @@ check_slots_conflicting_status(1); $handle = make_slot_active($node_standby, 'pruning_', 0, \$stdout, \$stderr); # We are not able to read from the slot as it has been invalidated -check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"pruning_activeslot\""); +check_pg_recvlogical_stderr($handle, + "can no longer get changes from replication slot \"pruning_activeslot\""); # Turn hot_standby_feedback back on change_hot_standby_feedback_and_wait_for_xmins(1, 1); @@ -683,13 +740,15 @@ drop_logical_slots('pruning_'); # create the logical slots create_logical_slots($node_standby, 'wal_level_'); -$handle = make_slot_active($node_standby, 'wal_level_', 1, \$stdout, \$stderr); +$handle = + make_slot_active($node_standby, 'wal_level_', 1, \$stdout, \$stderr); # reset stat: easier to check for confl_active_logicalslot in pg_stat_database_conflicts $node_standby->psql('testdb', q[select pg_stat_reset();]); # Make primary wal_level replica. This will trigger slot conflict. 
-$node_primary->append_conf('postgresql.conf',q[ +$node_primary->append_conf( + 'postgresql.conf', q[ wal_level = 'replica' ]); $node_primary->restart; @@ -702,20 +761,27 @@ check_for_invalidation('wal_level_', $logstart, 'due to wal_level'); # Verify slots are reported as conflicting in pg_replication_slots check_slots_conflicting_status(1); -$handle = make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr); +$handle = + make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr); # We are not able to read from the slot as it requires wal_level >= logical on the primary server -check_pg_recvlogical_stderr($handle, "logical decoding on standby requires wal_level >= logical on the primary"); +check_pg_recvlogical_stderr($handle, + "logical decoding on standby requires wal_level >= logical on the primary" +); # Restore primary wal_level -$node_primary->append_conf('postgresql.conf',q[ +$node_primary->append_conf( + 'postgresql.conf', q[ wal_level = 'logical' ]); $node_primary->restart; $node_primary->wait_for_replay_catchup($node_standby); -$handle = make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr); +$handle = + make_slot_active($node_standby, 'wal_level_', 0, \$stdout, \$stderr); # as the slot has been invalidated we should not be able to read -check_pg_recvlogical_stderr($handle, "can no longer get changes from replication slot \"wal_level_activeslot\""); +check_pg_recvlogical_stderr($handle, + "can no longer get changes from replication slot \"wal_level_activeslot\"" +); ################################################## # DROP DATABASE should drops it's slots, including active slots. @@ -731,24 +797,28 @@ $handle = make_slot_active($node_standby, 'drop_db_', 1, \$stdout, \$stderr); # Create a slot on a database that would not be dropped. This slot should not # get dropped. 
-$node_standby->create_logical_slot_on_standby($node_primary, 'otherslot', 'postgres'); +$node_standby->create_logical_slot_on_standby($node_primary, 'otherslot', + 'postgres'); # dropdb on the primary to verify slots are dropped on standby $node_primary->safe_psql('postgres', q[DROP DATABASE testdb]); $node_primary->wait_for_replay_catchup($node_standby); -is($node_standby->safe_psql('postgres', - q[SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = 'testdb')]), 'f', +is( $node_standby->safe_psql( + 'postgres', + q[SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = 'testdb')]), + 'f', 'database dropped on standby'); check_slots_dropped('drop_db', $handle); -is($node_standby->slot('otherslot')->{'slot_type'}, 'logical', - 'otherslot on standby not dropped'); +is($node_standby->slot('otherslot')->{'slot_type'}, + 'logical', 'otherslot on standby not dropped'); # Cleanup : manually drop the slot that was not dropped. -$node_standby->psql('postgres', q[SELECT pg_drop_replication_slot('otherslot')]); +$node_standby->psql('postgres', + q[SELECT pg_drop_replication_slot('otherslot')]); ################################################## # Test standby promotion and logical decoding behavior @@ -758,7 +828,8 @@ $node_standby->psql('postgres', q[SELECT pg_drop_replication_slot('otherslot')]) $node_standby->reload; $node_primary->psql('postgres', q[CREATE DATABASE testdb]); -$node_primary->safe_psql('testdb', qq[CREATE TABLE decoding_test(x integer, y text);]); +$node_primary->safe_psql('testdb', + qq[CREATE TABLE decoding_test(x integer, y text);]); # Wait for the standby to catchup before initializing the cascading standby $node_primary->wait_for_replay_catchup($node_standby); @@ -767,7 +838,9 @@ $node_primary->wait_for_replay_catchup($node_standby); # Keep this step after the "Verify that invalidated logical slots do not lead # to retaining WAL" test (as the physical slot on the standby could prevent the # WAL file removal). 
-$node_standby->safe_psql('testdb', qq[SELECT * FROM pg_create_physical_replication_slot('$standby_physical_slotname');]); +$node_standby->safe_psql('testdb', + qq[SELECT * FROM pg_create_physical_replication_slot('$standby_physical_slotname');] +); # Initialize cascading standby node $node_standby->backup($backup_name); @@ -775,7 +848,8 @@ $node_cascading_standby->init_from_backup( $node_standby, $backup_name, has_streaming => 1, has_restoring => 1); -$node_cascading_standby->append_conf('postgresql.conf', +$node_cascading_standby->append_conf( + 'postgresql.conf', qq[primary_slot_name = '$standby_physical_slotname' hot_standby_feedback = on]); $node_cascading_standby->start; @@ -784,14 +858,18 @@ $node_cascading_standby->start; create_logical_slots($node_standby, 'promotion_'); # Wait for the cascading standby to catchup before creating the slots -$node_standby->wait_for_replay_catchup($node_cascading_standby, $node_primary); +$node_standby->wait_for_replay_catchup($node_cascading_standby, + $node_primary); # create the logical slots on the cascading standby too create_logical_slots($node_cascading_standby, 'promotion_'); # Make slots actives -$handle = make_slot_active($node_standby, 'promotion_', 1, \$stdout, \$stderr); -my $cascading_handle = make_slot_active($node_cascading_standby, 'promotion_', 1, \$cascading_stdout, \$cascading_stderr); +$handle = + make_slot_active($node_standby, 'promotion_', 1, \$stdout, \$stderr); +my $cascading_handle = + make_slot_active($node_cascading_standby, 'promotion_', 1, + \$cascading_stdout, \$cascading_stderr); # Insert some rows before the promotion $node_primary->safe_psql('testdb', @@ -800,7 +878,8 @@ $node_primary->safe_psql('testdb', # Wait for both standbys to catchup $node_primary->wait_for_replay_catchup($node_standby); -$node_standby->wait_for_replay_catchup($node_cascading_standby, $node_primary); +$node_standby->wait_for_replay_catchup($node_cascading_standby, + $node_primary); # promote $node_standby->promote; 
@@ -830,35 +909,38 @@ $stdout_sql = $node_standby->safe_psql('testdb', qq[SELECT data FROM pg_logical_slot_peek_changes('promotion_inactiveslot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');] ); -is($stdout_sql, $expected, 'got expected output from SQL decoding session on promoted standby'); +is($stdout_sql, $expected, + 'got expected output from SQL decoding session on promoted standby'); # check that we are decoding pre and post promotion inserted rows # with pg_recvlogical that has started before the promotion my $pump_timeout = IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default); -ok( pump_until( - $handle, $pump_timeout, \$stdout, qr/^.*COMMIT.*COMMIT$/s), - 'got 2 COMMIT from pg_recvlogical output'); +ok(pump_until($handle, $pump_timeout, \$stdout, qr/^.*COMMIT.*COMMIT$/s), + 'got 2 COMMIT from pg_recvlogical output'); chomp($stdout); is($stdout, $expected, - 'got same expected output from pg_recvlogical decoding session'); + 'got same expected output from pg_recvlogical decoding session'); # check that we are decoding pre and post promotion inserted rows on the cascading standby $stdout_sql = $node_cascading_standby->safe_psql('testdb', qq[SELECT data FROM pg_logical_slot_peek_changes('promotion_inactiveslot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');] ); -is($stdout_sql, $expected, 'got expected output from SQL decoding session on cascading standby'); +is($stdout_sql, $expected, + 'got expected output from SQL decoding session on cascading standby'); # check that we are decoding pre and post promotion inserted rows # with pg_recvlogical that has started before the promotion on the cascading standby ok( pump_until( - $cascading_handle, $pump_timeout, \$cascading_stdout, qr/^.*COMMIT.*COMMIT$/s), - 'got 2 COMMIT from pg_recvlogical output'); + $cascading_handle, $pump_timeout, + \$cascading_stdout, qr/^.*COMMIT.*COMMIT$/s), + 'got 2 COMMIT from pg_recvlogical output'); chomp($cascading_stdout); is($cascading_stdout, 
$expected, - 'got same expected output from pg_recvlogical decoding session on cascading standby'); + 'got same expected output from pg_recvlogical decoding session on cascading standby' +); done_testing(); diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index 88ab255ce61..abf633dc085 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -85,14 +85,14 @@ typedef enum TAPtype TEST_STATUS, PLAN, NONE -} TAPtype; +} TAPtype; /* options settable from command line */ _stringlist *dblist = NULL; bool debug = false; char *inputdir = "."; char *outputdir = "."; -char *expecteddir = "."; +char *expecteddir = "."; char *bindir = PGBINDIR; char *launcher = NULL; static _stringlist *loadextension = NULL; diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl index e7956cb1a0f..76442de063f 100644 --- a/src/test/ssl/t/001_ssltests.pl +++ b/src/test/ssl/t/001_ssltests.pl @@ -19,7 +19,8 @@ if ($ENV{with_ssl} ne 'openssl') } elsif ($ENV{PG_TEST_EXTRA} !~ /\bssl\b/) { - plan skip_all => 'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA'; + plan skip_all => + 'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA'; } my $ssl_server = SSL::Server->new(); @@ -78,11 +79,11 @@ note "testing password-protected keys"; switch_server_cert( $node, - certfile => 'server-cn-only', - cafile => 'root+client_ca', - keyfile => 'server-password', + certfile => 'server-cn-only', + cafile => 'root+client_ca', + keyfile => 'server-password', passphrase_cmd => 'echo wrongpassword', - restart => 'no'); + restart => 'no'); command_fails( [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ], @@ -91,11 +92,11 @@ $node->_update_pid(0); switch_server_cert( $node, - certfile => 'server-cn-only', - cafile => 'root+client_ca', - keyfile => 'server-password', + certfile => 'server-cn-only', + cafile => 'root+client_ca', + keyfile => 'server-password', passphrase_cmd => 'echo secret1', - restart => 'no'); + 
restart => 'no'); command_ok( [ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ], @@ -468,7 +469,8 @@ $node->connect_fails( qr/could not get server's host name from server certificate/); # Test system trusted roots. -switch_server_cert($node, +switch_server_cert( + $node, certfile => 'server-cn-only+server_ca', keyfile => 'server-cn-only', cafile => 'root_ca'); @@ -481,13 +483,15 @@ $common_connstr = $node->connect_fails( "$common_connstr sslmode=verify-full host=common-name.pg-ssltest.test", "sslrootcert=system does not connect with private CA", - expected_stderr => qr/SSL error: (certificate verify failed|unregistered scheme)/); + expected_stderr => + qr/SSL error: (certificate verify failed|unregistered scheme)/); # Modes other than verify-full cannot be mixed with sslrootcert=system. $node->connect_fails( "$common_connstr sslmode=verify-ca host=common-name.pg-ssltest.test", "sslrootcert=system only accepts sslmode=verify-full", - expected_stderr => qr/weak sslmode "verify-ca" may not be used with sslrootcert=system/); + expected_stderr => + qr/weak sslmode "verify-ca" may not be used with sslrootcert=system/); SKIP: { @@ -503,7 +507,9 @@ SKIP: $node->connect_fails( "$common_connstr host=common-name.pg-ssltest.test.bad", "sslrootcert=system defaults to sslmode=verify-full", - expected_stderr => qr/server certificate for "common-name.pg-ssltest.test" does not match host name "common-name.pg-ssltest.test.bad"/); + expected_stderr => + qr/server certificate for "common-name.pg-ssltest.test" does not match host name "common-name.pg-ssltest.test.bad"/ + ); } # Test that the CRL works @@ -530,10 +536,10 @@ $node->connect_fails( # pg_stat_ssl command_like( [ - 'psql', '-X', - '-A', '-F', - ',', '-P', - 'null=_null_', '-d', + 'psql', '-X', + '-A', '-F', + ',', '-P', + 'null=_null_', '-d', "$common_connstr sslrootcert=invalid", '-c', "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()" ], @@ -766,8 +772,8 @@ $node->connect_fails( qr/certificate 
authentication failed for user "anotheruser"/, # certificate authentication should be logged even on failure # temporarily(?) skip this check due to timing issue -# log_like => -# [qr/connection authenticated: identity="CN=ssltestuser" method=cert/], + # log_like => + # [qr/connection authenticated: identity="CN=ssltestuser" method=cert/], ); # revoked client cert @@ -777,10 +783,10 @@ $node->connect_fails( "certificate authorization fails with revoked client cert", expected_stderr => qr/SSL error: sslv3 alert certificate revoked/, # temporarily(?) skip this check due to timing issue -# log_like => [ -# qr{Client certificate verification failed at depth 0: certificate revoked}, -# qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, -# ], + # log_like => [ + # qr{Client certificate verification failed at depth 0: certificate revoked}, + # qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, + # ], # revoked certificates should not authenticate the user log_unlike => [qr/connection authenticated:/],); @@ -818,7 +824,7 @@ $node->connect_ok( # intermediate client_ca.crt is provided by client, and isn't in server's ssl_ca_file switch_server_cert($node, certfile => 'server-cn-only', cafile => 'root_ca'); $common_connstr = - "$default_ssl_connstr user=ssltestuser dbname=certdb " + "$default_ssl_connstr user=ssltestuser dbname=certdb " . sslkey('client.key') . " sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR host=localhost"; @@ -831,26 +837,30 @@ $node->connect_fails( "intermediate client certificate is missing", expected_stderr => qr/SSL error: tlsv1 alert unknown ca/, # temporarily(?) 
skip this check due to timing issue -# log_like => [ -# qr{Client certificate verification failed at depth 0: unable to get local issuer certificate}, -# qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656576, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, -# ] + # log_like => [ + # qr{Client certificate verification failed at depth 0: unable to get local issuer certificate}, + # qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656576, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, + # ] ); $node->connect_fails( - "$common_connstr sslmode=require sslcert=ssl/client-long.crt " . sslkey('client-long.key'), + "$common_connstr sslmode=require sslcert=ssl/client-long.crt " + . sslkey('client-long.key'), "logged client certificate Subjects are truncated if they're too long", expected_stderr => qr/SSL error: tlsv1 alert unknown ca/, # temporarily(?) skip this check due to timing issue -# log_like => [ -# qr{Client certificate verification failed at depth 0: unable to get local issuer certificate}, -# qr{Failed certificate data \(unverified\): subject "\.\.\./CN=ssl-123456789012345678901234567890123456789012345678901234567890", serial number 2315418733629425152, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, -# ] + # log_like => [ + # qr{Client certificate verification failed at depth 0: unable to get local issuer certificate}, + # qr{Failed certificate data \(unverified\): subject "\.\.\./CN=ssl-123456789012345678901234567890123456789012345678901234567890", serial number 2315418733629425152, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, + # ] ); # Use an invalid cafile here so that the next test won't be able to verify the # client CA. 
-switch_server_cert($node, certfile => 'server-cn-only', cafile => 'server-cn-only'); +switch_server_cert( + $node, + certfile => 'server-cn-only', + cafile => 'server-cn-only'); # intermediate CA is provided but doesn't have a trusted root (checks error # logging for cert chain depths > 0) @@ -859,17 +869,17 @@ $node->connect_fails( "intermediate client certificate is untrusted", expected_stderr => qr/SSL error: tlsv1 alert unknown ca/, # temporarily(?) skip this check due to timing issue -# log_like => [ -# qr{Client certificate verification failed at depth 1: unable to get local issuer certificate}, -# qr{Failed certificate data \(unverified\): subject "/CN=Test CA for PostgreSQL SSL regression test client certs", serial number 2315134995201656577, issuer "/CN=Test root CA for PostgreSQL SSL regression test suite"}, -# ] + # log_like => [ + # qr{Client certificate verification failed at depth 1: unable to get local issuer certificate}, + # qr{Failed certificate data \(unverified\): subject "/CN=Test CA for PostgreSQL SSL regression test client certs", serial number 2315134995201656577, issuer "/CN=Test root CA for PostgreSQL SSL regression test suite"}, + # ] ); # test server-side CRL directory switch_server_cert( $node, certfile => 'server-cn-only', - crldir => 'root+client-crldir'); + crldir => 'root+client-crldir'); # revoked client cert $node->connect_fails( @@ -878,10 +888,10 @@ $node->connect_fails( "certificate authorization fails with revoked client cert with server-side CRL directory", expected_stderr => qr/SSL error: sslv3 alert certificate revoked/, # temporarily(?) 
skip this check due to timing issue -# log_like => [ -# qr{Client certificate verification failed at depth 0: certificate revoked}, -# qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, -# ] + # log_like => [ + # qr{Client certificate verification failed at depth 0: certificate revoked}, + # qr{Failed certificate data \(unverified\): subject "/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, + # ] ); # revoked client cert, non-ASCII subject @@ -891,10 +901,10 @@ $node->connect_fails( "certificate authorization fails with revoked UTF-8 client cert with server-side CRL directory", expected_stderr => qr/SSL error: sslv3 alert certificate revoked/, # temporarily(?) skip this check due to timing issue -# log_like => [ -# qr{Client certificate verification failed at depth 0: certificate revoked}, -# qr{Failed certificate data \(unverified\): subject "/CN=\\xce\\x9f\\xce\\xb4\\xcf\\x85\\xcf\\x83\\xcf\\x83\\xce\\xad\\xce\\xb1\\xcf\\x82", serial number 2315420958437414144, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, -# ] + # log_like => [ + # qr{Client certificate verification failed at depth 0: certificate revoked}, + # qr{Failed certificate data \(unverified\): subject "/CN=\\xce\\x9f\\xce\\xb4\\xcf\\x85\\xcf\\x83\\xcf\\x83\\xce\\xad\\xce\\xb1\\xcf\\x82", serial number 2315420958437414144, issuer "/CN=Test CA for PostgreSQL SSL regression test client certs"}, + # ] ); done_testing(); diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl index 8038135697f..28c54bdb09f 100644 --- a/src/test/ssl/t/002_scram.pl +++ b/src/test/ssl/t/002_scram.pl @@ -22,7 +22,8 @@ if ($ENV{with_ssl} ne 'openssl') } elsif ($ENV{PG_TEST_EXTRA} !~ /\bssl\b/) { - plan skip_all => 'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA'; + plan skip_all => + 
'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA'; } my $ssl_server = SSL::Server->new(); @@ -70,7 +71,7 @@ $node->start; $ssl_server->configure_test_server_for_ssl( $node, $SERVERHOSTADDR, $SERVERHOSTCIDR, "scram-sha-256", - 'password' => "pass", + 'password' => "pass", 'password_enc' => "scram-sha-256"); switch_server_cert($node, certfile => 'server-cn-only'); $ENV{PGPASSWORD} = "pass"; @@ -117,7 +118,7 @@ $node->connect_fails( # because channel binding is not performed. Note that ssl/client.key may # be used in a different test, so the name of this temporary client key # is chosen here to be unique. -my $cert_tempdir = PostgreSQL::Test::Utils::tempdir(); +my $cert_tempdir = PostgreSQL::Test::Utils::tempdir(); my $client_tmp_key = "$cert_tempdir/client_scram.key"; copy("ssl/client.key", "$cert_tempdir/client_scram.key") or die diff --git a/src/test/ssl/t/003_sslinfo.pl b/src/test/ssl/t/003_sslinfo.pl index c073625213e..5306aad8023 100644 --- a/src/test/ssl/t/003_sslinfo.pl +++ b/src/test/ssl/t/003_sslinfo.pl @@ -20,7 +20,8 @@ if ($ENV{with_ssl} ne 'openssl') } elsif ($ENV{PG_TEST_EXTRA} !~ /\bssl\b/) { - plan skip_all => 'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA'; + plan skip_all => + 'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA'; } #### Some configuration @@ -172,9 +173,9 @@ is($result, 'CA:FALSE|t', 'extract extension from cert'); # Sanity tests for sslcertmode, using ssl_client_cert_present() my @cases = ( - { opts => "sslcertmode=allow", present => 't' }, + { opts => "sslcertmode=allow", present => 't' }, { opts => "sslcertmode=allow sslcert=invalid", present => 'f' }, - { opts => "sslcertmode=disable", present => 'f' },); + { opts => "sslcertmode=disable", present => 'f' },); if ($supports_sslcertmode_require) { push(@cases, { opts => "sslcertmode=require", present => 't' }); diff --git a/src/test/ssl/t/SSL/Backend/OpenSSL.pm b/src/test/ssl/t/SSL/Backend/OpenSSL.pm index b52035100a4..a762f43634f 100644 --- 
a/src/test/ssl/t/SSL/Backend/OpenSSL.pm +++ b/src/test/ssl/t/SSL/Backend/OpenSSL.pm @@ -71,8 +71,8 @@ sub init chmod(0600, glob "$pgdata/server-*.key") or die "failed to change permissions on server keys: $!"; _copy_files("ssl/root+client_ca.crt", $pgdata); - _copy_files("ssl/root_ca.crt", $pgdata); - _copy_files("ssl/root+client.crl", $pgdata); + _copy_files("ssl/root_ca.crt", $pgdata); + _copy_files("ssl/root+client.crl", $pgdata); mkdir("$pgdata/root+client-crldir") or die "unable to create server CRL dir $pgdata/root+client-crldir: $!"; _copy_files("ssl/root+client-crldir/*", "$pgdata/root+client-crldir/"); @@ -84,11 +84,11 @@ sub init # the tests. To get the full path for inclusion in connection strings, the # %key hash can be interrogated. my $cert_tempdir = PostgreSQL::Test::Utils::tempdir(); - my @keys = ( - "client.key", "client-revoked.key", - "client-der.key", "client-encrypted-pem.key", + my @keys = ( + "client.key", "client-revoked.key", + "client-der.key", "client-encrypted-pem.key", "client-encrypted-der.key", "client-dn.key", - "client_ext.key", "client-long.key", + "client_ext.key", "client-long.key", "client-revoked-utf8.key"); foreach my $keyfile (@keys) { @@ -174,13 +174,13 @@ sub set_server_cert { my ($self, $params) = @_; - $params->{cafile} = 'root+client_ca' unless defined $params->{cafile}; + $params->{cafile} = 'root+client_ca' unless defined $params->{cafile}; $params->{crlfile} = 'root+client.crl' unless defined $params->{crlfile}; $params->{keyfile} = $params->{certfile} unless defined $params->{keyfile}; my $sslconf = - "ssl_ca_file='$params->{cafile}.crt'\n" + "ssl_ca_file='$params->{cafile}.crt'\n" . "ssl_cert_file='$params->{certfile}.crt'\n" . "ssl_key_file='$params->{keyfile}.key'\n" . 
"ssl_crl_file='$params->{crlfile}'\n"; diff --git a/src/test/ssl/t/SSL/Server.pm b/src/test/ssl/t/SSL/Server.pm index b6344b936a5..2c5c0552227 100644 --- a/src/test/ssl/t/SSL/Server.pm +++ b/src/test/ssl/t/SSL/Server.pm @@ -94,7 +94,7 @@ sub new bless $self, $class; if ($flavor =~ /\Aopenssl\z/i) { - $self->{flavor} = 'openssl'; + $self->{flavor} = 'openssl'; $self->{backend} = SSL::Backend::OpenSSL->new(); } else @@ -115,7 +115,7 @@ string. sub sslkey { - my $self = shift; + my $self = shift; my $keyfile = shift; my $backend = $self->{backend}; @@ -143,10 +143,10 @@ sub configure_test_server_for_ssl my $self = shift; my ($node, $serverhost, $servercidr, $authmethod, %params) = @_; my $backend = $self->{backend}; - my $pgdata = $node->data_dir; + my $pgdata = $node->data_dir; my @databases = ( - 'trustdb', 'certdb', 'certdb_dn', 'certdb_dn_re', + 'trustdb', 'certdb', 'certdb_dn', 'certdb_dn_re', 'certdb_cn', 'verifydb'); # Create test users and databases @@ -229,7 +229,7 @@ Get the name of the currently used SSL backend. sub ssl_library { - my $self = shift; + my $self = shift; my $backend = $self->{backend}; return $backend->get_library(); @@ -284,11 +284,11 @@ returning. 
sub switch_server_cert { - my $self = shift; - my $node = shift; + my $self = shift; + my $node = shift; my $backend = $self->{backend}; - my %params = @_; - my $pgdata = $node->data_dir; + my %params = @_; + my $pgdata = $node->data_dir; open my $sslconf, '>', "$pgdata/sslconfig.conf"; print $sslconf "ssl=on\n"; diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl index 91aa068c95b..0a399cdb82b 100644 --- a/src/test/subscription/t/001_rep_changes.pl +++ b/src/test/subscription/t/001_rep_changes.pl @@ -233,7 +233,8 @@ $node_subscriber->safe_psql('postgres', ); # Wait for initial table sync to finish -$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub_temp1'); +$node_subscriber->wait_for_subscription_sync($node_publisher, + 'tap_sub_temp1'); # Subscriber table will have no rows initially $result = diff --git a/src/test/subscription/t/005_encoding.pl b/src/test/subscription/t/005_encoding.pl index 297adfb3bb6..2f0bf7730b9 100644 --- a/src/test/subscription/t/005_encoding.pl +++ b/src/test/subscription/t/005_encoding.pl @@ -11,13 +11,13 @@ use Test::More; my $node_publisher = PostgreSQL::Test::Cluster->new('publisher'); $node_publisher->init( allows_streaming => 'logical', - extra => [ '--locale=C', '--encoding=UTF8' ]); + extra => [ '--locale=C', '--encoding=UTF8' ]); $node_publisher->start; my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber'); $node_subscriber->init( allows_streaming => 'logical', - extra => [ '--locale=C', '--encoding=LATIN1' ]); + extra => [ '--locale=C', '--encoding=LATIN1' ]); $node_subscriber->start; my $ddl = "CREATE TABLE test1 (a int, b text);"; @@ -42,7 +42,7 @@ $node_publisher->wait_for_catchup('mysub'); is( $node_subscriber->safe_psql( 'postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'} - ), # LATIN1 + ), # LATIN1 qq(1), 'data replicated to subscriber'); diff --git a/src/test/subscription/t/012_collation.pl b/src/test/subscription/t/012_collation.pl 
index 4d947f1375d..823550a31b5 100644 --- a/src/test/subscription/t/012_collation.pl +++ b/src/test/subscription/t/012_collation.pl @@ -17,13 +17,13 @@ if ($ENV{with_icu} ne 'yes') my $node_publisher = PostgreSQL::Test::Cluster->new('publisher'); $node_publisher->init( allows_streaming => 'logical', - extra => [ '--locale=C', '--encoding=UTF8' ]); + extra => [ '--locale=C', '--encoding=UTF8' ]); $node_publisher->start; my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber'); $node_subscriber->init( allows_streaming => 'logical', - extra => [ '--locale=C', '--encoding=UTF8' ]); + extra => [ '--locale=C', '--encoding=UTF8' ]); $node_subscriber->start; my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; diff --git a/src/test/subscription/t/014_binary.pl b/src/test/subscription/t/014_binary.pl index feefbe734e4..e5ce849c191 100644 --- a/src/test/subscription/t/014_binary.pl +++ b/src/test/subscription/t/014_binary.pl @@ -57,7 +57,7 @@ $node_publisher->safe_psql( my $publisher_connstring = $node_publisher->connstr . ' dbname=postgres'; $node_subscriber->safe_psql('postgres', - "CREATE SUBSCRIPTION tsub CONNECTION '$publisher_connstring' " + "CREATE SUBSCRIPTION tsub CONNECTION '$publisher_connstring' " . "PUBLICATION tpub WITH (slot_name = tpub_slot, binary = true)"); # Ensure the COPY command is executed in binary format on the publisher diff --git a/src/test/subscription/t/015_stream.pl b/src/test/subscription/t/015_stream.pl index 88344bdbaa0..5c00711ef2d 100644 --- a/src/test/subscription/t/015_stream.pl +++ b/src/test/subscription/t/015_stream.pl @@ -30,13 +30,13 @@ sub test_streaming # Interleave a pair of transactions, each exceeding the 64kB limit. my $offset = 0; - my $h = $node_publisher->background_psql('postgres', - on_error_stop => 0); + my $h = $node_publisher->background_psql('postgres', on_error_stop => 0); # Check the subscriber log from now on. 
$offset = -s $node_subscriber->logfile; - $h->query_safe(q{ + $h->query_safe( + q{ BEGIN; INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i); UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0; @@ -52,7 +52,7 @@ sub test_streaming }); $h->query_safe('COMMIT'); - # errors make the next test fail, so ignore them here + # errors make the next test fail, so ignore them here $h->quit; $node_publisher->wait_for_catchup($appname); @@ -211,15 +211,15 @@ $node_subscriber->reload; $node_subscriber->safe_psql('postgres', q{SELECT 1}); # Interleave a pair of transactions, each exceeding the 64kB limit. -my $h = $node_publisher->background_psql('postgres', - on_error_stop => 0); +my $h = $node_publisher->background_psql('postgres', on_error_stop => 0); # Confirm if a deadlock between the leader apply worker and the parallel apply # worker can be detected. my $offset = -s $node_subscriber->logfile; -$h->query_safe(q{ +$h->query_safe( + q{ BEGIN; INSERT INTO test_tab_2 SELECT i FROM generate_series(1, 5000) s(i); }); @@ -260,7 +260,8 @@ $node_subscriber->safe_psql('postgres', # Check the subscriber log from now on. $offset = -s $node_subscriber->logfile; -$h->query_safe(q{ +$h->query_safe( + q{ BEGIN; INSERT INTO test_tab_2 SELECT i FROM generate_series(1, 5000) s(i); }); @@ -296,7 +297,8 @@ is($result, qq(10000), 'data replicated to subscriber after dropping index'); $node_subscriber->append_conf('postgresql.conf', 'logical_replication_mode = immediate'); # Reset the log_min_messages to default. -$node_subscriber->append_conf('postgresql.conf', "log_min_messages = warning"); +$node_subscriber->append_conf('postgresql.conf', + "log_min_messages = warning"); $node_subscriber->reload; # Run a query to make sure that the reload has taken effect. 
@@ -317,7 +319,8 @@ $node_publisher->wait_for_catchup($appname); # Check that transaction is committed on subscriber $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_tab_2"); -is($result, qq(15000), 'parallel apply worker replayed all changes from file'); +is($result, qq(15000), + 'parallel apply worker replayed all changes from file'); $node_subscriber->stop; $node_publisher->stop; diff --git a/src/test/subscription/t/018_stream_subxact_abort.pl b/src/test/subscription/t/018_stream_subxact_abort.pl index 2b67ae1e0ac..91d19ae672a 100644 --- a/src/test/subscription/t/018_stream_subxact_abort.pl +++ b/src/test/subscription/t/018_stream_subxact_abort.pl @@ -205,7 +205,8 @@ test_streaming($node_publisher, $node_subscriber, $appname, 1); $node_subscriber->append_conf('postgresql.conf', 'logical_replication_mode = immediate'); # Reset the log_min_messages to default. -$node_subscriber->append_conf('postgresql.conf', "log_min_messages = warning"); +$node_subscriber->append_conf('postgresql.conf', + "log_min_messages = warning"); $node_subscriber->reload; # Run a query to make sure that the reload has taken effect. diff --git a/src/test/subscription/t/023_twophase_stream.pl b/src/test/subscription/t/023_twophase_stream.pl index f4af44414b6..fdcc4b359d2 100644 --- a/src/test/subscription/t/023_twophase_stream.pl +++ b/src/test/subscription/t/023_twophase_stream.pl @@ -391,7 +391,8 @@ test_streaming($node_publisher, $node_subscriber, $appname, 1); $node_subscriber->append_conf('postgresql.conf', 'logical_replication_mode = immediate'); # Reset the log_min_messages to default. -$node_subscriber->append_conf('postgresql.conf', "log_min_messages = warning"); +$node_subscriber->append_conf('postgresql.conf', + "log_min_messages = warning"); $node_subscriber->reload; # Run a query to make sure that the reload has taken effect. 
diff --git a/src/test/subscription/t/025_rep_changes_for_schema.pl b/src/test/subscription/t/025_rep_changes_for_schema.pl index a22ae0a2ac0..8543f52710c 100644 --- a/src/test/subscription/t/025_rep_changes_for_schema.pl +++ b/src/test/subscription/t/025_rep_changes_for_schema.pl @@ -63,7 +63,8 @@ $node_subscriber->safe_psql('postgres', ); # Wait for initial table sync to finish -$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub_schema'); +$node_subscriber->wait_for_subscription_sync($node_publisher, + 'tap_sub_schema'); # Check the schema table data is synced up my $result = $node_subscriber->safe_psql('postgres', diff --git a/src/test/subscription/t/026_stats.pl b/src/test/subscription/t/026_stats.pl index 96a6d686eb6..45e51c5a52c 100644 --- a/src/test/subscription/t/026_stats.pl +++ b/src/test/subscription/t/026_stats.pl @@ -43,7 +43,7 @@ sub create_sub_pub_w_errors ]); # Set up publication. - my $pub_name = $table_name . '_pub'; + my $pub_name = $table_name . '_pub'; my $publisher_connstr = $node_publisher->connstr . qq( dbname=$db); $node_publisher->safe_psql($db, diff --git a/src/test/subscription/t/027_nosuperuser.pl b/src/test/subscription/t/027_nosuperuser.pl index 8a7e79cacac..d7a7e3ef5bb 100644 --- a/src/test/subscription/t/027_nosuperuser.pl +++ b/src/test/subscription/t/027_nosuperuser.pl @@ -81,7 +81,7 @@ sub grant_superuser # "regress_admin". For partitioned tables, layout the partitions differently # on the publisher than on the subscriber. # -$node_publisher = PostgreSQL::Test::Cluster->new('publisher'); +$node_publisher = PostgreSQL::Test::Cluster->new('publisher'); $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber'); $node_publisher->init(allows_streaming => 'logical'); $node_subscriber->init; @@ -89,10 +89,10 @@ $node_publisher->start; $node_subscriber->start; $publisher_connstr = $node_publisher->connstr . 
' dbname=postgres'; my %remainder_a = ( - publisher => 0, + publisher => 0, subscriber => 1); my %remainder_b = ( - publisher => 1, + publisher => 1, subscriber => 0); for my $node ($node_publisher, $node_subscriber) @@ -197,8 +197,7 @@ publish_insert("alice.hashpart", 103); publish_update("alice.hashpart", 102 => 120); publish_delete("alice.hashpart", 101); expect_replication("alice.hashpart", 2, 103, 120, - "nosuperuser admin with privileges on role can replicate into hashpart" -); + "nosuperuser admin with privileges on role can replicate into hashpart"); # Force RLS on the target table and check that replication fails. $node_subscriber->safe_psql( @@ -223,8 +222,7 @@ $node_subscriber->safe_psql( ALTER TABLE alice.unpartitioned NO FORCE ROW LEVEL SECURITY; )); expect_replication("alice.unpartitioned", 3, 11, 15, - "non-superuser admin can replicate insert if rls is not forced" -); + "non-superuser admin can replicate insert if rls is not forced"); $node_subscriber->safe_psql( 'postgres', qq( @@ -237,8 +235,7 @@ expect_failure( 11, 15, qr/ERROR: ( [A-Z0-9]+:)? user "regress_alice" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi, - "replication of update into table with forced rls fails" -); + "replication of update into table with forced rls fails"); $node_subscriber->safe_psql( 'postgres', qq( ALTER TABLE alice.unpartitioned NO FORCE ROW LEVEL SECURITY; @@ -258,8 +255,7 @@ expect_failure( 13, 17, qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi, - "replication of insert fails if table owner lacks insert permission" -); + "replication of insert fails if table owner lacks insert permission"); # alice needs INSERT but not SELECT to replicate an INSERT. 
$node_subscriber->safe_psql( diff --git a/src/test/subscription/t/028_row_filter.pl b/src/test/subscription/t/028_row_filter.pl index b0d4b2d5b17..aec483f785f 100644 --- a/src/test/subscription/t/028_row_filter.pl +++ b/src/test/subscription/t/028_row_filter.pl @@ -18,7 +18,7 @@ $node_subscriber->init(allows_streaming => 'logical'); $node_subscriber->start; my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; -my $appname = 'tap_sub'; +my $appname = 'tap_sub'; # ==================================================================== # Testcase start: FOR ALL TABLES @@ -544,13 +544,11 @@ is( $result, qq(20 $result = $node_subscriber->safe_psql('postgres', "SELECT a FROM tab_rowfilter_parent_sync ORDER BY 1"); -is( $result, qq(16), - 'check initial data copy from tab_rowfilter_parent_sync'); +is($result, qq(16), 'check initial data copy from tab_rowfilter_parent_sync'); $result = $node_subscriber->safe_psql('postgres', "SELECT a FROM tab_rowfilter_child_sync ORDER BY 1"); -is( $result, qq(), - 'check initial data copy from tab_rowfilter_child_sync'); +is($result, qq(), 'check initial data copy from tab_rowfilter_child_sync'); # The following commands are executed after CREATE SUBSCRIPTION, so these SQL # commands are for testing normal logical replication behavior. 
diff --git a/src/test/subscription/t/030_origin.pl b/src/test/subscription/t/030_origin.pl index b9b1351ddbb..9ca1fa25d8f 100644 --- a/src/test/subscription/t/030_origin.pl +++ b/src/test/subscription/t/030_origin.pl @@ -9,10 +9,10 @@ use PostgreSQL::Test::Cluster; use PostgreSQL::Test::Utils; use Test::More; -my $subname_AB = 'tap_sub_A_B'; +my $subname_AB = 'tap_sub_A_B'; my $subname_AB2 = 'tap_sub_A_B_2'; -my $subname_BA = 'tap_sub_B_A'; -my $subname_BC = 'tap_sub_B_C'; +my $subname_BA = 'tap_sub_B_A'; +my $subname_BC = 'tap_sub_B_C'; my $result; my $stdout; diff --git a/src/test/subscription/t/031_column_list.pl b/src/test/subscription/t/031_column_list.pl index b67292ba9c6..dbff8060402 100644 --- a/src/test/subscription/t/031_column_list.pl +++ b/src/test/subscription/t/031_column_list.pl @@ -20,7 +20,7 @@ $node_subscriber->append_conf('postgresql.conf', $node_subscriber->start; my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres'; -my $offset = 0; +my $offset = 0; # setup tables on both nodes diff --git a/src/test/subscription/t/032_subscribe_use_index.pl b/src/test/subscription/t/032_subscribe_use_index.pl index 76d7c85fff4..576eec6a578 100644 --- a/src/test/subscription/t/032_subscribe_use_index.pl +++ b/src/test/subscription/t/032_subscribe_use_index.pl @@ -18,8 +18,8 @@ $node_subscriber->init(allows_streaming => 'logical'); $node_subscriber->start; my $publisher_connstr = $node_publisher->connstr . 
' dbname=postgres'; -my $appname = 'tap_sub'; -my $result = ''; +my $appname = 'tap_sub'; +my $result = ''; # ============================================================================= # Testcase start: Subscription can use index with multiple rows and columns @@ -60,19 +60,24 @@ $node_publisher->safe_psql('postgres', # wait until the index is used on the subscriber $node_publisher->wait_for_catchup($appname); -$node_subscriber->poll_query_until( - 'postgres', q{select (idx_scan = 4) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';} -) or die "Timed out while waiting for check subscriber tap_sub_rep_full updates 4 rows via index"; +$node_subscriber->poll_query_until('postgres', + q{select (idx_scan = 4) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';} + ) + or die + "Timed out while waiting for check subscriber tap_sub_rep_full updates 4 rows via index"; # make sure that the subscriber has the correct data after the UPDATE $result = $node_subscriber->safe_psql('postgres', - "select count(*) from test_replica_id_full WHERE (x = 100 and y = '200')"); -is($result, qq(2), 'ensure subscriber has the correct data at the end of the test'); + "select count(*) from test_replica_id_full WHERE (x = 100 and y = '200')" +); +is($result, qq(2), + 'ensure subscriber has the correct data at the end of the test'); # make sure that the subscriber has the correct data after the first DELETE $result = $node_subscriber->safe_psql('postgres', "select count(*) from test_replica_id_full where x in (5, 6)"); -is($result, qq(0), 'ensure subscriber has the correct data at the end of the test'); +is($result, qq(0), + 'ensure subscriber has the correct data at the end of the test'); # cleanup pub $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full"); @@ -145,17 +150,21 @@ $node_publisher->safe_psql('postgres', # wait until the index is used on the subscriber $node_publisher->wait_for_catchup($appname); 
-$node_subscriber->poll_query_until( - 'postgres', q{select sum(idx_scan)=3 from pg_stat_all_indexes where indexrelname ilike 'users_table_part_%';} -) or die "Timed out while waiting for check subscriber tap_sub_rep_full updates partitioned table"; +$node_subscriber->poll_query_until('postgres', + q{select sum(idx_scan)=3 from pg_stat_all_indexes where indexrelname ilike 'users_table_part_%';} + ) + or die + "Timed out while waiting for check subscriber tap_sub_rep_full updates partitioned table"; # make sure that the subscriber has the correct data $result = $node_subscriber->safe_psql('postgres', "select sum(user_id+value_1+value_2) from users_table_part"); -is($result, qq(10907), 'ensure subscriber has the correct data at the end of the test'); +is($result, qq(10907), + 'ensure subscriber has the correct data at the end of the test'); $result = $node_subscriber->safe_psql('postgres', "select count(DISTINCT(user_id,value_1, value_2)) from users_table_part"); -is($result, qq(99), 'ensure subscriber has the correct data at the end of the test'); +is($result, qq(99), + 'ensure subscriber has the correct data at the end of the test'); # cleanup pub $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full"); @@ -182,15 +191,18 @@ $node_subscriber->safe_psql('postgres', # index with only an expression $node_subscriber->safe_psql('postgres', - "CREATE INDEX people_names_expr_only ON people ((firstname || ' ' || lastname))"); + "CREATE INDEX people_names_expr_only ON people ((firstname || ' ' || lastname))" +); # partial index $node_subscriber->safe_psql('postgres', - "CREATE INDEX people_names_partial ON people(firstname) WHERE (firstname = 'first_name_1')"); + "CREATE INDEX people_names_partial ON people(firstname) WHERE (firstname = 'first_name_1')" +); # insert some initial data $node_publisher->safe_psql('postgres', - "INSERT INTO people SELECT 'first_name_' || i::text, 'last_name_' || i::text FROM generate_series(0,200) i"); + "INSERT INTO people 
SELECT 'first_name_' || i::text, 'last_name_' || i::text FROM generate_series(0,200) i" +); # create pub/sub $node_publisher->safe_psql('postgres', @@ -204,31 +216,41 @@ $node_subscriber->wait_for_subscription_sync($node_publisher, $appname); # update 2 rows $node_publisher->safe_psql('postgres', - "UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'"); + "UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'" +); $node_publisher->safe_psql('postgres', - "UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_2' AND lastname = 'last_name_2'"); + "UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_2' AND lastname = 'last_name_2'" +); # make sure none of the indexes is used on the subscriber $node_publisher->wait_for_catchup($appname); $result = $node_subscriber->safe_psql('postgres', - "select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')"); -is($result, qq(0), 'ensure subscriber tap_sub_rep_full updates two rows via seq. scan with index on expressions'); + "select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')" +); +is($result, qq(0), + 'ensure subscriber tap_sub_rep_full updates two rows via seq. 
scan with index on expressions' +); $node_publisher->safe_psql('postgres', "DELETE FROM people WHERE firstname = 'first_name_3'"); $node_publisher->safe_psql('postgres', - "DELETE FROM people WHERE firstname = 'first_name_4' AND lastname = 'last_name_4'"); + "DELETE FROM people WHERE firstname = 'first_name_4' AND lastname = 'last_name_4'" +); # make sure the index is not used on the subscriber $node_publisher->wait_for_catchup($appname); $result = $node_subscriber->safe_psql('postgres', - "select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')"); -is($result, qq(0), 'ensure subscriber tap_sub_rep_full updates two rows via seq. scan with index on expressions'); + "select sum(idx_scan) from pg_stat_all_indexes where indexrelname IN ('people_names_expr_only', 'people_names_partial')" +); +is($result, qq(0), + 'ensure subscriber tap_sub_rep_full updates two rows via seq. scan with index on expressions' +); # make sure that the subscriber has the correct data -$result = $node_subscriber->safe_psql('postgres', - "SELECT count(*) FROM people"); -is($result, qq(199), 'ensure subscriber has the correct data at the end of the test'); +$result = + $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM people"); +is($result, qq(199), + 'ensure subscriber has the correct data at the end of the test'); # cleanup pub $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full"); @@ -252,11 +274,13 @@ $node_publisher->safe_psql('postgres', $node_subscriber->safe_psql('postgres', "CREATE TABLE people (firstname text, lastname text)"); $node_subscriber->safe_psql('postgres', - "CREATE INDEX people_names ON people (firstname, lastname, (firstname || ' ' || lastname))"); + "CREATE INDEX people_names ON people (firstname, lastname, (firstname || ' ' || lastname))" +); # insert some initial data $node_publisher->safe_psql('postgres', - "INSERT INTO people SELECT 'first_name_' || i::text, 'last_name_' 
|| i::text FROM generate_series(0, 20) i"); + "INSERT INTO people SELECT 'first_name_' || i::text, 'last_name_' || i::text FROM generate_series(0, 20) i" +); # create pub/sub $node_publisher->safe_psql('postgres', @@ -270,7 +294,8 @@ $node_subscriber->wait_for_subscription_sync($node_publisher, $appname); # update 1 row $node_publisher->safe_psql('postgres', - "UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'"); + "UPDATE people SET firstname = 'no-name' WHERE firstname = 'first_name_1'" +); # delete the updated row $node_publisher->safe_psql('postgres', @@ -278,22 +303,25 @@ $node_publisher->safe_psql('postgres', # wait until the index is used on the subscriber $node_publisher->wait_for_catchup($appname); -$node_subscriber->poll_query_until( - 'postgres', q{select idx_scan=2 from pg_stat_all_indexes where indexrelname = 'people_names';} -) or die "Timed out while waiting for check subscriber tap_sub_rep_full deletes two rows via index scan with index on expressions and columns"; +$node_subscriber->poll_query_until('postgres', + q{select idx_scan=2 from pg_stat_all_indexes where indexrelname = 'people_names';} + ) + or die + "Timed out while waiting for check subscriber tap_sub_rep_full deletes two rows via index scan with index on expressions and columns"; # make sure that the subscriber has the correct data -$result = $node_subscriber->safe_psql('postgres', - "SELECT count(*) FROM people"); -is($result, qq(20), 'ensure subscriber has the correct data at the end of the test'); +$result = + $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM people"); +is($result, qq(20), + 'ensure subscriber has the correct data at the end of the test'); $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM people WHERE firstname = 'no-name'"); -is($result, qq(0), 'ensure subscriber has the correct data at the end of the test'); +is($result, qq(0), + 'ensure subscriber has the correct data at the end of the test'); # now, drop 
the index with the expression, we'll use sequential scan -$node_subscriber->safe_psql('postgres', - "DROP INDEX people_names"); +$node_subscriber->safe_psql('postgres', "DROP INDEX people_names"); # delete 1 row $node_publisher->safe_psql('postgres', @@ -303,7 +331,8 @@ $node_publisher->safe_psql('postgres', $node_publisher->wait_for_catchup($appname); $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM people WHERE lastname = 'last_name_18'"); -is($result, qq(0), 'ensure subscriber has the correct data at the end of the test'); +is($result, qq(0), + 'ensure subscriber has the correct data at the end of the test'); # cleanup pub $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full"); @@ -319,19 +348,16 @@ $node_subscriber->safe_psql('postgres', "DROP TABLE people"); # Testcase start: Null values and missing column $node_publisher->safe_psql('postgres', - "CREATE TABLE test_replica_id_full (x int)" -); + "CREATE TABLE test_replica_id_full (x int)"); $node_publisher->safe_psql('postgres', "ALTER TABLE test_replica_id_full REPLICA IDENTITY FULL"); $node_subscriber->safe_psql('postgres', - "CREATE TABLE test_replica_id_full (x int, y int)" -); + "CREATE TABLE test_replica_id_full (x int, y int)"); $node_subscriber->safe_psql('postgres', - "CREATE INDEX test_replica_id_full_idx ON test_replica_id_full(x,y)" -); + "CREATE INDEX test_replica_id_full_idx ON test_replica_id_full(x,y)"); # create pub/sub $node_publisher->safe_psql('postgres', @@ -352,19 +378,23 @@ $node_publisher->safe_psql('postgres', # check if the index is used even when the index has NULL values $node_publisher->wait_for_catchup($appname); -$node_subscriber->poll_query_until( - 'postgres', q{select idx_scan=1 from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';} -) or die "Timed out while waiting for check subscriber tap_sub_rep_full updates test_replica_id_full table"; +$node_subscriber->poll_query_until('postgres', + q{select idx_scan=1 from 
pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idx';} + ) + or die + "Timed out while waiting for check subscriber tap_sub_rep_full updates test_replica_id_full table"; # make sure that the subscriber has the correct data $result = $node_subscriber->safe_psql('postgres', "select sum(x) from test_replica_id_full WHERE y IS NULL"); -is($result, qq(7), 'ensure subscriber has the correct data at the end of the test'); +is($result, qq(7), + 'ensure subscriber has the correct data at the end of the test'); # make sure that the subscriber has the correct data $result = $node_subscriber->safe_psql('postgres', "select count(*) from test_replica_id_full WHERE y IS NULL"); -is($result, qq(3), 'ensure subscriber has the correct data at the end of the test'); +is($result, qq(3), + 'ensure subscriber has the correct data at the end of the test'); # cleanup pub $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full"); @@ -394,11 +424,13 @@ $node_publisher->safe_psql('postgres', $node_subscriber->safe_psql('postgres', "CREATE TABLE test_replica_id_full (x int, y int)"); $node_subscriber->safe_psql('postgres', - "CREATE UNIQUE INDEX test_replica_id_full_idxy ON test_replica_id_full(x,y)"); + "CREATE UNIQUE INDEX test_replica_id_full_idxy ON test_replica_id_full(x,y)" +); # insert some initial data $node_publisher->safe_psql('postgres', - "INSERT INTO test_replica_id_full SELECT i, i FROM generate_series(0,21) i"); + "INSERT INTO test_replica_id_full SELECT i, i FROM generate_series(0,21) i" +); # create pub/sub $node_publisher->safe_psql('postgres', @@ -412,7 +444,8 @@ $node_subscriber->wait_for_subscription_sync($node_publisher, $appname); # duplicate the data in subscriber for y column $node_subscriber->safe_psql('postgres', - "INSERT INTO test_replica_id_full SELECT i+100, i FROM generate_series(0,21) i"); + "INSERT INTO test_replica_id_full SELECT i+100, i FROM generate_series(0,21) i" +); # now, we update only 1 row on the publisher and 
expect the subscriber to only # update 1 row although there are two tuples with y = 15 on the subscriber @@ -421,15 +454,18 @@ $node_publisher->safe_psql('postgres', # wait until the index is used on the subscriber $node_publisher->wait_for_catchup($appname); -$node_subscriber->poll_query_until( - 'postgres', q{select (idx_scan = 1) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idxy';} -) or die "Timed out while waiting for check subscriber tap_sub_rep_full updates one row via index"; +$node_subscriber->poll_query_until('postgres', + q{select (idx_scan = 1) from pg_stat_all_indexes where indexrelname = 'test_replica_id_full_idxy';} + ) + or die + "Timed out while waiting for check subscriber tap_sub_rep_full updates one row via index"; # make sure that the subscriber has the correct data # we only updated 1 row $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM test_replica_id_full WHERE x = 2000"); -is($result, qq(1), 'ensure subscriber has the correct data at the end of the test'); +is($result, qq(1), + 'ensure subscriber has the correct data at the end of the test'); # cleanup pub $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_rep_full"); diff --git a/src/test/subscription/t/033_run_as_table_owner.pl b/src/test/subscription/t/033_run_as_table_owner.pl index cabc8a7c59a..0aa8a093efc 100644 --- a/src/test/subscription/t/033_run_as_table_owner.pl +++ b/src/test/subscription/t/033_run_as_table_owner.pl @@ -73,7 +73,7 @@ sub revoke_superuser # "regress_admin". For partitioned tables, layout the partitions differently # on the publisher than on the subscriber. 
# -$node_publisher = PostgreSQL::Test::Cluster->new('publisher'); +$node_publisher = PostgreSQL::Test::Cluster->new('publisher'); $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber'); $node_publisher->init(allows_streaming => 'logical'); $node_subscriber->init; @@ -120,18 +120,14 @@ publish_insert("alice.unpartitioned", 3); publish_insert("alice.unpartitioned", 5); publish_update("alice.unpartitioned", 1 => 7); publish_delete("alice.unpartitioned", 3); -expect_replication("alice.unpartitioned", 2, 5, 7, - "superuser can replicate"); +expect_replication("alice.unpartitioned", 2, 5, 7, "superuser can replicate"); # Revoke superuser privilege for "regress_admin", and verify that we now # fail to replicate an insert. revoke_superuser("regress_admin"); publish_insert("alice.unpartitioned", 9); expect_failure( - "alice.unpartitioned", - 2, - 5, - 7, + "alice.unpartitioned", 2, 5, 7, qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi, "with no privileges cannot replicate"); @@ -144,8 +140,7 @@ GRANT INSERT,UPDATE,DELETE ON alice.unpartitioned TO regress_admin; REVOKE SELECT ON alice.unpartitioned FROM regress_admin; )); expect_replication("alice.unpartitioned", 3, 5, 9, - "with INSERT privilege can replicate INSERT" -); + "with INSERT privilege can replicate INSERT"); # We can't yet replicate an UPDATE because we don't have SELECT. publish_update("alice.unpartitioned", 5 => 11); @@ -156,8 +151,7 @@ expect_failure( 5, 9, qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi, - "without SELECT privilege cannot replicate UPDATE or DELETE" -); + "without SELECT privilege cannot replicate UPDATE or DELETE"); # After granting SELECT, replication resumes. 
$node_subscriber->safe_psql( @@ -166,8 +160,7 @@ SET SESSION AUTHORIZATION regress_alice; GRANT SELECT ON alice.unpartitioned TO regress_admin; )); expect_replication("alice.unpartitioned", 2, 7, 11, - "with all privileges can replicate" -); + "with all privileges can replicate"); # Remove all privileges again. Instead, give the ability to SET ROLE to # regress_alice. @@ -189,8 +182,7 @@ expect_failure( 7, 11, qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi, - "with SET ROLE but not INHERIT cannot replicate" -); + "with SET ROLE but not INHERIT cannot replicate"); # Now remove SET ROLE and add INHERIT and check that things start working. $node_subscriber->safe_psql( @@ -198,7 +190,6 @@ $node_subscriber->safe_psql( GRANT regress_alice TO regress_admin WITH INHERIT TRUE, SET FALSE; )); expect_replication("alice.unpartitioned", 3, 7, 13, - "with INHERIT but not SET ROLE can replicate" -); + "with INHERIT but not SET ROLE can replicate"); done_testing(); diff --git a/src/test/subscription/t/100_bugs.pl b/src/test/subscription/t/100_bugs.pl index b832ddcf63e..4fabc441683 100644 --- a/src/test/subscription/t/100_bugs.pl +++ b/src/test/subscription/t/100_bugs.pl @@ -127,8 +127,8 @@ $node_twoways->start; for my $db (qw(d1 d2)) { $node_twoways->safe_psql('postgres', "CREATE DATABASE $db"); - $node_twoways->safe_psql($db, "CREATE TABLE t (f int)"); - $node_twoways->safe_psql($db, "CREATE TABLE t2 (f int)"); + $node_twoways->safe_psql($db, "CREATE TABLE t (f int)"); + $node_twoways->safe_psql($db, "CREATE TABLE t2 (f int)"); } my $rows = 3000; @@ -141,7 +141,7 @@ $node_twoways->safe_psql( }); $node_twoways->safe_psql('d2', - "CREATE SUBSCRIPTION testsub CONNECTION \$\$" + "CREATE SUBSCRIPTION testsub CONNECTION \$\$" . $node_twoways->connstr('d1') . "\$\$ PUBLICATION testpub WITH (create_slot=false, " . "slot_name='testslot')"); |
