Diffstat (limited to 'src/test')
-rw-r--r--  src/test/modules/test_pg_dump/t/001_base.pl              | 12
-rw-r--r--  src/test/modules/test_radixtree/test_radixtree.c         |  8
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm                 | 50
-rw-r--r--  src/test/perl/PostgreSQL/Test/Kerberos.pm                | 19
-rw-r--r--  src/test/recovery/t/001_stream_rep.pl                    |  8
-rw-r--r--  src/test/recovery/t/015_promotion_pages.pl               |  3
-rw-r--r--  src/test/recovery/t/019_replslot_limit.pl                |  4
-rw-r--r--  src/test/recovery/t/035_standby_logical_decoding.pl      |  9
-rw-r--r--  src/test/recovery/t/037_invalid_database.pl              | 10
-rw-r--r--  src/test/recovery/t/040_standby_failover_slots_sync.pl   | 92
-rw-r--r--  src/test/ssl/t/001_ssltests.pl                           |  3
-rw-r--r--  src/test/subscription/t/008_diff_schema.pl               |  3
-rw-r--r--  src/test/subscription/t/019_stream_subxact_ddl_abort.pl  |  3
-rw-r--r--  src/test/subscription/t/026_stats.pl                     |  3
-rw-r--r--  src/test/subscription/t/029_on_error.pl                  |  3
-rw-r--r--  src/test/subscription/t/032_subscribe_use_index.pl       |  3
-rw-r--r--  src/test/subscription/t/033_run_as_table_owner.pl        |  5
-rw-r--r--  src/test/subscription/t/100_bugs.pl                      | 15
18 files changed, 146 insertions, 107 deletions
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
index 4266f26c658..e2579e29cd8 100644
--- a/src/test/modules/test_pg_dump/t/001_base.pl
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -180,7 +180,8 @@ my %pgdump_runs = (
 	# (undumped) extension tables
 	privileged_internals => {
 		dump_cmd => [
-			'pg_dump', '--no-sync', "--file=$tempdir/privileged_internals.sql",
+			'pg_dump', '--no-sync',
+			"--file=$tempdir/privileged_internals.sql",
 			# these two tables are irrelevant to the test case
 			'--exclude-table=regress_pg_dump_schema.external_tab',
 			'--exclude-table=regress_pg_dump_schema.extdependtab',
@@ -222,15 +223,18 @@ my %pgdump_runs = (
 	},
 	exclude_extension => {
 		dump_cmd => [
-			'pg_dump', '--no-sync', "--file=$tempdir/exclude_extension.sql",
+			'pg_dump', '--no-sync',
+			"--file=$tempdir/exclude_extension.sql",
 			'--exclude-extension=test_pg_dump', 'postgres',
 		],
 	},
 	exclude_extension_filter => {
 		dump_cmd => [
-			'pg_dump', '--no-sync',
+			'pg_dump',
+			'--no-sync',
 			"--file=$tempdir/exclude_extension_filter.sql",
-			"--filter=$tempdir/exclude_extension_filter.txt", 'postgres',
+			"--filter=$tempdir/exclude_extension_filter.txt",
+			'postgres',
 		],
 	},
diff --git a/src/test/modules/test_radixtree/test_radixtree.c b/src/test/modules/test_radixtree/test_radixtree.c
index d301c60d000..1d9165a3a23 100644
--- a/src/test/modules/test_radixtree/test_radixtree.c
+++ b/src/test/modules/test_radixtree/test_radixtree.c
@@ -112,7 +112,7 @@ static rt_node_class_test_elem rt_node_class_tests[] =
  * Return the number of keys in the radix tree.
  */
 static uint64
-rt_num_entries(rt_radix_tree * tree)
+rt_num_entries(rt_radix_tree *tree)
 {
 	return tree->ctl->num_keys;
 }
@@ -209,7 +209,7 @@ test_basic(rt_node_class_test_elem *test_info, int shift, bool asc)
 	 * false.
 	 */
 	for (int i = 0; i < children; i++)
-		EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) & keys[i]));
+		EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) &keys[i]));
 
 	rt_stats(radixtree);
 
@@ -231,14 +231,14 @@ test_basic(rt_node_class_test_elem *test_info, int shift, bool asc)
 		TestValueType update = keys[i] + 1;
 
 		/* rt_set should report the key found */
-		EXPECT_TRUE(rt_set(radixtree, keys[i], (TestValueType *) & update));
+		EXPECT_TRUE(rt_set(radixtree, keys[i], (TestValueType *) &update));
 	}
 
 	/* delete and re-insert keys */
 	for (int i = 0; i < children; i++)
 	{
 		EXPECT_TRUE(rt_delete(radixtree, keys[i]));
-		EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) & keys[i]));
+		EXPECT_FALSE(rt_set(radixtree, keys[i], (TestValueType *) &keys[i]));
 	}
 
 	/* look up keys after deleting and re-inserting */
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index aa9646329fb..83f385a4870 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -838,20 +838,20 @@ sub init_from_backup
 	my $data_path = $self->data_dir;
 	if (defined $params{combine_with_prior})
 	{
-		my @prior_backups = @{$params{combine_with_prior}};
+		my @prior_backups = @{ $params{combine_with_prior} };
 		my @prior_backup_path;
 
 		for my $prior_backup_name (@prior_backups)
 		{
 			push @prior_backup_path,
-				$root_node->backup_dir . '/' . $prior_backup_name;
+			  $root_node->backup_dir . '/' . $prior_backup_name;
 		}
 
 		local %ENV = $self->_get_env();
 
 		my @combineargs = ('pg_combinebackup', '-d');
 		if (exists $params{tablespace_map})
 		{
-			while (my ($olddir, $newdir) = each %{$params{tablespace_map}})
+			while (my ($olddir, $newdir) = each %{ $params{tablespace_map} })
 			{
 				push @combineargs, "-T$olddir=$newdir";
 			}
@@ -872,24 +872,25 @@ sub init_from_backup
 
 			# We need to generate a tablespace_map file.
 			open(my $tsmap, ">", "$data_path/tablespace_map")
-				|| die "$data_path/tablespace_map: $!";
+			  || die "$data_path/tablespace_map: $!";
 
 			# Extract tarfiles and add tablespace_map entries
 			my @tstars = grep { /^\d+.tar/ }
-				PostgreSQL::Test::Utils::slurp_dir($backup_path);
+			  PostgreSQL::Test::Utils::slurp_dir($backup_path);
 			for my $tstar (@tstars)
 			{
 				my $tsoid = $tstar;
 				$tsoid =~ s/\.tar$//;
 
 				die "no tablespace mapping for $tstar"
-					if !exists $params{tablespace_map} ||
-					!exists $params{tablespace_map}{$tsoid};
+				  if !exists $params{tablespace_map}
+				  || !exists $params{tablespace_map}{$tsoid};
 
 				my $newdir = $params{tablespace_map}{$tsoid};
 				mkdir($newdir) || die "mkdir $newdir: $!";
-				PostgreSQL::Test::Utils::system_or_bail($params{tar_program}, 'xf',
-					$backup_path . '/' . $tstar, '-C', $newdir);
+				PostgreSQL::Test::Utils::system_or_bail($params{tar_program},
+					'xf', $backup_path . '/' . $tstar,
+					'-C', $newdir);
 
 				my $escaped_newdir = $newdir;
 				$escaped_newdir =~ s/\\/\\\\/g;
@@ -906,11 +907,13 @@ sub init_from_backup
 
 		# Copy the main backup. If we see a tablespace directory for which we
 		# have a tablespace mapping, skip it, but remember that we saw it.
-		PostgreSQL::Test::RecursiveCopy::copypath($backup_path, $data_path,
+		PostgreSQL::Test::RecursiveCopy::copypath(
+			$backup_path,
+			$data_path,
 			'filterfn' => sub {
 				my ($path) = @_;
-				if ($path =~ /^pg_tblspc\/(\d+)$/ &&
-					exists $params{tablespace_map}{$1})
+				if ($path =~ /^pg_tblspc\/(\d+)$/
+					&& exists $params{tablespace_map}{$1})
 				{
 					push @tsoids, $1;
 					return 0;
@@ -922,14 +925,14 @@ sub init_from_backup
 		{
 			# We need to generate a tablespace_map file.
 			open(my $tsmap, ">", "$data_path/tablespace_map")
-				|| die "$data_path/tablespace_map: $!";
+			  || die "$data_path/tablespace_map: $!";
 
 			# Now use the list of tablespace links to copy each tablespace.
 			for my $tsoid (@tsoids)
 			{
 				die "no tablespace mapping for $tsoid"
-					if !exists $params{tablespace_map} ||
-					!exists $params{tablespace_map}{$tsoid};
+				  if !exists $params{tablespace_map}
+				  || !exists $params{tablespace_map}{$tsoid};
 
 				my $olddir = $backup_path . '/pg_tblspc/' . $tsoid;
 				my $newdir = $params{tablespace_map}{$tsoid};
@@ -1166,9 +1169,8 @@ sub restart
 
 	# -w is now the default but having it here does no harm and helps
 	# compatibility with older versions.
-	$ret = PostgreSQL::Test::Utils::system_log(
-		'pg_ctl', '-w', '-D', $self->data_dir,
-		'-l', $self->logfile, 'restart');
+	$ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-w', '-D',
+		$self->data_dir, '-l', $self->logfile, 'restart');
 
 	if ($ret != 0)
 	{
@@ -3370,19 +3372,21 @@ sub validate_slot_inactive_since
 	my ($self, $slot_name, $reference_time) = @_;
 	my $name = $self->name;
 
-	my $inactive_since = $self->safe_psql('postgres',
+	my $inactive_since = $self->safe_psql(
+		'postgres',
 		qq(SELECT inactive_since FROM pg_replication_slots
 			WHERE slot_name = '$slot_name' AND inactive_since IS NOT NULL;)
-		);
+	);
 
 	# Check that the inactive_since is sane
-	is($self->safe_psql('postgres',
-		qq[SELECT '$inactive_since'::timestamptz > to_timestamp(0) AND
+	is( $self->safe_psql(
+			'postgres',
+			qq[SELECT '$inactive_since'::timestamptz > to_timestamp(0) AND
 				'$inactive_since'::timestamptz > '$reference_time'::timestamptz;]
 		),
 		't',
 		"last inactive time for slot $slot_name is valid on node $name")
-		or die "could not validate captured inactive_since for slot $slot_name";
+	  or die "could not validate captured inactive_since for slot $slot_name";
 
 	return $inactive_since;
 }
diff --git a/src/test/perl/PostgreSQL/Test/Kerberos.pm b/src/test/perl/PostgreSQL/Test/Kerberos.pm
index f7810da9c1d..f76d765368e 100644
--- a/src/test/perl/PostgreSQL/Test/Kerberos.pm
+++ b/src/test/perl/PostgreSQL/Test/Kerberos.pm
@@ -10,10 +10,12 @@ use strict;
 use warnings FATAL => 'all';
 use PostgreSQL::Test::Utils;
 
-our ($krb5_bin_dir, $krb5_sbin_dir, $krb5_config, $kinit, $klist,
-	$kdb5_util, $kadmin_local, $krb5kdc,
-	$krb5_conf, $kdc_conf, $krb5_cache, $krb5_log, $kdc_log,
-	$kdc_port, $kdc_datadir, $kdc_pidfile, $keytab);
+our (
+	$krb5_bin_dir, $krb5_sbin_dir, $krb5_config, $kinit,
+	$klist, $kdb5_util, $kadmin_local, $krb5kdc,
+	$krb5_conf, $kdc_conf, $krb5_cache, $krb5_log,
+	$kdc_log, $kdc_port, $kdc_datadir, $kdc_pidfile,
+	$keytab);
 
 INIT
 {
@@ -178,7 +180,8 @@ $realm = {
     key_stash_file = $kdc_datadir/_k5.$realm
 }!);
 
-	mkdir $kdc_datadir or BAIL_OUT("could not create directory \"$kdc_datadir\"");
+	mkdir $kdc_datadir
+	  or BAIL_OUT("could not create directory \"$kdc_datadir\"");
 
 	# Ensure that we use test's config and cache files, not global ones.
 	$ENV{'KRB5_CONFIG'} = $krb5_conf;
@@ -189,7 +192,8 @@ $realm = {
 
 	system_or_bail $kdb5_util, 'create', '-s', '-P', 'secret0';
 
-	system_or_bail $kadmin_local, '-q', "addprinc -randkey $service_principal";
+	system_or_bail $kadmin_local, '-q',
+	  "addprinc -randkey $service_principal";
 	system_or_bail $kadmin_local, '-q', "ktadd -k $keytab $service_principal";
 
 	system_or_bail $krb5kdc, '-P', $kdc_pidfile;
@@ -226,7 +230,8 @@ END
 	# take care not to change the script's exit value
 	my $exit_code = $?;
 
-	kill 'INT', `cat $kdc_pidfile` if defined($kdc_pidfile) && -f $kdc_pidfile;
+	kill 'INT', `cat $kdc_pidfile`
+	  if defined($kdc_pidfile) && -f $kdc_pidfile;
 
 	$? = $exit_code;
 }
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index 4c698b5ce1b..f3ea45ac4a2 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -99,9 +99,11 @@ is($result, qq(33|0|t), 'check streamed sequence content on standby 2');
 $node_primary->safe_psql('postgres',
 	"CREATE UNLOGGED SEQUENCE ulseq; SELECT nextval('ulseq')");
 $node_primary->wait_for_replay_catchup($node_standby_1);
-is($node_standby_1->safe_psql('postgres',
-	"SELECT pg_sequence_last_value('ulseq'::regclass) IS NULL"),
-	't', 'pg_sequence_last_value() on unlogged sequence on standby 1');
+is( $node_standby_1->safe_psql(
+		'postgres',
+		"SELECT pg_sequence_last_value('ulseq'::regclass) IS NULL"),
+	't',
+	'pg_sequence_last_value() on unlogged sequence on standby 1');
 
 # Check that only READ-only queries can run on standbys
 is($node_standby_1->psql('postgres', 'INSERT INTO tab_int VALUES (1)'),
diff --git a/src/test/recovery/t/015_promotion_pages.pl b/src/test/recovery/t/015_promotion_pages.pl
index d5aaec0051d..7972fc7b77b 100644
--- a/src/test/recovery/t/015_promotion_pages.pl
+++ b/src/test/recovery/t/015_promotion_pages.pl
@@ -56,7 +56,8 @@ $bravo->safe_psql('postgres', 'checkpoint');
 # beyond the previous vacuum.
 $alpha->safe_psql('postgres', 'create table test2 (a int, b bytea)');
 $alpha->safe_psql('postgres',
-	q{insert into test2 select generate_series(1,10000), sha256(random()::text::bytea)});
+	q{insert into test2 select generate_series(1,10000), sha256(random()::text::bytea)}
+);
 $alpha->safe_psql('postgres', 'truncate test2');
 
 # Wait again for all records to be replayed.
diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl
index 96b60cedbbd..efb4ba3af1e 100644
--- a/src/test/recovery/t/019_replslot_limit.pl
+++ b/src/test/recovery/t/019_replslot_limit.pl
@@ -443,7 +443,7 @@ $primary4->safe_psql(
 # Get inactive_since value after the slot's creation. Note that the slot is
 # still inactive till it's used by the standby below.
 my $inactive_since =
-	$primary4->validate_slot_inactive_since($sb4_slot, $slot_creation_time);
+  $primary4->validate_slot_inactive_since($sb4_slot, $slot_creation_time);
 
 $standby4->start;
 
@@ -502,7 +502,7 @@ $publisher4->safe_psql('postgres',
 # Get inactive_since value after the slot's creation. Note that the slot is
 # still inactive till it's used by the subscriber below.
 $inactive_since =
-	$publisher4->validate_slot_inactive_since($lsub4_slot, $slot_creation_time);
+  $publisher4->validate_slot_inactive_since($lsub4_slot, $slot_creation_time);
 
 $subscriber4->start;
 $subscriber4->safe_psql('postgres',
diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl
index 8d6740c734b..07ff5231d33 100644
--- a/src/test/recovery/t/035_standby_logical_decoding.pl
+++ b/src/test/recovery/t/035_standby_logical_decoding.pl
@@ -178,13 +178,15 @@ sub check_slots_conflict_reason
 
 	$res = $node_standby->safe_psql(
 		'postgres', qq(
-			 select invalidation_reason from pg_replication_slots where slot_name = '$active_slot' and conflicting;));
+			 select invalidation_reason from pg_replication_slots where slot_name = '$active_slot' and conflicting;)
+	);
 
 	is($res, "$reason", "$active_slot reason for conflict is $reason");
 
 	$res = $node_standby->safe_psql(
 		'postgres', qq(
-			 select invalidation_reason from pg_replication_slots where slot_name = '$inactive_slot' and conflicting;));
+			 select invalidation_reason from pg_replication_slots where slot_name = '$inactive_slot' and conflicting;)
+	);
 
 	is($res, "$reason", "$inactive_slot reason for conflict is $reason");
 }
@@ -559,7 +561,8 @@ check_slots_conflict_reason('vacuum_full_', 'rows_removed');
 ##################################################
 
 # Get the restart_lsn from an invalidated slot
-my $restart_lsn = $node_standby->safe_psql('postgres',
+my $restart_lsn = $node_standby->safe_psql(
+	'postgres',
 	"SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'vacuum_full_activeslot' AND conflicting;"
 );
 
diff --git a/src/test/recovery/t/037_invalid_database.pl b/src/test/recovery/t/037_invalid_database.pl
index 32b7d8af571..47f524be4cc 100644
--- a/src/test/recovery/t/037_invalid_database.pl
+++ b/src/test/recovery/t/037_invalid_database.pl
@@ -42,11 +42,15 @@ like(
 	qr/FATAL:\s+cannot connect to invalid database "regression_invalid"/,
 	"can't connect to invalid database - error message");
 
-is($node->psql('postgres', 'ALTER DATABASE regression_invalid CONNECTION LIMIT 10'),
-	2, "can't ALTER invalid database");
+is( $node->psql(
+		'postgres', 'ALTER DATABASE regression_invalid CONNECTION LIMIT 10'),
+	2,
+	"can't ALTER invalid database");
 
 # check invalid database can't be used as a template
-is( $node->psql('postgres', 'CREATE DATABASE copy_invalid TEMPLATE regression_invalid'),
+is( $node->psql(
+		'postgres',
+		'CREATE DATABASE copy_invalid TEMPLATE regression_invalid'),
 	3,
 	"can't use invalid database as template");
 
diff --git a/src/test/recovery/t/040_standby_failover_slots_sync.pl b/src/test/recovery/t/040_standby_failover_slots_sync.pl
index 12acf874d70..f0bf0ddc121 100644
--- a/src/test/recovery/t/040_standby_failover_slots_sync.pl
+++ b/src/test/recovery/t/040_standby_failover_slots_sync.pl
@@ -170,7 +170,8 @@ $standby1->start;
 # Capture the inactive_since of the slot from the primary. Note that the slot
 # will be inactive since the corresponding subscription was dropped.
 my $inactive_since_on_primary =
-  $primary->validate_slot_inactive_since('lsub1_slot', $slot_creation_time_on_primary);
+  $primary->validate_slot_inactive_since('lsub1_slot',
+	$slot_creation_time_on_primary);
 
 # Wait for the standby to catch up so that the standby is not lagging behind
 # the failover slots.
@@ -190,7 +191,8 @@ is( $standby1->safe_psql(
 
 # Capture the inactive_since of the synced slot on the standby
 my $inactive_since_on_standby =
-  $standby1->validate_slot_inactive_since('lsub1_slot', $slot_creation_time_on_primary);
+  $standby1->validate_slot_inactive_since('lsub1_slot',
+	$slot_creation_time_on_primary);
 
 # Synced slot on the standby must get its own inactive_since
 is( $standby1->safe_psql(
@@ -264,7 +266,8 @@ $primary->safe_psql(
 # Capture the inactive_since of the slot from the primary. Note that the slot
 # will be inactive since the corresponding subscription was dropped.
 $inactive_since_on_primary =
-  $primary->validate_slot_inactive_since('lsub1_slot', $slot_creation_time_on_primary);
+  $primary->validate_slot_inactive_since('lsub1_slot',
+	$slot_creation_time_on_primary);
 
 # Wait for the standby to catch up so that the standby is not lagging behind
 # the failover slots.
@@ -276,8 +279,8 @@ my $log_offset = -s $standby1->logfile;
 $standby1->safe_psql('postgres', "SELECT pg_sync_replication_slots();");
 
 # Confirm that the invalidated slot has been dropped.
-$standby1->wait_for_log(qr/dropped replication slot "lsub1_slot" of dbid [0-9]+/,
-	$log_offset);
+$standby1->wait_for_log(
+	qr/dropped replication slot "lsub1_slot" of dbid [0-9]+/, $log_offset);
 
 # Confirm that the logical slot has been re-created on the standby and is
 # flagged as 'synced'
@@ -336,7 +339,8 @@ ok( $stderr =~
 	"cannot sync slots if dbname is not specified in primary_conninfo");
 
 # Add the dbname back to the primary_conninfo for further tests
-$standby1->append_conf('postgresql.conf', "primary_conninfo = '$connstr_1 dbname=postgres'");
+$standby1->append_conf('postgresql.conf',
+	"primary_conninfo = '$connstr_1 dbname=postgres'");
 $standby1->reload;
 
 ##################################################
@@ -427,19 +431,20 @@ $primary->wait_for_replay_catchup($standby1);
 # synced slot. See the test where we promote standby (Promote the standby1 to
 # primary.)
 $primary->safe_psql('postgres',
-	"SELECT pg_logical_emit_message(false, 'test', 'test');"
-);
+	"SELECT pg_logical_emit_message(false, 'test', 'test');");
 
 # Get the confirmed_flush_lsn for the logical slot snap_test_slot on the primary
 my $confirmed_flush_lsn = $primary->safe_psql('postgres',
-	"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot';");
+	"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot';"
+);
 
 $standby1->safe_psql('postgres', "SELECT pg_sync_replication_slots();");
 
 # Verify that confirmed_flush_lsn of snap_test_slot slot is synced to the standby
 ok( $standby1->poll_query_until(
 		'postgres',
-		"SELECT '$confirmed_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot' AND synced AND NOT temporary;"),
+		"SELECT '$confirmed_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'snap_test_slot' AND synced AND NOT temporary;"
+	),
 	'confirmed_flush_lsn of slot snap_test_slot synced to standby');
 
@@ -479,22 +484,24 @@ GRANT USAGE on SCHEMA myschema TO repl_role;
 });
 
 # Start the standby with changed primary_conninfo.
-$standby1->append_conf('postgresql.conf', "primary_conninfo = '$connstr_1 dbname=slotsync_test_db user=repl_role'");
+$standby1->append_conf('postgresql.conf',
+	"primary_conninfo = '$connstr_1 dbname=slotsync_test_db user=repl_role'");
 $standby1->start;
 
 # Run the synchronization function. If the sync flow was not prepared
 # to handle such attacks, it would have failed during the validation
 # of the primary_slot_name itself resulting in
 # ERROR: slot synchronization requires valid primary_slot_name
-$standby1->safe_psql('slotsync_test_db', "SELECT pg_sync_replication_slots();");
+$standby1->safe_psql('slotsync_test_db',
+	"SELECT pg_sync_replication_slots();");
 
 # Reset the dbname and user in primary_conninfo to the earlier values.
-$standby1->append_conf('postgresql.conf', "primary_conninfo = '$connstr_1 dbname=postgres'");
+$standby1->append_conf('postgresql.conf',
+	"primary_conninfo = '$connstr_1 dbname=postgres'");
 $standby1->reload;
 
 # Drop the newly created database.
-$primary->psql('postgres',
-	q{DROP DATABASE slotsync_test_db;});
+$primary->psql('postgres', q{DROP DATABASE slotsync_test_db;});
 
 ##################################################
 # Test to confirm that the slot sync worker exits on invalid GUC(s) and
@@ -508,20 +515,21 @@ $standby1->append_conf('postgresql.conf', qq(sync_replication_slots = on));
 $standby1->reload;
 
 # Confirm that the slot sync worker is able to start.
-$standby1->wait_for_log(qr/slot sync worker started/,
-	$log_offset);
+$standby1->wait_for_log(qr/slot sync worker started/, $log_offset);
 
 $log_offset = -s $standby1->logfile;
 
 # Disable another GUC required for slot sync.
-$standby1->append_conf( 'postgresql.conf', qq(hot_standby_feedback = off));
+$standby1->append_conf('postgresql.conf', qq(hot_standby_feedback = off));
 $standby1->reload;
 
 # Confirm that slot sync worker acknowledge the GUC change and logs the msg
 # about wrong configuration.
-$standby1->wait_for_log(qr/slot sync worker will restart because of a parameter change/,
+$standby1->wait_for_log(
+	qr/slot sync worker will restart because of a parameter change/,
 	$log_offset);
-$standby1->wait_for_log(qr/slot synchronization requires hot_standby_feedback to be enabled/,
+$standby1->wait_for_log(
+	qr/slot synchronization requires hot_standby_feedback to be enabled/,
 	$log_offset);
 
 $log_offset = -s $standby1->logfile;
@@ -531,8 +539,7 @@ $standby1->append_conf('postgresql.conf', "hot_standby_feedback = on");
 $standby1->reload;
 
 # Confirm that the slot sync worker is able to start now.
-$standby1->wait_for_log(qr/slot sync worker started/,
-	$log_offset);
+$standby1->wait_for_log(qr/slot sync worker started/, $log_offset);
 
 ##################################################
 # Test to confirm that confirmed_flush_lsn of the logical slot on the primary
@@ -557,7 +564,8 @@ $subscriber1->wait_for_subscription_sync;
 
 # Do not allow any further advancement of the confirmed_flush_lsn for the
 # lsub1_slot.
-$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 DISABLE");
+$subscriber1->safe_psql('postgres',
+	"ALTER SUBSCRIPTION regress_mysub1 DISABLE");
 
 # Wait for the replication slot to become inactive on the publisher
 $primary->poll_query_until(
@@ -567,12 +575,14 @@ $primary->poll_query_until(
 
 # Get the confirmed_flush_lsn for the logical slot lsub1_slot on the primary
 my $primary_flush_lsn = $primary->safe_psql('postgres',
-	"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot';");
+	"SELECT confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot';"
+);
 
 # Confirm that confirmed_flush_lsn of lsub1_slot slot is synced to the standby
 ok( $standby1->poll_query_until(
 		'postgres',
-		"SELECT '$primary_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot' AND synced AND NOT temporary;"),
+		"SELECT '$primary_flush_lsn' = confirmed_flush_lsn from pg_replication_slots WHERE slot_name = 'lsub1_slot' AND synced AND NOT temporary;"
+	),
 	'confirmed_flush_lsn of slot lsub1_slot synced to standby');
 
@@ -636,7 +646,8 @@ $subscriber2->safe_psql(
 
 $subscriber2->wait_for_subscription_sync;
 
-$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 ENABLE");
+$subscriber1->safe_psql('postgres',
+	"ALTER SUBSCRIPTION regress_mysub1 ENABLE");
 
 my $offset = -s $primary->logfile;
 
@@ -674,7 +685,8 @@ $primary->wait_for_log(
 # primary and keeps waiting for the standby specified in standby_slot_names
 # (sb1_slot aka standby1).
 $result =
-  $subscriber1->safe_psql('postgres', "SELECT count(*) <> $primary_row_count FROM tab_int;");
+  $subscriber1->safe_psql('postgres',
+	"SELECT count(*) <> $primary_row_count FROM tab_int;");
 is($result, 't',
 	"subscriber1 doesn't get data from primary until standby1 acknowledges changes"
 );
@@ -714,7 +726,8 @@ $standby1->stop;
 
 # Disable the regress_mysub1 to prevent the logical walsender from generating
 # more warnings.
-$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 DISABLE");
+$subscriber1->safe_psql('postgres',
+	"ALTER SUBSCRIPTION regress_mysub1 DISABLE");
 
 # Wait for the replication slot to become inactive on the publisher
 $primary->poll_query_until(
@@ -758,8 +771,7 @@ $primary->reload;
 $back_q->quit;
 
 $primary->safe_psql('postgres',
-	"SELECT pg_drop_replication_slot('test_slot');"
-);
+	"SELECT pg_drop_replication_slot('test_slot');");
 
 # Add the physical slot (sb1_slot) back to the standby_slot_names for further
 # tests.
@@ -767,7 +779,8 @@ $primary->adjust_conf('postgresql.conf', 'standby_slot_names', "'sb1_slot'");
 $primary->reload;
 
 # Enable the regress_mysub1 for further tests
-$subscriber1->safe_psql('postgres', "ALTER SUBSCRIPTION regress_mysub1 ENABLE");
+$subscriber1->safe_psql('postgres',
+	"ALTER SUBSCRIPTION regress_mysub1 ENABLE");
 
 ##################################################
 # Test that logical replication will wait for the user-created inactive
@@ -835,14 +848,16 @@ $standby1->promote;
 # promotion. We do this check before the slot is enabled on the new primary
 # below, otherwise, the slot gets active setting inactive_since to NULL.
 my $inactive_since_on_new_primary =
-  $standby1->validate_slot_inactive_since('lsub1_slot', $promotion_time_on_primary);
+  $standby1->validate_slot_inactive_since('lsub1_slot',
+	$promotion_time_on_primary);
 
 is( $standby1->safe_psql(
 		'postgres',
 		"SELECT '$inactive_since_on_new_primary'::timestamptz > '$inactive_since_on_primary'::timestamptz"
 	),
 	"t",
-	'synchronized slot has got its own inactive_since on the new primary after promotion');
+	'synchronized slot has got its own inactive_since on the new primary after promotion'
+);
 
 # Update subscription with the new primary's connection info
 my $standby1_conninfo = $standby1->connstr . ' dbname=postgres';
@@ -850,8 +865,10 @@ $subscriber1->safe_psql('postgres',
 	"ALTER SUBSCRIPTION regress_mysub1 CONNECTION '$standby1_conninfo';");
 
 # Confirm the synced slot 'lsub1_slot' is retained on the new primary
-is($standby1->safe_psql('postgres',
-	q{SELECT count(*) = 2 FROM pg_replication_slots WHERE slot_name IN ('lsub1_slot', 'snap_test_slot') AND synced AND NOT temporary;}),
+is( $standby1->safe_psql(
+		'postgres',
+		q{SELECT count(*) = 2 FROM pg_replication_slots WHERE slot_name IN ('lsub1_slot', 'snap_test_slot') AND synced AND NOT temporary;}
+	),
 	't',
 	'synced slot retained on the new primary');
 
@@ -861,9 +878,8 @@ $standby1->safe_psql('postgres',
 $standby1->wait_for_catchup('regress_mysub1');
 
 # Confirm that data in tab_int replicated on the subscriber
-is( $subscriber1->safe_psql('postgres', q{SELECT count(*) FROM tab_int;}),
-	"20",
-	'data replicated from the new primary');
+is($subscriber1->safe_psql('postgres', q{SELECT count(*) FROM tab_int;}),
+	"20", 'data replicated from the new primary');
 
 # Consume the data from the snap_test_slot. The synced slot should reach a
 # consistent point by restoring the snapshot at the restart_lsn serialized
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index 94ff043c8ec..68c1b6b41f6 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -86,7 +86,8 @@ switch_server_cert(
 	restart => 'no');
 
 $result = $node->restart(fail_ok => 1);
-is($result, 0, 'restart fails with password-protected key file with wrong password');
+is($result, 0,
+	'restart fails with password-protected key file with wrong password');
 
 switch_server_cert(
 	$node,
diff --git a/src/test/subscription/t/008_diff_schema.pl b/src/test/subscription/t/008_diff_schema.pl
index a21b9fab5f2..cf04f85584a 100644
--- a/src/test/subscription/t/008_diff_schema.pl
+++ b/src/test/subscription/t/008_diff_schema.pl
@@ -48,7 +48,8 @@ is($result, qq(2|2|2), 'check initial data was copied to subscriber');
 
 # Update the rows on the publisher and check the additional columns on
 # subscriber didn't change
-$node_publisher->safe_psql('postgres', "UPDATE test_tab SET b = encode(sha256(b::bytea), 'hex')");
+$node_publisher->safe_psql('postgres',
+	"UPDATE test_tab SET b = encode(sha256(b::bytea), 'hex')");
 
 $node_publisher->wait_for_catchup('tap_sub');
 
diff --git a/src/test/subscription/t/019_stream_subxact_ddl_abort.pl b/src/test/subscription/t/019_stream_subxact_ddl_abort.pl
index 6f2e4db6ae8..84c0e5ca9ef 100644
--- a/src/test/subscription/t/019_stream_subxact_ddl_abort.pl
+++ b/src/test/subscription/t/019_stream_subxact_ddl_abort.pl
@@ -32,7 +32,8 @@ $node_publisher->safe_psql('postgres',
 
 # Setup structure on subscriber
 $node_subscriber->safe_psql('postgres',
-	"CREATE TABLE test_tab (a int primary key, b bytea, c INT, d INT, e INT)");
+	"CREATE TABLE test_tab (a int primary key, b bytea, c INT, d INT, e INT)"
+);
 
 # Setup logical replication
 my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
diff --git a/src/test/subscription/t/026_stats.pl b/src/test/subscription/t/026_stats.pl
index d1d68fad9af..fb3e5629b3c 100644
--- a/src/test/subscription/t/026_stats.pl
+++ b/src/test/subscription/t/026_stats.pl
@@ -288,8 +288,7 @@ is( $node_subscriber->safe_psql(
 
 	# Since disabling subscription doesn't wait for walsender to release the replication
 	# slot and exit, wait for the slot to become inactive.
-	$node_publisher->poll_query_until(
-		$db,
+	$node_publisher->poll_query_until($db,
 		qq(SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = '$sub2_name' AND active_pid IS NULL))
 	) or die "slot never became inactive";
 
diff --git a/src/test/subscription/t/029_on_error.pl b/src/test/subscription/t/029_on_error.pl
index 7a8e424a22e..5c29e5cc326 100644
--- a/src/test/subscription/t/029_on_error.pl
+++ b/src/test/subscription/t/029_on_error.pl
@@ -166,7 +166,8 @@ BEGIN;
 INSERT INTO tbl SELECT i, sha256(i::text::bytea) FROM generate_series(1, 10000) s(i);
 COMMIT;
 ]);
-test_skip_lsn($node_publisher, $node_subscriber, "(4, sha256(4::text::bytea))",
+test_skip_lsn($node_publisher, $node_subscriber,
+	"(4, sha256(4::text::bytea))",
 	"4", "test skipping stream-commit");
 
 $result = $node_subscriber->safe_psql('postgres',
diff --git a/src/test/subscription/t/032_subscribe_use_index.pl b/src/test/subscription/t/032_subscribe_use_index.pl
index 97b5806acf0..cc999e33c3a 100644
--- a/src/test/subscription/t/032_subscribe_use_index.pl
+++ b/src/test/subscription/t/032_subscribe_use_index.pl
@@ -490,7 +490,8 @@ $node_publisher->safe_psql('postgres',
 $node_subscriber->safe_psql('postgres',
 	"CREATE TABLE test_replica_id_full (x int, y text)");
 $node_subscriber->safe_psql('postgres',
-	"CREATE INDEX test_replica_id_full_idx ON test_replica_id_full USING HASH (x)");
+	"CREATE INDEX test_replica_id_full_idx ON test_replica_id_full USING HASH (x)"
+);
 
 # insert some initial data
 $node_publisher->safe_psql('postgres',
diff --git a/src/test/subscription/t/033_run_as_table_owner.pl b/src/test/subscription/t/033_run_as_table_owner.pl
index c56f22881fe..b129fc3c38e 100644
--- a/src/test/subscription/t/033_run_as_table_owner.pl
+++ b/src/test/subscription/t/033_run_as_table_owner.pl
@@ -207,10 +207,7 @@ GRANT regress_alice TO regress_admin WITH INHERIT FALSE, SET TRUE;
 # the above grant doesn't help.
 publish_insert("alice.unpartitioned", 14);
 expect_failure(
-	"alice.unpartitioned",
-	3,
-	7,
-	13,
+	"alice.unpartitioned", 3, 7, 13,
 	qr/ERROR: ( [A-Z0-9]+:)? permission denied for table unpartitioned/msi,
 	"with no privileges cannot replicate");
 
diff --git a/src/test/subscription/t/100_bugs.pl b/src/test/subscription/t/100_bugs.pl
index e5d7d37a402..cb36ca7b16b 100644
--- a/src/test/subscription/t/100_bugs.pl
+++ b/src/test/subscription/t/100_bugs.pl
@@ -469,23 +469,22 @@ $node_subscriber->safe_psql(
 ));
 
 $node_subscriber->wait_for_subscription_sync($node_publisher, 'sub1');
 
-$result = $node_subscriber->safe_psql('postgres',
-	"SELECT a, b FROM tab_default");
-is($result, qq(1|f
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab_default");
+is( $result, qq(1|f
 2|t), 'check snapshot on subscriber');
 
 # Update all rows in the table and ensure the rows with the missing `b`
 # attribute replicate correctly.
-$node_publisher->safe_psql('postgres',
-	"UPDATE tab_default SET a = a + 1");
+$node_publisher->safe_psql('postgres', "UPDATE tab_default SET a = a + 1");
 
 $node_publisher->wait_for_catchup('sub1');
 
 # When the bug is present, the `1|f` row will not be updated to `2|f` because
 # the publisher incorrectly fills in `NULL` for `b` and publishes an update
 # for `1|NULL`, which doesn't exist in the subscriber.
-$result = $node_subscriber->safe_psql('postgres',
-	"SELECT a, b FROM tab_default");
-is($result, qq(2|f
+$result =
+  $node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab_default");
+is( $result, qq(2|f
 3|t), 'check replicated update on subscriber');
 
 $node_publisher->stop('fast');