# This is only needed on Windows machines that don't use UNIX sockets.
$node->init(
'allows_streaming' => 1,
- 'auth_extra' => [ '--create-role', 'backupuser' ]);
+ 'auth_extra' => [ '--create-role' => 'backupuser' ]);
$node->append_conf('postgresql.conf',
"shared_preload_libraries = 'basebackup_to_shell'");
# to keep test times reasonable. Using @pg_basebackup_defs as the first
# element of the array passed to IPC::Run interpolates the array (as it is
# not a reference to an array)...
-my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
+my @pg_basebackup_defs =
+ ('pg_basebackup', '--no-sync', '--checkpoint' => 'fast');
# This particular test module generally wants to run with -Xfetch, because
# -Xstream is not supported with a backup target, and with -U backupuser.
-my @pg_basebackup_cmd = (@pg_basebackup_defs, '-U', 'backupuser', '-Xfetch');
+my @pg_basebackup_cmd = (
+ @pg_basebackup_defs,
+ '--username' => 'backupuser',
+ '--wal-method' => 'fetch');
# Can't use this module without setting basebackup_to_shell.command.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell' ],
qr/shell command for backup is not configured/,
'fails if basebackup_to_shell.command is not set');
# Should work now.
$node->command_ok(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell' ],
'backup with no detail: pg_basebackup');
verify_backup('', $backup_path, "backup with no detail");
# Should fail with a detail.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell:foo' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell:foo' ],
qr/a target detail is not permitted because the configured command does not include %d/,
'fails if detail provided without %d');
# Should fail due to lack of permission.
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell' ],
qr/permission denied to use basebackup_to_shell/,
'fails if required_role not granted');
# Should fail due to lack of a detail.
$node->safe_psql('postgres', 'GRANT trustworthy TO backupuser');
$node->command_fails_like(
- [ @pg_basebackup_cmd, '--target', 'shell' ],
+ [ @pg_basebackup_cmd, '--target' => 'shell' ],
qr/a target detail is required because the configured command includes %d/,
'fails if %d is present and detail not given');
# Should work.
-$node->command_ok([ @pg_basebackup_cmd, '--target', 'shell:bar' ],
+$node->command_ok([ @pg_basebackup_cmd, '--target' => 'shell:bar' ],
'backup with detail: pg_basebackup');
verify_backup('bar.', $backup_path, "backup with detail");
# Verify.
$node->command_ok(
[
- 'pg_verifybackup', '-n',
- '-m', "${backup_dir}/${prefix}backup_manifest",
- '-e', $extract_path
+ 'pg_verifybackup',
+ '--no-parse-wal',
+ '--manifest-path' => "${backup_dir}/${prefix}backup_manifest",
+ '--exit-on-error',
+ $extract_path
],
"$test_name: backup verifies ok");
}
program_version_ok('initdb');
program_options_handling_ok('initdb');
-command_fails([ 'initdb', '-S', "$tempdir/nonexistent" ],
+command_fails([ 'initdb', '--sync-only', "$tempdir/nonexistent" ],
'sync missing data directory');
mkdir $xlogdir;
mkdir "$xlogdir/lost+found";
-command_fails(
- [ 'initdb', '-X', $xlogdir, $datadir ],
+command_fails([ 'initdb', '--waldir' => $xlogdir, $datadir ],
'existing nonempty xlog directory');
rmdir "$xlogdir/lost+found";
command_fails(
- [ 'initdb', '-X', 'pgxlog', $datadir ],
+ [ 'initdb', '--waldir' => 'pgxlog', $datadir ],
'relative xlog directory not allowed');
-command_fails(
- [ 'initdb', '-U', 'pg_test', $datadir ],
+command_fails([ 'initdb', '--username' => 'pg_test', $datadir ],
'role names cannot begin with "pg_"');
mkdir $datadir;
local (%ENV) = %ENV;
delete $ENV{TZ};
- # while we are here, also exercise -T and -c options
+ # while we are here, also exercise --text-search-config and --set options
command_ok(
[
- 'initdb', '-N', '-T', 'german', '-c',
- 'default_text_search_config=german',
- '-X', $xlogdir, $datadir
+ 'initdb',
+ '--no-sync',
+ '--text-search-config' => 'german',
+ '--set' => 'default_text_search_config=german',
+ '--waldir' => $xlogdir,
+ $datadir
],
'successful creation');
qr/Data page checksum version:.*1/,
'checksums are enabled in control file');
-command_ok([ 'initdb', '-S', $datadir ], 'sync only');
+command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only');
command_fails([ 'initdb', $datadir ], 'existing data directory');
if ($supports_syncfs)
{
- command_ok([ 'initdb', '-S', $datadir, '--sync-method', 'syncfs' ],
+ command_ok(
+ [ 'initdb', '--sync-only', $datadir, '--sync-method' => 'syncfs' ],
'sync method syncfs');
}
else
{
- command_fails([ 'initdb', '-S', $datadir, '--sync-method', 'syncfs' ],
+ command_fails(
+ [ 'initdb', '--sync-only', $datadir, '--sync-method' => 'syncfs' ],
'sync method syncfs');
}
command_like(
[
'initdb', '--no-sync',
- '-A', 'trust',
+ '-A' => 'trust',
'--locale-provider=icu', '--locale=und',
'--lc-collate=C', '--lc-ctype=C',
'--lc-messages=C', '--lc-numeric=C',
],
'fails for invalid option combination');
-command_fails([ 'initdb', '--no-sync', '--set', 'foo=bar', "$tempdir/dataX" ],
+command_fails(
+ [ 'initdb', '--no-sync', '--set' => 'foo=bar', "$tempdir/dataX" ],
'fails for invalid --set option');
# Make sure multiple invocations of -c parameters are handled case-insensitively
# not part of the tests included in pg_checksums to save from
# the creation of an extra instance.
command_fails(
- [ 'pg_checksums', '-D', $datadir_nochecksums ],
+ [ 'pg_checksums', '--pgdata' => $datadir_nochecksums ],
"pg_checksums fails with data checksum disabled");
done_testing();
# Failing to resolve a database pattern is an error by default.
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'qqq', '-d', 'postgres' ],
+ [ 'pg_amcheck', '--database' => 'qqq', '--database' => 'postgres' ],
1,
[qr/^$/],
[qr/pg_amcheck: error: no connectable databases to check matching "qqq"/],
# But only a warning under --no-strict-names
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-d', 'qqq', '-d', 'postgres' ],
+ [
+ 'pg_amcheck',
+ '--no-strict-names',
+ '--database' => 'qqq',
+ '--database' => 'postgres'
+ ],
0,
[qr/^$/],
[
# Check that a substring of an existent database name does not get interpreted
# as a matching pattern.
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'post', '-d', 'postgres' ],
+ [ 'pg_amcheck', '--database' => 'post', '--database' => 'postgres' ],
1,
[qr/^$/],
[
# Check that a superstring of an existent database name does not get interpreted
# as a matching pattern.
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'postgresql', '-d', 'postgres' ],
+ [
+ 'pg_amcheck',
+ '--database' => 'postgresql',
+ '--database' => 'postgres'
+ ],
1,
[qr/^$/],
[
# Test connecting with a non-existent user
# Failing to connect to the initial database due to bad username is an error.
-$node->command_checks_all([ 'pg_amcheck', '-U', 'no_such_user', 'postgres' ],
+$node->command_checks_all(
+ [ 'pg_amcheck', '--username' => 'no_such_user', 'postgres' ],
1, [qr/^$/], [], 'checking with a non-existent user');
#########################################
# Again, but this time with another database to check, so no error is raised.
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'template1', '-d', 'postgres' ],
+ [ 'pg_amcheck', '--database' => 'template1', '--database' => 'postgres' ],
0,
[qr/^$/],
[
# Check three-part unreasonable pattern that has zero-length names
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'postgres', '-t', '..' ],
+ [ 'pg_amcheck', '--database' => 'postgres', '--table' => '..' ],
1,
[qr/^$/],
[
# Again, but with non-trivial schema and relation parts
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'postgres', '-t', '.foo.bar' ],
+ [ 'pg_amcheck', '--database' => 'postgres', '--table' => '.foo.bar' ],
1,
[qr/^$/],
[
# Check two-part unreasonable pattern that has zero-length names
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'postgres', '-t', '.' ],
+ [ 'pg_amcheck', '--database' => 'postgres', '--table' => '.' ],
1,
[qr/^$/],
[qr/pg_amcheck: error: no heap tables to check matching "\."/],
# Check that a multipart database name is rejected
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'localhost.postgres' ],
+ [ 'pg_amcheck', '--database' => 'localhost.postgres' ],
2,
[qr/^$/],
[
# Check that a three-part schema name is rejected
$node->command_checks_all(
- [ 'pg_amcheck', '-s', 'localhost.postgres.pg_catalog' ],
+ [ 'pg_amcheck', '--schema' => 'localhost.postgres.pg_catalog' ],
2,
[qr/^$/],
[
# Check that a four-part table name is rejected
$node->command_checks_all(
- [ 'pg_amcheck', '-t', 'localhost.postgres.pg_catalog.pg_class' ],
+ [ 'pg_amcheck', '--table' => 'localhost.postgres.pg_catalog.pg_class' ],
2,
[qr/^$/],
[
$node->command_checks_all(
[
'pg_amcheck', '--no-strict-names',
- '-t', 'this.is.a.really.long.dotted.string'
+ '--table' => 'this.is.a.really.long.dotted.string'
],
2,
[qr/^$/],
'ungrammatical table names still draw errors under --no-strict-names');
$node->command_checks_all(
[
- 'pg_amcheck', '--no-strict-names', '-s',
- 'postgres.long.dotted.string'
+ 'pg_amcheck', '--no-strict-names',
+ '--schema' => 'postgres.long.dotted.string'
],
2,
[qr/^$/],
'ungrammatical schema names still draw errors under --no-strict-names');
$node->command_checks_all(
[
- 'pg_amcheck', '--no-strict-names', '-d',
- 'postgres.long.dotted.string'
+ 'pg_amcheck', '--no-strict-names',
+ '--database' => 'postgres.long.dotted.string'
],
2,
[qr/^$/],
# Likewise for exclusion patterns
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-T', 'a.b.c.d' ],
+ [ 'pg_amcheck', '--no-strict-names', '--exclude-table' => 'a.b.c.d' ],
2,
[qr/^$/],
[
'ungrammatical table exclusions still draw errors under --no-strict-names'
);
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-S', 'a.b.c' ],
+ [ 'pg_amcheck', '--no-strict-names', '--exclude-schema' => 'a.b.c' ],
2,
[qr/^$/],
[
'ungrammatical schema exclusions still draw errors under --no-strict-names'
);
$node->command_checks_all(
- [ 'pg_amcheck', '--no-strict-names', '-D', 'a.b' ],
+ [ 'pg_amcheck', '--no-strict-names', '--exclude-database' => 'a.b' ],
2,
[qr/^$/],
[
$node->command_checks_all(
[
'pg_amcheck', '--no-strict-names',
- '-t', 'no_such_table',
- '-t', 'no*such*table',
- '-i', 'no_such_index',
- '-i', 'no*such*index',
- '-r', 'no_such_relation',
- '-r', 'no*such*relation',
- '-d', 'no_such_database',
- '-d', 'no*such*database',
- '-r', 'none.none',
- '-r', 'none.none.none',
- '-r', 'postgres.none.none',
- '-r', 'postgres.pg_catalog.none',
- '-r', 'postgres.none.pg_class',
- '-t', 'postgres.pg_catalog.pg_class', # This exists
+ '--table' => 'no_such_table',
+ '--table' => 'no*such*table',
+ '--index' => 'no_such_index',
+ '--index' => 'no*such*index',
+ '--relation' => 'no_such_relation',
+ '--relation' => 'no*such*relation',
+ '--database' => 'no_such_database',
+ '--database' => 'no*such*database',
+ '--relation' => 'none.none',
+ '--relation' => 'none.none.none',
+ '--relation' => 'postgres.none.none',
+ '--relation' => 'postgres.pg_catalog.none',
+ '--relation' => 'postgres.none.pg_class',
+ '--table' => 'postgres.pg_catalog.pg_class', # This exists
],
0,
[qr/^$/],
));
$node->command_checks_all(
- [ 'pg_amcheck', '-d', 'regression_invalid' ],
+ [ 'pg_amcheck', '--database' => 'regression_invalid' ],
1,
[qr/^$/],
[
$node->command_checks_all(
[
- 'pg_amcheck', '-d', 'postgres', '-t', 'regression_invalid.public.foo',
+ 'pg_amcheck',
+ '--database' => 'postgres',
+ '--table' => 'regression_invalid.public.foo',
],
1,
[qr/^$/],
$node->command_checks_all(
[
- 'pg_amcheck', '-d',
- 'postgres', '--no-strict-names',
- '-t', 'template1.public.foo',
- '-t', 'another_db.public.foo',
- '-t', 'no_such_database.public.foo',
- '-i', 'template1.public.foo_idx',
- '-i', 'another_db.public.foo_idx',
- '-i', 'no_such_database.public.foo_idx',
+ 'pg_amcheck',
+ '--database' => 'postgres',
+ '--no-strict-names',
+ '--table' => 'template1.public.foo',
+ '--table' => 'another_db.public.foo',
+ '--table' => 'no_such_database.public.foo',
+ '--index' => 'template1.public.foo_idx',
+ '--index' => 'another_db.public.foo_idx',
+ '--index' => 'no_such_database.public.foo_idx',
],
1,
[qr/^$/],
# Check with only schema exclusion patterns
$node->command_checks_all(
[
- 'pg_amcheck', '--all', '--no-strict-names', '-S',
- 'public', '-S', 'pg_catalog', '-S',
- 'pg_toast', '-S', 'information_schema',
+ 'pg_amcheck',
+ '--all',
+ '--no-strict-names',
+ '--exclude-schema' => 'public',
+ '--exclude-schema' => 'pg_catalog',
+ '--exclude-schema' => 'pg_toast',
+ '--exclude-schema' => 'information_schema',
],
1,
[qr/^$/],
# Check with schema exclusion patterns overriding relation and schema inclusion patterns
$node->command_checks_all(
[
- 'pg_amcheck', '--all', '--no-strict-names', '-s',
- 'public', '-s', 'pg_catalog', '-s',
- 'pg_toast', '-s', 'information_schema', '-t',
- 'pg_catalog.pg_class', '-S*'
+ 'pg_amcheck',
+ '--all',
+ '--no-strict-names',
+ '--schema' => 'public',
+ '--schema' => 'pg_catalog',
+ '--schema' => 'pg_toast',
+ '--schema' => 'information_schema',
+ '--table' => 'pg_catalog.pg_class',
+ '--exclude-schema' => '*'
],
1,
[qr/^$/],
#
# Standard first arguments to PostgreSQL::Test::Utils functions
-my @cmd = ('pg_amcheck', '-p', $port);
+my @cmd = ('pg_amcheck', '--port' => $port);
# Regular expressions to match various expected output
my $no_output_re = qr/^$/;
# yet corrupted anything. As such, we expect no corruption and verify that
# none is reported
#
-$node->command_checks_all([ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ],
- 0, [$no_output_re], [$no_output_re], 'pg_amcheck prior to corruption');
+$node->command_checks_all(
+ [
+ @cmd,
+ '--database' => 'db1',
+ '--database' => 'db2',
+ '--database' => 'db3'
+ ],
+ 0,
+ [$no_output_re],
+ [$no_output_re],
+ 'pg_amcheck prior to corruption');
# Perform the corruptions we planned above using only a single database restart.
#
'pg_amcheck all schemas, tables and indexes in database db1');
$node->command_checks_all(
- [ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ],
+ [
+ @cmd,
+ '--database' => 'db1',
+ '--database' => 'db2',
+ '--database' => 'db3'
+ ],
2,
[
$index_missing_relation_fork_re, $line_pointer_corruption_re,
# complaint on stderr, but otherwise stderr should be quiet.
#
$node->command_checks_all(
- [ @cmd, '--all', '-s', 's1', '-i', 't1_btree' ],
+ [ @cmd, '--all', '--schema' => 's1', '--index' => 't1_btree' ],
2,
[$index_missing_relation_fork_re],
[
'pg_amcheck index s1.t1_btree reports missing main relation fork');
$node->command_checks_all(
- [ @cmd, '-d', 'db1', '-s', 's1', '-i', 't2_btree' ],
+ [
+ @cmd,
+ '--database' => 'db1',
+ '--schema' => 's1',
+ '--index' => 't2_btree'
+ ],
2,
[qr/.+/], # Any non-empty error message is acceptable
[$no_output_re],
# are quiet.
#
$node->command_checks_all(
- [ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ],
+ [ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db1' ],
0, [$no_output_re], [$no_output_re],
'pg_amcheck of db1.s1 excluding indexes');
# Checking db2.s1 should show table corruptions if indexes are excluded
#
$node->command_checks_all(
- [ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db2' ],
- 2, [$missing_file_re], [$no_output_re],
+ [ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db2' ],
+ 2,
+ [$missing_file_re],
+ [$no_output_re],
'pg_amcheck of db2.s1 excluding indexes');
# In schema db1.s3, the tables and indexes are both corrupt. We should see
# corruption messages on stdout, and nothing on stderr.
#
$node->command_checks_all(
- [ @cmd, '-s', 's3', 'db1' ],
+ [ @cmd, '--schema' => 's3', 'db1' ],
2,
[
$index_missing_relation_fork_re, $line_pointer_corruption_re,
# In schema db1.s4, only toast tables are corrupt. Check that under default
# options the toast corruption is reported, but when excluding toast we get no
# error reports.
-$node->command_checks_all([ @cmd, '-s', 's4', 'db1' ],
+$node->command_checks_all([ @cmd, '--schema' => 's4', 'db1' ],
2, [$missing_file_re], [$no_output_re],
'pg_amcheck in schema s4 reports toast corruption');
$node->command_checks_all(
[
- @cmd, '--no-dependent-toast', '--exclude-toast-pointers', '-s', 's4',
+ @cmd,
+ '--no-dependent-toast',
+ '--exclude-toast-pointers',
+ '--schema' => 's4',
'db1'
],
0,
'pg_amcheck in schema s4 excluding toast reports no corruption');
# Check that no corruption is reported in schema db1.s5
-$node->command_checks_all([ @cmd, '-s', 's5', 'db1' ],
+$node->command_checks_all([ @cmd, '--schema' => 's5', 'db1' ],
0, [$no_output_re], [$no_output_re],
'pg_amcheck over schema s5 reports no corruption');
# the indexes, no corruption is reported about the schema.
#
$node->command_checks_all(
- [ @cmd, '-s', 's1', '-I', 't1_btree', '-I', 't2_btree', 'db1' ],
+ [
+ @cmd,
+ '--schema' => 's1',
+ '--exclude-index' => 't1_btree',
+ '--exclude-index' => 't2_btree',
+ 'db1'
+ ],
0,
[$no_output_re],
[$no_output_re],
# about the schema.
#
$node->command_checks_all(
- [ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ],
+ [ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db1' ],
0,
[$no_output_re],
[$no_output_re],
# tables that no corruption is reported.
#
$node->command_checks_all(
- [ @cmd, '-s', 's2', '-T', 't1', '-T', 't2', 'db1' ],
+ [
+ @cmd,
+ '--schema' => 's2',
+ '--exclude-table' => 't1',
+ '--exclude-table' => 't2',
+ 'db1'
+ ],
0,
[$no_output_re],
[$no_output_re],
# to avoid getting messages about corrupt tables or indexes.
#
command_fails_like(
- [ @cmd, '-s', 's5', '--startblock', 'junk', 'db1' ],
+ [ @cmd, '--schema' => 's5', '--startblock' => 'junk', 'db1' ],
qr/invalid start block/,
'pg_amcheck rejects garbage startblock');
command_fails_like(
- [ @cmd, '-s', 's5', '--endblock', '1234junk', 'db1' ],
+ [ @cmd, '--schema' => 's5', '--endblock' => '1234junk', 'db1' ],
qr/invalid end block/,
'pg_amcheck rejects garbage endblock');
command_fails_like(
- [ @cmd, '-s', 's5', '--startblock', '5', '--endblock', '4', 'db1' ],
+ [
+ @cmd,
+ '--schema' => 's5',
+ '--startblock' => '5',
+ '--endblock' => '4',
+ 'db1'
+ ],
qr/end block precedes start block/,
'pg_amcheck rejects invalid block range');
# arguments are handled sensibly.
#
$node->command_checks_all(
- [ @cmd, '-s', 's1', '-i', 't1_btree', '--parent-check', 'db1' ],
+ [
+ @cmd,
+ '--schema' => 's1',
+ '--index' => 't1_btree',
+ '--parent-check', 'db1'
+ ],
2,
[$index_missing_relation_fork_re],
[$no_output_re],
$node->command_checks_all(
[
- @cmd, '-s', 's1', '-i', 't1_btree', '--heapallindexed',
+ @cmd,
+ '--schema' => 's1',
+ '--index' => 't1_btree',
+ '--heapallindexed',
'--rootdescend', 'db1'
],
2,
'pg_amcheck smoke test --heapallindexed --rootdescend');
$node->command_checks_all(
- [ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3', '-S', 's*' ],
- 0, [$no_output_re], [$no_output_re],
+ [
+ @cmd,
+ '--database' => 'db1',
+ '--database' => 'db2',
+ '--database' => 'db3',
+ '--exclude-schema' => 's*'
+ ],
+ 0,
+ [$no_output_re],
+ [$no_output_re],
'pg_amcheck excluding all corrupt schemas');
$node->command_checks_all(
[
- @cmd, '-s', 's1', '-i', 't1_btree', '--parent-check',
+ @cmd,
+ '--schema' => 's1',
+ '--index' => 't1_btree',
+ '--parent-check',
'--checkunique', 'db1'
],
2,
$node->command_checks_all(
[
- @cmd, '-s', 's1', '-i', 't1_btree', '--heapallindexed',
+ @cmd,
+ '--schema' => 's1',
+ '--index' => 't1_btree',
+ '--heapallindexed',
'--rootdescend', '--checkunique', 'db1'
],
2,
$node->command_checks_all(
[
- @cmd, '--checkunique', '-d', 'db1', '-d', 'db2',
- '-d', 'db3', '-S', 's*'
+ @cmd,
+ '--checkunique',
+ '--database' => 'db1',
+ '--database' => 'db2',
+ '--database' => 'db3',
+ '--exclude-schema' => 's*'
],
0,
[$no_output_re],
# Check that pg_amcheck runs against the uncorrupted table without error.
$node->command_ok(
- [ 'pg_amcheck', '-p', $port, 'postgres' ],
+ [ 'pg_amcheck', '--port' => $port, 'postgres' ],
'pg_amcheck test table, prior to corruption');
# Check that pg_amcheck runs against the uncorrupted table and index without error.
-$node->command_ok([ 'pg_amcheck', '-p', $port, 'postgres' ],
+$node->command_ok(
+ [ 'pg_amcheck', '--port' => $port, 'postgres' ],
'pg_amcheck test table and index, prior to corruption');
$node->stop;
# Run pg_amcheck against the corrupt table with epoch=0, comparing actual
# corruption messages against the expected messages
$node->command_checks_all(
- [ 'pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres' ],
+ [ 'pg_amcheck', '--no-dependent-indexes', '--port' => $port, 'postgres' ],
2, [@expected], [], 'Expected corruption message output');
$node->safe_psql(
'postgres', qq(
));
# We have not yet broken the index, so we should get no corruption
-$node->command_like([ 'pg_amcheck', '-p', $node->port, 'postgres' ],
+$node->command_like([ 'pg_amcheck', '--port' => $node->port, 'postgres' ],
qr/^$/,
'pg_amcheck all schemas, tables and indexes reports no corruption');
# Index corruption should now be reported
$node->command_checks_all(
- [ 'pg_amcheck', '-p', $node->port, 'postgres' ],
+ [ 'pg_amcheck', '--port' => $node->port, 'postgres' ],
2,
[qr/item order invariant violated for index "fickleidx"/],
[],
# We should get no corruptions
$node->command_like(
- [ 'pg_amcheck', '--checkunique', '-p', $node->port, 'postgres' ],
+ [ 'pg_amcheck', '--checkunique', '--port' => $node->port, 'postgres' ],
qr/^$/,
'pg_amcheck all schemas, tables and indexes reports no corruption');
# Unique index corruption should now be reported
$node->command_checks_all(
- [ 'pg_amcheck', '--checkunique', '-p', $node->port, 'postgres' ],
+ [ 'pg_amcheck', '--checkunique', '--port' => $node->port, 'postgres' ],
2,
[qr/index uniqueness is violated for index "bttest_unique_idx"/],
[],
# Initialize node without replication settings
$node->init(
extra => ['--data-checksums'],
- auth_extra => [ '--create-role', 'backupuser' ]);
+ auth_extra => [ '--create-role' => 'backupuser' ]);
$node->start;
my $pgdata = $node->data_dir;
# Sanity checks for options
$node->command_fails_like(
- [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none:1' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => "$tempdir/backup",
+ '--compress' => 'none:1'
+ ],
qr/\Qcompression algorithm "none" does not accept a compression level/,
'failure if method "none" specified with compression level');
$node->command_fails_like(
- [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none+' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => "$tempdir/backup",
+ '--compress' => 'none+'
+ ],
qr/\Qunrecognized compression algorithm: "none+"/,
'failure on incorrect separator to define compression level');
$node->reload;
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup" ],
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup" ],
'pg_basebackup fails because of WAL configuration');
ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
or BAIL_OUT("unable to create $tempdir/backup");
append_to_file("$tempdir/backup/dir-not-empty.txt", "Some data");
-$node->command_fails([ @pg_basebackup_defs, '-D', "$tempdir/backup", '-n' ],
+$node->command_fails(
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup", '-n' ],
'failing run with no-clean option');
ok(-d "$tempdir/backup", 'backup directory was created and left behind');
my $sfail = quotemeta($server_fails . $cft->[1]);
$node->command_fails_like(
[
- 'pg_basebackup', '-D',
- "$tempdir/backup", '--compress',
- $cft->[0]
+ 'pg_basebackup',
+ '--pgdata' => "$tempdir/backup",
+ '--compress' => $cft->[0],
],
qr/$cfail/,
'client ' . $cft->[2]);
$node->command_fails_like(
[
- 'pg_basebackup', '-D',
- "$tempdir/backup", '--compress',
- 'server-' . $cft->[0]
+ 'pg_basebackup',
+ '--pgdata' => "$tempdir/backup",
+ '--compress' => 'server-' . $cft->[0],
],
qr/$sfail/,
'server ' . $cft->[2]);
# Run base backup.
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup",
+ '--wal-method' => 'none'
+ ],
'pg_basebackup runs');
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup2", '--no-manifest',
- '--waldir', "$tempdir/xlog2"
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup2",
+ '--no-manifest',
+ '--waldir' => "$tempdir/xlog2"
],
'separate xlog directory');
ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
rmtree("$tempdir/backup2");
rmtree("$tempdir/xlog2");
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup", '-Ft' ],
+$node->command_ok(
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/tarbackup",
+ '--format' => 'tar'
+ ],
'tar format');
ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
rmtree("$tempdir/tarbackup");
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T=/foo" ],
- '-T with empty old directory fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => '=/foo'
+ ],
+ '--tablespace-mapping with empty old directory fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=" ],
- '-T with empty new directory fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => '/foo='
+ ],
+ '--tablespace-mapping with empty new directory fails');
$node->command_fails(
[
- @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp',
- "-T/foo=/bar=/baz"
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => '/foo=/bar=/baz'
],
- '-T with multiple = fails');
+ '--tablespace-mapping with multiple = fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo=/bar" ],
- '-T with old directory not absolute fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => 'foo=/bar'
+ ],
+ '--tablespace-mapping with old directory not absolute fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=bar" ],
- '-T with new directory not absolute fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => '/foo=bar'
+ ],
+ '--tablespace-mapping with new directory not absolute fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo" ],
- '-T with invalid format fails');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_foo",
+ '--format' => 'plain',
+ '--tablespace-mapping' => 'foo'
+ ],
+ '--tablespace-mapping with invalid format fails');
my $superlongname = "superlongname_" . ("x" x 100);
# Tar format doesn't support filenames longer than 100 bytes.
or die "unable to create file $superlongpath";
close $file;
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l1", '-Ft' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/tarbackup_l1",
+ '--format' => 'tar'
+ ],
'pg_basebackup tar with long name fails');
unlink "$superlongpath";
}
$node->safe_psql('postgres',
"CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
. "INSERT INTO test1 VALUES (1234);");
-$node->backup('tarbackup2', backup_options => ['-Ft']);
+$node->backup('tarbackup2', backup_options => [ '--format' => 'tar' ]);
# empty test1, just so that it's different from the to-be-restored data
$node->safe_psql('postgres', "TRUNCATE TABLE test1;");
}
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup1", '-Fp' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup1",
+ '--format' => 'plain'
+ ],
'plain format with tablespaces fails without tablespace mapping');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup1", '-Fp',
- "-T$realTsDir=$tempdir/tbackup/tblspc1",
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup1",
+ '--format' => 'plain',
+ '--tablespace-mapping' => "$realTsDir=$tempdir/tbackup/tblspc1",
],
'plain format with tablespaces succeeds with tablespace mapping');
ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated');
$realTsDir =~ s/=/\\=/;
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup3", '-Fp',
- "-T$realTsDir=$tempdir/tbackup/tbl\\=spc2",
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup3",
+ '--format' => 'plain',
+ '--tablespace-mapping' => "$realTsDir=$tempdir/tbackup/tbl\\=spc2",
],
'mapping tablespace with = sign in path');
ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated');
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/tarbackup_l3",
+ '--format' => 'tar'
+ ],
'pg_basebackup tar with long symlink target');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
rmtree("$tempdir/tarbackup_l3");
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ],
- 'pg_basebackup -R runs');
+$node->command_ok(
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupR",
+ '--write-recovery-conf'
+ ],
+ 'pg_basebackup --write-recovery-conf runs');
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
qr/^primary_conninfo = '.*port=$port.*'\n/m,
'postgresql.auto.conf sets primary_conninfo');
-$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxd" ],
+$node->command_ok([ @pg_basebackup_defs, '--pgdata' => "$tempdir/backupxd" ],
'pg_basebackup runs in default xlog mode');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxd");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ],
- 'pg_basebackup -X fetch runs');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxf",
+ '--wal-method' => 'fetch'
+ ],
+ 'pg_basebackup --wal-method fetch runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxf/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxf");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs", '-X', 'stream' ],
- 'pg_basebackup -X stream runs');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs",
+ '--wal-method' => 'stream'
+ ],
+ 'pg_basebackup --wal-method stream runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxs");
$node->command_ok(
[
- @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream',
- '-Ft'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxst",
+ '--wal-method' => 'stream',
+ '--format' => 'tar'
],
- 'pg_basebackup -X stream runs in tar mode');
+ 'pg_basebackup --wal-method stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
rmtree("$tempdir/backupxst");
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupnoslot", '-X',
- 'stream', '--no-slot'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupnoslot",
+ '--wal-method' => 'stream',
+ '--no-slot'
],
- 'pg_basebackup -X stream runs with --no-slot');
+ 'pg_basebackup --wal-method stream runs with --no-slot');
rmtree("$tempdir/backupnoslot");
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ],
- 'pg_basebackup -X fetch runs');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxf",
+ '--wal-method' => 'fetch'
+ ],
+ 'pg_basebackup --wal-method fetch runs');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole' ],
+ [ @pg_basebackup_defs, '--target' => 'blackhole' ],
qr/WAL cannot be streamed when a backup target is specified/,
- 'backup target requires -X');
+ 'backup target requires --wal-method');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'stream' ],
+ [
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--wal-method' => 'stream'
+ ],
qr/WAL cannot be streamed when a backup target is specified/,
- 'backup target requires -X other than -X stream');
+ 'backup target requires --wal-method other than --wal-method stream');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'bogus', '-X', 'none' ],
+ [ @pg_basebackup_defs, '--target' => 'bogus', '--wal-method' => 'none' ],
qr/unrecognized target/,
'backup target unrecognized');
$node->command_fails_like(
[
- @pg_basebackup_defs, '--target', 'blackhole', '-X',
- 'none', '-D', "$tempdir/blackhole"
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--wal-method' => 'none',
+ '--pgdata' => "$tempdir/blackhole"
],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
$node->command_fails_like(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none', '-Ft' ],
+ [
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--wal-method' => 'none',
+ '--format' => 'tar'
+ ],
qr/cannot specify both format and backup target/,
'backup target and output directory');
$node->command_ok(
- [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ],
+ [
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--wal-method' => 'none'
+ ],
'backup target blackhole');
$node->command_ok(
[
- @pg_basebackup_defs, '--target',
- "server:$tempdir/backuponserver", '-X',
- 'none'
+ @pg_basebackup_defs,
+ '--target' => "server:$tempdir/backuponserver",
+ '--wal-method' => 'none'
],
'backup target server');
ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created');
'create backup user');
$node->command_ok(
[
- @pg_basebackup_defs, '-U', 'backupuser', '--target',
- "server:$tempdir/backuponserver",
- '-X', 'none'
+ @pg_basebackup_defs,
+ '--username' => 'backupuser',
+ '--target' => "server:$tempdir/backuponserver",
+ '--wal-method' => 'none'
],
'backup target server');
ok( -f "$tempdir/backuponserver/base.tar",
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_sl_fail", '-X',
- 'stream', '-S',
- 'slot0'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_sl_fail",
+ '--wal-method' => 'stream',
+ '--slot' => 'slot0'
],
'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ],
- 'pg_basebackup -C fails without slot name');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot'
+ ],
+ 'pg_basebackup --create-slot fails without slot name');
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot", '-C',
- '-S', 'slot0',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot',
+ '--slot' => 'slot0',
'--no-slot'
],
- 'pg_basebackup fails with -C -S --no-slot');
+ 'pg_basebackup fails with --create-slot --slot --no-slot');
$node->command_fails_like(
[
- @pg_basebackup_defs, '--target', 'blackhole', '-D',
- "$tempdir/blackhole"
+ @pg_basebackup_defs,
+ '--target' => 'blackhole',
+ '--pgdata' => "$tempdir/blackhole"
],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backuptr/co", '-X', 'none' ],
- 'pg_basebackup -X fetch runs');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backuptr/co",
+ '--wal-method' => 'none'
+ ],
+ 'pg_basebackup --wal-method none runs');
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_sl_fail", '-X',
- 'stream', '-S',
- 'slot0'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_sl_fail",
+ '--wal-method' => 'stream',
+ '--slot' => 'slot0'
],
'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ],
- 'pg_basebackup -C fails without slot name');
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot'
+ ],
+ 'pg_basebackup --create-slot fails without slot name');
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot", '-C',
- '-S', 'slot0',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot',
+ '--slot' => 'slot0',
'--no-slot'
],
- 'pg_basebackup fails with -C -S --no-slot');
+ 'pg_basebackup fails with --create-slot --slot --no-slot');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot", '-C',
- '-S', 'slot0'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot",
+ '--create-slot',
+ '--slot' => 'slot0'
],
- 'pg_basebackup -C runs');
+ 'pg_basebackup --create-slot runs');
rmtree("$tempdir/backupxs_slot");
is( $node->safe_psql(
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot1", '-C',
- '-S', 'slot0'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_slot1",
+ '--create-slot',
+ '--slot' => 'slot0'
],
- 'pg_basebackup fails with -C -S and a previously existing slot');
+ 'pg_basebackup fails with --create-slot --slot and a previously existing slot'
+);
$node->safe_psql('postgres',
q{SELECT * FROM pg_create_physical_replication_slot('slot1')});
is($lsn, '', 'restart LSN of new slot is null');
$node->command_fails(
[
- @pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
- 'slot1', '-X', 'none'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/fail",
+ '--slot' => 'slot1',
+ '--wal-method' => 'none'
],
'pg_basebackup with replication slot fails without WAL streaming');
$node->command_ok(
[
- @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X',
- 'stream', '-S', 'slot1'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_sl",
+ '--wal-method' => 'stream',
+ '--slot' => 'slot1'
],
- 'pg_basebackup -X stream with replication slot runs');
+ 'pg_basebackup --wal-method stream with replication slot runs');
$lsn = $node->safe_psql('postgres',
q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot1'}
);
$node->command_ok(
[
- @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X',
- 'stream', '-S', 'slot1', '-R',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backupxs_sl_R",
+ '--wal-method' => 'stream',
+ '--slot' => 'slot1',
+ '--write-recovery-conf',
],
- 'pg_basebackup with replication slot and -R runs');
+ 'pg_basebackup with replication slot and --write-recovery-conf runs');
like(
slurp_file("$tempdir/backupxs_sl_R/postgresql.auto.conf"),
qr/^primary_slot_name = 'slot1'\n/m,
$node->command_ok(
[
- @pg_basebackup_defs, '-D', "$tempdir/backup_dbname_R", '-X',
- 'stream', '-d', "dbname=db1", '-R',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_dbname_R",
+ '--wal-method' => 'stream',
+ '--dbname' => "dbname=db1",
+ '--write-recovery-conf',
],
- 'pg_basebackup with dbname and -R runs');
+ 'pg_basebackup with dbname and --write-recovery-conf runs');
like(slurp_file("$tempdir/backup_dbname_R/postgresql.auto.conf"),
qr/dbname=db1/m, 'recovery conf file sets dbname');
$node->start;
$node->command_checks_all(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt" ],
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt" ],
1,
[qr{^$}],
[qr/^WARNING.*checksum verification failed/s],
$node->start;
$node->command_checks_all(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt2" ],
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt2" ],
1,
[qr{^$}],
[qr/^WARNING.*further.*failures.*will.not.be.reported/s],
$node->start;
$node->command_checks_all(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt3" ],
+ [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt3" ],
1,
[qr{^$}],
[qr/^WARNING.*7 total checksum verification failures/s],
# do not verify checksums, should return ok
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup_corrupt4", '--no-verify-checksums',
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_corrupt4",
+ '--no-verify-checksums',
],
'pg_basebackup with -k does not report checksum mismatch');
rmtree("$tempdir/backup_corrupt4");
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup_gzip", '--compress',
- '1', '--format',
- 't'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_gzip",
+ '--compress' => '1',
+ '--format' => 't'
],
'pg_basebackup with --compress');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup_gzip2", '--gzip',
- '--format', 't'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_gzip2",
+ '--gzip',
+ '--format' => 't'
],
'pg_basebackup with --gzip');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
- "$tempdir/backup_gzip3", '--compress',
- 'gzip:1', '--format',
- 't'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/backup_gzip3",
+ '--compress' => 'gzip:1',
+ '--format' => 't'
],
'pg_basebackup with --compress=gzip:1');
my $sigchld_bb = IPC::Run::start(
[
@pg_basebackup_defs, '--wal-method=stream',
- '-D', "$tempdir/sigchld",
- '--max-rate=32', '-d',
- $node->connstr('postgres')
- ],
- '<',
- \$sigchld_bb_stdin,
- '>',
- \$sigchld_bb_stdout,
- '2>',
- \$sigchld_bb_stderr,
+ '--pgdata' => "$tempdir/sigchld",
+ '--max-rate' => '32',
+ '--dbname' => $node->connstr('postgres')
+ ],
+ '<' => \$sigchld_bb_stdin,
+ '>' => \$sigchld_bb_stdout,
+ '2>' => \$sigchld_bb_stderr,
$sigchld_bb_timeout);
is( $node->poll_query_until(
$node2->command_fails_like(
[
- @pg_basebackup_defs, '-D',
- "$tempdir" . '/diff_sysid', '--incremental',
- "$backupdir" . '/backup_manifest'
+ @pg_basebackup_defs,
+ '--pgdata' => "$tempdir/diff_sysid",
+ '--incremental' => "$backupdir/backup_manifest",
],
qr/system identifier in backup manifest is .*, but database system identifier is/,
"pg_basebackup fails with different database system manifest");
# to keep test times reasonable. Using @pg_basebackup_defs as the first
# element of the array passed to IPC::Run interpolate the array (as it is
# not a reference to an array)...
-my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
+my @pg_basebackup_defs =
+ ('pg_basebackup', '--no-sync', '--checkpoint' => 'fast');
# Set up an instance.
my $node = PostgreSQL::Test::Cluster->new('main');
# Back it up.
my $backupdir = $tempdir . '/backup';
$node->command_ok(
- [ @pg_basebackup_defs, '-D', $backupdir, '-Ft', '-X', 'none' ],
+ [
+ @pg_basebackup_defs,
+ '--pgdata' => $backupdir,
+ '--format' => 'tar',
+ '--wal-method' => 'none'
+ ],
'pg_basebackup runs');
# Make sure we got base.tar and one tablespace.
$primary->command_fails(['pg_receivewal'],
'pg_receivewal needs target directory specified');
$primary->command_fails(
- [ 'pg_receivewal', '-D', $stream_dir, '--create-slot', '--drop-slot' ],
+ [
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--create-slot',
+ '--drop-slot',
+ ],
'failure if both --create-slot and --drop-slot specified');
$primary->command_fails(
- [ 'pg_receivewal', '-D', $stream_dir, '--create-slot' ],
+ [ 'pg_receivewal', '--directory' => $stream_dir, '--create-slot' ],
'failure if --create-slot specified without --slot');
$primary->command_fails(
- [ 'pg_receivewal', '-D', $stream_dir, '--synchronous', '--no-sync' ],
+ [
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--synchronous',
+ '--no-sync',
+ ],
'failure if --synchronous specified with --no-sync');
$primary->command_fails_like(
- [ 'pg_receivewal', '-D', $stream_dir, '--compress', 'none:1', ],
+ [
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--compress' => 'none:1',
+ ],
qr/\Qpg_receivewal: error: invalid compression specification: compression algorithm "none" does not accept a compression level/,
'failure if --compress none:N (where N > 0)');
# Slot creation and drop
my $slot_name = 'test';
$primary->command_ok(
- [ 'pg_receivewal', '--slot', $slot_name, '--create-slot' ],
+ [ 'pg_receivewal', '--slot' => $slot_name, '--create-slot' ],
'creating a replication slot');
my $slot = $primary->slot($slot_name);
is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
-$primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ],
+$primary->command_ok(
+ [ 'pg_receivewal', '--slot' => $slot_name, '--drop-slot' ],
'dropping a replication slot');
is($primary->slot($slot_name)->{'slot_type'},
'', 'replication slot was removed');
# compression involved.
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--synchronous', '--no-loop'
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--synchronous',
+ '--no-loop',
],
'streaming some WAL with --synchronous');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--compress', 'gzip:1',
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--compress' => 'gzip:1',
'--no-loop'
],
"streaming some WAL using ZLIB compression");
# Stream up to the given position.
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--no-loop', '--compress',
- 'lz4'
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--no-loop',
+ '--compress' => 'lz4'
],
'streaming some WAL using --compress=lz4');
$primary->psql('postgres', 'INSERT INTO test_table VALUES (4);');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--no-loop'
+ 'pg_receivewal',
+ '--directory' => $stream_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--no-loop'
],
"streaming some WAL");
# Check case where the slot does not exist.
$primary->command_fails_like(
[
- 'pg_receivewal', '-D', $slot_dir, '--slot',
- 'nonexistentslot', '-n', '--no-sync', '--verbose',
- '--endpos', $nextlsn
+ 'pg_receivewal',
+ '--directory' => $slot_dir,
+ '--slot' => 'nonexistentslot',
+ '--no-loop',
+ '--no-sync',
+ '--verbose',
+ '--endpos' => $nextlsn
],
qr/pg_receivewal: error: replication slot "nonexistentslot" does not exist/,
'pg_receivewal fails with non-existing slot');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $slot_dir, '--slot',
- $slot_name, '-n', '--no-sync', '--verbose',
- '--endpos', $nextlsn
+ 'pg_receivewal',
+ '--directory' => $slot_dir,
+ '--slot' => $slot_name,
+ '--no-loop',
+ '--no-sync',
+ '--verbose',
+ '--endpos' => $nextlsn
],
"WAL streamed from the slot's restart_lsn");
ok(-e "$slot_dir/$walfile_streamed",
$standby->command_ok(
[
- 'pg_receivewal', '-D', $timeline_dir, '--verbose',
- '--endpos', $nextlsn, '--slot', $archive_slot,
- '--no-sync', '-n'
+ 'pg_receivewal',
+ '--directory' => $timeline_dir,
+ '--verbose',
+ '--endpos' => $nextlsn,
+ '--slot' => $archive_slot,
+ '--no-sync',
+ '--no-loop'
],
"Stream some wal after promoting, resuming from the slot's position");
ok(-e "$timeline_dir/$walfile_before_promotion",
$node->start;
$node->command_fails(['pg_recvlogical'], 'pg_recvlogical needs a slot name');
-$node->command_fails([ 'pg_recvlogical', '-S', 'test' ],
+$node->command_fails(
+ [ 'pg_recvlogical', '--slot' => 'test' ],
'pg_recvlogical needs a database');
-$node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ],
+$node->command_fails(
+ [ 'pg_recvlogical', '--slot' => 'test', '--dbname' => 'postgres' ],
'pg_recvlogical needs an action');
$node->command_fails(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--start'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--start',
],
'no destination file');
$node->command_ok(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--create-slot'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--create-slot',
],
'slot created');
$node->command_ok(
[
- 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
- '--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--start',
+ '--endpos' => $nextlsn,
+ '--no-loop',
+ '--file' => '-',
],
'replayed a transaction');
$node->command_ok(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--drop-slot'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--drop-slot'
],
'slot dropped');
#test with two-phase option enabled
$node->command_ok(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--create-slot',
- '--two-phase'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--create-slot',
+ '--two-phase',
],
'slot with two-phase created');
$node->command_fails(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
- $node->connstr('postgres'), '--start',
- '--endpos', "$nextlsn",
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--start',
+ '--endpos' => $nextlsn,
'--two-phase', '--no-loop',
- '-f', '-'
+ '--file' => '-',
],
'incorrect usage');
$node->command_ok(
[
- 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
- '--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-'
+ 'pg_recvlogical',
+ '--slot' => 'test',
+ '--dbname' => $node->connstr('postgres'),
+ '--start',
+ '--endpos' => $nextlsn,
+ '--no-loop',
+ '--file' => '-',
],
'replayed a two-phase transaction');
command_fails(['pg_createsubscriber'],
'no subscriber data directory specified');
command_fails(
- [ 'pg_createsubscriber', '--pgdata', $datadir ],
+ [ 'pg_createsubscriber', '--pgdata' => $datadir ],
'no publisher connection string specified');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
],
'no database name specified');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--database', 'pg1',
- '--database', 'pg1'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--database' => 'pg1',
+ '--database' => 'pg1',
],
'duplicate database name');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--publication', 'foo1',
- '--publication', 'foo1',
- '--database', 'pg1',
- '--database', 'pg2'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--publication' => 'foo1',
+ '--publication' => 'foo1',
+ '--database' => 'pg1',
+ '--database' => 'pg2',
],
'duplicate publication name');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--publication', 'foo1',
- '--database', 'pg1',
- '--database', 'pg2'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--publication' => 'foo1',
+ '--database' => 'pg1',
+ '--database' => 'pg2',
],
'wrong number of publication names');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--publication', 'foo1',
- '--publication', 'foo2',
- '--subscription', 'bar1',
- '--database', 'pg1',
- '--database', 'pg2'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--publication' => 'foo1',
+ '--publication' => 'foo2',
+ '--subscription' => 'bar1',
+ '--database' => 'pg1',
+ '--database' => 'pg2',
],
'wrong number of subscription names');
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $datadir,
- '--publisher-server', 'port=5432',
- '--publication', 'foo1',
- '--publication', 'foo2',
- '--subscription', 'bar1',
- '--subscription', 'bar2',
- '--replication-slot', 'baz1',
- '--database', 'pg1',
- '--database', 'pg2'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $datadir,
+ '--publisher-server' => 'port=5432',
+ '--publication' => 'foo1',
+ '--publication' => 'foo2',
+ '--subscription' => 'bar1',
+ '--subscription' => 'bar2',
+ '--replication-slot' => 'baz1',
+ '--database' => 'pg1',
+ '--database' => 'pg2',
],
'wrong number of replication slot names');
# Run pg_createsubscriber on a promoted server
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_t->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_t->host, '--subscriber-port',
- $node_t->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_t->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_t->host,
+ '--subscriber-port' => $node_t->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'target server is not in recovery');
# Run pg_createsubscriber when standby is running
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'standby is up and running');
# Run pg_createsubscriber on about-to-fail node F
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--pgdata', $node_f->data_dir,
- '--publisher-server', $node_p->connstr($db1),
- '--socketdir', $node_f->host,
- '--subscriber-port', $node_f->port,
- '--database', $db1,
- '--database', $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--pgdata' => $node_f->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_f->host,
+ '--subscriber-port' => $node_f->port,
+ '--database' => $db1,
+ '--database' => $db2
],
'subscriber data directory is not a copy of the source database cluster');
# Run pg_createsubscriber on node C (P -> S -> C)
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_c->data_dir, '--publisher-server',
- $node_s->connstr($db1), '--socketdir',
- $node_c->host, '--subscriber-port',
- $node_c->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_c->data_dir,
+ '--publisher-server' => $node_s->connstr($db1),
+ '--socketdir' => $node_c->host,
+ '--subscriber-port' => $node_c->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'primary server is in recovery');
$node_s->stop;
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'primary contains unmet conditions on node P');
# Restore default settings here but only apply it after testing standby. Some
});
command_fails(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--database' => $db1,
+ '--database' => $db2,
],
'standby contains unmet conditions on node S');
$node_s->append_conf(
# dry run mode on node S
command_ok(
[
- 'pg_createsubscriber', '--verbose',
- '--recovery-timeout', "$PostgreSQL::Test::Utils::timeout_default",
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--publication',
- 'pub1', '--publication',
- 'pub2', '--subscription',
- 'sub1', '--subscription',
- 'sub2', '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--recovery-timeout' => $PostgreSQL::Test::Utils::timeout_default,
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--publication' => 'pub1',
+ '--publication' => 'pub2',
+ '--subscription' => 'sub1',
+ '--subscription' => 'sub2',
+ '--database' => $db1,
+ '--database' => $db2,
],
'run pg_createsubscriber --dry-run on node S');
# pg_createsubscriber can run without --databases option
command_ok(
[
- 'pg_createsubscriber', '--verbose',
- '--dry-run', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--replication-slot',
- 'replslot1'
+ 'pg_createsubscriber',
+ '--verbose',
+ '--dry-run',
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--replication-slot' => 'replslot1',
],
'run pg_createsubscriber without --databases');
-# Run pg_createsubscriber on node S
+# Run pg_createsubscriber on node S. --verbose is used twice
+# to show more information.
command_ok(
[
- 'pg_createsubscriber', '--verbose',
- '--recovery-timeout', "$PostgreSQL::Test::Utils::timeout_default",
- '--verbose', '--pgdata',
- $node_s->data_dir, '--publisher-server',
- $node_p->connstr($db1), '--socketdir',
- $node_s->host, '--subscriber-port',
- $node_s->port, '--publication',
- 'pub1', '--publication',
- 'Pub2', '--replication-slot',
- 'replslot1', '--replication-slot',
- 'replslot2', '--database',
- $db1, '--database',
- $db2
+ 'pg_createsubscriber',
+ '--verbose', '--verbose',
+ '--recovery-timeout' => $PostgreSQL::Test::Utils::timeout_default,
+ '--pgdata' => $node_s->data_dir,
+ '--publisher-server' => $node_p->connstr($db1),
+ '--socketdir' => $node_s->host,
+ '--subscriber-port' => $node_s->port,
+ '--publication' => 'pub1',
+ '--publication' => 'Pub2',
+ '--replication-slot' => 'replslot1',
+ '--replication-slot' => 'replslot2',
+ '--database' => $db1,
+ '--database' => $db2,
],
'run pg_createsubscriber on node S');
# corrupted yet.
command_ok(
[
- 'pg_checksums', '--check',
- '-D', $pgdata,
- '--filenode', $relfilenode_corrupted
+ 'pg_checksums',
+ '--check',
+ '--pgdata' => $pgdata,
+ '--filenode' => $relfilenode_corrupted,
],
"succeeds for single relfilenode on tablespace $tablespace with offline cluster"
);
# Checksum checks on single relfilenode fail
$node->command_checks_all(
[
- 'pg_checksums', '--check',
- '-D', $pgdata,
- '--filenode', $relfilenode_corrupted
+ 'pg_checksums',
+ '--check',
+ '--pgdata' => $pgdata,
+ '--filenode' => $relfilenode_corrupted,
],
1,
[qr/Bad checksums:.*1/],
# Global checksum checks fail as well
$node->command_checks_all(
- [ 'pg_checksums', '--check', '-D', $pgdata ],
+ [ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
1,
[qr/Bad checksums:.*1/],
[qr/checksum verification failed/],
$node->start;
$node->safe_psql('postgres', "DROP TABLE $table;");
$node->stop;
- $node->command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
+ $node->command_ok(
+ [ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
"succeeds again after table drop on tablespace $tablespace");
$node->start;
unless ($Config{osname} eq 'darwin');
# Enable checksums.
-command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+command_ok([ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ],
"checksums successfully enabled in cluster");
# Successive attempt to enable checksums fails.
-command_fails([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+command_fails(
+ [ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ],
"enabling checksums fails if already enabled");
# Control file should know that checksums are enabled.
# Disable checksums again. Flush result here as that should be cheap.
command_ok(
- [ 'pg_checksums', '--disable', '-D', $pgdata ],
+ [ 'pg_checksums', '--disable', '--pgdata' => $pgdata ],
"checksums successfully disabled in cluster");
# Successive attempt to disable checksums fails.
command_fails(
- [ 'pg_checksums', '--disable', '--no-sync', '-D', $pgdata ],
+ [ 'pg_checksums', '--disable', '--no-sync', '--pgdata' => $pgdata ],
"disabling checksums fails if already disabled");
# Control file should know that checksums are disabled.
'checksums disabled in control file');
# Enable checksums again for follow-up tests.
-command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
+command_ok([ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ],
"checksums successfully enabled in cluster");
# Control file should know that checksums are enabled.
'checksums enabled in control file');
# Checksums pass on a newly-created cluster
-command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
+command_ok([ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
"succeeds with offline cluster");
# Checksums are verified if no other arguments are specified
command_ok(
- [ 'pg_checksums', '-D', $pgdata ],
+ [ 'pg_checksums', '--pgdata' => $pgdata ],
"verifies checksums as default action");
# Specific relation files cannot be requested when action is --disable
# or --enable.
command_fails(
- [ 'pg_checksums', '--disable', '--filenode', '1234', '-D', $pgdata ],
+ [
+ 'pg_checksums',
+ '--disable',
+ '--filenode' => '1234',
+ '--pgdata' => $pgdata
+ ],
"fails when relfilenodes are requested and action is --disable");
command_fails(
- [ 'pg_checksums', '--enable', '--filenode', '1234', '-D', $pgdata ],
+ [
+ 'pg_checksums',
+ '--enable',
+ '--filenode' => '1234',
+ '--pgdata' => $pgdata
+ ],
"fails when relfilenodes are requested and action is --enable");
# Test postgres -C for an offline cluster.
# account on Windows.
command_checks_all(
[
- 'pg_ctl', 'start', '-D', $pgdata, '-s', '-o',
- '-C data_checksums -c log_min_messages=fatal'
+ 'pg_ctl', 'start',
+ '--silent',
+ '--pgdata' => $pgdata,
+ '-o' => '-C data_checksums -c log_min_messages=fatal',
],
1,
[qr/^on$/],
# Checks cannot happen with an online cluster
$node->start;
-command_fails([ 'pg_checksums', '--check', '-D', $pgdata ],
+command_fails([ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
"fails with online cluster");
# Check corruption of table on default tablespace.
append_to_file $file_name, "foo";
$node->command_checks_all(
- [ 'pg_checksums', '--check', '-D', $pgdata ],
+ [ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
1,
[qr/^$/],
[qr/could not read block 0 in file.*$file\":/],
# when verifying checksums.
mkdir "$tablespace_dir/PG_99_999999991/";
append_to_file "$tablespace_dir/PG_99_999999991/foo", "123";
-command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
+command_ok([ 'pg_checksums', '--check', '--pgdata' => $pgdata ],
"succeeds with foreign tablespace");
# Authorized relation files filled with corrupted data cause the
mkdir($tsbackup1path) || die "mkdir $tsbackup1path: $!";
$primary->command_ok(
[
- 'pg_basebackup', '-D',
- $backup1path, '--no-sync',
- '-cfast', "-T${tsprimary}=${tsbackup1path}"
+ 'pg_basebackup',
+ '--no-sync',
+ '--pgdata' => $backup1path,
+ '--checkpoint' => 'fast',
+ '--tablespace-mapping' => "${tsprimary}=${tsbackup1path}"
],
"full backup");
mkdir($tsbackup2path) || die "mkdir $tsbackup2path: $!";
$primary->command_ok(
[
- 'pg_basebackup', '-D',
- $backup2path, '--no-sync',
- '-cfast', "-T${tsprimary}=${tsbackup2path}",
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--no-sync',
+ '--pgdata' => $backup2path,
+ '--checkpoint' => 'fast',
+ '--tablespace-mapping' => "${tsprimary}=${tsbackup2path}",
+ '--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup");
my $dump2 = $backupdir . '/pitr2.dump';
$pitr1->command_ok(
[
- 'pg_dumpall', '-f',
- $dump1, '--no-sync',
- '--no-unlogged-table-data', '-d',
- $pitr1->connstr('postgres'),
+ 'pg_dumpall',
+ '--no-sync',
+ '--no-unlogged-table-data',
+ '--file' => $dump1,
+ '--dbname' => $pitr1->connstr('postgres'),
],
'dump from PITR 1');
$pitr2->command_ok(
[
- 'pg_dumpall', '-f',
- $dump2, '--no-sync',
- '--no-unlogged-table-data', '-d',
- $pitr2->connstr('postgres'),
+ 'pg_dumpall',
+ '--no-sync',
+ '--no-unlogged-table-data',
+ '--file' => $dump2,
+ '--dbname' => $pitr2->connstr('postgres'),
],
'dump from PITR 2');
# Take a full backup.
my $backup1path = $node1->backup_dir . '/backup1';
$node1->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"full backup from node1");
# Insert a second row on the original node.
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup from node1");
my $backup3path = $node1->backup_dir . '/backup3';
$node2->command_ok(
[
- 'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
- '--incremental', $backup2path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup3path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup2path . '/backup_manifest'
],
"incremental backup from node2");
# Take a full backup.
my $original_backup_path = $node->backup_dir . '/original';
$node->command_ok(
- [ 'pg_basebackup', '-D', $original_backup_path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $original_backup_path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ ],
"full backup");
# Verify the full backup.
my $revised_backup_path = $node->backup_dir . '/' . $backup_name;
$node->command_ok(
[
- 'pg_combinebackup', $original_backup_path,
- '-o', $revised_backup_path,
- '--no-sync', @extra_options
+ 'pg_combinebackup',
+ $original_backup_path,
+ '--output' => $revised_backup_path,
+ '--no-sync',
+ @extra_options,
],
"pg_combinebackup with @extra_options");
if (defined $failure_pattern)
# Take a full backup from node1.
my $backup1path = $node1->backup_dir . '/backup1';
$node1->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ ],
"full backup from node1");
# Now take an incremental backup.
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest',
],
"incremental backup from node1");
my $backup3path = $node1->backup_dir . '/backup3';
$node1->command_ok(
[
- 'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast',
- '--incremental', $backup2path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup3path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup2path . '/backup_manifest',
],
"another incremental backup from node1");
# Take a full backup from node2.
my $backupother1path = $node1->backup_dir . '/backupother1';
$node2->command_ok(
- [ 'pg_basebackup', '-D', $backupother1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backupother1path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ ],
"full backup from node2");
# Take an incremental backup from node2.
my $backupother2path = $node1->backup_dir . '/backupother2';
$node2->command_ok(
[
- 'pg_basebackup', '-D', $backupother2path, '--no-sync', '-cfast',
- '--incremental', $backupother1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backupother2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backupother1path . '/backup_manifest',
],
"incremental backup from node2");
# Can't combine 2 full backups.
$node1->command_fails_like(
[
- 'pg_combinebackup', $backup1path, $backup1path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $backup1path, $backup1path,
+ '--output' => $resultpath,
+ $mode,
],
qr/is a full backup, but only the first backup should be a full backup/,
"can't combine full backups");
# Can't combine 2 incremental backups.
$node1->command_fails_like(
[
- 'pg_combinebackup', $backup2path, $backup2path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $backup2path, $backup2path,
+ '--output' => $resultpath,
+ $mode,
],
qr/is an incremental backup, but the first backup should be a full backup/,
"can't combine 2 incremental backups");
# Can't combine full backup with an incremental backup from a different system.
$node1->command_fails_like(
[
- 'pg_combinebackup', $backup1path, $backupother2path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $backup1path, $backupother2path,
+ '--output' => $resultpath,
+ $mode,
],
qr/expected system identifier.*but found/,
"can't combine backups from different nodes");
$node1->command_fails_like(
[
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
- '-o', $resultpath, $mode
+ '--output' => $resultpath,
+ $mode,
],
qr/ manifest system identifier is .*, but control file has /,
"can't combine backups with different manifest system identifier");
# Can't omit a required backup.
$node1->command_fails_like(
[
- 'pg_combinebackup', $backup1path, $backup3path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $backup1path, $backup3path,
+ '--output' => $resultpath,
+ $mode,
],
qr/starts at LSN.*but expected/,
"can't omit a required backup");
$node1->command_fails_like(
[
'pg_combinebackup', $backup1path, $backup3path, $backup2path,
- '-o', $resultpath, $mode
+ '--output' => $resultpath,
+ $mode,
],
qr/starts at LSN.*but expected/,
"can't combine backups in the wrong order");
$node1->command_ok(
[
'pg_combinebackup', $backup1path, $backup2path, $backup3path,
- '-o', $resultpath, $mode
+ '--output' => $resultpath,
+ $mode,
],
"can combine 3 matching backups");
rmtree($resultpath);
my $synthetic12path = $node1->backup_dir . '/synthetic12';
$node1->command_ok(
[
- 'pg_combinebackup', $backup1path, $backup2path, '-o',
- $synthetic12path, $mode
+ 'pg_combinebackup', $backup1path, $backup2path,
+ '--output' => $synthetic12path,
+ $mode,
],
"can combine 2 matching backups");
# Can combine result of previous step with second incremental.
$node1->command_ok(
[
- 'pg_combinebackup', $synthetic12path,
- $backup3path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $synthetic12path, $backup3path,
+ '--output' => $resultpath,
+ $mode,
],
"can combine synthetic backup with later incremental");
rmtree($resultpath);
# Can't combine result of 1+2 with 2.
$node1->command_fails_like(
[
- 'pg_combinebackup', $synthetic12path,
- $backup2path, '-o',
- $resultpath, $mode
+ 'pg_combinebackup', $synthetic12path, $backup2path,
+ '--output' => $resultpath,
+ $mode,
],
qr/starts at LSN.*but expected/,
"can't combine synthetic backup with included incremental");
# Take a full backup.
my $backup1path = $primary->backup_dir . '/backup1';
$primary->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"full backup");
# Now make some database changes.
my $backup2path = $primary->backup_dir . '/backup2';
$primary->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup");
# Take a full backup.
my $backup1path = $node1->backup_dir . '/backup1';
$node1->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"full backup");
# Switch to wal_level=minimal, which also requires max_wal_senders=0 and
my $backup2path = $node1->backup_dir . '/backup2';
$node1->command_fails_like(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest'
],
qr/WAL summaries are required on timeline 1 from.*are incomplete/,
"incremental backup fails");
# Take a full backup.
my $backup1path = $node1->backup_dir . '/backup1';
$node1->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ ],
"full backup from node1");
# Checkpoint and record LSN after.
my $backup2path = $node1->backup_dir . '/backup2';
$node2->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest',
],
"incremental backup from node2");
# Take a full backup.
my $backup1path = $primary->backup_dir . '/backup1';
$primary->command_ok(
- [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ],
+ [
+ 'pg_basebackup',
+ '--pgdata' => $backup1path,
+ '--no-sync',
+ '--checkpoint' => 'fast'
+ ],
"full backup");
# Take an incremental backup.
my $backup2path = $primary->backup_dir . '/backup2';
$primary->command_ok(
[
- 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast',
- '--incremental', $backup1path . '/backup_manifest'
+ 'pg_basebackup',
+ '--pgdata' => $backup2path,
+ '--no-sync',
+ '--checkpoint' => 'fast',
+ '--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup");
# pg_combinebackup should fail.
my $outpath = $primary->backup_dir . '/out';
$primary->command_fails_like(
- [ 'pg_combinebackup', $backup1path, $backup2path, '-o', $outpath, ],
+ [
+ 'pg_combinebackup', $backup1path,
+ $backup2path, '--output' => $outpath,
+ ],
qr/full backup contains unexpected incremental file/,
"pg_combinebackup fails");
program_version_ok('pg_ctl');
program_options_handling_ok('pg_ctl');
-command_exit_is([ 'pg_ctl', 'start', '-D', "$tempdir/nonexistent" ],
+command_exit_is([ 'pg_ctl', 'start', '--pgdata' => "$tempdir/nonexistent" ],
1, 'pg_ctl start with nonexistent directory');
-command_ok([ 'pg_ctl', 'initdb', '-D', "$tempdir/data", '-o', '-N' ],
+command_ok(
+ [
+ 'pg_ctl', 'initdb',
+ '--pgdata' => "$tempdir/data",
+ '--options' => '--no-sync'
+ ],
'pg_ctl initdb');
command_ok([ $ENV{PG_REGRESS}, '--config-auth', "$tempdir/data" ],
'configure authentication');
}
close $conf;
my $ctlcmd = [
- 'pg_ctl', 'start', '-D', "$tempdir/data", '-l',
- "$PostgreSQL::Test::Utils::log_path/001_start_stop_server.log"
+ 'pg_ctl', 'start',
+ '--pgdata' => "$tempdir/data",
+ '--log' => "$PostgreSQL::Test::Utils::log_path/001_start_stop_server.log"
];
command_like($ctlcmd, qr/done.*server started/s, 'pg_ctl start');
# postmaster they start. Waiting more than the 2 seconds slop time allowed
# by wait_for_postmaster() prevents that mistake.
sleep 3 if ($windows_os);
-command_fails([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
+command_fails([ 'pg_ctl', 'start', '--pgdata' => "$tempdir/data" ],
'second pg_ctl start fails');
-command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ], 'pg_ctl stop');
-command_fails([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ],
+command_ok([ 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data" ],
+ 'pg_ctl stop');
+command_fails([ 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data" ],
'second pg_ctl stop fails');
# Log file for default permission test. The permissions won't be checked on
# Windows but we still want to do the restart test.
my $logFileName = "$tempdir/data/perm-test-600.log";
-command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-l', $logFileName ],
+command_ok(
+ [
+ 'pg_ctl', 'restart',
+ '--pgdata' => "$tempdir/data",
+ '--log' => $logFileName
+ ],
'pg_ctl restart with server not running');
# Permissions on log file should be default
skip "group access not supported on Windows", 3
if ($windows_os || $Config::Config{osname} eq 'cygwin');
- system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/data";
+ system_or_bail 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data";
# Change the data dir mode so log file will be created with group read
# privileges on the next start
chmod_recursive("$tempdir/data", 0750, 0640);
command_ok(
- [ 'pg_ctl', 'start', '-D', "$tempdir/data", '-l', $logFileName ],
+ [
+ 'pg_ctl', 'start',
+ '--pgdata' => "$tempdir/data",
+ '--log' => $logFileName
+ ],
'start server to check group permissions');
ok(-f $logFileName);
ok(check_mode_recursive("$tempdir/data", 0750, 0640));
}
-command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
+command_ok([ 'pg_ctl', 'restart', '--pgdata' => "$tempdir/data" ],
'pg_ctl restart with server running');
-system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/data";
+system_or_bail 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data";
done_testing();
my $tempdir = PostgreSQL::Test::Utils::tempdir;
-command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ],
+command_exit_is([ 'pg_ctl', 'status', '--pgdata' => "$tempdir/nonexistent" ],
4, 'pg_ctl status with nonexistent directory');
my $node = PostgreSQL::Test::Cluster->new('main');
$node->init;
-command_exit_is([ 'pg_ctl', 'status', '-D', $node->data_dir ],
+command_exit_is([ 'pg_ctl', 'status', '--pgdata' => $node->data_dir ],
3, 'pg_ctl status with server not running');
-system_or_bail 'pg_ctl', '-l', "$tempdir/logfile", '-D',
- $node->data_dir, '-w', 'start';
-command_exit_is([ 'pg_ctl', 'status', '-D', $node->data_dir ],
+system_or_bail(
+ 'pg_ctl',
+ '--log' => "$tempdir/logfile",
+ '--pgdata' => $node->data_dir,
+ '--wait', 'start');
+command_exit_is([ 'pg_ctl', 'status', '--pgdata' => $node->data_dir ],
0, 'pg_ctl status with server running');
-system_or_bail 'pg_ctl', 'stop', '-D', $node->data_dir;
+system_or_bail 'pg_ctl', 'stop', '--pgdata' => $node->data_dir;
done_testing();
my $tempdir = PostgreSQL::Test::Utils::tempdir;
command_fails_like(
- [ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ],
+ [ 'pg_ctl', '--pgdata' => "$tempdir/nonexistent", 'promote' ],
qr/directory .* does not exist/,
'pg_ctl promote with nonexistent directory');
$node_primary->init(allows_streaming => 1);
command_fails_like(
- [ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
+ [ 'pg_ctl', '--pgdata' => $node_primary->data_dir, 'promote' ],
qr/PID file .* does not exist/,
'pg_ctl promote of not running instance fails');
$node_primary->start;
command_fails_like(
- [ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
+ [ 'pg_ctl', '--pgdata' => $node_primary->data_dir, 'promote' ],
qr/not in standby mode/,
'pg_ctl promote of primary instance fails');
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
't', 'standby is in recovery');
-command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, '-W', 'promote' ],
- 'pg_ctl -W promote of standby runs');
+command_ok(
+ [
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--no-wait', 'promote'
+ ],
+ 'pg_ctl --no-wait promote of standby runs');
ok( $node_standby->poll_query_until(
'postgres', 'SELECT NOT pg_is_in_recovery()'),
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
't', 'standby is in recovery');
-command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, 'promote' ],
+command_ok([ 'pg_ctl', '--pgdata' => $node_standby->data_dir, 'promote' ],
'pg_ctl promote of standby runs');
# no wait here
my %pgdump_runs = (
binary_upgrade => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- '--format=custom',
- "--file=$tempdir/binary_upgrade.dump",
- '-w',
+ 'pg_dump', '--no-sync',
+ '--format' => 'custom',
+ '--file' => "$tempdir/binary_upgrade.dump",
+ '--no-password',
'--schema-only',
'--binary-upgrade',
- '-d', 'postgres', # alternative way to specify database
+ '--dbname' => 'postgres', # alternative way to specify database
],
restore_cmd => [
- 'pg_restore', '-Fc', '--verbose',
- "--file=$tempdir/binary_upgrade.sql",
+ 'pg_restore',
+ '--format' => 'custom',
+ '--verbose',
+ '--file' => "$tempdir/binary_upgrade.sql",
"$tempdir/binary_upgrade.dump",
],
},
test_key => 'compression',
compile_option => 'gzip',
dump_cmd => [
- 'pg_dump', '--format=custom',
- '--compress=1', "--file=$tempdir/compression_gzip_custom.dump",
+ 'pg_dump',
+ '--format' => 'custom',
+ '--compress' => '1',
+ '--file' => "$tempdir/compression_gzip_custom.dump",
'postgres',
],
restore_cmd => [
'pg_restore',
- "--file=$tempdir/compression_gzip_custom.sql",
+ '--file' => "$tempdir/compression_gzip_custom.sql",
"$tempdir/compression_gzip_custom.dump",
],
command_like => {
command => [
- 'pg_restore', '-l', "$tempdir/compression_gzip_custom.dump",
+ 'pg_restore', '--list',
+ "$tempdir/compression_gzip_custom.dump",
],
expected => qr/Compression: gzip/,
name => 'data content is gzip-compressed'
test_key => 'compression',
compile_option => 'gzip',
dump_cmd => [
- 'pg_dump', '--jobs=2',
- '--format=directory', '--compress=gzip:1',
- "--file=$tempdir/compression_gzip_dir", 'postgres',
+ 'pg_dump',
+ '--jobs' => '2',
+ '--format' => 'directory',
+ '--compress' => 'gzip:1',
+ '--file' => "$tempdir/compression_gzip_dir",
+ 'postgres',
],
# Give coverage for manually compressed blobs.toc files during
# restore.
"$tempdir/compression_gzip_dir/*.dat.gz",
],
restore_cmd => [
- 'pg_restore', '--jobs=2',
- "--file=$tempdir/compression_gzip_dir.sql",
+ 'pg_restore',
+ '--jobs' => '2',
+ '--file' => "$tempdir/compression_gzip_dir.sql",
"$tempdir/compression_gzip_dir",
],
},
test_key => 'compression',
compile_option => 'gzip',
dump_cmd => [
- 'pg_dump', '--format=plain', '-Z1',
- "--file=$tempdir/compression_gzip_plain.sql.gz", 'postgres',
+ 'pg_dump',
+ '--format' => 'plain',
+ '--compress' => '1',
+ '--file' => "$tempdir/compression_gzip_plain.sql.gz",
+ 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
test_key => 'compression',
compile_option => 'lz4',
dump_cmd => [
- 'pg_dump', '--format=custom',
- '--compress=lz4', "--file=$tempdir/compression_lz4_custom.dump",
+ 'pg_dump',
+ '--format' => 'custom',
+ '--compress' => 'lz4',
+ '--file' => "$tempdir/compression_lz4_custom.dump",
'postgres',
],
restore_cmd => [
'pg_restore',
- "--file=$tempdir/compression_lz4_custom.sql",
+ '--file' => "$tempdir/compression_lz4_custom.sql",
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
- command =>
- [ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ],
+ command => [
+ 'pg_restore', '--list',
+ "$tempdir/compression_lz4_custom.dump",
+ ],
expected => qr/Compression: lz4/,
name => 'data content is lz4 compressed'
},
test_key => 'compression',
compile_option => 'lz4',
dump_cmd => [
- 'pg_dump', '--jobs=2',
- '--format=directory', '--compress=lz4:1',
- "--file=$tempdir/compression_lz4_dir", 'postgres',
+ 'pg_dump',
+ '--jobs' => '2',
+ '--format' => 'directory',
+ '--compress' => 'lz4:1',
+ '--file' => "$tempdir/compression_lz4_dir",
+ 'postgres',
],
# Verify that data files were compressed
glob_patterns => [
"$tempdir/compression_lz4_dir/*.dat.lz4",
],
restore_cmd => [
- 'pg_restore', '--jobs=2',
- "--file=$tempdir/compression_lz4_dir.sql",
+ 'pg_restore',
+ '--jobs' => '2',
+ '--file' => "$tempdir/compression_lz4_dir.sql",
"$tempdir/compression_lz4_dir",
],
},
test_key => 'compression',
compile_option => 'lz4',
dump_cmd => [
- 'pg_dump', '--format=plain', '--compress=lz4',
- "--file=$tempdir/compression_lz4_plain.sql.lz4", 'postgres',
+ 'pg_dump',
+ '--format' => 'plain',
+ '--compress' => 'lz4',
+ '--file' => "$tempdir/compression_lz4_plain.sql.lz4",
+ 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
test_key => 'compression',
compile_option => 'zstd',
dump_cmd => [
- 'pg_dump', '--format=custom',
- '--compress=zstd', "--file=$tempdir/compression_zstd_custom.dump",
+ 'pg_dump',
+ '--format' => 'custom',
+ '--compress' => 'zstd',
+ '--file' => "$tempdir/compression_zstd_custom.dump",
'postgres',
],
restore_cmd => [
'pg_restore',
- "--file=$tempdir/compression_zstd_custom.sql",
+ '--file' => "$tempdir/compression_zstd_custom.sql",
"$tempdir/compression_zstd_custom.dump",
],
command_like => {
command => [
- 'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump",
+ 'pg_restore', '--list',
+ "$tempdir/compression_zstd_custom.dump",
],
expected => qr/Compression: zstd/,
name => 'data content is zstd compressed'
test_key => 'compression',
compile_option => 'zstd',
dump_cmd => [
- 'pg_dump', '--jobs=2',
- '--format=directory', '--compress=zstd:1',
- "--file=$tempdir/compression_zstd_dir", 'postgres',
+ 'pg_dump',
+ '--jobs' => '2',
+ '--format' => 'directory',
+ '--compress' => 'zstd:1',
+ '--file' => "$tempdir/compression_zstd_dir",
+ 'postgres',
],
# Give coverage for manually compressed blobs.toc files during
# restore.
"$tempdir/compression_zstd_dir/*.dat.zst",
],
restore_cmd => [
- 'pg_restore', '--jobs=2',
- "--file=$tempdir/compression_zstd_dir.sql",
+ 'pg_restore',
+ '--jobs' => '2',
+ '--file' => "$tempdir/compression_zstd_dir.sql",
"$tempdir/compression_zstd_dir",
],
},
test_key => 'compression',
compile_option => 'zstd',
dump_cmd => [
- 'pg_dump', '--format=plain', '--compress=zstd:long',
- "--file=$tempdir/compression_zstd_plain.sql.zst", 'postgres',
+ 'pg_dump',
+ '--format' => 'plain',
+ '--compress' => 'zstd:long',
+ '--file' => "$tempdir/compression_zstd_plain.sql.zst",
+ 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
clean => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/clean.sql",
- '-c',
- '-d', 'postgres', # alternative way to specify database
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/clean.sql",
+ '--clean',
+ '--dbname' => 'postgres', # alternative way to specify database
],
},
clean_if_exists => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/clean_if_exists.sql",
- '-c',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/clean_if_exists.sql",
+ '--clean',
'--if-exists',
- '--encoding=UTF8', # no-op, just tests that option is accepted
+ '--encoding' => 'UTF8', # no-op, just for testing
'postgres',
],
},
column_inserts => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/column_inserts.sql", '-a',
+ '--file' => "$tempdir/column_inserts.sql",
+ '--data-only',
'--column-inserts', 'postgres',
],
},
createdb => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/createdb.sql",
- '-C',
- '-R', # no-op, just for testing
- '-v',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/createdb.sql",
+ '--create',
+ '--no-reconnect', # no-op, just for testing
+ '--verbose',
'postgres',
],
},
data_only => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/data_only.sql",
- '-a',
- '--superuser=test_superuser',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/data_only.sql",
+ '--data-only',
+ '--superuser' => 'test_superuser',
'--disable-triggers',
- '-v', # no-op, just make sure it works
+ '--verbose', # no-op, just make sure it works
'postgres',
],
},
defaults => {
dump_cmd => [
'pg_dump', '--no-sync',
- '-f', "$tempdir/defaults.sql",
+ '--file' => "$tempdir/defaults.sql",
'postgres',
],
},
defaults_no_public => {
database => 'regress_pg_dump_test',
dump_cmd => [
- 'pg_dump', '--no-sync', '-f', "$tempdir/defaults_no_public.sql",
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/defaults_no_public.sql",
'regress_pg_dump_test',
],
},
defaults_no_public_clean => {
database => 'regress_pg_dump_test',
dump_cmd => [
- 'pg_dump', '--no-sync', '-c', '-f',
- "$tempdir/defaults_no_public_clean.sql",
+ 'pg_dump', '--no-sync',
+ '--clean',
+ '--file' => "$tempdir/defaults_no_public_clean.sql",
'regress_pg_dump_test',
],
},
defaults_public_owner => {
database => 'regress_public_owner',
dump_cmd => [
- 'pg_dump', '--no-sync', '-f',
- "$tempdir/defaults_public_owner.sql",
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/defaults_public_owner.sql",
'regress_public_owner',
],
},
defaults_custom_format => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Fc',
- "--file=$tempdir/defaults_custom_format.dump", 'postgres',
+ 'pg_dump',
+ '--format' => 'custom',
+ '--file' => "$tempdir/defaults_custom_format.dump",
+ 'postgres',
],
restore_cmd => [
- 'pg_restore', '-Fc',
- "--file=$tempdir/defaults_custom_format.sql",
+ 'pg_restore',
+ '--format' => 'custom',
+ '--file' => "$tempdir/defaults_custom_format.sql",
"$tempdir/defaults_custom_format.dump",
],
command_like => {
- command =>
- [ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ],
+ command => [
+ 'pg_restore', '--list',
+ "$tempdir/defaults_custom_format.dump",
+ ],
expected => $supports_gzip
? qr/Compression: gzip/
: qr/Compression: none/,
defaults_dir_format => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Fd',
- "--file=$tempdir/defaults_dir_format", 'postgres',
+ 'pg_dump',
+ '--format' => 'directory',
+ '--file' => "$tempdir/defaults_dir_format",
+ 'postgres',
],
restore_cmd => [
- 'pg_restore', '-Fd',
- "--file=$tempdir/defaults_dir_format.sql",
+ 'pg_restore',
+ '--format' => 'directory',
+ '--file' => "$tempdir/defaults_dir_format.sql",
"$tempdir/defaults_dir_format",
],
command_like => {
command =>
- [ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ],
+ [ 'pg_restore', '--list', "$tempdir/defaults_dir_format", ],
expected => $supports_gzip ? qr/Compression: gzip/
: qr/Compression: none/,
name => 'data content is gzip-compressed by default',
defaults_parallel => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Fd', '-j2', "--file=$tempdir/defaults_parallel",
+ 'pg_dump',
+ '--format' => 'directory',
+ '--jobs' => '2',
+ '--file' => "$tempdir/defaults_parallel",
'postgres',
],
restore_cmd => [
'pg_restore',
- "--file=$tempdir/defaults_parallel.sql",
+ '--file' => "$tempdir/defaults_parallel.sql",
"$tempdir/defaults_parallel",
],
},
defaults_tar_format => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Ft',
- "--file=$tempdir/defaults_tar_format.tar", 'postgres',
+ 'pg_dump',
+ '--format' => 'tar',
+ '--file' => "$tempdir/defaults_tar_format.tar",
+ 'postgres',
],
restore_cmd => [
'pg_restore',
- '--format=tar',
- "--file=$tempdir/defaults_tar_format.sql",
+ '--format' => 'tar',
+ '--file' => "$tempdir/defaults_tar_format.sql",
"$tempdir/defaults_tar_format.tar",
],
},
exclude_dump_test_schema => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/exclude_dump_test_schema.sql",
- '--exclude-schema=dump_test', 'postgres',
+ '--file' => "$tempdir/exclude_dump_test_schema.sql",
+ '--exclude-schema' => 'dump_test',
+ 'postgres',
],
},
exclude_test_table => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/exclude_test_table.sql",
- '--exclude-table=dump_test.test_table', 'postgres',
+ '--file' => "$tempdir/exclude_test_table.sql",
+ '--exclude-table' => 'dump_test.test_table',
+ 'postgres',
],
},
exclude_measurement => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/exclude_measurement.sql",
- '--exclude-table-and-children=dump_test.measurement',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/exclude_measurement.sql",
+ '--exclude-table-and-children' => 'dump_test.measurement',
'postgres',
],
},
exclude_measurement_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/exclude_measurement_data.sql",
- '--exclude-table-data-and-children=dump_test.measurement',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/exclude_measurement_data.sql",
+ '--exclude-table-data-and-children' => 'dump_test.measurement',
'--no-unlogged-table-data',
'postgres',
],
},
exclude_test_table_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/exclude_test_table_data.sql",
- '--exclude-table-data=dump_test.test_table',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/exclude_test_table_data.sql",
+ '--exclude-table-data' => 'dump_test.test_table',
'--no-unlogged-table-data',
'postgres',
],
inserts => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/inserts.sql", '-a',
+ '--file' => "$tempdir/inserts.sql",
+ '--data-only',
'--inserts', 'postgres',
],
},
pg_dumpall_globals => {
dump_cmd => [
- 'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_globals.sql",
- '-g', '--no-sync',
+ 'pg_dumpall',
+ '--verbose',
+ '--file' => "$tempdir/pg_dumpall_globals.sql",
+ '--globals-only',
+ '--no-sync',
],
},
pg_dumpall_globals_clean => {
dump_cmd => [
- 'pg_dumpall', "--file=$tempdir/pg_dumpall_globals_clean.sql",
- '-g', '-c', '--no-sync',
+ 'pg_dumpall',
+ '--file' => "$tempdir/pg_dumpall_globals_clean.sql",
+ '--globals-only',
+ '--clean',
+ '--no-sync',
],
},
pg_dumpall_dbprivs => {
dump_cmd => [
'pg_dumpall', '--no-sync',
- "--file=$tempdir/pg_dumpall_dbprivs.sql",
+ '--file' => "$tempdir/pg_dumpall_dbprivs.sql",
],
},
pg_dumpall_exclude => {
dump_cmd => [
- 'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_exclude.sql",
- '--exclude-database', '*dump_test*', '--no-sync',
+ 'pg_dumpall',
+ '--verbose',
+ '--file' => "$tempdir/pg_dumpall_exclude.sql",
+ '--exclude-database' => '*dump_test*',
+ '--no-sync',
],
},
no_toast_compression => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_toast_compression.sql",
- '--no-toast-compression', 'postgres',
+ '--file' => "$tempdir/no_toast_compression.sql",
+ '--no-toast-compression',
+ 'postgres',
],
},
no_large_objects => {
dump_cmd => [
- 'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql",
- '-B', 'postgres',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/no_large_objects.sql",
+ '--no-large-objects',
+ 'postgres',
],
},
no_privs => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_privs.sql", '-x',
+ '--file' => "$tempdir/no_privs.sql",
+ '--no-privileges',
'postgres',
],
},
no_owner => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_owner.sql", '-O',
+ '--file' => "$tempdir/no_owner.sql",
+ '--no-owner',
'postgres',
],
},
no_table_access_method => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/no_table_access_method.sql",
- '--no-table-access-method', 'postgres',
+ '--file' => "$tempdir/no_table_access_method.sql",
+ '--no-table-access-method',
+ 'postgres',
],
},
only_dump_test_schema => {
dump_cmd => [
'pg_dump', '--no-sync',
- "--file=$tempdir/only_dump_test_schema.sql",
- '--schema=dump_test', 'postgres',
+ '--file' => "$tempdir/only_dump_test_schema.sql",
+ '--schema' => 'dump_test',
+ 'postgres',
],
},
only_dump_test_table => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/only_dump_test_table.sql",
- '--table=dump_test.test_table',
- '--lock-wait-timeout='
- . (1000 * $PostgreSQL::Test::Utils::timeout_default),
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/only_dump_test_table.sql",
+ '--table' => 'dump_test.test_table',
+ '--lock-wait-timeout' =>
+ (1000 * $PostgreSQL::Test::Utils::timeout_default),
'postgres',
],
},
only_dump_measurement => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/only_dump_measurement.sql",
- '--table-and-children=dump_test.measurement',
- '--lock-wait-timeout='
- . (1000 * $PostgreSQL::Test::Utils::timeout_default),
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/only_dump_measurement.sql",
+ '--table-and-children' => 'dump_test.measurement',
+ '--lock-wait-timeout' =>
+ (1000 * $PostgreSQL::Test::Utils::timeout_default),
'postgres',
],
},
role => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/role.sql",
- '--role=regress_dump_test_role',
- '--schema=dump_test_second_schema',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/role.sql",
+ '--role' => 'regress_dump_test_role',
+ '--schema' => 'dump_test_second_schema',
'postgres',
],
},
role_parallel => {
test_key => 'role',
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- '--format=directory',
- '--jobs=2',
- "--file=$tempdir/role_parallel",
- '--role=regress_dump_test_role',
- '--schema=dump_test_second_schema',
+ 'pg_dump', '--no-sync',
+ '--format' => 'directory',
+ '--jobs' => '2',
+ '--file' => "$tempdir/role_parallel",
+ '--role' => 'regress_dump_test_role',
+ '--schema' => 'dump_test_second_schema',
'postgres',
],
restore_cmd => [
- 'pg_restore', "--file=$tempdir/role_parallel.sql",
+ 'pg_restore',
+ '--file' => "$tempdir/role_parallel.sql",
"$tempdir/role_parallel",
],
},
rows_per_insert => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/rows_per_insert.sql",
- '-a',
- '--rows-per-insert=4',
- '--table=dump_test.test_table',
- '--table=dump_test.test_fourth_table',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/rows_per_insert.sql",
+ '--data-only',
+ '--rows-per-insert' => '4',
+ '--table' => 'dump_test.test_table',
+ '--table' => 'dump_test.test_fourth_table',
'postgres',
],
},
schema_only => {
dump_cmd => [
- 'pg_dump', '--format=plain',
- "--file=$tempdir/schema_only.sql", '--no-sync',
- '-s', 'postgres',
+ 'pg_dump', '--no-sync',
+ '--format' => 'plain',
+ '--file' => "$tempdir/schema_only.sql",
+ '--schema-only',
+ 'postgres',
],
},
section_pre_data => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/section_pre_data.sql",
- '--section=pre-data', '--no-sync',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/section_pre_data.sql",
+ '--section' => 'pre-data',
'postgres',
],
},
section_data => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/section_data.sql",
- '--section=data', '--no-sync',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/section_data.sql",
+ '--section' => 'data',
'postgres',
],
},
section_post_data => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/section_post_data.sql",
- '--section=post-data', '--no-sync', 'postgres',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/section_post_data.sql",
+ '--section' => 'post-data',
+ 'postgres',
],
},
test_schema_plus_large_objects => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/test_schema_plus_large_objects.sql",
-
- '--schema=dump_test', '-b', '-B', '--no-sync', 'postgres',
+ 'pg_dump', '--no-sync',
+ '--file' => "$tempdir/test_schema_plus_large_objects.sql",
+ '--schema' => 'dump_test',
+ '--large-objects',
+ '--no-large-objects',
+ 'postgres',
],
},);
# Test connecting to a non-existent database
command_fails_like(
- [ 'pg_dump', '-p', "$port", 'qqq' ],
+ [ 'pg_dump', '--port' => $port, 'qqq' ],
qr/pg_dump: error: connection to server .* failed: FATAL: database "qqq" does not exist/,
'connecting to a non-existent database');
# Test connecting to an invalid database
$node->command_fails_like(
- [ 'pg_dump', '-d', 'regression_invalid' ],
+ [ 'pg_dump', '--dbname' => 'regression_invalid' ],
qr/pg_dump: error: connection to server .* failed: FATAL: cannot connect to invalid database "regression_invalid"/,
'connecting to an invalid database');
# Test connecting with an unprivileged user
command_fails_like(
- [ 'pg_dump', '-p', "$port", '--role=regress_dump_test_role' ],
+ [ 'pg_dump', '--port' => $port, '--role' => 'regress_dump_test_role' ],
qr/\Qpg_dump: error: query failed: ERROR: permission denied for\E/,
'connecting with an unprivileged user');
# Test dumping a non-existent schema, table, and patterns with --strict-names
command_fails_like(
- [ 'pg_dump', '-p', "$port", '-n', 'nonexistent' ],
+ [ 'pg_dump', '--port' => $port, '--schema' => 'nonexistent' ],
qr/\Qpg_dump: error: no matching schemas were found\E/,
'dumping a non-existent schema');
command_fails_like(
- [ 'pg_dump', '-p', "$port", '-t', 'nonexistent' ],
+ [ 'pg_dump', '--port' => $port, '--table' => 'nonexistent' ],
qr/\Qpg_dump: error: no matching tables were found\E/,
'dumping a non-existent table');
command_fails_like(
- [ 'pg_dump', '-p', "$port", '--strict-names', '-n', 'nonexistent*' ],
+ [
+ 'pg_dump',
+ '--port' => $port,
+ '--strict-names',
+ '--schema' => 'nonexistent*'
+ ],
qr/\Qpg_dump: error: no matching schemas were found for pattern\E/,
'no matching schemas');
command_fails_like(
- [ 'pg_dump', '-p', "$port", '--strict-names', '-t', 'nonexistent*' ],
+ [
+ 'pg_dump',
+ '--port' => $port,
+ '--strict-names',
+ '--table' => 'nonexistent*'
+ ],
qr/\Qpg_dump: error: no matching tables were found for pattern\E/,
'no matching tables');
# Test invalid multipart database names
$node->command_fails_like(
- [ 'pg_dumpall', '--exclude-database', '.' ],
+ [ 'pg_dumpall', '--exclude-database' => '.' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./,
'pg_dumpall: option --exclude-database rejects multipart pattern "."');
$node->command_fails_like(
- [ 'pg_dumpall', '--exclude-database', 'myhost.mydb' ],
+ [ 'pg_dumpall', '--exclude-database' => 'myhost.mydb' ],
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/,
'pg_dumpall: option --exclude-database rejects multipart database names');
##############################################################
# Test dumping pg_catalog (for research -- cannot be reloaded)
-$node->command_ok([ 'pg_dump', '-p', "$port", '-n', 'pg_catalog' ],
+$node->command_ok(
+ [ 'pg_dump', '--port' => $port, '--schema' => 'pg_catalog' ],
'pg_dump: option -n pg_catalog');
#########################################
# Test valid database exclusion patterns
$node->command_ok(
- [ 'pg_dumpall', '-p', "$port", '--exclude-database', '"myhost.mydb"' ],
+ [
+ 'pg_dumpall',
+ '--port' => $port,
+ '--exclude-database' => '"myhost.mydb"'
+ ],
'pg_dumpall: option --exclude-database handles database names with embedded dots'
);
# Test invalid multipart schema names
$node->command_fails_like(
- [ 'pg_dump', '--schema', 'myhost.mydb.myschema' ],
+ [ 'pg_dump', '--schema' => 'myhost.mydb.myschema' ],
qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/,
'pg_dump: option --schema rejects three-part schema names');
$node->command_fails_like(
- [ 'pg_dump', '--schema', 'otherdb.myschema' ],
+ [ 'pg_dump', '--schema' => 'otherdb.myschema' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/,
'pg_dump: option --schema rejects cross-database multipart schema names');
$node->command_fails_like(
- [ 'pg_dump', '--schema', '.' ],
+ [ 'pg_dump', '--schema' => '.' ],
qr/pg_dump: error: cross-database references are not implemented: \./,
'pg_dump: option --schema rejects degenerate two-part schema name: "."');
$node->command_fails_like(
- [ 'pg_dump', '--schema', '"some.other.db".myschema' ],
+ [ 'pg_dump', '--schema' => '"some.other.db".myschema' ],
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.myschema/,
'pg_dump: option --schema rejects cross-database multipart schema names with embedded dots'
);
$node->command_fails_like(
- [ 'pg_dump', '--schema', '..' ],
+ [ 'pg_dump', '--schema' => '..' ],
qr/pg_dump: error: improper qualified name \(too many dotted names\): \.\./,
'pg_dump: option --schema rejects degenerate three-part schema name: ".."'
);
# Test invalid multipart relation names
$node->command_fails_like(
- [ 'pg_dump', '--table', 'myhost.mydb.myschema.mytable' ],
+ [ 'pg_dump', '--table' => 'myhost.mydb.myschema.mytable' ],
qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/,
'pg_dump: option --table rejects four-part table names');
$node->command_fails_like(
- [ 'pg_dump', '--table', 'otherdb.pg_catalog.pg_class' ],
+ [ 'pg_dump', '--table' => 'otherdb.pg_catalog.pg_class' ],
qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/,
'pg_dump: option --table rejects cross-database three part table names');
command_fails_like(
[
- 'pg_dump', '-p', "$port", '--table',
- '"some.other.db".pg_catalog.pg_class'
+ 'pg_dump',
+ '--port' => $port,
+ '--table' => '"some.other.db".pg_catalog.pg_class'
],
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/,
'pg_dump: option --table rejects cross-database three part table names with embedded dots'
$node->safe_psql('postgres', "CREATE FOREIGN TABLE t1 (a int) SERVER s1");
command_fails_like(
- [ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ],
+ [
+ "pg_dump",
+ '--port' => $port,
+ '--include-foreign-data' => 's0',
+ 'postgres'
+ ],
qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: detail: Query was: .*t0/,
"correctly fails to dump a foreign table from a dummy FDW");
command_ok(
- [ "pg_dump", '-p', $port, '-a', '--include-foreign-data=s2', 'postgres' ],
+ [
+ "pg_dump",
+ '--port' => $port,
+ '--data-only',
+ '--include-foreign-data' => 's2',
+ 'postgres'
+ ],
"dump foreign server with no tables");
done_testing();
$node->command_ok(
[
- 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', "$backupdir/dump1",
- $node->connstr($dbname1)
+ 'pg_dump',
+ '--format' => 'directory',
+ '--no-sync',
+ '--jobs' => 2,
+ '--file' => "$backupdir/dump1",
+ $node->connstr($dbname1),
],
'parallel dump');
$node->command_ok(
[
- 'pg_restore', '-v',
- '-d', $node->connstr($dbname2),
- '-j3', "$backupdir/dump1"
+ 'pg_restore', '--verbose',
+ '--dbname' => $node->connstr($dbname2),
+ '--jobs' => 3,
+ "$backupdir/dump1",
],
'parallel restore');
$node->command_ok(
[
- 'pg_dump', '-Fd',
- '--no-sync', '-j2',
- '-f', "$backupdir/dump2",
- '--inserts', $node->connstr($dbname1)
+ 'pg_dump',
+ '--format' => 'directory',
+ '--no-sync',
+ '--jobs' => 2,
+ '--file' => "$backupdir/dump2",
+ '--inserts',
+ $node->connstr($dbname1),
],
'parallel dump as inserts');
$node->command_ok(
[
- 'pg_restore', '-v',
- '-d', $node->connstr($dbname3),
- '-j3', "$backupdir/dump2"
+ 'pg_restore', '--verbose',
+ '--dbname' => $node->connstr($dbname3),
+ '--jobs' => 3,
+ "$backupdir/dump2",
],
'parallel restore as inserts');
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"filter file without patterns");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with filter patterns as well as comments and whitespace");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"filter file without patterns");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with exclusion of a single table");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with wildcard in pattern");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with multiline names requiring quoting");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with filter");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"exclude the public schema");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt",
- "--filter=$tempdir/inputfile2.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ '--filter' => "$tempdir/inputfile2.txt",
+ 'postgres'
],
"exclude the public schema with multiple filters");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with filter");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump tables with filter");
command_fails_like(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
qr/pg_dump: error: no matching foreign servers were found for pattern/,
"dump nonexisting foreign server");
command_ok(
[
- 'pg_dump', '-p', $port, '-f', $plainfile,
- "--filter=$tempdir/inputfile.txt", 'postgres'
+ 'pg_dump',
+ '--port' => $port,
+ '--file' => $plainfile,
+ '--filter' => "$tempdir/inputfile.txt",
+ 'postgres'
],
"dump foreign_data with filter");
command_fails_like(
[
- &n