@@ -61,18 +61,19 @@ my $supports_zstd = check_pg_config("#define USE_ZSTD 1");
my %pgdump_runs = (
binary_upgrade = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
'--format=custom' ,
"--file=$tempdir/binary_upgrade.dump" ,
'-w' ,
'pg_dump' , '--no-sync' ,
'--format' = > 'custom' ,
'--file' = > "$tempdir/binary_upgrade.dump" ,
'--no-password' ,
'--schema-only' ,
'--binary-upgrade' ,
'-d' , 'postgres' , # alternative way to specify database
'--dbname' = > 'postgres' , # alternative way to specify database
] ,
restore_cmd = > [
'pg_restore' , '-Fc' , '--verbose' ,
"--file=$tempdir/binary_upgrade.sql" ,
'pg_restore' ,
'--format' = > 'custom' ,
'--verbose' ,
'--file' = > "$tempdir/binary_upgrade.sql" ,
"$tempdir/binary_upgrade.dump" ,
] ,
} ,
@@ -82,18 +83,21 @@ my %pgdump_runs = (
test_key = > 'compression' ,
compile_option = > 'gzip' ,
dump_cmd = > [
'pg_dump' , '--format=custom' ,
'--compress=1' , "--file=$tempdir/compression_gzip_custom.dump" ,
'pg_dump' ,
'--format' = > 'custom' ,
'--compress' = > '1' ,
'--file' = > "$tempdir/compression_gzip_custom.dump" ,
'postgres' ,
] ,
restore_cmd = > [
'pg_restore' ,
"--file=$tempdir/compression_gzip_custom.sql",
'--file' => "$tempdir/compression_gzip_custom.sql",
"$tempdir/compression_gzip_custom.dump" ,
] ,
command_like = > {
command = > [
'pg_restore' , '-l' , "$tempdir/compression_gzip_custom.dump" ,
'pg_restore' , '--list' ,
"$tempdir/compression_gzip_custom.dump" ,
] ,
expected = > qr/Compression: gzip/ ,
name = > 'data content is gzip-compressed'
@@ -105,9 +109,12 @@ my %pgdump_runs = (
test_key = > 'compression' ,
compile_option = > 'gzip' ,
dump_cmd = > [
'pg_dump' , '--jobs=2' ,
'--format=directory' , '--compress=gzip:1' ,
"--file=$tempdir/compression_gzip_dir" , 'postgres' ,
'pg_dump' ,
'--jobs' = > '2' ,
'--format' = > 'directory' ,
'--compress' = > 'gzip:1' ,
'--file' = > "$tempdir/compression_gzip_dir" ,
'postgres' ,
] ,
# Give coverage for manually compressed blobs.toc files during
# restore.
@@ -121,8 +128,9 @@ my %pgdump_runs = (
"$tempdir/compression_gzip_dir/*.dat.gz" ,
] ,
restore_cmd = > [
'pg_restore' , '--jobs=2' ,
"--file=$tempdir/compression_gzip_dir.sql" ,
'pg_restore' ,
'--jobs' = > '2' ,
'--file' = > "$tempdir/compression_gzip_dir.sql" ,
"$tempdir/compression_gzip_dir" ,
] ,
} ,
@@ -131,8 +139,11 @@ my %pgdump_runs = (
test_key = > 'compression' ,
compile_option = > 'gzip' ,
dump_cmd = > [
'pg_dump' , '--format=plain' , '-Z1' ,
"--file=$tempdir/compression_gzip_plain.sql.gz" , 'postgres' ,
'pg_dump' ,
'--format' = > 'plain' ,
'--compress' = > '1' ,
'--file' = > "$tempdir/compression_gzip_plain.sql.gz" ,
'postgres' ,
] ,
# Decompress the generated file to run through the tests.
compress_cmd = > {
@@ -146,18 +157,22 @@ my %pgdump_runs = (
test_key = > 'compression' ,
compile_option = > 'lz4' ,
dump_cmd = > [
'pg_dump' , '--format=custom' ,
'--compress=lz4' , "--file=$tempdir/compression_lz4_custom.dump" ,
'pg_dump' ,
'--format' = > 'custom' ,
'--compress' = > 'lz4' ,
'--file' = > "$tempdir/compression_lz4_custom.dump" ,
'postgres' ,
] ,
restore_cmd = > [
'pg_restore' ,
"--file=$tempdir/compression_lz4_custom.sql",
'--file' => "$tempdir/compression_lz4_custom.sql",
"$tempdir/compression_lz4_custom.dump" ,
] ,
command_like = > {
command = >
[ 'pg_restore' , '-l' , "$tempdir/compression_lz4_custom.dump" , ] ,
command = > [
'pg_restore' , '--list' ,
"$tempdir/compression_lz4_custom.dump" ,
] ,
expected = > qr/Compression: lz4/ ,
name = > 'data content is lz4 compressed'
} ,
@@ -168,9 +183,12 @@ my %pgdump_runs = (
test_key = > 'compression' ,
compile_option = > 'lz4' ,
dump_cmd = > [
'pg_dump' , '--jobs=2' ,
'--format=directory' , '--compress=lz4:1' ,
"--file=$tempdir/compression_lz4_dir" , 'postgres' ,
'pg_dump' ,
'--jobs' = > '2' ,
'--format' = > 'directory' ,
'--compress' = > 'lz4:1' ,
'--file' = > "$tempdir/compression_lz4_dir" ,
'postgres' ,
] ,
# Verify that data files were compressed
glob_patterns = > [
@@ -178,8 +196,9 @@ my %pgdump_runs = (
"$tempdir/compression_lz4_dir/*.dat.lz4" ,
] ,
restore_cmd = > [
'pg_restore' , '--jobs=2' ,
"--file=$tempdir/compression_lz4_dir.sql" ,
'pg_restore' ,
'--jobs' = > '2' ,
'--file' = > "$tempdir/compression_lz4_dir.sql" ,
"$tempdir/compression_lz4_dir" ,
] ,
} ,
@@ -188,8 +207,11 @@ my %pgdump_runs = (
test_key = > 'compression' ,
compile_option = > 'lz4' ,
dump_cmd = > [
'pg_dump' , '--format=plain' , '--compress=lz4' ,
"--file=$tempdir/compression_lz4_plain.sql.lz4" , 'postgres' ,
'pg_dump' ,
'--format' = > 'plain' ,
'--compress' = > 'lz4' ,
'--file' = > "$tempdir/compression_lz4_plain.sql.lz4" ,
'postgres' ,
] ,
# Decompress the generated file to run through the tests.
compress_cmd = > {
@@ -206,18 +228,21 @@ my %pgdump_runs = (
test_key = > 'compression' ,
compile_option = > 'zstd' ,
dump_cmd = > [
'pg_dump' , '--format=custom' ,
'--compress=zstd' , "--file=$tempdir/compression_zstd_custom.dump" ,
'pg_dump' ,
'--format' = > 'custom' ,
'--compress' = > 'zstd' ,
'--file' = > "$tempdir/compression_zstd_custom.dump" ,
'postgres' ,
] ,
restore_cmd = > [
'pg_restore' ,
"--file=$tempdir/compression_zstd_custom.sql",
'--file' => "$tempdir/compression_zstd_custom.sql",
"$tempdir/compression_zstd_custom.dump" ,
] ,
command_like = > {
command = > [
'pg_restore' , '-l' , "$tempdir/compression_zstd_custom.dump" ,
'pg_restore' , '--list' ,
"$tempdir/compression_zstd_custom.dump" ,
] ,
expected = > qr/Compression: zstd/ ,
name = > 'data content is zstd compressed'
@@ -228,9 +253,12 @@ my %pgdump_runs = (
test_key = > 'compression' ,
compile_option = > 'zstd' ,
dump_cmd = > [
'pg_dump' , '--jobs=2' ,
'--format=directory' , '--compress=zstd:1' ,
"--file=$tempdir/compression_zstd_dir" , 'postgres' ,
'pg_dump' ,
'--jobs' = > '2' ,
'--format' = > 'directory' ,
'--compress' = > 'zstd:1' ,
'--file' = > "$tempdir/compression_zstd_dir" ,
'postgres' ,
] ,
# Give coverage for manually compressed blobs.toc files during
# restore.
@@ -247,8 +275,9 @@ my %pgdump_runs = (
"$tempdir/compression_zstd_dir/*.dat.zst" ,
] ,
restore_cmd = > [
'pg_restore' , '--jobs=2' ,
"--file=$tempdir/compression_zstd_dir.sql" ,
'pg_restore' ,
'--jobs' = > '2' ,
'--file' = > "$tempdir/compression_zstd_dir.sql" ,
"$tempdir/compression_zstd_dir" ,
] ,
} ,
@@ -258,8 +287,11 @@ my %pgdump_runs = (
test_key = > 'compression' ,
compile_option = > 'zstd' ,
dump_cmd = > [
'pg_dump' , '--format=plain' , '--compress=zstd:long' ,
"--file=$tempdir/compression_zstd_plain.sql.zst" , 'postgres' ,
'pg_dump' ,
'--format' = > 'plain' ,
'--compress' = > 'zstd:long' ,
'--file' = > "$tempdir/compression_zstd_plain.sql.zst" ,
'postgres' ,
] ,
# Decompress the generated file to run through the tests.
compress_cmd = > {
@@ -274,81 +306,80 @@ my %pgdump_runs = (
clean = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/clean.sql" ,
'-c' ,
'-d' , 'postgres' , # alternative way to specify database
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/clean.sql" ,
'--clean' ,
'--dbname' = > 'postgres' , # alternative way to specify database
] ,
} ,
clean_if_exists = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/clean_if_exists.sql" ,
'-c' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/clean_if_exists.sql" ,
'--clean' ,
'--if-exists' ,
'--encoding=UTF8' , # no-op, just tests that option is accepted
'--encoding' = > 'UTF8' , # no-op, just for testing
'postgres' ,
] ,
} ,
column_inserts = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
"--file=$tempdir/column_inserts.sql" , '-a' ,
'--file' = > "$tempdir/column_inserts.sql" ,
'--data-only' ,
'--column-inserts' , 'postgres' ,
] ,
} ,
createdb = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/createdb.sql" ,
'-C' ,
'-R' , # no-op, just for testing
'-v' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/createdb.sql" ,
'--create' ,
'--no-reconnect' , # no-op, just for testing
'--verbose' ,
'postgres' ,
] ,
} ,
data_only = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/data_only.sql" ,
'-a' ,
'--superuser=test_superuser' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/data_only.sql" ,
'--data-only' ,
'--superuser' = > 'test_superuser' ,
'--disable-triggers' ,
'-v' , # no-op, just make sure it works
'--verbose', # no-op, just make sure it works
'postgres' ,
] ,
} ,
defaults = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
'-f' , "$tempdir/defaults.sql" ,
'--file' = > "$tempdir/defaults.sql" ,
'postgres' ,
] ,
} ,
defaults_no_public = > {
database = > 'regress_pg_dump_test' ,
dump_cmd = > [
'pg_dump' , '--no-sync' , '-f' , "$tempdir/defaults_no_public.sql" ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/defaults_no_public.sql" ,
'regress_pg_dump_test' ,
] ,
} ,
defaults_no_public_clean = > {
database = > 'regress_pg_dump_test' ,
dump_cmd = > [
'pg_dump' , '--no-sync' , '-c' , '-f' ,
"$tempdir/defaults_no_public_clean.sql" ,
'pg_dump' , '--no-sync' ,
'--clean' ,
'--file' = > "$tempdir/defaults_no_public_clean.sql" ,
'regress_pg_dump_test' ,
] ,
} ,
defaults_public_owner = > {
database = > 'regress_public_owner' ,
dump_cmd = > [
'pg_dump', '--no-sync', '-f',
"$tempdir/defaults_public_owner.sql",
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_public_owner.sql",
'regress_public_owner' ,
] ,
} ,
@@ -360,17 +391,22 @@ my %pgdump_runs = (
defaults_custom_format = > {
test_key = > 'defaults' ,
dump_cmd = > [
'pg_dump' , '-Fc' ,
"--file=$tempdir/defaults_custom_format.dump" , 'postgres' ,
'pg_dump' ,
'--format' = > 'custom' ,
'--file' = > "$tempdir/defaults_custom_format.dump" ,
'postgres' ,
] ,
restore_cmd = > [
'pg_restore' , '-Fc' ,
"--file=$tempdir/defaults_custom_format.sql" ,
'pg_restore' ,
'--format' = > 'custom' ,
'--file' = > "$tempdir/defaults_custom_format.sql" ,
"$tempdir/defaults_custom_format.dump" ,
] ,
command_like = > {
command = >
[ 'pg_restore' , '-l' , "$tempdir/defaults_custom_format.dump" , ] ,
command = > [
'pg_restore' , '--list' ,
"$tempdir/defaults_custom_format.dump" ,
] ,
expected = > $ supports_gzip
? qr/Compression: gzip/
: qr/Compression: none/ ,
@@ -385,17 +421,20 @@ my %pgdump_runs = (
defaults_dir_format = > {
test_key = > 'defaults' ,
dump_cmd = > [
'pg_dump' , '-Fd' ,
"--file=$tempdir/defaults_dir_format" , 'postgres' ,
'pg_dump' ,
'--format' = > 'directory' ,
'--file' = > "$tempdir/defaults_dir_format" ,
'postgres' ,
] ,
restore_cmd = > [
'pg_restore' , '-Fd' ,
"--file=$tempdir/defaults_dir_format.sql" ,
'pg_restore' ,
'--format' = > 'directory' ,
'--file' = > "$tempdir/defaults_dir_format.sql" ,
"$tempdir/defaults_dir_format" ,
] ,
command_like = > {
command = >
[ 'pg_restore' , '-l' , "$tempdir/defaults_dir_format" , ] ,
[ 'pg_restore', '--list', "$tempdir/defaults_dir_format", ],
expected = > $ supports_gzip ? qr/Compression: gzip/
: qr/Compression: none/ ,
name = > 'data content is gzip-compressed by default' ,
@@ -412,12 +451,15 @@ my %pgdump_runs = (
defaults_parallel = > {
test_key = > 'defaults' ,
dump_cmd = > [
'pg_dump' , '-Fd' , '-j2' , "--file=$tempdir/defaults_parallel" ,
'pg_dump' ,
'--format' = > 'directory' ,
'--jobs' = > 2 ,
'--file' = > "$tempdir/defaults_parallel" ,
'postgres' ,
] ,
restore_cmd = > [
'pg_restore' ,
"--file=$tempdir/defaults_parallel.sql",
'--file' => "$tempdir/defaults_parallel.sql",
"$tempdir/defaults_parallel" ,
] ,
} ,
@@ -426,55 +468,56 @@ my %pgdump_runs = (
defaults_tar_format = > {
test_key = > 'defaults' ,
dump_cmd = > [
'pg_dump' , '-Ft' ,
"--file=$tempdir/defaults_tar_format.tar" , 'postgres' ,
'pg_dump' ,
'--format' = > 'tar' ,
'--file' = > "$tempdir/defaults_tar_format.tar" ,
'postgres' ,
] ,
restore_cmd = > [
'pg_restore' ,
'--format=tar',
"--file=$tempdir/defaults_tar_format.sql",
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.sql",
"$tempdir/defaults_tar_format.tar" ,
] ,
} ,
exclude_dump_test_schema = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
"--file=$tempdir/exclude_dump_test_schema.sql" ,
'--exclude-schema=dump_test' , 'postgres' ,
'--file' = > "$tempdir/exclude_dump_test_schema.sql" ,
'--exclude-schema' = > 'dump_test' ,
'postgres' ,
] ,
} ,
exclude_test_table = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
"--file=$tempdir/exclude_test_table.sql" ,
'--exclude-table=dump_test.test_table' , 'postgres' ,
'--file' = > "$tempdir/exclude_test_table.sql" ,
'--exclude-table' = > 'dump_test.test_table' ,
'postgres' ,
] ,
} ,
exclude_measurement = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/exclude_measurement.sql" ,
'--exclude-table-and-children=dump_test.measurement' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/exclude_measurement.sql" ,
'--exclude-table-and-children' = > 'dump_test.measurement' ,
'postgres' ,
] ,
} ,
exclude_measurement_data = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/exclude_measurement_data.sql" ,
'--exclude-table-data-and-children=dump_test.measurement' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/exclude_measurement_data.sql" ,
'--exclude-table-data-and-children' = > 'dump_test.measurement' ,
'--no-unlogged-table-data' ,
'postgres' ,
] ,
} ,
exclude_test_table_data = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/exclude_test_table_data.sql" ,
'--exclude-table-data=dump_test.test_table' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/exclude_test_table_data.sql" ,
'--exclude-table-data' = > 'dump_test.test_table' ,
'--no-unlogged-table-data' ,
'postgres' ,
] ,
@@ -482,168 +525,190 @@ my %pgdump_runs = (
inserts = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
"--file=$tempdir/inserts.sql" , '-a' ,
'--file' = > "$tempdir/inserts.sql" ,
'--data-only' ,
'--inserts' , 'postgres' ,
] ,
} ,
pg_dumpall_globals = > {
dump_cmd = > [
'pg_dumpall' , '-v' , "--file=$tempdir/pg_dumpall_globals.sql" ,
'-g' , '--no-sync' ,
'pg_dumpall' ,
'--verbose' ,
'--file' = > "$tempdir/pg_dumpall_globals.sql" ,
'--globals-only' ,
'--no-sync' ,
] ,
} ,
pg_dumpall_globals_clean = > {
dump_cmd = > [
'pg_dumpall' , "--file=$tempdir/pg_dumpall_globals_clean.sql" ,
'-g' , '-c' , '--no-sync' ,
'pg_dumpall' ,
'--file' = > "$tempdir/pg_dumpall_globals_clean.sql" ,
'--globals-only' ,
'--clean' ,
'--no-sync' ,
] ,
} ,
pg_dumpall_dbprivs = > {
dump_cmd = > [
'pg_dumpall' , '--no-sync' ,
"--file=$tempdir/pg_dumpall_dbprivs.sql",
'--file' => "$tempdir/pg_dumpall_dbprivs.sql",
] ,
} ,
pg_dumpall_exclude = > {
dump_cmd = > [
'pg_dumpall' , '-v' , "--file=$tempdir/pg_dumpall_exclude.sql" ,
'--exclude-database' , '*dump_test*' , '--no-sync' ,
'pg_dumpall' ,
'--verbose' ,
'--file' = > "$tempdir/pg_dumpall_exclude.sql" ,
'--exclude-database' = > '*dump_test*' ,
'--no-sync' ,
] ,
} ,
no_toast_compression = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
"--file=$tempdir/no_toast_compression.sql" ,
'--no-toast-compression' , 'postgres' ,
'--file' = > "$tempdir/no_toast_compression.sql" ,
'--no-toast-compression' ,
'postgres' ,
] ,
} ,
no_large_objects = > {
dump_cmd = > [
'pg_dump' , '--no-sync' , "--file=$tempdir/no_large_objects.sql" ,
'-B' , 'postgres' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/no_large_objects.sql" ,
'--no-large-objects' ,
'postgres' ,
] ,
} ,
no_privs = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
"--file=$tempdir/no_privs.sql" , '-x' ,
'--file' = > "$tempdir/no_privs.sql" ,
'--no-privileges' ,
'postgres' ,
] ,
} ,
no_owner = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
"--file=$tempdir/no_owner.sql" , '-O' ,
'--file' = > "$tempdir/no_owner.sql" ,
'--no-owner' ,
'postgres' ,
] ,
} ,
no_table_access_method = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
"--file=$tempdir/no_table_access_method.sql" ,
'--no-table-access-method' , 'postgres' ,
'--file' = > "$tempdir/no_table_access_method.sql" ,
'--no-table-access-method' ,
'postgres' ,
] ,
} ,
only_dump_test_schema = > {
dump_cmd = > [
'pg_dump' , '--no-sync' ,
"--file=$tempdir/only_dump_test_schema.sql" ,
'--schema=dump_test' , 'postgres' ,
'--file' = > "$tempdir/only_dump_test_schema.sql" ,
'--schema' = > 'dump_test' ,
'postgres' ,
] ,
} ,
only_dump_test_table = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/only_dump_test_table.sql" ,
'--table=dump_test.test_table' ,
'--lock-wait-timeout='
. ( 1000 * $ PostgreSQL:: Test:: Utils:: timeout_default ) ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/only_dump_test_table.sql" ,
'--table' = > 'dump_test.test_table' ,
'--lock-wait-timeout' = >
( 1000 * $ PostgreSQL:: Test:: Utils:: timeout_default ) ,
'postgres' ,
] ,
} ,
only_dump_measurement = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/only_dump_measurement.sql" ,
'--table-and-children=dump_test.measurement' ,
'--lock-wait-timeout='
. ( 1000 * $ PostgreSQL:: Test:: Utils:: timeout_default ) ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/only_dump_measurement.sql" ,
'--table-and-children' = > 'dump_test.measurement' ,
'--lock-wait-timeout' = >
( 1000 * $ PostgreSQL:: Test:: Utils:: timeout_default ) ,
'postgres' ,
] ,
} ,
role = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/role.sql" ,
'--role=regress_dump_test_role' ,
'--schema=dump_test_second_schema' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/role.sql" ,
'--role' = > 'regress_dump_test_role' ,
'--schema' = > 'dump_test_second_schema' ,
'postgres' ,
] ,
} ,
role_parallel = > {
test_key = > 'role' ,
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
'--format=directory' ,
'--jobs=2' ,
"--file=$tempdir/role_parallel" ,
'--role=regress_dump_test_role' ,
'--schema=dump_test_second_schema' ,
'pg_dump' , '--no-sync' ,
'--format' = > 'directory' ,
'--jobs' = > '2' ,
'--file' = > "$tempdir/role_parallel" ,
'--role' = > 'regress_dump_test_role' ,
'--schema' = > 'dump_test_second_schema' ,
'postgres' ,
] ,
restore_cmd = > [
'pg_restore' , "--file=$tempdir/role_parallel.sql" ,
'pg_restore' ,
'--file' = > "$tempdir/role_parallel.sql" ,
"$tempdir/role_parallel" ,
] ,
} ,
rows_per_insert = > {
dump_cmd = > [
'pg_dump' ,
'--no-sync' ,
"--file=$tempdir/rows_per_insert.sql" ,
'-a' ,
'--rows-per-insert=4' ,
'--table=dump_test.test_table' ,
'--table=dump_test.test_fourth_table' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/rows_per_insert.sql" ,
'--data-only' ,
'--rows-per-insert' = > '4' ,
'--table' = > 'dump_test.test_table' ,
'--table' = > 'dump_test.test_fourth_table' ,
'postgres' ,
] ,
} ,
schema_only = > {
dump_cmd = > [
'pg_dump' , '--format=plain' ,
"--file=$tempdir/schema_only.sql" , '--no-sync' ,
'-s' , 'postgres' ,
'pg_dump' , '--no-sync' ,
'--format' = > 'plain' ,
'--file' = > "$tempdir/schema_only.sql" ,
'--schema-only' ,
'postgres' ,
] ,
} ,
section_pre_data = > {
dump_cmd = > [
'pg_dump' , "--file=$tempdir/section_pre_data.sql" ,
'--section=pre-data' , '--no-sync' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/section_pre_data.sql" ,
'--section' = > 'pre-data' ,
'postgres' ,
] ,
} ,
section_data = > {
dump_cmd = > [
'pg_dump' , "--file=$tempdir/section_data.sql" ,
'--section=data' , '--no-sync' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/section_data.sql" ,
'--section' = > 'data' ,
'postgres' ,
] ,
} ,
section_post_data = > {
dump_cmd = > [
'pg_dump' , "--file=$tempdir/section_post_data.sql" ,
'--section=post-data' , '--no-sync' , 'postgres' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/section_post_data.sql" ,
'--section' = > 'post-data' ,
'postgres' ,
] ,
} ,
test_schema_plus_large_objects = > {
dump_cmd = > [
'pg_dump' , "--file=$tempdir/test_schema_plus_large_objects.sql" ,
'--schema=dump_test' , '-b' , '-B' , '--no-sync' , 'postgres' ,
'pg_dump' , '--no-sync' ,
'--file' = > "$tempdir/test_schema_plus_large_objects.sql" ,
'--schema' = > 'dump_test' ,
'--large-objects' ,
'--no-large-objects' ,
'postgres' ,
] ,
} , ) ;
@@ -4732,7 +4797,7 @@ foreach my $db (sort keys %create_sql)
# Test connecting to a non-existent database
command_fails_like (
[ 'pg_dump' , '-p' , "$port" , 'qqq' ] ,
[ 'pg_dump' , '--port' = > $ port , 'qqq' ] ,
qr/pg_dump: error: connection to server .* failed: FATAL: database "qqq" does not exist/ ,
'connecting to a non-existent database' ) ;
@@ -4740,7 +4805,7 @@ command_fails_like(
# Test connecting to an invalid database
$ node - > command_fails_like (
[ 'pg_dump' , '-d' , 'regression_invalid' ] ,
[ 'pg_dump' , '--dbname' = > 'regression_invalid' ] ,
qr/pg_dump: error: connection to server .* failed: FATAL: cannot connect to invalid database "regression_invalid"/ ,
'connecting to an invalid database' ) ;
@@ -4748,7 +4813,7 @@ $node->command_fails_like(
# Test connecting with an unprivileged user
command_fails_like (
[ 'pg_dump', '-p', "$port", '--role=regress_dump_test_role' ],
[ 'pg_dump', '--port' => $port, '--role' => 'regress_dump_test_role' ],
qr/\Qpg_dump: error: query failed: ERROR: permission denied for\E/ ,
'connecting with an unprivileged user' ) ;
@@ -4756,22 +4821,32 @@ command_fails_like(
# Test dumping a non-existent schema, table, and patterns with --strict-names
command_fails_like (
[ 'pg_dump' , '-p' , "$port" , '-n' , 'nonexistent' ] ,
[ 'pg_dump' , '--port' = > $ port , '--schema' = > 'nonexistent' ] ,
qr/\Qpg_dump: error: no matching schemas were found\E/ ,
'dumping a non-existent schema' ) ;
command_fails_like (
[ 'pg_dump' , '-p' , "$port" , '-t' , 'nonexistent' ] ,
[ 'pg_dump' , '--port' = > $ port , '--table' = > 'nonexistent' ] ,
qr/\Qpg_dump: error: no matching tables were found\E/ ,
'dumping a non-existent table' ) ;
command_fails_like (
[ 'pg_dump' , '-p' , "$port" , '--strict-names' , '-n' , 'nonexistent*' ] ,
[
'pg_dump' ,
'--port' = > $ port ,
'--strict-names' ,
'--schema' = > 'nonexistent*'
] ,
qr/\Qpg_dump: error: no matching schemas were found for pattern\E/ ,
'no matching schemas' ) ;
command_fails_like (
[ 'pg_dump' , '-p' , "$port" , '--strict-names' , '-t' , 'nonexistent*' ] ,
[
'pg_dump' ,
'--port' = > $ port ,
'--strict-names' ,
'--table' = > 'nonexistent*'
] ,
qr/\Qpg_dump: error: no matching tables were found for pattern\E/ ,
'no matching tables' ) ;
@@ -4779,26 +4854,31 @@ command_fails_like(
# Test invalid multipart database names
$ node - > command_fails_like (
[ 'pg_dumpall' , '--exclude-database' , '.' ] ,
[ 'pg_dumpall' , '--exclude-database' = > '.' ] ,
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./ ,
'pg_dumpall: option --exclude-database rejects multipart pattern "."' ) ;
$ node - > command_fails_like (
[ 'pg_dumpall' , '--exclude-database' , 'myhost.mydb' ] ,
[ 'pg_dumpall' , '--exclude-database' = > 'myhost.mydb' ] ,
qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/ ,
'pg_dumpall: option --exclude-database rejects multipart database names' ) ;
##############################################################
# Test dumping pg_catalog (for research -- cannot be reloaded)
$ node - > command_ok ( [ 'pg_dump' , '-p' , "$port" , '-n' , 'pg_catalog' ] ,
$ node - > command_ok (
[ 'pg_dump' , '--port' = > $ port , '--schema' = > 'pg_catalog' ] ,
'pg_dump: option -n pg_catalog' ) ;
#########################################
# Test valid database exclusion patterns
$ node - > command_ok (
[ 'pg_dumpall' , '-p' , "$port" , '--exclude-database' , '"myhost.mydb"' ] ,
[
'pg_dumpall' ,
'--port' = > $ port ,
'--exclude-database' = > '"myhost.mydb"'
] ,
'pg_dumpall: option --exclude-database handles database names with embedded dots'
) ;
@@ -4806,28 +4886,28 @@ $node->command_ok(
# Test invalid multipart schema names
$ node - > command_fails_like (
[ 'pg_dump' , '--schema' , 'myhost.mydb.myschema' ] ,
[ 'pg_dump' , '--schema' = > 'myhost.mydb.myschema' ] ,
qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/ ,
'pg_dump: option --schema rejects three-part schema names' ) ;
$ node - > command_fails_like (
[ 'pg_dump' , '--schema' , 'otherdb.myschema' ] ,
[ 'pg_dump' , '--schema' = > 'otherdb.myschema' ] ,
qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/ ,
'pg_dump: option --schema rejects cross-database multipart schema names' ) ;
$ node - > command_fails_like (
[ 'pg_dump' , '--schema' , '.' ] ,
[ 'pg_dump' , '--schema' = > '.' ] ,
qr/pg_dump: error: cross-database references are not implemented: \./ ,
'pg_dump: option --schema rejects degenerate two-part schema name: "."' ) ;
$ node - > command_fails_like (
[ 'pg_dump' , '--schema' , '"some.other.db".myschema' ] ,
[ 'pg_dump' , '--schema' = > '"some.other.db".myschema' ] ,
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.myschema/ ,
'pg_dump: option --schema rejects cross-database multipart schema names with embedded dots'
) ;
$ node - > command_fails_like (
[ 'pg_dump' , '--schema' , '..' ] ,
[ 'pg_dump' , '--schema' = > '..' ] ,
qr/pg_dump: error: improper qualified name \(too many dotted names\): \.\./ ,
'pg_dump: option --schema rejects degenerate three-part schema name: ".."'
) ;
@@ -4836,19 +4916,20 @@ $node->command_fails_like(
# Test invalid multipart relation names
$ node - > command_fails_like (
[ 'pg_dump' , '--table' , 'myhost.mydb.myschema.mytable' ] ,
[ 'pg_dump' , '--table' = > 'myhost.mydb.myschema.mytable' ] ,
qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/ ,
'pg_dump: option --table rejects four-part table names' ) ;
$ node - > command_fails_like (
[ 'pg_dump' , '--table' , 'otherdb.pg_catalog.pg_class' ] ,
[ 'pg_dump' , '--table' = > 'otherdb.pg_catalog.pg_class' ] ,
qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/ ,
'pg_dump: option --table rejects cross-database three part table names' ) ;
command_fails_like (
[
'pg_dump' , '-p' , "$port" , '--table' ,
'"some.other.db".pg_catalog.pg_class'
'pg_dump' ,
'--port' = > $ port ,
'--table' = > '"some.other.db".pg_catalog.pg_class'
] ,
qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/ ,
'pg_dump: option --table rejects cross-database three part table names with embedded dots'