-rw-r--r--  .git-blame-ignore-revs  6
-rw-r--r--  config/llvm.m4  4
-rw-r--r--  config/programs.m4  4
-rwxr-xr-x  configure  87
-rw-r--r--  configure.ac  67
-rw-r--r--  contrib/auto_explain/Makefile  2
-rw-r--r--  contrib/auto_explain/expected/alter_reset.out  19
-rw-r--r--  contrib/auto_explain/meson.build  5
-rw-r--r--  contrib/auto_explain/sql/alter_reset.sql  22
-rw-r--r--  contrib/dblink/dblink.c  270
-rw-r--r--  contrib/pg_stat_statements/Makefile  3
-rw-r--r--  contrib/pg_stat_statements/expected/oldextversions.out  67
-rw-r--r--  contrib/pg_stat_statements/expected/plancache.out  224
-rw-r--r--  contrib/pg_stat_statements/meson.build  2
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.12--1.13.sql  78
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.c  53
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.control  2
-rw-r--r--  contrib/pg_stat_statements/sql/oldextversions.sql  5
-rw-r--r--  contrib/pg_stat_statements/sql/plancache.sql  94
-rw-r--r--  contrib/postgres_fdw/connection.c  255
-rw-r--r--  contrib/postgres_fdw/option.c  33
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.c  880
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.h  8
-rw-r--r--  doc/src/sgml/func.sgml  7
-rw-r--r--  doc/src/sgml/pageinspect.sgml  2
-rw-r--r--  doc/src/sgml/pgstatstatements.sgml  18
-rw-r--r--  doc/src/sgml/protocol.sgml  5
-rw-r--r--  doc/src/sgml/ref/create_trigger.sgml  8
-rw-r--r--  doc/src/sgml/ref/pg_dump.sgml  38
-rw-r--r--  doc/src/sgml/ref/pg_dumpall.sgml  127
-rw-r--r--  doc/src/sgml/ref/pg_restore.sgml  106
-rw-r--r--  doc/src/sgml/ref/vacuumdb.sgml  8
-rw-r--r--  meson.build  5
-rw-r--r--  src/Makefile.global.in  2
-rw-r--r--  src/backend/access/heap/visibilitymap.c  2
-rw-r--r--  src/backend/access/index/amapi.c  13
-rw-r--r--  src/backend/access/transam/xlog.c  10
-rw-r--r--  src/backend/catalog/pg_subscription.c  21
-rw-r--r--  src/backend/commands/explain.c  21
-rw-r--r--  src/backend/commands/foreigncmds.c  2
-rw-r--r--  src/backend/commands/schemacmds.c  2
-rw-r--r--  src/backend/executor/execParallel.c  2
-rw-r--r--  src/backend/jit/llvm/Makefile  2
-rw-r--r--  src/backend/optimizer/path/costsize.c  18
-rw-r--r--  src/backend/optimizer/plan/createplan.c  15
-rw-r--r--  src/backend/optimizer/plan/planner.c  2
-rw-r--r--  src/backend/optimizer/util/pathnode.c  11
-rw-r--r--  src/backend/parser/gram.y  124
-rw-r--r--  src/backend/postmaster/autovacuum.c  8
-rw-r--r--  src/backend/postmaster/bgworker.c  1
-rw-r--r--  src/backend/postmaster/checkpointer.c  155
-rw-r--r--  src/backend/postmaster/postmaster.c  7
-rw-r--r--  src/backend/replication/libpqwalreceiver/libpqwalreceiver.c  31
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c  2
-rw-r--r--  src/backend/replication/logical/tablesync.c  34
-rw-r--r--  src/backend/replication/syncrep_scanner.l  11
-rw-r--r--  src/backend/storage/buffer/bufmgr.c  13
-rw-r--r--  src/backend/storage/ipc/latch.c  8
-rw-r--r--  src/backend/storage/ipc/procsignal.c  6
-rw-r--r--  src/backend/tcop/postgres.c  2
-rw-r--r--  src/backend/tcop/utility.c  4
-rw-r--r--  src/backend/utils/activity/pgstat.c  52
-rw-r--r--  src/backend/utils/activity/pgstat_backend.c  14
-rw-r--r--  src/backend/utils/activity/pgstat_io.c  10
-rw-r--r--  src/backend/utils/activity/pgstat_slru.c  10
-rw-r--r--  src/backend/utils/activity/pgstat_wal.c  20
-rw-r--r--  src/backend/utils/adt/tid.c  2
-rw-r--r--  src/backend/utils/adt/xml.c  74
-rw-r--r--  src/backend/utils/cache/plancache.c  2
-rw-r--r--  src/backend/utils/hash/dynahash.c  10
-rw-r--r--  src/backend/utils/misc/guc.c  21
-rw-r--r--  src/backend/utils/mmgr/mcxt.c  39
-rw-r--r--  src/bin/initdb/Makefile  2
-rw-r--r--  src/bin/pg_dump/common.c  19
-rw-r--r--  src/bin/pg_dump/meson.build  1
-rw-r--r--  src/bin/pg_dump/parallel.c  10
-rw-r--r--  src/bin/pg_dump/pg_backup.h  2
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c  20
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.h  1
-rw-r--r--  src/bin/pg_dump/pg_backup_tar.c  2
-rw-r--r--  src/bin/pg_dump/pg_dump.c  107
-rw-r--r--  src/bin/pg_dump/pg_dump.h  6
-rw-r--r--  src/bin/pg_dump/pg_dump_sort.c  238
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c  254
-rw-r--r--  src/bin/pg_dump/pg_restore.c  810
-rw-r--r--  src/bin/pg_dump/t/001_basic.pl  22
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl  136
-rw-r--r--  src/bin/pg_dump/t/006_pg_dumpall.pl  400
-rw-r--r--  src/bin/pg_upgrade/check.c  20
-rw-r--r--  src/bin/pg_upgrade/dump.c  2
-rw-r--r--  src/bin/pg_upgrade/info.c  38
-rw-r--r--  src/bin/pg_upgrade/parallel.c  11
-rw-r--r--  src/bin/pg_upgrade/pg_upgrade.h  8
-rw-r--r--  src/bin/pg_upgrade/relfilenumber.c  57
-rw-r--r--  src/bin/pg_upgrade/t/006_transfer_modes.pl  35
-rw-r--r--  src/bin/pg_upgrade/tablespace.c  65
-rw-r--r--  src/bin/psql/tab-complete.in.c  16
-rw-r--r--  src/common/Makefile  2
-rw-r--r--  src/include/catalog/pg_subscription_rel.h  2
-rw-r--r--  src/include/libpq/libpq-be-fe-helpers.h  74
-rw-r--r--  src/include/libpq/libpq-be-fe.h  259
-rw-r--r--  src/include/nodes/pathnodes.h  4
-rw-r--r--  src/include/nodes/plannodes.h  31
-rw-r--r--  src/include/optimizer/pathnode.h  2
-rw-r--r--  src/include/utils/palloc.h  2
-rw-r--r--  src/include/utils/pgstat_internal.h  34
-rw-r--r--  src/include/utils/pgstat_kind.h  6
-rw-r--r--  src/interfaces/ecpg/ecpglib/prepare.c  9
-rw-r--r--  src/interfaces/libpq-oauth/Makefile  2
-rw-r--r--  src/interfaces/libpq/Makefile  2
-rw-r--r--  src/interfaces/libpq/fe-cancel.c  28
-rw-r--r--  src/pl/plpython/Makefile  2
-rw-r--r--  src/pl/tcl/Makefile  2
-rw-r--r--  src/test/modules/injection_points/injection_stats.c  2
-rw-r--r--  src/test/modules/injection_points/injection_stats_fixed.c  2
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm  1
-rw-r--r--  src/test/recovery/t/013_crash_restart.pl  7
-rw-r--r--  src/test/regress/expected/foreign_key.out  2
-rw-r--r--  src/test/regress/expected/publication.out  21
-rw-r--r--  src/test/regress/sql/foreign_key.sql  2
-rw-r--r--  src/test/regress/sql/publication.sql  22
-rw-r--r--  src/tools/pgindent/typedefs.list  3
122 files changed, 2929 insertions, 3173 deletions
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index 1ee1dee0111..f83e2fc6586 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -14,6 +14,12 @@
#
# $ git log --pretty=format:"%H # %cd%n# %s" $PGINDENTGITHASH -1 --date=iso
+1d1612aec7688139e1a5506df1366b4b6a69605d # 2025-07-29 09:10:41 -0400
+# Run pgindent.
+
+73873805fb3627cb23937c750fa83ffd8f16fc6c # 2025-07-25 16:36:44 -0400
+# Run pgindent on the changes of the previous patch.
+
9e345415bcd3c4358350b89edfd710469b8bfaf9 # 2025-07-01 15:23:07 +0200
# Fix indentation in pg_numa code
diff --git a/config/llvm.m4 b/config/llvm.m4
index fa4bedd9370..9d6fe8199e3 100644
--- a/config/llvm.m4
+++ b/config/llvm.m4
@@ -4,7 +4,7 @@
# -----------------
#
# Look for the LLVM installation, check that it's new enough, set the
-# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH} and LDFLAGS
+# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH,LIBS}
# variables. Also verify that CLANG is available, to transform C
# into bitcode.
#
@@ -55,7 +55,7 @@ AC_DEFUN([PGAC_LLVM_SUPPORT],
for pgac_option in `$LLVM_CONFIG --ldflags`; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LLVM_LIBS="$LLVM_LIBS $pgac_option";;
esac
done
diff --git a/config/programs.m4 b/config/programs.m4
index c73d9307ea8..e57fe4907b8 100644
--- a/config/programs.m4
+++ b/config/programs.m4
@@ -290,8 +290,8 @@ AC_DEFUN([PGAC_CHECK_LIBCURL],
pgac_save_LDFLAGS=$LDFLAGS
pgac_save_LIBS=$LIBS
- CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS"
- LDFLAGS="$LIBCURL_LDFLAGS $LDFLAGS"
+ CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS"
+ LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS"
AC_CHECK_HEADER(curl/curl.h, [],
[AC_MSG_ERROR([header file <curl/curl.h> is required for --with-libcurl])])
diff --git a/configure b/configure
index 6d7c22e153f..507a2437c33 100755
--- a/configure
+++ b/configure
@@ -5194,7 +5194,7 @@ fi
for pgac_option in `$LLVM_CONFIG --ldflags`; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LLVM_LIBS="$LLVM_LIBS $pgac_option";;
esac
done
@@ -9436,12 +9436,12 @@ fi
# Note the user could also set XML2_CFLAGS/XML2_LIBS directly
for pgac_option in $XML2_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $XML2_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -9666,12 +9666,12 @@ fi
# note that -llz4 will be added by AC_CHECK_LIB below.
for pgac_option in $LZ4_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $LZ4_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -9807,12 +9807,12 @@ fi
# note that -lzstd will be added by AC_CHECK_LIB below.
for pgac_option in $ZSTD_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $ZSTD_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -12723,8 +12723,8 @@ if test "$with_libcurl" = yes ; then
pgac_save_LDFLAGS=$LDFLAGS
pgac_save_LIBS=$LIBS
- CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS"
- LDFLAGS="$LIBCURL_LDFLAGS $LDFLAGS"
+ CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS"
+ LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS"
ac_fn_c_check_header_mongrel "$LINENO" "curl/curl.h" "ac_cv_header_curl_curl_h" "$ac_includes_default"
if test "x$ac_cv_header_curl_curl_h" = xyes; then :
@@ -16658,7 +16658,7 @@ fi
if test "$with_icu" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$ICU_CFLAGS $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $ICU_CFLAGS"
# Verify we have ICU's header files
ac_fn_c_check_header_mongrel "$LINENO" "unicode/ucol.h" "ac_cv_header_unicode_ucol_h" "$ac_includes_default"
@@ -17565,7 +17565,7 @@ $as_echo "#define HAVE_GCC__ATOMIC_INT64_CAS 1" >>confdefs.h
fi
-# Check for x86 cpuid instruction
+# Check for __get_cpuid() and __cpuid()
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid" >&5
$as_echo_n "checking for __get_cpuid... " >&6; }
if ${pgac_cv__get_cpuid+:} false; then :
@@ -17598,77 +17598,79 @@ if test x"$pgac_cv__get_cpuid" = x"yes"; then
$as_echo "#define HAVE__GET_CPUID 1" >>confdefs.h
-fi
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid_count" >&5
-$as_echo_n "checking for __get_cpuid_count... " >&6; }
-if ${pgac_cv__get_cpuid_count+:} false; then :
+else
+ # __cpuid()
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuid" >&5
+$as_echo_n "checking for __cpuid... " >&6; }
+if ${pgac_cv__cpuid+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-#include <cpuid.h>
+#include <intrin.h>
int
main ()
{
unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuid_count(7, 0, &exx[0], &exx[1], &exx[2], &exx[3]);
+ __cpuid(exx, 1);
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- pgac_cv__get_cpuid_count="yes"
+ pgac_cv__cpuid="yes"
else
- pgac_cv__get_cpuid_count="no"
+ pgac_cv__cpuid="no"
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__get_cpuid_count" >&5
-$as_echo "$pgac_cv__get_cpuid_count" >&6; }
-if test x"$pgac_cv__get_cpuid_count" = x"yes"; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuid" >&5
+$as_echo "$pgac_cv__cpuid" >&6; }
+ if test x"$pgac_cv__cpuid" = x"yes"; then
-$as_echo "#define HAVE__GET_CPUID_COUNT 1" >>confdefs.h
+$as_echo "#define HAVE__CPUID 1" >>confdefs.h
+ fi
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuid" >&5
-$as_echo_n "checking for __cpuid... " >&6; }
-if ${pgac_cv__cpuid+:} false; then :
+# Check for __get_cpuid_count() and __cpuidex() in a similar fashion.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid_count" >&5
+$as_echo_n "checking for __get_cpuid_count... " >&6; }
+if ${pgac_cv__get_cpuid_count+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-#include <intrin.h>
+#include <cpuid.h>
int
main ()
{
unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuid(exx[0], 1);
+ __get_cpuid_count(7, 0, &exx[0], &exx[1], &exx[2], &exx[3]);
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- pgac_cv__cpuid="yes"
+ pgac_cv__get_cpuid_count="yes"
else
- pgac_cv__cpuid="no"
+ pgac_cv__get_cpuid_count="no"
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuid" >&5
-$as_echo "$pgac_cv__cpuid" >&6; }
-if test x"$pgac_cv__cpuid" = x"yes"; then
-
-$as_echo "#define HAVE__CPUID 1" >>confdefs.h
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__get_cpuid_count" >&5
+$as_echo "$pgac_cv__get_cpuid_count" >&6; }
+if test x"$pgac_cv__get_cpuid_count" = x"yes"; then
-fi
+$as_echo "#define HAVE__GET_CPUID_COUNT 1" >>confdefs.h
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuidex" >&5
+else
+ # __cpuidex()
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuidex" >&5
$as_echo_n "checking for __cpuidex... " >&6; }
if ${pgac_cv__cpuidex+:} false; then :
$as_echo_n "(cached) " >&6
@@ -17680,7 +17682,7 @@ int
main ()
{
unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuidex(exx[0], 7, 0);
+ __cpuidex(exx, 7, 0);
;
return 0;
@@ -17696,10 +17698,11 @@ rm -f core conftest.err conftest.$ac_objext \
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuidex" >&5
$as_echo "$pgac_cv__cpuidex" >&6; }
-if test x"$pgac_cv__cpuidex" = x"yes"; then
+ if test x"$pgac_cv__cpuidex" = x"yes"; then
$as_echo "#define HAVE__CPUIDEX 1" >>confdefs.h
+ fi
fi
# Check for XSAVE intrinsics
@@ -18876,7 +18879,7 @@ Use --without-tcl to disable building PL/Tcl." "$LINENO" 5
fi
# now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h>
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $TCL_INCLUDE_SPEC"
ac_fn_c_check_header_mongrel "$LINENO" "tcl.h" "ac_cv_header_tcl_h" "$ac_includes_default"
if test "x$ac_cv_header_tcl_h" = xyes; then :
@@ -18945,7 +18948,7 @@ fi
# check for <Python.h>
if test "$with_python" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$python_includespec $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $python_includespec"
ac_fn_c_check_header_mongrel "$LINENO" "Python.h" "ac_cv_header_Python_h" "$ac_includes_default"
if test "x$ac_cv_header_Python_h" = xyes; then :
diff --git a/configure.ac b/configure.ac
index c2877e36935..5f4548adc5c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1103,12 +1103,12 @@ if test "$with_libxml" = yes ; then
# Note the user could also set XML2_CFLAGS/XML2_LIBS directly
for pgac_option in $XML2_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $XML2_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -1152,12 +1152,12 @@ if test "$with_lz4" = yes; then
# note that -llz4 will be added by AC_CHECK_LIB below.
for pgac_option in $LZ4_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $LZ4_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -1177,12 +1177,12 @@ if test "$with_zstd" = yes; then
# note that -lzstd will be added by AC_CHECK_LIB below.
for pgac_option in $ZSTD_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $ZSTD_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -1944,7 +1944,7 @@ fi
if test "$with_icu" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$ICU_CFLAGS $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $ICU_CFLAGS"
# Verify we have ICU's header files
AC_CHECK_HEADER(unicode/ucol.h, [],
@@ -2044,7 +2044,7 @@ PGAC_HAVE_GCC__ATOMIC_INT32_CAS
PGAC_HAVE_GCC__ATOMIC_INT64_CAS
-# Check for x86 cpuid instruction
+# Check for __get_cpuid() and __cpuid()
AC_CACHE_CHECK([for __get_cpuid], [pgac_cv__get_cpuid],
[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <cpuid.h>],
[[unsigned int exx[4] = {0, 0, 0, 0};
@@ -2054,8 +2054,21 @@ AC_CACHE_CHECK([for __get_cpuid], [pgac_cv__get_cpuid],
[pgac_cv__get_cpuid="no"])])
if test x"$pgac_cv__get_cpuid" = x"yes"; then
AC_DEFINE(HAVE__GET_CPUID, 1, [Define to 1 if you have __get_cpuid.])
+else
+ # __cpuid()
+ AC_CACHE_CHECK([for __cpuid], [pgac_cv__cpuid],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>],
+ [[unsigned int exx[4] = {0, 0, 0, 0};
+ __cpuid(exx, 1);
+ ]])],
+ [pgac_cv__cpuid="yes"],
+ [pgac_cv__cpuid="no"])])
+ if test x"$pgac_cv__cpuid" = x"yes"; then
+ AC_DEFINE(HAVE__CPUID, 1, [Define to 1 if you have __cpuid.])
+ fi
fi
+# Check for __get_cpuid_count() and __cpuidex() in a similar fashion.
AC_CACHE_CHECK([for __get_cpuid_count], [pgac_cv__get_cpuid_count],
[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <cpuid.h>],
[[unsigned int exx[4] = {0, 0, 0, 0};
@@ -2065,28 +2078,18 @@ AC_CACHE_CHECK([for __get_cpuid_count], [pgac_cv__get_cpuid_count],
[pgac_cv__get_cpuid_count="no"])])
if test x"$pgac_cv__get_cpuid_count" = x"yes"; then
AC_DEFINE(HAVE__GET_CPUID_COUNT, 1, [Define to 1 if you have __get_cpuid_count.])
-fi
-
-AC_CACHE_CHECK([for __cpuid], [pgac_cv__cpuid],
-[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>],
- [[unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuid(exx[0], 1);
- ]])],
- [pgac_cv__cpuid="yes"],
- [pgac_cv__cpuid="no"])])
-if test x"$pgac_cv__cpuid" = x"yes"; then
- AC_DEFINE(HAVE__CPUID, 1, [Define to 1 if you have __cpuid.])
-fi
-
-AC_CACHE_CHECK([for __cpuidex], [pgac_cv__cpuidex],
-[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>],
- [[unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuidex(exx[0], 7, 0);
- ]])],
- [pgac_cv__cpuidex="yes"],
- [pgac_cv__cpuidex="no"])])
-if test x"$pgac_cv__cpuidex" = x"yes"; then
- AC_DEFINE(HAVE__CPUIDEX, 1, [Define to 1 if you have __cpuidex.])
+else
+ # __cpuidex()
+ AC_CACHE_CHECK([for __cpuidex], [pgac_cv__cpuidex],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>],
+ [[unsigned int exx[4] = {0, 0, 0, 0};
+ __cpuidex(exx, 7, 0);
+ ]])],
+ [pgac_cv__cpuidex="yes"],
+ [pgac_cv__cpuidex="no"])])
+ if test x"$pgac_cv__cpuidex" = x"yes"; then
+ AC_DEFINE(HAVE__CPUIDEX, 1, [Define to 1 if you have __cpuidex.])
+ fi
fi
# Check for XSAVE intrinsics
@@ -2344,7 +2347,7 @@ Use --without-tcl to disable building PL/Tcl.])
fi
# now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h>
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $TCL_INCLUDE_SPEC"
AC_CHECK_HEADER(tcl.h, [], [AC_MSG_ERROR([header file <tcl.h> is required for Tcl])])
CPPFLAGS=$ac_save_CPPFLAGS
fi
@@ -2381,7 +2384,7 @@ fi
# check for <Python.h>
if test "$with_python" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$python_includespec $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $python_includespec"
AC_CHECK_HEADER(Python.h, [], [AC_MSG_ERROR([header file <Python.h> is required for Python])])
CPPFLAGS=$ac_save_CPPFLAGS
fi
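
[Editor's note] The configure.ac hunks above restructure the CPUID probes into fallbacks: __cpuid() (MSVC, <intrin.h>) is only probed when __get_cpuid() (GCC/Clang, <cpuid.h>) is unavailable, and likewise __cpuidex() for __get_cpuid_count(); they also fix the old probes, which wrongly tried to link __get_cpuid(exx[0], 1) and the nonexistent __get_cpuidex(). A minimal sketch of how code consuming the resulting HAVE_* macros might select an intrinsic; the function name pg_cpuid_demo is illustrative, not from the patch:

/*
 * Sketch only: picks the GCC/Clang intrinsic when available, falling
 * back to the MSVC one, mirroring the configure checks above.
 */
#if defined(HAVE__GET_CPUID)
#include <cpuid.h>
#elif defined(HAVE__CPUID)
#include <intrin.h>
#endif

static void
pg_cpuid_demo(unsigned int exx[4])
{
#if defined(HAVE__GET_CPUID)
	/* GCC/Clang: leaf 1, outputs written through pointers */
	__get_cpuid(1, &exx[0], &exx[1], &exx[2], &exx[3]);
#elif defined(HAVE__CPUID)
	/* MSVC: outputs written into the array argument */
	__cpuid(exx, 1);
#endif
}
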
diff --git a/contrib/auto_explain/Makefile b/contrib/auto_explain/Makefile
index efd127d3cae..94ab28e7c06 100644
--- a/contrib/auto_explain/Makefile
+++ b/contrib/auto_explain/Makefile
@@ -6,6 +6,8 @@ OBJS = \
auto_explain.o
PGFILEDESC = "auto_explain - logging facility for execution plans"
+REGRESS = alter_reset
+
TAP_TESTS = 1
ifdef USE_PGXS
diff --git a/contrib/auto_explain/expected/alter_reset.out b/contrib/auto_explain/expected/alter_reset.out
new file mode 100644
index 00000000000..ec355189806
--- /dev/null
+++ b/contrib/auto_explain/expected/alter_reset.out
@@ -0,0 +1,19 @@
+--
+-- This tests resetting unknown custom GUCs with reserved prefixes. There's
+-- nothing specific to auto_explain; this is just a convenient place to put
+-- this test.
+--
+SELECT current_database() AS datname \gset
+CREATE ROLE regress_ae_role;
+ALTER DATABASE :"datname" SET auto_explain.bogus = 1;
+ALTER ROLE regress_ae_role SET auto_explain.bogus = 1;
+ALTER ROLE regress_ae_role IN DATABASE :"datname" SET auto_explain.bogus = 1;
+ALTER SYSTEM SET auto_explain.bogus = 1;
+LOAD 'auto_explain';
+WARNING: invalid configuration parameter name "auto_explain.bogus", removing it
+DETAIL: "auto_explain" is now a reserved prefix.
+ALTER DATABASE :"datname" RESET auto_explain.bogus;
+ALTER ROLE regress_ae_role RESET auto_explain.bogus;
+ALTER ROLE regress_ae_role IN DATABASE :"datname" RESET auto_explain.bogus;
+ALTER SYSTEM RESET auto_explain.bogus;
+DROP ROLE regress_ae_role;
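
[Editor's note] The WARNING in the expected output comes from prefix reservation at module load time: once a module reserves its GUC prefix, placeholder settings under that prefix that match no defined variable are removed. A minimal sketch of that mechanism, assuming the usual _PG_init() pattern (the DefineCustom* calls are elided; this is not code from the patch):

#include "postgres.h"
#include "utils/guc.h"

void
_PG_init(void)
{
	/* DefineCustomBoolVariable() etc. would go here */

	/* removes unknown "auto_explain.*" placeholders, as tested above */
	MarkGUCPrefixReserved("auto_explain");
}
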
diff --git a/contrib/auto_explain/meson.build b/contrib/auto_explain/meson.build
index 92dc9df6f7c..a9b45cc235f 100644
--- a/contrib/auto_explain/meson.build
+++ b/contrib/auto_explain/meson.build
@@ -20,6 +20,11 @@ tests += {
'name': 'auto_explain',
'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(),
+ 'regress': {
+ 'sql': [
+ 'alter_reset',
+ ],
+ },
'tap': {
'tests': [
't/001_auto_explain.pl',
diff --git a/contrib/auto_explain/sql/alter_reset.sql b/contrib/auto_explain/sql/alter_reset.sql
new file mode 100644
index 00000000000..bf621454ec2
--- /dev/null
+++ b/contrib/auto_explain/sql/alter_reset.sql
@@ -0,0 +1,22 @@
+--
+-- This tests resetting unknown custom GUCs with reserved prefixes. There's
+-- nothing specific to auto_explain; this is just a convenient place to put
+-- this test.
+--
+
+SELECT current_database() AS datname \gset
+CREATE ROLE regress_ae_role;
+
+ALTER DATABASE :"datname" SET auto_explain.bogus = 1;
+ALTER ROLE regress_ae_role SET auto_explain.bogus = 1;
+ALTER ROLE regress_ae_role IN DATABASE :"datname" SET auto_explain.bogus = 1;
+ALTER SYSTEM SET auto_explain.bogus = 1;
+
+LOAD 'auto_explain';
+
+ALTER DATABASE :"datname" RESET auto_explain.bogus;
+ALTER ROLE regress_ae_role RESET auto_explain.bogus;
+ALTER ROLE regress_ae_role IN DATABASE :"datname" RESET auto_explain.bogus;
+ALTER SYSTEM RESET auto_explain.bogus;
+
+DROP ROLE regress_ae_role;
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index de5bed282f3..f98805fb5f7 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -101,8 +101,8 @@ static void materializeQueryResult(FunctionCallInfo fcinfo,
const char *conname,
const char *sql,
bool fail);
-static PGresult *storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql);
-static void storeRow(volatile storeInfo *sinfo, PGresult *res, bool first);
+static PGresult *storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql);
+static void storeRow(storeInfo *sinfo, PGresult *res, bool first);
static remoteConn *getConnectionByName(const char *name);
static HTAB *createConnHash(void);
static remoteConn *createNewConnection(const char *name);
@@ -169,14 +169,6 @@ typedef struct remoteConnHashEnt
/* initial number of connection hashes */
#define NUMCONN 16
-static char *
-xpstrdup(const char *in)
-{
- if (in == NULL)
- return NULL;
- return pstrdup(in);
-}
-
pg_noreturn static void
dblink_res_internalerror(PGconn *conn, PGresult *res, const char *p2)
{
@@ -870,131 +862,123 @@ static void
materializeResult(FunctionCallInfo fcinfo, PGconn *conn, PGresult *res)
{
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ bool is_sql_cmd;
+ int ntuples;
+ int nfields;
/* prepTuplestoreResult must have been called previously */
Assert(rsinfo->returnMode == SFRM_Materialize);
- PG_TRY();
+ if (PQresultStatus(res) == PGRES_COMMAND_OK)
{
- TupleDesc tupdesc;
- bool is_sql_cmd;
- int ntuples;
- int nfields;
+ is_sql_cmd = true;
- if (PQresultStatus(res) == PGRES_COMMAND_OK)
- {
- is_sql_cmd = true;
+ /*
+ * need a tuple descriptor representing one TEXT column to return the
+ * command status string as our result tuple
+ */
+ tupdesc = CreateTemplateTupleDesc(1);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "status",
+ TEXTOID, -1, 0);
+ ntuples = 1;
+ nfields = 1;
+ }
+ else
+ {
+ Assert(PQresultStatus(res) == PGRES_TUPLES_OK);
- /*
- * need a tuple descriptor representing one TEXT column to return
- * the command status string as our result tuple
- */
- tupdesc = CreateTemplateTupleDesc(1);
- TupleDescInitEntry(tupdesc, (AttrNumber) 1, "status",
- TEXTOID, -1, 0);
- ntuples = 1;
- nfields = 1;
- }
- else
- {
- Assert(PQresultStatus(res) == PGRES_TUPLES_OK);
+ is_sql_cmd = false;
- is_sql_cmd = false;
+ /* get a tuple descriptor for our result type */
+ switch (get_call_result_type(fcinfo, NULL, &tupdesc))
+ {
+ case TYPEFUNC_COMPOSITE:
+ /* success */
+ break;
+ case TYPEFUNC_RECORD:
+ /* failed to determine actual type of RECORD */
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("function returning record called in context "
+ "that cannot accept type record")));
+ break;
+ default:
+ /* result type isn't composite */
+ elog(ERROR, "return type must be a row type");
+ break;
+ }
- /* get a tuple descriptor for our result type */
- switch (get_call_result_type(fcinfo, NULL, &tupdesc))
- {
- case TYPEFUNC_COMPOSITE:
- /* success */
- break;
- case TYPEFUNC_RECORD:
- /* failed to determine actual type of RECORD */
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("function returning record called in context "
- "that cannot accept type record")));
- break;
- default:
- /* result type isn't composite */
- elog(ERROR, "return type must be a row type");
- break;
- }
+ /* make sure we have a persistent copy of the tupdesc */
+ tupdesc = CreateTupleDescCopy(tupdesc);
+ ntuples = PQntuples(res);
+ nfields = PQnfields(res);
+ }
- /* make sure we have a persistent copy of the tupdesc */
- tupdesc = CreateTupleDescCopy(tupdesc);
- ntuples = PQntuples(res);
- nfields = PQnfields(res);
- }
+ /*
+ * check result and tuple descriptor have the same number of columns
+ */
+ if (nfields != tupdesc->natts)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("remote query result rowtype does not match "
+ "the specified FROM clause rowtype")));
- /*
- * check result and tuple descriptor have the same number of columns
- */
- if (nfields != tupdesc->natts)
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("remote query result rowtype does not match "
- "the specified FROM clause rowtype")));
+ if (ntuples > 0)
+ {
+ AttInMetadata *attinmeta;
+ int nestlevel = -1;
+ Tuplestorestate *tupstore;
+ MemoryContext oldcontext;
+ int row;
+ char **values;
- if (ntuples > 0)
- {
- AttInMetadata *attinmeta;
- int nestlevel = -1;
- Tuplestorestate *tupstore;
- MemoryContext oldcontext;
- int row;
- char **values;
+ attinmeta = TupleDescGetAttInMetadata(tupdesc);
- attinmeta = TupleDescGetAttInMetadata(tupdesc);
+ /* Set GUCs to ensure we read GUC-sensitive data types correctly */
+ if (!is_sql_cmd)
+ nestlevel = applyRemoteGucs(conn);
- /* Set GUCs to ensure we read GUC-sensitive data types correctly */
- if (!is_sql_cmd)
- nestlevel = applyRemoteGucs(conn);
+ oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
+ rsinfo->setResult = tupstore;
+ rsinfo->setDesc = tupdesc;
+ MemoryContextSwitchTo(oldcontext);
- oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
- tupstore = tuplestore_begin_heap(true, false, work_mem);
- rsinfo->setResult = tupstore;
- rsinfo->setDesc = tupdesc;
- MemoryContextSwitchTo(oldcontext);
+ values = palloc_array(char *, nfields);
- values = palloc_array(char *, nfields);
+ /* put all tuples into the tuplestore */
+ for (row = 0; row < ntuples; row++)
+ {
+ HeapTuple tuple;
- /* put all tuples into the tuplestore */
- for (row = 0; row < ntuples; row++)
+ if (!is_sql_cmd)
{
- HeapTuple tuple;
+ int i;
- if (!is_sql_cmd)
- {
- int i;
-
- for (i = 0; i < nfields; i++)
- {
- if (PQgetisnull(res, row, i))
- values[i] = NULL;
- else
- values[i] = PQgetvalue(res, row, i);
- }
- }
- else
+ for (i = 0; i < nfields; i++)
{
- values[0] = PQcmdStatus(res);
+ if (PQgetisnull(res, row, i))
+ values[i] = NULL;
+ else
+ values[i] = PQgetvalue(res, row, i);
}
-
- /* build the tuple and put it into the tuplestore. */
- tuple = BuildTupleFromCStrings(attinmeta, values);
- tuplestore_puttuple(tupstore, tuple);
+ }
+ else
+ {
+ values[0] = PQcmdStatus(res);
}
- /* clean up GUC settings, if we changed any */
- restoreLocalGucs(nestlevel);
+ /* build the tuple and put it into the tuplestore. */
+ tuple = BuildTupleFromCStrings(attinmeta, values);
+ tuplestore_puttuple(tupstore, tuple);
}
+
+ /* clean up GUC settings, if we changed any */
+ restoreLocalGucs(nestlevel);
}
- PG_FINALLY();
- {
- /* be sure to release the libpq result */
- PQclear(res);
- }
- PG_END_TRY();
+
+ PQclear(res);
}
/*
@@ -1013,16 +997,17 @@ materializeQueryResult(FunctionCallInfo fcinfo,
bool fail)
{
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- PGresult *volatile res = NULL;
- volatile storeInfo sinfo = {0};
/* prepTuplestoreResult must have been called previously */
Assert(rsinfo->returnMode == SFRM_Materialize);
- sinfo.fcinfo = fcinfo;
-
+ /* Use a PG_TRY block to ensure we pump libpq dry of results */
PG_TRY();
{
+ storeInfo sinfo = {0};
+ PGresult *res;
+
+ sinfo.fcinfo = fcinfo;
/* Create short-lived memory context for data conversions */
sinfo.tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
"dblink temporary context",
@@ -1035,14 +1020,7 @@ materializeQueryResult(FunctionCallInfo fcinfo,
(PQresultStatus(res) != PGRES_COMMAND_OK &&
PQresultStatus(res) != PGRES_TUPLES_OK))
{
- /*
- * dblink_res_error will clear the passed PGresult, so we need
- * this ugly dance to avoid doing so twice during error exit
- */
- PGresult *res1 = res;
-
- res = NULL;
- dblink_res_error(conn, conname, res1, fail,
+ dblink_res_error(conn, conname, res, fail,
"while executing query");
/* if fail isn't set, we'll return an empty query result */
}
@@ -1081,7 +1059,6 @@ materializeQueryResult(FunctionCallInfo fcinfo,
tuplestore_puttuple(tupstore, tuple);
PQclear(res);
- res = NULL;
}
else
{
@@ -1090,26 +1067,20 @@ materializeQueryResult(FunctionCallInfo fcinfo,
Assert(rsinfo->setResult != NULL);
PQclear(res);
- res = NULL;
}
/* clean up data conversion short-lived memory context */
if (sinfo.tmpcontext != NULL)
MemoryContextDelete(sinfo.tmpcontext);
- sinfo.tmpcontext = NULL;
PQclear(sinfo.last_res);
- sinfo.last_res = NULL;
PQclear(sinfo.cur_res);
- sinfo.cur_res = NULL;
}
PG_CATCH();
{
- /* be sure to release any libpq result we collected */
- PQclear(res);
- PQclear(sinfo.last_res);
- PQclear(sinfo.cur_res);
- /* and clear out any pending data in libpq */
+ PGresult *res;
+
+ /* be sure to clear out any pending data in libpq */
while ((res = libpqsrv_get_result(conn, dblink_we_get_result)) !=
NULL)
PQclear(res);
@@ -1122,7 +1093,7 @@ materializeQueryResult(FunctionCallInfo fcinfo,
* Execute query, and send any result rows to sinfo->tuplestore.
*/
static PGresult *
-storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql)
+storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql)
{
bool first = true;
int nestlevel = -1;
@@ -1190,7 +1161,7 @@ storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql)
* (in this case the PGresult might contain either zero or one row).
*/
static void
-storeRow(volatile storeInfo *sinfo, PGresult *res, bool first)
+storeRow(storeInfo *sinfo, PGresult *res, bool first)
{
int nfields = PQnfields(res);
HeapTuple tuple;
@@ -2795,10 +2766,13 @@ dblink_connstr_check(const char *connstr)
/*
* Report an error received from the remote server
*
- * res: the received error result (will be freed)
+ * res: the received error result
* fail: true for ERROR ereport, false for NOTICE
* fmt and following args: sprintf-style format and values for errcontext;
* the resulting string should be worded like "while <some action>"
+ *
+ * If "res" is not NULL, it'll be PQclear'ed here (unless we throw error,
+ * in which case memory context cleanup will clear it eventually).
*/
static void
dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
@@ -2806,15 +2780,11 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
{
int level;
char *pg_diag_sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
- char *pg_diag_message_primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);
- char *pg_diag_message_detail = PQresultErrorField(res, PG_DIAG_MESSAGE_DETAIL);
- char *pg_diag_message_hint = PQresultErrorField(res, PG_DIAG_MESSAGE_HINT);
- char *pg_diag_context = PQresultErrorField(res, PG_DIAG_CONTEXT);
+ char *message_primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);
+ char *message_detail = PQresultErrorField(res, PG_DIAG_MESSAGE_DETAIL);
+ char *message_hint = PQresultErrorField(res, PG_DIAG_MESSAGE_HINT);
+ char *message_context = PQresultErrorField(res, PG_DIAG_CONTEXT);
int sqlstate;
- char *message_primary;
- char *message_detail;
- char *message_hint;
- char *message_context;
va_list ap;
char dblink_context_msg[512];
@@ -2832,11 +2802,6 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
else
sqlstate = ERRCODE_CONNECTION_FAILURE;
- message_primary = xpstrdup(pg_diag_message_primary);
- message_detail = xpstrdup(pg_diag_message_detail);
- message_hint = xpstrdup(pg_diag_message_hint);
- message_context = xpstrdup(pg_diag_context);
-
/*
* If we don't get a message from the PGresult, try the PGconn. This is
* needed because for connection-level failures, PQgetResult may just
@@ -2846,14 +2811,6 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
message_primary = pchomp(PQerrorMessage(conn));
/*
- * Now that we've copied all the data we need out of the PGresult, it's
- * safe to free it. We must do this to avoid PGresult leakage. We're
- * leaking all the strings too, but those are in palloc'd memory that will
- * get cleaned up eventually.
- */
- PQclear(res);
-
- /*
* Format the basic errcontext string. Below, we'll add on something
* about the connection name. That's a violation of the translatability
* guidelines about constructing error messages out of parts, but since
@@ -2877,6 +2834,7 @@ dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
dblink_context_msg, conname)) :
(errcontext("%s on unnamed dblink connection",
dblink_context_msg))));
+ PQclear(res);
}
/*
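
[Editor's note] The dblink rewrite above rests on one ownership rule, stated in the new comment on dblink_res_error(): an ERROR-level report never returns, and memory-context cleanup eventually releases a registered PGresult, so pre-copying fields with xpstrdup() and the PG_TRY dance in materializeResult() become unnecessary. A hedged sketch of the pattern with illustrative names, not patch code:

#include "postgres.h"
#include "libpq-fe.h"

/*
 * Reads fields directly out of the PGresult and clears it only on the
 * non-error path; at ERROR level, ereport() longjmps out and cleanup
 * releases the result later.
 */
static void
report_result_demo(PGconn *conn, PGresult *res, bool fail)
{
	char	   *primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);

	ereport(fail ? ERROR : NOTICE,
			errmsg("remote error: %s",
				   primary ? primary : pchomp(PQerrorMessage(conn))));

	/* reached only when fail is false */
	PQclear(res);
}
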
diff --git a/contrib/pg_stat_statements/Makefile b/contrib/pg_stat_statements/Makefile
index b2bd8794d2a..fe0478ac552 100644
--- a/contrib/pg_stat_statements/Makefile
+++ b/contrib/pg_stat_statements/Makefile
@@ -7,6 +7,7 @@ OBJS = \
EXTENSION = pg_stat_statements
DATA = pg_stat_statements--1.4.sql \
+ pg_stat_statements--1.12--1.13.sql \
pg_stat_statements--1.11--1.12.sql pg_stat_statements--1.10--1.11.sql \
pg_stat_statements--1.9--1.10.sql pg_stat_statements--1.8--1.9.sql \
pg_stat_statements--1.7--1.8.sql pg_stat_statements--1.6--1.7.sql \
@@ -20,7 +21,7 @@ LDFLAGS_SL += $(filter -lm, $(LIBS))
REGRESS_OPTS = --temp-config $(top_srcdir)/contrib/pg_stat_statements/pg_stat_statements.conf
REGRESS = select dml cursors utility level_tracking planning \
user_activity wal entry_timestamp privileges extended \
- parallel cleanup oldextversions squashing
+ parallel plancache cleanup oldextversions squashing
# Disabled because these tests require "shared_preload_libraries=pg_stat_statements",
# which typical installcheck users do not have (e.g. buildfarm clients).
NO_INSTALLCHECK = 1
diff --git a/contrib/pg_stat_statements/expected/oldextversions.out b/contrib/pg_stat_statements/expected/oldextversions.out
index de679b19711..726383a99d7 100644
--- a/contrib/pg_stat_statements/expected/oldextversions.out
+++ b/contrib/pg_stat_statements/expected/oldextversions.out
@@ -407,4 +407,71 @@ SELECT count(*) > 0 AS has_data FROM pg_stat_statements;
t
(1 row)
+-- New functions and views for pg_stat_statements in 1.13
+AlTER EXTENSION pg_stat_statements UPDATE TO '1.13';
+\d pg_stat_statements
+ View "public.pg_stat_statements"
+ Column | Type | Collation | Nullable | Default
+----------------------------+--------------------------+-----------+----------+---------
+ userid | oid | | |
+ dbid | oid | | |
+ toplevel | boolean | | |
+ queryid | bigint | | |
+ query | text | | |
+ plans | bigint | | |
+ total_plan_time | double precision | | |
+ min_plan_time | double precision | | |
+ max_plan_time | double precision | | |
+ mean_plan_time | double precision | | |
+ stddev_plan_time | double precision | | |
+ calls | bigint | | |
+ total_exec_time | double precision | | |
+ min_exec_time | double precision | | |
+ max_exec_time | double precision | | |
+ mean_exec_time | double precision | | |
+ stddev_exec_time | double precision | | |
+ rows | bigint | | |
+ shared_blks_hit | bigint | | |
+ shared_blks_read | bigint | | |
+ shared_blks_dirtied | bigint | | |
+ shared_blks_written | bigint | | |
+ local_blks_hit | bigint | | |
+ local_blks_read | bigint | | |
+ local_blks_dirtied | bigint | | |
+ local_blks_written | bigint | | |
+ temp_blks_read | bigint | | |
+ temp_blks_written | bigint | | |
+ shared_blk_read_time | double precision | | |
+ shared_blk_write_time | double precision | | |
+ local_blk_read_time | double precision | | |
+ local_blk_write_time | double precision | | |
+ temp_blk_read_time | double precision | | |
+ temp_blk_write_time | double precision | | |
+ wal_records | bigint | | |
+ wal_fpi | bigint | | |
+ wal_bytes | numeric | | |
+ wal_buffers_full | bigint | | |
+ jit_functions | bigint | | |
+ jit_generation_time | double precision | | |
+ jit_inlining_count | bigint | | |
+ jit_inlining_time | double precision | | |
+ jit_optimization_count | bigint | | |
+ jit_optimization_time | double precision | | |
+ jit_emission_count | bigint | | |
+ jit_emission_time | double precision | | |
+ jit_deform_count | bigint | | |
+ jit_deform_time | double precision | | |
+ parallel_workers_to_launch | bigint | | |
+ parallel_workers_launched | bigint | | |
+ generic_plan_calls | bigint | | |
+ custom_plan_calls | bigint | | |
+ stats_since | timestamp with time zone | | |
+ minmax_stats_since | timestamp with time zone | | |
+
+SELECT count(*) > 0 AS has_data FROM pg_stat_statements;
+ has_data
+----------
+ t
+(1 row)
+
DROP EXTENSION pg_stat_statements;
diff --git a/contrib/pg_stat_statements/expected/plancache.out b/contrib/pg_stat_statements/expected/plancache.out
new file mode 100644
index 00000000000..e152de9f551
--- /dev/null
+++ b/contrib/pg_stat_statements/expected/plancache.out
@@ -0,0 +1,224 @@
+--
+-- Tests with plan cache
+--
+-- Setup
+CREATE OR REPLACE FUNCTION select_one_func(int) RETURNS VOID AS $$
+DECLARE
+ ret INT;
+BEGIN
+ SELECT $1 INTO ret;
+END;
+$$ LANGUAGE plpgsql;
+CREATE OR REPLACE PROCEDURE select_one_proc(int) AS $$
+DECLARE
+ ret INT;
+BEGIN
+ SELECT $1 INTO ret;
+END;
+$$ LANGUAGE plpgsql;
+-- Prepared statements
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+PREPARE p1 AS SELECT $1 AS a;
+SET plan_cache_mode TO force_generic_plan;
+EXECUTE p1(1);
+ a
+---
+ 1
+(1 row)
+
+SET plan_cache_mode TO force_custom_plan;
+EXECUTE p1(1);
+ a
+---
+ 1
+(1 row)
+
+SELECT calls, generic_plan_calls, custom_plan_calls, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C";
+ calls | generic_plan_calls | custom_plan_calls | query
+-------+--------------------+-------------------+----------------------------------------------------
+ 2 | 1 | 1 | PREPARE p1 AS SELECT $1 AS a
+ 1 | 0 | 0 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
+ 2 | 0 | 0 | SET plan_cache_mode TO $1
+(3 rows)
+
+DEALLOCATE p1;
+-- Extended query protocol
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT $1 AS a \parse p1
+SET plan_cache_mode TO force_generic_plan;
+\bind_named p1 1
+;
+ a
+---
+ 1
+(1 row)
+
+SET plan_cache_mode TO force_custom_plan;
+\bind_named p1 1
+;
+ a
+---
+ 1
+(1 row)
+
+SELECT calls, generic_plan_calls, custom_plan_calls, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C";
+ calls | generic_plan_calls | custom_plan_calls | query
+-------+--------------------+-------------------+----------------------------------------------------
+ 2 | 1 | 1 | SELECT $1 AS a
+ 1 | 0 | 0 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
+ 2 | 0 | 0 | SET plan_cache_mode TO $1
+(3 rows)
+
+\close_prepared p1
+-- EXPLAIN [ANALYZE] EXECUTE
+SET pg_stat_statements.track = 'all';
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+PREPARE p1 AS SELECT $1;
+SET plan_cache_mode TO force_generic_plan;
+EXPLAIN (COSTS OFF) EXECUTE p1(1);
+ QUERY PLAN
+------------
+ Result
+(1 row)
+
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1);
+ QUERY PLAN
+-----------------------------------
+ Result (actual rows=1.00 loops=1)
+(1 row)
+
+SET plan_cache_mode TO force_custom_plan;
+EXPLAIN (COSTS OFF) EXECUTE p1(1);
+ QUERY PLAN
+------------
+ Result
+(1 row)
+
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1);
+ QUERY PLAN
+-----------------------------------
+ Result (actual rows=1.00 loops=1)
+(1 row)
+
+SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C";
+ calls | generic_plan_calls | custom_plan_calls | toplevel | query
+-------+--------------------+-------------------+----------+----------------------------------------------------------------------------------
+ 2 | 0 | 0 | t | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1)
+ 2 | 0 | 0 | t | EXPLAIN (COSTS OFF) EXECUTE p1(1)
+ 4 | 2 | 2 | f | PREPARE p1 AS SELECT $1
+ 1 | 0 | 0 | t | SELECT pg_stat_statements_reset() IS NOT NULL AS t
+ 2 | 0 | 0 | t | SET plan_cache_mode TO $1
+(5 rows)
+
+RESET pg_stat_statements.track;
+DEALLOCATE p1;
+-- Functions/procedures
+SET pg_stat_statements.track = 'all';
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SET plan_cache_mode TO force_generic_plan;
+SELECT select_one_func(1);
+ select_one_func
+-----------------
+
+(1 row)
+
+CALL select_one_proc(1);
+SET plan_cache_mode TO force_custom_plan;
+SELECT select_one_func(1);
+ select_one_func
+-----------------
+
+(1 row)
+
+CALL select_one_proc(1);
+SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C";
+ calls | generic_plan_calls | custom_plan_calls | toplevel | query
+-------+--------------------+-------------------+----------+----------------------------------------------------
+ 2 | 0 | 0 | t | CALL select_one_proc($1)
+ 4 | 2 | 2 | f | SELECT $1
+ 1 | 0 | 0 | t | SELECT pg_stat_statements_reset() IS NOT NULL AS t
+ 2 | 0 | 0 | t | SELECT select_one_func($1)
+ 2 | 0 | 0 | t | SET plan_cache_mode TO $1
+(5 rows)
+
+--
+-- EXPLAIN [ANALYZE] EXECUTE + functions/procedures
+--
+SET pg_stat_statements.track = 'all';
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SET plan_cache_mode TO force_generic_plan;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func(1);
+ QUERY PLAN
+-----------------------------------
+ Result (actual rows=1.00 loops=1)
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT select_one_func(1);
+ QUERY PLAN
+------------
+ Result
+(1 row)
+
+CALL select_one_proc(1);
+SET plan_cache_mode TO force_custom_plan;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func(1);
+ QUERY PLAN
+-----------------------------------
+ Result (actual rows=1.00 loops=1)
+(1 row)
+
+EXPLAIN (COSTS OFF) SELECT select_one_func(1);
+ QUERY PLAN
+------------
+ Result
+(1 row)
+
+CALL select_one_proc(1);
+SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C", toplevel;
+ calls | generic_plan_calls | custom_plan_calls | toplevel | query
+-------+--------------------+-------------------+----------+------------------------------------------------------------------------------------------------
+ 2 | 0 | 0 | t | CALL select_one_proc($1)
+ 2 | 0 | 0 | t | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func($1)
+ 4 | 0 | 0 | f | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func($1);
+ 2 | 0 | 0 | t | EXPLAIN (COSTS OFF) SELECT select_one_func($1)
+ 4 | 2 | 2 | f | SELECT $1
+ 1 | 0 | 0 | t | SELECT pg_stat_statements_reset() IS NOT NULL AS t
+ 2 | 0 | 0 | t | SET plan_cache_mode TO $1
+(7 rows)
+
+RESET pg_stat_statements.track;
+--
+-- Cleanup
+--
+DROP FUNCTION select_one_func(int);
+DROP PROCEDURE select_one_proc(int);
diff --git a/contrib/pg_stat_statements/meson.build b/contrib/pg_stat_statements/meson.build
index 01a6cbdcf61..7b8bfbb1de7 100644
--- a/contrib/pg_stat_statements/meson.build
+++ b/contrib/pg_stat_statements/meson.build
@@ -21,6 +21,7 @@ contrib_targets += pg_stat_statements
install_data(
'pg_stat_statements.control',
'pg_stat_statements--1.4.sql',
+ 'pg_stat_statements--1.12--1.13.sql',
'pg_stat_statements--1.11--1.12.sql',
'pg_stat_statements--1.10--1.11.sql',
'pg_stat_statements--1.9--1.10.sql',
@@ -54,6 +55,7 @@ tests += {
'privileges',
'extended',
'parallel',
+ 'plancache',
'cleanup',
'oldextversions',
'squashing',
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.12--1.13.sql b/contrib/pg_stat_statements/pg_stat_statements--1.12--1.13.sql
new file mode 100644
index 00000000000..2f0eaf14ec3
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.12--1.13.sql
@@ -0,0 +1,78 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.12--1.13.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.13'" to load this file. \quit
+
+/* First we have to remove them from the extension */
+ALTER EXTENSION pg_stat_statements DROP VIEW pg_stat_statements;
+ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements(boolean);
+
+/* Then we can drop them */
+DROP VIEW pg_stat_statements;
+DROP FUNCTION pg_stat_statements(boolean);
+
+/* Now redefine */
+CREATE FUNCTION pg_stat_statements(IN showtext boolean,
+ OUT userid oid,
+ OUT dbid oid,
+ OUT toplevel bool,
+ OUT queryid bigint,
+ OUT query text,
+ OUT plans int8,
+ OUT total_plan_time float8,
+ OUT min_plan_time float8,
+ OUT max_plan_time float8,
+ OUT mean_plan_time float8,
+ OUT stddev_plan_time float8,
+ OUT calls int8,
+ OUT total_exec_time float8,
+ OUT min_exec_time float8,
+ OUT max_exec_time float8,
+ OUT mean_exec_time float8,
+ OUT stddev_exec_time float8,
+ OUT rows int8,
+ OUT shared_blks_hit int8,
+ OUT shared_blks_read int8,
+ OUT shared_blks_dirtied int8,
+ OUT shared_blks_written int8,
+ OUT local_blks_hit int8,
+ OUT local_blks_read int8,
+ OUT local_blks_dirtied int8,
+ OUT local_blks_written int8,
+ OUT temp_blks_read int8,
+ OUT temp_blks_written int8,
+ OUT shared_blk_read_time float8,
+ OUT shared_blk_write_time float8,
+ OUT local_blk_read_time float8,
+ OUT local_blk_write_time float8,
+ OUT temp_blk_read_time float8,
+ OUT temp_blk_write_time float8,
+ OUT wal_records int8,
+ OUT wal_fpi int8,
+ OUT wal_bytes numeric,
+ OUT wal_buffers_full int8,
+ OUT jit_functions int8,
+ OUT jit_generation_time float8,
+ OUT jit_inlining_count int8,
+ OUT jit_inlining_time float8,
+ OUT jit_optimization_count int8,
+ OUT jit_optimization_time float8,
+ OUT jit_emission_count int8,
+ OUT jit_emission_time float8,
+ OUT jit_deform_count int8,
+ OUT jit_deform_time float8,
+ OUT parallel_workers_to_launch int8,
+ OUT parallel_workers_launched int8,
+ OUT generic_plan_calls int8,
+ OUT custom_plan_calls int8,
+ OUT stats_since timestamp with time zone,
+ OUT minmax_stats_since timestamp with time zone
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'pg_stat_statements_1_13'
+LANGUAGE C STRICT VOLATILE PARALLEL SAFE;
+
+CREATE VIEW pg_stat_statements AS
+ SELECT * FROM pg_stat_statements(true);
+
+GRANT SELECT ON pg_stat_statements TO PUBLIC;
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index e7857f81ec0..9fc9635d330 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -85,7 +85,7 @@ PG_MODULE_MAGIC_EXT(
#define PGSS_TEXT_FILE PG_STAT_TMP_DIR "/pgss_query_texts.stat"
/* Magic number identifying the stats file format */
-static const uint32 PGSS_FILE_HEADER = 0x20220408;
+static const uint32 PGSS_FILE_HEADER = 0x20250731;
/* PostgreSQL major version number, changes in which invalidate all entries */
static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100;
@@ -114,6 +114,7 @@ typedef enum pgssVersion
PGSS_V1_10,
PGSS_V1_11,
PGSS_V1_12,
+ PGSS_V1_13,
} pgssVersion;
typedef enum pgssStoreKind
@@ -210,6 +211,8 @@ typedef struct Counters
* to be launched */
int64 parallel_workers_launched; /* # of parallel workers actually
* launched */
+ int64 generic_plan_calls; /* number of calls using a generic plan */
+ int64 custom_plan_calls; /* number of calls using a custom plan */
} Counters;
/*
@@ -323,6 +326,7 @@ PG_FUNCTION_INFO_V1(pg_stat_statements_1_9);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_10);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_11);
PG_FUNCTION_INFO_V1(pg_stat_statements_1_12);
+PG_FUNCTION_INFO_V1(pg_stat_statements_1_13);
PG_FUNCTION_INFO_V1(pg_stat_statements);
PG_FUNCTION_INFO_V1(pg_stat_statements_info);
@@ -355,7 +359,8 @@ static void pgss_store(const char *query, int64 queryId,
const struct JitInstrumentation *jitusage,
JumbleState *jstate,
int parallel_workers_to_launch,
- int parallel_workers_launched);
+ int parallel_workers_launched,
+ PlannedStmtOrigin planOrigin);
static void pg_stat_statements_internal(FunctionCallInfo fcinfo,
pgssVersion api_version,
bool showtext);
@@ -877,7 +882,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate)
NULL,
jstate,
0,
- 0);
+ 0,
+ PLAN_STMT_UNKNOWN);
}
/*
@@ -957,7 +963,8 @@ pgss_planner(Query *parse,
NULL,
NULL,
0,
- 0);
+ 0,
+ result->planOrigin);
}
else
{
@@ -1091,7 +1098,8 @@ pgss_ExecutorEnd(QueryDesc *queryDesc)
queryDesc->estate->es_jit ? &queryDesc->estate->es_jit->instr : NULL,
NULL,
queryDesc->estate->es_parallel_workers_to_launch,
- queryDesc->estate->es_parallel_workers_launched);
+ queryDesc->estate->es_parallel_workers_launched,
+ queryDesc->plannedstmt->planOrigin);
}
if (prev_ExecutorEnd)
@@ -1224,7 +1232,8 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
NULL,
NULL,
0,
- 0);
+ 0,
+ pstmt->planOrigin);
}
else
{
@@ -1287,7 +1296,8 @@ pgss_store(const char *query, int64 queryId,
const struct JitInstrumentation *jitusage,
JumbleState *jstate,
int parallel_workers_to_launch,
- int parallel_workers_launched)
+ int parallel_workers_launched,
+ PlannedStmtOrigin planOrigin)
{
pgssHashKey key;
pgssEntry *entry;
@@ -1495,6 +1505,12 @@ pgss_store(const char *query, int64 queryId,
entry->counters.parallel_workers_to_launch += parallel_workers_to_launch;
entry->counters.parallel_workers_launched += parallel_workers_launched;
+ /* plan cache counters */
+ if (planOrigin == PLAN_STMT_CACHE_GENERIC)
+ entry->counters.generic_plan_calls++;
+ else if (planOrigin == PLAN_STMT_CACHE_CUSTOM)
+ entry->counters.custom_plan_calls++;
+
SpinLockRelease(&entry->mutex);
}
@@ -1562,7 +1578,8 @@ pg_stat_statements_reset(PG_FUNCTION_ARGS)
#define PG_STAT_STATEMENTS_COLS_V1_10 43
#define PG_STAT_STATEMENTS_COLS_V1_11 49
#define PG_STAT_STATEMENTS_COLS_V1_12 52
-#define PG_STAT_STATEMENTS_COLS 52 /* maximum of above */
+#define PG_STAT_STATEMENTS_COLS_V1_13 54
+#define PG_STAT_STATEMENTS_COLS 54 /* maximum of above */
/*
* Retrieve statement statistics.
@@ -1575,6 +1592,16 @@ pg_stat_statements_reset(PG_FUNCTION_ARGS)
* function. Unfortunately we weren't bright enough to do that for 1.1.
*/
Datum
+pg_stat_statements_1_13(PG_FUNCTION_ARGS)
+{
+ bool showtext = PG_GETARG_BOOL(0);
+
+ pg_stat_statements_internal(fcinfo, PGSS_V1_13, showtext);
+
+ return (Datum) 0;
+}
+
+Datum
pg_stat_statements_1_12(PG_FUNCTION_ARGS)
{
bool showtext = PG_GETARG_BOOL(0);
@@ -1732,6 +1759,10 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
if (api_version != PGSS_V1_12)
elog(ERROR, "incorrect number of output arguments");
break;
+ case PG_STAT_STATEMENTS_COLS_V1_13:
+ if (api_version != PGSS_V1_13)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
default:
elog(ERROR, "incorrect number of output arguments");
}
@@ -1984,6 +2015,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
values[i++] = Int64GetDatumFast(tmp.parallel_workers_to_launch);
values[i++] = Int64GetDatumFast(tmp.parallel_workers_launched);
}
+ if (api_version >= PGSS_V1_13)
+ {
+ values[i++] = Int64GetDatumFast(tmp.generic_plan_calls);
+ values[i++] = Int64GetDatumFast(tmp.custom_plan_calls);
+ }
if (api_version >= PGSS_V1_11)
{
values[i++] = TimestampTzGetDatum(stats_since);
@@ -1999,6 +2035,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
api_version == PGSS_V1_10 ? PG_STAT_STATEMENTS_COLS_V1_10 :
api_version == PGSS_V1_11 ? PG_STAT_STATEMENTS_COLS_V1_11 :
api_version == PGSS_V1_12 ? PG_STAT_STATEMENTS_COLS_V1_12 :
+ api_version == PGSS_V1_13 ? PG_STAT_STATEMENTS_COLS_V1_13 :
-1 /* fail if you forget to update this assert */ ));
tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
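
[Editor's note] The new generic_plan_calls/custom_plan_calls counters are driven entirely by the planOrigin field that PlannedStmt now carries. A small sketch (assuming only the enum values shown in the hunks above; the demo_* names are hypothetical) of how any hook receiving a QueryDesc could make the same classification:

#include "postgres.h"
#include "executor/execdesc.h"
#include "nodes/plannodes.h"

static int64 demo_generic_calls = 0;
static int64 demo_custom_calls = 0;

static void
demo_count_plan_origin(QueryDesc *queryDesc)
{
	switch (queryDesc->plannedstmt->planOrigin)
	{
		case PLAN_STMT_CACHE_GENERIC:
			demo_generic_calls++;
			break;
		case PLAN_STMT_CACHE_CUSTOM:
			demo_custom_calls++;
			break;
		default:
			/* PLAN_STMT_UNKNOWN and other origins: not counted */
			break;
	}
}
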
diff --git a/contrib/pg_stat_statements/pg_stat_statements.control b/contrib/pg_stat_statements/pg_stat_statements.control
index d45ebc12e36..2eee0ceffa8 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.control
+++ b/contrib/pg_stat_statements/pg_stat_statements.control
@@ -1,5 +1,5 @@
# pg_stat_statements extension
comment = 'track planning and execution statistics of all SQL statements executed'
-default_version = '1.12'
+default_version = '1.13'
module_pathname = '$libdir/pg_stat_statements'
relocatable = true
diff --git a/contrib/pg_stat_statements/sql/oldextversions.sql b/contrib/pg_stat_statements/sql/oldextversions.sql
index 13b8ca28586..e416efe9ffb 100644
--- a/contrib/pg_stat_statements/sql/oldextversions.sql
+++ b/contrib/pg_stat_statements/sql/oldextversions.sql
@@ -63,4 +63,9 @@ AlTER EXTENSION pg_stat_statements UPDATE TO '1.12';
\d pg_stat_statements
SELECT count(*) > 0 AS has_data FROM pg_stat_statements;
+-- New functions and views for pg_stat_statements in 1.13
+AlTER EXTENSION pg_stat_statements UPDATE TO '1.13';
+\d pg_stat_statements
+SELECT count(*) > 0 AS has_data FROM pg_stat_statements;
+
DROP EXTENSION pg_stat_statements;
diff --git a/contrib/pg_stat_statements/sql/plancache.sql b/contrib/pg_stat_statements/sql/plancache.sql
new file mode 100644
index 00000000000..160ced7add3
--- /dev/null
+++ b/contrib/pg_stat_statements/sql/plancache.sql
@@ -0,0 +1,94 @@
+--
+-- Tests with plan cache
+--
+
+-- Setup
+CREATE OR REPLACE FUNCTION select_one_func(int) RETURNS VOID AS $$
+DECLARE
+ ret INT;
+BEGIN
+ SELECT $1 INTO ret;
+END;
+$$ LANGUAGE plpgsql;
+CREATE OR REPLACE PROCEDURE select_one_proc(int) AS $$
+DECLARE
+ ret INT;
+BEGIN
+ SELECT $1 INTO ret;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Prepared statements
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+PREPARE p1 AS SELECT $1 AS a;
+SET plan_cache_mode TO force_generic_plan;
+EXECUTE p1(1);
+SET plan_cache_mode TO force_custom_plan;
+EXECUTE p1(1);
+SELECT calls, generic_plan_calls, custom_plan_calls, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C";
+DEALLOCATE p1;
+
+-- Extended query protocol
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT $1 AS a \parse p1
+SET plan_cache_mode TO force_generic_plan;
+\bind_named p1 1
+;
+SET plan_cache_mode TO force_custom_plan;
+\bind_named p1 1
+;
+SELECT calls, generic_plan_calls, custom_plan_calls, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C";
+\close_prepared p1
+
+-- EXPLAIN [ANALYZE] EXECUTE
+SET pg_stat_statements.track = 'all';
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+PREPARE p1 AS SELECT $1;
+SET plan_cache_mode TO force_generic_plan;
+EXPLAIN (COSTS OFF) EXECUTE p1(1);
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1);
+SET plan_cache_mode TO force_custom_plan;
+EXPLAIN (COSTS OFF) EXECUTE p1(1);
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) EXECUTE p1(1);
+SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C";
+RESET pg_stat_statements.track;
+DEALLOCATE p1;
+
+-- Functions/procedures
+SET pg_stat_statements.track = 'all';
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SET plan_cache_mode TO force_generic_plan;
+SELECT select_one_func(1);
+CALL select_one_proc(1);
+SET plan_cache_mode TO force_custom_plan;
+SELECT select_one_func(1);
+CALL select_one_proc(1);
+SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C";
+
+--
+-- EXPLAIN [ANALYZE] EXECUTE + functions/procedures
+--
+SET pg_stat_statements.track = 'all';
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SET plan_cache_mode TO force_generic_plan;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func(1);
+EXPLAIN (COSTS OFF) SELECT select_one_func(1);
+CALL select_one_proc(1);
+SET plan_cache_mode TO force_custom_plan;
+EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT select_one_func(1);
+EXPLAIN (COSTS OFF) SELECT select_one_func(1);
+CALL select_one_proc(1);
+SELECT calls, generic_plan_calls, custom_plan_calls, toplevel, query FROM pg_stat_statements
+ ORDER BY query COLLATE "C", toplevel;
+
+RESET pg_stat_statements.track;
+
+--
+-- Cleanup
+--
+DROP FUNCTION select_one_func(int);
+DROP PROCEDURE select_one_proc(int);
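[Editor's note: for reference, the counters this test exercises boil down to two new per-entry fields. A minimal standalone sketch of the counting logic follows; the struct and function names are illustrative assumptions, not the patch's actual pg_stat_statements code.]

    #include <stdint.h>

    /* Stand-in for the per-statement counters; the two new fields mirror
     * the generic_plan_calls/custom_plan_calls columns queried above. */
    typedef struct Counters
    {
        int64_t calls;
        int64_t generic_plan_calls;
        int64_t custom_plan_calls;
    } Counters;

    /* Bump the plan-type counter for one execution of a cached plan. */
    static void
    count_execution(Counters *c, int used_generic_plan)
    {
        c->calls += 1;
        if (used_generic_plan)
            c->generic_plan_calls += 1;
        else
            c->custom_plan_calls += 1;
    }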
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index c1ce6f33436..e8148f2c5a2 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -142,6 +142,8 @@ static void do_sql_command_begin(PGconn *conn, const char *sql);
static void do_sql_command_end(PGconn *conn, const char *sql,
bool consume_input);
static void begin_remote_xact(ConnCacheEntry *entry);
+static void pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn,
+ const char *sql);
static void pgfdw_xact_callback(XactEvent event, void *arg);
static void pgfdw_subxact_callback(SubXactEvent event,
SubTransactionId mySubid,
@@ -815,7 +817,7 @@ static void
do_sql_command_begin(PGconn *conn, const char *sql)
{
if (!PQsendQuery(conn, sql))
- pgfdw_report_error(ERROR, NULL, conn, false, sql);
+ pgfdw_report_error(NULL, conn, sql);
}
static void
@@ -830,10 +832,10 @@ do_sql_command_end(PGconn *conn, const char *sql, bool consume_input)
* would be large compared to the overhead of PQconsumeInput.)
*/
if (consume_input && !PQconsumeInput(conn))
- pgfdw_report_error(ERROR, NULL, conn, false, sql);
+ pgfdw_report_error(NULL, conn, sql);
res = pgfdw_get_result(conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, conn, true, sql);
+ pgfdw_report_error(res, conn, sql);
PQclear(res);
}
@@ -966,63 +968,73 @@ pgfdw_get_result(PGconn *conn)
/*
* Report an error we got from the remote server.
*
- * elevel: error level to use (typically ERROR, but might be less)
- * res: PGresult containing the error
+ * Callers should use pgfdw_report_error() to throw an error, or use
+ * pgfdw_report() for lesser message levels. (We make this distinction
+ * so that pgfdw_report_error() can be marked noreturn.)
+ *
+ * res: PGresult containing the error (might be NULL)
* conn: connection we did the query on
- * clear: if true, PQclear the result (otherwise caller will handle it)
* sql: NULL, or text of remote command we tried to execute
*
+ * If "res" is not NULL, it'll be PQclear'ed here (unless we throw error,
+ * in which case memory context cleanup will clear it eventually).
+ *
* Note: callers that choose not to throw ERROR for a remote error are
* responsible for making sure that the associated ConnCacheEntry gets
* marked with have_error = true.
*/
void
-pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
- bool clear, const char *sql)
+pgfdw_report_error(PGresult *res, PGconn *conn, const char *sql)
{
- /* If requested, PGresult must be released before leaving this function. */
- PG_TRY();
- {
- char *diag_sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
- char *message_primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);
- char *message_detail = PQresultErrorField(res, PG_DIAG_MESSAGE_DETAIL);
- char *message_hint = PQresultErrorField(res, PG_DIAG_MESSAGE_HINT);
- char *message_context = PQresultErrorField(res, PG_DIAG_CONTEXT);
- int sqlstate;
-
- if (diag_sqlstate)
- sqlstate = MAKE_SQLSTATE(diag_sqlstate[0],
- diag_sqlstate[1],
- diag_sqlstate[2],
- diag_sqlstate[3],
- diag_sqlstate[4]);
- else
- sqlstate = ERRCODE_CONNECTION_FAILURE;
+ pgfdw_report_internal(ERROR, res, conn, sql);
+ pg_unreachable();
+}
- /*
- * If we don't get a message from the PGresult, try the PGconn. This
- * is needed because for connection-level failures, PQgetResult may
- * just return NULL, not a PGresult at all.
- */
- if (message_primary == NULL)
- message_primary = pchomp(PQerrorMessage(conn));
-
- ereport(elevel,
- (errcode(sqlstate),
- (message_primary != NULL && message_primary[0] != '\0') ?
- errmsg_internal("%s", message_primary) :
- errmsg("could not obtain message string for remote error"),
- message_detail ? errdetail_internal("%s", message_detail) : 0,
- message_hint ? errhint("%s", message_hint) : 0,
- message_context ? errcontext("%s", message_context) : 0,
- sql ? errcontext("remote SQL command: %s", sql) : 0));
- }
- PG_FINALLY();
- {
- if (clear)
- PQclear(res);
- }
- PG_END_TRY();
+void
+pgfdw_report(int elevel, PGresult *res, PGconn *conn, const char *sql)
+{
+ Assert(elevel < ERROR); /* use pgfdw_report_error for that */
+ pgfdw_report_internal(elevel, res, conn, sql);
+}
+
+static void
+pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn,
+ const char *sql)
+{
+ char *diag_sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
+ char *message_primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);
+ char *message_detail = PQresultErrorField(res, PG_DIAG_MESSAGE_DETAIL);
+ char *message_hint = PQresultErrorField(res, PG_DIAG_MESSAGE_HINT);
+ char *message_context = PQresultErrorField(res, PG_DIAG_CONTEXT);
+ int sqlstate;
+
+ if (diag_sqlstate)
+ sqlstate = MAKE_SQLSTATE(diag_sqlstate[0],
+ diag_sqlstate[1],
+ diag_sqlstate[2],
+ diag_sqlstate[3],
+ diag_sqlstate[4]);
+ else
+ sqlstate = ERRCODE_CONNECTION_FAILURE;
+
+ /*
+ * If we don't get a message from the PGresult, try the PGconn. This is
+ * needed because for connection-level failures, PQgetResult may just
+ * return NULL, not a PGresult at all.
+ */
+ if (message_primary == NULL)
+ message_primary = pchomp(PQerrorMessage(conn));
+
+ ereport(elevel,
+ (errcode(sqlstate),
+ (message_primary != NULL && message_primary[0] != '\0') ?
+ errmsg_internal("%s", message_primary) :
+ errmsg("could not obtain message string for remote error"),
+ message_detail ? errdetail_internal("%s", message_detail) : 0,
+ message_hint ? errhint("%s", message_hint) : 0,
+ message_context ? errcontext("%s", message_context) : 0,
+ sql ? errcontext("remote SQL command: %s", sql) : 0));
+ PQclear(res);
}
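[Editor's note: with the split above, call sites reduce to a uniform pattern — check the status, pass the result to the noreturn reporter on failure, PQclear it on success; no PG_TRY scaffolding is needed because an error unwinds through memory-context cleanup. A minimal caller-side sketch using the functions as this patch declares them:]

    /* Post-patch idiom for running one remote command (sketch). */
    static void
    exec_remote_command(PGconn *conn, const char *sql)
    {
        PGresult   *res = pgfdw_exec_query(conn, sql, NULL);

        if (PQresultStatus(res) != PGRES_COMMAND_OK)
            pgfdw_report_error(res, conn, sql);     /* does not return */
        PQclear(res);
    }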
/*
@@ -1545,7 +1557,7 @@ pgfdw_exec_cleanup_query_begin(PGconn *conn, const char *query)
*/
if (!PQsendQuery(conn, query))
{
- pgfdw_report_error(WARNING, NULL, conn, false, query);
+ pgfdw_report(WARNING, NULL, conn, query);
return false;
}
@@ -1570,7 +1582,7 @@ pgfdw_exec_cleanup_query_end(PGconn *conn, const char *query,
*/
if (consume_input && !PQconsumeInput(conn))
{
- pgfdw_report_error(WARNING, NULL, conn, false, query);
+ pgfdw_report(WARNING, NULL, conn, query);
return false;
}
@@ -1582,7 +1594,7 @@ pgfdw_exec_cleanup_query_end(PGconn *conn, const char *query,
(errmsg("could not get query result due to timeout"),
errcontext("remote SQL command: %s", query)));
else
- pgfdw_report_error(WARNING, NULL, conn, false, query);
+ pgfdw_report(WARNING, NULL, conn, query);
return false;
}
@@ -1590,7 +1602,7 @@ pgfdw_exec_cleanup_query_end(PGconn *conn, const char *query,
/* Issue a warning if not successful. */
if (PQresultStatus(result) != PGRES_COMMAND_OK)
{
- pgfdw_report_error(WARNING, result, conn, true, query);
+ pgfdw_report(WARNING, result, conn, query);
return ignore_errors;
}
PQclear(result);
@@ -1618,103 +1630,90 @@ pgfdw_get_cleanup_result(PGconn *conn, TimestampTz endtime,
PGresult **result,
bool *timed_out)
{
- volatile bool failed = false;
- PGresult *volatile last_res = NULL;
+ bool failed = false;
+ PGresult *last_res = NULL;
+ int canceldelta = RETRY_CANCEL_TIMEOUT * 2;
*result = NULL;
*timed_out = false;
-
- /* In what follows, do not leak any PGresults on an error. */
- PG_TRY();
+ for (;;)
{
- int canceldelta = RETRY_CANCEL_TIMEOUT * 2;
+ PGresult *res;
- for (;;)
+ while (PQisBusy(conn))
{
- PGresult *res;
+ int wc;
+ TimestampTz now = GetCurrentTimestamp();
+ long cur_timeout;
- while (PQisBusy(conn))
+ /* If timeout has expired, give up. */
+ if (now >= endtime)
{
- int wc;
- TimestampTz now = GetCurrentTimestamp();
- long cur_timeout;
-
- /* If timeout has expired, give up. */
- if (now >= endtime)
- {
- *timed_out = true;
- failed = true;
- goto exit;
- }
+ *timed_out = true;
+ failed = true;
+ goto exit;
+ }
- /* If we need to re-issue the cancel request, do that. */
- if (now >= retrycanceltime)
- {
- /* We ignore failure to issue the repeated request. */
- (void) libpqsrv_cancel(conn, endtime);
+ /* If we need to re-issue the cancel request, do that. */
+ if (now >= retrycanceltime)
+ {
+ /* We ignore failure to issue the repeated request. */
+ (void) libpqsrv_cancel(conn, endtime);
- /* Recompute "now" in case that took measurable time. */
- now = GetCurrentTimestamp();
+ /* Recompute "now" in case that took measurable time. */
+ now = GetCurrentTimestamp();
- /* Adjust re-cancel timeout in increasing steps. */
- retrycanceltime = TimestampTzPlusMilliseconds(now,
- canceldelta);
- canceldelta += canceldelta;
- }
+ /* Adjust re-cancel timeout in increasing steps. */
+ retrycanceltime = TimestampTzPlusMilliseconds(now,
+ canceldelta);
+ canceldelta += canceldelta;
+ }
- /* If timeout has expired, give up, else get sleep time. */
- cur_timeout = TimestampDifferenceMilliseconds(now,
- Min(endtime,
- retrycanceltime));
- if (cur_timeout <= 0)
- {
- *timed_out = true;
- failed = true;
- goto exit;
- }
+ /* If timeout has expired, give up, else get sleep time. */
+ cur_timeout = TimestampDifferenceMilliseconds(now,
+ Min(endtime,
+ retrycanceltime));
+ if (cur_timeout <= 0)
+ {
+ *timed_out = true;
+ failed = true;
+ goto exit;
+ }
- /* first time, allocate or get the custom wait event */
- if (pgfdw_we_cleanup_result == 0)
- pgfdw_we_cleanup_result = WaitEventExtensionNew("PostgresFdwCleanupResult");
+ /* first time, allocate or get the custom wait event */
+ if (pgfdw_we_cleanup_result == 0)
+ pgfdw_we_cleanup_result = WaitEventExtensionNew("PostgresFdwCleanupResult");
- /* Sleep until there's something to do */
- wc = WaitLatchOrSocket(MyLatch,
- WL_LATCH_SET | WL_SOCKET_READABLE |
- WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
- PQsocket(conn),
- cur_timeout, pgfdw_we_cleanup_result);
- ResetLatch(MyLatch);
+ /* Sleep until there's something to do */
+ wc = WaitLatchOrSocket(MyLatch,
+ WL_LATCH_SET | WL_SOCKET_READABLE |
+ WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
+ PQsocket(conn),
+ cur_timeout, pgfdw_we_cleanup_result);
+ ResetLatch(MyLatch);
- CHECK_FOR_INTERRUPTS();
+ CHECK_FOR_INTERRUPTS();
- /* Data available in socket? */
- if (wc & WL_SOCKET_READABLE)
+ /* Data available in socket? */
+ if (wc & WL_SOCKET_READABLE)
+ {
+ if (!PQconsumeInput(conn))
{
- if (!PQconsumeInput(conn))
- {
- /* connection trouble */
- failed = true;
- goto exit;
- }
+ /* connection trouble */
+ failed = true;
+ goto exit;
}
}
+ }
- res = PQgetResult(conn);
- if (res == NULL)
- break; /* query is complete */
+ res = PQgetResult(conn);
+ if (res == NULL)
+ break; /* query is complete */
- PQclear(last_res);
- last_res = res;
- }
-exit: ;
- }
- PG_CATCH();
- {
PQclear(last_res);
- PG_RE_THROW();
+ last_res = res;
}
- PG_END_TRY();
-
+exit:
if (failed)
PQclear(last_res);
else
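[Editor's note: the cancel-retry loop above re-issues the cancel request on a doubling schedule — the first retry after RETRY_CANCEL_TIMEOUT*2 ms, with the delta doubling each time. A standalone sketch of just that backoff arithmetic; the timeout value here is an illustrative assumption:]

    #include <stdio.h>

    #define RETRY_CANCEL_TIMEOUT 100    /* assumed value, in milliseconds */

    int
    main(void)
    {
        long        retry_at = RETRY_CANCEL_TIMEOUT;
        int         canceldelta = RETRY_CANCEL_TIMEOUT * 2;

        /* Prints re-cancel deadlines: 100, 300, 700, 1500, 3100 ms */
        for (int i = 0; i < 5; i++)
        {
            printf("re-cancel at %ld ms\n", retry_at);
            retry_at += canceldelta;    /* TimestampTzPlusMilliseconds */
            canceldelta += canceldelta; /* adjust in increasing steps */
        }
        return 0;
    }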
diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c
index c2f936640bc..d6fa89bad93 100644
--- a/contrib/postgres_fdw/option.c
+++ b/contrib/postgres_fdw/option.c
@@ -21,6 +21,7 @@
#include "libpq/libpq-be.h"
#include "postgres_fdw.h"
#include "utils/guc.h"
+#include "utils/memutils.h"
#include "utils/varlena.h"
/*
@@ -40,12 +41,6 @@ typedef struct PgFdwOption
static PgFdwOption *postgres_fdw_options;
/*
- * Valid options for libpq.
- * Allocated and filled in InitPgFdwOptions.
- */
-static PQconninfoOption *libpq_options;
-
-/*
* GUC parameters
*/
char *pgfdw_application_name = NULL;
@@ -239,6 +234,7 @@ static void
InitPgFdwOptions(void)
{
int num_libpq_opts;
+ PQconninfoOption *libpq_options;
PQconninfoOption *lopt;
PgFdwOption *popt;
@@ -307,8 +303,8 @@ InitPgFdwOptions(void)
* Get list of valid libpq options.
*
* To avoid unnecessary work, we get the list once and use it throughout
- * the lifetime of this backend process. We don't need to care about
- * memory context issues, because PQconndefaults allocates with malloc.
+ * the lifetime of this backend process. Hence, we'll allocate it in
+ * TopMemoryContext.
*/
libpq_options = PQconndefaults();
if (!libpq_options) /* assume reason for failure is OOM */
@@ -325,19 +321,11 @@ InitPgFdwOptions(void)
/*
* Construct an array which consists of all valid options for
* postgres_fdw, by appending FDW-specific options to libpq options.
- *
- * We use plain malloc here to allocate postgres_fdw_options because it
- * lives as long as the backend process does. Besides, keeping
- * libpq_options in memory allows us to avoid copying every keyword
- * string.
*/
postgres_fdw_options = (PgFdwOption *)
- malloc(sizeof(PgFdwOption) * num_libpq_opts +
- sizeof(non_libpq_options));
- if (postgres_fdw_options == NULL)
- ereport(ERROR,
- (errcode(ERRCODE_FDW_OUT_OF_MEMORY),
- errmsg("out of memory")));
+ MemoryContextAlloc(TopMemoryContext,
+ sizeof(PgFdwOption) * num_libpq_opts +
+ sizeof(non_libpq_options));
popt = postgres_fdw_options;
for (lopt = libpq_options; lopt->keyword; lopt++)
@@ -355,8 +343,8 @@ InitPgFdwOptions(void)
if (strncmp(lopt->keyword, "oauth_", strlen("oauth_")) == 0)
continue;
- /* We don't have to copy keyword string, as described above. */
- popt->keyword = lopt->keyword;
+ popt->keyword = MemoryContextStrdup(TopMemoryContext,
+ lopt->keyword);
/*
* "user" and any secret options are allowed only on user mappings.
@@ -371,6 +359,9 @@ InitPgFdwOptions(void)
popt++;
}
+ /* Done with libpq's output structure. */
+ PQconninfoFree(libpq_options);
+
/* Append FDW-specific options and dummy terminator. */
memcpy(popt, non_libpq_options, sizeof(non_libpq_options));
}
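[Editor's note: compared with raw malloc(), the TopMemoryContext approach above means an allocation failure raises a normal ERROR (no manual out-of-memory check) and the memory is visible in memory-context dumps; the price is copying each keyword string, since libpq's structure is now freed. A hedged backend-side sketch of the same pattern:]

    #include "postgres.h"
    #include "utils/memutils.h"

    /* Cache a string for the whole life of the backend process (sketch). */
    static char *cached_keyword;

    void
    remember_keyword(const char *keyword)
    {
        if (cached_keyword == NULL)
            cached_keyword = MemoryContextStrdup(TopMemoryContext, keyword);
    }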
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index e0a34b27c7c..456b267f70b 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -240,7 +240,6 @@ typedef struct PgFdwDirectModifyState
PGresult *result; /* result for query */
int num_tuples; /* # of result tuples */
int next_tuple; /* index of next one to return */
- MemoryContextCallback result_cb; /* ensures result will get freed */
Relation resultRel; /* relcache entry for the target relation */
AttrNumber *attnoMap; /* array of attnums of input user columns */
AttrNumber ctidAttno; /* attnum of input ctid column */
@@ -1703,13 +1702,9 @@ postgresReScanForeignScan(ForeignScanState *node)
return;
}
- /*
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
- */
res = pgfdw_exec_query(fsstate->conn, sql, fsstate->conn_state);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, fsstate->conn, true, sql);
+ pgfdw_report_error(res, fsstate->conn, sql);
PQclear(res);
/* Now force a fresh FETCH. */
@@ -2672,17 +2667,6 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags)
node->fdw_state = dmstate;
/*
- * We use a memory context callback to ensure that the dmstate's PGresult
- * (if any) will be released, even if the query fails somewhere that's
- * outside our control. The callback is always armed for the duration of
- * the query; this relies on PQclear(NULL) being a no-op.
- */
- dmstate->result_cb.func = (MemoryContextCallbackFunction) PQclear;
- dmstate->result_cb.arg = NULL;
- MemoryContextRegisterResetCallback(CurrentMemoryContext,
- &dmstate->result_cb);
-
- /*
* Identify which user to do the remote access as. This should match what
* ExecCheckPermissions() does.
*/
@@ -2829,13 +2813,7 @@ postgresEndDirectModify(ForeignScanState *node)
return;
/* Release PGresult */
- if (dmstate->result)
- {
- PQclear(dmstate->result);
- dmstate->result = NULL;
- /* ... and don't forget to disable the callback */
- dmstate->result_cb.arg = NULL;
- }
+ PQclear(dmstate->result);
/* Release remote connection */
ReleaseConnection(dmstate->conn);
@@ -3626,41 +3604,32 @@ get_remote_estimate(const char *sql, PGconn *conn,
double *rows, int *width,
Cost *startup_cost, Cost *total_cost)
{
- PGresult *volatile res = NULL;
-
- /* PGresult must be released before leaving this function. */
- PG_TRY();
- {
- char *line;
- char *p;
- int n;
+ PGresult *res;
+ char *line;
+ char *p;
+ int n;
- /*
- * Execute EXPLAIN remotely.
- */
- res = pgfdw_exec_query(conn, sql, NULL);
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, false, sql);
+ /*
+ * Execute EXPLAIN remotely.
+ */
+ res = pgfdw_exec_query(conn, sql, NULL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(res, conn, sql);
- /*
- * Extract cost numbers for topmost plan node. Note we search for a
- * left paren from the end of the line to avoid being confused by
- * other uses of parentheses.
- */
- line = PQgetvalue(res, 0, 0);
- p = strrchr(line, '(');
- if (p == NULL)
- elog(ERROR, "could not interpret EXPLAIN output: \"%s\"", line);
- n = sscanf(p, "(cost=%lf..%lf rows=%lf width=%d)",
- startup_cost, total_cost, rows, width);
- if (n != 4)
- elog(ERROR, "could not interpret EXPLAIN output: \"%s\"", line);
- }
- PG_FINALLY();
- {
- PQclear(res);
- }
- PG_END_TRY();
+ /*
+ * Extract cost numbers for topmost plan node. Note we search for a left
+ * paren from the end of the line to avoid being confused by other uses of
+ * parentheses.
+ */
+ line = PQgetvalue(res, 0, 0);
+ p = strrchr(line, '(');
+ if (p == NULL)
+ elog(ERROR, "could not interpret EXPLAIN output: \"%s\"", line);
+ n = sscanf(p, "(cost=%lf..%lf rows=%lf width=%d)",
+ startup_cost, total_cost, rows, width);
+ if (n != 4)
+ elog(ERROR, "could not interpret EXPLAIN output: \"%s\"", line);
+ PQclear(res);
}
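[Editor's note: the parsing step above can be exercised on its own — find the last '(' so parenthesized relation names can't confuse the match, then pull the four numbers out of the cost annotation. A self-contained demo:]

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *line =
            "Seq Scan on t  (cost=0.00..35.50 rows=2550 width=4)";
        const char *p = strrchr(line, '(');
        double      startup,
                    total,
                    rows;
        int         width;

        if (p && sscanf(p, "(cost=%lf..%lf rows=%lf width=%d)",
                        &startup, &total, &rows, &width) == 4)
            printf("startup %.2f, total %.2f, %.0f rows, width %d\n",
                   startup, total, rows, width);
        return 0;
    }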
/*
@@ -3800,17 +3769,14 @@ create_cursor(ForeignScanState *node)
*/
if (!PQsendQueryParams(conn, buf.data, numParams,
NULL, values, NULL, NULL, 0))
- pgfdw_report_error(ERROR, NULL, conn, false, buf.data);
+ pgfdw_report_error(NULL, conn, buf.data);
/*
* Get the result, and check for success.
- *
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
*/
res = pgfdw_get_result(conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, conn, true, fsstate->query);
+ pgfdw_report_error(res, conn, fsstate->query);
PQclear(res);
/* Mark the cursor as created, and show no tuples have been retrieved */
@@ -3832,7 +3798,10 @@ static void
fetch_more_data(ForeignScanState *node)
{
PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
- PGresult *volatile res = NULL;
+ PGconn *conn = fsstate->conn;
+ PGresult *res;
+ int numrows;
+ int i;
MemoryContext oldcontext;
/*
@@ -3843,74 +3812,63 @@ fetch_more_data(ForeignScanState *node)
MemoryContextReset(fsstate->batch_cxt);
oldcontext = MemoryContextSwitchTo(fsstate->batch_cxt);
- /* PGresult must be released before leaving this function. */
- PG_TRY();
+ if (fsstate->async_capable)
{
- PGconn *conn = fsstate->conn;
- int numrows;
- int i;
+ Assert(fsstate->conn_state->pendingAreq);
- if (fsstate->async_capable)
- {
- Assert(fsstate->conn_state->pendingAreq);
+ /*
+ * The query was already sent by an earlier call to
+ * fetch_more_data_begin. So now we just fetch the result.
+ */
+ res = pgfdw_get_result(conn);
+ /* On error, report the original query, not the FETCH. */
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(res, conn, fsstate->query);
- /*
- * The query was already sent by an earlier call to
- * fetch_more_data_begin. So now we just fetch the result.
- */
- res = pgfdw_get_result(conn);
- /* On error, report the original query, not the FETCH. */
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, false, fsstate->query);
+ /* Reset per-connection state */
+ fsstate->conn_state->pendingAreq = NULL;
+ }
+ else
+ {
+ char sql[64];
- /* Reset per-connection state */
- fsstate->conn_state->pendingAreq = NULL;
- }
- else
- {
- char sql[64];
+ /* This is a regular synchronous fetch. */
+ snprintf(sql, sizeof(sql), "FETCH %d FROM c%u",
+ fsstate->fetch_size, fsstate->cursor_number);
- /* This is a regular synchronous fetch. */
- snprintf(sql, sizeof(sql), "FETCH %d FROM c%u",
- fsstate->fetch_size, fsstate->cursor_number);
+ res = pgfdw_exec_query(conn, sql, fsstate->conn_state);
+ /* On error, report the original query, not the FETCH. */
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(res, conn, fsstate->query);
+ }
- res = pgfdw_exec_query(conn, sql, fsstate->conn_state);
- /* On error, report the original query, not the FETCH. */
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, false, fsstate->query);
- }
+ /* Convert the data into HeapTuples */
+ numrows = PQntuples(res);
+ fsstate->tuples = (HeapTuple *) palloc0(numrows * sizeof(HeapTuple));
+ fsstate->num_tuples = numrows;
+ fsstate->next_tuple = 0;
- /* Convert the data into HeapTuples */
- numrows = PQntuples(res);
- fsstate->tuples = (HeapTuple *) palloc0(numrows * sizeof(HeapTuple));
- fsstate->num_tuples = numrows;
- fsstate->next_tuple = 0;
+ for (i = 0; i < numrows; i++)
+ {
+ Assert(IsA(node->ss.ps.plan, ForeignScan));
- for (i = 0; i < numrows; i++)
- {
- Assert(IsA(node->ss.ps.plan, ForeignScan));
-
- fsstate->tuples[i] =
- make_tuple_from_result_row(res, i,
- fsstate->rel,
- fsstate->attinmeta,
- fsstate->retrieved_attrs,
- node,
- fsstate->temp_cxt);
- }
+ fsstate->tuples[i] =
+ make_tuple_from_result_row(res, i,
+ fsstate->rel,
+ fsstate->attinmeta,
+ fsstate->retrieved_attrs,
+ node,
+ fsstate->temp_cxt);
+ }
- /* Update fetch_ct_2 */
- if (fsstate->fetch_ct_2 < 2)
- fsstate->fetch_ct_2++;
+ /* Update fetch_ct_2 */
+ if (fsstate->fetch_ct_2 < 2)
+ fsstate->fetch_ct_2++;
- /* Must be EOF if we didn't get as many tuples as we asked for. */
- fsstate->eof_reached = (numrows < fsstate->fetch_size);
- }
- PG_FINALLY();
- {
- PQclear(res);
- }
- PG_END_TRY();
+ /* Must be EOF if we didn't get as many tuples as we asked for. */
+ fsstate->eof_reached = (numrows < fsstate->fetch_size);
+
+ PQclear(res);
MemoryContextSwitchTo(oldcontext);
}
@@ -3984,14 +3942,9 @@ close_cursor(PGconn *conn, unsigned int cursor_number,
PGresult *res;
snprintf(sql, sizeof(sql), "CLOSE c%u", cursor_number);
-
- /*
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
- */
res = pgfdw_exec_query(conn, sql, conn_state);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, conn, true, sql);
+ pgfdw_report_error(res, conn, sql);
PQclear(res);
}
@@ -4199,18 +4152,15 @@ execute_foreign_modify(EState *estate,
NULL,
NULL,
0))
- pgfdw_report_error(ERROR, NULL, fmstate->conn, false, fmstate->query);
+ pgfdw_report_error(NULL, fmstate->conn, fmstate->query);
/*
* Get the result, and check for success.
- *
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
*/
res = pgfdw_get_result(fmstate->conn);
if (PQresultStatus(res) !=
(fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
- pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
+ pgfdw_report_error(res, fmstate->conn, fmstate->query);
/* Check number of rows affected, and fetch RETURNING tuple if any */
if (fmstate->has_returning)
@@ -4269,17 +4219,14 @@ prepare_foreign_modify(PgFdwModifyState *fmstate)
fmstate->query,
0,
NULL))
- pgfdw_report_error(ERROR, NULL, fmstate->conn, false, fmstate->query);
+ pgfdw_report_error(NULL, fmstate->conn, fmstate->query);
/*
* Get the result, and check for success.
- *
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
*/
res = pgfdw_get_result(fmstate->conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
+ pgfdw_report_error(res, fmstate->conn, fmstate->query);
PQclear(res);
/* This action shows that the prepare has been done. */
@@ -4370,37 +4317,25 @@ convert_prep_stmt_params(PgFdwModifyState *fmstate,
/*
* store_returning_result
* Store the result of a RETURNING clause
- *
- * On error, be sure to release the PGresult on the way out. Callers do not
- * have PG_TRY blocks to ensure this happens.
*/
static void
store_returning_result(PgFdwModifyState *fmstate,
TupleTableSlot *slot, PGresult *res)
{
- PG_TRY();
- {
- HeapTuple newtup;
+ HeapTuple newtup;
- newtup = make_tuple_from_result_row(res, 0,
- fmstate->rel,
- fmstate->attinmeta,
- fmstate->retrieved_attrs,
- NULL,
- fmstate->temp_cxt);
+ newtup = make_tuple_from_result_row(res, 0,
+ fmstate->rel,
+ fmstate->attinmeta,
+ fmstate->retrieved_attrs,
+ NULL,
+ fmstate->temp_cxt);
- /*
- * The returning slot will not necessarily be suitable to store
- * heaptuples directly, so allow for conversion.
- */
- ExecForceStoreHeapTuple(newtup, slot, true);
- }
- PG_CATCH();
- {
- PQclear(res);
- PG_RE_THROW();
- }
- PG_END_TRY();
+ /*
+ * The returning slot will not necessarily be suitable to store heaptuples
+ * directly, so allow for conversion.
+ */
+ ExecForceStoreHeapTuple(newtup, slot, true);
}
/*
@@ -4436,14 +4371,9 @@ deallocate_query(PgFdwModifyState *fmstate)
return;
snprintf(sql, sizeof(sql), "DEALLOCATE %s", fmstate->p_name);
-
- /*
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
- */
res = pgfdw_exec_query(fmstate->conn, sql, fmstate->conn_state);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, fmstate->conn, true, sql);
+ pgfdw_report_error(res, fmstate->conn, sql);
PQclear(res);
pfree(fmstate->p_name);
fmstate->p_name = NULL;
@@ -4611,24 +4541,24 @@ execute_dml_stmt(ForeignScanState *node)
*/
if (!PQsendQueryParams(dmstate->conn, dmstate->query, numParams,
NULL, values, NULL, NULL, 0))
- pgfdw_report_error(ERROR, NULL, dmstate->conn, false, dmstate->query);
+ pgfdw_report_error(NULL, dmstate->conn, dmstate->query);
/*
* Get the result, and check for success.
- *
- * We use a memory context callback to ensure that the PGresult will be
- * released, even if the query fails somewhere that's outside our control.
- * The callback is already registered, just need to fill in its arg.
*/
- Assert(dmstate->result == NULL);
dmstate->result = pgfdw_get_result(dmstate->conn);
- dmstate->result_cb.arg = dmstate->result;
-
if (PQresultStatus(dmstate->result) !=
(dmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
- pgfdw_report_error(ERROR, dmstate->result, dmstate->conn, false,
+ pgfdw_report_error(dmstate->result, dmstate->conn,
dmstate->query);
+ /*
+ * The result potentially needs to survive across multiple executor row
+ * cycles, so move it to the context where the dmstate is.
+ */
+ dmstate->result = libpqsrv_PGresultSetParent(dmstate->result,
+ GetMemoryChunkContext(dmstate));
+
/* Get the number of rows affected. */
if (dmstate->has_returning)
dmstate->num_tuples = PQntuples(dmstate->result);
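[Editor's note: the reparenting call is what replaces the old memory-context callback — rather than arming a callback whose arg must be kept in sync, the PGresult itself is handed to the context that owns the dmstate. A hedged sketch of the idiom, assuming, as the usage above implies, that libpqsrv_PGresultSetParent re-ties the result's cleanup to the given context and returns it:]

    /* Sketch: make a PGresult live exactly as long as its owning struct. */
    state->result = libpqsrv_PGresultSetParent(state->result,
                                               GetMemoryChunkContext(state));
    /* From here on, resetting or deleting that context frees the result;
     * no explicit PQclear is required on error paths. */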
@@ -4965,7 +4895,7 @@ postgresAnalyzeForeignTable(Relation relation,
UserMapping *user;
PGconn *conn;
StringInfoData sql;
- PGresult *volatile res = NULL;
+ PGresult *res;
/* Return the row-analysis function pointer */
*func = postgresAcquireSampleRowsFunc;
@@ -4991,22 +4921,14 @@ postgresAnalyzeForeignTable(Relation relation,
initStringInfo(&sql);
deparseAnalyzeSizeSql(&sql, relation);
- /* In what follows, do not risk leaking any PGresults. */
- PG_TRY();
- {
- res = pgfdw_exec_query(conn, sql.data, NULL);
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, false, sql.data);
+ res = pgfdw_exec_query(conn, sql.data, NULL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(res, conn, sql.data);
- if (PQntuples(res) != 1 || PQnfields(res) != 1)
- elog(ERROR, "unexpected result from deparseAnalyzeSizeSql query");
- *totalpages = strtoul(PQgetvalue(res, 0, 0), NULL, 10);
- }
- PG_FINALLY();
- {
- PQclear(res);
- }
- PG_END_TRY();
+ if (PQntuples(res) != 1 || PQnfields(res) != 1)
+ elog(ERROR, "unexpected result from deparseAnalyzeSizeSql query");
+ *totalpages = strtoul(PQgetvalue(res, 0, 0), NULL, 10);
+ PQclear(res);
ReleaseConnection(conn);
@@ -5027,9 +4949,9 @@ postgresGetAnalyzeInfoForForeignTable(Relation relation, bool *can_tablesample)
UserMapping *user;
PGconn *conn;
StringInfoData sql;
- PGresult *volatile res = NULL;
- volatile double reltuples = -1;
- volatile char relkind = 0;
+ PGresult *res;
+ double reltuples;
+ char relkind;
/* assume the remote relation does not support TABLESAMPLE */
*can_tablesample = false;
@@ -5048,24 +4970,15 @@ postgresGetAnalyzeInfoForForeignTable(Relation relation, bool *can_tablesample)
initStringInfo(&sql);
deparseAnalyzeInfoSql(&sql, relation);
- /* In what follows, do not risk leaking any PGresults. */
- PG_TRY();
- {
- res = pgfdw_exec_query(conn, sql.data, NULL);
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, false, sql.data);
+ res = pgfdw_exec_query(conn, sql.data, NULL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(res, conn, sql.data);
- if (PQntuples(res) != 1 || PQnfields(res) != 2)
- elog(ERROR, "unexpected result from deparseAnalyzeInfoSql query");
- reltuples = strtod(PQgetvalue(res, 0, 0), NULL);
- relkind = *(PQgetvalue(res, 0, 1));
- }
- PG_FINALLY();
- {
- if (res)
- PQclear(res);
- }
- PG_END_TRY();
+ if (PQntuples(res) != 1 || PQnfields(res) != 2)
+ elog(ERROR, "unexpected result from deparseAnalyzeInfoSql query");
+ reltuples = strtod(PQgetvalue(res, 0, 0), NULL);
+ relkind = *(PQgetvalue(res, 0, 1));
+ PQclear(res);
ReleaseConnection(conn);
@@ -5105,10 +5018,12 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
int server_version_num;
PgFdwSamplingMethod method = ANALYZE_SAMPLE_AUTO; /* auto is default */
double sample_frac = -1.0;
- double reltuples;
+ double reltuples = -1.0;
unsigned int cursor_number;
StringInfoData sql;
- PGresult *volatile res = NULL;
+ PGresult *res;
+ char fetch_sql[64];
+ int fetch_size;
ListCell *lc;
/* Initialize workspace state */
@@ -5285,91 +5200,76 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
deparseAnalyzeSql(&sql, relation, method, sample_frac, &astate.retrieved_attrs);
- /* In what follows, do not risk leaking any PGresults. */
- PG_TRY();
- {
- char fetch_sql[64];
- int fetch_size;
-
- res = pgfdw_exec_query(conn, sql.data, NULL);
- if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, conn, false, sql.data);
- PQclear(res);
- res = NULL;
+ res = pgfdw_exec_query(conn, sql.data, NULL);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(res, conn, sql.data);
+ PQclear(res);
- /*
- * Determine the fetch size. The default is arbitrary, but shouldn't
- * be enormous.
- */
- fetch_size = 100;
- foreach(lc, server->options)
- {
- DefElem *def = (DefElem *) lfirst(lc);
+ /*
+ * Determine the fetch size. The default is arbitrary, but shouldn't be
+ * enormous.
+ */
+ fetch_size = 100;
+ foreach(lc, server->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
- if (strcmp(def->defname, "fetch_size") == 0)
- {
- (void) parse_int(defGetString(def), &fetch_size, 0, NULL);
- break;
- }
- }
- foreach(lc, table->options)
+ if (strcmp(def->defname, "fetch_size") == 0)
{
- DefElem *def = (DefElem *) lfirst(lc);
-
- if (strcmp(def->defname, "fetch_size") == 0)
- {
- (void) parse_int(defGetString(def), &fetch_size, 0, NULL);
- break;
- }
+ (void) parse_int(defGetString(def), &fetch_size, 0, NULL);
+ break;
}
+ }
+ foreach(lc, table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
- /* Construct command to fetch rows from remote. */
- snprintf(fetch_sql, sizeof(fetch_sql), "FETCH %d FROM c%u",
- fetch_size, cursor_number);
-
- /* Retrieve and process rows a batch at a time. */
- for (;;)
+ if (strcmp(def->defname, "fetch_size") == 0)
{
- int numrows;
- int i;
+ (void) parse_int(defGetString(def), &fetch_size, 0, NULL);
+ break;
+ }
+ }
- /* Allow users to cancel long query */
- CHECK_FOR_INTERRUPTS();
+ /* Construct command to fetch rows from remote. */
+ snprintf(fetch_sql, sizeof(fetch_sql), "FETCH %d FROM c%u",
+ fetch_size, cursor_number);
- /*
- * XXX possible future improvement: if rowstoskip is large, we
- * could issue a MOVE rather than physically fetching the rows,
- * then just adjust rowstoskip and samplerows appropriately.
- */
+ /* Retrieve and process rows a batch at a time. */
+ for (;;)
+ {
+ int numrows;
+ int i;
- /* Fetch some rows */
- res = pgfdw_exec_query(conn, fetch_sql, NULL);
- /* On error, report the original query, not the FETCH. */
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, false, sql.data);
+ /* Allow users to cancel long query */
+ CHECK_FOR_INTERRUPTS();
- /* Process whatever we got. */
- numrows = PQntuples(res);
- for (i = 0; i < numrows; i++)
- analyze_row_processor(res, i, &astate);
+ /*
+ * XXX possible future improvement: if rowstoskip is large, we could
+ * issue a MOVE rather than physically fetching the rows, then just
+ * adjust rowstoskip and samplerows appropriately.
+ */
- PQclear(res);
- res = NULL;
+ /* Fetch some rows */
+ res = pgfdw_exec_query(conn, fetch_sql, NULL);
+ /* On error, report the original query, not the FETCH. */
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(res, conn, sql.data);
- /* Must be EOF if we didn't get all the rows requested. */
- if (numrows < fetch_size)
- break;
- }
+ /* Process whatever we got. */
+ numrows = PQntuples(res);
+ for (i = 0; i < numrows; i++)
+ analyze_row_processor(res, i, &astate);
- /* Close the cursor, just to be tidy. */
- close_cursor(conn, cursor_number, NULL);
- }
- PG_CATCH();
- {
PQclear(res);
- PG_RE_THROW();
+
+ /* Must be EOF if we didn't get all the rows requested. */
+ if (numrows < fetch_size)
+ break;
}
- PG_END_TRY();
+
+ /* Close the cursor, just to be tidy. */
+ close_cursor(conn, cursor_number, NULL);
ReleaseConnection(conn);
@@ -5481,7 +5381,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
UserMapping *mapping;
PGconn *conn;
StringInfoData buf;
- PGresult *volatile res = NULL;
+ PGresult *res;
int numrows,
i;
ListCell *lc;
@@ -5520,243 +5420,231 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
/* Create workspace for strings */
initStringInfo(&buf);
- /* In what follows, do not risk leaking any PGresults. */
- PG_TRY();
- {
- /* Check that the schema really exists */
- appendStringInfoString(&buf, "SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = ");
- deparseStringLiteral(&buf, stmt->remote_schema);
+ /* Check that the schema really exists */
+ appendStringInfoString(&buf, "SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = ");
+ deparseStringLiteral(&buf, stmt->remote_schema);
- res = pgfdw_exec_query(conn, buf.data, NULL);
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, false, buf.data);
+ res = pgfdw_exec_query(conn, buf.data, NULL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(res, conn, buf.data);
- if (PQntuples(res) != 1)
- ereport(ERROR,
- (errcode(ERRCODE_FDW_SCHEMA_NOT_FOUND),
- errmsg("schema \"%s\" is not present on foreign server \"%s\"",
- stmt->remote_schema, server->servername)));
+ if (PQntuples(res) != 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_SCHEMA_NOT_FOUND),
+ errmsg("schema \"%s\" is not present on foreign server \"%s\"",
+ stmt->remote_schema, server->servername)));
- PQclear(res);
- res = NULL;
- resetStringInfo(&buf);
+ PQclear(res);
+ resetStringInfo(&buf);
- /*
- * Fetch all table data from this schema, possibly restricted by
- * EXCEPT or LIMIT TO. (We don't actually need to pay any attention
- * to EXCEPT/LIMIT TO here, because the core code will filter the
- * statements we return according to those lists anyway. But it
- * should save a few cycles to not process excluded tables in the
- * first place.)
- *
- * Import table data for partitions only when they are explicitly
- * specified in LIMIT TO clause. Otherwise ignore them and only
- * include the definitions of the root partitioned tables to allow
- * access to the complete remote data set locally in the schema
- * imported.
- *
- * Note: because we run the connection with search_path restricted to
- * pg_catalog, the format_type() and pg_get_expr() outputs will always
- * include a schema name for types/functions in other schemas, which
- * is what we want.
- */
+ /*
+ * Fetch all table data from this schema, possibly restricted by EXCEPT or
+ * LIMIT TO. (We don't actually need to pay any attention to EXCEPT/LIMIT
+ * TO here, because the core code will filter the statements we return
+ * according to those lists anyway. But it should save a few cycles to
+ * not process excluded tables in the first place.)
+ *
+ * Import table data for partitions only when they are explicitly
+ * specified in LIMIT TO clause. Otherwise ignore them and only include
+ * the definitions of the root partitioned tables to allow access to the
+ * complete remote data set locally in the schema imported.
+ *
+ * Note: because we run the connection with search_path restricted to
+ * pg_catalog, the format_type() and pg_get_expr() outputs will always
+ * include a schema name for types/functions in other schemas, which is
+ * what we want.
+ */
+ appendStringInfoString(&buf,
+ "SELECT relname, "
+ " attname, "
+ " format_type(atttypid, atttypmod), "
+ " attnotnull, "
+ " pg_get_expr(adbin, adrelid), ");
+
+ /* Generated columns are supported since Postgres 12 */
+ if (PQserverVersion(conn) >= 120000)
appendStringInfoString(&buf,
- "SELECT relname, "
- " attname, "
- " format_type(atttypid, atttypmod), "
- " attnotnull, "
- " pg_get_expr(adbin, adrelid), ");
-
- /* Generated columns are supported since Postgres 12 */
- if (PQserverVersion(conn) >= 120000)
- appendStringInfoString(&buf,
- " attgenerated, ");
- else
- appendStringInfoString(&buf,
- " NULL, ");
-
- if (import_collate)
- appendStringInfoString(&buf,
- " collname, "
- " collnsp.nspname ");
- else
- appendStringInfoString(&buf,
- " NULL, NULL ");
-
+ " attgenerated, ");
+ else
appendStringInfoString(&buf,
- "FROM pg_class c "
- " JOIN pg_namespace n ON "
- " relnamespace = n.oid "
- " LEFT JOIN pg_attribute a ON "
- " attrelid = c.oid AND attnum > 0 "
- " AND NOT attisdropped "
- " LEFT JOIN pg_attrdef ad ON "
- " adrelid = c.oid AND adnum = attnum ");
-
- if (import_collate)
- appendStringInfoString(&buf,
- " LEFT JOIN pg_collation coll ON "
- " coll.oid = attcollation "
- " LEFT JOIN pg_namespace collnsp ON "
- " collnsp.oid = collnamespace ");
+ " NULL, ");
+ if (import_collate)
appendStringInfoString(&buf,
- "WHERE c.relkind IN ("
- CppAsString2(RELKIND_RELATION) ","
- CppAsString2(RELKIND_VIEW) ","
- CppAsString2(RELKIND_FOREIGN_TABLE) ","
- CppAsString2(RELKIND_MATVIEW) ","
- CppAsString2(RELKIND_PARTITIONED_TABLE) ") "
- " AND n.nspname = ");
- deparseStringLiteral(&buf, stmt->remote_schema);
-
- /* Partitions are supported since Postgres 10 */
- if (PQserverVersion(conn) >= 100000 &&
- stmt->list_type != FDW_IMPORT_SCHEMA_LIMIT_TO)
- appendStringInfoString(&buf, " AND NOT c.relispartition ");
-
- /* Apply restrictions for LIMIT TO and EXCEPT */
- if (stmt->list_type == FDW_IMPORT_SCHEMA_LIMIT_TO ||
- stmt->list_type == FDW_IMPORT_SCHEMA_EXCEPT)
+ " collname, "
+ " collnsp.nspname ");
+ else
+ appendStringInfoString(&buf,
+ " NULL, NULL ");
+
+ appendStringInfoString(&buf,
+ "FROM pg_class c "
+ " JOIN pg_namespace n ON "
+ " relnamespace = n.oid "
+ " LEFT JOIN pg_attribute a ON "
+ " attrelid = c.oid AND attnum > 0 "
+ " AND NOT attisdropped "
+ " LEFT JOIN pg_attrdef ad ON "
+ " adrelid = c.oid AND adnum = attnum ");
+
+ if (import_collate)
+ appendStringInfoString(&buf,
+ " LEFT JOIN pg_collation coll ON "
+ " coll.oid = attcollation "
+ " LEFT JOIN pg_namespace collnsp ON "
+ " collnsp.oid = collnamespace ");
+
+ appendStringInfoString(&buf,
+ "WHERE c.relkind IN ("
+ CppAsString2(RELKIND_RELATION) ","
+ CppAsString2(RELKIND_VIEW) ","
+ CppAsString2(RELKIND_FOREIGN_TABLE) ","
+ CppAsString2(RELKIND_MATVIEW) ","
+ CppAsString2(RELKIND_PARTITIONED_TABLE) ") "
+ " AND n.nspname = ");
+ deparseStringLiteral(&buf, stmt->remote_schema);
+
+ /* Partitions are supported since Postgres 10 */
+ if (PQserverVersion(conn) >= 100000 &&
+ stmt->list_type != FDW_IMPORT_SCHEMA_LIMIT_TO)
+ appendStringInfoString(&buf, " AND NOT c.relispartition ");
+
+ /* Apply restrictions for LIMIT TO and EXCEPT */
+ if (stmt->list_type == FDW_IMPORT_SCHEMA_LIMIT_TO ||
+ stmt->list_type == FDW_IMPORT_SCHEMA_EXCEPT)
+ {
+ bool first_item = true;
+
+ appendStringInfoString(&buf, " AND c.relname ");
+ if (stmt->list_type == FDW_IMPORT_SCHEMA_EXCEPT)
+ appendStringInfoString(&buf, "NOT ");
+ appendStringInfoString(&buf, "IN (");
+
+ /* Append list of table names within IN clause */
+ foreach(lc, stmt->table_list)
{
- bool first_item = true;
+ RangeVar *rv = (RangeVar *) lfirst(lc);
- appendStringInfoString(&buf, " AND c.relname ");
- if (stmt->list_type == FDW_IMPORT_SCHEMA_EXCEPT)
- appendStringInfoString(&buf, "NOT ");
- appendStringInfoString(&buf, "IN (");
+ if (first_item)
+ first_item = false;
+ else
+ appendStringInfoString(&buf, ", ");
+ deparseStringLiteral(&buf, rv->relname);
+ }
+ appendStringInfoChar(&buf, ')');
+ }
- /* Append list of table names within IN clause */
- foreach(lc, stmt->table_list)
- {
- RangeVar *rv = (RangeVar *) lfirst(lc);
+ /* Append ORDER BY at the end of query to ensure output ordering */
+ appendStringInfoString(&buf, " ORDER BY c.relname, a.attnum");
- if (first_item)
- first_item = false;
- else
- appendStringInfoString(&buf, ", ");
- deparseStringLiteral(&buf, rv->relname);
- }
- appendStringInfoChar(&buf, ')');
- }
+ /* Fetch the data */
+ res = pgfdw_exec_query(conn, buf.data, NULL);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(res, conn, buf.data);
- /* Append ORDER BY at the end of query to ensure output ordering */
- appendStringInfoString(&buf, " ORDER BY c.relname, a.attnum");
+ /* Process results */
+ numrows = PQntuples(res);
+ /* note: incrementation of i happens in inner loop's while() test */
+ for (i = 0; i < numrows;)
+ {
+ char *tablename = PQgetvalue(res, i, 0);
+ bool first_item = true;
- /* Fetch the data */
- res = pgfdw_exec_query(conn, buf.data, NULL);
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, false, buf.data);
+ resetStringInfo(&buf);
+ appendStringInfo(&buf, "CREATE FOREIGN TABLE %s (\n",
+ quote_identifier(tablename));
- /* Process results */
- numrows = PQntuples(res);
- /* note: incrementation of i happens in inner loop's while() test */
- for (i = 0; i < numrows;)
+ /* Scan all rows for this table */
+ do
{
- char *tablename = PQgetvalue(res, i, 0);
- bool first_item = true;
+ char *attname;
+ char *typename;
+ char *attnotnull;
+ char *attgenerated;
+ char *attdefault;
+ char *collname;
+ char *collnamespace;
+
+ /* If table has no columns, we'll see nulls here */
+ if (PQgetisnull(res, i, 1))
+ continue;
- resetStringInfo(&buf);
- appendStringInfo(&buf, "CREATE FOREIGN TABLE %s (\n",
- quote_identifier(tablename));
+ attname = PQgetvalue(res, i, 1);
+ typename = PQgetvalue(res, i, 2);
+ attnotnull = PQgetvalue(res, i, 3);
+ attdefault = PQgetisnull(res, i, 4) ? NULL :
+ PQgetvalue(res, i, 4);
+ attgenerated = PQgetisnull(res, i, 5) ? NULL :
+ PQgetvalue(res, i, 5);
+ collname = PQgetisnull(res, i, 6) ? NULL :
+ PQgetvalue(res, i, 6);
+ collnamespace = PQgetisnull(res, i, 7) ? NULL :
+ PQgetvalue(res, i, 7);
+
+ if (first_item)
+ first_item = false;
+ else
+ appendStringInfoString(&buf, ",\n");
- /* Scan all rows for this table */
- do
- {
- char *attname;
- char *typename;
- char *attnotnull;
- char *attgenerated;
- char *attdefault;
- char *collname;
- char *collnamespace;
-
- /* If table has no columns, we'll see nulls here */
- if (PQgetisnull(res, i, 1))
- continue;
+ /* Print column name and type */
+ appendStringInfo(&buf, " %s %s",
+ quote_identifier(attname),
+ typename);
- attname = PQgetvalue(res, i, 1);
- typename = PQgetvalue(res, i, 2);
- attnotnull = PQgetvalue(res, i, 3);
- attdefault = PQgetisnull(res, i, 4) ? NULL :
- PQgetvalue(res, i, 4);
- attgenerated = PQgetisnull(res, i, 5) ? NULL :
- PQgetvalue(res, i, 5);
- collname = PQgetisnull(res, i, 6) ? NULL :
- PQgetvalue(res, i, 6);
- collnamespace = PQgetisnull(res, i, 7) ? NULL :
- PQgetvalue(res, i, 7);
-
- if (first_item)
- first_item = false;
- else
- appendStringInfoString(&buf, ",\n");
+ /*
+ * Add column_name option so that renaming the foreign table's
+ * column doesn't break the association to the underlying column.
+ */
+ appendStringInfoString(&buf, " OPTIONS (column_name ");
+ deparseStringLiteral(&buf, attname);
+ appendStringInfoChar(&buf, ')');
- /* Print column name and type */
- appendStringInfo(&buf, " %s %s",
- quote_identifier(attname),
- typename);
+ /* Add COLLATE if needed */
+ if (import_collate && collname != NULL && collnamespace != NULL)
+ appendStringInfo(&buf, " COLLATE %s.%s",
+ quote_identifier(collnamespace),
+ quote_identifier(collname));
- /*
- * Add column_name option so that renaming the foreign table's
- * column doesn't break the association to the underlying
- * column.
- */
- appendStringInfoString(&buf, " OPTIONS (column_name ");
- deparseStringLiteral(&buf, attname);
- appendStringInfoChar(&buf, ')');
-
- /* Add COLLATE if needed */
- if (import_collate && collname != NULL && collnamespace != NULL)
- appendStringInfo(&buf, " COLLATE %s.%s",
- quote_identifier(collnamespace),
- quote_identifier(collname));
-
- /* Add DEFAULT if needed */
- if (import_default && attdefault != NULL &&
- (!attgenerated || !attgenerated[0]))
- appendStringInfo(&buf, " DEFAULT %s", attdefault);
-
- /* Add GENERATED if needed */
- if (import_generated && attgenerated != NULL &&
- attgenerated[0] == ATTRIBUTE_GENERATED_STORED)
- {
- Assert(attdefault != NULL);
- appendStringInfo(&buf,
- " GENERATED ALWAYS AS (%s) STORED",
- attdefault);
- }
+ /* Add DEFAULT if needed */
+ if (import_default && attdefault != NULL &&
+ (!attgenerated || !attgenerated[0]))
+ appendStringInfo(&buf, " DEFAULT %s", attdefault);
- /* Add NOT NULL if needed */
- if (import_not_null && attnotnull[0] == 't')
- appendStringInfoString(&buf, " NOT NULL");
+ /* Add GENERATED if needed */
+ if (import_generated && attgenerated != NULL &&
+ attgenerated[0] == ATTRIBUTE_GENERATED_STORED)
+ {
+ Assert(attdefault != NULL);
+ appendStringInfo(&buf,
+ " GENERATED ALWAYS AS (%s) STORED",
+ attdefault);
}
- while (++i < numrows &&
- strcmp(PQgetvalue(res, i, 0), tablename) == 0);
- /*
- * Add server name and table-level options. We specify remote
- * schema and table name as options (the latter to ensure that
- * renaming the foreign table doesn't break the association).
- */
- appendStringInfo(&buf, "\n) SERVER %s\nOPTIONS (",
- quote_identifier(server->servername));
+ /* Add NOT NULL if needed */
+ if (import_not_null && attnotnull[0] == 't')
+ appendStringInfoString(&buf, " NOT NULL");
+ }
+ while (++i < numrows &&
+ strcmp(PQgetvalue(res, i, 0), tablename) == 0);
- appendStringInfoString(&buf, "schema_name ");
- deparseStringLiteral(&buf, stmt->remote_schema);
- appendStringInfoString(&buf, ", table_name ");
- deparseStringLiteral(&buf, tablename);
+ /*
+ * Add server name and table-level options. We specify remote schema
+ * and table name as options (the latter to ensure that renaming the
+ * foreign table doesn't break the association).
+ */
+ appendStringInfo(&buf, "\n) SERVER %s\nOPTIONS (",
+ quote_identifier(server->servername));
- appendStringInfoString(&buf, ");");
+ appendStringInfoString(&buf, "schema_name ");
+ deparseStringLiteral(&buf, stmt->remote_schema);
+ appendStringInfoString(&buf, ", table_name ");
+ deparseStringLiteral(&buf, tablename);
- commands = lappend(commands, pstrdup(buf.data));
- }
- }
- PG_FINALLY();
- {
- PQclear(res);
+ appendStringInfoString(&buf, ");");
+
+ commands = lappend(commands, pstrdup(buf.data));
}
- PG_END_TRY();
+ PQclear(res);
ReleaseConnection(conn);
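[Editor's note: the import loop depends on the trailing ORDER BY — rows arrive sorted by table name, so the outer for() walks tables while the inner do/while consumes one table's rows, advancing i in its while() test. A standalone demo of that grouping idiom:]

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        /* Rows sorted by table name, as the remote query guarantees. */
        const char *rows[][2] = {
            {"t1", "a"}, {"t1", "b"}, {"t2", "x"},
        };
        int         numrows = 3;

        /* note: i advances in the inner loop's while() test */
        for (int i = 0; i < numrows;)
        {
            const char *tablename = rows[i][0];

            printf("table %s:", tablename);
            do
                printf(" %s", rows[i][1]);
            while (++i < numrows && strcmp(rows[i][0], tablename) == 0);
            printf("\n");
        }
        return 0;
    }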
@@ -7424,7 +7312,7 @@ postgresForeignAsyncNotify(AsyncRequest *areq)
/* On error, report the original query, not the FETCH. */
if (!PQconsumeInput(fsstate->conn))
- pgfdw_report_error(ERROR, NULL, fsstate->conn, false, fsstate->query);
+ pgfdw_report_error(NULL, fsstate->conn, fsstate->query);
fetch_more_data(node);
@@ -7523,7 +7411,7 @@ fetch_more_data_begin(AsyncRequest *areq)
fsstate->fetch_size, fsstate->cursor_number);
if (!PQsendQuery(fsstate->conn, sql))
- pgfdw_report_error(ERROR, NULL, fsstate->conn, false, fsstate->query);
+ pgfdw_report_error(NULL, fsstate->conn, fsstate->query);
/* Remember that the request is in process */
fsstate->conn_state->pendingAreq = areq;
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
index 81358f3bde7..e69735298d7 100644
--- a/contrib/postgres_fdw/postgres_fdw.h
+++ b/contrib/postgres_fdw/postgres_fdw.h
@@ -15,7 +15,7 @@
#include "foreign/foreign.h"
#include "lib/stringinfo.h"
-#include "libpq-fe.h"
+#include "libpq/libpq-be-fe.h"
#include "nodes/execnodes.h"
#include "nodes/pathnodes.h"
#include "utils/relcache.h"
@@ -166,8 +166,10 @@ extern void do_sql_command(PGconn *conn, const char *sql);
extern PGresult *pgfdw_get_result(PGconn *conn);
extern PGresult *pgfdw_exec_query(PGconn *conn, const char *query,
PgFdwConnState *state);
-extern void pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
- bool clear, const char *sql);
+pg_noreturn extern void pgfdw_report_error(PGresult *res, PGconn *conn,
+ const char *sql);
+extern void pgfdw_report(int elevel, PGresult *res, PGconn *conn,
+ const char *sql);
/* in option.c */
extern int ExtractConnectionOptions(List *defelems,
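[Editor's note: the pg_noreturn marking in the declaration above is what the earlier refactoring was arranged around — the compiler knows control cannot continue past the call, so callers need no dead fallback code. A small illustrative example; fail_remote is a hypothetical stand-in for pgfdw_report_error:]

    /* Hypothetical noreturn reporter, mirroring the declaration above. */
    pg_noreturn extern void fail_remote(PGresult *res, PGconn *conn,
                                        const char *sql);

    static int
    checked_ntuples(PGresult *res, PGconn *conn, const char *sql)
    {
        if (PQresultStatus(res) != PGRES_TUPLES_OK)
            fail_remote(res, conn, sql);
        return PQntuples(res);  /* no unreachable return needed after this */
    }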
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index de5b5929ee0..74a16af04ad 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -3148,8 +3148,11 @@ SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in
</para>
<para>
Converts the first letter of each word to upper case and the
- rest to lower case. Words are sequences of alphanumeric
- characters separated by non-alphanumeric characters.
+ rest to lower case. When using the <literal>libc</literal> locale
+ provider, words are sequences of alphanumeric characters separated
+ by non-alphanumeric characters; when using the ICU locale provider,
+ words are separated according to
+ <ulink url="https://www.unicode.org/reports/tr29/#Word_Boundaries">Unicode Standard Annex #29</ulink>.
</para>
<para>
<literal>initcap('hi THOMAS')</literal>
diff --git a/doc/src/sgml/pageinspect.sgml b/doc/src/sgml/pageinspect.sgml
index 12929333665..f5014787c78 100644
--- a/doc/src/sgml/pageinspect.sgml
+++ b/doc/src/sgml/pageinspect.sgml
@@ -741,7 +741,7 @@ test=# SELECT first_tid, nbytes, tids[0:5] AS some_tids
For example:
<screen>
test=# SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 2));
- lsn | nsn | rightlink | flags
+ lsn | nsn | rightlink | flags
------------+------------+-----------+--------
0/0B5FE088 | 0/00000000 | 1 | {leaf}
(1 row)
diff --git a/doc/src/sgml/pgstatstatements.sgml b/doc/src/sgml/pgstatstatements.sgml
index 7baa07dcdbf..d753de5836e 100644
--- a/doc/src/sgml/pgstatstatements.sgml
+++ b/doc/src/sgml/pgstatstatements.sgml
@@ -556,6 +556,24 @@
<row>
<entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>generic_plan_calls</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Number of times the statement has been executed using a generic plan
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>custom_plan_calls</structfield> <type>bigint</type>
+ </para>
+ <para>
+ Number of times the statement has been executed using a custom plan
+ </para></entry>
+ </row>
+
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
<structfield>stats_since</structfield> <type>timestamp with time zone</type>
</para>
<para>
diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml
index b115884acb3..e56eac8fd0f 100644
--- a/doc/src/sgml/protocol.sgml
+++ b/doc/src/sgml/protocol.sgml
@@ -537,6 +537,11 @@
The frontend should not respond to this message, but should
continue listening for a ReadyForQuery message.
</para>
+ <para>
+ The <productname>PostgreSQL</productname> server will always send this
+ message, but some third-party backend implementations of the protocol
+ that don't support query cancellation are known not to.
+ </para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/ref/create_trigger.sgml b/doc/src/sgml/ref/create_trigger.sgml
index ed6d206ae71..0d8d463479b 100644
--- a/doc/src/sgml/ref/create_trigger.sgml
+++ b/doc/src/sgml/ref/create_trigger.sgml
@@ -197,9 +197,11 @@ CREATE [ OR REPLACE ] [ CONSTRAINT ] TRIGGER <replaceable class="parameter">name
of the rows inserted, deleted, or modified by the current SQL statement.
This feature lets the trigger see a global view of what the statement did,
not just one row at a time. This option is only allowed for
- an <literal>AFTER</literal> trigger that is not a constraint trigger; also, if
- the trigger is an <literal>UPDATE</literal> trigger, it must not specify
- a <replaceable class="parameter">column_name</replaceable> list.
+ an <literal>AFTER</literal> trigger on a plain table (not a foreign table).
+ The trigger should not be a constraint trigger. Also, if the trigger is
+ an <literal>UPDATE</literal> trigger, it must not specify
+ a <replaceable class="parameter">column_name</replaceable> list when using
+ this option.
<literal>OLD TABLE</literal> may only be specified once, and only for a trigger
that can fire on <literal>UPDATE</literal> or <literal>DELETE</literal>; it creates a
transition relation containing the <firstterm>before-images</firstterm> of all rows
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index 2ae084b5fa6..0bc7609bdf8 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -1355,6 +1355,15 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
+ <term><option>--statistics</option></term>
+ <listitem>
+ <para>
+ Dump statistics.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>--statistics-only</option></term>
<listitem>
<para>
@@ -1441,33 +1450,6 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
- <term><option>--with-data</option></term>
- <listitem>
- <para>
- Dump data. This is the default.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--with-schema</option></term>
- <listitem>
- <para>
- Dump schema (data definitions). This is the default.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--with-statistics</option></term>
- <listitem>
- <para>
- Dump statistics.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><option>-?</option></term>
<term><option>--help</option></term>
<listitem>
@@ -1682,7 +1664,7 @@ CREATE DATABASE foo WITH TEMPLATE template0;
</para>
<para>
- If <option>--with-statistics</option> is specified,
+ If <option>--statistics</option> is specified,
<command>pg_dump</command> will include most optimizer statistics in the
resulting dump file. However, some statistics may not be included, such as
those created explicitly with <xref linkend="sql-createstatistics"/> or
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index 8ca68da5a55..364442f00f2 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -16,10 +16,7 @@ PostgreSQL documentation
<refnamediv>
<refname>pg_dumpall</refname>
-
- <refpurpose>
- export a <productname>PostgreSQL</productname> database cluster as an SQL script or to other formats
- </refpurpose>
+ <refpurpose>extract a <productname>PostgreSQL</productname> database cluster into a script file</refpurpose>
</refnamediv>
<refsynopsisdiv>
@@ -36,7 +33,7 @@ PostgreSQL documentation
<para>
<application>pg_dumpall</application> is a utility for writing out
(<quote>dumping</quote>) all <productname>PostgreSQL</productname> databases
- of a cluster into an SQL script file or an archive. The output contains
+ of a cluster into one script file. The script file contains
<acronym>SQL</acronym> commands that can be used as input to <xref
linkend="app-psql"/> to restore the databases. It does this by
calling <xref linkend="app-pgdump"/> for each database in the cluster.
@@ -55,17 +52,12 @@ PostgreSQL documentation
</para>
<para>
- Plain text SQL scripts will be written to the standard output. Use the
+ The SQL script will be written to the standard output. Use the
<option>-f</option>/<option>--file</option> option or shell operators to
redirect it into a file.
</para>
<para>
- Archives in other formats will be placed in a directory named using the
- <option>-f</option>/<option>--file</option>, which is required in this case.
- </para>
-
- <para>
<application>pg_dumpall</application> needs to connect several
times to the <productname>PostgreSQL</productname> server (once per
database). If you use password authentication it will ask for
@@ -129,86 +121,11 @@ PostgreSQL documentation
<para>
Send output to the specified file. If this is omitted, the
standard output is used.
- Note: This option can only be omitted when <option>--format</option> is plain
</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><option>-F <replaceable class="parameter">format</replaceable></option></term>
- <term><option>--format=<replaceable class="parameter">format</replaceable></option></term>
- <listitem>
- <para>
- Specify the format of dump files. In plain format, all the dump data is
- sent in a single text stream. This is the default.
-
- In all other modes, <application>pg_dumpall</application> first creates two files:
- <filename>global.dat</filename> and <filename>map.dat</filename>, in the directory
- specified by <option>--file</option>.
- The first file contains global data, such as roles and tablespaces. The second
- contains a mapping between database oids and names. These files are used by
- <application>pg_restore</application>. Data for individual databases is placed in
- <filename>databases</filename> subdirectory, named using the database's <type>oid</type>.
-
- <variablelist>
- <varlistentry>
- <term><literal>d</literal></term>
- <term><literal>directory</literal></term>
- <listitem>
- <para>
- Output directory-format archives for each database,
- suitable for input into pg_restore. The directory
- will have database <type>oid</type> as its name.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><literal>p</literal></term>
- <term><literal>plain</literal></term>
- <listitem>
- <para>
- Output a plain-text SQL script file (the default).
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><literal>c</literal></term>
- <term><literal>custom</literal></term>
- <listitem>
- <para>
- Output a custom-format archive for each database,
- suitable for input into pg_restore. The archive
- will be named <filename>dboid.dmp</filename> where <type>dboid</type> is the
- <type>oid</type> of the database.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><literal>t</literal></term>
- <term><literal>tar</literal></term>
- <listitem>
- <para>
- Output a tar-format archive for each database,
- suitable for input into pg_restore. The archive
- will be named <filename>dboid.tar</filename> where <type>dboid</type> is the
- <type>oid</type> of the database.
- </para>
- </listitem>
- </varlistentry>
-
- </variablelist>
-
- Note: see <xref linkend="app-pgdump"/> for details
- of how the various non plain text archives work.
-
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><option>-g</option></term>
<term><option>--globals-only</option></term>
<listitem>
@@ -689,6 +606,15 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
</varlistentry>
<varlistentry>
+ <term><option>--statistics</option></term>
+ <listitem>
+ <para>
+ Dump statistics.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>--statistics-only</option></term>
<listitem>
<para>
@@ -724,33 +650,6 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
</varlistentry>
<varlistentry>
- <term><option>--with-data</option></term>
- <listitem>
- <para>
- Dump data. This is the default.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--with-schema</option></term>
- <listitem>
- <para>
- Dump schema (data definitions). This is the default.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--with-statistics</option></term>
- <listitem>
- <para>
- Dump statistics.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><option>-?</option></term>
<term><option>--help</option></term>
<listitem>
@@ -961,7 +860,7 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
</para>
<para>
- If <option>--with-statistics</option> is specified,
+ If <option>--statistics</option> is specified,
<command>pg_dumpall</command> will include most optimizer statistics in the
resulting dump file. However, some statistics may not be included, such as
those created explicitly with <xref linkend="sql-createstatistics"/> or
diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml
index b649bd3a5ae..261ead15039 100644
--- a/doc/src/sgml/ref/pg_restore.sgml
+++ b/doc/src/sgml/ref/pg_restore.sgml
@@ -18,9 +18,8 @@ PostgreSQL documentation
<refname>pg_restore</refname>
<refpurpose>
- restore <productname>PostgreSQL</productname> databases from archives
- created by <application>pg_dump</application> or
- <application>pg_dumpall</application>
+ restore a <productname>PostgreSQL</productname> database from an
+ archive file created by <application>pg_dump</application>
</refpurpose>
</refnamediv>
@@ -39,14 +38,13 @@ PostgreSQL documentation
<para>
<application>pg_restore</application> is a utility for restoring a
- <productname>PostgreSQL</productname> database or cluster from an archive
- created by <xref linkend="app-pgdump"/> or
- <xref linkend="app-pg-dumpall"/> in one of the non-plain-text
+ <productname>PostgreSQL</productname> database from an archive
+ created by <xref linkend="app-pgdump"/> in one of the non-plain-text
formats. It will issue the commands necessary to reconstruct the
- database or cluster to the state it was in at the time it was saved. The
- archives also allow <application>pg_restore</application> to
+ database to the state it was in at the time it was saved. The
+ archive files also allow <application>pg_restore</application> to
be selective about what is restored, or even to reorder the items
- prior to being restored. The archive formats are designed to be
+ prior to being restored. The archive files are designed to be
portable across architectures.
</para>
@@ -54,17 +52,10 @@ PostgreSQL documentation
<application>pg_restore</application> can operate in two modes.
If a database name is specified, <application>pg_restore</application>
connects to that database and restores archive contents directly into
- the database.
- When restoring from a dump made by <application>pg_dumpall</application>,
- each database will be created and then the restoration will be run in that
- database.
-
- Otherwise, when a database name is not specified, a script containing the SQL
- commands necessary to rebuild the database or cluster is created and written
+ the database. Otherwise, a script containing the SQL
+ commands necessary to rebuild the database is created and written
to a file or standard output. This script output is equivalent to
- the plain text output format of <application>pg_dump</application> or
- <application>pg_dumpall</application>.
-
+ the plain text output format of <application>pg_dump</application>.
Some of the options controlling the output are therefore analogous to
<application>pg_dump</application> options.
</para>
@@ -149,8 +140,6 @@ PostgreSQL documentation
commands that mention this database.
Access privileges for the database itself are also restored,
unless <option>--no-acl</option> is specified.
- <option>--create</option> is required when restoring multiple databases
- from an archive created by <application>pg_dumpall</application>.
</para>
<para>
@@ -247,19 +236,6 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
- <term><option>-g</option></term>
- <term><option>--globals-only</option></term>
- <listitem>
- <para>
- Restore only global objects (roles and tablespaces), no databases.
- </para>
- <para>
- This option is only relevant when restoring from an archive made using <application>pg_dumpall</application>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><option>-I <replaceable class="parameter">index</replaceable></option></term>
<term><option>--index=<replaceable class="parameter">index</replaceable></option></term>
<listitem>
@@ -604,28 +580,6 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
- <term><option>--exclude-database=<replaceable class="parameter">pattern</replaceable></option></term>
- <listitem>
- <para>
- Do not restore databases whose name matches
- <replaceable class="parameter">pattern</replaceable>.
- Multiple patterns can be excluded by writing multiple
- <option>--exclude-database</option> switches. The
- <replaceable class="parameter">pattern</replaceable> parameter is
- interpreted as a pattern according to the same rules used by
- <application>psql</application>'s <literal>\d</literal>
- commands (see <xref linkend="app-psql-patterns"/>),
- so multiple databases can also be excluded by writing wildcard
- characters in the pattern. When using wildcards, be careful to
- quote the pattern if needed to prevent shell wildcard expansion.
- </para>
- <para>
- This option is only relevant when restoring from an archive made using <application>pg_dumpall</application>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><option>--filter=<replaceable class="parameter">filename</replaceable></option></term>
<listitem>
<para>
@@ -862,6 +816,16 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
+ <term><option>--statistics</option></term>
+ <listitem>
+ <para>
+ Output commands to restore statistics, if the archive contains them.
+ This is the default.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>--statistics-only</option></term>
<listitem>
<para>
@@ -920,36 +884,6 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
- <term><option>--with-data</option></term>
- <listitem>
- <para>
- Output commands to restore data, if the archive contains them.
- This is the default.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--with-schema</option></term>
- <listitem>
- <para>
- Output commands to restore schema (data definitions), if the archive
- contains them. This is the default.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
- <term><option>--with-statistics</option></term>
- <listitem>
- <para>
- Output commands to restore statistics, if the archive contains them.
- This is the default.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><option>-?</option></term>
<term><option>--help</option></term>
<listitem>
diff --git a/doc/src/sgml/ref/vacuumdb.sgml b/doc/src/sgml/ref/vacuumdb.sgml
index b0680a61814..c7d9dca17b8 100644
--- a/doc/src/sgml/ref/vacuumdb.sgml
+++ b/doc/src/sgml/ref/vacuumdb.sgml
@@ -282,9 +282,11 @@ PostgreSQL documentation
<listitem>
<para>
Only analyze relations that are missing statistics for a column, index
- expression, or extended statistics object. This option prevents
- <application>vacuumdb</application> from deleting existing statistics
- so that the query optimizer's choices do not become transiently worse.
+ expression, or extended statistics object. When used with
+ <option>--analyze-in-stages</option>, this option prevents
+ <application>vacuumdb</application> from temporarily replacing existing
+ statistics with ones generated under lower statistics targets, thus
+ avoiding transiently worse query optimizer choices.
</para>
<para>
This option can only be used in conjunction with
diff --git a/meson.build b/meson.build
index 5365aaf95e6..ca423dc8e12 100644
--- a/meson.build
+++ b/meson.build
@@ -1996,10 +1996,7 @@ if cc.links('''
cdata.set('HAVE__BUILTIN_OP_OVERFLOW', 1)
endif
-
-# XXX: The configure.ac check for __cpuid() is broken, we don't copy that
-# here. To prevent problems due to two detection methods working, stop
-# checking after one.
+# Check for __get_cpuid() and __cpuid().
if cc.links('''
#include <cpuid.h>
int main(int arg, char **argv)
diff --git a/src/Makefile.global.in b/src/Makefile.global.in
index 04952b533de..8b1b357beaa 100644
--- a/src/Makefile.global.in
+++ b/src/Makefile.global.in
@@ -254,7 +254,7 @@ CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
PG_SYSROOT = @PG_SYSROOT@
-override CPPFLAGS := $(ICU_CFLAGS) $(LIBNUMA_CFLAGS) $(LIBURING_CFLAGS) $(CPPFLAGS)
+override CPPFLAGS += $(ICU_CFLAGS) $(LIBNUMA_CFLAGS) $(LIBURING_CFLAGS)
ifdef PGXS
override CPPFLAGS := -I$(includedir_server) -I$(includedir_internal) $(CPPFLAGS)
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 745a04ef26e..8f918e00af7 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -364,7 +364,7 @@ visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
*vmbuf = vm_readbuf(rel, mapBlock, false);
if (!BufferIsValid(*vmbuf))
- return false;
+ return (uint8) 0;
}
map = PageGetContents(BufferGetPage(*vmbuf));
diff --git a/src/backend/access/index/amapi.c b/src/backend/access/index/amapi.c
index f0f4f974bce..60684c53422 100644
--- a/src/backend/access/index/amapi.c
+++ b/src/backend/access/index/amapi.c
@@ -42,6 +42,19 @@ GetIndexAmRoutine(Oid amhandler)
elog(ERROR, "index access method handler function %u did not return an IndexAmRoutine struct",
amhandler);
+ /* Assert that all required callbacks are present. */
+ Assert(routine->ambuild != NULL);
+ Assert(routine->ambuildempty != NULL);
+ Assert(routine->aminsert != NULL);
+ Assert(routine->ambulkdelete != NULL);
+ Assert(routine->amvacuumcleanup != NULL);
+ Assert(routine->amcostestimate != NULL);
+ Assert(routine->amoptions != NULL);
+ Assert(routine->amvalidate != NULL);
+ Assert(routine->ambeginscan != NULL);
+ Assert(routine->amrescan != NULL);
+ Assert(routine->amendscan != NULL);
+
return routine;
}
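GetIndexAmRoutine() now asserts that every required callback is set, so a handler that omits one fails fast in assert-enabled builds. A minimal sketch of a conforming handler, assuming hypothetical myam_* implementations (only the field names are real):

    /* Sketch only: the myam_* functions are hypothetical. */
    Datum
    myam_handler(PG_FUNCTION_ARGS)
    {
        IndexAmRoutine *routine = makeNode(IndexAmRoutine);

        routine->ambuild = myam_build;
        routine->ambuildempty = myam_buildempty;
        routine->aminsert = myam_insert;
        routine->ambulkdelete = myam_bulkdelete;
        routine->amvacuumcleanup = myam_vacuumcleanup;
        routine->amcostestimate = myam_costestimate;
        routine->amoptions = myam_options;
        routine->amvalidate = myam_validate;
        routine->ambeginscan = myam_beginscan;
        routine->amrescan = myam_rescan;
        routine->amendscan = myam_endscan;
        /* optional callbacks (amgettuple, amgetbitmap, ...) may remain NULL */

        PG_RETURN_POINTER(routine);
    }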
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index eefffc4277a..b0891998b24 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -96,6 +96,7 @@
#include "utils/guc_hooks.h"
#include "utils/guc_tables.h"
#include "utils/injection_point.h"
+#include "utils/pgstat_internal.h"
#include "utils/ps_status.h"
#include "utils/relmapper.h"
#include "utils/snapmgr.h"
@@ -1091,6 +1092,9 @@ XLogInsertRecord(XLogRecData *rdata,
pgWalUsage.wal_bytes += rechdr->xl_tot_len;
pgWalUsage.wal_records++;
pgWalUsage.wal_fpi += num_fpi;
+
+ /* Required for the flush of pending stats WAL data */
+ pgstat_report_fixed = true;
}
return EndPos;
@@ -2108,6 +2112,12 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
LWLockRelease(WALWriteLock);
pgWalUsage.wal_buffers_full++;
TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_DONE();
+
+ /*
+ * Required for the flush of pending stats WAL data, since
+ * pgWalUsage was updated above.
+ */
+ pgstat_report_fixed = true;
}
}
}
diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c
index 63c2992d19f..244acf52f36 100644
--- a/src/backend/catalog/pg_subscription.c
+++ b/src/backend/catalog/pg_subscription.c
@@ -320,7 +320,7 @@ AddSubscriptionRelState(Oid subid, Oid relid, char state,
*/
void
UpdateSubscriptionRelState(Oid subid, Oid relid, char state,
- XLogRecPtr sublsn)
+ XLogRecPtr sublsn, bool already_locked)
{
Relation rel;
HeapTuple tup;
@@ -328,9 +328,24 @@ UpdateSubscriptionRelState(Oid subid, Oid relid, char state,
Datum values[Natts_pg_subscription_rel];
bool replaces[Natts_pg_subscription_rel];
- LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock);
+ if (already_locked)
+ {
+#ifdef USE_ASSERT_CHECKING
+ LOCKTAG tag;
- rel = table_open(SubscriptionRelRelationId, RowExclusiveLock);
+ Assert(CheckRelationOidLockedByMe(SubscriptionRelRelationId,
+ RowExclusiveLock, true));
+ SET_LOCKTAG_OBJECT(tag, InvalidOid, SubscriptionRelationId, subid, 0);
+ Assert(LockHeldByMe(&tag, AccessShareLock, true));
+#endif
+
+ rel = table_open(SubscriptionRelRelationId, NoLock);
+ }
+ else
+ {
+ LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock);
+ rel = table_open(SubscriptionRelRelationId, RowExclusiveLock);
+ }
/* Try finding existing mapping. */
tup = SearchSysCacheCopy2(SUBSCRIPTIONRELMAP,
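UpdateSubscriptionRelState() now lets callers that already hold the needed locks skip re-acquiring them. The two calling patterns, sketched with variables assumed in scope:

    /* Normal path: the function takes the locks itself. */
    UpdateSubscriptionRelState(subid, relid, state, sublsn, false);

    /*
     * Caller already holds AccessShareLock on the subscription and
     * RowExclusiveLock on pg_subscription_rel, so pass true.
     */
    LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock);
    rel = table_open(SubscriptionRelRelationId, RowExclusiveLock);
    UpdateSubscriptionRelState(subid, relid, state, sublsn, true);
    table_close(rel, NoLock);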
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 7e2792ead71..8345bc0264b 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3582,6 +3582,7 @@ static void
show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
{
Plan *plan = ((PlanState *) mstate)->plan;
+ Memoize *mplan = (Memoize *) plan;
ListCell *lc;
List *context;
StringInfoData keystr;
@@ -3602,7 +3603,7 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
plan,
ancestors);
- foreach(lc, ((Memoize *) plan)->param_exprs)
+ foreach(lc, mplan->param_exprs)
{
Node *expr = (Node *) lfirst(lc);
@@ -3618,6 +3619,24 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
pfree(keystr.data);
+ if (es->costs)
+ {
+ if (es->format == EXPLAIN_FORMAT_TEXT)
+ {
+ ExplainIndentText(es);
+ appendStringInfo(es->str, "Estimates: capacity=%u distinct keys=%.0f lookups=%.0f hit percent=%.2f%%\n",
+ mplan->est_entries, mplan->est_unique_keys,
+ mplan->est_calls, mplan->est_hit_ratio * 100.0);
+ }
+ else
+ {
+ ExplainPropertyUInteger("Estimated Capacity", NULL, mplan->est_entries, es);
+ ExplainPropertyFloat("Estimated Distinct Lookup Keys", NULL, mplan->est_unique_keys, 0, es);
+ ExplainPropertyFloat("Estimated Lookups", NULL, mplan->est_calls, 0, es);
+ ExplainPropertyFloat("Estimated Hit Percent", NULL, mplan->est_hit_ratio * 100.0, 2, es);
+ }
+ }
+
if (!es->analyze)
return;
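For illustration, with the hypothetical estimates used in the costing example below (capacity 400, 500 distinct keys, 10000 lookups), the new TEXT-format line would render as:

    Estimates: capacity=400 distinct keys=500 lookups=10000 hit percent=76.00%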
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index fcd5fcd8915..77f8461f42e 100644
--- a/src/backend/commands/foreigncmds.c
+++ b/src/backend/commands/foreigncmds.c
@@ -1588,7 +1588,7 @@ ImportForeignSchema(ImportForeignSchemaStmt *stmt)
pstmt->utilityStmt = (Node *) cstmt;
pstmt->stmt_location = rs->stmt_location;
pstmt->stmt_len = rs->stmt_len;
- pstmt->cached_plan_type = PLAN_CACHE_NONE;
+ pstmt->planOrigin = PLAN_STMT_INTERNAL;
/* Execute statement */
ProcessUtility(pstmt, cmd, false,
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index c00f1a11384..0f03d9743d2 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -215,7 +215,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString,
wrapper->utilityStmt = stmt;
wrapper->stmt_location = stmt_location;
wrapper->stmt_len = stmt_len;
- wrapper->cached_plan_type = PLAN_CACHE_NONE;
+ wrapper->planOrigin = PLAN_STMT_INTERNAL;
/* do this step */
ProcessUtility(wrapper,
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index fc76f22fb82..f098a5557cf 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -189,7 +189,7 @@ ExecSerializePlan(Plan *plan, EState *estate)
pstmt->permInfos = estate->es_rteperminfos;
pstmt->resultRelations = NIL;
pstmt->appendRelations = NIL;
- pstmt->cached_plan_type = PLAN_CACHE_NONE;
+ pstmt->planOrigin = PLAN_STMT_INTERNAL;
/*
* Transfer only parallel-safe subplans, leaving a NULL "hole" in the list
diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile
index e8c12060b93..68677ba42e1 100644
--- a/src/backend/jit/llvm/Makefile
+++ b/src/backend/jit/llvm/Makefile
@@ -31,7 +31,7 @@ endif
# All files in this directory use LLVM.
CFLAGS += $(LLVM_CFLAGS)
CXXFLAGS += $(LLVM_CXXFLAGS)
-override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS)
+override CPPFLAGS += $(LLVM_CPPFLAGS)
SHLIB_LINK += $(LLVM_LIBS)
# Because this module includes C++ files, we need to use a C++
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 1f04a2c182c..344a3188317 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -2572,13 +2572,13 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
Cost input_startup_cost = mpath->subpath->startup_cost;
Cost input_total_cost = mpath->subpath->total_cost;
double tuples = mpath->subpath->rows;
- double calls = mpath->calls;
+ Cardinality est_calls = mpath->est_calls;
int width = mpath->subpath->pathtarget->width;
double hash_mem_bytes;
double est_entry_bytes;
- double est_cache_entries;
- double ndistinct;
+ Cardinality est_cache_entries;
+ Cardinality ndistinct;
double evict_ratio;
double hit_ratio;
Cost startup_cost;
@@ -2604,7 +2604,7 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
/* estimate on the distinct number of parameter values */
- ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
+ ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL,
&estinfo);
/*
@@ -2616,7 +2616,10 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
* certainly mean a MemoizePath will never survive add_path().
*/
if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
- ndistinct = calls;
+ ndistinct = est_calls;
+
+ /* Remember the ndistinct estimate for EXPLAIN */
+ mpath->est_unique_keys = ndistinct;
/*
* Since we've already estimated the maximum number of entries we can
@@ -2644,9 +2647,12 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
* must look at how many scans are estimated in total for this node and
* how many of those scans we expect to get a cache hit.
*/
- hit_ratio = ((calls - ndistinct) / calls) *
+ hit_ratio = ((est_calls - ndistinct) / est_calls) *
(est_cache_entries / Max(ndistinct, est_cache_entries));
+ /* Remember the hit ratio estimate for EXPLAIN */
+ mpath->est_hit_ratio = hit_ratio;
+
Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
/*
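A worked instance of the hit-ratio formula, using hypothetical numbers:

    /*
     * est_calls = 10000, ndistinct = 500, est_cache_entries = 400:
     *
     *   hit_ratio = ((10000 - 500) / 10000) * (400 / Max(500, 400))
     *             = 0.95 * 0.80
     *             = 0.76
     *
     * i.e. about 76% of rescans are expected to hit the cache, since only
     * 400 of the 500 distinct keys fit in it at once.
     */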
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 8a9f1d7a943..bfefc7dbea1 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -284,7 +284,10 @@ static Material *make_material(Plan *lefttree);
static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
Oid *collations, List *param_exprs,
bool singlerow, bool binary_mode,
- uint32 est_entries, Bitmapset *keyparamids);
+ uint32 est_entries, Bitmapset *keyparamids,
+ Cardinality est_calls,
+ Cardinality est_unique_keys,
+ double est_hit_ratio);
static WindowAgg *make_windowagg(List *tlist, WindowClause *wc,
int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
@@ -1753,7 +1756,8 @@ create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
plan = make_memoize(subplan, operators, collations, param_exprs,
best_path->singlerow, best_path->binary_mode,
- best_path->est_entries, keyparamids);
+ best_path->est_entries, keyparamids, best_path->est_calls,
+ best_path->est_unique_keys, best_path->est_hit_ratio);
copy_generic_path_info(&plan->plan, (Path *) best_path);
@@ -6749,7 +6753,9 @@ materialize_finished_plan(Plan *subplan)
static Memoize *
make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
List *param_exprs, bool singlerow, bool binary_mode,
- uint32 est_entries, Bitmapset *keyparamids)
+ uint32 est_entries, Bitmapset *keyparamids,
+ Cardinality est_calls, Cardinality est_unique_keys,
+ double est_hit_ratio)
{
Memoize *node = makeNode(Memoize);
Plan *plan = &node->plan;
@@ -6767,6 +6773,9 @@ make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
node->binary_mode = binary_mode;
node->est_entries = est_entries;
node->keyparamids = keyparamids;
+ node->est_calls = est_calls;
+ node->est_unique_keys = est_unique_keys;
+ node->est_hit_ratio = est_hit_ratio;
return node;
}
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index a77b2147e95..d59d6e4c6a0 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -558,6 +558,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
result->commandType = parse->commandType;
result->queryId = parse->queryId;
+ result->planOrigin = PLAN_STMT_STANDARD;
result->hasReturning = (parse->returningList != NIL);
result->hasModifyingCTE = parse->hasModifyingCTE;
result->canSetTag = parse->canSetTag;
@@ -582,7 +583,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
result->utilityStmt = parse->utilityStmt;
result->stmt_location = parse->stmt_location;
result->stmt_len = parse->stmt_len;
- result->cached_plan_type = PLAN_CACHE_NONE;
result->jitFlags = PGJIT_NONE;
if (jit_enabled && jit_above_cost >= 0 &&
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 9cc602788ea..a4c5867cdcb 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1689,7 +1689,7 @@ create_material_path(RelOptInfo *rel, Path *subpath)
MemoizePath *
create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
List *param_exprs, List *hash_operators,
- bool singlerow, bool binary_mode, double calls)
+ bool singlerow, bool binary_mode, Cardinality est_calls)
{
MemoizePath *pathnode = makeNode(MemoizePath);
@@ -1710,7 +1710,6 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
pathnode->param_exprs = param_exprs;
pathnode->singlerow = singlerow;
pathnode->binary_mode = binary_mode;
- pathnode->calls = clamp_row_est(calls);
/*
* For now we set est_entries to 0. cost_memoize_rescan() does all the
@@ -1720,6 +1719,12 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
*/
pathnode->est_entries = 0;
+ pathnode->est_calls = clamp_row_est(est_calls);
+
+ /* These will also be set later in cost_memoize_rescan() */
+ pathnode->est_unique_keys = 0.0;
+ pathnode->est_hit_ratio = 0.0;
+
/* we should not generate this path type when enable_memoize=false */
Assert(enable_memoize);
pathnode->path.disabled_nodes = subpath->disabled_nodes;
@@ -4259,7 +4264,7 @@ reparameterize_path(PlannerInfo *root, Path *path,
mpath->hash_operators,
mpath->singlerow,
mpath->binary_mode,
- mpath->calls);
+ mpath->est_calls);
}
default:
break;
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 73345bb3c70..db43034b9db 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -318,6 +318,11 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
%type <list> opt_qualified_name
%type <boolean> opt_concurrently
%type <dbehavior> opt_drop_behavior
+%type <list> opt_utility_option_list
+%type <list> utility_option_list
+%type <defelt> utility_option_elem
+%type <str> utility_option_name
+%type <node> utility_option_arg
%type <node> alter_column_default opclass_item opclass_drop alter_using
%type <ival> add_drop opt_asc_desc opt_nulls_order
@@ -338,10 +343,6 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
create_extension_opt_item alter_extension_opt_item
%type <ival> opt_lock lock_type cast_context
-%type <str> utility_option_name
-%type <defelt> utility_option_elem
-%type <list> utility_option_list
-%type <node> utility_option_arg
%type <defelt> drop_option
%type <boolean> opt_or_replace opt_no
opt_grant_grant_option
@@ -556,7 +557,6 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
%type <list> generic_option_list alter_generic_option_list
%type <ival> reindex_target_relation reindex_target_all
-%type <list> opt_reindex_option_list
%type <node> copy_generic_opt_arg copy_generic_opt_arg_list_item
%type <defelt> copy_generic_opt_elem
@@ -1141,6 +1141,41 @@ opt_drop_behavior:
| /* EMPTY */ { $$ = DROP_RESTRICT; /* default */ }
;
+opt_utility_option_list:
+ '(' utility_option_list ')' { $$ = $2; }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
+
+utility_option_list:
+ utility_option_elem
+ {
+ $$ = list_make1($1);
+ }
+ | utility_option_list ',' utility_option_elem
+ {
+ $$ = lappend($1, $3);
+ }
+ ;
+
+utility_option_elem:
+ utility_option_name utility_option_arg
+ {
+ $$ = makeDefElem($1, $2, @1);
+ }
+ ;
+
+utility_option_name:
+ NonReservedWord { $$ = $1; }
+ | analyze_keyword { $$ = "analyze"; }
+ | FORMAT_LA { $$ = "format"; }
+ ;
+
+utility_option_arg:
+ opt_boolean_or_string { $$ = (Node *) makeString($1); }
+ | NumericOnly { $$ = (Node *) $1; }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
+
/*****************************************************************************
*
* CALL statement
@@ -2028,18 +2063,12 @@ constraints_set_mode:
* Checkpoint statement
*/
CheckPointStmt:
- CHECKPOINT
+ CHECKPOINT opt_utility_option_list
{
CheckPointStmt *n = makeNode(CheckPointStmt);
$$ = (Node *) n;
- }
- | CHECKPOINT '(' utility_option_list ')'
- {
- CheckPointStmt *n = makeNode(CheckPointStmt);
-
- $$ = (Node *) n;
- n->options = $3;
+ n->options = $2;
}
;
@@ -9354,7 +9383,7 @@ DropTransformStmt: DROP TRANSFORM opt_if_exists FOR Typename LANGUAGE name opt_d
*****************************************************************************/
ReindexStmt:
- REINDEX opt_reindex_option_list reindex_target_relation opt_concurrently qualified_name
+ REINDEX opt_utility_option_list reindex_target_relation opt_concurrently qualified_name
{
ReindexStmt *n = makeNode(ReindexStmt);
@@ -9367,7 +9396,7 @@ ReindexStmt:
makeDefElem("concurrently", NULL, @4));
$$ = (Node *) n;
}
- | REINDEX opt_reindex_option_list SCHEMA opt_concurrently name
+ | REINDEX opt_utility_option_list SCHEMA opt_concurrently name
{
ReindexStmt *n = makeNode(ReindexStmt);
@@ -9380,7 +9409,7 @@ ReindexStmt:
makeDefElem("concurrently", NULL, @4));
$$ = (Node *) n;
}
- | REINDEX opt_reindex_option_list reindex_target_all opt_concurrently opt_single_name
+ | REINDEX opt_utility_option_list reindex_target_all opt_concurrently opt_single_name
{
ReindexStmt *n = makeNode(ReindexStmt);
@@ -9402,10 +9431,6 @@ reindex_target_all:
SYSTEM_P { $$ = REINDEX_OBJECT_SYSTEM; }
| DATABASE { $$ = REINDEX_OBJECT_DATABASE; }
;
-opt_reindex_option_list:
- '(' utility_option_list ')' { $$ = $2; }
- | /* EMPTY */ { $$ = NULL; }
- ;
/*****************************************************************************
*
@@ -11903,13 +11928,13 @@ ClusterStmt:
n->params = $3;
$$ = (Node *) n;
}
- | CLUSTER '(' utility_option_list ')'
+ | CLUSTER opt_utility_option_list
{
ClusterStmt *n = makeNode(ClusterStmt);
n->relation = NULL;
n->indexname = NULL;
- n->params = $3;
+ n->params = $2;
$$ = (Node *) n;
}
/* unparenthesized VERBOSE kept for pre-14 compatibility */
@@ -11919,21 +11944,18 @@ ClusterStmt:
n->relation = $3;
n->indexname = $4;
- n->params = NIL;
if ($2)
- n->params = lappend(n->params, makeDefElem("verbose", NULL, @2));
+ n->params = list_make1(makeDefElem("verbose", NULL, @2));
$$ = (Node *) n;
}
/* unparenthesized VERBOSE kept for pre-17 compatibility */
- | CLUSTER opt_verbose
+ | CLUSTER VERBOSE
{
ClusterStmt *n = makeNode(ClusterStmt);
n->relation = NULL;
n->indexname = NULL;
- n->params = NIL;
- if ($2)
- n->params = lappend(n->params, makeDefElem("verbose", NULL, @2));
+ n->params = list_make1(makeDefElem("verbose", NULL, @2));
$$ = (Node *) n;
}
/* kept for pre-8.3 compatibility */
@@ -11943,9 +11965,8 @@ ClusterStmt:
n->relation = $5;
n->indexname = $3;
- n->params = NIL;
if ($2)
- n->params = lappend(n->params, makeDefElem("verbose", NULL, @2));
+ n->params = list_make1(makeDefElem("verbose", NULL, @2));
$$ = (Node *) n;
}
;
@@ -11996,64 +12017,31 @@ VacuumStmt: VACUUM opt_full opt_freeze opt_verbose opt_analyze opt_vacuum_relati
}
;
-AnalyzeStmt: analyze_keyword opt_verbose opt_vacuum_relation_list
+AnalyzeStmt: analyze_keyword opt_utility_option_list opt_vacuum_relation_list
{
VacuumStmt *n = makeNode(VacuumStmt);
- n->options = NIL;
- if ($2)
- n->options = lappend(n->options,
- makeDefElem("verbose", NULL, @2));
+ n->options = $2;
n->rels = $3;
n->is_vacuumcmd = false;
$$ = (Node *) n;
}
- | analyze_keyword '(' utility_option_list ')' opt_vacuum_relation_list
+ | analyze_keyword VERBOSE opt_vacuum_relation_list
{
VacuumStmt *n = makeNode(VacuumStmt);
- n->options = $3;
- n->rels = $5;
+ n->options = list_make1(makeDefElem("verbose", NULL, @2));
+ n->rels = $3;
n->is_vacuumcmd = false;
$$ = (Node *) n;
}
;
-utility_option_list:
- utility_option_elem
- {
- $$ = list_make1($1);
- }
- | utility_option_list ',' utility_option_elem
- {
- $$ = lappend($1, $3);
- }
- ;
-
analyze_keyword:
ANALYZE
| ANALYSE /* British */
;
-utility_option_elem:
- utility_option_name utility_option_arg
- {
- $$ = makeDefElem($1, $2, @1);
- }
- ;
-
-utility_option_name:
- NonReservedWord { $$ = $1; }
- | analyze_keyword { $$ = "analyze"; }
- | FORMAT_LA { $$ = "format"; }
- ;
-
-utility_option_arg:
- opt_boolean_or_string { $$ = (Node *) makeString($1); }
- | NumericOnly { $$ = (Node *) $1; }
- | /* EMPTY */ { $$ = NULL; }
- ;
-
opt_analyze:
analyze_keyword { $$ = true; }
| /*EMPTY*/ { $$ = false; }
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 9474095f271..8908603464c 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -562,10 +562,10 @@ AutoVacLauncherMain(const void *startup_data, size_t startup_data_len)
/*
* Create the initial database list. The invariant we want this list to
- * keep is that it's ordered by decreasing next_time. As soon as an entry
- * is updated to a higher time, it will be moved to the front (which is
- * correct because the only operation is to add autovacuum_naptime to the
- * entry, and time always increases).
+ * keep is that it's ordered by decreasing next_worker. As soon as an
+ * entry is updated to a higher time, it will be moved to the front (which
+ * is correct because the only operation is to add autovacuum_naptime to
+ * the entry, and time always increases).
*/
rebuild_database_list(InvalidOid);
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index 116ddf7b835..1ad65c237c3 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -613,6 +613,7 @@ ResetBackgroundWorkerCrashTimes(void)
* resetting.
*/
rw->rw_crashed_at = 0;
+ rw->rw_pid = 0;
/*
* If there was anyone waiting for it, they're history.
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 2809e298a44..8490148a47d 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -130,6 +130,13 @@ typedef struct
int num_requests; /* current # of requests */
int max_requests; /* allocated array size */
+
+ int head; /* Index of the first request in the ring
+ * buffer */
+ int tail; /* Index of the next free slot in the ring
+ * buffer (one past the last request) */
+
+ /* The ring buffer of pending checkpointer requests */
CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
} CheckpointerShmemStruct;
@@ -138,6 +145,12 @@ static CheckpointerShmemStruct *CheckpointerShmem;
/* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
#define WRITES_PER_ABSORB 1000
+/* Maximum number of checkpointer requests to process in one batch */
+#define CKPT_REQ_BATCH_SIZE 10000
+
+/* Max number of requests the checkpointer request queue can hold */
+#define MAX_CHECKPOINT_REQUESTS 10000000
+
/*
* GUC parameters
*/
@@ -973,7 +986,8 @@ CheckpointerShmemInit(void)
*/
MemSet(CheckpointerShmem, 0, size);
SpinLockInit(&CheckpointerShmem->ckpt_lck);
- CheckpointerShmem->max_requests = NBuffers;
+ CheckpointerShmem->max_requests = Min(NBuffers, MAX_CHECKPOINT_REQUESTS);
+ CheckpointerShmem->head = CheckpointerShmem->tail = 0;
ConditionVariableInit(&CheckpointerShmem->start_cv);
ConditionVariableInit(&CheckpointerShmem->done_cv);
}
@@ -1201,6 +1215,7 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
{
CheckpointerRequest *request;
bool too_full;
+ int insert_pos;
if (!IsUnderPostmaster)
return false; /* probably shouldn't even get here */
@@ -1224,10 +1239,14 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
}
/* OK, insert request */
- request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
+ insert_pos = CheckpointerShmem->tail;
+ request = &CheckpointerShmem->requests[insert_pos];
request->ftag = *ftag;
request->type = type;
+ CheckpointerShmem->tail = (CheckpointerShmem->tail + 1) % CheckpointerShmem->max_requests;
+ CheckpointerShmem->num_requests++;
+
/* If queue is more than half full, nudge the checkpointer to empty it */
too_full = (CheckpointerShmem->num_requests >=
CheckpointerShmem->max_requests / 2);
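The request queue is now a fixed-size ring buffer: head is the oldest pending request, tail the next slot to fill, and both wrap modulo max_requests. A sketch of the arithmetic (ring_advance is a hypothetical helper, not part of the patch):

    static inline int
    ring_advance(int pos, int size)
    {
        return (pos + 1) % size;    /* wrap around the fixed-size array */
    }

    /* insert:  requests[tail] = req; tail = ring_advance(tail, max); n++; */
    /* consume: req = requests[head]; head = ring_advance(head, max); n--; */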
@@ -1269,12 +1288,16 @@ CompactCheckpointerRequestQueue(void)
struct CheckpointerSlotMapping
{
CheckpointerRequest request;
- int slot;
+ int ring_idx;
};
- int n,
- preserve_count;
+ int n;
int num_skipped = 0;
+ int head;
+ int max_requests;
+ int num_requests;
+ int read_idx,
+ write_idx;
HASHCTL ctl;
HTAB *htab;
bool *skip_slot;
@@ -1286,8 +1309,13 @@ CompactCheckpointerRequestQueue(void)
if (CritSectionCount > 0)
return false;
+ max_requests = CheckpointerShmem->max_requests;
+ num_requests = CheckpointerShmem->num_requests;
+
/* Initialize skip_slot array */
- skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);
+ skip_slot = palloc0(sizeof(bool) * max_requests);
+
+ head = CheckpointerShmem->head;
/* Initialize temporary hash table */
ctl.keysize = sizeof(CheckpointerRequest);
@@ -1311,7 +1339,8 @@ CompactCheckpointerRequestQueue(void)
* away preceding entries that would end up being canceled anyhow), but
* it's not clear that the extra complexity would buy us anything.
*/
- for (n = 0; n < CheckpointerShmem->num_requests; n++)
+ read_idx = head;
+ for (n = 0; n < num_requests; n++)
{
CheckpointerRequest *request;
struct CheckpointerSlotMapping *slotmap;
@@ -1324,16 +1353,19 @@ CompactCheckpointerRequestQueue(void)
* CheckpointerShmemInit. Note also that RelFileLocator had better
* contain no pad bytes.
*/
- request = &CheckpointerShmem->requests[n];
+ request = &CheckpointerShmem->requests[read_idx];
slotmap = hash_search(htab, request, HASH_ENTER, &found);
if (found)
{
/* Duplicate, so mark the previous occurrence as skippable */
- skip_slot[slotmap->slot] = true;
+ skip_slot[slotmap->ring_idx] = true;
num_skipped++;
}
/* Remember slot containing latest occurrence of this request value */
- slotmap->slot = n;
+ slotmap->ring_idx = read_idx;
+
+ /* Move to the next request in the ring buffer */
+ read_idx = (read_idx + 1) % max_requests;
}
/* Done with the hash table. */
@@ -1347,17 +1379,34 @@ CompactCheckpointerRequestQueue(void)
}
/* We found some duplicates; remove them. */
- preserve_count = 0;
- for (n = 0; n < CheckpointerShmem->num_requests; n++)
+ read_idx = write_idx = head;
+ for (n = 0; n < num_requests; n++)
{
- if (skip_slot[n])
- continue;
- CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
+ /* If this slot is NOT skipped, keep it */
+ if (!skip_slot[read_idx])
+ {
+ /* If the read and write positions are different, copy the request */
+ if (write_idx != read_idx)
+ CheckpointerShmem->requests[write_idx] =
+ CheckpointerShmem->requests[read_idx];
+
+ /* Advance the write position */
+ write_idx = (write_idx + 1) % max_requests;
+ }
+
+ read_idx = (read_idx + 1) % max_requests;
}
+
+ /*
+ * Update ring buffer state: head remains the same, tail moves, count
+ * decreases
+ */
+ CheckpointerShmem->tail = write_idx;
+ CheckpointerShmem->num_requests -= num_skipped;
+
ereport(DEBUG1,
(errmsg_internal("compacted fsync request queue from %d entries to %d entries",
- CheckpointerShmem->num_requests, preserve_count)));
- CheckpointerShmem->num_requests = preserve_count;
+ num_requests, CheckpointerShmem->num_requests)));
/* Cleanup. */
pfree(skip_slot);
@@ -1378,40 +1427,64 @@ AbsorbSyncRequests(void)
{
CheckpointerRequest *requests = NULL;
CheckpointerRequest *request;
- int n;
+ int n,
+ i;
+ bool loop;
if (!AmCheckpointerProcess())
return;
- LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
-
- /*
- * We try to avoid holding the lock for a long time by copying the request
- * array, and processing the requests after releasing the lock.
- *
- * Once we have cleared the requests from shared memory, we have to PANIC
- * if we then fail to absorb them (eg, because our hashtable runs out of
- * memory). This is because the system cannot run safely if we are unable
- * to fsync what we have been told to fsync. Fortunately, the hashtable
- * is so small that the problem is quite unlikely to arise in practice.
- */
- n = CheckpointerShmem->num_requests;
- if (n > 0)
+ do
{
- requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
- memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
- }
+ LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
+
+ /*---
+ * We try to avoid holding the lock for a long time by:
+ * 1. Copying the request array and processing the requests after
+ * releasing the lock;
+ * 2. Processing not the whole queue, but only batches of
+ * CKPT_REQ_BATCH_SIZE at once.
+ *
+ * Once we have cleared the requests from shared memory, we must
+ * PANIC if we then fail to absorb them (e.g., because our hashtable
+ * runs out of memory). This is because the system cannot run safely
+ * if we are unable to fsync what we have been told to fsync.
+ * Fortunately, the hashtable is so small that the problem is quite
+ * unlikely to arise in practice.
+ *
+ * Note: the maximum possible size of the ring buffer is
+ * MAX_CHECKPOINT_REQUESTS entries, which fits within the maximum
+ * palloc allocation size of 1 GB. Our maximum batch size,
+ * CKPT_REQ_BATCH_SIZE, is even smaller.
+ */
+ n = Min(CheckpointerShmem->num_requests, CKPT_REQ_BATCH_SIZE);
+ if (n > 0)
+ {
+ if (!requests)
+ requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
- START_CRIT_SECTION();
+ for (i = 0; i < n; i++)
+ {
+ requests[i] = CheckpointerShmem->requests[CheckpointerShmem->head];
+ CheckpointerShmem->head = (CheckpointerShmem->head + 1) % CheckpointerShmem->max_requests;
+ }
- CheckpointerShmem->num_requests = 0;
+ CheckpointerShmem->num_requests -= n;
- LWLockRelease(CheckpointerCommLock);
+ }
+
+ START_CRIT_SECTION();
+
+ /* Are there any requests in the queue? If so, keep going. */
+ loop = CheckpointerShmem->num_requests != 0;
+
+ LWLockRelease(CheckpointerCommLock);
- for (request = requests; n > 0; request++, n--)
- RememberSyncRequest(&request->ftag, request->type);
+ for (request = requests; n > 0; request++, n--)
+ RememberSyncRequest(&request->ftag, request->type);
- END_CRIT_SECTION();
+ END_CRIT_SECTION();
+ } while (loop);
if (requests)
pfree(requests);
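The absorb path now drains the queue in bounded batches instead of copying it wholesale. The shape of the loop, simplified:

    /*
     * do {
     *     acquire CheckpointerCommLock;
     *     n = Min(num_requests, CKPT_REQ_BATCH_SIZE);
     *     copy n requests out, advancing head and decrementing num_requests;
     *     loop = (num_requests != 0);
     *     release the lock;
     *     RememberSyncRequest() for each copied request;  (critical section)
     * } while (loop);
     */

This keeps the lock hold time proportional to the batch size rather than to the whole queue, at the cost of re-taking the lock once per batch.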
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index cca9b946e53..e01d9f0cfe8 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2630,6 +2630,13 @@ CleanupBackend(PMChild *bp,
}
bp = NULL;
+ /*
+ * In a crash case, exit immediately without resetting background worker
+ * state. However, if restart_after_crash is enabled, the background
+ * worker state (e.g., rw_pid) still needs to be reset so the worker can
+ * restart after crash recovery. This reset is handled in
+ * ResetBackgroundWorkerCrashTimes(), not here.
+ */
if (crashed)
{
HandleChildCrash(bp_pid, exitstatus, procname);
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 886d99951dd..239641bfbb6 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -421,31 +421,22 @@ libpqrcv_identify_system(WalReceiverConn *conn, TimeLineID *primary_tli)
"IDENTIFY_SYSTEM",
WAIT_EVENT_LIBPQWALRECEIVER_RECEIVE);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not receive database system identifier and timeline ID from "
"the primary server: %s",
pchomp(PQerrorMessage(conn->streamConn)))));
- }
/*
* IDENTIFY_SYSTEM returns 3 columns in 9.3 and earlier, and 4 columns in
* 9.4 and onwards.
*/
if (PQnfields(res) < 3 || PQntuples(res) != 1)
- {
- int ntuples = PQntuples(res);
- int nfields = PQnfields(res);
-
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid response from primary server"),
errdetail("Could not identify system: got %d rows and %d fields, expected %d rows and %d or more fields.",
- ntuples, nfields, 1, 3)));
- }
+ PQntuples(res), PQnfields(res), 1, 3)));
primary_sysid = pstrdup(PQgetvalue(res, 0, 0));
*primary_tli = pg_strtoint32(PQgetvalue(res, 0, 1));
PQclear(res);
@@ -607,13 +598,10 @@ libpqrcv_startstreaming(WalReceiverConn *conn,
return false;
}
else if (PQresultStatus(res) != PGRES_COPY_BOTH)
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not start WAL streaming: %s",
pchomp(PQerrorMessage(conn->streamConn)))));
- }
PQclear(res);
return true;
}
@@ -721,26 +709,17 @@ libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
cmd,
WAIT_EVENT_LIBPQWALRECEIVER_RECEIVE);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not receive timeline history file from "
"the primary server: %s",
pchomp(PQerrorMessage(conn->streamConn)))));
- }
if (PQnfields(res) != 2 || PQntuples(res) != 1)
- {
- int ntuples = PQntuples(res);
- int nfields = PQnfields(res);
-
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid response from primary server"),
errdetail("Expected 1 tuple with 2 fields, got %d tuples with %d fields.",
- ntuples, nfields)));
- }
+ PQntuples(res), PQnfields(res))));
*filename = pstrdup(PQgetvalue(res, 0, 0));
*len = PQgetlength(res, 0, 1);
@@ -844,13 +823,10 @@ libpqrcv_receive(WalReceiverConn *conn, char **buffer,
return -1;
}
else
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not receive data from WAL stream: %s",
pchomp(PQerrorMessage(conn->streamConn)))));
- }
}
if (rawlen < -1)
ereport(ERROR,
@@ -974,13 +950,10 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname,
pfree(cmd.data);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not create replication slot \"%s\": %s",
slotname, pchomp(PQerrorMessage(conn->streamConn)))));
- }
if (lsn)
*lsn = DatumGetLSN(DirectFunctionCall1Coll(pg_lsn_in, InvalidOid,
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 5febd154b6b..34cf05668ae 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -2599,7 +2599,7 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
if (++changes_count >= CHANGES_THRESHOLD)
{
- rb->update_progress_txn(rb, txn, change->lsn);
+ rb->update_progress_txn(rb, txn, prev_lsn);
changes_count = 0;
}
}
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index 3fea0a0206e..d3356bc84ee 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -316,7 +316,8 @@ process_syncing_tables_for_sync(XLogRecPtr current_lsn)
UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
MyLogicalRepWorker->relstate,
- MyLogicalRepWorker->relstate_lsn);
+ MyLogicalRepWorker->relstate_lsn,
+ false);
/*
* End streaming so that LogRepWorkerWalRcvConn can be used to drop
@@ -425,6 +426,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
ListCell *lc;
bool started_tx = false;
bool should_exit = false;
+ Relation rel = NULL;
Assert(!IsTransactionState());
@@ -492,7 +494,17 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
* worker to remove the origin tracking as if there is any
* error while dropping we won't restart it to drop the
* origin. So passing missing_ok = true.
+ *
+ * Lock the subscription and origin in the same order as we
+ * are doing during DDL commands to avoid deadlocks. See
+ * AlterSubscription_refresh.
*/
+ LockSharedObject(SubscriptionRelationId, MyLogicalRepWorker->subid,
+ 0, AccessShareLock);
+
+ if (!rel)
+ rel = table_open(SubscriptionRelRelationId, RowExclusiveLock);
+
ReplicationOriginNameForLogicalRep(MyLogicalRepWorker->subid,
rstate->relid,
originname,
@@ -504,7 +516,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
*/
UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
rstate->relid, rstate->state,
- rstate->lsn);
+ rstate->lsn, true);
}
}
else
@@ -555,7 +567,14 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
* This is required to avoid any undetected deadlocks
* due to any existing lock as deadlock detector won't
* be able to detect the waits on the latch.
+ *
+ * Also close any tables prior to the commit.
*/
+ if (rel)
+ {
+ table_close(rel, NoLock);
+ rel = NULL;
+ }
CommitTransactionCommand();
pgstat_report_stat(false);
}
@@ -623,6 +642,11 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
}
}
+ /* Close the table if it was opened above */
+ if (rel)
+ table_close(rel, NoLock);
+
if (started_tx)
{
/*
@@ -1414,7 +1438,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
MyLogicalRepWorker->relstate,
- MyLogicalRepWorker->relstate_lsn);
+ MyLogicalRepWorker->relstate_lsn,
+ false);
CommitTransactionCommand();
pgstat_report_stat(true);
@@ -1547,7 +1572,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
SUBREL_STATE_FINISHEDCOPY,
- MyLogicalRepWorker->relstate_lsn);
+ MyLogicalRepWorker->relstate_lsn,
+ false);
CommitTransactionCommand();
diff --git a/src/backend/replication/syncrep_scanner.l b/src/backend/replication/syncrep_scanner.l
index 7dec1f869c7..02004d621e7 100644
--- a/src/backend/replication/syncrep_scanner.l
+++ b/src/backend/replication/syncrep_scanner.l
@@ -157,17 +157,16 @@ syncrep_yyerror(SyncRepConfigData **syncrep_parse_result_p, char **syncrep_parse
{
struct yyguts_t *yyg = (struct yyguts_t *) yyscanner; /* needed for yytext
* macro */
- char *syncrep_parse_error_msg = *syncrep_parse_error_msg_p;
/* report only the first error in a parse operation */
- if (syncrep_parse_error_msg)
+ if (*syncrep_parse_error_msg_p)
return;
if (yytext[0])
- syncrep_parse_error_msg = psprintf("%s at or near \"%s\"",
- message, yytext);
+ *syncrep_parse_error_msg_p = psprintf("%s at or near \"%s\"",
+ message, yytext);
else
- syncrep_parse_error_msg = psprintf("%s at end of input",
- message);
+ *syncrep_parse_error_msg_p = psprintf("%s at end of input",
+ message);
}
void
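The bug fixed here is a classic out-parameter slip: the function assigned to a local copy of the pointer, so the caller's variable was never updated. In miniature (hypothetical example):

    static void
    set_msg_wrong(char **msg_p)
    {
        char   *msg = *msg_p;       /* local copy of the pointer value */

        msg = psprintf("oops");     /* updates only the local; caller sees no change */
        (void) msg;
    }

    static void
    set_msg_right(char **msg_p)
    {
        *msg_p = psprintf("oops");  /* writes through the out parameter */
    }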
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 6afdd28dba6..67431208e7f 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2743,12 +2743,10 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
* because mdread doesn't complain about reads beyond EOF (when
* zero_damaged_pages is ON) and so a previous attempt to read a block
* beyond EOF could have left a "valid" zero-filled buffer.
- * Unfortunately, we have also seen this case occurring because of
- * buggy Linux kernels that sometimes return an lseek(SEEK_END) result
- * that doesn't account for a recent write. In that situation, the
- * pre-existing buffer would contain valid data that we don't want to
- * overwrite. Since the legitimate cases should always have left a
- * zero-filled buffer, complain if not PageIsNew.
+ *
+ * This has also been observed when a relation was overwritten by an
+ * external process. Since the legitimate cases should always have
+ * left a zero-filled buffer, complain if not PageIsNew.
*/
if (existing_id >= 0)
{
@@ -2778,8 +2776,7 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
ereport(ERROR,
(errmsg("unexpected data beyond EOF in block %u of relation %s",
existing_hdr->tag.blockNum,
- relpath(bmr.smgr->smgr_rlocator, fork).str),
- errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
+ relpath(bmr.smgr->smgr_rlocator, fork).str)));
/*
* We *must* do smgr[zero]extend before succeeding, else the page
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index c6aefd2f688..beadeb5e46a 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -187,9 +187,11 @@ WaitLatch(Latch *latch, int wakeEvents, long timeout,
if (!(wakeEvents & WL_LATCH_SET))
latch = NULL;
ModifyWaitEvent(LatchWaitSet, LatchWaitSetLatchPos, WL_LATCH_SET, latch);
- ModifyWaitEvent(LatchWaitSet, LatchWaitSetPostmasterDeathPos,
- (wakeEvents & (WL_EXIT_ON_PM_DEATH | WL_POSTMASTER_DEATH)),
- NULL);
+
+ if (IsUnderPostmaster)
+ ModifyWaitEvent(LatchWaitSet, LatchWaitSetPostmasterDeathPos,
+ (wakeEvents & (WL_EXIT_ON_PM_DEATH | WL_POSTMASTER_DEATH)),
+ NULL);
if (WaitEventSetWait(LatchWaitSet,
(wakeEvents & WL_TIMEOUT) ? timeout : -1,
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index a9bb540b55a..087821311cc 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -728,7 +728,11 @@ procsignal_sigusr1_handler(SIGNAL_ARGS)
void
SendCancelRequest(int backendPID, const uint8 *cancel_key, int cancel_key_len)
{
- Assert(backendPID != 0);
+ if (backendPID == 0)
+ {
+ ereport(LOG, (errmsg("invalid cancel request with PID 0")));
+ return;
+ }
/*
* See if we have a matching backend. Reading the pss_pid and
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index a297606cdd7..0cecd464902 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -988,7 +988,7 @@ pg_plan_queries(List *querytrees, const char *query_string, int cursorOptions,
stmt->stmt_location = query->stmt_location;
stmt->stmt_len = query->stmt_len;
stmt->queryId = query->queryId;
- stmt->cached_plan_type = PLAN_CACHE_NONE;
+ stmt->planOrigin = PLAN_STMT_INTERNAL;
}
else
{
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index babc34d0cbe..4f4191b0ea6 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -1234,7 +1234,7 @@ ProcessUtilitySlow(ParseState *pstate,
wrapper->utilityStmt = stmt;
wrapper->stmt_location = pstmt->stmt_location;
wrapper->stmt_len = pstmt->stmt_len;
- wrapper->cached_plan_type = PLAN_CACHE_NONE;
+ wrapper->planOrigin = PLAN_STMT_INTERNAL;
ProcessUtility(wrapper,
queryString,
@@ -1965,7 +1965,7 @@ ProcessUtilityForAlterTable(Node *stmt, AlterTableUtilityContext *context)
wrapper->utilityStmt = stmt;
wrapper->stmt_location = context->pstmt->stmt_location;
wrapper->stmt_len = context->pstmt->stmt_len;
- wrapper->cached_plan_type = PLAN_CACHE_NONE;
+ wrapper->planOrigin = PLAN_STMT_INTERNAL;
ProcessUtility(wrapper,
context->queryString,
diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index 8b57845e870..6bc91ce0dad 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -212,6 +212,11 @@ int pgstat_fetch_consistency = PGSTAT_FETCH_CONSISTENCY_CACHE;
PgStat_LocalState pgStatLocal;
+/*
+ * Track pending reports for fixed-numbered stats, used by
+ * pgstat_report_stat().
+ */
+bool pgstat_report_fixed = false;
/* ----------
* Local data
@@ -370,7 +375,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.shared_data_off = offsetof(PgStatShared_Backend, stats),
.shared_data_len = sizeof(((PgStatShared_Backend *) 0)->stats),
- .have_static_pending_cb = pgstat_backend_have_pending_cb,
.flush_static_cb = pgstat_backend_flush_cb,
.reset_timestamp_cb = pgstat_backend_reset_timestamp_cb,
},
@@ -437,7 +441,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.shared_data_len = sizeof(((PgStatShared_IO *) 0)->stats),
.flush_static_cb = pgstat_io_flush_cb,
- .have_static_pending_cb = pgstat_io_have_pending_cb,
.init_shmem_cb = pgstat_io_init_shmem_cb,
.reset_all_cb = pgstat_io_reset_all_cb,
.snapshot_cb = pgstat_io_snapshot_cb,
@@ -455,7 +458,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.shared_data_len = sizeof(((PgStatShared_SLRU *) 0)->stats),
.flush_static_cb = pgstat_slru_flush_cb,
- .have_static_pending_cb = pgstat_slru_have_pending_cb,
.init_shmem_cb = pgstat_slru_init_shmem_cb,
.reset_all_cb = pgstat_slru_reset_all_cb,
.snapshot_cb = pgstat_slru_snapshot_cb,
@@ -474,7 +476,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.init_backend_cb = pgstat_wal_init_backend_cb,
.flush_static_cb = pgstat_wal_flush_cb,
- .have_static_pending_cb = pgstat_wal_have_pending_cb,
.init_shmem_cb = pgstat_wal_init_shmem_cb,
.reset_all_cb = pgstat_wal_reset_all_cb,
.snapshot_cb = pgstat_wal_snapshot_cb,
@@ -708,29 +709,10 @@ pgstat_report_stat(bool force)
}
/* Don't expend a clock check if nothing to do */
- if (dlist_is_empty(&pgStatPending))
+ if (dlist_is_empty(&pgStatPending) &&
+ !pgstat_report_fixed)
{
- bool do_flush = false;
-
- /* Check for pending stats */
- for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
- {
- const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
-
- if (!kind_info)
- continue;
- if (!kind_info->have_static_pending_cb)
- continue;
-
- if (kind_info->have_static_pending_cb())
- {
- do_flush = true;
- break;
- }
- }
-
- if (!do_flush)
- return 0;
+ return 0;
}
/*
@@ -784,16 +766,19 @@ pgstat_report_stat(bool force)
partial_flush |= pgstat_flush_pending_entries(nowait);
/* flush of other stats kinds */
- for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
+ if (pgstat_report_fixed)
{
- const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
+ for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
+ {
+ const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
- if (!kind_info)
- continue;
- if (!kind_info->flush_static_cb)
- continue;
+ if (!kind_info)
+ continue;
+ if (!kind_info->flush_static_cb)
+ continue;
- partial_flush |= kind_info->flush_static_cb(nowait);
+ partial_flush |= kind_info->flush_static_cb(nowait);
+ }
}
last_flush = now;
@@ -815,6 +800,7 @@ pgstat_report_stat(bool force)
}
pending_since = 0;
+ pgstat_report_fixed = false;
return 0;
}
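
Note: taken together, the pgstat.c hunks replace a poll of every stats kind's have_static_pending_cb with one process-local dirty flag: producers set pgstat_report_fixed whenever they accumulate fixed-numbered stats, and pgstat_report_stat() tests and clears it. A minimal standalone sketch of that pattern (names are illustrative, not the backend's):

    #include <stdbool.h>

    static bool report_fixed = false;   /* any fixed-numbered stats pending? */

    static void
    count_something(void)
    {
        /* ... accumulate into local pending counters ... */
        report_fixed = true;            /* cheap to set on every update */
    }

    static int
    report_stat(bool have_variable_pending)
    {
        if (!have_variable_pending && !report_fixed)
            return 0;                   /* fast exit: no callback loop needed */

        if (report_fixed)
        {
            /* ... invoke each kind's flush_static_cb ... */
        }
        report_fixed = false;
        return 0;
    }
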
diff --git a/src/backend/utils/activity/pgstat_backend.c b/src/backend/utils/activity/pgstat_backend.c
index 51256277e8d..8714a85e2d9 100644
--- a/src/backend/utils/activity/pgstat_backend.c
+++ b/src/backend/utils/activity/pgstat_backend.c
@@ -66,6 +66,7 @@ pgstat_count_backend_io_op_time(IOObject io_object, IOContext io_context,
io_time);
backend_has_iostats = true;
+ pgstat_report_fixed = true;
}
void
@@ -81,6 +82,7 @@ pgstat_count_backend_io_op(IOObject io_object, IOContext io_context,
PendingBackendStats.pending_io.bytes[io_object][io_context][io_op] += bytes;
backend_has_iostats = true;
+ pgstat_report_fixed = true;
}
/*
@@ -302,18 +304,6 @@ pgstat_flush_backend(bool nowait, bits32 flags)
}
/*
- * Check if there are any backend stats waiting for flush.
- */
-bool
-pgstat_backend_have_pending_cb(void)
-{
- if (!pgstat_tracks_backend_bktype(MyBackendType))
- return false;
-
- return (backend_has_iostats || pgstat_backend_wal_have_pending());
-}
-
-/*
* Callback to flush out locally pending backend statistics.
*
* If some stats could not be flushed due to lock contention, return true.
diff --git a/src/backend/utils/activity/pgstat_io.c b/src/backend/utils/activity/pgstat_io.c
index d8d26379a57..13ae57ed649 100644
--- a/src/backend/utils/activity/pgstat_io.c
+++ b/src/backend/utils/activity/pgstat_io.c
@@ -80,6 +80,7 @@ pgstat_count_io_op(IOObject io_object, IOContext io_context, IOOp io_op,
pgstat_count_backend_io_op(io_object, io_context, io_op, cnt, bytes);
have_iostats = true;
+ pgstat_report_fixed = true;
}
/*
@@ -168,15 +169,6 @@ pgstat_fetch_stat_io(void)
}
/*
- * Check if there any IO stats waiting for flush.
- */
-bool
-pgstat_io_have_pending_cb(void)
-{
- return have_iostats;
-}
-
-/*
* Simpler wrapper of pgstat_io_flush_cb()
*/
void
diff --git a/src/backend/utils/activity/pgstat_slru.c b/src/backend/utils/activity/pgstat_slru.c
index b9e940dde45..7bd8744accb 100644
--- a/src/backend/utils/activity/pgstat_slru.c
+++ b/src/backend/utils/activity/pgstat_slru.c
@@ -144,15 +144,6 @@ pgstat_get_slru_index(const char *name)
}
/*
- * Check if there are any SLRU stats entries waiting for flush.
- */
-bool
-pgstat_slru_have_pending_cb(void)
-{
- return have_slrustats;
-}
-
-/*
* Flush out locally pending SLRU stats entries
*
* If nowait is true, this function returns false on lock failure. Otherwise
@@ -247,6 +238,7 @@ get_slru_entry(int slru_idx)
Assert((slru_idx >= 0) && (slru_idx < SLRU_NUM_ELEMENTS));
have_slrustats = true;
+ pgstat_report_fixed = true;
return &pending_SLRUStats[slru_idx];
}
diff --git a/src/backend/utils/activity/pgstat_wal.c b/src/backend/utils/activity/pgstat_wal.c
index 16a1ecb4d90..0d04480d2f6 100644
--- a/src/backend/utils/activity/pgstat_wal.c
+++ b/src/backend/utils/activity/pgstat_wal.c
@@ -72,6 +72,15 @@ pgstat_fetch_stat_wal(void)
}
/*
+ * To determine whether WAL usage happened.
+ */
+static inline bool
+pgstat_wal_have_pending(void)
+{
+ return pgWalUsage.wal_records != prevWalUsage.wal_records;
+}
+
+/*
* Calculate how much WAL usage counters have increased by subtracting the
* previous counters from the current ones.
*
@@ -92,7 +101,7 @@ pgstat_wal_flush_cb(bool nowait)
* This function can be called even if nothing at all has happened. Avoid
* taking lock for nothing in that case.
*/
- if (!pgstat_wal_have_pending_cb())
+ if (!pgstat_wal_have_pending())
return false;
/*
@@ -136,15 +145,6 @@ pgstat_wal_init_backend_cb(void)
prevWalUsage = pgWalUsage;
}
-/*
- * To determine whether WAL usage happened.
- */
-bool
-pgstat_wal_have_pending_cb(void)
-{
- return pgWalUsage.wal_records != prevWalUsage.wal_records;
-}
-
void
pgstat_wal_init_shmem_cb(void *stats)
{
diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c
index 1b0df111717..39dab3e42df 100644
--- a/src/backend/utils/adt/tid.c
+++ b/src/backend/utils/adt/tid.c
@@ -84,7 +84,7 @@ tidin(PG_FUNCTION_ARGS)
/*
* Cope with possibility that unsigned long is wider than BlockNumber, in
* which case strtoul will not raise an error for some values that are out
- * of the range of BlockNumber. (See similar code in oidin().)
+ * of the range of BlockNumber. (See similar code in uint32in_subr().)
*/
#if SIZEOF_LONG > 4
if (cvt != (unsigned long) blockNumber &&
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index f7b731825fc..182e8f75db7 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -1769,7 +1769,7 @@ xml_doctype_in_content(const xmlChar *str)
* xmloption_arg, but a DOCTYPE node in the input can force DOCUMENT mode).
*
* If parsed_nodes isn't NULL and we parse in CONTENT mode, the list
- * of parsed nodes from the xmlParseInNodeContext call will be returned
+ * of parsed nodes from the xmlParseBalancedChunkMemory call will be returned
* to *parsed_nodes. (It is caller's responsibility to free that.)
*
* Errors normally result in ereport(ERROR), but if escontext is an
@@ -1795,6 +1795,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PgXmlErrorContext *xmlerrcxt;
volatile xmlParserCtxtPtr ctxt = NULL;
volatile xmlDocPtr doc = NULL;
+ volatile int save_keep_blanks = -1;
/*
* This step looks annoyingly redundant, but we must do it to have a
@@ -1822,7 +1823,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PG_TRY();
{
bool parse_as_document = false;
- int options;
int res_code;
size_t count = 0;
xmlChar *version = NULL;
@@ -1853,18 +1853,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
parse_as_document = true;
}
- /*
- * Select parse options.
- *
- * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
- * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined by
- * internal DTD are applied'. As for external DTDs, we try to support
- * them too (see SQL/XML:2008 GR 10.16.7.e), but that doesn't really
- * happen because xmlPgEntityLoader prevents it.
- */
- options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
- | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
-
/* initialize output parameters */
if (parsed_xmloptiontype != NULL)
*parsed_xmloptiontype = parse_as_document ? XMLOPTION_DOCUMENT :
@@ -1874,11 +1862,26 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
if (parse_as_document)
{
+ int options;
+
+ /* set up parser context used by xmlCtxtReadDoc */
ctxt = xmlNewParserCtxt();
if (ctxt == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
"could not allocate parser context");
+ /*
+ * Select parse options.
+ *
+ * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
+ * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined
+ * by internal DTD are applied'. As for external DTDs, we try to
+ * support them too (see SQL/XML:2008 GR 10.16.7.e), but that
+ * doesn't really happen because xmlPgEntityLoader prevents it.
+ */
+ options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
+ | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
+
doc = xmlCtxtReadDoc(ctxt, utf8string,
NULL, /* no URL */
"UTF-8",
@@ -1900,10 +1903,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
}
else
{
- xmlNodePtr root;
- xmlNodePtr oldroot PG_USED_FOR_ASSERTS_ONLY;
-
- /* set up document with empty root node to be the context node */
+ /* set up document that xmlParseBalancedChunkMemory will add to */
doc = xmlNewDoc(version);
if (doc == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
@@ -1916,43 +1916,22 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
"could not allocate XML document");
doc->standalone = standalone;
- root = xmlNewNode(NULL, (const xmlChar *) "content-root");
- if (root == NULL || xmlerrcxt->err_occurred)
- xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
- "could not allocate xml node");
-
- /*
- * This attaches root to doc, so we need not free it separately;
- * and there can't yet be any old root to free.
- */
- oldroot = xmlDocSetRootElement(doc, root);
- Assert(oldroot == NULL);
+ /* set parse options --- have to do this the ugly way */
+ save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0);
/* allow empty content */
if (*(utf8string + count))
{
- xmlNodePtr node_list = NULL;
- xmlParserErrors res;
-
- res = xmlParseInNodeContext(root,
- (char *) utf8string + count,
- strlen((char *) utf8string + count),
- options,
- &node_list);
-
- if (res != XML_ERR_OK || xmlerrcxt->err_occurred)
+ res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
+ utf8string + count,
+ parsed_nodes);
+ if (res_code != 0 || xmlerrcxt->err_occurred)
{
- xmlFreeNodeList(node_list);
xml_errsave(escontext, xmlerrcxt,
ERRCODE_INVALID_XML_CONTENT,
"invalid XML content");
goto fail;
}
-
- if (parsed_nodes != NULL)
- *parsed_nodes = node_list;
- else
- xmlFreeNodeList(node_list);
}
}
@@ -1961,6 +1940,8 @@ fail:
}
PG_CATCH();
{
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
if (doc != NULL)
xmlFreeDoc(doc);
if (ctxt != NULL)
@@ -1972,6 +1953,9 @@ fail:
}
PG_END_TRY();
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
+
if (ctxt != NULL)
xmlFreeParserCtxt(ctxt);
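
Note: the CONTENT-mode branch now parses directly against the target document with xmlParseBalancedChunkMemory() instead of building a dummy root node for xmlParseInNodeContext(). Since that function takes no options argument, whitespace handling has to go through libxml2's global default, saved and restored on both the success and error paths (hence the volatile save_keep_blanks). A condensed sketch of the call sequence, assuming a libxml2 build where xmlKeepBlanksDefault() is available:

    #include <libxml/parser.h>

    /*
     * Parse an XML fragment in CONTENT mode, honoring preserve_whitespace.
     * Returns xmlParseBalancedChunkMemory()'s result code (0 on success);
     * the parsed node list is handed back in *nodes_out.
     */
    static int
    parse_content(xmlDocPtr doc, const xmlChar *chunk,
                  int preserve_whitespace, xmlNodePtr *nodes_out)
    {
        int     save = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0);
        int     rc = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
                                                 chunk, nodes_out);

        xmlKeepBlanksDefault(save);     /* always restore the global default */
        return rc;
    }
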
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index f4d2b9458a5..0c506d320b1 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -1390,7 +1390,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
{
PlannedStmt *pstmt = (PlannedStmt *) lfirst(lc);
- pstmt->cached_plan_type = customplan ? PLAN_CACHE_CUSTOM : PLAN_CACHE_GENERIC;
+ pstmt->planOrigin = customplan ? PLAN_STMT_CACHE_CUSTOM : PLAN_STMT_CACHE_GENERIC;
}
return plan;
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 1ad155d446e..42e9be274fc 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -195,6 +195,7 @@ struct HASHHDR
long ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
int nelem_alloc; /* number of entries to allocate at once */
+ bool isfixed; /* if true, don't enlarge */
#ifdef HASH_STATISTICS
@@ -227,7 +228,6 @@ struct HTAB
MemoryContext hcxt; /* memory context if default allocator used */
char *tabname; /* table name (for error messages) */
bool isshared; /* true if table is in shared memory */
- bool isfixed; /* if true, don't enlarge */
/* freezing a shared table isn't allowed, so we can keep state here */
bool frozen; /* true = no more inserts allowed */
@@ -618,8 +618,10 @@ hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
}
}
+ /* Set isfixed if requested, but not till after we build initial entries */
if (flags & HASH_FIXED_SIZE)
- hashp->isfixed = true;
+ hctl->isfixed = true;
+
return hashp;
}
@@ -644,6 +646,8 @@ hdefault(HTAB *hashp)
hctl->ssize = DEF_SEGSIZE;
hctl->sshift = DEF_SEGSIZE_SHIFT;
+ hctl->isfixed = false; /* can be enlarged */
+
#ifdef HASH_STATISTICS
hctl->accesses = hctl->collisions = 0;
#endif
@@ -1713,7 +1717,7 @@ element_alloc(HTAB *hashp, int nelem, int freelist_idx)
HASHELEMENT *prevElement;
int i;
- if (hashp->isfixed)
+ if (hctl->isfixed)
return false;
/* Each element has a HASHELEMENT header plus user data. */
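
Note: moving isfixed from HTAB into HASHHDR is what makes HASH_FIXED_SIZE effective for shared-memory tables: each backend holds its own local HTAB handle, but all handles point at the single HASHHDR in shared memory, so a flag kept in HTAB was only visible to the process that created the table. A standalone sketch of the shared-header-versus-local-handle split:

    #include <stdbool.h>

    /* one copy in shared memory, seen by every attached process */
    typedef struct Header
    {
        bool        isfixed;    /* if true, don't enlarge */
    } Header;

    /* one per process; points at the shared header */
    typedef struct Handle
    {
        Header     *hdr;
        const char *name;
    } Handle;

    static bool
    try_grow(Handle *h)
    {
        if (h->hdr->isfixed)    /* every backend now sees the same flag */
            return false;
        /* ... allocate another batch of elements ... */
        return true;
    }
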
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 667df448732..ce5449f2878 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -4722,8 +4722,13 @@ AlterSystemSetConfigFile(AlterSystemStmt *altersysstmt)
* the config file cannot cause postmaster start to fail, so we
* don't have to be too tense about possibly installing a bad
* value.)
+ *
+ * As an exception, we skip this check if this is a RESET command
+ * for an unknown custom GUC, else there'd be no way for users to
+ * remove such settings with reserved prefixes.
*/
- (void) assignable_custom_variable_name(name, false, ERROR);
+ if (value || !valid_custom_variable_name(name))
+ (void) assignable_custom_variable_name(name, false, ERROR);
}
/*
@@ -6711,6 +6716,7 @@ validate_option_array_item(const char *name, const char *value,
{
struct config_generic *gconf;
+ bool reset_custom;
/*
* There are three cases to consider:
@@ -6729,16 +6735,21 @@ validate_option_array_item(const char *name, const char *value,
* it's assumed to be fully validated.)
*
* name is not known and can't be created as a placeholder. Throw error,
- * unless skipIfNoPermissions is true, in which case return false.
+ * unless skipIfNoPermissions or reset_custom is true. If reset_custom is
+ * true, this is a RESET or RESET ALL operation for an unknown custom GUC
+ * with a reserved prefix, in which case we want to fall through to the
+ * placeholder case described in the preceding paragraph (else there'd be
+ * no way for users to remove them). Otherwise, return false.
*/
- gconf = find_option(name, true, skipIfNoPermissions, ERROR);
- if (!gconf)
+ reset_custom = (!value && valid_custom_variable_name(name));
+ gconf = find_option(name, true, skipIfNoPermissions || reset_custom, ERROR);
+ if (!gconf && !reset_custom)
{
/* not known, failed to make a placeholder */
return false;
}
- if (gconf->flags & GUC_CUSTOM_PLACEHOLDER)
+ if (!gconf || gconf->flags & GUC_CUSTOM_PLACEHOLDER)
{
/*
* We cannot do any meaningful check on the value, so only permissions
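
Note: both guc.c hunks enforce the same rule, so that a stray setting under a reserved prefix can still be removed after the owning extension reserved it. A hypothetical condensation of the predicate, reusing guc.c's name checker:

    #include <stdbool.h>
    #include <stddef.h>

    bool        valid_custom_variable_name(const char *name);  /* guc.c helper */

    /*
     * SET (value != NULL) always gets the reserved-prefix check; a RESET of a
     * well-formed custom name is let through so users can remove it.
     */
    static bool
    must_check_reserved_prefix(const char *name, const char *value)
    {
        return value != NULL || !valid_custom_variable_name(name);
    }
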
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 15fa4d0a55e..ce01dce9861 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -560,9 +560,7 @@ MemoryContextDeleteChildren(MemoryContext context)
* the specified context, since that means it will automatically be freed
* when no longer needed.
*
- * There is no API for deregistering a callback once registered. If you
- * want it to not do anything anymore, adjust the state pointed to by its
- * "arg" to indicate that.
+ * Note that callers can assume this cannot fail.
*/
void
MemoryContextRegisterResetCallback(MemoryContext context,
@@ -578,6 +576,41 @@ MemoryContextRegisterResetCallback(MemoryContext context,
}
/*
+ * MemoryContextUnregisterResetCallback
+ * Undo the effects of MemoryContextRegisterResetCallback.
+ *
+ * This can be used if a callback's effects are no longer required
+ * at some point before the context has been reset/deleted. It is the
+ * caller's responsibility to pfree the callback struct (if needed).
+ *
+ * An assertion failure occurs if the callback was not registered.
+ * We could alternatively define that case as a no-op, but that seems too
+ * likely to mask programming errors such as passing the wrong context.
+ */
+void
+MemoryContextUnregisterResetCallback(MemoryContext context,
+ MemoryContextCallback *cb)
+{
+ MemoryContextCallback *prev,
+ *cur;
+
+ Assert(MemoryContextIsValid(context));
+
+ for (prev = NULL, cur = context->reset_cbs; cur != NULL;
+ prev = cur, cur = cur->next)
+ {
+ if (cur != cb)
+ continue;
+ if (prev)
+ prev->next = cur->next;
+ else
+ context->reset_cbs = cur->next;
+ return;
+ }
+ Assert(false);
+}
+
+/*
* MemoryContextCallResetCallbacks
* Internal function to call all registered callbacks for context.
*/
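
Note: MemoryContextUnregisterResetCallback() closes the gap the old comment apologized for: a callback can now be detached once its work is moot, instead of being defanged through its arg. A hypothetical usage fragment (backend context assumed; names other than the two callback functions are illustrative):

    #include "postgres.h"

    typedef struct MyState
    {
        MemoryContextCallback cb;   /* must live in the watched context */
        void       *resource;
    } MyState;

    static void
    my_cleanup(void *arg)
    {
        /* release whatever ((MyState *) arg)->resource tracks */
    }

    static void
    example(MemoryContext cxt)
    {
        MyState    *st = MemoryContextAlloc(cxt, sizeof(MyState));

        st->cb.func = my_cleanup;
        st->cb.arg = st;
        MemoryContextRegisterResetCallback(cxt, &st->cb);

        /* ... later, the resource got released by other means ... */
        MemoryContextUnregisterResetCallback(cxt, &st->cb);
        pfree(st);                  /* freeing the struct is the caller's job */
    }
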
diff --git a/src/bin/initdb/Makefile b/src/bin/initdb/Makefile
index 997e0a013e9..c0470efda92 100644
--- a/src/bin/initdb/Makefile
+++ b/src/bin/initdb/Makefile
@@ -20,7 +20,7 @@ include $(top_builddir)/src/Makefile.global
# from libpq, else we have risks of version skew if we run with a libpq
# shared library from a different PG version. Define
# USE_PRIVATE_ENCODING_FUNCS to ensure that that happens.
-override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(ICU_CFLAGS) $(CPPFLAGS)
+override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(CPPFLAGS) $(ICU_CFLAGS)
# We need libpq only because fe_utils does.
LDFLAGS_INTERNAL += -L$(top_builddir)/src/fe_utils -lpgfeutils $(libpq_pgport) $(ICU_LIBS)
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index aa1589e3331..a1976fae607 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -17,6 +17,7 @@
#include <ctype.h>
+#include "catalog/pg_am_d.h"
#include "catalog/pg_class_d.h"
#include "catalog/pg_collation_d.h"
#include "catalog/pg_extension_d.h"
@@ -945,6 +946,24 @@ findOprByOid(Oid oid)
}
/*
+ * findAccessMethodByOid
+ * finds the DumpableObject for the access method with the given oid
+ * returns NULL if not found
+ */
+AccessMethodInfo *
+findAccessMethodByOid(Oid oid)
+{
+ CatalogId catId;
+ DumpableObject *dobj;
+
+ catId.tableoid = AccessMethodRelationId;
+ catId.oid = oid;
+ dobj = findObjectByCatalogId(catId);
+ Assert(dobj == NULL || dobj->objType == DO_ACCESS_METHOD);
+ return (AccessMethodInfo *) dobj;
+}
+
+/*
* findCollationByOid
* finds the DumpableObject for the collation with the given oid
* returns NULL if not found
diff --git a/src/bin/pg_dump/meson.build b/src/bin/pg_dump/meson.build
index 4a4ebbd8ec9..a2233b0a1b4 100644
--- a/src/bin/pg_dump/meson.build
+++ b/src/bin/pg_dump/meson.build
@@ -102,7 +102,6 @@ tests += {
't/003_pg_dump_with_server.pl',
't/004_pg_dump_parallel.pl',
't/005_pg_dump_filterfile.pl',
- 't/006_pg_dumpall.pl',
't/010_dump_connstr.pl',
],
},
diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
index 5974d6706fd..086adcdc502 100644
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -334,16 +334,6 @@ on_exit_close_archive(Archive *AHX)
}
/*
- * When pg_restore restores multiple databases, then update already added entry
- * into array for cleanup.
- */
-void
-replace_on_exit_close_archive(Archive *AHX)
-{
- shutdown_info.AHX = AHX;
-}
-
-/*
* on_exit_nicely handler for shutting down database connections and
* worker processes cleanly.
*/
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index af0007fb6d2..4ebef1e8644 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -308,7 +308,7 @@ extern void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ro
extern void ProcessArchiveRestoreOptions(Archive *AHX);
-extern void RestoreArchive(Archive *AHX, bool append_data);
+extern void RestoreArchive(Archive *AHX);
/* Open an existing archive */
extern Archive *OpenArchive(const char *FileSpec, const ArchiveFormat fmt);
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 30e0da31aa3..dce88f040ac 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -87,7 +87,7 @@ static int RestoringToDB(ArchiveHandle *AH);
static void dump_lo_buf(ArchiveHandle *AH);
static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);
static void SetOutput(ArchiveHandle *AH, const char *filename,
- const pg_compress_specification compression_spec, bool append_data);
+ const pg_compress_specification compression_spec);
static CompressFileHandle *SaveOutput(ArchiveHandle *AH);
static void RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput);
@@ -339,14 +339,9 @@ ProcessArchiveRestoreOptions(Archive *AHX)
StrictNamesCheck(ropt);
}
-/*
- * RestoreArchive
- *
- * If append_data is set, then append data into file as we are restoring dump
- * of multiple databases which was taken by pg_dumpall.
- */
+/* Public */
void
-RestoreArchive(Archive *AHX, bool append_data)
+RestoreArchive(Archive *AHX)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
RestoreOptions *ropt = AH->public.ropt;
@@ -463,7 +458,7 @@ RestoreArchive(Archive *AHX, bool append_data)
*/
sav = SaveOutput(AH);
if (ropt->filename || ropt->compression_spec.algorithm != PG_COMPRESSION_NONE)
- SetOutput(AH, ropt->filename, ropt->compression_spec, append_data);
+ SetOutput(AH, ropt->filename, ropt->compression_spec);
ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
@@ -1302,7 +1297,7 @@ PrintTOCSummary(Archive *AHX)
sav = SaveOutput(AH);
if (ropt->filename)
- SetOutput(AH, ropt->filename, out_compression_spec, false);
+ SetOutput(AH, ropt->filename, out_compression_spec);
if (strftime(stamp_str, sizeof(stamp_str), PGDUMP_STRFTIME_FMT,
localtime(&AH->createDate)) == 0)
@@ -1681,8 +1676,7 @@ archprintf(Archive *AH, const char *fmt,...)
static void
SetOutput(ArchiveHandle *AH, const char *filename,
- const pg_compress_specification compression_spec,
- bool append_data)
+ const pg_compress_specification compression_spec)
{
CompressFileHandle *CFH;
const char *mode;
@@ -1702,7 +1696,7 @@ SetOutput(ArchiveHandle *AH, const char *filename,
else
fn = fileno(stdout);
- if (append_data || AH->mode == archModeAppend)
+ if (AH->mode == archModeAppend)
mode = PG_BINARY_A;
else
mode = PG_BINARY_W;
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index 365073b3eae..325b53fc9bd 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -394,7 +394,6 @@ struct _tocEntry
extern int parallel_restore(ArchiveHandle *AH, TocEntry *te);
extern void on_exit_close_archive(Archive *AHX);
-extern void replace_on_exit_close_archive(Archive *AHX);
extern void warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...) pg_attribute_printf(2, 3);
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index d94d0de2a5d..b5ba3b46dd9 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -826,7 +826,7 @@ _CloseArchive(ArchiveHandle *AH)
savVerbose = AH->public.verbose;
AH->public.verbose = 0;
- RestoreArchive((Archive *) AH, false);
+ RestoreArchive((Archive *) AH);
SetArchiveOptions((Archive *) AH, savDopt, savRopt);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 6298edb26b5..f3a353a61a5 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -449,8 +449,6 @@ main(int argc, char **argv)
bool data_only = false;
bool schema_only = false;
bool statistics_only = false;
- bool with_data = false;
- bool with_schema = false;
bool with_statistics = false;
bool no_data = false;
bool no_schema = false;
@@ -514,6 +512,7 @@ main(int argc, char **argv)
{"section", required_argument, NULL, 5},
{"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
{"snapshot", required_argument, NULL, 6},
+ {"statistics", no_argument, NULL, 22},
{"statistics-only", no_argument, NULL, 18},
{"strict-names", no_argument, &strict_names, 1},
{"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
@@ -528,9 +527,6 @@ main(int argc, char **argv)
{"no-toast-compression", no_argument, &dopt.no_toast_compression, 1},
{"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
{"no-sync", no_argument, NULL, 7},
- {"with-data", no_argument, NULL, 22},
- {"with-schema", no_argument, NULL, 23},
- {"with-statistics", no_argument, NULL, 24},
{"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
{"rows-per-insert", required_argument, NULL, 10},
{"include-foreign-data", required_argument, NULL, 11},
@@ -798,14 +794,6 @@ main(int argc, char **argv)
break;
case 22:
- with_data = true;
- break;
-
- case 23:
- with_schema = true;
- break;
-
- case 24:
with_statistics = true;
break;
@@ -852,13 +840,17 @@ main(int argc, char **argv)
if (statistics_only && no_statistics)
pg_fatal("options --statistics-only and --no-statistics cannot be used together");
- /* reject conflicting "with-" and "no-" options */
- if (with_data && no_data)
- pg_fatal("options --with-data and --no-data cannot be used together");
- if (with_schema && no_schema)
- pg_fatal("options --with-schema and --no-schema cannot be used together");
+ /* reject conflicting "no-" options */
if (with_statistics && no_statistics)
- pg_fatal("options --with-statistics and --no-statistics cannot be used together");
+ pg_fatal("options --statistics and --no-statistics cannot be used together");
+
+ /* reject conflicting "-only" options */
+ if (data_only && with_statistics)
+ pg_fatal("options %s and %s cannot be used together",
+ "-a/--data-only", "--statistics");
+ if (schema_only && with_statistics)
+ pg_fatal("options %s and %s cannot be used together",
+ "-s/--schema-only", "--statistics");
if (schema_only && foreign_servers_include_patterns.head != NULL)
pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
@@ -873,16 +865,14 @@ main(int argc, char **argv)
pg_fatal("option --if-exists requires option -c/--clean");
/*
- * Set derivative flags. An "-only" option may be overridden by an
- * explicit "with-" option; e.g. "--schema-only --with-statistics" will
- * include schema and statistics. Other ambiguous or nonsensical
- * combinations, e.g. "--schema-only --no-schema", will have already
- * caused an error in one of the checks above.
+ * Set derivative flags. Ambiguous or nonsensical combinations, e.g.
+ * "--schema-only --no-schema", will have already caused an error in one
+ * of the checks above.
*/
dopt.dumpData = ((dopt.dumpData && !schema_only && !statistics_only) ||
- (data_only || with_data)) && !no_data;
+ data_only) && !no_data;
dopt.dumpSchema = ((dopt.dumpSchema && !data_only && !statistics_only) ||
- (schema_only || with_schema)) && !no_schema;
+ schema_only) && !no_schema;
dopt.dumpStatistics = ((dopt.dumpStatistics && !schema_only && !data_only) ||
(statistics_only || with_statistics)) && !no_statistics;
@@ -1265,7 +1255,7 @@ main(int argc, char **argv)
* right now.
*/
if (plainText)
- RestoreArchive(fout, false);
+ RestoreArchive(fout);
CloseArchive(fout);
@@ -1355,6 +1345,7 @@ help(const char *progname)
printf(_(" --sequence-data include sequence data in dump\n"));
printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
+ printf(_(" --statistics dump the statistics\n"));
printf(_(" --statistics-only dump only the statistics, not schema or data\n"));
printf(_(" --strict-names require table and/or schema include patterns to\n"
" match at least one entity each\n"));
@@ -1363,9 +1354,6 @@ help(const char *progname)
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
- printf(_(" --with-data dump the data\n"));
- printf(_(" --with-schema dump the schema\n"));
- printf(_(" --with-statistics dump the statistics\n"));
printf(_("\nConnection options:\n"));
printf(_(" -d, --dbname=DBNAME database to dump\n"));
@@ -2207,6 +2195,13 @@ selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
static void
selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
{
+ /* see getAccessMethods() comment about v9.6. */
+ if (fout->remoteVersion < 90600)
+ {
+ method->dobj.dump = DUMP_COMPONENT_NONE;
+ return;
+ }
+
if (checkExtensionMembership(&method->dobj, fout))
return; /* extension membership overrides all else */
@@ -6262,6 +6257,8 @@ getOperators(Archive *fout)
int i_oprnamespace;
int i_oprowner;
int i_oprkind;
+ int i_oprleft;
+ int i_oprright;
int i_oprcode;
/*
@@ -6273,6 +6270,8 @@ getOperators(Archive *fout)
"oprnamespace, "
"oprowner, "
"oprkind, "
+ "oprleft, "
+ "oprright, "
"oprcode::oid AS oprcode "
"FROM pg_operator");
@@ -6288,6 +6287,8 @@ getOperators(Archive *fout)
i_oprnamespace = PQfnumber(res, "oprnamespace");
i_oprowner = PQfnumber(res, "oprowner");
i_oprkind = PQfnumber(res, "oprkind");
+ i_oprleft = PQfnumber(res, "oprleft");
+ i_oprright = PQfnumber(res, "oprright");
i_oprcode = PQfnumber(res, "oprcode");
for (i = 0; i < ntups; i++)
@@ -6301,6 +6302,8 @@ getOperators(Archive *fout)
findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)));
oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner));
oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
+ oprinfo[i].oprleft = atooid(PQgetvalue(res, i, i_oprleft));
+ oprinfo[i].oprright = atooid(PQgetvalue(res, i, i_oprright));
oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
/* Decide whether we want to dump it */
@@ -6329,6 +6332,7 @@ getCollations(Archive *fout)
int i_collname;
int i_collnamespace;
int i_collowner;
+ int i_collencoding;
query = createPQExpBuffer();
@@ -6339,7 +6343,8 @@ getCollations(Archive *fout)
appendPQExpBufferStr(query, "SELECT tableoid, oid, collname, "
"collnamespace, "
- "collowner "
+ "collowner, "
+ "collencoding "
"FROM pg_collation");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
@@ -6353,6 +6358,7 @@ getCollations(Archive *fout)
i_collname = PQfnumber(res, "collname");
i_collnamespace = PQfnumber(res, "collnamespace");
i_collowner = PQfnumber(res, "collowner");
+ i_collencoding = PQfnumber(res, "collencoding");
for (i = 0; i < ntups; i++)
{
@@ -6364,6 +6370,7 @@ getCollations(Archive *fout)
collinfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_collnamespace)));
collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner));
+ collinfo[i].collencoding = atoi(PQgetvalue(res, i, i_collencoding));
/* Decide whether we want to dump it */
selectDumpableObject(&(collinfo[i].dobj), fout);
@@ -6454,16 +6461,28 @@ getAccessMethods(Archive *fout)
int i_amhandler;
int i_amtype;
- /* Before 9.6, there are no user-defined access methods */
- if (fout->remoteVersion < 90600)
- return;
-
query = createPQExpBuffer();
- /* Select all access methods from pg_am table */
- appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, amtype, "
- "amhandler::pg_catalog.regproc AS amhandler "
- "FROM pg_am");
+ /*
+ * Select all access methods from pg_am table. v9.6 introduced CREATE
+ * ACCESS METHOD, so earlier versions usually have only built-in access
+ * methods. v9.6 also changed the access method API, replacing dozens of
+ * pg_am columns with amhandler. Even if a user created an access method
+ * by "INSERT INTO pg_am", we have no way to translate pre-v9.6 pg_am
+ * columns to a v9.6+ CREATE ACCESS METHOD. Hence, before v9.6, read
+ * pg_am just to facilitate findAccessMethodByOid() providing the
+ * OID-to-name mapping.
+ */
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, ");
+ if (fout->remoteVersion >= 90600)
+ appendPQExpBufferStr(query,
+ "amtype, "
+ "amhandler::pg_catalog.regproc AS amhandler ");
+ else
+ appendPQExpBufferStr(query,
+ "'i'::pg_catalog.\"char\" AS amtype, "
+ "'-'::pg_catalog.regproc AS amhandler ");
+ appendPQExpBufferStr(query, "FROM pg_am");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
@@ -6512,6 +6531,7 @@ getOpclasses(Archive *fout)
OpclassInfo *opcinfo;
int i_tableoid;
int i_oid;
+ int i_opcmethod;
int i_opcname;
int i_opcnamespace;
int i_opcowner;
@@ -6521,7 +6541,7 @@ getOpclasses(Archive *fout)
* system-defined opclasses at dump-out time.
*/
- appendPQExpBufferStr(query, "SELECT tableoid, oid, opcname, "
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, opcmethod, opcname, "
"opcnamespace, "
"opcowner "
"FROM pg_opclass");
@@ -6534,6 +6554,7 @@ getOpclasses(Archive *fout)
i_tableoid = PQfnumber(res, "tableoid");
i_oid = PQfnumber(res, "oid");
+ i_opcmethod = PQfnumber(res, "opcmethod");
i_opcname = PQfnumber(res, "opcname");
i_opcnamespace = PQfnumber(res, "opcnamespace");
i_opcowner = PQfnumber(res, "opcowner");
@@ -6547,6 +6568,7 @@ getOpclasses(Archive *fout)
opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
opcinfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace)));
+ opcinfo[i].opcmethod = atooid(PQgetvalue(res, i, i_opcmethod));
opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner));
/* Decide whether we want to dump it */
@@ -6572,6 +6594,7 @@ getOpfamilies(Archive *fout)
OpfamilyInfo *opfinfo;
int i_tableoid;
int i_oid;
+ int i_opfmethod;
int i_opfname;
int i_opfnamespace;
int i_opfowner;
@@ -6583,7 +6606,7 @@ getOpfamilies(Archive *fout)
* system-defined opfamilies at dump-out time.
*/
- appendPQExpBufferStr(query, "SELECT tableoid, oid, opfname, "
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, opfmethod, opfname, "
"opfnamespace, "
"opfowner "
"FROM pg_opfamily");
@@ -6597,6 +6620,7 @@ getOpfamilies(Archive *fout)
i_tableoid = PQfnumber(res, "tableoid");
i_oid = PQfnumber(res, "oid");
i_opfname = PQfnumber(res, "opfname");
+ i_opfmethod = PQfnumber(res, "opfmethod");
i_opfnamespace = PQfnumber(res, "opfnamespace");
i_opfowner = PQfnumber(res, "opfowner");
@@ -6609,6 +6633,7 @@ getOpfamilies(Archive *fout)
opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
opfinfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_opfnamespace)));
+ opfinfo[i].opfmethod = atooid(PQgetvalue(res, i, i_opfmethod));
opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner));
/* Decide whether we want to dump it */
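
Note: getAccessMethods() now reads pg_am on every server version, synthesizing amtype/amhandler placeholders before v9.6 so the result set has one shape and findAccessMethodByOid() can map OIDs to names for the new sort tie-breakers; selectDumpableAccessMethod() then marks pre-9.6 entries as not dumped. A fragment sketching how such uniform rows are consumed (variable names as in pg_dump's get* functions, assumed):

    /* one code path for all versions; pre-9.6 rows carry the synthesized
     * placeholders amtype 'i' and amhandler '-' purely so the columns exist */
    int     i_oid = PQfnumber(res, "oid");
    int     i_amname = PQfnumber(res, "amname");
    int     i_amtype = PQfnumber(res, "amtype");

    for (int i = 0; i < ntups; i++)
    {
        aminfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
        aminfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_amname));
        aminfo[i].amtype = *PQgetvalue(res, i, i_amtype);
    }
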
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 93a4475d51b..dde85ed156c 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -260,6 +260,8 @@ typedef struct _oprInfo
DumpableObject dobj;
const char *rolname;
char oprkind;
+ Oid oprleft;
+ Oid oprright;
Oid oprcode;
} OprInfo;
@@ -273,12 +275,14 @@ typedef struct _accessMethodInfo
typedef struct _opclassInfo
{
DumpableObject dobj;
+ Oid opcmethod;
const char *rolname;
} OpclassInfo;
typedef struct _opfamilyInfo
{
DumpableObject dobj;
+ Oid opfmethod;
const char *rolname;
} OpfamilyInfo;
@@ -286,6 +290,7 @@ typedef struct _collInfo
{
DumpableObject dobj;
const char *rolname;
+ int collencoding;
} CollInfo;
typedef struct _convInfo
@@ -760,6 +765,7 @@ extern TableInfo *findTableByOid(Oid oid);
extern TypeInfo *findTypeByOid(Oid oid);
extern FuncInfo *findFuncByOid(Oid oid);
extern OprInfo *findOprByOid(Oid oid);
+extern AccessMethodInfo *findAccessMethodByOid(Oid oid);
extern CollInfo *findCollationByOid(Oid oid);
extern NamespaceInfo *findNamespaceByOid(Oid oid);
extern ExtensionInfo *findExtensionByOid(Oid oid);
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index f99a0797ea7..a02da3e9652 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -162,6 +162,8 @@ static DumpId postDataBoundId;
static int DOTypeNameCompare(const void *p1, const void *p2);
+static int pgTypeNameCompare(Oid typid1, Oid typid2);
+static int accessMethodNameCompare(Oid am1, Oid am2);
static bool TopoSort(DumpableObject **objs,
int numObjs,
DumpableObject **ordering,
@@ -228,12 +230,39 @@ DOTypeNameCompare(const void *p1, const void *p2)
else if (obj2->namespace)
return 1;
- /* Sort by name */
+ /*
+ * Sort by name. With a few exceptions, names here are single catalog
+ * columns. To get a fuller picture, grep pg_dump.c for "dobj.name = ".
+ * Names here don't match "Name:" in plain format output, which is a
+ * _tocEntry.tag. For example, DumpableObject.name of a constraint is
+ * pg_constraint.conname, but _tocEntry.tag of a constraint is relname and
+ * conname joined with a space.
+ */
cmpval = strcmp(obj1->name, obj2->name);
if (cmpval != 0)
return cmpval;
- /* To have a stable sort order, break ties for some object types */
+ /*
+ * Sort by type. This helps types that share a type priority without
+ * sharing a unique name constraint, e.g. opclass and opfamily.
+ */
+ cmpval = obj1->objType - obj2->objType;
+ if (cmpval != 0)
+ return cmpval;
+
+ /*
+ * To have a stable sort order, break ties for some object types. Most
+ * catalogs have a natural key, e.g. pg_proc_proname_args_nsp_index. Where
+ * the above "namespace" and "name" comparisons don't cover all natural
+ * key columns, compare the rest here.
+ *
+ * The natural key usually refers to other catalogs by surrogate keys.
+ * Hence, this translates each of those references to the natural key of
+ * the referenced catalog. That may descend through multiple levels of
+ * catalog references. For example, to sort by pg_proc.proargtypes,
+ * descend to each pg_type and then further to its pg_namespace, for an
+ * overall sort by (nspname, typname).
+ */
if (obj1->objType == DO_FUNC || obj1->objType == DO_AGG)
{
FuncInfo *fobj1 = *(FuncInfo *const *) p1;
@@ -246,22 +275,10 @@ DOTypeNameCompare(const void *p1, const void *p2)
return cmpval;
for (i = 0; i < fobj1->nargs; i++)
{
- TypeInfo *argtype1 = findTypeByOid(fobj1->argtypes[i]);
- TypeInfo *argtype2 = findTypeByOid(fobj2->argtypes[i]);
-
- if (argtype1 && argtype2)
- {
- if (argtype1->dobj.namespace && argtype2->dobj.namespace)
- {
- cmpval = strcmp(argtype1->dobj.namespace->dobj.name,
- argtype2->dobj.namespace->dobj.name);
- if (cmpval != 0)
- return cmpval;
- }
- cmpval = strcmp(argtype1->dobj.name, argtype2->dobj.name);
- if (cmpval != 0)
- return cmpval;
- }
+ cmpval = pgTypeNameCompare(fobj1->argtypes[i],
+ fobj2->argtypes[i]);
+ if (cmpval != 0)
+ return cmpval;
}
}
else if (obj1->objType == DO_OPERATOR)
@@ -273,6 +290,57 @@ DOTypeNameCompare(const void *p1, const void *p2)
cmpval = (oobj2->oprkind - oobj1->oprkind);
if (cmpval != 0)
return cmpval;
+ /* Within an oprkind, sort by argument type names */
+ cmpval = pgTypeNameCompare(oobj1->oprleft, oobj2->oprleft);
+ if (cmpval != 0)
+ return cmpval;
+ cmpval = pgTypeNameCompare(oobj1->oprright, oobj2->oprright);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else if (obj1->objType == DO_OPCLASS)
+ {
+ OpclassInfo *opcobj1 = *(OpclassInfo *const *) p1;
+ OpclassInfo *opcobj2 = *(OpclassInfo *const *) p2;
+
+ /* Sort by access method name, per pg_opclass_am_name_nsp_index */
+ cmpval = accessMethodNameCompare(opcobj1->opcmethod,
+ opcobj2->opcmethod);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else if (obj1->objType == DO_OPFAMILY)
+ {
+ OpfamilyInfo *opfobj1 = *(OpfamilyInfo *const *) p1;
+ OpfamilyInfo *opfobj2 = *(OpfamilyInfo *const *) p2;
+
+ /* Sort by access method name, per pg_opfamily_am_name_nsp_index */
+ cmpval = accessMethodNameCompare(opfobj1->opfmethod,
+ opfobj2->opfmethod);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else if (obj1->objType == DO_COLLATION)
+ {
+ CollInfo *cobj1 = *(CollInfo *const *) p1;
+ CollInfo *cobj2 = *(CollInfo *const *) p2;
+
+ /*
+ * Sort by encoding, per pg_collation_name_enc_nsp_index. Technically,
+ * this is not necessary, because wherever this changes dump order,
+ * restoring the dump fails anyway. CREATE COLLATION can't create a
+ * tie for this to break, because it imposes restrictions to make
+ * (nspname, collname) uniquely identify a collation within a given
+ * DatabaseEncoding. While pg_import_system_collations() can create a
+ * tie, pg_dump+restore fails after
+ * pg_import_system_collations('my_schema') does so. However, there's
+ * little to gain by ignoring one natural key column on the basis of
+ * those limitations elsewhere, so respect the full natural key like
+ * we do for other object types.
+ */
+ cmpval = cobj1->collencoding - cobj2->collencoding;
+ if (cmpval != 0)
+ return cmpval;
}
else if (obj1->objType == DO_ATTRDEF)
{
@@ -317,11 +385,143 @@ DOTypeNameCompare(const void *p1, const void *p2)
if (cmpval != 0)
return cmpval;
}
+ else if (obj1->objType == DO_CONSTRAINT)
+ {
+ ConstraintInfo *robj1 = *(ConstraintInfo *const *) p1;
+ ConstraintInfo *robj2 = *(ConstraintInfo *const *) p2;
- /* Usually shouldn't get here, but if we do, sort by OID */
+ /*
+ * Sort domain constraints before table constraints, for consistency
+ * with our decision to sort CREATE DOMAIN before CREATE TABLE.
+ */
+ if (robj1->condomain)
+ {
+ if (robj2->condomain)
+ {
+ /* Sort by domain name (domain namespace was considered) */
+ cmpval = strcmp(robj1->condomain->dobj.name,
+ robj2->condomain->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else
+ return PRIO_TYPE - PRIO_TABLE;
+ }
+ else if (robj2->condomain)
+ return PRIO_TABLE - PRIO_TYPE;
+ else
+ {
+ /* Sort by table name (table namespace was considered already) */
+ cmpval = strcmp(robj1->contable->dobj.name,
+ robj2->contable->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ }
+ else if (obj1->objType == DO_PUBLICATION_REL)
+ {
+ PublicationRelInfo *probj1 = *(PublicationRelInfo *const *) p1;
+ PublicationRelInfo *probj2 = *(PublicationRelInfo *const *) p2;
+
+ /* Sort by publication name, since (namespace, name) match the rel */
+ cmpval = strcmp(probj1->publication->dobj.name,
+ probj2->publication->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else if (obj1->objType == DO_PUBLICATION_TABLE_IN_SCHEMA)
+ {
+ PublicationSchemaInfo *psobj1 = *(PublicationSchemaInfo *const *) p1;
+ PublicationSchemaInfo *psobj2 = *(PublicationSchemaInfo *const *) p2;
+
+ /* Sort by publication name, since ->name is just nspname */
+ cmpval = strcmp(psobj1->publication->dobj.name,
+ psobj2->publication->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
+
+ /*
+ * Shouldn't get here except after catalog corruption, but if we do, sort
+ * by OID. This may make logically-identical databases differ in the
+ * order of objects in dump output. Users will get spurious schema diffs.
+ * Expect flaky failures of 002_pg_upgrade.pl test 'dump outputs from
+ * original and restored regression databases match' if the regression
+ * database contains objects allowing that test to reach here. That's a
+ * consequence of the test using "pg_restore -j", which doesn't fully
+ * constrain OID assignment order.
+ */
+ Assert(false);
return oidcmp(obj1->catId.oid, obj2->catId.oid);
}
+/* Compare two OID-identified pg_type values by nspname, then by typname. */
+static int
+pgTypeNameCompare(Oid typid1, Oid typid2)
+{
+ TypeInfo *typobj1;
+ TypeInfo *typobj2;
+ int cmpval;
+
+ if (typid1 == typid2)
+ return 0;
+
+ typobj1 = findTypeByOid(typid1);
+ typobj2 = findTypeByOid(typid2);
+
+ if (!typobj1 || !typobj2)
+ {
+ /*
+ * getTypes() didn't find some OID. Assume catalog corruption, e.g.
+ * an oprright value without the corresponding OID in a pg_type row.
+ * Report as "equal", so the caller uses the next available basis for
+ * comparison, e.g. the next function argument.
+ *
+ * Unary operators have InvalidOid in oprleft (if oprkind='r') or in
+ * oprright (if oprkind='l'). Caller already sorted by oprkind,
+ * calling us only for like-kind operators. Hence, "typid1 == typid2"
+ * took care of InvalidOid. (v14 removed postfix operator support.
+ * Hence, when dumping from v14+, only oprleft can be InvalidOid.)
+ */
+ Assert(false);
+ return 0;
+ }
+
+ if (!typobj1->dobj.namespace || !typobj2->dobj.namespace)
+ Assert(false); /* catalog corruption */
+ else
+ {
+ cmpval = strcmp(typobj1->dobj.namespace->dobj.name,
+ typobj2->dobj.namespace->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ return strcmp(typobj1->dobj.name, typobj2->dobj.name);
+}
+
+/* Compare two OID-identified pg_am values by amname. */
+static int
+accessMethodNameCompare(Oid am1, Oid am2)
+{
+ AccessMethodInfo *amobj1;
+ AccessMethodInfo *amobj2;
+
+ if (am1 == am2)
+ return 0;
+
+ amobj1 = findAccessMethodByOid(am1);
+ amobj2 = findAccessMethodByOid(am2);
+
+ if (!amobj1 || !amobj2)
+ {
+ /* catalog corruption: handle like pgTypeNameCompare() does */
+ Assert(false);
+ return 0;
+ }
+
+ return strcmp(amobj1->dobj.name, amobj2->dobj.name);
+}
+
/*
* Sort the given objects into a safe dump order using dependency
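
Note: the new tie-breakers all follow the discipline described in the comment block above: compare by (namespace, name) first, and when a referenced catalog must break the tie, translate the surrogate OID into that catalog's natural key before comparing. A standalone sketch of the descend-through-the-reference idea:

    #include <string.h>

    typedef struct Obj
    {
        const char *nsp;        /* namespace name */
        const char *name;       /* object name */
        int         refid;      /* index of a referenced Obj, like an OID */
    } Obj;

    static const Obj catalog[] = {
        {"pg_catalog", "int4", 0},
        {"pg_catalog", "text", 0},
    };

    static int
    cmp_names(const Obj *a, const Obj *b)
    {
        int     c = strcmp(a->nsp, b->nsp);

        return c != 0 ? c : strcmp(a->name, b->name);
    }

    static int
    cmp_with_tiebreak(const Obj *a, const Obj *b)
    {
        int     c = cmp_names(a, b);

        if (c != 0)
            return c;
        /* names tied: descend through the reference, as pgTypeNameCompare()
         * descends from an argument type OID into pg_type and pg_namespace */
        return cmp_names(&catalog[a->refid], &catalog[b->refid]);
    }
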
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 100317b1aa9..27aa1b65698 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -65,10 +65,9 @@ static void dropTablespaces(PGconn *conn);
static void dumpTablespaces(PGconn *conn);
static void dropDBs(PGconn *conn);
static void dumpUserConfig(PGconn *conn, const char *username);
-static void dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat);
+static void dumpDatabases(PGconn *conn);
static void dumpTimestamp(const char *msg);
-static int runPgDump(const char *dbname, const char *create_opts,
- char *dbfile, ArchiveFormat archDumpFormat);
+static int runPgDump(const char *dbname, const char *create_opts);
static void buildShSecLabels(PGconn *conn,
const char *catalog_name, Oid objectId,
const char *objtype, const char *objname,
@@ -77,7 +76,6 @@ static void executeCommand(PGconn *conn, const char *query);
static void expand_dbname_patterns(PGconn *conn, SimpleStringList *patterns,
SimpleStringList *names);
static void read_dumpall_filters(const char *filename, SimpleStringList *pattern);
-static ArchiveFormat parseDumpFormat(const char *format);
static char pg_dump_bin[MAXPGPATH];
static PQExpBuffer pgdumpopts;
@@ -107,8 +105,6 @@ static int no_subscriptions = 0;
static int no_toast_compression = 0;
static int no_unlogged_table_data = 0;
static int no_role_passwords = 0;
-static int with_data = 0;
-static int with_schema = 0;
static int with_statistics = 0;
static int server_version;
static int load_via_partition_root = 0;
@@ -150,7 +146,6 @@ main(int argc, char *argv[])
{"password", no_argument, NULL, 'W'},
{"no-privileges", no_argument, NULL, 'x'},
{"no-acl", no_argument, NULL, 'x'},
- {"format", required_argument, NULL, 'F'},
/*
* the following options don't have an equivalent short option letter
@@ -183,11 +178,9 @@ main(int argc, char *argv[])
{"no-sync", no_argument, NULL, 4},
{"no-toast-compression", no_argument, &no_toast_compression, 1},
{"no-unlogged-table-data", no_argument, &no_unlogged_table_data, 1},
- {"with-data", no_argument, &with_data, 1},
- {"with-schema", no_argument, &with_schema, 1},
- {"with-statistics", no_argument, &with_statistics, 1},
{"on-conflict-do-nothing", no_argument, &on_conflict_do_nothing, 1},
{"rows-per-insert", required_argument, NULL, 7},
+ {"statistics", no_argument, &with_statistics, 1},
{"statistics-only", no_argument, &statistics_only, 1},
{"filter", required_argument, NULL, 8},
{"sequence-data", no_argument, &sequence_data, 1},
@@ -201,8 +194,6 @@ main(int argc, char *argv[])
char *pgdb = NULL;
char *use_role = NULL;
const char *dumpencoding = NULL;
- ArchiveFormat archDumpFormat = archNull;
- const char *formatName = "p";
trivalue prompt_password = TRI_DEFAULT;
bool data_only = false;
bool globals_only = false;
@@ -252,7 +243,7 @@ main(int argc, char *argv[])
pgdumpopts = createPQExpBuffer();
- while ((c = getopt_long(argc, argv, "acd:E:f:F:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1)
+ while ((c = getopt_long(argc, argv, "acd:E:f:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1)
{
switch (c)
{
@@ -280,9 +271,7 @@ main(int argc, char *argv[])
appendPQExpBufferStr(pgdumpopts, " -f ");
appendShellString(pgdumpopts, filename);
break;
- case 'F':
- formatName = pg_strdup(optarg);
- break;
+
case 'g':
globals_only = true;
break;
@@ -431,21 +420,6 @@ main(int argc, char *argv[])
exit_nicely(1);
}
- /* Get format for dump. */
- archDumpFormat = parseDumpFormat(formatName);
-
- /*
- * If a non-plain format is specified, a file name is also required as the
- * path to the main directory.
- */
- if (archDumpFormat != archNull &&
- (!filename || strcmp(filename, "") == 0))
- {
- pg_log_error("option -F/--format=d|c|t requires option -f/--file");
- pg_log_error_hint("Try \"%s --help\" for more information.", progname);
- exit_nicely(1);
- }
-
/*
* If password values are not required in the dump, switch to using
* pg_roles which is equally useful, just more likely to have unrestricted
@@ -497,12 +471,8 @@ main(int argc, char *argv[])
appendPQExpBufferStr(pgdumpopts, " --no-toast-compression");
if (no_unlogged_table_data)
appendPQExpBufferStr(pgdumpopts, " --no-unlogged-table-data");
- if (with_data)
- appendPQExpBufferStr(pgdumpopts, " --with-data");
- if (with_schema)
- appendPQExpBufferStr(pgdumpopts, " --with-schema");
if (with_statistics)
- appendPQExpBufferStr(pgdumpopts, " --with-statistics");
+ appendPQExpBufferStr(pgdumpopts, " --statistics");
if (on_conflict_do_nothing)
appendPQExpBufferStr(pgdumpopts, " --on-conflict-do-nothing");
if (statistics_only)
@@ -511,33 +481,6 @@ main(int argc, char *argv[])
appendPQExpBufferStr(pgdumpopts, " --sequence-data");
/*
- * Open the output file if required, otherwise use stdout. If required,
- * then create new directory and global.dat file.
- */
- if (archDumpFormat != archNull)
- {
- char global_path[MAXPGPATH];
-
- /* Create new directory or accept the empty existing directory. */
- create_or_open_dir(filename);
-
- snprintf(global_path, MAXPGPATH, "%s/global.dat", filename);
-
- OPF = fopen(global_path, PG_BINARY_W);
- if (!OPF)
- pg_fatal("could not open file \"%s\": %m", global_path);
- }
- else if (filename)
- {
- OPF = fopen(filename, PG_BINARY_W);
- if (!OPF)
- pg_fatal("could not open output file \"%s\": %m",
- filename);
- }
- else
- OPF = stdout;
-
- /*
* If there was a database specified on the command line, use that,
* otherwise try to connect to database "postgres", and failing that
* "template1".
@@ -577,6 +520,19 @@ main(int argc, char *argv[])
&database_exclude_names);
/*
+ * Open the output file if required, otherwise use stdout
+ */
+ if (filename)
+ {
+ OPF = fopen(filename, PG_BINARY_W);
+ if (!OPF)
+ pg_fatal("could not open output file \"%s\": %m",
+ filename);
+ }
+ else
+ OPF = stdout;
+
+ /*
* Set the client encoding if requested.
*/
if (dumpencoding)
@@ -675,7 +631,7 @@ main(int argc, char *argv[])
}
if (!globals_only && !roles_only && !tablespaces_only)
- dumpDatabases(conn, archDumpFormat);
+ dumpDatabases(conn);
PQfinish(conn);
@@ -688,7 +644,7 @@ main(int argc, char *argv[])
fclose(OPF);
/* sync the resulting file, errors are not fatal */
- if (dosync && (archDumpFormat == archNull))
+ if (dosync)
(void) fsync_fname(filename, false);
}
@@ -699,14 +655,12 @@ main(int argc, char *argv[])
static void
help(void)
{
- printf(_("%s exports a PostgreSQL database cluster as an SQL script or to other formats.\n\n"), progname);
+ printf(_("%s exports a PostgreSQL database cluster as an SQL script.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]...\n"), progname);
printf(_("\nGeneral options:\n"));
printf(_(" -f, --file=FILENAME output file name\n"));
- printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
- " plain text (default))\n"));
printf(_(" -v, --verbose verbose mode\n"));
printf(_(" -V, --version output version information, then exit\n"));
printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
@@ -750,13 +704,11 @@ help(void)
printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
printf(_(" --sequence-data include sequence data in dump\n"));
+ printf(_(" --statistics dump the statistics\n"));
printf(_(" --statistics-only dump only the statistics, not schema or data\n"));
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
- printf(_(" --with-data dump the data\n"));
- printf(_(" --with-schema dump the schema\n"));
- printf(_(" --with-statistics dump the statistics\n"));
printf(_("\nConnection options:\n"));
printf(_(" -d, --dbname=CONNSTR connect using connection string\n"));
@@ -1013,6 +965,9 @@ dumpRoles(PGconn *conn)
* We do it this way because config settings for roles could mention the
* names of other roles.
*/
+ if (PQntuples(res) > 0)
+ fprintf(OPF, "\n--\n-- User Configurations\n--\n");
+
for (i = 0; i < PQntuples(res); i++)
dumpUserConfig(conn, PQgetvalue(res, i, i_rolname));
@@ -1526,7 +1481,6 @@ dumpUserConfig(PGconn *conn, const char *username)
{
PQExpBuffer buf = createPQExpBuffer();
PGresult *res;
- static bool header_done = false;
printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting "
"WHERE setdatabase = 0 AND setrole = "
@@ -1538,13 +1492,7 @@ dumpUserConfig(PGconn *conn, const char *username)
res = executeQuery(conn, buf->data);
if (PQntuples(res) > 0)
- {
- if (!header_done)
- fprintf(OPF, "\n--\n-- User Configurations\n--\n");
- header_done = true;
-
fprintf(OPF, "\n--\n-- User Config \"%s\"\n--\n\n", username);
- }
for (int i = 0; i < PQntuples(res); i++)
{
@@ -1618,13 +1566,10 @@ expand_dbname_patterns(PGconn *conn,
* Dump contents of databases.
*/
static void
-dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
+dumpDatabases(PGconn *conn)
{
PGresult *res;
int i;
- char db_subdir[MAXPGPATH];
- char dbfilepath[MAXPGPATH];
- FILE *map_file = NULL;
/*
* Skip databases marked not datallowconn, since we'd be unable to connect
@@ -1638,42 +1583,18 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
* doesn't have some failure mode with --clean.
*/
res = executeQuery(conn,
- "SELECT datname, oid "
+ "SELECT datname "
"FROM pg_database d "
"WHERE datallowconn AND datconnlimit != -2 "
"ORDER BY (datname <> 'template1'), datname");
- if (archDumpFormat == archNull && PQntuples(res) > 0)
+ if (PQntuples(res) > 0)
fprintf(OPF, "--\n-- Databases\n--\n\n");
- /*
- * If directory/tar/custom format is specified, create a subdirectory
- * under the main directory and each database dump file or subdirectory
- * will be created in that subdirectory by pg_dump.
- */
- if (archDumpFormat != archNull)
- {
- char map_file_path[MAXPGPATH];
-
- snprintf(db_subdir, MAXPGPATH, "%s/databases", filename);
-
- /* Create a subdirectory with 'databases' name under main directory. */
- if (mkdir(db_subdir, pg_dir_create_mode) != 0)
- pg_fatal("could not create directory \"%s\": %m", db_subdir);
-
- snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename);
-
- /* Create a map file (to store dboid and dbname) */
- map_file = fopen(map_file_path, PG_BINARY_W);
- if (!map_file)
- pg_fatal("could not open file \"%s\": %m", map_file_path);
- }
-
for (i = 0; i < PQntuples(res); i++)
{
char *dbname = PQgetvalue(res, i, 0);
- char *oid = PQgetvalue(res, i, 1);
- const char *create_opts = "";
+ const char *create_opts;
int ret;
/* Skip template0, even if it's not marked !datallowconn. */
@@ -1687,27 +1608,9 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
continue;
}
- /*
- * If this is not a plain format dump, then append dboid and dbname to
- * the map.dat file.
- */
- if (archDumpFormat != archNull)
- {
- if (archDumpFormat == archCustom)
- snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".dmp", db_subdir, oid);
- else if (archDumpFormat == archTar)
- snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".tar", db_subdir, oid);
- else
- snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\"", db_subdir, oid);
-
- /* Put one line entry for dboid and dbname in map file. */
- fprintf(map_file, "%s %s\n", oid, dbname);
- }
-
pg_log_info("dumping database \"%s\"", dbname);
- if (archDumpFormat == archNull)
- fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname);
+ fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname);
/*
* We assume that "template1" and "postgres" already exist in the
@@ -1721,9 +1624,12 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
{
if (output_clean)
create_opts = "--clean --create";
- /* Since pg_dump won't emit a \connect command, we must */
- else if (archDumpFormat == archNull)
+ else
+ {
+ create_opts = "";
+ /* Since pg_dump won't emit a \connect command, we must */
fprintf(OPF, "\\connect %s\n\n", dbname);
+ }
}
else
create_opts = "--create";
@@ -1731,30 +1637,19 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
if (filename)
fclose(OPF);
- ret = runPgDump(dbname, create_opts, dbfilepath, archDumpFormat);
+ ret = runPgDump(dbname, create_opts);
if (ret != 0)
pg_fatal("pg_dump failed on database \"%s\", exiting", dbname);
if (filename)
{
- char global_path[MAXPGPATH];
-
- if (archDumpFormat != archNull)
- snprintf(global_path, MAXPGPATH, "%s/global.dat", filename);
- else
- snprintf(global_path, MAXPGPATH, "%s", filename);
-
- OPF = fopen(global_path, PG_BINARY_A);
+ OPF = fopen(filename, PG_BINARY_A);
if (!OPF)
pg_fatal("could not re-open the output file \"%s\": %m",
- global_path);
+ filename);
}
}
- /* Close map file */
- if (archDumpFormat != archNull)
- fclose(map_file);
-
PQclear(res);
}
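
For orientation, the deleted branch above produced a directory whose layout is only implied by scattered snprintf calls: a global.dat script for the globals, a map.dat index, and per-database archives named by OID under databases/. A minimal standalone sketch reconstructing those paths, assuming a made-up output directory and OID (the real code also distinguishes .tar and directory-format names):

#include <stdio.h>

int
main(void)
{
    const char *filename = "/tmp/dumpall_out";  /* hypothetical -f target */
    unsigned int oid = 16384;                   /* example database OID */
    char        path[1024];

    /* globals (roles, tablespaces, etc.) as a plain SQL script */
    snprintf(path, sizeof(path), "%s/global.dat", filename);
    puts(path);

    /* index file: one "OID dbname" line per dumped database */
    snprintf(path, sizeof(path), "%s/map.dat", filename);
    puts(path);

    /* per-database archive, named by OID (custom format shown) */
    snprintf(path, sizeof(path), "%s/databases/%u.dmp", filename, oid);
    puts(path);
    return 0;
}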
@@ -1764,8 +1659,7 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
* Run pg_dump on dbname, with specified options.
*/
static int
-runPgDump(const char *dbname, const char *create_opts, char *dbfile,
- ArchiveFormat archDumpFormat)
+runPgDump(const char *dbname, const char *create_opts)
{
PQExpBufferData connstrbuf;
PQExpBufferData cmd;
@@ -1774,36 +1668,17 @@ runPgDump(const char *dbname, const char *create_opts, char *dbfile,
initPQExpBuffer(&connstrbuf);
initPQExpBuffer(&cmd);
+ printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin,
+ pgdumpopts->data, create_opts);
+
/*
- * If this is not a plain format dump, then append file name and dump
- * format to the pg_dump command to get archive dump.
+ * If we have a filename, use the undocumented plain-append pg_dump
+ * format.
*/
- if (archDumpFormat != archNull)
- {
- printfPQExpBuffer(&cmd, "\"%s\" -f %s %s", pg_dump_bin,
- dbfile, create_opts);
-
- if (archDumpFormat == archDirectory)
- appendPQExpBufferStr(&cmd, " --format=directory ");
- else if (archDumpFormat == archCustom)
- appendPQExpBufferStr(&cmd, " --format=custom ");
- else if (archDumpFormat == archTar)
- appendPQExpBufferStr(&cmd, " --format=tar ");
- }
+ if (filename)
+ appendPQExpBufferStr(&cmd, " -Fa ");
else
- {
- printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin,
- pgdumpopts->data, create_opts);
-
- /*
- * If we have a filename, use the undocumented plain-append pg_dump
- * format.
- */
- if (filename)
- appendPQExpBufferStr(&cmd, " -Fa ");
- else
- appendPQExpBufferStr(&cmd, " -Fp ");
- }
+ appendPQExpBufferStr(&cmd, " -Fp ");
/*
* Append the database name to the already-constructed stem of connection
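
To make the two branches above concrete, here is a small self-contained sketch of the command stems runPgDump ends up with before the connection string and database name are appended; the binary path and option strings are invented placeholders, and the real code uses PQExpBuffer with proper shell quoting rather than a fixed buffer:

#include <stdio.h>

int
main(void)
{
    const char *pg_dump_bin = "/usr/local/pgsql/bin/pg_dump";   /* assumed path */
    const char *pgdumpopts = "--no-sync";       /* accumulated pg_dump options */
    const char *create_opts = "--clean --create";
    char        cmd[1024];

    /* with -f/--file: append plain text to the file already holding globals */
    snprintf(cmd, sizeof(cmd), "\"%s\" %s %s -Fa", pg_dump_bin,
             pgdumpopts, create_opts);
    puts(cmd);

    /* without a file: ordinary plain format written to stdout */
    snprintf(cmd, sizeof(cmd), "\"%s\" %s %s -Fp", pg_dump_bin,
             pgdumpopts, create_opts);
    puts(cmd);
    return 0;
}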
@@ -1948,36 +1823,3 @@ read_dumpall_filters(const char *filename, SimpleStringList *pattern)
filter_free(&fstate);
}
-
-/*
- * parseDumpFormat
- *
- * This will validate dump formats.
- */
-static ArchiveFormat
-parseDumpFormat(const char *format)
-{
- ArchiveFormat archDumpFormat;
-
- if (pg_strcasecmp(format, "c") == 0)
- archDumpFormat = archCustom;
- else if (pg_strcasecmp(format, "custom") == 0)
- archDumpFormat = archCustom;
- else if (pg_strcasecmp(format, "d") == 0)
- archDumpFormat = archDirectory;
- else if (pg_strcasecmp(format, "directory") == 0)
- archDumpFormat = archDirectory;
- else if (pg_strcasecmp(format, "p") == 0)
- archDumpFormat = archNull;
- else if (pg_strcasecmp(format, "plain") == 0)
- archDumpFormat = archNull;
- else if (pg_strcasecmp(format, "t") == 0)
- archDumpFormat = archTar;
- else if (pg_strcasecmp(format, "tar") == 0)
- archDumpFormat = archTar;
- else
- pg_fatal("unrecognized output format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"",
- format);
-
- return archDumpFormat;
-}
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 6ef789cb06d..6c129278bc5 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -2,7 +2,7 @@
*
* pg_restore.c
 * pg_restore is a utility for extracting postgres database definitions
- * from a backup archive created by pg_dump/pg_dumpall using the archiver
+ * from a backup archive created by pg_dump using the archiver
* interface.
*
* pg_restore will read the backup archive and
@@ -41,15 +41,11 @@
#include "postgres_fe.h"
#include <ctype.h>
-#include <sys/stat.h>
#ifdef HAVE_TERMIOS_H
#include <termios.h>
#endif
-#include "common/string.h"
-#include "connectdb.h"
#include "fe_utils/option_utils.h"
-#include "fe_utils/string_utils.h"
#include "filter.h"
#include "getopt_long.h"
#include "parallel.h"
@@ -57,43 +53,18 @@
static void usage(const char *progname);
static void read_restore_filters(const char *filename, RestoreOptions *opts);
-static bool file_exists_in_directory(const char *dir, const char *filename);
-static int restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
- int numWorkers, bool append_data, int num);
-static int read_one_statement(StringInfo inBuf, FILE *pfile);
-static int restore_all_databases(PGconn *conn, const char *dumpdirpath,
- SimpleStringList db_exclude_patterns, RestoreOptions *opts, int numWorkers);
-static int process_global_sql_commands(PGconn *conn, const char *dumpdirpath,
- const char *outfile);
-static void copy_or_print_global_file(const char *outfile, FILE *pfile);
-static int get_dbnames_list_to_restore(PGconn *conn,
- SimplePtrList *dbname_oid_list,
- SimpleStringList db_exclude_patterns);
-static int get_dbname_oid_list_from_mfile(const char *dumpdirpath,
- SimplePtrList *dbname_oid_list);
-
-/*
- * Stores a database OID and the corresponding name.
- */
-typedef struct DbOidName
-{
- Oid oid;
- char str[FLEXIBLE_ARRAY_MEMBER]; /* null-terminated string here */
-} DbOidName;
-
int
main(int argc, char **argv)
{
RestoreOptions *opts;
int c;
+ int exit_code;
int numWorkers = 1;
+ Archive *AH;
char *inputFileSpec;
bool data_only = false;
bool schema_only = false;
- int n_errors = 0;
- bool globals_only = false;
- SimpleStringList db_exclude_patterns = {NULL, NULL};
static int disable_triggers = 0;
static int enable_row_security = 0;
static int if_exists = 0;
@@ -111,15 +82,12 @@ main(int argc, char **argv)
static int no_subscriptions = 0;
static int strict_names = 0;
static int statistics_only = 0;
- static int with_data = 0;
- static int with_schema = 0;
static int with_statistics = 0;
struct option cmdopts[] = {
{"clean", 0, NULL, 'c'},
{"create", 0, NULL, 'C'},
{"data-only", 0, NULL, 'a'},
- {"globals-only", 0, NULL, 'g'},
{"dbname", 1, NULL, 'd'},
{"exit-on-error", 0, NULL, 'e'},
{"exclude-schema", 1, NULL, 'N'},
@@ -169,12 +137,9 @@ main(int argc, char **argv)
{"no-security-labels", no_argument, &no_security_labels, 1},
{"no-subscriptions", no_argument, &no_subscriptions, 1},
{"no-statistics", no_argument, &no_statistics, 1},
- {"with-data", no_argument, &with_data, 1},
- {"with-schema", no_argument, &with_schema, 1},
- {"with-statistics", no_argument, &with_statistics, 1},
+ {"statistics", no_argument, &with_statistics, 1},
{"statistics-only", no_argument, &statistics_only, 1},
{"filter", required_argument, NULL, 4},
- {"exclude-database", required_argument, NULL, 6},
{NULL, 0, NULL, 0}
};
@@ -203,7 +168,7 @@ main(int argc, char **argv)
}
}
- while ((c = getopt_long(argc, argv, "acCd:ef:F:gh:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1",
+ while ((c = getopt_long(argc, argv, "acCd:ef:F:h:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1",
cmdopts, NULL)) != -1)
{
switch (c)
@@ -230,14 +195,11 @@ main(int argc, char **argv)
if (strlen(optarg) != 0)
opts->formatName = pg_strdup(optarg);
break;
- case 'g':
- /* restore only global.dat file from directory */
- globals_only = true;
- break;
case 'h':
if (strlen(optarg) != 0)
opts->cparams.pghost = pg_strdup(optarg);
break;
+
case 'j': /* number of restore jobs */
if (!option_parse_int(optarg, "-j/--jobs", 1,
PG_MAX_JOBS,
@@ -352,9 +314,6 @@ main(int argc, char **argv)
exit(1);
opts->exit_on_error = true;
break;
- case 6: /* database patterns to skip */
- simple_string_list_append(&db_exclude_patterns, optarg);
- break;
default:
/* getopt_long already emitted a complaint */
@@ -382,13 +341,6 @@ main(int argc, char **argv)
if (!opts->cparams.dbname && !opts->filename && !opts->tocSummary)
pg_fatal("one of -d/--dbname and -f/--file must be specified");
- if (db_exclude_patterns.head != NULL && globals_only)
- {
- pg_log_error("option --exclude-database cannot be used together with -g/--globals-only");
- pg_log_error_hint("Try \"%s --help\" for more information.", progname);
- exit_nicely(1);
- }
-
/* Should get at most one of -d and -f, else user is confused */
if (opts->cparams.dbname)
{
@@ -417,13 +369,17 @@ main(int argc, char **argv)
if (statistics_only && no_statistics)
pg_fatal("options --statistics-only and --no-statistics cannot be used together");
- /* reject conflicting "with-" and "no-" options */
- if (with_data && no_data)
- pg_fatal("options --with-data and --no-data cannot be used together");
- if (with_schema && no_schema)
- pg_fatal("options --with-schema and --no-schema cannot be used together");
+ /* reject conflicting "no-" options */
if (with_statistics && no_statistics)
- pg_fatal("options --with-statistics and --no-statistics cannot be used together");
+ pg_fatal("options --statistics and --no-statistics cannot be used together");
+
+ /* reject conflicting "only-" options */
+ if (data_only && with_statistics)
+ pg_fatal("options %s and %s cannot be used together",
+ "-a/--data-only", "--statistics");
+ if (schema_only && with_statistics)
+ pg_fatal("options %s and %s cannot be used together",
+ "-s/--schema-only", "--statistics");
if (data_only && opts->dropSchema)
pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
@@ -443,16 +399,14 @@ main(int argc, char **argv)
pg_fatal("cannot specify both --single-transaction and multiple jobs");
/*
- * Set derivative flags. An "-only" option may be overridden by an
- * explicit "with-" option; e.g. "--schema-only --with-statistics" will
- * include schema and statistics. Other ambiguous or nonsensical
- * combinations, e.g. "--schema-only --no-schema", will have already
- * caused an error in one of the checks above.
+ * Set derivative flags. Ambiguous or nonsensical combinations, e.g.
+ * "--schema-only --no-schema", will have already caused an error in one
+ * of the checks above.
*/
opts->dumpData = ((opts->dumpData && !schema_only && !statistics_only) ||
- (data_only || with_data)) && !no_data;
+ data_only) && !no_data;
opts->dumpSchema = ((opts->dumpSchema && !data_only && !statistics_only) ||
- (schema_only || with_schema)) && !no_schema;
+ schema_only) && !no_schema;
opts->dumpStatistics = ((opts->dumpStatistics && !schema_only && !data_only) ||
(statistics_only || with_statistics)) && !no_statistics;
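
As a worked illustration of the derivative-flag expressions just above, consider -s/--schema-only with everything else at its default: dumpData comes out false even though --no-data was never given. A standalone sketch with example flag values (not pg_restore's actual option handling):

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    bool        dumpData = true;        /* default before option handling */
    bool        schema_only = true;     /* user gave -s/--schema-only */
    bool        statistics_only = false;
    bool        data_only = false;
    bool        no_data = false;

    dumpData = ((dumpData && !schema_only && !statistics_only) ||
                data_only) && !no_data;

    /* prints "dumpData = false": schema-only suppresses the data pass */
    printf("dumpData = %s\n", dumpData ? "true" : "false");
    return 0;
}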
@@ -496,114 +450,6 @@ main(int argc, char **argv)
opts->formatName);
}
- /*
- * If toc.dat file is not present in the current path, then check for
- * global.dat. If global.dat file is present, then restore all the
- * databases from map.dat (if it exists), but skip restoring those
- * matching --exclude-database patterns.
- */
- if (inputFileSpec != NULL && !file_exists_in_directory(inputFileSpec, "toc.dat") &&
- file_exists_in_directory(inputFileSpec, "global.dat"))
- {
- PGconn *conn = NULL; /* Connection to restore global sql
- * commands. */
-
- /*
- * Can only use --list or --use-list options with a single database
- * dump.
- */
- if (opts->tocSummary)
- pg_fatal("option -l/--list cannot be used when restoring an archive created by pg_dumpall");
- else if (opts->tocFile)
- pg_fatal("option -L/--use-list cannot be used when restoring an archive created by pg_dumpall");
-
- /*
- * To restore from a pg_dumpall archive, -C (create database) option
- * must be specified unless we are only restoring globals.
- */
- if (!globals_only && opts->createDB != 1)
- {
- pg_log_error("option -C/--create must be specified when restoring an archive created by pg_dumpall");
- pg_log_error_hint("Try \"%s --help\" for more information.", progname);
- pg_log_error_hint("Individual databases can be restored using their specific archives.");
- exit_nicely(1);
- }
-
- /*
- * Connect to the database to execute global sql commands from
- * global.dat file.
- */
- if (opts->cparams.dbname)
- {
- conn = ConnectDatabase(opts->cparams.dbname, NULL, opts->cparams.pghost,
- opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
- false, progname, NULL, NULL, NULL, NULL);
-
-
- if (!conn)
- pg_fatal("could not connect to database \"%s\"", opts->cparams.dbname);
- }
-
- /* If globals-only, then return from here. */
- if (globals_only)
- {
- /*
- * Open global.dat file and execute/append all the global sql
- * commands.
- */
- n_errors = process_global_sql_commands(conn, inputFileSpec,
- opts->filename);
-
- if (conn)
- PQfinish(conn);
-
- pg_log_info("database restoring skipped because option -g/--globals-only was specified");
- }
- else
- {
- /* Now restore all the databases from map.dat */
- n_errors = restore_all_databases(conn, inputFileSpec, db_exclude_patterns,
- opts, numWorkers);
- }
-
- /* Free db pattern list. */
- simple_string_list_destroy(&db_exclude_patterns);
- }
- else /* process if global.dat file does not exist. */
- {
- if (db_exclude_patterns.head != NULL)
- pg_fatal("option --exclude-database can be used only when restoring an archive created by pg_dumpall");
-
- if (globals_only)
- pg_fatal("option -g/--globals-only can be used only when restoring an archive created by pg_dumpall");
-
- n_errors = restore_one_database(inputFileSpec, opts, numWorkers, false, 0);
- }
-
- /* Done, print a summary of ignored errors during restore. */
- if (n_errors)
- {
- pg_log_warning("errors ignored on restore: %d", n_errors);
- return 1;
- }
-
- return 0;
-}
-
-/*
- * restore_one_database
- *
- * This restores one database from its toc.dat file.
- *
- * Returns the number of errors encountered during restore.
- */
-static int
-restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
- int numWorkers, bool append_data, int num)
-{
- Archive *AH;
- int n_errors;
-
AH = OpenArchive(inputFileSpec, opts->format);
SetArchiveOptions(AH, NULL, opts);
@@ -611,15 +457,9 @@ restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
/*
* We don't have a connection yet but that doesn't matter. The connection
* is initialized to NULL and if we terminate through exit_nicely() while
- * it's still NULL, the cleanup function will just be a no-op. If we are
- * restoring multiple databases, then only update AX handle for cleanup as
- * the previous entry was already in the array and we had closed previous
- * connection, so we can use the same array slot.
+ * it's still NULL, the cleanup function will just be a no-op.
*/
- if (!append_data || num == 0)
- on_exit_close_archive(AH);
- else
- replace_on_exit_close_archive(AH);
+ on_exit_close_archive(AH);
/* Let the archiver know how noisy to be */
AH->verbose = opts->verbose;
@@ -639,21 +479,25 @@ restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
else
{
ProcessArchiveRestoreOptions(AH);
- RestoreArchive(AH, append_data);
+ RestoreArchive(AH);
}
- n_errors = AH->n_errors;
+ /* done, print a summary of ignored errors */
+ if (AH->n_errors)
+ pg_log_warning("errors ignored on restore: %d", AH->n_errors);
/* AH may be freed in CloseArchive? */
+ exit_code = AH->n_errors ? 1 : 0;
+
CloseArchive(AH);
- return n_errors;
+ return exit_code;
}
static void
usage(const char *progname)
{
- printf(_("%s restores PostgreSQL databases from archives created by pg_dump or pg_dumpall.\n\n"), progname);
+ printf(_("%s restores a PostgreSQL database from an archive created by pg_dump.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]... [FILE]\n"), progname);
@@ -671,7 +515,6 @@ usage(const char *progname)
printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
printf(_(" -C, --create create the target database\n"));
printf(_(" -e, --exit-on-error exit on error, default is to continue\n"));
- printf(_(" -g, --globals-only restore only global objects, no databases\n"));
printf(_(" -I, --index=NAME restore named index\n"));
printf(_(" -j, --jobs=NUM use this many parallel jobs to restore\n"));
printf(_(" -L, --use-list=FILENAME use table of contents from this file for\n"
@@ -688,7 +531,6 @@ usage(const char *progname)
printf(_(" -1, --single-transaction restore as a single transaction\n"));
printf(_(" --disable-triggers disable triggers during data-only restore\n"));
printf(_(" --enable-row-security enable row security\n"));
- printf(_(" --exclude-database=PATTERN do not restore the specified database(s)\n"));
printf(_(" --filter=FILENAME restore or skip objects based on expressions\n"
" in FILENAME\n"));
printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
@@ -705,6 +547,7 @@ usage(const char *progname)
printf(_(" --no-table-access-method do not restore table access methods\n"));
printf(_(" --no-tablespaces do not restore tablespace assignments\n"));
printf(_(" --section=SECTION restore named section (pre-data, data, or post-data)\n"));
+ printf(_(" --statistics restore the statistics\n"));
printf(_(" --statistics-only restore only the statistics, not schema or data\n"));
printf(_(" --strict-names require table and/or schema include patterns to\n"
" match at least one entity each\n"));
@@ -712,9 +555,6 @@ usage(const char *progname)
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
- printf(_(" --with-data restore the data\n"));
- printf(_(" --with-schema restore the schema\n"));
- printf(_(" --with-statistics restore the statistics\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
@@ -725,8 +565,8 @@ usage(const char *progname)
printf(_(" --role=ROLENAME do SET ROLE before restore\n"));
printf(_("\n"
- "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be\n"
- "combined and specified multiple times to select multiple objects.\n"));
+ "The options -I, -n, -N, -P, -t, -T, and --section can be combined and specified\n"
+ "multiple times to select multiple objects.\n"));
printf(_("\nIf no input file name is supplied, then standard input is used.\n\n"));
printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
@@ -831,585 +671,3 @@ read_restore_filters(const char *filename, RestoreOptions *opts)
filter_free(&fstate);
}
-
-/*
- * file_exists_in_directory
- *
- * Returns true if the file exists in the given directory.
- */
-static bool
-file_exists_in_directory(const char *dir, const char *filename)
-{
- struct stat st;
- char buf[MAXPGPATH];
-
- if (snprintf(buf, MAXPGPATH, "%s/%s", dir, filename) >= MAXPGPATH)
- pg_fatal("directory name too long: \"%s\"", dir);
-
- return (stat(buf, &st) == 0 && S_ISREG(st.st_mode));
-}
-
-/*
- * read_one_statement
- *
- * This reads from the passed file pointer using fgetc() until a semicolon
- * (the SQL statement terminator used in global.dat) is seen.
- *
- * EOF is returned if end-of-file input is seen; time to shut down.
- */
-
-static int
-read_one_statement(StringInfo inBuf, FILE *pfile)
-{
- int c; /* character read from getc() */
- int m;
-
- StringInfoData q;
-
- initStringInfo(&q);
-
- resetStringInfo(inBuf);
-
- /*
- * Read characters until EOF or the appropriate delimiter is seen.
- */
- while ((c = fgetc(pfile)) != EOF)
- {
- if (c != '\'' && c != '"' && c != '\n' && c != ';')
- {
- appendStringInfoChar(inBuf, (char) c);
- while ((c = fgetc(pfile)) != EOF)
- {
- if (c != '\'' && c != '"' && c != ';' && c != '\n')
- appendStringInfoChar(inBuf, (char) c);
- else
- break;
- }
- }
-
- if (c == '\'' || c == '"')
- {
- appendStringInfoChar(&q, (char) c);
- m = c;
-
- while ((c = fgetc(pfile)) != EOF)
- {
- appendStringInfoChar(&q, (char) c);
-
- if (c == m)
- {
- appendStringInfoString(inBuf, q.data);
- resetStringInfo(&q);
- break;
- }
- }
- }
-
- if (c == ';')
- {
- appendStringInfoChar(inBuf, (char) ';');
- break;
- }
-
- if (c == '\n')
- appendStringInfoChar(inBuf, (char) '\n');
- }
-
- pg_free(q.data);
-
- /* No input before EOF signal means time to quit. */
- if (c == EOF && inBuf->len == 0)
- return EOF;
-
- /* return something that's not EOF */
- return 'Q';
-}
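
The essential behavior of the deleted reader above is splitting on semicolons while treating quoted semicolons as literal text. A rough standalone equivalent over an in-memory buffer (the input fragment is invented, and unlike the original this skips the newline bookkeeping):

#include <stdio.h>

int
main(void)
{
    /* hypothetical global.dat fragment with quoted semicolons */
    const char *in = "CREATE ROLE \"odd;name\";\nALTER ROLE x PASSWORD 'a;b';";
    const char *start = in;
    char        quote = 0;

    for (const char *p = in; *p != '\0'; p++)
    {
        if (quote)
        {
            if (*p == quote)
                quote = 0;      /* closing quote ends the literal */
        }
        else if (*p == '\'' || *p == '"')
            quote = *p;         /* opening quote starts a literal */
        else if (*p == ';')
        {
            /* one complete statement, terminating semicolon included */
            printf("statement: %.*s\n", (int) (p - start + 1), start);
            start = p + 1;
        }
    }
    return 0;
}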
-
-/*
- * get_dbnames_list_to_restore
- *
- * This marks for skipping any entries in dbname_oid_list that match a
- * pattern in the db_exclude_patterns list.
- *
- * Returns the number of databases to be restored.
- *
- */
-static int
-get_dbnames_list_to_restore(PGconn *conn,
- SimplePtrList *dbname_oid_list,
- SimpleStringList db_exclude_patterns)
-{
- int count_db = 0;
- PQExpBuffer query;
- PGresult *res;
-
- query = createPQExpBuffer();
-
- if (!conn)
-		pg_log_info("no database connection available, so --exclude-database patterns are matched as literal names");
-
- /*
- * Process one by one all dbnames and if specified to skip restoring, then
- * remove dbname from list.
- */
- for (SimplePtrListCell *db_cell = dbname_oid_list->head;
- db_cell; db_cell = db_cell->next)
- {
- DbOidName *dbidname = (DbOidName *) db_cell->ptr;
- bool skip_db_restore = false;
- PQExpBuffer db_lit = createPQExpBuffer();
-
- appendStringLiteralConn(db_lit, dbidname->str, conn);
-
- for (SimpleStringListCell *pat_cell = db_exclude_patterns.head; pat_cell; pat_cell = pat_cell->next)
- {
- /*
- * If there is an exact match then we don't need to try a pattern
- * match
- */
- if (pg_strcasecmp(dbidname->str, pat_cell->val) == 0)
- skip_db_restore = true;
- /* Otherwise, try a pattern match if there is a connection */
- else if (conn)
- {
- int dotcnt;
-
- appendPQExpBufferStr(query, "SELECT 1 ");
- processSQLNamePattern(conn, query, pat_cell->val, false,
- false, NULL, db_lit->data,
- NULL, NULL, NULL, &dotcnt);
-
- if (dotcnt > 0)
- {
- pg_log_error("improper qualified name (too many dotted names): %s",
- dbidname->str);
- PQfinish(conn);
- exit_nicely(1);
- }
-
- res = executeQuery(conn, query->data);
-
- if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res))
- {
- skip_db_restore = true;
- pg_log_info("database name \"%s\" matches exclude pattern \"%s\"", dbidname->str, pat_cell->val);
- }
-
- PQclear(res);
- resetPQExpBuffer(query);
- }
-
- if (skip_db_restore)
- break;
- }
-
- destroyPQExpBuffer(db_lit);
-
- /*
- * Mark db to be skipped or increment the counter of dbs to be
- * restored
- */
- if (skip_db_restore)
- {
- pg_log_info("excluding database \"%s\"", dbidname->str);
- dbidname->oid = InvalidOid;
- }
- else
- {
- count_db++;
- }
- }
-
- destroyPQExpBuffer(query);
-
- return count_db;
-}
-
-/*
- * get_dbname_oid_list_from_mfile
- *
- * Open the map.dat file, read it line by line, and build a list of
- * database names and their corresponding OIDs.
- *
- * Returns the total number of database names found in map.dat.
- */
-static int
-get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oid_list)
-{
- StringInfoData linebuf;
- FILE *pfile;
- char map_file_path[MAXPGPATH];
- int count = 0;
-
-
- /*
- * If there is only global.dat file in dump, then return from here as
- * there is no database to restore.
- */
- if (!file_exists_in_directory(dumpdirpath, "map.dat"))
- {
- pg_log_info("database restoring is skipped because file \"%s\" does not exist in directory \"%s\"", "map.dat", dumpdirpath);
- return 0;
- }
-
- snprintf(map_file_path, MAXPGPATH, "%s/map.dat", dumpdirpath);
-
- /* Open map.dat file. */
- pfile = fopen(map_file_path, PG_BINARY_R);
-
- if (pfile == NULL)
- pg_fatal("could not open file \"%s\": %m", map_file_path);
-
- initStringInfo(&linebuf);
-
- /* Append all the dbname/db_oid combinations to the list. */
- while (pg_get_line_buf(pfile, &linebuf))
- {
- Oid db_oid = InvalidOid;
- char *dbname;
- DbOidName *dbidname;
- int namelen;
- char *p = linebuf.data;
-
- /* Extract dboid. */
- while (isdigit((unsigned char) *p))
- p++;
- if (p > linebuf.data && *p == ' ')
- {
- sscanf(linebuf.data, "%u", &db_oid);
- p++;
- }
-
- /* dbname is the rest of the line */
- dbname = p;
- namelen = strlen(dbname);
-
- /* Report error and exit if the file has any corrupted data. */
- if (!OidIsValid(db_oid) || namelen <= 1)
- pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path,
- count + 1);
-
- pg_log_info("found database \"%s\" (OID: %u) in file \"%s\"",
- dbname, db_oid, map_file_path);
-
- dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1);
- dbidname->oid = db_oid;
- strlcpy(dbidname->str, dbname, namelen);
-
- simple_ptr_list_append(dbname_oid_list, dbidname);
- count++;
- }
-
- /* Close map.dat file. */
- fclose(pfile);
-
- return count;
-}
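
Since the map.dat layout is only implied by the hand-rolled digit scan above, here is a compact standalone reader for the same one-"OID dbname"-per-line format; the file location is an assumption, sscanf stands in for the manual parse, and like the original it would not cope with database names containing newlines:

#include <stdio.h>

int
main(void)
{
    FILE       *f = fopen("map.dat", "r");  /* assumed location */
    char        line[1024];

    if (f == NULL)
        return 1;

    while (fgets(line, sizeof(line), f))
    {
        unsigned int oid;
        char        dbname[1000];

        /* the OID comes first, then the name as the rest of the line */
        if (sscanf(line, "%u %999[^\n]", &oid, dbname) == 2)
            printf("database \"%s\" has OID %u\n", dbname, oid);
    }
    fclose(f);
    return 0;
}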
-
-/*
- * restore_all_databases
- *
- * This restores the databases whose dumps are present in the dump
- * directory, according to the map.dat mapping.
- *
- * Databases matching an --exclude-database pattern are skipped.
- *
- * Returns the number of errors encountered during restore.
- */
-static int
-restore_all_databases(PGconn *conn, const char *dumpdirpath,
- SimpleStringList db_exclude_patterns, RestoreOptions *opts,
- int numWorkers)
-{
- SimplePtrList dbname_oid_list = {NULL, NULL};
- int num_db_restore = 0;
- int num_total_db;
- int n_errors_total;
- int count = 0;
- char *connected_db = NULL;
- bool dumpData = opts->dumpData;
- bool dumpSchema = opts->dumpSchema;
- bool dumpStatistics = opts->dumpSchema;
-
- /* Save db name to reuse it for all the database. */
- if (opts->cparams.dbname)
- connected_db = opts->cparams.dbname;
-
- num_total_db = get_dbname_oid_list_from_mfile(dumpdirpath, &dbname_oid_list);
-
- /* If map.dat has no entries, return after processing global.dat */
- if (dbname_oid_list.head == NULL)
- return process_global_sql_commands(conn, dumpdirpath, opts->filename);
-
- pg_log_info(ngettext("found %d database name in \"%s\"",
- "found %d database names in \"%s\"",
- num_total_db),
- num_total_db, "map.dat");
-
- if (!conn)
- {
- pg_log_info("trying to connect to database \"%s\"", "postgres");
-
- conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost,
- opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
- false, progname, NULL, NULL, NULL, NULL);
-
- /* Try with template1. */
- if (!conn)
- {
- pg_log_info("trying to connect to database \"%s\"", "template1");
-
- conn = ConnectDatabase("template1", NULL, opts->cparams.pghost,
- opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
- false, progname, NULL, NULL, NULL, NULL);
- }
- }
-
- /*
- * filter the db list according to the exclude patterns
- */
- num_db_restore = get_dbnames_list_to_restore(conn, &dbname_oid_list,
- db_exclude_patterns);
-
- /* Open global.dat file and execute/append all the global sql commands. */
- n_errors_total = process_global_sql_commands(conn, dumpdirpath, opts->filename);
-
- /* Close the db connection as we are done with globals and patterns. */
- if (conn)
- PQfinish(conn);
-
- /* Exit if no db needs to be restored. */
- if (dbname_oid_list.head == NULL || num_db_restore == 0)
- {
- pg_log_info(ngettext("no database needs restoring out of %d database",
- "no database needs restoring out of %d databases", num_total_db),
- num_total_db);
- return n_errors_total;
- }
-
- pg_log_info("need to restore %d databases out of %d databases", num_db_restore, num_total_db);
-
- /*
- * We have a list of databases to restore after processing the
- * exclude-database switch(es). Now we can restore them one by one.
- */
- for (SimplePtrListCell *db_cell = dbname_oid_list.head;
- db_cell; db_cell = db_cell->next)
- {
- DbOidName *dbidname = (DbOidName *) db_cell->ptr;
- char subdirpath[MAXPGPATH];
- char subdirdbpath[MAXPGPATH];
- char dbfilename[MAXPGPATH];
- int n_errors;
-
- /* ignore dbs marked for skipping */
- if (dbidname->oid == InvalidOid)
- continue;
-
- /*
- * We need to reset override_dbname so that objects can be restored
- * into an already created database. (used with -d/--dbname option)
- */
- if (opts->cparams.override_dbname)
- {
- pfree(opts->cparams.override_dbname);
- opts->cparams.override_dbname = NULL;
- }
-
- snprintf(subdirdbpath, MAXPGPATH, "%s/databases", dumpdirpath);
-
- /*
- * Look for the database dump file/dir. If there is an {oid}.tar or
- * {oid}.dmp file, use it. Otherwise try to use a directory called
- * {oid}
- */
- snprintf(dbfilename, MAXPGPATH, "%u.tar", dbidname->oid);
- if (file_exists_in_directory(subdirdbpath, dbfilename))
- snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.tar", dumpdirpath, dbidname->oid);
- else
- {
- snprintf(dbfilename, MAXPGPATH, "%u.dmp", dbidname->oid);
-
- if (file_exists_in_directory(subdirdbpath, dbfilename))
- snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.dmp", dumpdirpath, dbidname->oid);
- else
- snprintf(subdirpath, MAXPGPATH, "%s/databases/%u", dumpdirpath, dbidname->oid);
- }
-
- pg_log_info("restoring database \"%s\"", dbidname->str);
-
- /* If database is already created, then don't set createDB flag. */
- if (opts->cparams.dbname)
- {
- PGconn *test_conn;
-
- test_conn = ConnectDatabase(dbidname->str, NULL, opts->cparams.pghost,
- opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
- false, progname, NULL, NULL, NULL, NULL);
- if (test_conn)
- {
- PQfinish(test_conn);
-
- /* Use already created database for connection. */
- opts->createDB = 0;
- opts->cparams.dbname = dbidname->str;
- }
- else
- {
- /* we'll have to create it */
- opts->createDB = 1;
- opts->cparams.dbname = connected_db;
- }
- }
-
- /*
- * Reset flags - might have been reset in pg_backup_archiver.c by the
- * previous restore.
- */
- opts->dumpData = dumpData;
- opts->dumpSchema = dumpSchema;
- opts->dumpStatistics = dumpStatistics;
-
- /* Restore the single database. */
- n_errors = restore_one_database(subdirpath, opts, numWorkers, true, count);
-
- /* Print a summary of ignored errors during single database restore. */
- if (n_errors)
- {
- n_errors_total += n_errors;
- pg_log_warning("errors ignored on database \"%s\" restore: %d", dbidname->str, n_errors);
- }
-
- count++;
- }
-
- /* Log number of processed databases. */
- pg_log_info("number of restored databases is %d", num_db_restore);
-
- /* Free dbname and dboid list. */
- simple_ptr_list_destroy(&dbname_oid_list);
-
- return n_errors_total;
-}
-
-/*
- * process_global_sql_commands
- *
- * Open global.dat and execute or copy the sql commands one by one.
- *
- * If outfile is not NULL, copy all sql commands into outfile rather than
- * executing them.
- *
- * Returns the number of errors while processing global.dat
- */
-static int
-process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *outfile)
-{
- char global_file_path[MAXPGPATH];
- PGresult *result;
- StringInfoData sqlstatement,
- user_create;
- FILE *pfile;
- int n_errors = 0;
-
- snprintf(global_file_path, MAXPGPATH, "%s/global.dat", dumpdirpath);
-
- /* Open global.dat file. */
- pfile = fopen(global_file_path, PG_BINARY_R);
-
- if (pfile == NULL)
- pg_fatal("could not open file \"%s\": %m", global_file_path);
-
- /*
- * If outfile is given, then just copy all global.dat file data into
- * outfile.
- */
- if (outfile)
- {
- copy_or_print_global_file(outfile, pfile);
- return 0;
- }
-
- /* Init sqlstatement to append commands. */
- initStringInfo(&sqlstatement);
-
- /* creation statement for our current role */
- initStringInfo(&user_create);
- appendStringInfoString(&user_create, "CREATE ROLE ");
- /* should use fmtId here, but we don't know the encoding */
- appendStringInfoString(&user_create, PQuser(conn));
- appendStringInfoChar(&user_create, ';');
-
- /* Process file till EOF and execute sql statements. */
- while (read_one_statement(&sqlstatement, pfile) != EOF)
- {
- /* don't try to create the role we are connected as */
- if (strstr(sqlstatement.data, user_create.data))
- continue;
-
- pg_log_info("executing query: %s", sqlstatement.data);
- result = PQexec(conn, sqlstatement.data);
-
- switch (PQresultStatus(result))
- {
- case PGRES_COMMAND_OK:
- case PGRES_TUPLES_OK:
- case PGRES_EMPTY_QUERY:
- break;
- default:
- n_errors++;
- pg_log_error("could not execute query: %s", PQerrorMessage(conn));
- pg_log_error_detail("Command was: %s", sqlstatement.data);
- }
- PQclear(result);
- }
-
- /* Print a summary of ignored errors during global.dat. */
- if (n_errors)
- pg_log_warning(ngettext("ignored %d error in file \"%s\"",
- "ignored %d errors in file \"%s\"", n_errors),
- n_errors, global_file_path);
- fclose(pfile);
-
- return n_errors;
-}
-
-/*
- * copy_or_print_global_file
- *
- * Copy global.dat into the output file. If "-" is used as outfile,
- * then print commands to stdout.
- */
-static void
-copy_or_print_global_file(const char *outfile, FILE *pfile)
-{
- char out_file_path[MAXPGPATH];
- FILE *OPF;
- int c;
-
- /* "-" is used for stdout. */
- if (strcmp(outfile, "-") == 0)
- OPF = stdout;
- else
- {
- snprintf(out_file_path, MAXPGPATH, "%s", outfile);
- OPF = fopen(out_file_path, PG_BINARY_W);
-
- if (OPF == NULL)
- {
- fclose(pfile);
- pg_fatal("could not open file: \"%s\"", outfile);
- }
- }
-
- /* Append global.dat into output file or print to stdout. */
- while ((c = fgetc(pfile)) != EOF)
- fputc(c, OPF);
-
- fclose(pfile);
-
- /* Close output file. */
- if (strcmp(outfile, "-") != 0)
- fclose(OPF);
-}
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index c3c5fae11ea..37d893d5e6a 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -237,24 +237,6 @@ command_fails_like(
'pg_restore: options -C\/--create and -1\/--single-transaction cannot be used together'
);
-command_fails_like(
- [ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ],
- qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
- 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only'
-);
-
-command_fails_like(
- [ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ],
- qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --exclude-database is used in pg_restore with dump of pg_dump'
-);
-
-command_fails_like(
- [ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ],
- qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --globals-only is not used in pg_restore with dump of pg_dump'
-);
-
# also fails for -r and -t, but it seems pointless to add more tests for those.
command_fails_like(
[ 'pg_dumpall', '--exclude-database=foo', '--globals-only' ],
@@ -262,8 +244,4 @@ command_fails_like(
'pg_dumpall: option --exclude-database cannot be used together with -g/--globals-only'
);
-command_fails_like(
- [ 'pg_dumpall', '--format', 'x' ],
- qr/\Qpg_dumpall: error: unrecognized output format "x";\E/,
- 'pg_dumpall: unrecognized output format');
done_testing();
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 6c7ec80e271..a86b38466de 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -68,7 +68,7 @@ my %pgdump_runs = (
'--no-data',
'--sequence-data',
'--binary-upgrade',
- '--with-statistics',
+ '--statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
restore_cmd => [
@@ -76,7 +76,7 @@ my %pgdump_runs = (
'--format' => 'custom',
'--verbose',
'--file' => "$tempdir/binary_upgrade.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/binary_upgrade.dump",
],
},
@@ -90,13 +90,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_custom.dump",
- '--with-statistics',
+ '--statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_gzip_custom.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/compression_gzip_custom.dump",
],
command_like => {
@@ -119,7 +119,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'gzip:1',
'--file' => "$tempdir/compression_gzip_dir",
- '--with-statistics',
+ '--statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -137,7 +137,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_gzip_dir.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/compression_gzip_dir",
],
},
@@ -150,7 +150,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_plain.sql.gz",
- '--with-statistics',
+ '--statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -169,13 +169,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_custom.dump",
- '--with-statistics',
+ '--statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_lz4_custom.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
@@ -198,7 +198,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'lz4:1',
'--file' => "$tempdir/compression_lz4_dir",
- '--with-statistics',
+ '--statistics',
'postgres',
],
# Verify that data files were compressed
@@ -210,7 +210,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_lz4_dir.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/compression_lz4_dir",
],
},
@@ -223,7 +223,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_plain.sql.lz4",
- '--with-statistics',
+ '--statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -245,13 +245,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'zstd',
'--file' => "$tempdir/compression_zstd_custom.dump",
- '--with-statistics',
+ '--statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_zstd_custom.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/compression_zstd_custom.dump",
],
command_like => {
@@ -273,7 +273,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'zstd:1',
'--file' => "$tempdir/compression_zstd_dir",
- '--with-statistics',
+ '--statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -294,7 +294,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_zstd_dir.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/compression_zstd_dir",
],
},
@@ -308,7 +308,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'zstd:long',
'--file' => "$tempdir/compression_zstd_plain.sql.zst",
- '--with-statistics',
+ '--statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -327,7 +327,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/clean.sql",
'--clean',
- '--with-statistics',
+ '--statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
},
@@ -338,7 +338,7 @@ my %pgdump_runs = (
'--clean',
'--if-exists',
'--encoding' => 'UTF8', # no-op, just for testing
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -357,7 +357,7 @@ my %pgdump_runs = (
'--create',
'--no-reconnect', # no-op, just for testing
'--verbose',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -376,7 +376,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults.sql",
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -385,7 +385,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_no_public.sql",
- '--with-statistics',
+ '--statistics',
'regress_pg_dump_test',
],
},
@@ -395,7 +395,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--clean',
'--file' => "$tempdir/defaults_no_public_clean.sql",
- '--with-statistics',
+ '--statistics',
'regress_pg_dump_test',
],
},
@@ -404,7 +404,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_public_owner.sql",
- '--with-statistics',
+ '--statistics',
'regress_public_owner',
],
},
@@ -419,14 +419,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.dump",
- '--with-statistics',
+ '--statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/defaults_custom_format.dump",
],
command_like => {
@@ -451,14 +451,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format",
- '--with-statistics',
+ '--statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/defaults_dir_format",
],
command_like => {
@@ -484,13 +484,13 @@ my %pgdump_runs = (
'--format' => 'directory',
'--jobs' => 2,
'--file' => "$tempdir/defaults_parallel",
- '--with-statistics',
+ '--statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/defaults_parallel.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/defaults_parallel",
],
},
@@ -502,14 +502,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.tar",
- '--with-statistics',
+ '--statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/defaults_tar_format.tar",
],
},
@@ -518,7 +518,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_dump_test_schema.sql",
'--exclude-schema' => 'dump_test',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -527,7 +527,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_test_table.sql",
'--exclude-table' => 'dump_test.test_table',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -536,7 +536,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_measurement.sql",
'--exclude-table-and-children' => 'dump_test.measurement',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -546,7 +546,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_measurement_data.sql",
'--exclude-table-data-and-children' => 'dump_test.measurement',
'--no-unlogged-table-data',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -556,7 +556,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_test_table_data.sql",
'--exclude-table-data' => 'dump_test.test_table',
'--no-unlogged-table-data',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -575,7 +575,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_globals.sql",
'--globals-only',
'--no-sync',
- '--with-statistics',
+ '--statistics',
],
},
pg_dumpall_globals_clean => {
@@ -585,14 +585,14 @@ my %pgdump_runs = (
'--globals-only',
'--clean',
'--no-sync',
- '--with-statistics',
+ '--statistics',
],
},
pg_dumpall_dbprivs => {
dump_cmd => [
'pg_dumpall', '--no-sync',
'--file' => "$tempdir/pg_dumpall_dbprivs.sql",
- '--with-statistics',
+ '--statistics',
],
},
pg_dumpall_exclude => {
@@ -602,7 +602,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_exclude.sql",
'--exclude-database' => '*dump_test*',
'--no-sync',
- '--with-statistics',
+ '--statistics',
],
},
no_toast_compression => {
@@ -610,7 +610,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_toast_compression.sql",
'--no-toast-compression',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -619,7 +619,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_large_objects.sql",
'--no-large-objects',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -628,7 +628,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_policies.sql",
'--no-policies',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -637,7 +637,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_privs.sql",
'--no-privileges',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -646,7 +646,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_owner.sql",
'--no-owner',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -655,7 +655,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_table_access_method.sql",
'--no-table-access-method',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -664,7 +664,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/only_dump_test_schema.sql",
'--schema' => 'dump_test',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -675,7 +675,7 @@ my %pgdump_runs = (
'--table' => 'dump_test.test_table',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -686,7 +686,7 @@ my %pgdump_runs = (
'--table-and-children' => 'dump_test.measurement',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -696,7 +696,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/role.sql",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -709,13 +709,13 @@ my %pgdump_runs = (
'--file' => "$tempdir/role_parallel",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
- '--with-statistics',
+ '--statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/role_parallel.sql",
- '--with-statistics',
+ '--statistics',
"$tempdir/role_parallel",
],
},
@@ -744,7 +744,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_pre_data.sql",
'--section' => 'pre-data',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -753,7 +753,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_data.sql",
'--section' => 'data',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -762,7 +762,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_post_data.sql",
'--section' => 'post-data',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -773,7 +773,7 @@ my %pgdump_runs = (
'--schema' => 'dump_test',
'--large-objects',
'--no-large-objects',
- '--with-statistics',
+ '--statistics',
'postgres',
],
},
@@ -789,7 +789,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
"--file=$tempdir/no_data_no_schema.sql", '--no-data',
'--no-schema', 'postgres',
- '--with-statistics',
+ '--statistics',
],
},
statistics_only => {
@@ -799,18 +799,11 @@ my %pgdump_runs = (
'postgres',
],
},
- schema_only_with_statistics => {
- dump_cmd => [
- 'pg_dump', '--no-sync',
- "--file=$tempdir/schema_only_with_statistics.sql",
- '--schema-only', '--with-statistics', 'postgres',
- ],
- },
no_schema => {
dump_cmd => [
'pg_dump', '--no-sync',
"--file=$tempdir/no_schema.sql", '--no-schema',
- '--with-statistics', 'postgres',
+ '--statistics', 'postgres',
],
},);
@@ -5212,6 +5205,17 @@ command_fails_like(
'pg_dump',
'--port' => $port,
'--strict-names',
+ '--schema-only',
+ '--statistics',
+ ],
+ qr/\Qpg_dump: error: options -s\/--schema-only and --statistics cannot be used together\E/,
+ 'cannot use --schema-only and --statistics together');
+
+command_fails_like(
+ [
+ 'pg_dump',
+ '--port' => $port,
+ '--strict-names',
'--table' => 'nonexistent*'
],
qr/\Qpg_dump: error: no matching tables were found for pattern\E/,
diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl
deleted file mode 100644
index c274b777586..00000000000
--- a/src/bin/pg_dump/t/006_pg_dumpall.pl
+++ /dev/null
@@ -1,400 +0,0 @@
-# Copyright (c) 2021-2025, PostgreSQL Global Development Group
-
-use strict;
-use warnings FATAL => 'all';
-
-use PostgreSQL::Test::Cluster;
-use PostgreSQL::Test::Utils;
-use Test::More;
-
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
-my $run_db = 'postgres';
-my $sep = $windows_os ? "\\" : "/";
-
-# Tablespace locations used by "restore_tablespace" test case.
-my $tablespace1 = "${tempdir}${sep}tbl1";
-my $tablespace2 = "${tempdir}${sep}tbl2";
-mkdir($tablespace1) || die "mkdir $tablespace1 $!";
-mkdir($tablespace2) || die "mkdir $tablespace2 $!";
-
-# Escape tablespace locations on Windows.
-$tablespace1 = $windows_os ? ($tablespace1 =~ s/\\/\\\\/gr) : $tablespace1;
-$tablespace2 = $windows_os ? ($tablespace2 =~ s/\\/\\\\/gr) : $tablespace2;
-
-# Where pg_dumpall will be executed.
-my $node = PostgreSQL::Test::Cluster->new('node');
-$node->init;
-$node->start;
-
-
-###############################################################
-# Definition of the pg_dumpall test cases to run.
-#
-# Each of these test cases is named, and those names are used for failure
-# reporting and to save the dump and restore information the test needs to
-# assert on.
-#
-# The "setup_sql" entry is a valid psql script containing SQL commands to
-# execute before the tests actually run. All setups are executed before any
-# test execution.
-#
-# The "dump_cmd" and "restore_cmd" entries are the commands that will be
-# executed. The "restore_cmd" must include the --file flag to save the
-# restore output so that we can assert on it.
-#
-# The "like" and "unlike" entries are regexps matched against the pg_restore
-# output. At least one of them must be filled in per test case, but both may
-# be given; see the "excluding_databases" test case for an example.
-my %pgdumpall_runs = (
- restore_roles => {
- setup_sql => '
- CREATE ROLE dumpall WITH ENCRYPTED PASSWORD \'admin\' SUPERUSER;
- CREATE ROLE dumpall2 WITH REPLICATION CONNECTION LIMIT 10;',
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_roles",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_roles.sql",
- "$tempdir/restore_roles",
- ],
- like => qr/
- ^\s*\QCREATE ROLE dumpall;\E\s*\n
- \s*\QALTER ROLE dumpall WITH SUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN NOREPLICATION NOBYPASSRLS PASSWORD 'SCRAM-SHA-256\E
- [^']+';\s*\n
- \s*\QCREATE ROLE dumpall2;\E
- \s*\QALTER ROLE dumpall2 WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN REPLICATION NOBYPASSRLS CONNECTION LIMIT 10;\E
- /xm
- },
-
- restore_tablespace => {
- setup_sql => "
- CREATE ROLE tap;
- CREATE TABLESPACE tbl1 OWNER tap LOCATION '$tablespace1';
- CREATE TABLESPACE tbl2 OWNER tap LOCATION '$tablespace2' WITH (seq_page_cost=1.0);",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_tablespace",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_tablespace.sql",
- "$tempdir/restore_tablespace",
- ],
- # Match "E" as optional since it is added on LOCATION when running on
- # Windows.
- like => qr/^
- \n\QCREATE TABLESPACE tbl1 OWNER tap LOCATION \E(?:E)?\Q'$tablespace1';\E
- \n\QCREATE TABLESPACE tbl2 OWNER tap LOCATION \E(?:E)?\Q'$tablespace2';\E
- \n\QALTER TABLESPACE tbl2 SET (seq_page_cost=1.0);\E
- /xm,
- },
-
- restore_grants => {
- setup_sql => "
- CREATE DATABASE tapgrantsdb;
- CREATE SCHEMA private;
- CREATE SEQUENCE serial START 101;
- CREATE FUNCTION fn() RETURNS void AS \$\$
- BEGIN
- END;
- \$\$ LANGUAGE plpgsql;
- CREATE ROLE super;
- CREATE ROLE grant1;
- CREATE ROLE grant2;
- CREATE ROLE grant3;
- CREATE ROLE grant4;
- CREATE ROLE grant5;
- CREATE ROLE grant6;
- CREATE ROLE grant7;
- CREATE ROLE grant8;
-
- CREATE TABLE t (id int);
- INSERT INTO t VALUES (1), (2), (3), (4);
-
- GRANT SELECT ON TABLE t TO grant1;
- GRANT INSERT ON TABLE t TO grant2;
- GRANT ALL PRIVILEGES ON TABLE t to grant3;
- GRANT CONNECT, CREATE ON DATABASE tapgrantsdb TO grant4;
- GRANT USAGE, CREATE ON SCHEMA private TO grant5;
- GRANT USAGE, SELECT, UPDATE ON SEQUENCE serial TO grant6;
- GRANT super TO grant7;
- GRANT EXECUTE ON FUNCTION fn() TO grant8;
- ",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_grants",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_grants.sql",
- "$tempdir/restore_grants",
- ],
- like => qr/^
- \n\QGRANT super TO grant7 WITH INHERIT TRUE GRANTED BY\E
- (.*\n)*
- \n\QGRANT ALL ON SCHEMA private TO grant5;\E
- (.*\n)*
- \n\QGRANT ALL ON FUNCTION public.fn() TO grant8;\E
- (.*\n)*
- \n\QGRANT ALL ON SEQUENCE public.serial TO grant6;\E
- (.*\n)*
- \n\QGRANT SELECT ON TABLE public.t TO grant1;\E
- \n\QGRANT INSERT ON TABLE public.t TO grant2;\E
- \n\QGRANT ALL ON TABLE public.t TO grant3;\E
- (.*\n)*
- \n\QGRANT CREATE,CONNECT ON DATABASE tapgrantsdb TO grant4;\E
- /xm,
- },
-
- excluding_databases => {
- setup_sql => 'CREATE DATABASE db1;
- \c db1
- CREATE TABLE t1 (id int);
- INSERT INTO t1 VALUES (1), (2), (3), (4);
- CREATE TABLE t2 (id int);
- INSERT INTO t2 VALUES (1), (2), (3), (4);
-
- CREATE DATABASE db2;
- \c db2
- CREATE TABLE t3 (id int);
- INSERT INTO t3 VALUES (1), (2), (3), (4);
- CREATE TABLE t4 (id int);
- INSERT INTO t4 VALUES (1), (2), (3), (4);
-
- CREATE DATABASE dbex3;
- \c dbex3
- CREATE TABLE t5 (id int);
- INSERT INTO t5 VALUES (1), (2), (3), (4);
- CREATE TABLE t6 (id int);
- INSERT INTO t6 VALUES (1), (2), (3), (4);
-
- CREATE DATABASE dbex4;
- \c dbex4
- CREATE TABLE t7 (id int);
- INSERT INTO t7 VALUES (1), (2), (3), (4);
- CREATE TABLE t8 (id int);
- INSERT INTO t8 VALUES (1), (2), (3), (4);
-
- CREATE DATABASE db5;
- \c db5
- CREATE TABLE t9 (id int);
- INSERT INTO t9 VALUES (1), (2), (3), (4);
- CREATE TABLE t10 (id int);
- INSERT INTO t10 VALUES (1), (2), (3), (4);
- ',
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/excluding_databases",
- '--exclude-database' => 'dbex*',
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/excluding_databases.sql",
- '--exclude-database' => 'db5',
- "$tempdir/excluding_databases",
- ],
- like => qr/^
- \n\QCREATE DATABASE db1\E
- (.*\n)*
- \n\QCREATE TABLE public.t1 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t2 (\E
- (.*\n)*
- \n\QCREATE DATABASE db2\E
- (.*\n)*
- \n\QCREATE TABLE public.t3 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t4 (/xm,
- unlike => qr/^
- \n\QCREATE DATABASE db3\E
- (.*\n)*
- \n\QCREATE TABLE public.t5 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t6 (\E
- (.*\n)*
- \n\QCREATE DATABASE db4\E
- (.*\n)*
- \n\QCREATE TABLE public.t7 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t8 (\E
- \n\QCREATE DATABASE db5\E
- (.*\n)*
- \n\QCREATE TABLE public.t9 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t10 (\E
- /xm,
- },
-
- format_directory => {
- setup_sql => "CREATE TABLE format_directory(a int, b boolean, c text);
- INSERT INTO format_directory VALUES (1, true, 'name1'), (2, false, 'name2');",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/format_directory",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/format_directory.sql",
- "$tempdir/format_directory",
- ],
- like => qr/^\n\QCOPY public.format_directory (a, b, c) FROM stdin;/xm
- },
-
- format_tar => {
- setup_sql => "CREATE TABLE format_tar(a int, b boolean, c text);
- INSERT INTO format_tar VALUES (1, false, 'name3'), (2, true, 'name4');",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'tar',
- '--file' => "$tempdir/format_tar",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'tar',
- '--file' => "$tempdir/format_tar.sql",
- "$tempdir/format_tar",
- ],
- like => qr/^\n\QCOPY public.format_tar (a, b, c) FROM stdin;/xm
- },
-
- format_custom => {
- setup_sql => "CREATE TABLE format_custom(a int, b boolean, c text);
- INSERT INTO format_custom VALUES (1, false, 'name5'), (2, true, 'name6');",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'custom',
- '--file' => "$tempdir/format_custom",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'custom',
- '--file' => "$tempdir/format_custom.sql",
- "$tempdir/format_custom",
- ],
- like => qr/^ \n\QCOPY public.format_custom (a, b, c) FROM stdin;/xm
- },
-
- dump_globals_only => {
- setup_sql => "CREATE TABLE format_dir(a int, b boolean, c text);
- INSERT INTO format_dir VALUES (1, false, 'name5'), (2, true, 'name6');",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--globals-only',
- '--file' => "$tempdir/dump_globals_only",
- ],
- restore_cmd => [
- 'pg_restore', '-C', '--globals-only',
- '--format' => 'directory',
- '--file' => "$tempdir/dump_globals_only.sql",
- "$tempdir/dump_globals_only",
- ],
- like => qr/
- ^\s*\QCREATE ROLE dumpall;\E\s*\n
- /xm
- },);
-
-# First execute the setup_sql
-foreach my $run (sort keys %pgdumpall_runs)
-{
- if ($pgdumpall_runs{$run}->{setup_sql})
- {
- $node->safe_psql($run_db, $pgdumpall_runs{$run}->{setup_sql});
- }
-}
-
-# Execute the tests
-foreach my $run (sort keys %pgdumpall_runs)
-{
- # Create a new target cluster to pg_restore each test case run so that we
- # don't need to take care of the cleanup from the target cluster after each
- # run.
- my $target_node = PostgreSQL::Test::Cluster->new("target_$run");
- $target_node->init;
- $target_node->start;
-
- # Dumpall from node cluster.
- $node->command_ok(\@{ $pgdumpall_runs{$run}->{dump_cmd} },
- "$run: pg_dumpall runs");
-
- # Restore the dump on "target_node" cluster.
- my @restore_cmd = (
- @{ $pgdumpall_runs{$run}->{restore_cmd} },
- '--host', $target_node->host, '--port', $target_node->port);
-
- my ($stdout, $stderr) = run_command(\@restore_cmd);
-
- # pg_restore --file output file.
- my $output_file = slurp_file("$tempdir/${run}.sql");
-
- if ( !($pgdumpall_runs{$run}->{like})
- && !($pgdumpall_runs{$run}->{unlike}))
- {
- die "missing \"like\" or \"unlike\" in test \"$run\"";
- }
-
- if ($pgdumpall_runs{$run}->{like})
- {
- like($output_file, $pgdumpall_runs{$run}->{like}, "should dump $run");
- }
-
- if ($pgdumpall_runs{$run}->{unlike})
- {
- unlike(
- $output_file,
- $pgdumpall_runs{$run}->{unlike},
- "should not dump $run");
- }
-}
-
-# Some negative test case with dump of pg_dumpall and restore using pg_restore
-# test case 1: when -C is not used in pg_restore with dump of pg_dumpall
-$node->command_fails_like(
- [
- 'pg_restore',
- "$tempdir/format_custom",
- '--format' => 'custom',
- '--file' => "$tempdir/error_test.sql",
- ],
- qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/,
- 'When -C is not used in pg_restore with dump of pg_dumpall');
-
-# test case 2: When --list option is used with dump of pg_dumpall
-$node->command_fails_like(
- [
- 'pg_restore',
- "$tempdir/format_custom", '-C',
- '--format' => 'custom',
- '--list',
- '--file' => "$tempdir/error_test.sql",
- ],
- qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/,
- 'When --list is used in pg_restore with dump of pg_dumpall');
-
-# test case 3: When non-exist database is given with -d option
-$node->command_fails_like(
- [
- 'pg_restore',
- "$tempdir/format_custom", '-C',
- '--format' => 'custom',
- '-d' => 'dbpq',
- ],
- qr/\Qpg_restore: error: could not connect to database "dbpq"\E/,
- 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall'
-);
-
-$node->stop('fast');
-
-done_testing();
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 5e6403f0773..310f53c5577 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -956,12 +956,12 @@ check_for_new_tablespace_dir(void)
prep_status("Checking for new cluster tablespace directories");
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++)
{
struct stat statbuf;
snprintf(new_tablespace_dir, MAXPGPATH, "%s%s",
- os_info.old_tablespaces[tblnum],
+ new_cluster.tablespaces[tblnum],
new_cluster.tablespace_suffix);
if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT)
@@ -1013,17 +1013,17 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
* directory. We can't create a proper old cluster delete script in that
* case.
*/
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++)
{
- char old_tablespace_dir[MAXPGPATH];
+ char new_tablespace_dir[MAXPGPATH];
- strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH);
- canonicalize_path(old_tablespace_dir);
- if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir))
+ strlcpy(new_tablespace_dir, new_cluster.tablespaces[tblnum], MAXPGPATH);
+ canonicalize_path(new_tablespace_dir);
+ if (path_is_prefix_of_path(old_cluster_pgdata, new_tablespace_dir))
{
/* reproduce warning from CREATE TABLESPACE that is in the log */
pg_log(PG_WARNING,
- "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", old_tablespace_dir);
+ "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", new_tablespace_dir);
/* Unlink file in case it is left over from a previous run. */
unlink(*deletion_script_file_name);
@@ -1051,9 +1051,9 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
/* delete old cluster's alternate tablespaces */
old_tblspc_suffix = pg_strdup(old_cluster.tablespace_suffix);
fix_path_separator(old_tblspc_suffix);
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++)
fprintf(script, RMDIR_CMD " %c%s%s%c\n", PATH_QUOTE,
- fix_path_separator(os_info.old_tablespaces[tblnum]),
+ fix_path_separator(old_cluster.tablespaces[tblnum]),
old_tblspc_suffix, PATH_QUOTE);
pfree(old_tblspc_suffix);
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 183f08ce1e8..55f6e7b4d9c 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -58,7 +58,7 @@ generate_old_dump(void)
(user_opts.transfer_mode == TRANSFER_MODE_SWAP) ?
"" : "--sequence-data",
log_opts.verbose ? "--verbose" : "",
- user_opts.do_statistics ? "--with-statistics" : "--no-statistics",
+ user_opts.do_statistics ? "--statistics" : "--no-statistics",
log_opts.dumpdir,
sql_file_name, escaped_connstr.data);
diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c
index a437067cdca..c39eb077c2f 100644
--- a/src/bin/pg_upgrade/info.c
+++ b/src/bin/pg_upgrade/info.c
@@ -443,10 +443,26 @@ get_db_infos(ClusterInfo *cluster)
for (tupnum = 0; tupnum < ntups; tupnum++)
{
+ char *spcloc = PQgetvalue(res, tupnum, i_spclocation);
+ bool inplace = spcloc[0] && !is_absolute_path(spcloc);
+
dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid));
dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname));
- snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s",
- PQgetvalue(res, tupnum, i_spclocation));
+
+ /*
+ * The tablespace location might be "", meaning the cluster default
+ * location, i.e. pg_default or pg_global. For in-place tablespaces,
+ * pg_tablespace_location() returns a path relative to the data
+ * directory.
+ */
+ if (inplace)
+ snprintf(dbinfos[tupnum].db_tablespace,
+ sizeof(dbinfos[tupnum].db_tablespace),
+ "%s/%s", cluster->pgdata, spcloc);
+ else
+ snprintf(dbinfos[tupnum].db_tablespace,
+ sizeof(dbinfos[tupnum].db_tablespace),
+ "%s", spcloc);
}
PQclear(res);
@@ -616,11 +632,21 @@ process_rel_infos(DbInfo *dbinfo, PGresult *res, void *arg)
/* Is the tablespace oid non-default? */
if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
{
+ char *spcloc = PQgetvalue(res, relnum, i_spclocation);
+ bool inplace = spcloc[0] && !is_absolute_path(spcloc);
+
/*
* The tablespace location might be "", meaning the cluster
- * default location, i.e. pg_default or pg_global.
+ * default location, i.e. pg_default or pg_global. For in-place
+ * tablespaces, pg_tablespace_location() returns a path relative
+ * to the data directory.
*/
- tablespace = PQgetvalue(res, relnum, i_spclocation);
+ if (inplace)
+ tablespace = psprintf("%s/%s",
+ os_info.running_cluster->pgdata,
+ spcloc);
+ else
+ tablespace = spcloc;
/* Can we reuse the previous string allocation? */
if (last_tablespace && strcmp(tablespace, last_tablespace) == 0)
@@ -630,6 +656,10 @@ process_rel_infos(DbInfo *dbinfo, PGresult *res, void *arg)
last_tablespace = curr->tablespace = pg_strdup(tablespace);
curr->tblsp_alloc = true;
}
+
+ /* Free palloc'd string for in-place tablespaces. */
+ if (inplace)
+ pfree(tablespace);
}
else
/* A zero reltablespace oid indicates the database tablespace. */
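
The two hunks above encode the same resolution rule for pg_tablespace_location() results. A minimal standalone sketch of that rule (the helper name is illustrative, not part of the patch):

    /*
     * Hypothetical helper mirroring the logic above: pg_tablespace_location()
     * returns "" for the cluster default, an absolute path for a regular
     * tablespace, and a data-directory-relative path for an in-place one.
     */
    static char *
    resolve_tablespace_path(const char *pgdata, const char *spcloc)
    {
        if (spcloc[0] == '\0' || is_absolute_path(spcloc))
            return pg_strdup(spcloc);   /* default or regular tablespace */
        return psprintf("%s/%s", pgdata, spcloc);   /* in-place tablespace */
    }
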
diff --git a/src/bin/pg_upgrade/parallel.c b/src/bin/pg_upgrade/parallel.c
index 056aa2edaee..6d7941844a7 100644
--- a/src/bin/pg_upgrade/parallel.c
+++ b/src/bin/pg_upgrade/parallel.c
@@ -40,6 +40,7 @@ typedef struct
char *old_pgdata;
char *new_pgdata;
char *old_tablespace;
+ char *new_tablespace;
} transfer_thread_arg;
static exec_thread_arg **exec_thread_args;
@@ -171,7 +172,7 @@ win32_exec_prog(exec_thread_arg *args)
void
parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
char *old_pgdata, char *new_pgdata,
- char *old_tablespace)
+ char *old_tablespace, char *new_tablespace)
{
#ifndef WIN32
pid_t child;
@@ -181,7 +182,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
#endif
if (user_opts.jobs <= 1)
- transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL);
+ transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL, NULL);
else
{
/* parallel */
@@ -225,7 +226,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
if (child == 0)
{
transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata,
- old_tablespace);
+ old_tablespace, new_tablespace);
/* if we take another exit path, it will be non-zero */
/* use _exit to skip atexit() functions */
_exit(0);
@@ -246,6 +247,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
new_arg->new_pgdata = pg_strdup(new_pgdata);
pg_free(new_arg->old_tablespace);
new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL;
+ new_arg->new_tablespace = new_tablespace ? pg_strdup(new_tablespace) : NULL;
child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs,
new_arg, 0, NULL);
@@ -263,7 +265,8 @@ DWORD
win32_transfer_all_new_dbs(transfer_thread_arg *args)
{
transfer_all_new_dbs(args->old_db_arr, args->new_db_arr, args->old_pgdata,
- args->new_pgdata, args->old_tablespace);
+ args->new_pgdata, args->old_tablespace,
+ args->new_tablespace);
/* terminates thread */
return 0;
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index e9401430e69..0ef47be0dc1 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -300,6 +300,8 @@ typedef struct
uint32 major_version; /* PG_VERSION of cluster */
char major_version_str[64]; /* string PG_VERSION of cluster */
uint32 bin_version; /* version returned from pg_ctl */
+ char **tablespaces; /* tablespace directories */
+ int num_tablespaces;
const char *tablespace_suffix; /* directory specification */
int nsubs; /* number of subscriptions */
bool sub_retain_dead_tuples; /* whether a subscription enables
@@ -356,8 +358,6 @@ typedef struct
const char *progname; /* complete pathname for this program */
char *user; /* username for clusters */
bool user_specified; /* user specified on command-line */
- char **old_tablespaces; /* tablespaces */
- int num_old_tablespaces;
LibraryInfo *libraries; /* loadable libraries */
int num_libraries;
ClusterInfo *running_cluster;
@@ -457,7 +457,7 @@ void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
void transfer_all_new_dbs(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata,
- char *old_tablespace);
+ char *old_tablespace, char *new_tablespace);
/* tablespace.c */
@@ -505,7 +505,7 @@ void parallel_exec_prog(const char *log_file, const char *opt_log_file,
const char *fmt,...) pg_attribute_printf(3, 4);
void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
char *old_pgdata, char *new_pgdata,
- char *old_tablespace);
+ char *old_tablespace, char *new_tablespace);
bool reap_child(bool wait_for_child);
/* task.c */
diff --git a/src/bin/pg_upgrade/relfilenumber.c b/src/bin/pg_upgrade/relfilenumber.c
index 8d8e816a01f..38c17ceabf2 100644
--- a/src/bin/pg_upgrade/relfilenumber.c
+++ b/src/bin/pg_upgrade/relfilenumber.c
@@ -17,7 +17,7 @@
#include "common/logging.h"
#include "pg_upgrade.h"
-static void transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace);
+static void transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace, char *new_tablespace);
static void transfer_relfile(FileNameMap *map, const char *type_suffix, bool vm_must_add_frozenbit);
/*
@@ -136,21 +136,22 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
*/
if (user_opts.jobs <= 1)
parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
- new_pgdata, NULL);
+ new_pgdata, NULL, NULL);
else
{
int tblnum;
/* transfer default tablespace */
parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
- new_pgdata, old_pgdata);
+ new_pgdata, old_pgdata, new_pgdata);
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++)
parallel_transfer_all_new_dbs(old_db_arr,
new_db_arr,
old_pgdata,
new_pgdata,
- os_info.old_tablespaces[tblnum]);
+ old_cluster.tablespaces[tblnum],
+ new_cluster.tablespaces[tblnum]);
/* reap all children */
while (reap_child(true) == true)
;
@@ -169,7 +170,8 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
*/
void
transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
- char *old_pgdata, char *new_pgdata, char *old_tablespace)
+ char *old_pgdata, char *new_pgdata,
+ char *old_tablespace, char *new_tablespace)
{
int old_dbnum,
new_dbnum;
@@ -204,7 +206,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
new_pgdata);
if (n_maps)
{
- transfer_single_new_db(mappings, n_maps, old_tablespace);
+ transfer_single_new_db(mappings, n_maps, old_tablespace, new_tablespace);
}
/* We allocate something even for n_maps == 0 */
pg_free(mappings);
@@ -234,10 +236,10 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
* moved_db_dir: Destination for the pg_restore-generated database directory.
*/
static bool
-prepare_for_swap(const char *old_tablespace, Oid db_oid,
- char *old_catalog_dir, char *new_db_dir, char *moved_db_dir)
+prepare_for_swap(const char *old_tablespace, const char *new_tablespace,
+ Oid db_oid, char *old_catalog_dir, char *new_db_dir,
+ char *moved_db_dir)
{
- const char *new_tablespace;
const char *old_tblspc_suffix;
const char *new_tblspc_suffix;
char old_tblspc[MAXPGPATH];
@@ -247,24 +249,14 @@ prepare_for_swap(const char *old_tablespace, Oid db_oid,
struct stat st;
if (strcmp(old_tablespace, old_cluster.pgdata) == 0)
- {
- new_tablespace = new_cluster.pgdata;
- new_tblspc_suffix = "/base";
old_tblspc_suffix = "/base";
- }
else
- {
- /*
- * XXX: The below line is a hack to deal with the fact that we
- * presently don't have an easy way to find the corresponding new
- * tablespace's path. This will need to be fixed if/when we add
- * pg_upgrade support for in-place tablespaces.
- */
- new_tablespace = old_tablespace;
+ old_tblspc_suffix = old_cluster.tablespace_suffix;
+ if (strcmp(new_tablespace, new_cluster.pgdata) == 0)
+ new_tblspc_suffix = "/base";
+ else
new_tblspc_suffix = new_cluster.tablespace_suffix;
- old_tblspc_suffix = old_cluster.tablespace_suffix;
- }
/* Old and new cluster paths. */
snprintf(old_tblspc, sizeof(old_tblspc), "%s%s", old_tablespace, old_tblspc_suffix);
@@ -450,7 +442,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir,
* during pg_restore.
*/
static void
-do_swap(FileNameMap *maps, int size, char *old_tablespace)
+do_swap(FileNameMap *maps, int size, char *old_tablespace, char *new_tablespace)
{
char old_catalog_dir[MAXPGPATH];
char new_db_dir[MAXPGPATH];
@@ -470,21 +462,23 @@ do_swap(FileNameMap *maps, int size, char *old_tablespace)
*/
if (old_tablespace)
{
- if (prepare_for_swap(old_tablespace, maps[0].db_oid,
+ if (prepare_for_swap(old_tablespace, new_tablespace, maps[0].db_oid,
old_catalog_dir, new_db_dir, moved_db_dir))
swap_catalog_files(maps, size,
old_catalog_dir, new_db_dir, moved_db_dir);
}
else
{
- if (prepare_for_swap(old_cluster.pgdata, maps[0].db_oid,
+ if (prepare_for_swap(old_cluster.pgdata, new_cluster.pgdata, maps[0].db_oid,
old_catalog_dir, new_db_dir, moved_db_dir))
swap_catalog_files(maps, size,
old_catalog_dir, new_db_dir, moved_db_dir);
- for (int tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (int tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++)
{
- if (prepare_for_swap(os_info.old_tablespaces[tblnum], maps[0].db_oid,
+ if (prepare_for_swap(old_cluster.tablespaces[tblnum],
+ new_cluster.tablespaces[tblnum],
+ maps[0].db_oid,
old_catalog_dir, new_db_dir, moved_db_dir))
swap_catalog_files(maps, size,
old_catalog_dir, new_db_dir, moved_db_dir);
@@ -498,7 +492,8 @@ do_swap(FileNameMap *maps, int size, char *old_tablespace)
* create links for mappings stored in "maps" array.
*/
static void
-transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace)
+transfer_single_new_db(FileNameMap *maps, int size,
+ char *old_tablespace, char *new_tablespace)
{
int mapnum;
bool vm_must_add_frozenbit = false;
@@ -520,7 +515,7 @@ transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace)
*/
Assert(!vm_must_add_frozenbit);
- do_swap(maps, size, old_tablespace);
+ do_swap(maps, size, old_tablespace, new_tablespace);
return;
}
diff --git a/src/bin/pg_upgrade/t/006_transfer_modes.pl b/src/bin/pg_upgrade/t/006_transfer_modes.pl
index 58fe8a8c7dc..348f4021462 100644
--- a/src/bin/pg_upgrade/t/006_transfer_modes.pl
+++ b/src/bin/pg_upgrade/t/006_transfer_modes.pl
@@ -38,6 +38,13 @@ sub test_mode
}
$new->init();
+ # allow_in_place_tablespaces is available as far back as v10.
+ if ($old->pg_version >= 10)
+ {
+ $new->append_conf('postgresql.conf', "allow_in_place_tablespaces = true");
+ $old->append_conf('postgresql.conf', "allow_in_place_tablespaces = true");
+ }
+
# Create a small variety of simple test objects on the old cluster. We'll
# check that these reach the new version after upgrading.
$old->start;
@@ -49,8 +56,7 @@ sub test_mode
$old->safe_psql('testdb1', "VACUUM FULL test2");
$old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432");
- # For cross-version tests, we can also check that pg_upgrade handles
- # tablespaces.
+ # If an old installation is provided, we can test non-in-place tablespaces.
if (defined($ENV{oldinstall}))
{
my $tblspc = PostgreSQL::Test::Utils::tempdir_short();
@@ -64,6 +70,19 @@ sub test_mode
$old->safe_psql('testdb2',
"CREATE TABLE test4 AS SELECT generate_series(400, 502)");
}
+
+ # If the old cluster is >= v10, we can test in-place tablespaces.
+ if ($old->pg_version >= 10)
+ {
+ $old->safe_psql('postgres',
+ "CREATE TABLESPACE inplc_tblspc LOCATION ''");
+ $old->safe_psql('postgres',
+ "CREATE DATABASE testdb3 TABLESPACE inplc_tblspc");
+ $old->safe_psql('postgres',
+ "CREATE TABLE test5 TABLESPACE inplc_tblspc AS SELECT generate_series(503, 606)");
+ $old->safe_psql('testdb3',
+ "CREATE TABLE test6 AS SELECT generate_series(607, 711)");
+ }
$old->stop;
my $result = command_ok_or_fails_like(
@@ -94,8 +113,7 @@ sub test_mode
$result = $new->safe_psql('testdb1', "SELECT nextval('testseq')");
is($result, '5432', "sequence data after pg_upgrade $mode");
- # For cross-version tests, we should have some objects in a non-default
- # tablespace.
+ # Tests for non-in-place tablespaces.
if (defined($ENV{oldinstall}))
{
$result =
@@ -105,6 +123,15 @@ sub test_mode
$new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
is($result, '103', "test4 data after pg_upgrade $mode");
}
+
+ # Tests for in-place tablespaces.
+ if ($old->pg_version >= 10)
+ {
+ $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test5");
+ is($result, '104', "test5 data after pg_upgrade $mode");
+ $result = $new->safe_psql('testdb3', "SELECT COUNT(*) FROM test6");
+ is($result, '105', "test6 data after pg_upgrade $mode");
+ }
$new->stop;
}
diff --git a/src/bin/pg_upgrade/tablespace.c b/src/bin/pg_upgrade/tablespace.c
index 3520a75ba31..151d74e1734 100644
--- a/src/bin/pg_upgrade/tablespace.c
+++ b/src/bin/pg_upgrade/tablespace.c
@@ -23,10 +23,20 @@ init_tablespaces(void)
set_tablespace_directory_suffix(&old_cluster);
set_tablespace_directory_suffix(&new_cluster);
- if (os_info.num_old_tablespaces > 0 &&
+ if (old_cluster.num_tablespaces > 0 &&
strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0)
- pg_fatal("Cannot upgrade to/from the same system catalog version when\n"
- "using tablespaces.");
+ {
+ for (int i = 0; i < old_cluster.num_tablespaces; i++)
+ {
+ /*
+ * In-place tablespaces are okay for same-version upgrades because
+ * their paths will differ between clusters.
+ */
+ if (strcmp(old_cluster.tablespaces[i], new_cluster.tablespaces[i]) == 0)
+ pg_fatal("Cannot upgrade to/from the same system catalog version when\n"
+ "using tablespaces.");
+ }
+ }
}
@@ -53,19 +63,48 @@ get_tablespace_paths(void)
res = executeQueryOrDie(conn, "%s", query);
- if ((os_info.num_old_tablespaces = PQntuples(res)) != 0)
- os_info.old_tablespaces =
- (char **) pg_malloc(os_info.num_old_tablespaces * sizeof(char *));
+ old_cluster.num_tablespaces = PQntuples(res);
+ new_cluster.num_tablespaces = PQntuples(res);
+
+ if (PQntuples(res) != 0)
+ {
+ old_cluster.tablespaces =
+ (char **) pg_malloc(old_cluster.num_tablespaces * sizeof(char *));
+ new_cluster.tablespaces =
+ (char **) pg_malloc(new_cluster.num_tablespaces * sizeof(char *));
+ }
else
- os_info.old_tablespaces = NULL;
+ {
+ old_cluster.tablespaces = NULL;
+ new_cluster.tablespaces = NULL;
+ }
i_spclocation = PQfnumber(res, "spclocation");
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++)
{
struct stat statBuf;
+ char *spcloc = PQgetvalue(res, tblnum, i_spclocation);
- os_info.old_tablespaces[tblnum] = pg_strdup(PQgetvalue(res, tblnum, i_spclocation));
+ /*
+ * For now, we do not expect non-in-place tablespaces to move during
+ * upgrade. If that changes, it will likely become necessary to run
+ * the above query on the new cluster, too.
+ *
+ * pg_tablespace_location() returns absolute paths for non-in-place
+ * tablespaces and relative paths for in-place ones, so we use
+ * is_absolute_path() to distinguish between them.
+ */
+ if (is_absolute_path(PQgetvalue(res, tblnum, i_spclocation)))
+ {
+ old_cluster.tablespaces[tblnum] = pg_strdup(spcloc);
+ new_cluster.tablespaces[tblnum] = old_cluster.tablespaces[tblnum];
+ }
+ else
+ {
+ old_cluster.tablespaces[tblnum] = psprintf("%s/%s", old_cluster.pgdata, spcloc);
+ new_cluster.tablespaces[tblnum] = psprintf("%s/%s", new_cluster.pgdata, spcloc);
+ }
/*
* Check that the tablespace path exists and is a directory.
@@ -76,21 +115,21 @@ get_tablespace_paths(void)
* that contains user tablespaces is moved as part of pg_upgrade
* preparation and the symbolic links are not updated.
*/
- if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0)
+ if (stat(old_cluster.tablespaces[tblnum], &statBuf) != 0)
{
if (errno == ENOENT)
report_status(PG_FATAL,
"tablespace directory \"%s\" does not exist",
- os_info.old_tablespaces[tblnum]);
+ old_cluster.tablespaces[tblnum]);
else
report_status(PG_FATAL,
"could not stat tablespace directory \"%s\": %m",
- os_info.old_tablespaces[tblnum]);
+ old_cluster.tablespaces[tblnum]);
}
if (!S_ISDIR(statBuf.st_mode))
report_status(PG_FATAL,
"tablespace path \"%s\" is not a directory",
- os_info.old_tablespaces[tblnum]);
+ old_cluster.tablespaces[tblnum]);
}
PQclear(res);
diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c
index dbc586c5bc3..1f2ca946fc5 100644
--- a/src/bin/psql/tab-complete.in.c
+++ b/src/bin/psql/tab-complete.in.c
@@ -1010,7 +1010,7 @@ static const SchemaQuery Query_for_trigger_of_table = {
#define Query_for_list_of_database_vars \
"SELECT conf FROM ("\
-" SELECT setdatabase, pg_catalog.split_part(unnest(setconfig),'=',1) conf"\
+" SELECT setdatabase, pg_catalog.split_part(pg_catalog.unnest(setconfig),'=',1) conf"\
" FROM pg_db_role_setting "\
" ) s, pg_database d "\
" WHERE s.setdatabase = d.oid "\
@@ -1086,9 +1086,12 @@ Keywords_for_list_of_owner_roles, "PUBLIC"
" WHERE usename LIKE '%s'"
#define Query_for_list_of_user_vars \
-" SELECT pg_catalog.split_part(pg_catalog.unnest(rolconfig),'=',1) "\
-" FROM pg_catalog.pg_roles "\
-" WHERE rolname LIKE '%s'"
+"SELECT conf FROM ("\
+" SELECT rolname, pg_catalog.split_part(pg_catalog.unnest(rolconfig),'=',1) conf"\
+" FROM pg_catalog.pg_roles"\
+" ) s"\
+" WHERE s.conf like '%s' "\
+" AND s.rolname LIKE '%s'"
#define Query_for_list_of_access_methods \
" SELECT amname "\
@@ -2517,7 +2520,10 @@ match_previous_words(int pattern_id,
/* ALTER USER,ROLE <name> RESET */
else if (Matches("ALTER", "USER|ROLE", MatchAny, "RESET"))
+ {
+ set_completion_reference(prev2_wd);
COMPLETE_WITH_QUERY_PLUS(Query_for_list_of_user_vars, "ALL");
+ }
/* ALTER USER,ROLE <name> WITH */
else if (Matches("ALTER", "USER|ROLE", MatchAny, "WITH"))
@@ -5015,7 +5021,7 @@ match_previous_words(int pattern_id,
/* Complete with a variable name */
else if (TailMatches("SET|RESET") &&
!TailMatches("UPDATE", MatchAny, "SET") &&
- !TailMatches("ALTER", "DATABASE", MatchAny, "RESET"))
+ !TailMatches("ALTER", "DATABASE|USER|ROLE", MatchAny, "RESET"))
COMPLETE_WITH_QUERY_VERBATIM_PLUS(Query_for_list_of_set_vars,
"CONSTRAINTS",
"TRANSACTION",
diff --git a/src/common/Makefile b/src/common/Makefile
index 1e2b91c83c4..2c720caa509 100644
--- a/src/common/Makefile
+++ b/src/common/Makefile
@@ -163,7 +163,7 @@ libpgcommon_shlib.a: $(OBJS_SHLIB)
# The JSON API normally exits on out-of-memory; disable that behavior for shared
# library builds. This requires libpq's pqexpbuffer.h.
jsonapi_shlib.o: override CPPFLAGS += -DJSONAPI_USE_PQEXPBUFFER
-jsonapi_shlib.o: override CPPFLAGS += -I$(libpq_srcdir)
+jsonapi_shlib.o: override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS)
# Because this uses its own compilation rule, it doesn't use the
# dependency tracking logic from Makefile.global. To make sure that
diff --git a/src/include/catalog/pg_subscription_rel.h b/src/include/catalog/pg_subscription_rel.h
index c91797c869c..f458447a0e5 100644
--- a/src/include/catalog/pg_subscription_rel.h
+++ b/src/include/catalog/pg_subscription_rel.h
@@ -85,7 +85,7 @@ typedef struct SubscriptionRelState
extern void AddSubscriptionRelState(Oid subid, Oid relid, char state,
XLogRecPtr sublsn, bool retain_lock);
extern void UpdateSubscriptionRelState(Oid subid, Oid relid, char state,
- XLogRecPtr sublsn);
+ XLogRecPtr sublsn, bool already_locked);
extern char GetSubscriptionRelState(Oid subid, Oid relid, XLogRecPtr *sublsn);
extern void RemoveSubscriptionRel(Oid subid, Oid relid);
diff --git a/src/include/libpq/libpq-be-fe-helpers.h b/src/include/libpq/libpq-be-fe-helpers.h
index af13bd6bf3d..1c4a342090c 100644
--- a/src/include/libpq/libpq-be-fe-helpers.h
+++ b/src/include/libpq/libpq-be-fe-helpers.h
@@ -30,17 +30,7 @@
#ifndef LIBPQ_BE_FE_HELPERS_H
#define LIBPQ_BE_FE_HELPERS_H
-/*
- * Despite the name, BUILDING_DLL is set only when building code directly part
- * of the backend. Which also is where libpq isn't allowed to be
- * used. Obviously this doesn't protect against libpq-fe.h getting included
- * otherwise, but perhaps still protects against a few mistakes...
- */
-#ifdef BUILDING_DLL
-#error "libpq may not be used code directly built into the backend"
-#endif
-
-#include "libpq-fe.h"
+#include "libpq/libpq-be-fe.h"
#include "miscadmin.h"
#include "storage/fd.h"
#include "storage/latch.h"
@@ -289,41 +279,30 @@ libpqsrv_exec_params(PGconn *conn,
static inline PGresult *
libpqsrv_get_result_last(PGconn *conn, uint32 wait_event_info)
{
- PGresult *volatile lastResult = NULL;
+ PGresult *lastResult = NULL;
- /* In what follows, do not leak any PGresults on an error. */
- PG_TRY();
+ for (;;)
{
- for (;;)
- {
- /* Wait for, and collect, the next PGresult. */
- PGresult *result;
+ /* Wait for, and collect, the next PGresult. */
+ PGresult *result;
- result = libpqsrv_get_result(conn, wait_event_info);
- if (result == NULL)
- break; /* query is complete, or failure */
+ result = libpqsrv_get_result(conn, wait_event_info);
+ if (result == NULL)
+ break; /* query is complete, or failure */
- /*
- * Emulate PQexec()'s behavior of returning the last result when
- * there are many.
- */
- PQclear(lastResult);
- lastResult = result;
-
- if (PQresultStatus(lastResult) == PGRES_COPY_IN ||
- PQresultStatus(lastResult) == PGRES_COPY_OUT ||
- PQresultStatus(lastResult) == PGRES_COPY_BOTH ||
- PQstatus(conn) == CONNECTION_BAD)
- break;
- }
- }
- PG_CATCH();
- {
+ /*
+ * Emulate PQexec()'s behavior of returning the last result when there
+ * are many.
+ */
PQclear(lastResult);
- PG_RE_THROW();
- }
- PG_END_TRY();
+ lastResult = result;
+ if (PQresultStatus(lastResult) == PGRES_COPY_IN ||
+ PQresultStatus(lastResult) == PGRES_COPY_OUT ||
+ PQresultStatus(lastResult) == PGRES_COPY_BOTH ||
+ PQstatus(conn) == CONNECTION_BAD)
+ break;
+ }
return lastResult;
}
@@ -462,13 +441,21 @@ exit: ;
* This function is intended to be set via PQsetNoticeReceiver() so that
* NOTICE, WARNING, and similar messages from the connection are reported via
* ereport(), instead of being printed to stderr.
+ *
+ * Because this will be called from libpq with a "real" (not wrapped)
+ * PGresult, we need to temporarily ignore libpq-be-fe.h's wrapper macros
+ * for PGresult and also PQresultErrorMessage, and put back the wrappers
+ * afterwards. That's not pretty, but there seems no better alternative.
*/
+#undef PGresult
+#undef PQresultErrorMessage
+
static inline void
libpqsrv_notice_receiver(void *arg, const PGresult *res)
{
- char *message;
+ const char *message;
int len;
- char *prefix = (char *) arg;
+ const char *prefix = (const char *) arg;
/*
* Trim the trailing newline from the message text returned from
@@ -484,4 +471,7 @@ libpqsrv_notice_receiver(void *arg, const PGresult *res)
errmsg_internal("%s: %.*s", prefix, len, message));
}
+#define PGresult libpqsrv_PGresult
+#define PQresultErrorMessage libpqsrv_PQresultErrorMessage
+
#endif /* LIBPQ_BE_FE_HELPERS_H */
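
For reference, libpqsrv_notice_receiver is installed through libpq's standard hook API; a sketch of typical extension-side setup (connection management elided, prefix string is the caller's choice):

    /* Route libpq NOTICE/WARNING traffic through ereport(); the arg
     * string becomes the message prefix.  Sketch of typical usage. */
    static void
    setup_notice_forwarding(PGconn *conn)
    {
        PQsetNoticeReceiver(conn, libpqsrv_notice_receiver,
                            (void *) "remote message");
    }
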
diff --git a/src/include/libpq/libpq-be-fe.h b/src/include/libpq/libpq-be-fe.h
new file mode 100644
index 00000000000..e3f796b0230
--- /dev/null
+++ b/src/include/libpq/libpq-be-fe.h
@@ -0,0 +1,259 @@
+/*-------------------------------------------------------------------------
+ *
+ * libpq-be-fe.h
+ * Wrapper functions for using libpq in extensions
+ *
+ * Code built directly into the backend is not allowed to link to libpq
+ * directly. Extension code is allowed to use libpq however. One of the
+ * main risks in doing so is leaking the malloc-allocated structures
+ * returned by libpq, causing a process-lifespan memory leak.
+ *
+ * This file provides wrapper objects to help in building memory-safe code.
+ * A PGresult object wrapped this way acts much as if it were palloc'd:
+ * it will go away when the specified context is reset or deleted.
+ * We might later extend the concept to other objects such as PGconns.
+ *
+ * See also the libpq-be-fe-helpers.h file, which provides additional
+ * facilities built on top of this one.
+ *
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/libpq/libpq-be-fe.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef LIBPQ_BE_FE_H
+#define LIBPQ_BE_FE_H
+
+/*
+ * Despite the name, BUILDING_DLL is set only when building code that is
+ * directly part of the backend, which is also where libpq isn't allowed
+ * to be used. Obviously this doesn't protect against libpq-fe.h getting
+ * included otherwise, but perhaps it still protects against a few
+ * mistakes...
+ */
+#ifdef BUILDING_DLL
+#error "libpq may not be used in code directly built into the backend"
+#endif
+
+#include "libpq-fe.h"
+
+/*
+ * Memory-context-safe wrapper object for a PGresult.
+ */
+typedef struct libpqsrv_PGresult
+{
+ PGresult *res; /* the wrapped PGresult */
+ MemoryContext ctx; /* the MemoryContext it's attached to */
+ MemoryContextCallback cb; /* the callback that implements freeing */
+} libpqsrv_PGresult;
+
+
+/*
+ * Wrap the given PGresult in a libpqsrv_PGresult object, so that it will
+ * go away automatically if the current memory context is reset or deleted.
+ *
+ * To avoid potential memory leaks, backend code must always apply this
+ * immediately to the output of any PGresult-yielding libpq function.
+ */
+static inline libpqsrv_PGresult *
+libpqsrv_PQwrap(PGresult *res)
+{
+ libpqsrv_PGresult *bres;
+ MemoryContext ctx = CurrentMemoryContext;
+
+ /* We pass through a NULL result as-is, since there's nothing to free */
+ if (res == NULL)
+ return NULL;
+ /* Attempt to allocate the wrapper ... this had better not throw error */
+ bres = (libpqsrv_PGresult *)
+ MemoryContextAllocExtended(ctx,
+ sizeof(libpqsrv_PGresult),
+ MCXT_ALLOC_NO_OOM);
+ /* If we failed to allocate a wrapper, free the PGresult before failing */
+ if (bres == NULL)
+ {
+ PQclear(res);
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+ }
+ /* Okay, set up the wrapper */
+ bres->res = res;
+ bres->ctx = ctx;
+ bres->cb.func = (MemoryContextCallbackFunction) PQclear;
+ bres->cb.arg = res;
+ MemoryContextRegisterResetCallback(ctx, &bres->cb);
+ return bres;
+}
+
+/*
+ * Free a wrapped PGresult, after detaching it from the memory context.
+ * Like PQclear(), allow the argument to be NULL.
+ */
+static inline void
+libpqsrv_PQclear(libpqsrv_PGresult *bres)
+{
+ if (bres)
+ {
+ MemoryContextUnregisterResetCallback(bres->ctx, &bres->cb);
+ PQclear(bres->res);
+ pfree(bres);
+ }
+}
+
+/*
+ * Move a wrapped PGresult to have a different parent context.
+ */
+static inline libpqsrv_PGresult *
+libpqsrv_PGresultSetParent(libpqsrv_PGresult *bres, MemoryContext ctx)
+{
+ libpqsrv_PGresult *newres;
+
+ /* We pass through a NULL result as-is */
+ if (bres == NULL)
+ return NULL;
+ /* Make a new wrapper in the target context, raising error on OOM */
+ newres = (libpqsrv_PGresult *)
+ MemoryContextAlloc(ctx, sizeof(libpqsrv_PGresult));
+ /* Okay, set up the new wrapper */
+ newres->res = bres->res;
+ newres->ctx = ctx;
+ newres->cb.func = (MemoryContextCallbackFunction) PQclear;
+ newres->cb.arg = bres->res;
+ MemoryContextRegisterResetCallback(ctx, &newres->cb);
+ /* Disarm and delete the old wrapper */
+ MemoryContextUnregisterResetCallback(bres->ctx, &bres->cb);
+ pfree(bres);
+ return newres;
+}
+
+/*
+ * Convenience wrapper for PQgetResult.
+ *
+ * We could supply wrappers for other PGresult-returning functions too,
+ * but at present there's no need.
+ */
+static inline libpqsrv_PGresult *
+libpqsrv_PQgetResult(PGconn *conn)
+{
+ return libpqsrv_PQwrap(PQgetResult(conn));
+}
+
+/*
+ * Accessor functions for libpqsrv_PGresult. While it's not necessary to use
+ * these, they emulate the behavior of the underlying libpq functions when
+ * passed a NULL pointer. This is particularly important for PQresultStatus,
+ * which is often the first check on a result.
+ */
+
+static inline ExecStatusType
+libpqsrv_PQresultStatus(const libpqsrv_PGresult *res)
+{
+ if (!res)
+ return PGRES_FATAL_ERROR;
+ return PQresultStatus(res->res);
+}
+
+static inline const char *
+libpqsrv_PQresultErrorMessage(const libpqsrv_PGresult *res)
+{
+ if (!res)
+ return "";
+ return PQresultErrorMessage(res->res);
+}
+
+static inline char *
+libpqsrv_PQresultErrorField(const libpqsrv_PGresult *res, int fieldcode)
+{
+ if (!res)
+ return NULL;
+ return PQresultErrorField(res->res, fieldcode);
+}
+
+static inline char *
+libpqsrv_PQcmdStatus(const libpqsrv_PGresult *res)
+{
+ if (!res)
+ return NULL;
+ return PQcmdStatus(res->res);
+}
+
+static inline int
+libpqsrv_PQntuples(const libpqsrv_PGresult *res)
+{
+ if (!res)
+ return 0;
+ return PQntuples(res->res);
+}
+
+static inline int
+libpqsrv_PQnfields(const libpqsrv_PGresult *res)
+{
+ if (!res)
+ return 0;
+ return PQnfields(res->res);
+}
+
+static inline char *
+libpqsrv_PQgetvalue(const libpqsrv_PGresult *res, int tup_num, int field_num)
+{
+ if (!res)
+ return NULL;
+ return PQgetvalue(res->res, tup_num, field_num);
+}
+
+static inline int
+libpqsrv_PQgetlength(const libpqsrv_PGresult *res, int tup_num, int field_num)
+{
+ if (!res)
+ return 0;
+ return PQgetlength(res->res, tup_num, field_num);
+}
+
+static inline int
+libpqsrv_PQgetisnull(const libpqsrv_PGresult *res, int tup_num, int field_num)
+{
+ if (!res)
+ return 1; /* pretend it is null */
+ return PQgetisnull(res->res, tup_num, field_num);
+}
+
+static inline char *
+libpqsrv_PQfname(const libpqsrv_PGresult *res, int field_num)
+{
+ if (!res)
+ return NULL;
+ return PQfname(res->res, field_num);
+}
+
+static inline const char *
+libpqsrv_PQcmdTuples(const libpqsrv_PGresult *res)
+{
+ if (!res)
+ return "";
+ return PQcmdTuples(res->res);
+}
+
+/*
+ * Redefine these libpq entry point names concerned with PGresults so that
+ * they will operate on libpqsrv_PGresults instead. This avoids needing to
+ * convert a lot of pre-existing code, and reduces the notational differences
+ * between frontend and backend libpq-using code.
+ */
+#define PGresult libpqsrv_PGresult
+#define PQclear libpqsrv_PQclear
+#define PQgetResult libpqsrv_PQgetResult
+#define PQresultStatus libpqsrv_PQresultStatus
+#define PQresultErrorMessage libpqsrv_PQresultErrorMessage
+#define PQresultErrorField libpqsrv_PQresultErrorField
+#define PQcmdStatus libpqsrv_PQcmdStatus
+#define PQntuples libpqsrv_PQntuples
+#define PQnfields libpqsrv_PQnfields
+#define PQgetvalue libpqsrv_PQgetvalue
+#define PQgetlength libpqsrv_PQgetlength
+#define PQgetisnull libpqsrv_PQgetisnull
+#define PQfname libpqsrv_PQfname
+#define PQcmdTuples libpqsrv_PQcmdTuples
+
+#endif /* LIBPQ_BE_FE_H */
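
With these redefinitions in place, backend extension code keeps the familiar libpq shape while every PGresult becomes context-managed. A sketch of what a caller sees, assuming libpq-be-fe.h is included and a query is already in flight:

    /* "PGresult" here is really libpqsrv_PGresult: if the current memory
     * context is reset before PQclear() runs, the result is freed anyway. */
    static void
    log_row_count(PGconn *conn)
    {
        PGresult   *res = PQgetResult(conn);    /* wrapped automatically */

        if (PQresultStatus(res) == PGRES_TUPLES_OK)
            elog(LOG, "received %d rows", PQntuples(res));
        PQclear(res);
    }
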
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index e5dd15098f6..ad2726f026f 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -2133,10 +2133,12 @@ typedef struct MemoizePath
* complete after caching the first record. */
bool binary_mode; /* true when cache key should be compared bit
* by bit, false when using hash equality ops */
- Cardinality calls; /* expected number of rescans */
uint32 est_entries; /* The maximum number of entries that the
* planner expects will fit in the cache, or 0
* if unknown */
+ Cardinality est_calls; /* expected number of rescans */
+ Cardinality est_unique_keys; /* estimated unique keys, for EXPLAIN */
+ double est_hit_ratio; /* estimated cache hit ratio, for EXPLAIN */
} MemoizePath;
/*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 46e2e09ea35..29d7732d6a0 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -29,18 +29,19 @@
*/
/* ----------------
- * CachedPlanType
+ * PlannedStmtOrigin
*
- * CachedPlanType identifies whether a PlannedStmt is a cached plan, and if
- * so, whether it is generic or custom.
+ * PlannedStmtOrigin identifies where a PlannedStmt comes from.
* ----------------
*/
-typedef enum CachedPlanType
+typedef enum PlannedStmtOrigin
{
- PLAN_CACHE_NONE = 0, /* Not a cached plan */
- PLAN_CACHE_GENERIC, /* Generic cached plan */
- PLAN_CACHE_CUSTOM, /* Custom cached plan */
-} CachedPlanType;
+ PLAN_STMT_UNKNOWN = 0, /* plan origin is not yet known */
+ PLAN_STMT_INTERNAL, /* generated internally by a query */
+ PLAN_STMT_STANDARD, /* standard planned statement */
+	PLAN_STMT_CACHE_GENERIC,	/* generic cached plan */
+	PLAN_STMT_CACHE_CUSTOM,		/* custom cached plan */
+} PlannedStmtOrigin;
/* ----------------
* PlannedStmt node
@@ -72,8 +73,8 @@ typedef struct PlannedStmt
/* plan identifier (can be set by plugins) */
int64 planId;
- /* type of cached plan */
- CachedPlanType cached_plan_type;
+ /* origin of plan */
+ PlannedStmtOrigin planOrigin;
/* is it insert|update|delete|merge RETURNING? */
bool hasReturning;
@@ -1073,6 +1074,16 @@ typedef struct Memoize
/* paramids from param_exprs */
Bitmapset *keyparamids;
+
+ /* Estimated number of rescans, for EXPLAIN */
+ Cardinality est_calls;
+
+ /* Estimated number of distinct lookup keys, for EXPLAIN */
+ Cardinality est_unique_keys;
+
+ /* Estimated cache hit ratio, for EXPLAIN */
+ double est_hit_ratio;
+
} Memoize;
/* ----------------
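
The three estimates are carried from MemoizePath into the Memoize plan node purely so EXPLAIN can report them. Roughly, if each distinct key misses the cache once and every other rescan hits, the hit ratio follows from the other two fields; a sketch of that relationship (not the planner's actual costing code):

    /* Illustrative relationship only; the real computation happens in the
     * planner's memoize cost estimation. */
    static double
    sketch_memoize_hit_ratio(Cardinality est_calls, Cardinality est_unique_keys)
    {
        if (est_calls <= est_unique_keys)
            return 0.0;         /* every lookup expected to be a miss */
        return (est_calls - est_unique_keys) / est_calls;
    }
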
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 60dcdb77e41..58936e963cb 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -90,7 +90,7 @@ extern MemoizePath *create_memoize_path(PlannerInfo *root,
List *hash_operators,
bool singlerow,
bool binary_mode,
- double calls);
+ Cardinality est_calls);
extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel,
Path *subpath, SpecialJoinInfo *sjinfo);
extern GatherPath *create_gather_path(PlannerInfo *root,
diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h
index e1b42267b22..039b9cba61a 100644
--- a/src/include/utils/palloc.h
+++ b/src/include/utils/palloc.h
@@ -133,6 +133,8 @@ MemoryContextSwitchTo(MemoryContext context)
/* Registration of memory context reset/delete callbacks */
extern void MemoryContextRegisterResetCallback(MemoryContext context,
MemoryContextCallback *cb);
+extern void MemoryContextUnregisterResetCallback(MemoryContext context,
+ MemoryContextCallback *cb);
/*
* These are like standard strdup() except the copied string is
diff --git a/src/include/utils/pgstat_internal.h b/src/include/utils/pgstat_internal.h
index d5557e6e998..6cf00008f63 100644
--- a/src/include/utils/pgstat_internal.h
+++ b/src/include/utils/pgstat_internal.h
@@ -295,18 +295,11 @@ typedef struct PgStat_KindInfo
*
* Returns true if some of the stats could not be flushed, due to lock
* contention for example. Optional.
- */
- bool (*flush_static_cb) (bool nowait);
-
- /*
- * For fixed-numbered or variable-numbered statistics: Check for pending
- * stats in need of flush with flush_static_cb, when these do not use
- * PgStat_EntryRef->pending.
*
- * Returns true if there are any stats pending for flush, triggering
- * flush_static_cb. Optional.
+ * "pgstat_report_fixed" needs to be set to trigger the flush of pending
+ * stats.
*/
- bool (*have_static_pending_cb) (void);
+ bool (*flush_static_cb) (bool nowait);
/*
* For fixed-numbered statistics: Reset All.
@@ -627,7 +620,6 @@ extern void pgstat_archiver_snapshot_cb(void);
extern bool pgstat_flush_backend(bool nowait, bits32 flags);
extern bool pgstat_backend_flush_cb(bool nowait);
-extern bool pgstat_backend_have_pending_cb(void);
extern void pgstat_backend_reset_timestamp_cb(PgStatShared_Common *header,
TimestampTz ts);
@@ -676,7 +668,6 @@ extern bool pgstat_function_flush_cb(PgStat_EntryRef *entry_ref, bool nowait);
extern void pgstat_flush_io(bool nowait);
-extern bool pgstat_io_have_pending_cb(void);
extern bool pgstat_io_flush_cb(bool nowait);
extern void pgstat_io_init_shmem_cb(void *stats);
extern void pgstat_io_reset_all_cb(TimestampTz ts);
@@ -738,7 +729,6 @@ extern PgStatShared_Common *pgstat_init_entry(PgStat_Kind kind,
* Functions in pgstat_slru.c
*/
-extern bool pgstat_slru_have_pending_cb(void);
extern bool pgstat_slru_flush_cb(bool nowait);
extern void pgstat_slru_init_shmem_cb(void *stats);
extern void pgstat_slru_reset_all_cb(TimestampTz ts);
@@ -750,7 +740,6 @@ extern void pgstat_slru_snapshot_cb(void);
*/
extern void pgstat_wal_init_backend_cb(void);
-extern bool pgstat_wal_have_pending_cb(void);
extern bool pgstat_wal_flush_cb(bool nowait);
extern void pgstat_wal_init_shmem_cb(void *stats);
extern void pgstat_wal_reset_all_cb(TimestampTz ts);
@@ -778,8 +767,23 @@ extern void pgstat_create_transactional(PgStat_Kind kind, Oid dboid, uint64 obji
* Variables in pgstat.c
*/
-extern PGDLLIMPORT PgStat_LocalState pgStatLocal;
+/*
+ * Track if *any* pending fixed-numbered statistics should be flushed to
+ * shared memory.
+ *
+ * This flag can be switched to true by fixed-numbered statistics to let
+ * pgstat_report_stat() know if it needs to go through one round of
+ * reports, calling flush_static_cb for each fixed-numbered statistics
+ * kind. When this flag is not set, pgstat_report_stat() is able to do
+ * a fast exit, knowing that there are no pending fixed-numbered statistics.
+ *
+ * Statistics callbacks should never reset this flag; pgstat_report_stat()
+ * is in charge of doing that.
+ */
+extern PGDLLIMPORT bool pgstat_report_fixed;
+/* Backend-local stats state */
+extern PGDLLIMPORT PgStat_LocalState pgStatLocal;
/*
* Implementation of inline functions declared above.
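
Under this scheme, a fixed-numbered stats kind raises the flag whenever it accumulates pending data, and pgstat_report_stat() later drives the flush through flush_static_cb. A sketch with a hypothetical kind (names invented for illustration; assumes backend stats headers are included):

    /* Hypothetical fixed-numbered kind: buffer an event locally and mark
     * that pgstat_report_stat() has fixed-numbered work to do. */
    static PgStat_Counter my_pending_events = 0;

    void
    my_stats_count_event(void)
    {
        my_pending_events++;
        pgstat_report_fixed = true;  /* reset only by pgstat_report_stat() */
    }
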
diff --git a/src/include/utils/pgstat_kind.h b/src/include/utils/pgstat_kind.h
index f44169fd5a3..eb5f0b3ae6d 100644
--- a/src/include/utils/pgstat_kind.h
+++ b/src/include/utils/pgstat_kind.h
@@ -18,7 +18,7 @@
/* Range of IDs allowed, for built-in and custom kinds */
#define PGSTAT_KIND_MIN 1 /* Minimum ID allowed */
-#define PGSTAT_KIND_MAX 256 /* Maximum ID allowed */
+#define PGSTAT_KIND_MAX 32 /* Maximum ID allowed */
/* use 0 for INVALID, to catch zero-initialized data */
#define PGSTAT_KIND_INVALID 0
@@ -46,7 +46,7 @@
/* Custom stats kinds */
/* Range of IDs allowed for custom stats kinds */
-#define PGSTAT_KIND_CUSTOM_MIN 128
+#define PGSTAT_KIND_CUSTOM_MIN 24
#define PGSTAT_KIND_CUSTOM_MAX PGSTAT_KIND_MAX
#define PGSTAT_KIND_CUSTOM_SIZE (PGSTAT_KIND_CUSTOM_MAX - PGSTAT_KIND_CUSTOM_MIN + 1)
@@ -55,7 +55,7 @@
* development and have not reserved their own unique kind ID yet. See:
* https://wiki.postgresql.org/wiki/CustomCumulativeStats
*/
-#define PGSTAT_KIND_EXPERIMENTAL 128
+#define PGSTAT_KIND_EXPERIMENTAL 24
static inline bool
pgstat_is_kind_builtin(PgStat_Kind kind)
diff --git a/src/interfaces/ecpg/ecpglib/prepare.c b/src/interfaces/ecpg/ecpglib/prepare.c
index dd6fd1fe7f4..06f0135813b 100644
--- a/src/interfaces/ecpg/ecpglib/prepare.c
+++ b/src/interfaces/ecpg/ecpglib/prepare.c
@@ -603,7 +603,10 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha
prep = ecpg_find_prepared_statement(stmtID, con, NULL);
/* This prepared name doesn't exist on this connection. */
if (!prep && !prepare_common(lineno, con, stmtID, query))
+ {
+ ecpg_free(*name);
return false;
+ }
}
else
@@ -619,11 +622,17 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha
return false;
if (!ECPGprepare(lineno, connection_name, 0, stmtID, query))
+ {
+ ecpg_free(*name);
return false;
+ }
entNo = AddStmtToCache(lineno, stmtID, connection_name, compat, query);
if (entNo < 0)
+ {
+ ecpg_free(*name);
return false;
+ }
}
/* increase usage counter */
diff --git a/src/interfaces/libpq-oauth/Makefile b/src/interfaces/libpq-oauth/Makefile
index 270fc0cf2d9..682f17413b3 100644
--- a/src/interfaces/libpq-oauth/Makefile
+++ b/src/interfaces/libpq-oauth/Makefile
@@ -24,7 +24,7 @@ NAME = pq-oauth-$(MAJORVERSION)
override shlib := lib$(NAME)$(DLSUFFIX)
override stlib := libpq-oauth.a
-override CPPFLAGS := -I$(libpq_srcdir) -I$(top_builddir)/src/port $(LIBCURL_CPPFLAGS) $(CPPFLAGS)
+override CPPFLAGS := -I$(libpq_srcdir) -I$(top_builddir)/src/port $(CPPFLAGS) $(LIBCURL_CPPFLAGS)
OBJS = \
$(WIN32RES)
diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile
index 47d67811509..da6650066d4 100644
--- a/src/interfaces/libpq/Makefile
+++ b/src/interfaces/libpq/Makefile
@@ -24,7 +24,7 @@ NAME= pq
SO_MAJOR_VERSION= 5
SO_MINOR_VERSION= $(MAJORVERSION)
-override CPPFLAGS := -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port
+override CPPFLAGS := -I$(srcdir) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port $(CPPFLAGS)
ifneq ($(PORTNAME), win32)
override CFLAGS += $(PTHREAD_CFLAGS)
endif
diff --git a/src/interfaces/libpq/fe-cancel.c b/src/interfaces/libpq/fe-cancel.c
index 65517c5703b..c872a0267f0 100644
--- a/src/interfaces/libpq/fe-cancel.c
+++ b/src/interfaces/libpq/fe-cancel.c
@@ -379,7 +379,24 @@ PQgetCancel(PGconn *conn)
/* Check that we have received a cancellation key */
if (conn->be_cancel_key_len == 0)
- return NULL;
+ {
+ /*
+ * In case there is no cancel key, return an all-zero PGcancel object.
+		 * the PGcancel object anyway. Arguably it would be better to return
+		 * NULL to indicate that cancellation is not possible, but there'd be
+		 * no way for the caller to distinguish "out of memory" from "server
+		 * did not send a cancel key". Also, this is how PQgetCancel() has always
+ * not send a cancel key". Also, this is how PGgetCancel() has always
+ * behaved, and if we changed it, some clients would stop working
+ * altogether with servers that don't support cancellation. (The
+ * modern PQcancelCreate() function returns a failed connection object
+ * instead.)
+ *
+ * The returned dummy object has cancel_pkt_len == 0; we check for
+ * that in PQcancel() to identify it as a dummy.
+ */
+ return calloc(1, sizeof(PGcancel));
+ }
cancel_req_len = offsetof(CancelRequestPacket, cancelAuthCode) + conn->be_cancel_key_len;
cancel = malloc(offsetof(PGcancel, cancel_req) + cancel_req_len);
@@ -544,6 +561,15 @@ PQcancel(PGcancel *cancel, char *errbuf, int errbufsize)
return false;
}
+ if (cancel->cancel_pkt_len == 0)
+ {
+ /* This is a dummy PGcancel object, see PQgetCancel */
+ strlcpy(errbuf, "PQcancel() -- no cancellation key received", errbufsize);
+ /* strlcpy probably doesn't change errno, but be paranoid */
+ SOCK_ERRNO_SET(save_errno);
+ return false;
+ }
+
/*
* We need to open a temporary connection to the postmaster. Do this with
* only kernel calls.
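
From the application's side, the dummy object moves the failure report from PQgetCancel() to PQcancel(), where a specific message becomes available; a sketch of the caller pattern (frontend code, buffer size arbitrary):

    #include <stdio.h>
    #include "libpq-fe.h"

    static void
    try_cancel(PGconn *conn)
    {
        char        errbuf[256];
        PGcancel   *cancel = PQgetCancel(conn);

        if (cancel == NULL)
            fprintf(stderr, "PQgetCancel failed (likely out of memory)\n");
        else if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
            fprintf(stderr, "%s\n", errbuf);  /* e.g. no cancellation key */
        PQfreeCancel(cancel);
    }
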
diff --git a/src/pl/plpython/Makefile b/src/pl/plpython/Makefile
index f959083a0bd..25f295c3709 100644
--- a/src/pl/plpython/Makefile
+++ b/src/pl/plpython/Makefile
@@ -11,7 +11,7 @@ ifeq ($(PORTNAME), win32)
override python_libspec =
endif
-override CPPFLAGS := -I. -I$(srcdir) $(python_includespec) $(CPPFLAGS)
+override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) $(python_includespec)
rpathdir = $(python_libdir)
diff --git a/src/pl/tcl/Makefile b/src/pl/tcl/Makefile
index ea52a2efc22..dd57f7d694c 100644
--- a/src/pl/tcl/Makefile
+++ b/src/pl/tcl/Makefile
@@ -11,7 +11,7 @@ top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
-override CPPFLAGS := -I. -I$(srcdir) $(TCL_INCLUDE_SPEC) $(CPPFLAGS)
+override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) $(TCL_INCLUDE_SPEC)
# On Windows, we don't link directly with the Tcl library; see below
ifneq ($(PORTNAME), win32)
diff --git a/src/test/modules/injection_points/injection_stats.c b/src/test/modules/injection_points/injection_stats.c
index 14903c629e0..e3947b23ba5 100644
--- a/src/test/modules/injection_points/injection_stats.c
+++ b/src/test/modules/injection_points/injection_stats.c
@@ -59,7 +59,7 @@ static const PgStat_KindInfo injection_stats = {
/*
* Kind ID reserved for statistics of injection points.
*/
-#define PGSTAT_KIND_INJECTION 129
+#define PGSTAT_KIND_INJECTION 25
/* Track if stats are loaded */
static bool inj_stats_loaded = false;
diff --git a/src/test/modules/injection_points/injection_stats_fixed.c b/src/test/modules/injection_points/injection_stats_fixed.c
index 3d0c01bdd05..bc54c79d190 100644
--- a/src/test/modules/injection_points/injection_stats_fixed.c
+++ b/src/test/modules/injection_points/injection_stats_fixed.c
@@ -64,7 +64,7 @@ static const PgStat_KindInfo injection_stats_fixed = {
/*
* Kind ID reserved for statistics of injection points.
*/
-#define PGSTAT_KIND_INJECTION_FIXED 130
+#define PGSTAT_KIND_INJECTION_FIXED 26
/* Track if fixed-numbered stats are loaded */
static bool inj_fixed_loaded = false;
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 61f68e0cc2e..35413f14019 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -304,6 +304,7 @@ sub is_alive
my $ret = PostgreSQL::Test::Utils::system_log(
'pg_isready',
+ '--timeout' => $PostgreSQL::Test::Utils::timeout_default,
'--host' => $self->host,
'--port' => $self->port);
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index debfa635c36..4c5af018ee4 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -228,6 +228,13 @@ is( $node->safe_psql(
'before-orderly-restart',
'can still write after crash restart');
+# Confirm that the logical replication launcher, a background worker
+# without the never-restart flag, has also restarted successfully.
+is($node->poll_query_until('postgres',
+ "SELECT count(*) = 1 FROM pg_stat_activity WHERE backend_type = 'logical replication launcher'"),
+ '1',
+ 'logical replication launcher restarted after crash');
+
# Just to be sure, check that an orderly restart now still works
$node->restart();
diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
index f9bd252444f..dc541d61adf 100644
--- a/src/test/regress/expected/foreign_key.out
+++ b/src/test/regress/expected/foreign_key.out
@@ -1750,7 +1750,7 @@ Indexes:
Referenced by:
TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b)
--- Check the exsting FK trigger
+-- Check the existing FK trigger
SELECT conname, tgrelid::regclass as tgrel, regexp_replace(tgname, '[0-9]+', 'N') as tgname, tgtype
FROM pg_trigger t JOIN pg_constraint c ON (t.tgconstraint = c.oid)
WHERE tgrelid IN (SELECT relid FROM pg_partition_tree('fk_partitioned_fk'::regclass)
diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index 3a2eacd793f..1ec3fa34a2d 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -1934,3 +1934,24 @@ RESET client_min_messages;
RESET SESSION AUTHORIZATION;
DROP ROLE regress_publication_user, regress_publication_user2;
DROP ROLE regress_publication_user_dummy;
+-- stage objects for pg_dump tests
+CREATE SCHEMA pubme CREATE TABLE t0 (c int, d int) CREATE TABLE t1 (c int);
+CREATE SCHEMA pubme2 CREATE TABLE t0 (c int, d int);
+SET client_min_messages = 'ERROR';
+CREATE PUBLICATION dump_pub_qual_1ct FOR
+ TABLE ONLY pubme.t0 (c, d) WHERE (c > 0);
+CREATE PUBLICATION dump_pub_qual_2ct FOR
+ TABLE ONLY pubme.t0 (c) WHERE (c > 0),
+ TABLE ONLY pubme.t1 (c);
+CREATE PUBLICATION dump_pub_nsp_1ct FOR
+ TABLES IN SCHEMA pubme;
+CREATE PUBLICATION dump_pub_nsp_2ct FOR
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2;
+CREATE PUBLICATION dump_pub_all FOR
+ TABLE ONLY pubme.t0,
+ TABLE ONLY pubme.t1 WHERE (c < 0),
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2
+ WITH (publish_via_partition_root = true);
+RESET client_min_messages;
diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
index cfcecb4e911..39174ad1eb9 100644
--- a/src/test/regress/sql/foreign_key.sql
+++ b/src/test/regress/sql/foreign_key.sql
@@ -1296,7 +1296,7 @@ UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500;
-- check psql behavior
\d fk_notpartitioned_pk
--- Check the exsting FK trigger
+-- Check the existing FK trigger
SELECT conname, tgrelid::regclass as tgrel, regexp_replace(tgname, '[0-9]+', 'N') as tgname, tgtype
FROM pg_trigger t JOIN pg_constraint c ON (t.tgconstraint = c.oid)
WHERE tgrelid IN (SELECT relid FROM pg_partition_tree('fk_partitioned_fk'::regclass)
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index c9e309190df..2585f083181 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -1229,3 +1229,25 @@ RESET client_min_messages;
RESET SESSION AUTHORIZATION;
DROP ROLE regress_publication_user, regress_publication_user2;
DROP ROLE regress_publication_user_dummy;
+
+-- stage objects for pg_dump tests
+CREATE SCHEMA pubme CREATE TABLE t0 (c int, d int) CREATE TABLE t1 (c int);
+CREATE SCHEMA pubme2 CREATE TABLE t0 (c int, d int);
+SET client_min_messages = 'ERROR';
+CREATE PUBLICATION dump_pub_qual_1ct FOR
+ TABLE ONLY pubme.t0 (c, d) WHERE (c > 0);
+CREATE PUBLICATION dump_pub_qual_2ct FOR
+ TABLE ONLY pubme.t0 (c) WHERE (c > 0),
+ TABLE ONLY pubme.t1 (c);
+CREATE PUBLICATION dump_pub_nsp_1ct FOR
+ TABLES IN SCHEMA pubme;
+CREATE PUBLICATION dump_pub_nsp_2ct FOR
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2;
+CREATE PUBLICATION dump_pub_all FOR
+ TABLE ONLY pubme.t0,
+ TABLE ONLY pubme.t1 WHERE (c < 0),
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2
+ WITH (publish_via_partition_root = true);
+RESET client_min_messages;
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 4353befab99..e6f2e93b2d6 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -391,7 +391,6 @@ CachedFunctionHashEntry
CachedFunctionHashKey
CachedPlan
CachedPlanSource
-CachedPlanType
CallContext
CallStmt
CancelRequestPacket
@@ -2276,6 +2275,7 @@ PlanInvalItem
PlanRowMark
PlanState
PlannedStmt
+PlannedStmtOrigin
PlannerGlobal
PlannerInfo
PlannerParamItem
@@ -3757,6 +3757,7 @@ leafSegmentInfo
leaf_item
libpq_gettext_func
libpq_source
+libpqsrv_PGresult
line_t
lineno_t
list_sort_comparator