From ca9112a424ff68ec4f2ef67b47122f7d61412964 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 15 Aug 2016 13:49:49 -0400 Subject: Stamp HEAD as 10devel. This is a good bit more complicated than the average new-version stamping commit, because it includes various adjustments in pursuit of changing from three-part to two-part version numbers. It's likely some further work will be needed around that change; but this is enough to get through the regression tests, at least in Unix builds. Peter Eisentraut and Tom Lane --- src/backend/catalog/genbki.pl | 4 ++-- src/backend/utils/init/miscinit.c | 26 +++++++++++++------------- src/bin/pg_upgrade/check.c | 6 +++--- src/bin/pg_upgrade/server.c | 2 +- src/include/pg_config.h.win32 | 8 ++++---- src/interfaces/libpq/libpq.rc.in | 8 ++++---- src/port/win32ver.rc | 4 ++-- src/tools/msvc/Solution.pm | 6 +++--- src/tools/version_stamp.pl | 14 ++++++-------- 9 files changed, 38 insertions(+), 40 deletions(-) (limited to 'src') diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl index 54a14e5dc3..26d165203d 100644 --- a/src/backend/catalog/genbki.pl +++ b/src/backend/catalog/genbki.pl @@ -43,8 +43,8 @@ while (@ARGV) elsif ($arg =~ /^--set-version=(.*)$/) { $major_version = $1; - die "Version must be in format nn.nn.\n" - if !($major_version =~ /^\d+\.\d+$/); + die "Invalid version string.\n" + if !($major_version =~ /^\d+$/); } else { diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index d4625a6238..22b046e006 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -1334,16 +1334,13 @@ ValidatePgVersion(const char *path) char full_path[MAXPGPATH]; FILE *file; int ret; - long file_major, - file_minor; - long my_major = 0, - my_minor = 0; + long file_major; + long my_major; char *endptr; - const char *version_string = PG_VERSION; + char file_version_string[64]; + const char *my_version_string = PG_VERSION; - my_major = strtol(version_string, &endptr, 10); - if (*endptr == '.') - my_minor = strtol(endptr + 1, NULL, 10); + my_major = strtol(my_version_string, &endptr, 10); snprintf(full_path, sizeof(full_path), "%s/PG_VERSION", path); @@ -1362,8 +1359,11 @@ ValidatePgVersion(const char *path) errmsg("could not open file \"%s\": %m", full_path))); } - ret = fscanf(file, "%ld.%ld", &file_major, &file_minor); - if (ret != 2) + file_version_string[0] = '\0'; + ret = fscanf(file, "%63s", file_version_string); + file_major = strtol(file_version_string, &endptr, 10); + + if (ret != 1 || endptr == file_version_string) ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("\"%s\" is not a valid data directory", @@ -1374,13 +1374,13 @@ ValidatePgVersion(const char *path) FreeFile(file); - if (my_major != file_major || my_minor != file_minor) + if (my_major != file_major) ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("database files are incompatible with server"), - errdetail("The data directory was initialized by PostgreSQL version %ld.%ld, " + errdetail("The data directory was initialized by PostgreSQL version %s, " "which is not compatible with this version %s.", - file_major, file_minor, version_string))); + file_version_string, my_version_string))); } /*------------------------------------------------------------------------- diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index f901e3c512..ed41dee6a5 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -1082,8 +1082,8 @@ get_bin_version(ClusterInfo 
*cluster) char cmd[MAXPGPATH], cmd_output[MAX_STRING]; FILE *output; - int pre_dot, - post_dot; + int pre_dot = 0, + post_dot = 0; snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir); @@ -1098,7 +1098,7 @@ get_bin_version(ClusterInfo *cluster) if (strchr(cmd_output, '\n') != NULL) *strchr(cmd_output, '\n') = '\0'; - if (sscanf(cmd_output, "%*s %*s %d.%d", &pre_dot, &post_dot) != 2) + if (sscanf(cmd_output, "%*s %*s %d.%d", &pre_dot, &post_dot) < 1) pg_fatal("could not get version from %s\n", cmd); cluster->bin_version = (pre_dot * 100 + post_dot) * 100; diff --git a/src/bin/pg_upgrade/server.c b/src/bin/pg_upgrade/server.c index 830335f501..12432bb1d0 100644 --- a/src/bin/pg_upgrade/server.c +++ b/src/bin/pg_upgrade/server.c @@ -166,7 +166,7 @@ get_major_server_version(ClusterInfo *cluster) if (fscanf(version_fd, "%63s", cluster->major_version_str) == 0 || sscanf(cluster->major_version_str, "%d.%d", &integer_version, - &fractional_version) != 2) + &fractional_version) < 1) pg_fatal("could not get version from %s\n", cluster->pgdata); fclose(version_fd); diff --git a/src/include/pg_config.h.win32 b/src/include/pg_config.h.win32 index b6b88fcf0d..8892c3cb4f 100644 --- a/src/include/pg_config.h.win32 +++ b/src/include/pg_config.h.win32 @@ -554,10 +554,10 @@ #define PACKAGE_NAME "PostgreSQL" /* Define to the full name and version of this package. */ -#define PACKAGE_STRING "PostgreSQL 9.6beta4" +#define PACKAGE_STRING "PostgreSQL 10devel" /* Define to the version of this package. */ -#define PACKAGE_VERSION "9.6beta4" +#define PACKAGE_VERSION "10devel" /* Define to the name of a signed 128-bit integer type. */ #undef PG_INT128_TYPE @@ -566,10 +566,10 @@ #define PG_INT64_TYPE long long int /* PostgreSQL version as a string */ -#define PG_VERSION "9.6beta4" +#define PG_VERSION "10devel" /* PostgreSQL version as a number */ -#define PG_VERSION_NUM 90600 +#define PG_VERSION_NUM 100000 /* Define to the one symbol short name of this package. 
*/ #define PACKAGE_TARNAME "postgresql" diff --git a/src/interfaces/libpq/libpq.rc.in b/src/interfaces/libpq/libpq.rc.in index e41a1a27f4..0d6f7049f5 100644 --- a/src/interfaces/libpq/libpq.rc.in +++ b/src/interfaces/libpq/libpq.rc.in @@ -1,8 +1,8 @@ #include VS_VERSION_INFO VERSIONINFO - FILEVERSION 9,6,0,0 - PRODUCTVERSION 9,6,0,0 + FILEVERSION 10,0,0,0 + PRODUCTVERSION 10,0,0,0 FILEFLAGSMASK 0x3fL FILEFLAGS 0 FILEOS VOS__WINDOWS32 @@ -15,13 +15,13 @@ BEGIN BEGIN VALUE "CompanyName", "\0" VALUE "FileDescription", "PostgreSQL Access Library\0" - VALUE "FileVersion", "9.6.0\0" + VALUE "FileVersion", "10.0\0" VALUE "InternalName", "libpq\0" VALUE "LegalCopyright", "Copyright (C) 2016\0" VALUE "LegalTrademarks", "\0" VALUE "OriginalFilename", "libpq.dll\0" VALUE "ProductName", "PostgreSQL\0" - VALUE "ProductVersion", "9.6.0\0" + VALUE "ProductVersion", "10.0\0" END END BLOCK "VarFileInfo" diff --git a/src/port/win32ver.rc b/src/port/win32ver.rc index c21b74c017..3ce092382b 100644 --- a/src/port/win32ver.rc +++ b/src/port/win32ver.rc @@ -2,8 +2,8 @@ #include "pg_config.h" VS_VERSION_INFO VERSIONINFO - FILEVERSION 9,6,0,0 - PRODUCTVERSION 9,6,0,0 + FILEVERSION 10,0,0,0 + PRODUCTVERSION 10,0,0,0 FILEFLAGSMASK 0x17L FILEFLAGS 0x0L FILEOS VOS_NT_WINDOWS32 diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm index f07029bce1..9cb1ad36cf 100644 --- a/src/tools/msvc/Solution.pm +++ b/src/tools/msvc/Solution.pm @@ -131,12 +131,12 @@ sub GenerateFiles if (/^AC_INIT\(\[PostgreSQL\], \[([^\]]+)\]/) { $self->{strver} = $1; - if ($self->{strver} !~ /^(\d+)\.(\d+)(?:\.(\d+))?/) + if ($self->{strver} !~ /^(\d+)(?:\.(\d+))?/) { confess "Bad format of version: $self->{strver}\n"; } - $self->{numver} = sprintf("%d%02d%02d", $1, $2, $3 ? $3 : 0); - $self->{majorver} = sprintf("%d.%d", $1, $2); + $self->{numver} = sprintf("%d%04d", $1, $2 ? $2 : 0); + $self->{majorver} = sprintf("%d", $1); } } close(C); diff --git a/src/tools/version_stamp.pl b/src/tools/version_stamp.pl index cc685453dd..3edd7bedaf 100755 --- a/src/tools/version_stamp.pl +++ b/src/tools/version_stamp.pl @@ -22,8 +22,7 @@ # Major version is hard-wired into the script. We update it when we branch # a new development version. -$major1 = 9; -$major2 = 6; +$majorversion = 10; # Validate argument and compute derived variables $minor = shift; @@ -60,7 +59,6 @@ else } # Create various required forms of the version number -$majorversion = $major1 . "." . $major2; if ($dotneeded) { $fullversion = $majorversion . "." . $minor; @@ -70,7 +68,7 @@ else $fullversion = $majorversion . $minor; } $numericversion = $majorversion . "." . $numericminor; -$padnumericversion = sprintf("%d%02d%02d", $major1, $major2, $numericminor); +$padnumericversion = sprintf("%d%04d", $majorversion, $numericminor); # Get the autoconf version number for eventual nag message # (this also ensures we're in the right directory) @@ -110,15 +108,15 @@ sed_file("src/include/pg_config.h.win32", ); sed_file("src/interfaces/libpq/libpq.rc.in", -"-e 's/FILEVERSION [0-9]*,[0-9]*,[0-9]*,0/FILEVERSION $major1,$major2,$numericminor,0/' " - . "-e 's/PRODUCTVERSION [0-9]*,[0-9]*,[0-9]*,0/PRODUCTVERSION $major1,$major2,$numericminor,0/' " +"-e 's/FILEVERSION [0-9]*,[0-9]*,[0-9]*,0/FILEVERSION $majorversion,0,$numericminor,0/' " + . "-e 's/PRODUCTVERSION [0-9]*,[0-9]*,[0-9]*,0/PRODUCTVERSION $majorversion,0,$numericminor,0/' " . "-e 's/VALUE \"FileVersion\", \"[0-9.]*/VALUE \"FileVersion\", \"$numericversion/' " . 
"-e 's/VALUE \"ProductVersion\", \"[0-9.]*/VALUE \"ProductVersion\", \"$numericversion/'" ); sed_file("src/port/win32ver.rc", -"-e 's/FILEVERSION [0-9]*,[0-9]*,[0-9]*,0/FILEVERSION $major1,$major2,$numericminor,0/' " - . "-e 's/PRODUCTVERSION [0-9]*,[0-9]*,[0-9]*,0/PRODUCTVERSION $major1,$major2,$numericminor,0/'" +"-e 's/FILEVERSION [0-9]*,[0-9]*,[0-9]*,0/FILEVERSION $majorversion,0,$numericminor,0/' " + . "-e 's/PRODUCTVERSION [0-9]*,[0-9]*,[0-9]*,0/PRODUCTVERSION $majorversion,0,$numericminor,0/'" ); print "Stamped these files with version number $fullversion:\n$fixedfiles"; -- cgit v1.2.3 From 0b9358d4406b9b45a06855d53f491cc7ce9550a9 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 15 Aug 2016 14:35:55 -0400 Subject: Stamp shared-library minor version numbers for v10. --- src/interfaces/ecpg/compatlib/Makefile | 2 +- src/interfaces/ecpg/ecpglib/Makefile | 2 +- src/interfaces/ecpg/pgtypeslib/Makefile | 2 +- src/interfaces/ecpg/preproc/Makefile | 2 +- src/interfaces/libpq/Makefile | 2 +- src/tools/msvc/Mkvcbuild.pm | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/interfaces/ecpg/compatlib/Makefile b/src/interfaces/ecpg/compatlib/Makefile index 0a04a7310f..2f58ad8592 100644 --- a/src/interfaces/ecpg/compatlib/Makefile +++ b/src/interfaces/ecpg/compatlib/Makefile @@ -16,7 +16,7 @@ include $(top_builddir)/src/Makefile.global PGFILEDESC = "ECPG compat - compatibility library for ECPG" NAME= ecpg_compat SO_MAJOR_VERSION= 3 -SO_MINOR_VERSION= 8 +SO_MINOR_VERSION= 9 override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -I$(libpq_srcdir) -DFRONTEND $(CPPFLAGS) diff --git a/src/interfaces/ecpg/ecpglib/Makefile b/src/interfaces/ecpg/ecpglib/Makefile index 39c4232580..00503b33dd 100644 --- a/src/interfaces/ecpg/ecpglib/Makefile +++ b/src/interfaces/ecpg/ecpglib/Makefile @@ -16,7 +16,7 @@ include $(top_builddir)/src/Makefile.global PGFILEDESC = "ECPG - embedded SQL in C" NAME= ecpg SO_MAJOR_VERSION= 6 -SO_MINOR_VERSION= 8 +SO_MINOR_VERSION= 9 override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -I$(libpq_srcdir) -I$(top_builddir)/src/port -DFRONTEND $(CPPFLAGS) diff --git a/src/interfaces/ecpg/pgtypeslib/Makefile b/src/interfaces/ecpg/pgtypeslib/Makefile index 1c1a42fa8f..ac278948a5 100644 --- a/src/interfaces/ecpg/pgtypeslib/Makefile +++ b/src/interfaces/ecpg/pgtypeslib/Makefile @@ -16,7 +16,7 @@ include $(top_builddir)/src/Makefile.global PGFILEDESC = "pgtypes - library for data type mapping" NAME= pgtypes SO_MAJOR_VERSION= 3 -SO_MINOR_VERSION= 7 +SO_MINOR_VERSION= 8 override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -DFRONTEND $(CPPFLAGS) diff --git a/src/interfaces/ecpg/preproc/Makefile b/src/interfaces/ecpg/preproc/Makefile index 30db5a049a..7dd4b2ffdb 100644 --- a/src/interfaces/ecpg/preproc/Makefile +++ b/src/interfaces/ecpg/preproc/Makefile @@ -16,7 +16,7 @@ top_builddir = ../../../.. 
include $(top_builddir)/src/Makefile.global MAJOR_VERSION= 4 -MINOR_VERSION= 12 +MINOR_VERSION= 13 PATCHLEVEL=0 override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile index 1b292d2cf2..83b30b0f9e 100644 --- a/src/interfaces/libpq/Makefile +++ b/src/interfaces/libpq/Makefile @@ -17,7 +17,7 @@ include $(top_builddir)/src/Makefile.global # shared library parameters NAME= pq SO_MAJOR_VERSION= 5 -SO_MINOR_VERSION= 9 +SO_MINOR_VERSION= 10 override CPPFLAGS := -DFRONTEND -DUNSAFE_STAT_OK -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port ifneq ($(PORTNAME), win32) diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index fe905d3c9d..16180f68ed 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -270,7 +270,7 @@ sub mkvcbuild $ecpg->AddPrefixInclude('src/interfaces/ecpg/preproc'); $ecpg->AddFiles('src/interfaces/ecpg/preproc', 'pgc.l', 'preproc.y'); $ecpg->AddDefine('MAJOR_VERSION=4'); - $ecpg->AddDefine('MINOR_VERSION=12'); + $ecpg->AddDefine('MINOR_VERSION=13'); $ecpg->AddDefine('PATCHLEVEL=0'); $ecpg->AddDefine('ECPG_COMPILE'); $ecpg->AddReference($libpgcommon, $libpgport); -- cgit v1.2.3 From 3149a12166120d0b476f5ca7837ebcf0e7124703 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 15 Aug 2016 15:24:54 -0400 Subject: Update git_changelog to know that there's a 9.6 branch. Missed this in the main 10devel version stamping patch. --- src/tools/git_changelog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/tools/git_changelog b/src/tools/git_changelog index c9a503f3fe..b9e631bb9c 100755 --- a/src/tools/git_changelog +++ b/src/tools/git_changelog @@ -57,7 +57,7 @@ require IPC::Open2; # (We could get this from "git branches", but not worth the trouble.) # NB: master must be first! my @BRANCHES = qw(master - REL9_5_STABLE + REL9_6_STABLE REL9_5_STABLE REL9_4_STABLE REL9_3_STABLE REL9_2_STABLE REL9_1_STABLE REL9_0_STABLE REL8_4_STABLE REL8_3_STABLE REL8_2_STABLE REL8_1_STABLE REL8_0_STABLE REL7_4_STABLE REL7_3_STABLE REL7_2_STABLE REL7_1_STABLE REL7_0_PATCHES -- cgit v1.2.3 From b25b6c9701e5c18e3ad3b701df62380f8d138ba0 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Mon, 15 Aug 2016 18:09:55 -0400 Subject: Once again allow LWLocks to be used within DSM segments. Prior to commit 7882c3b0b95640e361f1533fe0f2d02e4e5d8610, it was possible to use LWLocks within DSM segments, but that commit broke this use case by switching from a doubly linked list to a circular linked list. Switch back, using a new bit of general infrastructure for maintaining lists of PGPROCs. Thomas Munro, reviewed by me. 
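The key property of the new proclist infrastructure (its full implementation appears in proclist.h later in this diff) is that list links are pgprocno indexes into the shared PGPROC array rather than raw pointers, so a link is meaningful in every backend even when the memory holding the list, such as a DSM segment, is mapped at a different address in each process. The following is a minimal, self-contained sketch of that idea using hypothetical names; it is not the proclist API itself, only an illustration of why index-based links are position-independent:

    /*
     * Sketch only: links are array indexes, not pointers.  Each process sets
     * "nodes" to wherever it has the shared array mapped; the list remains
     * valid no matter what that address is.
     */
    #define INVALID_INDEX (-1)

    typedef struct node
    {
        int         next;       /* index of next node, or INVALID_INDEX */
        int         prev;       /* index of previous node, or INVALID_INDEX */
    } node;

    typedef struct list_head
    {
        int         head;
        int         tail;
    } list_head;

    static node *nodes;         /* per-process pointer to the shared array */

    static void
    list_init(list_head *list)
    {
        list->head = list->tail = INVALID_INDEX;
    }

    static void
    list_push_tail(list_head *list, int i)
    {
        nodes[i].next = INVALID_INDEX;
        nodes[i].prev = list->tail;
        if (list->tail == INVALID_INDEX)
            list->head = i;     /* list was empty */
        else
            nodes[list->tail].next = i;
        list->tail = i;
    }

    static void
    list_delete(list_head *list, int i)
    {
        if (nodes[i].prev == INVALID_INDEX)
            list->head = nodes[i].next;
        else
            nodes[nodes[i].prev].next = nodes[i].next;
        if (nodes[i].next == INVALID_INDEX)
            list->tail = nodes[i].prev;
        else
            nodes[nodes[i].next].prev = nodes[i].prev;
    }

The proclist code below follows the same pattern, except that the index is a pgprocno resolved through GetPGProcByNumber() and the offset of the link member within PGPROC is passed in, so one set of functions can serve any embedded list link.
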
--- src/backend/storage/lmgr/lwlock.c | 63 +++++++------- src/include/storage/lwlock.h | 4 +- src/include/storage/proc.h | 6 +- src/include/storage/proclist.h | 154 +++++++++++++++++++++++++++++++++++ src/include/storage/proclist_types.h | 45 ++++++++++ src/tools/pgindent/typedefs.list | 3 + 6 files changed, 240 insertions(+), 35 deletions(-) create mode 100644 src/include/storage/proclist.h create mode 100644 src/include/storage/proclist_types.h (limited to 'src') diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 7ffa87d914..303e99c65b 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -84,6 +84,7 @@ #include "storage/ipc.h" #include "storage/predicate.h" #include "storage/proc.h" +#include "storage/proclist.h" #include "storage/spin.h" #include "utils/memutils.h" @@ -717,7 +718,7 @@ LWLockInitialize(LWLock *lock, int tranche_id) pg_atomic_init_u32(&lock->nwaiters, 0); #endif lock->tranche = tranche_id; - dlist_init(&lock->waiters); + proclist_init(&lock->waiters); } /* @@ -920,25 +921,25 @@ LWLockWakeup(LWLock *lock) { bool new_release_ok; bool wokeup_somebody = false; - dlist_head wakeup; - dlist_mutable_iter iter; + proclist_head wakeup; + proclist_mutable_iter iter; - dlist_init(&wakeup); + proclist_init(&wakeup); new_release_ok = true; /* lock wait list while collecting backends to wake up */ LWLockWaitListLock(lock); - dlist_foreach_modify(iter, &lock->waiters) + proclist_foreach_modify(iter, &lock->waiters, lwWaitLink) { - PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur); + PGPROC *waiter = GetPGProcByNumber(iter.cur); if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE) continue; - dlist_delete(&waiter->lwWaitLink); - dlist_push_tail(&wakeup, &waiter->lwWaitLink); + proclist_delete(&lock->waiters, iter.cur, lwWaitLink); + proclist_push_tail(&wakeup, iter.cur, lwWaitLink); if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE) { @@ -963,7 +964,7 @@ LWLockWakeup(LWLock *lock) break; } - Assert(dlist_is_empty(&wakeup) || pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS); + Assert(proclist_is_empty(&wakeup) || pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS); /* unset required flags, and release lock, in one fell swoop */ { @@ -982,7 +983,7 @@ LWLockWakeup(LWLock *lock) else desired_state &= ~LW_FLAG_RELEASE_OK; - if (dlist_is_empty(&wakeup)) + if (proclist_is_empty(&wakeup)) desired_state &= ~LW_FLAG_HAS_WAITERS; desired_state &= ~LW_FLAG_LOCKED; /* release lock */ @@ -994,12 +995,12 @@ LWLockWakeup(LWLock *lock) } /* Awaken any waiters I removed from the queue. 
*/ - dlist_foreach_modify(iter, &wakeup) + proclist_foreach_modify(iter, &wakeup, lwWaitLink) { - PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur); + PGPROC *waiter = GetPGProcByNumber(iter.cur); LOG_LWDEBUG("LWLockRelease", lock, "release waiter"); - dlist_delete(&waiter->lwWaitLink); + proclist_delete(&wakeup, iter.cur, lwWaitLink); /* * Guarantee that lwWaiting being unset only becomes visible once the @@ -1046,9 +1047,9 @@ LWLockQueueSelf(LWLock *lock, LWLockMode mode) /* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */ if (mode == LW_WAIT_UNTIL_FREE) - dlist_push_head(&lock->waiters, &MyProc->lwWaitLink); + proclist_push_head(&lock->waiters, MyProc->pgprocno, lwWaitLink); else - dlist_push_tail(&lock->waiters, &MyProc->lwWaitLink); + proclist_push_tail(&lock->waiters, MyProc->pgprocno, lwWaitLink); /* Can release the mutex now */ LWLockWaitListUnlock(lock); @@ -1070,7 +1071,7 @@ static void LWLockDequeueSelf(LWLock *lock) { bool found = false; - dlist_mutable_iter iter; + proclist_mutable_iter iter; #ifdef LWLOCK_STATS lwlock_stats *lwstats; @@ -1086,19 +1087,17 @@ LWLockDequeueSelf(LWLock *lock) * Can't just remove ourselves from the list, but we need to iterate over * all entries as somebody else could have unqueued us. */ - dlist_foreach_modify(iter, &lock->waiters) + proclist_foreach_modify(iter, &lock->waiters, lwWaitLink) { - PGPROC *proc = dlist_container(PGPROC, lwWaitLink, iter.cur); - - if (proc == MyProc) + if (iter.cur == MyProc->pgprocno) { found = true; - dlist_delete(&proc->lwWaitLink); + proclist_delete(&lock->waiters, iter.cur, lwWaitLink); break; } } - if (dlist_is_empty(&lock->waiters) && + if (proclist_is_empty(&lock->waiters) && (pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) != 0) { pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_HAS_WAITERS); @@ -1719,12 +1718,12 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval) void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val) { - dlist_head wakeup; - dlist_mutable_iter iter; + proclist_head wakeup; + proclist_mutable_iter iter; PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE); - dlist_init(&wakeup); + proclist_init(&wakeup); LWLockWaitListLock(lock); @@ -1737,15 +1736,15 @@ LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val) * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken * up. They are always in the front of the queue. */ - dlist_foreach_modify(iter, &lock->waiters) + proclist_foreach_modify(iter, &lock->waiters, lwWaitLink) { - PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur); + PGPROC *waiter = GetPGProcByNumber(iter.cur); if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE) break; - dlist_delete(&waiter->lwWaitLink); - dlist_push_tail(&wakeup, &waiter->lwWaitLink); + proclist_delete(&lock->waiters, iter.cur, lwWaitLink); + proclist_push_tail(&wakeup, iter.cur, lwWaitLink); } /* We are done updating shared state of the lock itself. */ @@ -1754,11 +1753,11 @@ LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val) /* * Awaken any waiters I removed from the queue. 
*/ - dlist_foreach_modify(iter, &wakeup) + proclist_foreach_modify(iter, &wakeup, lwWaitLink) { - PGPROC *waiter = dlist_container(PGPROC, lwWaitLink, iter.cur); + PGPROC *waiter = GetPGProcByNumber(iter.cur); - dlist_delete(&waiter->lwWaitLink); + proclist_delete(&wakeup, iter.cur, lwWaitLink); /* check comment in LWLockWakeup() about this barrier */ pg_write_barrier(); waiter->lwWaiting = false; diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h index 3db11e43f0..959f5f1e4d 100644 --- a/src/include/storage/lwlock.h +++ b/src/include/storage/lwlock.h @@ -18,7 +18,7 @@ #error "lwlock.h may not be included from frontend code" #endif -#include "lib/ilist.h" +#include "storage/proclist_types.h" #include "storage/s_lock.h" #include "port/atomics.h" @@ -59,7 +59,7 @@ typedef struct LWLock { uint16 tranche; /* tranche ID */ pg_atomic_uint32 state; /* state of exclusive/nonexclusive lockers */ - dlist_head waiters; /* list of waiting PGPROCs */ + proclist_head waiters; /* list of waiting PGPROCs */ #ifdef LOCK_DEBUG pg_atomic_uint32 nwaiters; /* number of waiters */ struct PGPROC *owner; /* last exclusive owner of the lock */ diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h index 775c66a197..f576f052df 100644 --- a/src/include/storage/proc.h +++ b/src/include/storage/proc.h @@ -19,6 +19,7 @@ #include "storage/latch.h" #include "storage/lock.h" #include "storage/pg_sema.h" +#include "storage/proclist_types.h" /* * Each backend advertises up to PGPROC_MAX_CACHED_SUBXIDS TransactionIds @@ -112,7 +113,7 @@ struct PGPROC /* Info about LWLock the process is currently waiting for, if any. */ bool lwWaiting; /* true if waiting for an LW lock */ uint8 lwWaitMode; /* lwlock mode being waited for */ - dlist_node lwWaitLink; /* position in LW lock wait list */ + proclist_node lwWaitLink; /* position in LW lock wait list */ /* Info about lock the process is currently waiting for, if any. */ /* waitLock and waitProcLock are NULL if not currently waiting. */ @@ -243,6 +244,9 @@ extern PROC_HDR *ProcGlobal; extern PGPROC *PreparedXactProcs; +/* Accessor for PGPROC given a pgprocno. */ +#define GetPGProcByNumber(n) (&ProcGlobal->allProcs[(n)]) + /* * We set aside some extra PGPROC structures for auxiliary processes, * ie things that aren't full-fledged backends but need shmem access. diff --git a/src/include/storage/proclist.h b/src/include/storage/proclist.h new file mode 100644 index 0000000000..2013a406a3 --- /dev/null +++ b/src/include/storage/proclist.h @@ -0,0 +1,154 @@ +/*------------------------------------------------------------------------- + * + * proclist.h + * operations on doubly-linked lists of pgprocnos + * + * The interface is similar to dlist from ilist.h, but uses pgprocno instead + * of pointers. This allows proclist_head to be mapped at different addresses + * in different backends. + * + * See proclist_types.h for the structs that these functions operate on. They + * are separated to break a header dependency cycle with proc.h. + * + * Portions Copyright (c) 2016, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/include/storage/proclist.h + *------------------------------------------------------------------------- + */ +#ifndef PROCLIST_H +#define PROCLIST_H + +#include "storage/proc.h" +#include "storage/proclist_types.h" + +/* + * Initialize a proclist. + */ +static inline void +proclist_init(proclist_head *list) +{ + list->head = list->tail = INVALID_PGPROCNO; +} + +/* + * Is the list empty? 
+ */ +static inline bool +proclist_is_empty(proclist_head *list) +{ + return list->head == INVALID_PGPROCNO; +} + +/* + * Get a pointer to a proclist_node inside a given PGPROC, given a procno and + * an offset. + */ +static inline proclist_node * +proclist_node_get(int procno, size_t node_offset) +{ + char *entry = (char *) GetPGProcByNumber(procno); + + return (proclist_node *) (entry + node_offset); +} + +/* + * Insert a node at the beginning of a list. + */ +static inline void +proclist_push_head_offset(proclist_head *list, int procno, size_t node_offset) +{ + proclist_node *node = proclist_node_get(procno, node_offset); + + if (list->head == INVALID_PGPROCNO) + { + Assert(list->tail == INVALID_PGPROCNO); + node->next = node->prev = INVALID_PGPROCNO; + list->head = list->tail = procno; + } + else + { + Assert(list->tail != INVALID_PGPROCNO); + node->next = list->head; + proclist_node_get(node->next, node_offset)->prev = procno; + node->prev = INVALID_PGPROCNO; + list->head = procno; + } +} + +/* + * Insert a node a the end of a list. + */ +static inline void +proclist_push_tail_offset(proclist_head *list, int procno, size_t node_offset) +{ + proclist_node *node = proclist_node_get(procno, node_offset); + + if (list->tail == INVALID_PGPROCNO) + { + Assert(list->head == INVALID_PGPROCNO); + node->next = node->prev = INVALID_PGPROCNO; + list->head = list->tail = procno; + } + else + { + Assert(list->head != INVALID_PGPROCNO); + node->prev = list->tail; + proclist_node_get(node->prev, node_offset)->next = procno; + node->next = INVALID_PGPROCNO; + list->tail = procno; + } +} + +/* + * Delete a node. The node must be in the list. + */ +static inline void +proclist_delete_offset(proclist_head *list, int procno, size_t node_offset) +{ + proclist_node *node = proclist_node_get(procno, node_offset); + + if (node->prev == INVALID_PGPROCNO) + list->head = node->next; + else + proclist_node_get(node->prev, node_offset)->next = node->next; + + if (node->next == INVALID_PGPROCNO) + list->tail = node->prev; + else + proclist_node_get(node->next, node_offset)->prev = node->prev; +} + +/* + * Helper macros to avoid repetition of offsetof(PGPROC, ). + * 'link_member' is the name of a proclist_node member in PGPROC. + */ +#define proclist_delete(list, procno, link_member) \ + proclist_delete_offset((list), (procno), offsetof(PGPROC, link_member)) +#define proclist_push_head(list, procno, link_member) \ + proclist_push_head_offset((list), (procno), offsetof(PGPROC, link_member)) +#define proclist_push_tail(list, procno, link_member) \ + proclist_push_tail_offset((list), (procno), offsetof(PGPROC, link_member)) + +/* + * Iterate through the list pointed at by 'lhead', storing the current + * position in 'iter'. 'link_member' is the name of a proclist_node member in + * PGPROC. Access the current position with iter.cur. + * + * The only list modification allowed while iterating is deleting the current + * node with proclist_delete(list, iter.cur, node_offset). + */ +#define proclist_foreach_modify(iter, lhead, link_member) \ + for (AssertVariableIsOfTypeMacro(iter, proclist_mutable_iter), \ + AssertVariableIsOfTypeMacro(lhead, proclist_head *), \ + (iter).cur = (lhead)->head, \ + (iter).next = (iter).cur == INVALID_PGPROCNO ? INVALID_PGPROCNO : \ + proclist_node_get((iter).cur, \ + offsetof(PGPROC, link_member))->next; \ + (iter).cur != INVALID_PGPROCNO; \ + (iter).cur = (iter).next, \ + (iter).next = (iter).cur == INVALID_PGPROCNO ? 
INVALID_PGPROCNO : \ + proclist_node_get((iter).cur, \ + offsetof(PGPROC, link_member))->next) + +#endif diff --git a/src/include/storage/proclist_types.h b/src/include/storage/proclist_types.h new file mode 100644 index 0000000000..b8b0326853 --- /dev/null +++ b/src/include/storage/proclist_types.h @@ -0,0 +1,45 @@ +/*------------------------------------------------------------------------- + * + * proclist_types.h + * doubly-linked lists of pgprocnos + * + * See proclist.h for functions that operate on these types. + * + * Portions Copyright (c) 2016, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/include/storage/proclist_types.h + *------------------------------------------------------------------------- + */ + +#ifndef PROCLIST_TYPES_H +#define PROCLIST_TYPES_H + +/* + * A node in a list of processes. + */ +typedef struct proclist_node +{ + int next; /* pgprocno of the next PGPROC */ + int prev; /* pgprocno of the prev PGPROC */ +} proclist_node; + +/* + * Head of a doubly-linked list of PGPROCs, identified by pgprocno. + */ +typedef struct proclist_head +{ + int head; /* pgprocno of the head PGPROC */ + int tail; /* pgprocno of the tail PGPROC */ +} proclist_head; + +/* + * List iterator allowing some modifications while iterating. + */ +typedef struct proclist_mutable_iter +{ + int cur; /* pgprocno of the current PGPROC */ + int next; /* pgprocno of the next PGPROC */ +} proclist_mutable_iter; + +#endif diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list index dff6f65ef0..932be2f665 100644 --- a/src/tools/pgindent/typedefs.list +++ b/src/tools/pgindent/typedefs.list @@ -2668,6 +2668,9 @@ printTextRule priv_map process_file_callback_t process_sublinks_context +proclist_head +proclist_mutable_iter +proclist_node promptStatus_t pthread_attr_t pthread_key_t -- cgit v1.2.3 From a7b5573d665c8a37fad9bc69f44c5b4e8760a73b Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 16 Aug 2016 12:49:30 -0400 Subject: Remove separate version numbering for ecpg preprocessor. Once upon a time, it made sense for the ecpg preprocessor to have its own version number, because it used a manually-maintained grammar that wasn't always in sync with the core grammar. But those days are thankfully long gone, leaving only a maintenance nuisance behind. Let's use the PG v10 version numbering changeover as an excuse to get rid of the ecpg version number and just have ecpg identify itself by PG_VERSION. From the user's standpoint, ecpg will go from "4.12" in the 9.6 branch to "10" in the 10 branch, so there's no failure of monotonicity. Discussion: <1471332659.4410.67.camel@postgresql.org> --- src/interfaces/ecpg/preproc/Makefile | 9 ++------- src/interfaces/ecpg/preproc/ecpg.c | 10 +++++----- src/tools/RELEASE_CHANGES | 4 +--- src/tools/msvc/Mkvcbuild.pm | 3 --- 4 files changed, 8 insertions(+), 18 deletions(-) (limited to 'src') diff --git a/src/interfaces/ecpg/preproc/Makefile b/src/interfaces/ecpg/preproc/Makefile index 7dd4b2ffdb..b5dbdd6150 100644 --- a/src/interfaces/ecpg/preproc/Makefile +++ b/src/interfaces/ecpg/preproc/Makefile @@ -15,16 +15,11 @@ subdir = src/interfaces/ecpg/preproc top_builddir = ../../../.. include $(top_builddir)/src/Makefile.global -MAJOR_VERSION= 4 -MINOR_VERSION= 13 -PATCHLEVEL=0 - override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ - -I. -I$(srcdir) -DMAJOR_VERSION=$(MAJOR_VERSION) \ - -DMINOR_VERSION=$(MINOR_VERSION) -DPATCHLEVEL=$(PATCHLEVEL) \ + -I. 
-I$(srcdir) -DECPG_COMPILE \ $(CPPFLAGS) -override CFLAGS += $(PTHREAD_CFLAGS) -DECPG_COMPILE +override CFLAGS += $(PTHREAD_CFLAGS) OBJS= preproc.o pgc.o type.o ecpg.o output.o parser.o \ keywords.o c_keywords.o ecpg_keywords.o ../ecpglib/typename.o descriptor.o variable.o \ diff --git a/src/interfaces/ecpg/preproc/ecpg.c b/src/interfaces/ecpg/preproc/ecpg.c index c7fd034bc1..3ce9d04bcc 100644 --- a/src/interfaces/ecpg/preproc/ecpg.c +++ b/src/interfaces/ecpg/preproc/ecpg.c @@ -150,8 +150,7 @@ main(int argc, char *const argv[]) switch (c) { case ECPG_GETOPT_LONG_VERSION: - printf("ecpg (PostgreSQL %s) %d.%d.%d\n", PG_VERSION, - MAJOR_VERSION, MINOR_VERSION, PATCHLEVEL); + printf("ecpg %s\n", PG_VERSION); exit(0); case ECPG_GETOPT_LONG_HELP: help(progname); @@ -264,8 +263,9 @@ main(int argc, char *const argv[]) if (verbose) { - fprintf(stderr, _("%s, the PostgreSQL embedded C preprocessor, version %d.%d.%d\n"), - progname, MAJOR_VERSION, MINOR_VERSION, PATCHLEVEL); + fprintf(stderr, + _("%s, the PostgreSQL embedded C preprocessor, version %s\n"), + progname, PG_VERSION); fprintf(stderr, _("EXEC SQL INCLUDE ... search starts here:\n")); for (ip = include_paths; ip != NULL; ip = ip->next) fprintf(stderr, " %s\n", ip->path); @@ -440,7 +440,7 @@ main(int argc, char *const argv[]) if (regression_mode) fprintf(yyout, "/* Processed by ecpg (regression mode) */\n"); else - fprintf(yyout, "/* Processed by ecpg (%d.%d.%d) */\n", MAJOR_VERSION, MINOR_VERSION, PATCHLEVEL); + fprintf(yyout, "/* Processed by ecpg (%s) */\n", PG_VERSION); if (header_mode == false) { diff --git a/src/tools/RELEASE_CHANGES b/src/tools/RELEASE_CHANGES index ad49220592..e6e294b07e 100644 --- a/src/tools/RELEASE_CHANGES +++ b/src/tools/RELEASE_CHANGES @@ -80,13 +80,11 @@ Starting a New Development Cycle * Add version tag to src/tools/git_changelog * Bump minor library versions, major if appropriate (see below) - o Look for SO_MINOR_VERSION and MINOR_VERSION macros in + o Look for SO_MINOR_VERSION macros in src/interfaces/ecpg/compatlib/Makefile src/interfaces/ecpg/ecpglib/Makefile src/interfaces/ecpg/pgtypeslib/Makefile - src/interfaces/ecpg/preproc/Makefile src/interfaces/libpq/Makefile - src/tools/msvc/Mkvcbuild.pm Creating Back-Branch Release Notes diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index 16180f68ed..da4d9847fc 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -269,9 +269,6 @@ sub mkvcbuild $ecpg->AddIncludeDir('src/interfaces/libpq'); $ecpg->AddPrefixInclude('src/interfaces/ecpg/preproc'); $ecpg->AddFiles('src/interfaces/ecpg/preproc', 'pgc.l', 'preproc.y'); - $ecpg->AddDefine('MAJOR_VERSION=4'); - $ecpg->AddDefine('MINOR_VERSION=13'); - $ecpg->AddDefine('PATCHLEVEL=0'); $ecpg->AddDefine('ECPG_COMPILE'); $ecpg->AddReference($libpgcommon, $libpgport); -- cgit v1.2.3 From 41fb35fabf03bffa812caddf24323d4d06f811ba Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Tue, 16 Aug 2016 13:23:32 -0400 Subject: Fix possible crash due to incorrect allocation context. Commit af33039317ddc4a0e38a02e2255c2bf453115fd2 aimed to reduce leakage from tqueue.c, which is good. Unfortunately, by changing the memory context in which all of gather_readnext() executes, it also changed the context in which ExecShutdownGatherWorkers executes, which is not good, because that function eventually causes a call to ExecParallelRetrieveInstrumentation, which proceeds to allocate planstate->worker_instrument in a short-lived context, causing a crash. 
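The fix is to perform that allocation in the per-query memory context explicitly. As a hedged illustration of the idiom the patch below relies on (the function name, "estate" and "nbytes" here are placeholders, not code from the patch): data that must outlive the current, possibly short-lived, context is allocated after switching to a longer-lived context, and the caller's context is restored immediately afterwards.

    /*
     * Illustrative backend-style fragment only.  Whatever is allocated here
     * lives until the per-query context is reset, regardless of which
     * context the caller happened to be running in.
     */
    static void *
    alloc_in_query_context(EState *estate, Size nbytes)
    {
        MemoryContext oldcontext;
        void       *result;

        oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
        result = palloc(nbytes);
        MemoryContextSwitchTo(oldcontext);

        return result;
    }
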
Rushabh Lathia, reviewed by Amit Kapila and by me. --- src/backend/executor/execParallel.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index 380d743f6c..5aa6f023bf 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -500,6 +500,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate, int n; int ibytes; int plan_node_id = planstate->plan->plan_node_id; + MemoryContext oldcontext; /* Find the instumentation for this node. */ for (i = 0; i < instrumentation->num_plan_nodes; ++i) @@ -514,10 +515,19 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate, for (n = 0; n < instrumentation->num_workers; ++n) InstrAggNode(planstate->instrument, &instrument[n]); - /* Also store the per-worker detail. */ + /* + * Also store the per-worker detail. + * + * Worker instrumentation should be allocated in the same context as + * the regular instrumentation information, which is the per-query + * context. Switch into per-query memory context. + */ + oldcontext = MemoryContextSwitchTo(planstate->state->es_query_cxt); ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation)); planstate->worker_instrument = palloc(ibytes + offsetof(WorkerInstrumentation, instrument)); + MemoryContextSwitchTo(oldcontext); + planstate->worker_instrument->num_workers = instrumentation->num_workers; memcpy(&planstate->worker_instrument->instrument, instrument, ibytes); -- cgit v1.2.3 From a3bce17ef1ca6408e8d1e7b10fb767aef1729be6 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 16 Aug 2016 13:58:44 -0400 Subject: Automate the maintenance of SO_MINOR_VERSION for our shared libraries. Up to now we've manually adjusted these numbers in several different Makefiles at the start of each development cycle. While that's not much work, it's easily forgotten, so let's get rid of it by setting the SO_MINOR_VERSION values directly from $(MAJORVERSION). In the case of libpq, this dev cycle's value of SO_MINOR_VERSION happens to be "10" anyway, so this switch is transparent. For ecpg's shared libraries, this will result in skipping one or two minor version numbers between v9.6 and v10, which seems like no big problem; and it was a bit inconsistent that they didn't have equal minor version numbers anyway. 
Discussion: <21969.1471287988@sss.pgh.pa.us> --- src/interfaces/ecpg/compatlib/Makefile | 2 +- src/interfaces/ecpg/ecpglib/Makefile | 2 +- src/interfaces/ecpg/pgtypeslib/Makefile | 2 +- src/interfaces/libpq/Makefile | 2 +- src/tools/RELEASE_CHANGES | 30 ++++++++++++++++-------------- 5 files changed, 20 insertions(+), 18 deletions(-) (limited to 'src') diff --git a/src/interfaces/ecpg/compatlib/Makefile b/src/interfaces/ecpg/compatlib/Makefile index 2f58ad8592..0b19d832b0 100644 --- a/src/interfaces/ecpg/compatlib/Makefile +++ b/src/interfaces/ecpg/compatlib/Makefile @@ -16,7 +16,7 @@ include $(top_builddir)/src/Makefile.global PGFILEDESC = "ECPG compat - compatibility library for ECPG" NAME= ecpg_compat SO_MAJOR_VERSION= 3 -SO_MINOR_VERSION= 9 +SO_MINOR_VERSION= $(MAJORVERSION) override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -I$(libpq_srcdir) -DFRONTEND $(CPPFLAGS) diff --git a/src/interfaces/ecpg/ecpglib/Makefile b/src/interfaces/ecpg/ecpglib/Makefile index 00503b33dd..c9c2499a5e 100644 --- a/src/interfaces/ecpg/ecpglib/Makefile +++ b/src/interfaces/ecpg/ecpglib/Makefile @@ -16,7 +16,7 @@ include $(top_builddir)/src/Makefile.global PGFILEDESC = "ECPG - embedded SQL in C" NAME= ecpg SO_MAJOR_VERSION= 6 -SO_MINOR_VERSION= 9 +SO_MINOR_VERSION= $(MAJORVERSION) override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -I$(libpq_srcdir) -I$(top_builddir)/src/port -DFRONTEND $(CPPFLAGS) diff --git a/src/interfaces/ecpg/pgtypeslib/Makefile b/src/interfaces/ecpg/pgtypeslib/Makefile index ac278948a5..9c9ff08ae8 100644 --- a/src/interfaces/ecpg/pgtypeslib/Makefile +++ b/src/interfaces/ecpg/pgtypeslib/Makefile @@ -16,7 +16,7 @@ include $(top_builddir)/src/Makefile.global PGFILEDESC = "pgtypes - library for data type mapping" NAME= pgtypes SO_MAJOR_VERSION= 3 -SO_MINOR_VERSION= 8 +SO_MINOR_VERSION= $(MAJORVERSION) override CPPFLAGS := -I../include -I$(top_srcdir)/src/interfaces/ecpg/include \ -DFRONTEND $(CPPFLAGS) diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile index 83b30b0f9e..0b4065ed8f 100644 --- a/src/interfaces/libpq/Makefile +++ b/src/interfaces/libpq/Makefile @@ -17,7 +17,7 @@ include $(top_builddir)/src/Makefile.global # shared library parameters NAME= pq SO_MAJOR_VERSION= 5 -SO_MINOR_VERSION= 10 +SO_MINOR_VERSION= $(MAJORVERSION) override CPPFLAGS := -DFRONTEND -DUNSAFE_STAT_OK -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port ifneq ($(PORTNAME), win32) diff --git a/src/tools/RELEASE_CHANGES b/src/tools/RELEASE_CHANGES index e6e294b07e..4f481de12c 100644 --- a/src/tools/RELEASE_CHANGES +++ b/src/tools/RELEASE_CHANGES @@ -73,19 +73,12 @@ Starting a New Development Cycle for example, git push origin master:refs/heads/REL9_2_STABLE +* Add new branch's name to list in src/tools/git_changelog + * Increment the major version number in src/tools/version_stamp.pl * Run "src/tools/version_stamp.pl devel", then run autoconf -* Add version tag to src/tools/git_changelog - -* Bump minor library versions, major if appropriate (see below) - o Look for SO_MINOR_VERSION macros in - src/interfaces/ecpg/compatlib/Makefile - src/interfaces/ecpg/ecpglib/Makefile - src/interfaces/ecpg/pgtypeslib/Makefile - src/interfaces/libpq/Makefile - Creating Back-Branch Release Notes ================================== @@ -139,8 +132,7 @@ function which would give the new field a suitable default value. Adding a new function should NOT force an increase in the major version number. 
(Packagers will see the standard minor number update and install the new library.) When the major version is increased all applications -which link to the library MUST be recompiled - this is not desirable. When -the major version is updated the minor version gets reset. +which link to the library MUST be recompiled - this is not desirable. Minor Version ============= @@ -150,9 +142,19 @@ the library has changed, typically a change in source code between releases would mean an increase in the minor version number so long as it does not require a major version increase. -Given that we make at least minor changes to our libraries in every major -PostgreSQL version, we always bump all minor library version numbers at the -start of each development cycle as a matter of policy. +Given that we make at least some changes to our libraries in every major +PostgreSQL version, we always bump all minor library version numbers in +each development cycle as a matter of policy. This is currently mechanized +by referencing the MAJORVERSION make macro in the value of SO_MINOR_VERSION +for each shared library. As of v10, SO_MINOR_VERSION is simply equal to +MAJORVERSION in all cases. If we ever make an incompatible break in a +library's API, forcing a major version bump, we could continue to increase +SO_MINOR_VERSION (thus, perhaps, going from libpq.so.5.12 to libpq.so.6.13), +or we could reset SO_MINOR_VERSION to zero, using makefile code along the +lines of + SO_MINOR_VERSION= $(shell expr $(MAJORVERSION) - 13) +so that the number continues to increase automatically in later branches. +For now, that complication is not necessary. Minimizing Changes ================== -- cgit v1.2.3 From f0fe1c8f70bacb65513f1cbaea14eb384d346ee8 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Tue, 16 Aug 2016 12:00:00 -0400 Subject: Fix typos From: Alexander Law --- doc/src/sgml/release-9.6.sgml | 2 +- doc/src/sgml/runtime.sgml | 2 +- src/backend/access/transam/multixact.c | 2 +- src/backend/utils/adt/tsquery.c | 2 +- src/test/regress/expected/privileges.out | 2 +- src/test/regress/sql/privileges.sql | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/doc/src/sgml/release-9.6.sgml b/doc/src/sgml/release-9.6.sgml index cc886fa2bb..9003b1f6e4 100644 --- a/doc/src/sgml/release-9.6.sgml +++ b/doc/src/sgml/release-9.6.sgml @@ -1026,7 +1026,7 @@ This commit is also listed under libpq and psql This view exposes the same information available from - the the pg_config comand-line utility, + the pg_config comand-line utility, namely assorted compile-time configuration information for PostgreSQL. diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml index 66fbe441ac..60a06590fe 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -184,7 +184,7 @@ postgres$ initdb -D /usr/local/pgsql/data - Non-C and and non-POSIX locales rely on the + Non-C and non-POSIX locales rely on the operating system's collation library for character set ordering. This controls the ordering of keys stored in indexes. For this reason, a cluster cannot switch to an incompatible collation library version, diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index c2e4fa377d..0c8c17af33 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -2802,7 +2802,7 @@ ReadMultiXactCounts(uint32 *multixacts, MultiXactOffset *members) * more aggressive in clamping this value. 
That not only causes autovacuum * to ramp up, but also makes any manual vacuums the user issues more * aggressive. This happens because vacuum_set_xid_limits() clamps the - * freeze table and and the minimum freeze age based on the effective + * freeze table and the minimum freeze age based on the effective * autovacuum_multixact_freeze_max_age this function returns. In the worst * case, we'll claim the freeze_max_age to zero, and every vacuum of any * table will try to freeze every multixact. diff --git a/src/backend/utils/adt/tsquery.c b/src/backend/utils/adt/tsquery.c index c0a4a0606b..3d11a1c208 100644 --- a/src/backend/utils/adt/tsquery.c +++ b/src/backend/utils/adt/tsquery.c @@ -691,7 +691,7 @@ parse_tsquery(char *buf, findoprnd(ptr, query->size, &needcleanup); /* - * QI_VALSTOP nodes should be cleaned and and OP_PHRASE should be pushed + * QI_VALSTOP nodes should be cleaned and OP_PHRASE should be pushed * down */ if (needcleanup) diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out index 996ebcdca2..f66b4432a1 100644 --- a/src/test/regress/expected/privileges.out +++ b/src/test/regress/expected/privileges.out @@ -390,7 +390,7 @@ INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set one = 8; -- f ERROR: permission denied for relation atest5 INSERT INTO atest5(three) VALUES (4) ON CONFLICT (two) DO UPDATE set three = 10; -- fails (due to INSERT) ERROR: permission denied for relation atest5 --- Check that the the columns in the inference require select privileges +-- Check that the columns in the inference require select privileges -- Error. No privs on four INSERT INTO atest5(three) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 10; ERROR: permission denied for relation atest5 diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql index 0aa9c672d5..00dc7bd4ab 100644 --- a/src/test/regress/sql/privileges.sql +++ b/src/test/regress/sql/privileges.sql @@ -259,7 +259,7 @@ INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLU INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.three; INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set one = 8; -- fails (due to UPDATE) INSERT INTO atest5(three) VALUES (4) ON CONFLICT (two) DO UPDATE set three = 10; -- fails (due to INSERT) --- Check that the the columns in the inference require select privileges +-- Check that the columns in the inference require select privileges -- Error. No privs on four INSERT INTO atest5(three) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 10; -- cgit v1.2.3 From 7f61fd10ceb715eceece49451f6dfe9058044e15 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 16 Aug 2016 15:58:30 -0400 Subject: Fix assorted places in psql to print version numbers >= 10 in new style. This is somewhat cosmetic, since as long as you know what you are looking at, "10.0" is a serviceable substitute for "10". But there is a potential for confusion between version numbers with minor numbers and those without --- we don't want people asking "why is psql saying 10.0 when my server is 10.2". Therefore, back-patch as far as practical, which turns out to be 9.3. I could have redone the patch to use fprintf(stderr) in place of psql_error(), but it seems more work than is warranted for branches that will be EOL or nearly so by the time v10 comes out. 
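For concreteness, the difference between the two numbering styles that the new helper added below has to handle can be shown with a small standalone program; this restates the helper's logic rather than calling it, and the version values are examples only:

    #include <stdio.h>

    /*
     * Standalone illustration of the two formats handled by the new
     * formatPGVersionNumber() helper (see string_utils.c below).
     */
    static void
    show(int version_number)
    {
        if (version_number >= 100000)
        {
            /* New two-part style: 100002 prints as "10" or "10.2" */
            printf("%d -> \"%d\" or \"%d.%d\"\n", version_number,
                   version_number / 10000,
                   version_number / 10000, version_number % 10000);
        }
        else
        {
            /* Old three-part style: 90603 prints as "9.6" or "9.6.3" */
            printf("%d -> \"%d.%d\" or \"%d.%d.%d\"\n", version_number,
                   version_number / 10000, (version_number / 100) % 100,
                   version_number / 10000, (version_number / 100) % 100,
                   version_number % 100);
        }
    }

    int
    main(void)
    {
        show(90603);
        show(100002);
        return 0;
    }
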
Although only psql seems to contain any code that needs this, I chose to put the support function into fe_utils, since it seems likely we'll need it in other client programs in future. (In 9.3-9.5, use dumputils.c, the predecessor of fe_utils/string_utils.c.) In HEAD, also fix the backend code that whines about loadable-library version mismatch. I don't see much need to back-patch that. --- src/backend/utils/fmgr/dfmgr.c | 18 +++++-- src/bin/psql/command.c | 50 ++++++++++------- src/bin/psql/common.c | 8 ++- src/bin/psql/describe.c | 105 +++++++++++++++++++++++++----------- src/fe_utils/string_utils.c | 38 +++++++++++++ src/include/fe_utils/string_utils.h | 3 ++ 6 files changed, 167 insertions(+), 55 deletions(-) (limited to 'src') diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c index f41035d33c..6f70813a6d 100644 --- a/src/backend/utils/fmgr/dfmgr.c +++ b/src/backend/utils/fmgr/dfmgr.c @@ -300,14 +300,22 @@ incompatible_module_error(const char *libname, * block might not even have the fields we expect. */ if (magic_data.version != module_magic_data->version) + { + char library_version[32]; + + if (module_magic_data->version >= 1000) + snprintf(library_version, sizeof(library_version), "%d", + module_magic_data->version / 100); + else + snprintf(library_version, sizeof(library_version), "%d.%d", + module_magic_data->version / 100, + module_magic_data->version % 100); ereport(ERROR, (errmsg("incompatible library \"%s\": version mismatch", libname), - errdetail("Server is version %d.%d, library is version %d.%d.", - magic_data.version / 100, - magic_data.version % 100, - module_magic_data->version / 100, - module_magic_data->version % 100))); + errdetail("Server is version %d, library is version %s.", + magic_data.version / 100, library_version))); + } /* * Otherwise, spell out which fields don't agree. 
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 9c0af4e848..4aaf657cce 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -635,8 +635,11 @@ exec_command(const char *cmd, if (pset.sversion < 80400) { - psql_error("The server (version %d.%d) does not support editing function source.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support editing function source.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); status = PSQL_CMD_ERROR; } else if (!query_buf) @@ -731,8 +734,11 @@ exec_command(const char *cmd, if (pset.sversion < 70400) { - psql_error("The server (version %d.%d) does not support editing view definitions.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support editing view definitions.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); status = PSQL_CMD_ERROR; } else if (!query_buf) @@ -1362,8 +1368,11 @@ exec_command(const char *cmd, OT_WHOLE_LINE, NULL, true); if (pset.sversion < 80400) { - psql_error("The server (version %d.%d) does not support showing function source.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support showing function source.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); status = PSQL_CMD_ERROR; } else if (!func) @@ -1441,8 +1450,11 @@ exec_command(const char *cmd, OT_WHOLE_LINE, NULL, true); if (pset.sversion < 70400) { - psql_error("The server (version %d.%d) does not support showing view definitions.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support showing view definitions.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); status = PSQL_CMD_ERROR; } else if (!view) @@ -2014,22 +2026,21 @@ connection_warnings(bool in_startup) if (!pset.quiet && !pset.notty) { int client_ver = PG_VERSION_NUM; + char cverbuf[32]; + char sverbuf[32]; if (pset.sversion != client_ver) { const char *server_version; - char server_ver_str[16]; /* Try to get full text form, might include "devel" etc */ server_version = PQparameterStatus(pset.db, "server_version"); + /* Otherwise fall back on pset.sversion */ if (!server_version) { - snprintf(server_ver_str, sizeof(server_ver_str), - "%d.%d.%d", - pset.sversion / 10000, - (pset.sversion / 100) % 100, - pset.sversion % 100); - server_version = server_ver_str; + formatPGVersionNumber(pset.sversion, true, + sverbuf, sizeof(sverbuf)); + server_version = sverbuf; } printf(_("%s (%s, server %s)\n"), @@ -2040,10 +2051,13 @@ connection_warnings(bool in_startup) printf("%s (%s)\n", pset.progname, PG_VERSION); if (pset.sversion / 100 > client_ver / 100) - printf(_("WARNING: %s major version %d.%d, server major version %d.%d.\n" + printf(_("WARNING: %s major version %s, server major version %s.\n" " Some psql features might not work.\n"), - pset.progname, client_ver / 10000, (client_ver / 100) % 100, - pset.sversion / 10000, (pset.sversion / 100) % 100); + pset.progname, + formatPGVersionNumber(client_ver, false, + cverbuf, sizeof(cverbuf)), + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); #ifdef WIN32 checkWin32Codepage(); diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c index 2450b9c3f8..7399950284 100644 --- 
a/src/bin/psql/common.c +++ b/src/bin/psql/common.c @@ -18,6 +18,7 @@ #include #endif +#include "fe_utils/string_utils.h" #include "portability/instr_time.h" #include "settings.h" @@ -1202,8 +1203,11 @@ SendQuery(const char *query) { if (on_error_rollback_warning == false && pset.sversion < 80000) { - psql_error("The server (version %d.%d) does not support savepoints for ON_ERROR_ROLLBACK.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support savepoints for ON_ERROR_ROLLBACK.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); on_error_rollback_warning = true; } else diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index 27be10215b..6275a688c7 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -142,8 +142,11 @@ describeAccessMethods(const char *pattern, bool verbose) if (pset.sversion < 90600) { - psql_error("The server (version %d.%d) does not support access methods.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support access methods.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -205,8 +208,11 @@ describeTablespaces(const char *pattern, bool verbose) if (pset.sversion < 80000) { - psql_error("The server (version %d.%d) does not support tablespaces.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support tablespaces.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -311,8 +317,11 @@ describeFunctions(const char *functypes, const char *pattern, bool verbose, bool if (showWindow && pset.sversion < 80400) { - psql_error("\\df does not take a \"w\" option with server version %d.%d\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("\\df does not take a \"w\" option with server version %s\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -962,8 +971,11 @@ listDefaultACLs(const char *pattern) if (pset.sversion < 90000) { - psql_error("The server (version %d.%d) does not support altering default privileges.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support altering default privileges.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -3548,8 +3560,11 @@ listCollations(const char *pattern, bool verbose, bool showSystem) if (pset.sversion < 90100) { - psql_error("The server (version %d.%d) does not support collations.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support collations.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -3680,8 +3695,11 @@ listTSParsers(const char *pattern, bool verbose) if (pset.sversion < 80300) { - psql_error("The server (version %d.%d) does not support full text search.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support full text search.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -3915,8 +3933,11 @@ listTSDictionaries(const char *pattern, bool verbose) if (pset.sversion < 80300) { 
- psql_error("The server (version %d.%d) does not support full text search.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support full text search.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -3983,8 +4004,11 @@ listTSTemplates(const char *pattern, bool verbose) if (pset.sversion < 80300) { - psql_error("The server (version %d.%d) does not support full text search.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support full text search.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -4051,8 +4075,11 @@ listTSConfigs(const char *pattern, bool verbose) if (pset.sversion < 80300) { - psql_error("The server (version %d.%d) does not support full text search.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support full text search.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -4249,8 +4276,11 @@ listForeignDataWrappers(const char *pattern, bool verbose) if (pset.sversion < 80400) { - psql_error("The server (version %d.%d) does not support foreign-data wrappers.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support foreign-data wrappers.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -4329,8 +4359,11 @@ listForeignServers(const char *pattern, bool verbose) if (pset.sversion < 80400) { - psql_error("The server (version %d.%d) does not support foreign servers.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support foreign servers.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -4408,8 +4441,11 @@ listUserMappings(const char *pattern, bool verbose) if (pset.sversion < 80400) { - psql_error("The server (version %d.%d) does not support user mappings.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support user mappings.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -4466,8 +4502,11 @@ listForeignTables(const char *pattern, bool verbose) if (pset.sversion < 90100) { - psql_error("The server (version %d.%d) does not support foreign tables.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support foreign tables.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -4541,8 +4580,11 @@ listExtensions(const char *pattern) if (pset.sversion < 90100) { - psql_error("The server (version %d.%d) does not support extensions.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + psql_error("The server (version %s) does not support extensions.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } @@ -4595,8 +4637,11 @@ listExtensionContents(const char *pattern) if (pset.sversion < 90100) { - psql_error("The server (version %d.%d) does not support extensions.\n", - pset.sversion / 10000, (pset.sversion / 100) % 100); + char sverbuf[32]; + + 
psql_error("The server (version %s) does not support extensions.\n", + formatPGVersionNumber(pset.sversion, false, + sverbuf, sizeof(sverbuf))); return true; } diff --git a/src/fe_utils/string_utils.c b/src/fe_utils/string_utils.c index f986dbcf39..2c566b1ad7 100644 --- a/src/fe_utils/string_utils.c +++ b/src/fe_utils/string_utils.c @@ -168,6 +168,44 @@ fmtQualifiedId(int remoteVersion, const char *schema, const char *id) } +/* + * Format a Postgres version number (in the PG_VERSION_NUM integer format + * returned by PQserverVersion()) as a string. This exists mainly to + * encapsulate knowledge about two-part vs. three-part version numbers. + * + * For re-entrancy, caller must supply the buffer the string is put in. + * Recommended size of the buffer is 32 bytes. + * + * Returns address of 'buf', as a notational convenience. + */ +char * +formatPGVersionNumber(int version_number, bool include_minor, + char *buf, size_t buflen) +{ + if (version_number >= 100000) + { + /* New two-part style */ + if (include_minor) + snprintf(buf, buflen, "%d.%d", version_number / 10000, + version_number % 10000); + else + snprintf(buf, buflen, "%d", version_number / 10000); + } + else + { + /* Old three-part style */ + if (include_minor) + snprintf(buf, buflen, "%d.%d.%d", version_number / 10000, + (version_number / 100) % 100, + version_number % 100); + else + snprintf(buf, buflen, "%d.%d", version_number / 10000, + (version_number / 100) % 100); + } + return buf; +} + + /* * Convert a string value to an SQL string literal and append it to * the given buffer. We assume the specified client_encoding and diff --git a/src/include/fe_utils/string_utils.h b/src/include/fe_utils/string_utils.h index 7bbed360a3..452ffc0771 100644 --- a/src/include/fe_utils/string_utils.h +++ b/src/include/fe_utils/string_utils.h @@ -30,6 +30,9 @@ extern const char *fmtId(const char *identifier); extern const char *fmtQualifiedId(int remoteVersion, const char *schema, const char *id); +extern char *formatPGVersionNumber(int version_number, bool include_minor, + char *buf, size_t buflen); + extern void appendStringLiteral(PQExpBuffer buf, const char *str, int encoding, bool std_strings); extern void appendStringLiteralConn(PQExpBuffer buf, const char *str, -- cgit v1.2.3 From 4bc4cfe3bd186b4a1d1b01279bfd0e6ab11268b2 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 16 Aug 2016 16:14:16 -0400 Subject: Suppress -Wunused-result warning for strtol(). I'm not sure which bozo thought it's a problem to use strtol() only for its endptr result, but silence the warning using same method used elsewhere. Report: --- src/backend/utils/adt/dbsize.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c index 0e8a82d6f4..3167bad92b 100644 --- a/src/backend/utils/adt/dbsize.c +++ b/src/backend/utils/adt/dbsize.c @@ -759,13 +759,15 @@ pg_size_bytes(PG_FUNCTION_ARGS) /* Part (4): optional exponent */ if (*endptr == 'e' || *endptr == 'E') { + long exponent; char *cp; /* * Note we might one day support EB units, so if what follows 'E' * isn't a number, just treat it all as a unit to be parsed. 
*/ - (void) strtol(endptr + 1, &cp, 10); + exponent = strtol(endptr + 1, &cp, 10); + (void) exponent; /* Silence -Wunused-result warnings */ if (cp > endptr + 1) endptr = cp; } -- cgit v1.2.3 From 0bb51aa96783e8a6c473c2b5e3725e23e95db834 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 16 Aug 2016 20:33:01 -0400 Subject: Improve parsetree representation of special functions such as CURRENT_DATE. We implement a dozen or so parameterless functions that the SQL standard defines special syntax for. Up to now, that was done by converting them into more or less ad-hoc constructs such as "'now'::text::date". That's messy for multiple reasons: it exposes what should be implementation details to users, and performance is worse than it needs to be in several cases. To improve matters, invent a new expression node type SQLValueFunction that can represent any of these parameterless functions. Bump catversion because this changes stored parsetrees for rules. Discussion: <30058.1463091294@sss.pgh.pa.us> --- contrib/pg_stat_statements/pg_stat_statements.c | 9 ++ src/backend/executor/execQual.c | 78 +++++++++++++++ src/backend/nodes/copyfuncs.c | 19 ++++ src/backend/nodes/equalfuncs.c | 14 +++ src/backend/nodes/nodeFuncs.c | 27 +++++- src/backend/nodes/outfuncs.c | 14 +++ src/backend/nodes/readfuncs.c | 18 ++++ src/backend/optimizer/util/clauses.c | 16 ++- src/backend/parser/gram.y | 124 ++++++------------------ src/backend/parser/parse_expr.c | 62 ++++++++++++ src/backend/parser/parse_target.c | 43 ++++++++ src/backend/utils/adt/date.c | 93 ++++++++++++++++-- src/backend/utils/adt/ruleutils.c | 62 ++++++++++++ src/backend/utils/adt/timestamp.c | 60 ++++++++++-- src/include/catalog/catversion.h | 2 +- src/include/nodes/nodes.h | 1 + src/include/nodes/primnodes.h | 39 ++++++++ src/include/utils/date.h | 4 + src/include/utils/timestamp.h | 4 + src/pl/plpgsql/src/pl_exec.c | 3 + src/test/regress/expected/rowsecurity.out | 16 +-- src/test/regress/expected/rules.out | 6 +- src/test/regress/expected/select_views.out | 40 ++++---- src/test/regress/expected/select_views_1.out | 40 ++++---- 24 files changed, 626 insertions(+), 168 deletions(-) (limited to 'src') diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 3d9b8e45d9..8ce24e0401 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -2632,6 +2632,15 @@ JumbleExpr(pgssJumbleState *jstate, Node *node) JumbleExpr(jstate, (Node *) mmexpr->args); } break; + case T_SQLValueFunction: + { + SQLValueFunction *svf = (SQLValueFunction *) node; + + APP_JUMB(svf->op); + /* type is fully determined by op */ + APP_JUMB(svf->typmod); + } + break; case T_XmlExpr: { XmlExpr *xexpr = (XmlExpr *) node; diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index cbb76d1f1c..743e7d636a 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -53,8 +53,10 @@ #include "pgstat.h" #include "utils/acl.h" #include "utils/builtins.h" +#include "utils/date.h" #include "utils/lsyscache.h" #include "utils/memutils.h" +#include "utils/timestamp.h" #include "utils/typcache.h" #include "utils/xml.h" @@ -147,6 +149,9 @@ static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr, static Datum ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); +static Datum ExecEvalSQLValueFunction(ExprState *svfExpr, + ExprContext *econtext, + bool *isNull, ExprDoneCond 
*isDone); static Datum ExecEvalXml(XmlExprState *xmlExpr, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalNullIf(FuncExprState *nullIfExpr, @@ -3530,6 +3535,75 @@ ExecEvalMinMax(MinMaxExprState *minmaxExpr, ExprContext *econtext, return result; } +/* ---------------------------------------------------------------- + * ExecEvalSQLValueFunction + * ---------------------------------------------------------------- + */ +static Datum +ExecEvalSQLValueFunction(ExprState *svfExpr, + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone) +{ + Datum result = (Datum) 0; + SQLValueFunction *svf = (SQLValueFunction *) svfExpr->expr; + FunctionCallInfoData fcinfo; + + if (isDone) + *isDone = ExprSingleResult; + *isNull = false; + + /* + * Note: current_schema() can return NULL. current_user() etc currently + * cannot, but might as well code those cases the same way for safety. + */ + switch (svf->op) + { + case SVFOP_CURRENT_DATE: + result = DateADTGetDatum(GetSQLCurrentDate()); + break; + case SVFOP_CURRENT_TIME: + case SVFOP_CURRENT_TIME_N: + result = TimeTzADTPGetDatum(GetSQLCurrentTime(svf->typmod)); + break; + case SVFOP_CURRENT_TIMESTAMP: + case SVFOP_CURRENT_TIMESTAMP_N: + result = TimestampTzGetDatum(GetSQLCurrentTimestamp(svf->typmod)); + break; + case SVFOP_LOCALTIME: + case SVFOP_LOCALTIME_N: + result = TimeADTGetDatum(GetSQLLocalTime(svf->typmod)); + break; + case SVFOP_LOCALTIMESTAMP: + case SVFOP_LOCALTIMESTAMP_N: + result = TimestampGetDatum(GetSQLLocalTimestamp(svf->typmod)); + break; + case SVFOP_CURRENT_ROLE: + case SVFOP_CURRENT_USER: + case SVFOP_USER: + InitFunctionCallInfoData(fcinfo, NULL, 0, InvalidOid, NULL, NULL); + result = current_user(&fcinfo); + *isNull = fcinfo.isnull; + break; + case SVFOP_SESSION_USER: + InitFunctionCallInfoData(fcinfo, NULL, 0, InvalidOid, NULL, NULL); + result = session_user(&fcinfo); + *isNull = fcinfo.isnull; + break; + case SVFOP_CURRENT_CATALOG: + InitFunctionCallInfoData(fcinfo, NULL, 0, InvalidOid, NULL, NULL); + result = current_database(&fcinfo); + *isNull = fcinfo.isnull; + break; + case SVFOP_CURRENT_SCHEMA: + InitFunctionCallInfoData(fcinfo, NULL, 0, InvalidOid, NULL, NULL); + result = current_schema(&fcinfo); + *isNull = fcinfo.isnull; + break; + } + + return result; +} + /* ---------------------------------------------------------------- * ExecEvalXml * ---------------------------------------------------------------- @@ -5086,6 +5160,10 @@ ExecInitExpr(Expr *node, PlanState *parent) state = (ExprState *) mstate; } break; + case T_SQLValueFunction: + state = (ExprState *) makeNode(ExprState); + state->evalfunc = ExecEvalSQLValueFunction; + break; case T_XmlExpr: { XmlExpr *xexpr = (XmlExpr *) node; diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 3244c76ddc..c7a06442ba 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -1752,6 +1752,22 @@ _copyMinMaxExpr(const MinMaxExpr *from) return newnode; } +/* + * _copySQLValueFunction + */ +static SQLValueFunction * +_copySQLValueFunction(const SQLValueFunction *from) +{ + SQLValueFunction *newnode = makeNode(SQLValueFunction); + + COPY_SCALAR_FIELD(op); + COPY_SCALAR_FIELD(type); + COPY_SCALAR_FIELD(typmod); + COPY_LOCATION_FIELD(location); + + return newnode; +} + /* * _copyXmlExpr */ @@ -4525,6 +4541,9 @@ copyObject(const void *from) case T_MinMaxExpr: retval = _copyMinMaxExpr(from); break; + case T_SQLValueFunction: + retval = _copySQLValueFunction(from); + break; case T_XmlExpr: retval = 
_copyXmlExpr(from); break; diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index 1eb679926a..448e1a9d55 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -619,6 +619,17 @@ _equalMinMaxExpr(const MinMaxExpr *a, const MinMaxExpr *b) return true; } +static bool +_equalSQLValueFunction(const SQLValueFunction *a, const SQLValueFunction *b) +{ + COMPARE_SCALAR_FIELD(op); + COMPARE_SCALAR_FIELD(type); + COMPARE_SCALAR_FIELD(typmod); + COMPARE_LOCATION_FIELD(location); + + return true; +} + static bool _equalXmlExpr(const XmlExpr *a, const XmlExpr *b) { @@ -2842,6 +2853,9 @@ equal(const void *a, const void *b) case T_MinMaxExpr: retval = _equalMinMaxExpr(a, b); break; + case T_SQLValueFunction: + retval = _equalSQLValueFunction(a, b); + break; case T_XmlExpr: retval = _equalXmlExpr(a, b); break; diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index cd39167351..399744193c 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -218,6 +218,9 @@ exprType(const Node *expr) case T_MinMaxExpr: type = ((const MinMaxExpr *) expr)->minmaxtype; break; + case T_SQLValueFunction: + type = ((const SQLValueFunction *) expr)->type; + break; case T_XmlExpr: if (((const XmlExpr *) expr)->op == IS_DOCUMENT) type = BOOLOID; @@ -479,6 +482,8 @@ exprTypmod(const Node *expr) return typmod; } break; + case T_SQLValueFunction: + return ((const SQLValueFunction *) expr)->typmod; case T_CoerceToDomain: return ((const CoerceToDomain *) expr)->resulttypmod; case T_CoerceToDomainValue: @@ -718,6 +723,8 @@ expression_returns_set_walker(Node *node, void *context) return false; if (IsA(node, MinMaxExpr)) return false; + if (IsA(node, SQLValueFunction)) + return false; if (IsA(node, XmlExpr)) return false; @@ -883,6 +890,9 @@ exprCollation(const Node *expr) case T_MinMaxExpr: coll = ((const MinMaxExpr *) expr)->minmaxcollid; break; + case T_SQLValueFunction: + coll = InvalidOid; /* all cases return non-collatable types */ + break; case T_XmlExpr: /* @@ -1091,6 +1101,9 @@ exprSetCollation(Node *expr, Oid collation) case T_MinMaxExpr: ((MinMaxExpr *) expr)->minmaxcollid = collation; break; + case T_SQLValueFunction: + Assert(!OidIsValid(collation)); /* no collatable results */ + break; case T_XmlExpr: Assert((((XmlExpr *) expr)->op == IS_XMLSERIALIZE) ? (collation == DEFAULT_COLLATION_OID) : @@ -1364,6 +1377,10 @@ exprLocation(const Node *expr) /* GREATEST/LEAST keyword should always be the first thing */ loc = ((const MinMaxExpr *) expr)->location; break; + case T_SQLValueFunction: + /* function keyword should always be the first thing */ + loc = ((const SQLValueFunction *) expr)->location; + break; case T_XmlExpr: { const XmlExpr *xexpr = (const XmlExpr *) expr; @@ -1633,9 +1650,10 @@ set_sa_opfuncid(ScalarArrayOpExpr *opexpr) * for themselves, in case additional checks should be made, or because they * have special rules about which parts of the tree need to be visited. * - * Note: we ignore MinMaxExpr, XmlExpr, and CoerceToDomain nodes, because they - * do not contain SQL function OIDs. However, they can invoke SQL-visible - * functions, so callers should take thought about how to treat them. + * Note: we ignore MinMaxExpr, SQLValueFunction, XmlExpr, and CoerceToDomain + * nodes, because they do not contain SQL function OIDs. However, they can + * invoke SQL-visible functions, so callers should take thought about how to + * treat them. 
*/ bool check_functions_in_node(Node *node, check_function_callback checker, @@ -1859,6 +1877,7 @@ expression_tree_walker(Node *node, case T_CaseTestExpr: case T_SetToDefault: case T_CurrentOfExpr: + case T_SQLValueFunction: case T_RangeTblRef: case T_SortGroupClause: /* primitive node types with no expression subnodes */ @@ -2433,6 +2452,7 @@ expression_tree_mutator(Node *node, case T_CaseTestExpr: case T_SetToDefault: case T_CurrentOfExpr: + case T_SQLValueFunction: case T_RangeTblRef: case T_SortGroupClause: return (Node *) copyObject(node); @@ -3197,6 +3217,7 @@ raw_expression_tree_walker(Node *node, { case T_SetToDefault: case T_CurrentOfExpr: + case T_SQLValueFunction: case T_Integer: case T_Float: case T_String: diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index acaf4ea5eb..1fab807772 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -1423,6 +1423,17 @@ _outMinMaxExpr(StringInfo str, const MinMaxExpr *node) WRITE_LOCATION_FIELD(location); } +static void +_outSQLValueFunction(StringInfo str, const SQLValueFunction *node) +{ + WRITE_NODE_TYPE("SQLVALUEFUNCTION"); + + WRITE_ENUM_FIELD(op, SQLValueFunctionOp); + WRITE_OID_FIELD(type); + WRITE_INT_FIELD(typmod); + WRITE_LOCATION_FIELD(location); +} + static void _outXmlExpr(StringInfo str, const XmlExpr *node) { @@ -3522,6 +3533,9 @@ outNode(StringInfo str, const void *obj) case T_MinMaxExpr: _outMinMaxExpr(str, obj); break; + case T_SQLValueFunction: + _outSQLValueFunction(str, obj); + break; case T_XmlExpr: _outXmlExpr(str, obj); break; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index 94954dcc72..c83063e219 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -1041,6 +1041,22 @@ _readMinMaxExpr(void) READ_DONE(); } +/* + * _readSQLValueFunction + */ +static SQLValueFunction * +_readSQLValueFunction(void) +{ + READ_LOCALS(SQLValueFunction); + + READ_ENUM_FIELD(op, SQLValueFunctionOp); + READ_OID_FIELD(type); + READ_INT_FIELD(typmod); + READ_LOCATION_FIELD(location); + + READ_DONE(); +} + /* * _readXmlExpr */ @@ -2348,6 +2364,8 @@ parseNodeString(void) return_value = _readCoalesceExpr(); else if (MATCH("MINMAX", 6)) return_value = _readMinMaxExpr(); + else if (MATCH("SQLVALUEFUNCTION", 16)) + return_value = _readSQLValueFunction(); else if (MATCH("XMLEXPR", 7)) return_value = _readXmlExpr(); else if (MATCH("NULLTEST", 8)) diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 4e23898ff9..a40ad40606 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -962,6 +962,12 @@ contain_mutable_functions_walker(Node *node, void *context) context)) return true; + if (IsA(node, SQLValueFunction)) + { + /* all variants of SQLValueFunction are stable */ + return true; + } + /* * It should be safe to treat MinMaxExpr as immutable, because it will * depend on a non-cross-type btree comparison function, and those should @@ -1031,7 +1037,8 @@ contain_volatile_functions_walker(Node *node, void *context) /* * See notes in contain_mutable_functions_walker about why we treat - * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable. + * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable, while + * SQLValueFunction is stable. Hence, none of them are of interest here. 
*/ /* Recurse to check arguments */ @@ -1076,7 +1083,8 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context) /* * See notes in contain_mutable_functions_walker about why we treat - * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable. + * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable, while + * SQLValueFunction is stable. Hence, none of them are of interest here. */ /* Recurse to check arguments */ @@ -1143,7 +1151,8 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) * (Note: in principle that's wrong because a domain constraint could * contain a parallel-unsafe function; but useful constraints probably * never would have such, and assuming they do would cripple use of - * parallel query in the presence of domain types.) + * parallel query in the presence of domain types.) SQLValueFunction + * should be safe in all cases. */ if (IsA(node, CoerceToDomain)) { @@ -1458,6 +1467,7 @@ contain_leaked_vars_walker(Node *node, void *context) case T_CaseTestExpr: case T_RowExpr: case T_MinMaxExpr: + case T_SQLValueFunction: case T_NullTest: case T_BooleanTest: case T_List: diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index 6a0f7b393c..cb5cfc480c 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -165,6 +165,8 @@ static Node *makeAndExpr(Node *lexpr, Node *rexpr, int location); static Node *makeOrExpr(Node *lexpr, Node *rexpr, int location); static Node *makeNotExpr(Node *expr, int location); static Node *makeAArrayExpr(List *elements, int location); +static Node *makeSQLValueFunction(SQLValueFunctionOp op, int32 typmod, + int location); static Node *makeXmlExpr(XmlExprOp op, char *name, List *named_args, List *args, int location); static List *mergeTableFuncParameters(List *func_args, List *columns); @@ -12330,143 +12332,63 @@ func_expr_common_subexpr: } | CURRENT_DATE { - /* - * Translate as "'now'::text::date". - * - * We cannot use "'now'::date" because coerce_type() will - * immediately reduce that to a constant representing - * today's date. We need to delay the conversion until - * runtime, else the wrong things will happen when - * CURRENT_DATE is used in a column default value or rule. - * - * This could be simplified if we had a way to generate - * an expression tree representing runtime application - * of type-input conversion functions. (As of PG 7.3 - * that is actually possible, but not clear that we want - * to rely on it.) - * - * The token location is attached to the run-time - * typecast, not to the Const, for the convenience of - * pg_stat_statements (which doesn't want these constructs - * to appear to be replaceable constants). - */ - Node *n; - n = makeStringConstCast("now", -1, SystemTypeName("text")); - $$ = makeTypeCast(n, SystemTypeName("date"), @1); + $$ = makeSQLValueFunction(SVFOP_CURRENT_DATE, -1, @1); } | CURRENT_TIME { - /* - * Translate as "'now'::text::timetz". - * See comments for CURRENT_DATE. - */ - Node *n; - n = makeStringConstCast("now", -1, SystemTypeName("text")); - $$ = makeTypeCast(n, SystemTypeName("timetz"), @1); + $$ = makeSQLValueFunction(SVFOP_CURRENT_TIME, -1, @1); } | CURRENT_TIME '(' Iconst ')' { - /* - * Translate as "'now'::text::timetz(n)". - * See comments for CURRENT_DATE. 
- */ - Node *n; - TypeName *d; - n = makeStringConstCast("now", -1, SystemTypeName("text")); - d = SystemTypeName("timetz"); - d->typmods = list_make1(makeIntConst($3, @3)); - $$ = makeTypeCast(n, d, @1); + $$ = makeSQLValueFunction(SVFOP_CURRENT_TIME_N, $3, @1); } | CURRENT_TIMESTAMP { - /* - * Translate as "now()", since we have a function that - * does exactly what is needed. - */ - $$ = (Node *) makeFuncCall(SystemFuncName("now"), NIL, @1); + $$ = makeSQLValueFunction(SVFOP_CURRENT_TIMESTAMP, -1, @1); } | CURRENT_TIMESTAMP '(' Iconst ')' { - /* - * Translate as "'now'::text::timestamptz(n)". - * See comments for CURRENT_DATE. - */ - Node *n; - TypeName *d; - n = makeStringConstCast("now", -1, SystemTypeName("text")); - d = SystemTypeName("timestamptz"); - d->typmods = list_make1(makeIntConst($3, @3)); - $$ = makeTypeCast(n, d, @1); + $$ = makeSQLValueFunction(SVFOP_CURRENT_TIMESTAMP_N, $3, @1); } | LOCALTIME { - /* - * Translate as "'now'::text::time". - * See comments for CURRENT_DATE. - */ - Node *n; - n = makeStringConstCast("now", -1, SystemTypeName("text")); - $$ = makeTypeCast((Node *)n, SystemTypeName("time"), @1); + $$ = makeSQLValueFunction(SVFOP_LOCALTIME, -1, @1); } | LOCALTIME '(' Iconst ')' { - /* - * Translate as "'now'::text::time(n)". - * See comments for CURRENT_DATE. - */ - Node *n; - TypeName *d; - n = makeStringConstCast("now", -1, SystemTypeName("text")); - d = SystemTypeName("time"); - d->typmods = list_make1(makeIntConst($3, @3)); - $$ = makeTypeCast((Node *)n, d, @1); + $$ = makeSQLValueFunction(SVFOP_LOCALTIME_N, $3, @1); } | LOCALTIMESTAMP { - /* - * Translate as "'now'::text::timestamp". - * See comments for CURRENT_DATE. - */ - Node *n; - n = makeStringConstCast("now", -1, SystemTypeName("text")); - $$ = makeTypeCast(n, SystemTypeName("timestamp"), @1); + $$ = makeSQLValueFunction(SVFOP_LOCALTIMESTAMP, -1, @1); } | LOCALTIMESTAMP '(' Iconst ')' { - /* - * Translate as "'now'::text::timestamp(n)". - * See comments for CURRENT_DATE. 
- */ - Node *n; - TypeName *d; - n = makeStringConstCast("now", -1, SystemTypeName("text")); - d = SystemTypeName("timestamp"); - d->typmods = list_make1(makeIntConst($3, @3)); - $$ = makeTypeCast(n, d, @1); + $$ = makeSQLValueFunction(SVFOP_LOCALTIMESTAMP_N, $3, @1); } | CURRENT_ROLE { - $$ = (Node *) makeFuncCall(SystemFuncName("current_user"), NIL, @1); + $$ = makeSQLValueFunction(SVFOP_CURRENT_ROLE, -1, @1); } | CURRENT_USER { - $$ = (Node *) makeFuncCall(SystemFuncName("current_user"), NIL, @1); + $$ = makeSQLValueFunction(SVFOP_CURRENT_USER, -1, @1); } | SESSION_USER { - $$ = (Node *) makeFuncCall(SystemFuncName("session_user"), NIL, @1); + $$ = makeSQLValueFunction(SVFOP_SESSION_USER, -1, @1); } | USER { - $$ = (Node *) makeFuncCall(SystemFuncName("current_user"), NIL, @1); + $$ = makeSQLValueFunction(SVFOP_USER, -1, @1); } | CURRENT_CATALOG { - $$ = (Node *) makeFuncCall(SystemFuncName("current_database"), NIL, @1); + $$ = makeSQLValueFunction(SVFOP_CURRENT_CATALOG, -1, @1); } | CURRENT_SCHEMA { - $$ = (Node *) makeFuncCall(SystemFuncName("current_schema"), NIL, @1); + $$ = makeSQLValueFunction(SVFOP_CURRENT_SCHEMA, -1, @1); } | CAST '(' a_expr AS Typename ')' { $$ = makeTypeCast($3, $5, @1); } @@ -14710,6 +14632,18 @@ makeAArrayExpr(List *elements, int location) return (Node *) n; } +static Node * +makeSQLValueFunction(SQLValueFunctionOp op, int32 typmod, int location) +{ + SQLValueFunction *svf = makeNode(SQLValueFunction); + + svf->op = op; + /* svf->type will be filled during parse analysis */ + svf->typmod = typmod; + svf->location = location; + return (Node *) svf; +} + static Node * makeXmlExpr(XmlExprOp op, char *name, List *named_args, List *args, int location) diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index cead21283d..63f7965532 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -34,7 +34,9 @@ #include "parser/parse_type.h" #include "parser/parse_agg.h" #include "utils/builtins.h" +#include "utils/date.h" #include "utils/lsyscache.h" +#include "utils/timestamp.h" #include "utils/xml.h" @@ -107,6 +109,8 @@ static Node *transformArrayExpr(ParseState *pstate, A_ArrayExpr *a, static Node *transformRowExpr(ParseState *pstate, RowExpr *r); static Node *transformCoalesceExpr(ParseState *pstate, CoalesceExpr *c); static Node *transformMinMaxExpr(ParseState *pstate, MinMaxExpr *m); +static Node *transformSQLValueFunction(ParseState *pstate, + SQLValueFunction *svf); static Node *transformXmlExpr(ParseState *pstate, XmlExpr *x); static Node *transformXmlSerialize(ParseState *pstate, XmlSerialize *xs); static Node *transformBooleanTest(ParseState *pstate, BooleanTest *b); @@ -306,6 +310,11 @@ transformExprRecurse(ParseState *pstate, Node *expr) result = transformMinMaxExpr(pstate, (MinMaxExpr *) expr); break; + case T_SQLValueFunction: + result = transformSQLValueFunction(pstate, + (SQLValueFunction *) expr); + break; + case T_XmlExpr: result = transformXmlExpr(pstate, (XmlExpr *) expr); break; @@ -2178,6 +2187,59 @@ transformMinMaxExpr(ParseState *pstate, MinMaxExpr *m) return (Node *) newm; } +static Node * +transformSQLValueFunction(ParseState *pstate, SQLValueFunction *svf) +{ + /* + * All we need to do is insert the correct result type and (where needed) + * validate the typmod, so we just modify the node in-place. 
+ */ + switch (svf->op) + { + case SVFOP_CURRENT_DATE: + svf->type = DATEOID; + break; + case SVFOP_CURRENT_TIME: + svf->type = TIMETZOID; + break; + case SVFOP_CURRENT_TIME_N: + svf->type = TIMETZOID; + svf->typmod = anytime_typmod_check(true, svf->typmod); + break; + case SVFOP_CURRENT_TIMESTAMP: + svf->type = TIMESTAMPTZOID; + break; + case SVFOP_CURRENT_TIMESTAMP_N: + svf->type = TIMESTAMPTZOID; + svf->typmod = anytimestamp_typmod_check(true, svf->typmod); + break; + case SVFOP_LOCALTIME: + svf->type = TIMEOID; + break; + case SVFOP_LOCALTIME_N: + svf->type = TIMEOID; + svf->typmod = anytime_typmod_check(false, svf->typmod); + break; + case SVFOP_LOCALTIMESTAMP: + svf->type = TIMESTAMPOID; + break; + case SVFOP_LOCALTIMESTAMP_N: + svf->type = TIMESTAMPOID; + svf->typmod = anytimestamp_typmod_check(false, svf->typmod); + break; + case SVFOP_CURRENT_ROLE: + case SVFOP_CURRENT_USER: + case SVFOP_USER: + case SVFOP_SESSION_USER: + case SVFOP_CURRENT_CATALOG: + case SVFOP_CURRENT_SCHEMA: + svf->type = NAMEOID; + break; + } + + return (Node *) svf; +} + static Node * transformXmlExpr(ParseState *pstate, XmlExpr *x) { diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c index fc93063ed0..b7b82bfb6b 100644 --- a/src/backend/parser/parse_target.c +++ b/src/backend/parser/parse_target.c @@ -1761,6 +1761,49 @@ FigureColnameInternal(Node *node, char **name) return 2; } break; + case T_SQLValueFunction: + /* make these act like a function or variable */ + switch (((SQLValueFunction *) node)->op) + { + case SVFOP_CURRENT_DATE: + *name = "current_date"; + return 2; + case SVFOP_CURRENT_TIME: + case SVFOP_CURRENT_TIME_N: + *name = "current_time"; + return 2; + case SVFOP_CURRENT_TIMESTAMP: + case SVFOP_CURRENT_TIMESTAMP_N: + *name = "current_timestamp"; + return 2; + case SVFOP_LOCALTIME: + case SVFOP_LOCALTIME_N: + *name = "localtime"; + return 2; + case SVFOP_LOCALTIMESTAMP: + case SVFOP_LOCALTIMESTAMP_N: + *name = "localtimestamp"; + return 2; + case SVFOP_CURRENT_ROLE: + *name = "current_role"; + return 2; + case SVFOP_CURRENT_USER: + *name = "current_user"; + return 2; + case SVFOP_USER: + *name = "user"; + return 2; + case SVFOP_SESSION_USER: + *name = "session_user"; + return 2; + case SVFOP_CURRENT_CATALOG: + *name = "current_catalog"; + return 2; + case SVFOP_CURRENT_SCHEMA: + *name = "current_schema"; + return 2; + } + break; case T_XmlExpr: /* make SQL/XML functions act like a regular function */ switch (((XmlExpr *) node)->op) diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c index 420f383a80..bc7d190210 100644 --- a/src/backend/utils/adt/date.c +++ b/src/backend/utils/adt/date.c @@ -21,6 +21,7 @@ #include #include "access/hash.h" +#include "access/xact.h" #include "libpq/pqformat.h" #include "miscadmin.h" #include "parser/scansup.h" @@ -51,7 +52,6 @@ static void AdjustTimeForTypmod(TimeADT *time, int32 typmod); static int32 anytime_typmodin(bool istz, ArrayType *ta) { - int32 typmod; int32 *tl; int n; @@ -66,22 +66,27 @@ anytime_typmodin(bool istz, ArrayType *ta) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid type modifier"))); - if (*tl < 0) + return anytime_typmod_check(istz, tl[0]); +} + +/* exported so parse_expr.c can use it */ +int32 +anytime_typmod_check(bool istz, int32 typmod) +{ + if (typmod < 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("TIME(%d)%s precision must not be negative", - *tl, (istz ? " WITH TIME ZONE" : "")))); - if (*tl > MAX_TIME_PRECISION) + typmod, (istz ? 
" WITH TIME ZONE" : "")))); + if (typmod > MAX_TIME_PRECISION) { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("TIME(%d)%s precision reduced to maximum allowed, %d", - *tl, (istz ? " WITH TIME ZONE" : ""), + typmod, (istz ? " WITH TIME ZONE" : ""), MAX_TIME_PRECISION))); typmod = MAX_TIME_PRECISION; } - else - typmod = *tl; return typmod; } @@ -298,6 +303,80 @@ EncodeSpecialDate(DateADT dt, char *str) } +/* + * GetSQLCurrentDate -- implements CURRENT_DATE + */ +DateADT +GetSQLCurrentDate(void) +{ + TimestampTz ts; + struct pg_tm tt, + *tm = &tt; + fsec_t fsec; + int tz; + + ts = GetCurrentTransactionStartTimestamp(); + + if (timestamp2tm(ts, &tz, tm, &fsec, NULL, NULL) != 0) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("timestamp out of range"))); + + return date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE; +} + +/* + * GetSQLCurrentTime -- implements CURRENT_TIME, CURRENT_TIME(n) + */ +TimeTzADT * +GetSQLCurrentTime(int32 typmod) +{ + TimeTzADT *result; + TimestampTz ts; + struct pg_tm tt, + *tm = &tt; + fsec_t fsec; + int tz; + + ts = GetCurrentTransactionStartTimestamp(); + + if (timestamp2tm(ts, &tz, tm, &fsec, NULL, NULL) != 0) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("timestamp out of range"))); + + result = (TimeTzADT *) palloc(sizeof(TimeTzADT)); + tm2timetz(tm, fsec, tz, result); + AdjustTimeForTypmod(&(result->time), typmod); + return result; +} + +/* + * GetSQLLocalTime -- implements LOCALTIME, LOCALTIME(n) + */ +TimeADT +GetSQLLocalTime(int32 typmod) +{ + TimeADT result; + TimestampTz ts; + struct pg_tm tt, + *tm = &tt; + fsec_t fsec; + int tz; + + ts = GetCurrentTransactionStartTimestamp(); + + if (timestamp2tm(ts, &tz, tm, &fsec, NULL, NULL) != 0) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("timestamp out of range"))); + + tm2time(tm, fsec, &result); + AdjustTimeForTypmod(&result, typmod); + return result; +} + + /* * Comparison functions for dates */ diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index ec966c752e..8a81d7a078 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -6884,6 +6884,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags) case T_RowExpr: case T_CoalesceExpr: case T_MinMaxExpr: + case T_SQLValueFunction: case T_XmlExpr: case T_NullIfExpr: case T_Aggref: @@ -7871,6 +7872,67 @@ get_rule_expr(Node *node, deparse_context *context, } break; + case T_SQLValueFunction: + { + SQLValueFunction *svf = (SQLValueFunction *) node; + + /* + * Note: this code knows that typmod for time, timestamp, and + * timestamptz just prints as integer. 
+ */ + switch (svf->op) + { + case SVFOP_CURRENT_DATE: + appendStringInfoString(buf, "CURRENT_DATE"); + break; + case SVFOP_CURRENT_TIME: + appendStringInfoString(buf, "CURRENT_TIME"); + break; + case SVFOP_CURRENT_TIME_N: + appendStringInfo(buf, "CURRENT_TIME(%d)", svf->typmod); + break; + case SVFOP_CURRENT_TIMESTAMP: + appendStringInfoString(buf, "CURRENT_TIMESTAMP"); + break; + case SVFOP_CURRENT_TIMESTAMP_N: + appendStringInfo(buf, "CURRENT_TIMESTAMP(%d)", + svf->typmod); + break; + case SVFOP_LOCALTIME: + appendStringInfoString(buf, "LOCALTIME"); + break; + case SVFOP_LOCALTIME_N: + appendStringInfo(buf, "LOCALTIME(%d)", svf->typmod); + break; + case SVFOP_LOCALTIMESTAMP: + appendStringInfoString(buf, "LOCALTIMESTAMP"); + break; + case SVFOP_LOCALTIMESTAMP_N: + appendStringInfo(buf, "LOCALTIMESTAMP(%d)", + svf->typmod); + break; + case SVFOP_CURRENT_ROLE: + appendStringInfoString(buf, "CURRENT_ROLE"); + break; + case SVFOP_CURRENT_USER: + appendStringInfoString(buf, "CURRENT_USER"); + break; + case SVFOP_USER: + appendStringInfoString(buf, "USER"); + break; + case SVFOP_SESSION_USER: + appendStringInfoString(buf, "SESSION_USER"); + break; + case SVFOP_CURRENT_CATALOG: + appendStringInfoString(buf, "CURRENT_CATALOG"); + break; + case SVFOP_CURRENT_SCHEMA: + appendStringInfoString(buf, "CURRENT_SCHEMA"); + break; + } + } + break; + case T_XmlExpr: { XmlExpr *xexpr = (XmlExpr *) node; diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index d7ee865cf7..c1d6f05b5e 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -72,13 +72,13 @@ static Timestamp dt2local(Timestamp dt, int timezone); static void AdjustTimestampForTypmod(Timestamp *time, int32 typmod); static void AdjustIntervalForTypmod(Interval *interval, int32 typmod); static TimestampTz timestamp2timestamptz(Timestamp timestamp); +static Timestamp timestamptz2timestamp(TimestampTz timestamp); /* common code for timestamptypmodin and timestamptztypmodin */ static int32 anytimestamp_typmodin(bool istz, ArrayType *ta) { - int32 typmod; int32 *tl; int n; @@ -93,22 +93,27 @@ anytimestamp_typmodin(bool istz, ArrayType *ta) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid type modifier"))); - if (*tl < 0) + return anytimestamp_typmod_check(istz, tl[0]); +} + +/* exported so parse_expr.c can use it */ +int32 +anytimestamp_typmod_check(bool istz, int32 typmod) +{ + if (typmod < 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("TIMESTAMP(%d)%s precision must not be negative", - *tl, (istz ? " WITH TIME ZONE" : "")))); - if (*tl > MAX_TIMESTAMP_PRECISION) + typmod, (istz ? " WITH TIME ZONE" : "")))); + if (typmod > MAX_TIMESTAMP_PRECISION) { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("TIMESTAMP(%d)%s precision reduced to maximum allowed, %d", - *tl, (istz ? " WITH TIME ZONE" : ""), + typmod, (istz ? " WITH TIME ZONE" : ""), MAX_TIMESTAMP_PRECISION))); typmod = MAX_TIMESTAMP_PRECISION; } - else - typmod = *tl; return typmod; } @@ -336,6 +341,10 @@ timestamp_scale(PG_FUNCTION_ARGS) PG_RETURN_TIMESTAMP(result); } +/* + * AdjustTimestampForTypmod --- round off a timestamp to suit given typmod + * Works for either timestamp or timestamptz. 
+ */ static void AdjustTimestampForTypmod(Timestamp *time, int32 typmod) { @@ -1686,6 +1695,34 @@ IntegerTimestampToTimestampTz(int64 timestamp) } #endif +/* + * GetSQLCurrentTimestamp -- implements CURRENT_TIMESTAMP, CURRENT_TIMESTAMP(n) + */ +TimestampTz +GetSQLCurrentTimestamp(int32 typmod) +{ + TimestampTz ts; + + ts = GetCurrentTransactionStartTimestamp(); + if (typmod >= 0) + AdjustTimestampForTypmod(&ts, typmod); + return ts; +} + +/* + * GetSQLLocalTimestamp -- implements LOCALTIMESTAMP, LOCALTIMESTAMP(n) + */ +Timestamp +GetSQLLocalTimestamp(int32 typmod) +{ + Timestamp ts; + + ts = timestamptz2timestamp(GetCurrentTransactionStartTimestamp()); + if (typmod >= 0) + AdjustTimestampForTypmod(&ts, typmod); + return ts; +} + /* * TimestampDifference -- convert the difference between two timestamps * into integer seconds and microseconds @@ -5415,6 +5452,13 @@ Datum timestamptz_timestamp(PG_FUNCTION_ARGS) { TimestampTz timestamp = PG_GETARG_TIMESTAMPTZ(0); + + PG_RETURN_TIMESTAMP(timestamptz2timestamp(timestamp)); +} + +static Timestamp +timestamptz2timestamp(TimestampTz timestamp) +{ Timestamp result; struct pg_tm tt, *tm = &tt; @@ -5434,7 +5478,7 @@ timestamptz_timestamp(PG_FUNCTION_ARGS) (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), errmsg("timestamp out of range"))); } - PG_RETURN_TIMESTAMP(result); + return result; } /* timestamptz_zone() diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 2ca3cd911a..82810c8fba 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 201608131 +#define CATALOG_VERSION_NO 201608161 #endif diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index 6b850e4bc4..2f7efa810c 100644 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -166,6 +166,7 @@ typedef enum NodeTag T_RowCompareExpr, T_CoalesceExpr, T_MinMaxExpr, + T_SQLValueFunction, T_XmlExpr, T_NullTest, T_BooleanTest, diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h index df2d27d77c..65510b010b 100644 --- a/src/include/nodes/primnodes.h +++ b/src/include/nodes/primnodes.h @@ -1050,6 +1050,45 @@ typedef struct MinMaxExpr int location; /* token location, or -1 if unknown */ } MinMaxExpr; +/* + * SQLValueFunction - parameterless functions with special grammar productions + * + * The SQL standard categorizes some of these as <datetime value function> + * and others as <general value specification>. We call 'em SQLValueFunctions + * for lack of a better term. We store type and typmod of the result so that + * some code doesn't need to know each function individually, and because + * we would need to store typmod anyway for some of the datetime functions. + * Note that currently, all variants return non-collating datatypes, so we do + * not need a collation field; also, all these functions are stable.
+ */ +typedef enum SQLValueFunctionOp +{ + SVFOP_CURRENT_DATE, + SVFOP_CURRENT_TIME, + SVFOP_CURRENT_TIME_N, + SVFOP_CURRENT_TIMESTAMP, + SVFOP_CURRENT_TIMESTAMP_N, + SVFOP_LOCALTIME, + SVFOP_LOCALTIME_N, + SVFOP_LOCALTIMESTAMP, + SVFOP_LOCALTIMESTAMP_N, + SVFOP_CURRENT_ROLE, + SVFOP_CURRENT_USER, + SVFOP_USER, + SVFOP_SESSION_USER, + SVFOP_CURRENT_CATALOG, + SVFOP_CURRENT_SCHEMA +} SQLValueFunctionOp; + +typedef struct SQLValueFunction +{ + Expr xpr; + SQLValueFunctionOp op; /* which function this is */ + Oid type; /* result type/typmod */ + int32 typmod; + int location; /* token location, or -1 if unknown */ +} SQLValueFunction; + /* * XmlExpr - various SQL/XML functions requiring special grammar productions * diff --git a/src/include/utils/date.h b/src/include/utils/date.h index 1b962af7d8..df753c4450 100644 --- a/src/include/utils/date.h +++ b/src/include/utils/date.h @@ -89,8 +89,12 @@ typedef struct /* date.c */ +extern int32 anytime_typmod_check(bool istz, int32 typmod); extern double date2timestamp_no_overflow(DateADT dateVal); extern void EncodeSpecialDate(DateADT dt, char *str); +extern DateADT GetSQLCurrentDate(void); +extern TimeTzADT *GetSQLCurrentTime(int32 typmod); +extern TimeADT GetSQLLocalTime(int32 typmod); extern Datum date_in(PG_FUNCTION_ARGS); extern Datum date_out(PG_FUNCTION_ARGS); diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h index 85cc7ce1fe..93b90fe3a0 100644 --- a/src/include/utils/timestamp.h +++ b/src/include/utils/timestamp.h @@ -215,7 +215,11 @@ extern Datum generate_series_timestamptz(PG_FUNCTION_ARGS); /* Internal routines (not fmgr-callable) */ +extern int32 anytimestamp_typmod_check(bool istz, int32 typmod); + extern TimestampTz GetCurrentTimestamp(void); +extern TimestampTz GetSQLCurrentTimestamp(int32 typmod); +extern Timestamp GetSQLLocalTimestamp(int32 typmod); extern void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs); extern bool TimestampDifferenceExceeds(TimestampTz start_time, diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index 586ff1f329..fec55e502f 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -6494,6 +6494,9 @@ exec_simple_check_node(Node *node) return TRUE; } + case T_SQLValueFunction: + return TRUE; + case T_XmlExpr: { XmlExpr *expr = (XmlExpr *) node; diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index c15bf958a5..abfee92f4d 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -182,7 +182,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); Filter: (dlevel <= $0) InitPlan 1 (returns $0) -> Index Scan using uaccount_pkey on uaccount - Index Cond: (pguser = "current_user"()) + Index Cond: (pguser = CURRENT_USER) (7 rows) EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); @@ -198,7 +198,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dt Filter: (dlevel <= $0) InitPlan 1 (returns $0) -> Index Scan using uaccount_pkey on uaccount - Index Cond: (pguser = "current_user"()) + Index Cond: (pguser = CURRENT_USER) (11 rows) -- only owner can change policies @@ -265,22 +265,22 @@ NOTICE: f_leak => great manga (3 rows) EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); - QUERY PLAN ----------------------------------------------- + QUERY PLAN +------------------------------------------ Subquery Scan on document 
Filter: f_leak(document.dtitle) -> Seq Scan on document document_1 - Filter: (dauthor = "current_user"()) + Filter: (dauthor = CURRENT_USER) (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); - QUERY PLAN ----------------------------------------------------- + QUERY PLAN +-------------------------------------------------- Nested Loop -> Subquery Scan on document Filter: f_leak(document.dtitle) -> Seq Scan on document document_1 - Filter: (dauthor = "current_user"()) + Filter: (dauthor = CURRENT_USER) -> Index Scan using category_pkey on category Index Cond: (cid = document.cid) (7 rows) diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index c5ff3181a3..8157324fee 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -2269,14 +2269,14 @@ pg_settings|pg_settings_u|CREATE RULE pg_settings_u AS WHERE (new.name = old.name) DO SELECT set_config(old.name, new.setting, false) AS set_config; rtest_emp|rtest_emp_del|CREATE RULE rtest_emp_del AS ON DELETE TO rtest_emp DO INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal) - VALUES (old.ename, "current_user"(), 'fired'::bpchar, '$0.00'::money, old.salary); + VALUES (old.ename, CURRENT_USER, 'fired'::bpchar, '$0.00'::money, old.salary); rtest_emp|rtest_emp_ins|CREATE RULE rtest_emp_ins AS ON INSERT TO rtest_emp DO INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal) - VALUES (new.ename, "current_user"(), 'hired'::bpchar, new.salary, '$0.00'::money); + VALUES (new.ename, CURRENT_USER, 'hired'::bpchar, new.salary, '$0.00'::money); rtest_emp|rtest_emp_upd|CREATE RULE rtest_emp_upd AS ON UPDATE TO rtest_emp WHERE (new.salary <> old.salary) DO INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal) - VALUES (new.ename, "current_user"(), 'honored'::bpchar, new.salary, old.salary); + VALUES (new.ename, CURRENT_USER, 'honored'::bpchar, new.salary, old.salary); rtest_nothn1|rtest_nothn_r1|CREATE RULE rtest_nothn_r1 AS ON INSERT TO rtest_nothn1 WHERE ((new.a >= 10) AND (new.a < 20)) DO INSTEAD NOTHING; diff --git a/src/test/regress/expected/select_views.out b/src/test/regress/expected/select_views.out index 7f575266c1..878035332b 100644 --- a/src/test/regress/expected/select_views.out +++ b/src/test/regress/expected/select_views.out @@ -1326,10 +1326,10 @@ NOTICE: f_leak => hamburger (1 row) EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal WHERE f_leak(passwd); - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------- Seq Scan on customer - Filter: (f_leak(passwd) AND (name = ("current_user"())::text)) + Filter: (f_leak(passwd) AND (name = (CURRENT_USER)::text)) (2 rows) SELECT * FROM my_property_secure WHERE f_leak(passwd); @@ -1340,12 +1340,12 @@ NOTICE: f_leak => passwd123 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure WHERE f_leak(passwd); - QUERY PLAN ---------------------------------------------------- + QUERY PLAN +----------------------------------------------- Subquery Scan on my_property_secure Filter: f_leak(my_property_secure.passwd) -> Seq Scan on customer - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (4 rows) -- @@ -1367,10 +1367,10 @@ NOTICE: f_leak => hamburger EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal v WHERE f_leak('passwd') AND f_leak(passwd); - QUERY PLAN 
---------------------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------------- Seq Scan on customer - Filter: (f_leak('passwd'::text) AND f_leak(passwd) AND (name = ("current_user"())::text)) + Filter: (f_leak('passwd'::text) AND f_leak(passwd) AND (name = (CURRENT_USER)::text)) (2 rows) SELECT * FROM my_property_secure v @@ -1386,12 +1386,12 @@ NOTICE: f_leak => passwd EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure v WHERE f_leak('passwd') AND f_leak(passwd); - QUERY PLAN --------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------- Subquery Scan on v Filter: f_leak(v.passwd) -> Seq Scan on customer - Filter: (f_leak('passwd'::text) AND (name = ("current_user"())::text)) + Filter: (f_leak('passwd'::text) AND (name = (CURRENT_USER)::text)) (4 rows) -- @@ -1409,15 +1409,15 @@ NOTICE: f_leak => 9801-2345-6789-0123 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_normal WHERE f_leak(cnum); - QUERY PLAN ---------------------------------------------------------- + QUERY PLAN +----------------------------------------------------- Hash Join Hash Cond: (r.cid = l.cid) -> Seq Scan on credit_card r Filter: f_leak(cnum) -> Hash -> Seq Scan on customer l - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (7 rows) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum); @@ -1428,8 +1428,8 @@ NOTICE: f_leak => 1111-2222-3333-4444 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum); - QUERY PLAN ---------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------- Subquery Scan on my_credit_card_secure Filter: f_leak(my_credit_card_secure.cnum) -> Hash Join @@ -1437,7 +1437,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum); -> Seq Scan on credit_card r -> Hash -> Seq Scan on customer l - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (8 rows) -- @@ -1471,7 +1471,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_normal -> Seq Scan on credit_card r_1 -> Hash -> Seq Scan on customer l_1 - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (13 rows) SELECT * FROM my_credit_card_usage_secure @@ -1502,7 +1502,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_secure -> Seq Scan on credit_card r_1 -> Hash -> Seq Scan on customer l - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (13 rows) -- diff --git a/src/test/regress/expected/select_views_1.out b/src/test/regress/expected/select_views_1.out index 5275ef0b2d..1a05c6ccbd 100644 --- a/src/test/regress/expected/select_views_1.out +++ b/src/test/regress/expected/select_views_1.out @@ -1326,10 +1326,10 @@ NOTICE: f_leak => hamburger (1 row) EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal WHERE f_leak(passwd); - QUERY PLAN ------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------- Seq Scan on customer - Filter: (f_leak(passwd) AND (name = ("current_user"())::text)) + Filter: (f_leak(passwd) AND (name = (CURRENT_USER)::text)) (2 rows) SELECT * FROM my_property_secure WHERE f_leak(passwd); @@ -1340,12 +1340,12 @@ NOTICE: f_leak => passwd123 (1 row) EXPLAIN 
(COSTS OFF) SELECT * FROM my_property_secure WHERE f_leak(passwd); - QUERY PLAN ---------------------------------------------------- + QUERY PLAN +----------------------------------------------- Subquery Scan on my_property_secure Filter: f_leak(my_property_secure.passwd) -> Seq Scan on customer - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (4 rows) -- @@ -1367,10 +1367,10 @@ NOTICE: f_leak => hamburger EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal v WHERE f_leak('passwd') AND f_leak(passwd); - QUERY PLAN ---------------------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------------- Seq Scan on customer - Filter: (f_leak('passwd'::text) AND f_leak(passwd) AND (name = ("current_user"())::text)) + Filter: (f_leak('passwd'::text) AND f_leak(passwd) AND (name = (CURRENT_USER)::text)) (2 rows) SELECT * FROM my_property_secure v @@ -1386,12 +1386,12 @@ NOTICE: f_leak => passwd EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure v WHERE f_leak('passwd') AND f_leak(passwd); - QUERY PLAN --------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------- Subquery Scan on v Filter: f_leak(v.passwd) -> Seq Scan on customer - Filter: (f_leak('passwd'::text) AND (name = ("current_user"())::text)) + Filter: (f_leak('passwd'::text) AND (name = (CURRENT_USER)::text)) (4 rows) -- @@ -1409,15 +1409,15 @@ NOTICE: f_leak => 9801-2345-6789-0123 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_normal WHERE f_leak(cnum); - QUERY PLAN ---------------------------------------------------------- + QUERY PLAN +----------------------------------------------------- Hash Join Hash Cond: (r.cid = l.cid) -> Seq Scan on credit_card r Filter: f_leak(cnum) -> Hash -> Seq Scan on customer l - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (7 rows) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum); @@ -1428,8 +1428,8 @@ NOTICE: f_leak => 1111-2222-3333-4444 (1 row) EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum); - QUERY PLAN ---------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------- Subquery Scan on my_credit_card_secure Filter: f_leak(my_credit_card_secure.cnum) -> Hash Join @@ -1437,7 +1437,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum); -> Seq Scan on credit_card r -> Hash -> Seq Scan on customer l - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (8 rows) -- @@ -1471,7 +1471,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_normal -> Seq Scan on credit_card r_1 -> Hash -> Seq Scan on customer l_1 - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (13 rows) SELECT * FROM my_credit_card_usage_secure @@ -1502,7 +1502,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_secure -> Seq Scan on credit_card r_1 -> Hash -> Seq Scan on customer l - Filter: (name = ("current_user"())::text) + Filter: (name = (CURRENT_USER)::text) (13 rows) -- -- cgit v1.2.3 From 092155465710b22ec9a99820ce6400a889580805 Mon Sep 17 00:00:00 2001 From: Magnus Hagander Date: Wed, 17 Aug 2016 10:39:22 +0200 Subject: Disable update_process_title by default on Windows The performance overhead of this can be significant on 
Windows, and most people don't have the tools to view it anyway as Windows does not have native support for process titles. Discussion: <0A3221C70F24FB45833433255569204D1F5BE3E8@G01JPEXMBYT05> Takayuki Tsunakawa --- doc/src/sgml/config.sgml | 2 ++ src/backend/utils/misc/guc.c | 4 ++++ src/bin/initdb/initdb.c | 6 ++++++ 3 files changed, 12 insertions(+) (limited to 'src') diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 02f917b375..5c8db97343 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -5430,6 +5430,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; is received by the server. The process title is typically viewed by the ps command, or in Windows by using the Process Explorer. + This value defaults to off on Windows platforms due to the + platform's significant overhead for updating the process title. Only superusers can change this setting. diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 9c93df0f0a..c5178f7cad 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -1210,7 +1210,11 @@ static struct config_bool ConfigureNamesBool[] = gettext_noop("Enables updating of the process title every time a new SQL command is received by the server.") }, &update_process_title, +#ifdef WIN32 + false, +#else true, +#endif NULL, NULL, NULL }, diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 73cb7ee683..a978bbc328 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -1292,6 +1292,12 @@ setup_config(void) "#effective_io_concurrency = 0"); #endif +#ifdef WIN32 + conflines = replace_token(conflines, + "#update_process_title = on", + "#update_process_title = off"); +#endif + snprintf(path, sizeof(path), "%s/postgresql.conf", pg_data); writefile(path, conflines); -- cgit v1.2.3 From bfaaacc805aef9f07b4805c6a274256340b9cddf Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 17 Aug 2016 14:51:10 -0400 Subject: Improve plpgsql's memory management to fix some function-lifespan leaks. In some cases, exiting out of a plpgsql statement due to an error, then catching the error in a surrounding exception block, led to leakage of temporary data the statement was working with, because we kept all such data in the function-lifespan SPI Proc context. Iterating such behavior many times within one function call thus led to noticeable memory bloat. To fix, create an additional memory context meant to have statement lifespan. Since many plpgsql statements, particularly the simpler/more common ones, don't need this, create it only on demand. Reset this context at the end of any statement that uses it, and arrange for exception cleanup to reset it too, thereby fixing the memory-leak issue. Allow a stack of such contexts to exist to handle cases where a compound statement needs statement-lifespan data that persists across calls of inner statements. While at it, clean up code and improve comments referring to the existing short-term memory context, which by plpgsql convention is the per-tuple context of the eval_econtext ExprContext. We now uniformly refer to that as the eval_mcontext, whereas the new statement-lifespan memory contexts are called stmt_mcontext. This change adds some context-creation overhead, but on the other hand it allows removal of some retail pfree's in favor of context resets. On balance it seems to be about a wash performance-wise. 
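As a sketch of the leak pattern being fixed (hypothetical, for illustration only): a plpgsql function of roughly this shape, which repeatedly builds per-statement workspace (here the EXECUTE ... USING parameter data) and then traps the resulting error, is the kind of code that could show the bloat described above:

    CREATE FUNCTION leak_demo(n int) RETURNS void
    LANGUAGE plpgsql AS $$
    BEGIN
        FOR i IN 1 .. n LOOP
            BEGIN
                -- The USING parameter is evaluated into per-statement
                -- workspace; the error raised by the query used to strand
                -- that workspace in the function-lifespan SPI Proc context.
                EXECUTE 'SELECT 1 / $1' USING 0;
            EXCEPTION
                WHEN division_by_zero THEN
                    NULL;   -- swallow the error and keep iterating
            END;
        END LOOP;
    END;
    $$;

With the statement-lifespan contexts introduced here, exception cleanup resets that workspace instead, so iterating the loop no longer accumulates garbage in the function-lifespan context.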
In principle this is a bug fix, but it seems too invasive for a back-patch, and the infrequency of complaints weighs against taking the risk in the back branches. So we'll fix it only in HEAD, at least for now. Tom Lane, reviewed by Pavel Stehule Discussion: <17863.1469142152@sss.pgh.pa.us> --- src/pl/plpgsql/src/pl_exec.c | 590 +++++++++++++++++++++++++++++-------------- src/pl/plpgsql/src/plpgsql.h | 6 +- 2 files changed, 403 insertions(+), 193 deletions(-) (limited to 'src') diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index fec55e502f..f9b3b22d08 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -48,7 +48,6 @@ typedef struct Oid *types; /* types of arguments */ Datum *values; /* evaluated argument values */ char *nulls; /* null markers (' '/'n' style) */ - bool *freevals; /* which arguments are pfree-able */ } PreparedParamsData; /* @@ -87,6 +86,36 @@ typedef struct SimpleEcontextStackEntry static EState *shared_simple_eval_estate = NULL; static SimpleEcontextStackEntry *simple_econtext_stack = NULL; +/* + * Memory management within a plpgsql function generally works with three + * contexts: + * + * 1. Function-call-lifespan data, such as variable values, is kept in the + * "main" context, a/k/a the "SPI Proc" context established by SPI_connect(). + * This is usually the CurrentMemoryContext while running code in this module + * (which is not good, because careless coding can easily cause + * function-lifespan memory leaks, but we live with it for now). + * + * 2. Some statement-execution routines need statement-lifespan workspace. + * A suitable context is created on-demand by get_stmt_mcontext(), and must + * be reset at the end of the requesting routine. Error recovery will clean + * it up automatically. Nested statements requiring statement-lifespan + * workspace will result in a stack of such contexts, see push_stmt_mcontext(). + * + * 3. We use the eval_econtext's per-tuple memory context for expression + * evaluation, and as a general-purpose workspace for short-lived allocations. + * Such allocations usually aren't explicitly freed, but are left to be + * cleaned up by a context reset, typically done by exec_eval_cleanup(). + * + * These macros are for use in making short-lived allocations: + */ +#define get_eval_mcontext(estate) \ + ((estate)->eval_econtext->ecxt_per_tuple_memory) +#define eval_mcontext_alloc(estate, sz) \ + MemoryContextAlloc(get_eval_mcontext(estate), sz) +#define eval_mcontext_alloc0(estate, sz) \ + MemoryContextAllocZero(get_eval_mcontext(estate), sz) + /* * We use a session-wide hash table for caching cast information. 
* @@ -128,6 +157,9 @@ static HTAB *shared_cast_hash = NULL; ************************************************************/ static void plpgsql_exec_error_callback(void *arg); static PLpgSQL_datum *copy_plpgsql_datum(PLpgSQL_datum *datum); +static MemoryContext get_stmt_mcontext(PLpgSQL_execstate *estate); +static void push_stmt_mcontext(PLpgSQL_execstate *estate); +static void pop_stmt_mcontext(PLpgSQL_execstate *estate); static int exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block); @@ -191,7 +223,7 @@ static void exec_eval_cleanup(PLpgSQL_execstate *estate); static void exec_prepare_plan(PLpgSQL_execstate *estate, PLpgSQL_expr *expr, int cursorOptions); static bool exec_simple_check_node(Node *node); -static void exec_simple_check_plan(PLpgSQL_expr *expr); +static void exec_simple_check_plan(PLpgSQL_execstate *estate, PLpgSQL_expr *expr); static void exec_simple_recheck_plan(PLpgSQL_expr *expr, CachedPlan *cplan); static void exec_check_rw_parameter(PLpgSQL_expr *expr, int target_dno); static bool contains_target_param(Node *node, int *target_dno); @@ -271,11 +303,9 @@ static void assign_text_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, const char *str); static PreparedParamsData *exec_eval_using_params(PLpgSQL_execstate *estate, List *params); -static void free_params_data(PreparedParamsData *ppd); static Portal exec_dynquery_with_params(PLpgSQL_execstate *estate, PLpgSQL_expr *dynquery, List *params, const char *portalname, int cursorOptions); - static char *format_expr_params(PLpgSQL_execstate *estate, const PLpgSQL_expr *expr); static char *format_preparedparamsdata(PLpgSQL_execstate *estate, @@ -562,6 +592,7 @@ plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, /* Clean up any leftover temporary memory */ plpgsql_destroy_econtext(&estate); exec_eval_cleanup(&estate); + /* stmt_mcontext will be destroyed when function's main context is */ /* * Pop the error context stack @@ -832,6 +863,7 @@ plpgsql_exec_trigger(PLpgSQL_function *func, /* Clean up any leftover temporary memory */ plpgsql_destroy_econtext(&estate); exec_eval_cleanup(&estate); + /* stmt_mcontext will be destroyed when function's main context is */ /* * Pop the error context stack @@ -844,6 +876,11 @@ plpgsql_exec_trigger(PLpgSQL_function *func, return rettup; } +/* ---------- + * plpgsql_exec_event_trigger Called by the call handler for + * event trigger execution. + * ---------- + */ void plpgsql_exec_event_trigger(PLpgSQL_function *func, EventTriggerData *trigdata) { @@ -915,6 +952,7 @@ plpgsql_exec_event_trigger(PLpgSQL_function *func, EventTriggerData *trigdata) /* Clean up any leftover temporary memory */ plpgsql_destroy_econtext(&estate); exec_eval_cleanup(&estate); + /* stmt_mcontext will be destroyed when function's main context is */ /* * Pop the error context stack @@ -1041,7 +1079,64 @@ copy_plpgsql_datum(PLpgSQL_datum *datum) return result; } +/* + * Create a memory context for statement-lifespan variables, if we don't + * have one already. It will be a child of stmt_mcontext_parent, which is + * either the function's main context or a pushed-down outer stmt_mcontext. 
+ */ +static MemoryContext +get_stmt_mcontext(PLpgSQL_execstate *estate) +{ + if (estate->stmt_mcontext == NULL) + { + estate->stmt_mcontext = + AllocSetContextCreate(estate->stmt_mcontext_parent, + "PLpgSQL per-statement data", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + } + return estate->stmt_mcontext; +} +/* + * Push down the current stmt_mcontext so that called statements won't use it. + * This is needed by statements that have statement-lifespan data and need to + * preserve it across some inner statements. The caller should eventually do + * pop_stmt_mcontext(). + */ +static void +push_stmt_mcontext(PLpgSQL_execstate *estate) +{ + /* Should have done get_stmt_mcontext() first */ + Assert(estate->stmt_mcontext != NULL); + /* Assert we've not messed up the stack linkage */ + Assert(MemoryContextGetParent(estate->stmt_mcontext) == estate->stmt_mcontext_parent); + /* Push it down to become the parent of any nested stmt mcontext */ + estate->stmt_mcontext_parent = estate->stmt_mcontext; + /* And make it not available for use directly */ + estate->stmt_mcontext = NULL; +} + +/* + * Undo push_stmt_mcontext(). We assume this is done just before or after + * resetting the caller's stmt_mcontext; since that action will also delete + * any child contexts, there's no need to explicitly delete whatever context + * might currently be estate->stmt_mcontext. + */ +static void +pop_stmt_mcontext(PLpgSQL_execstate *estate) +{ + /* We need only pop the stack */ + estate->stmt_mcontext = estate->stmt_mcontext_parent; + estate->stmt_mcontext_parent = MemoryContextGetParent(estate->stmt_mcontext); +} + + +/* + * Subroutine for exec_stmt_block: does any condition in the condition list + * match the current exception? + */ static bool exception_matches_conditions(ErrorData *edata, PLpgSQL_condition *cond) { @@ -1174,9 +1269,21 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) ResourceOwner oldowner = CurrentResourceOwner; ExprContext *old_eval_econtext = estate->eval_econtext; ErrorData *save_cur_error = estate->cur_error; + MemoryContext stmt_mcontext; estate->err_text = gettext_noop("during statement block entry"); + /* + * We will need a stmt_mcontext to hold the error data if an error + * occurs. It seems best to force it to exist before entering the + * subtransaction, so that we reduce the risk of out-of-memory during + * error recovery, and because this greatly simplifies restoring the + * stmt_mcontext stack to the correct state after an error. We can + * ameliorate the cost of this by allowing the called statements to + * use this mcontext too; so we don't push it down here. + */ + stmt_mcontext = get_stmt_mcontext(estate); + BeginInternalSubTransaction(NULL); /* Want to run statements inside function's memory context */ MemoryContextSwitchTo(oldcontext); @@ -1202,7 +1309,9 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) * If the block ended with RETURN, we may need to copy the return * value out of the subtransaction eval_context. This is * currently only needed for scalar result types --- rowtype - * values will always exist in the function's own memory context. + * values will always exist in the function's main memory context, + * cf. exec_stmt_return(). We can avoid a physical copy if the + * value happens to be a R/W expanded object. 
*/ if (rc == PLPGSQL_RC_RETURN && !estate->retisset && @@ -1213,8 +1322,8 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) bool resTypByVal; get_typlenbyval(estate->rettype, &resTypLen, &resTypByVal); - estate->retval = datumCopy(estate->retval, - resTypByVal, resTypLen); + estate->retval = datumTransfer(estate->retval, + resTypByVal, resTypLen); } /* Commit the inner transaction, return to outer xact context */ @@ -1222,6 +1331,9 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) MemoryContextSwitchTo(oldcontext); CurrentResourceOwner = oldowner; + /* Assert that the stmt_mcontext stack is unchanged */ + Assert(stmt_mcontext == estate->stmt_mcontext); + /* * Revert to outer eval_econtext. (The inner one was * automatically cleaned up during subxact exit.) @@ -1241,8 +1353,8 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) estate->err_text = gettext_noop("during exception cleanup"); - /* Save error info */ - MemoryContextSwitchTo(oldcontext); + /* Save error info in our stmt_mcontext */ + MemoryContextSwitchTo(stmt_mcontext); edata = CopyErrorData(); FlushErrorState(); @@ -1251,6 +1363,26 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) MemoryContextSwitchTo(oldcontext); CurrentResourceOwner = oldowner; + /* + * Set up the stmt_mcontext stack as though we had restored our + * previous state and then done push_stmt_mcontext(). The push is + * needed so that statements in the exception handler won't + * clobber the error data that's in our stmt_mcontext. + */ + estate->stmt_mcontext_parent = stmt_mcontext; + estate->stmt_mcontext = NULL; + + /* + * Now we can delete any nested stmt_mcontexts that might have + * been created as children of ours. (Note: we do not immediately + * release any statement-lifespan data that might have been left + * behind in stmt_mcontext itself. We could attempt that by doing + * a MemoryContextReset on it before collecting the error data + * above, but it seems too risky to do any significant amount of + * work before collecting the error.) + */ + MemoryContextDeleteChildren(stmt_mcontext); + /* Revert to outer eval_econtext */ estate->eval_econtext = old_eval_econtext; @@ -1319,8 +1451,10 @@ exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block) /* If no match found, re-throw the error */ if (e == NULL) ReThrowError(edata); - else - FreeErrorData(edata); + + /* Restore stmt_mcontext stack and release the error data */ + pop_stmt_mcontext(estate); + MemoryContextReset(stmt_mcontext); } PG_END_TRY(); @@ -1663,11 +1797,15 @@ exec_stmt_getdiag(PLpgSQL_execstate *estate, PLpgSQL_stmt_getdiag *stmt) case PLPGSQL_GETDIAG_CONTEXT: { - char *contextstackstr = GetErrorContextStack(); + char *contextstackstr; + MemoryContext oldcontext; - exec_assign_c_string(estate, var, contextstackstr); + /* Use eval_mcontext for short-lived string */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); + contextstackstr = GetErrorContextStack(); + MemoryContextSwitchTo(oldcontext); - pfree(contextstackstr); + exec_assign_c_string(estate, var, contextstackstr); } break; @@ -1677,6 +1815,8 @@ exec_stmt_getdiag(PLpgSQL_execstate *estate, PLpgSQL_stmt_getdiag *stmt) } } + exec_eval_cleanup(estate); + return PLPGSQL_RC_OK; } @@ -1738,7 +1878,10 @@ exec_stmt_case(PLpgSQL_execstate *estate, PLpgSQL_stmt_case *stmt) /* * When expected datatype is different from real, change it. 
Note that * what we're modifying here is an execution copy of the datum, so - * this doesn't affect the originally stored function parse tree. + * this doesn't affect the originally stored function parse tree. (In + * theory, if the expression datatype keeps changing during execution, + * this could cause a function-lifespan memory leak. Doesn't seem + * worth worrying about though.) */ if (t_var->datatype->typoid != t_typoid || t_var->datatype->atttypmod != t_typmod) @@ -2132,6 +2275,7 @@ static int exec_stmt_forc(PLpgSQL_execstate *estate, PLpgSQL_stmt_forc *stmt) { PLpgSQL_var *curvar; + MemoryContext stmt_mcontext = NULL; char *curname = NULL; PLpgSQL_expr *query; ParamListInfo paramLI; @@ -2146,7 +2290,14 @@ exec_stmt_forc(PLpgSQL_execstate *estate, PLpgSQL_stmt_forc *stmt) curvar = (PLpgSQL_var *) (estate->datums[stmt->curvar]); if (!curvar->isnull) { + MemoryContext oldcontext; + + /* We only need stmt_mcontext to hold the cursor name string */ + stmt_mcontext = get_stmt_mcontext(estate); + oldcontext = MemoryContextSwitchTo(stmt_mcontext); curname = TextDatumGetCString(curvar->value); + MemoryContextSwitchTo(oldcontext); + if (SPI_cursor_find(curname) != NULL) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_CURSOR), @@ -2216,16 +2367,19 @@ exec_stmt_forc(PLpgSQL_execstate *estate, PLpgSQL_stmt_forc *stmt) elog(ERROR, "could not open cursor: %s", SPI_result_code_string(SPI_result)); - /* don't need paramlist any more */ - if (paramLI) - pfree(paramLI); - /* * If cursor variable was NULL, store the generated portal name in it */ if (curname == NULL) assign_text_var(estate, curvar, portal->name); + /* + * Clean up before entering exec_for_query + */ + exec_eval_cleanup(estate); + if (stmt_mcontext) + MemoryContextReset(stmt_mcontext); + /* * Execute the loop. We can't prefetch because the cursor is accessible * to the user, for instance via UPDATE WHERE CURRENT OF within the loop. @@ -2241,9 +2395,6 @@ exec_stmt_forc(PLpgSQL_execstate *estate, PLpgSQL_stmt_forc *stmt) if (curname == NULL) assign_simple_var(estate, curvar, (Datum) 0, true, false); - if (curname) - pfree(curname); - return rc; } @@ -2266,6 +2417,8 @@ exec_stmt_foreach_a(PLpgSQL_execstate *estate, PLpgSQL_stmt_foreach_a *stmt) Oid loop_var_elem_type; bool found = false; int rc = PLPGSQL_RC_OK; + MemoryContext stmt_mcontext; + MemoryContext oldcontext; ArrayIterator array_iterator; Oid iterator_result_type; int32 iterator_result_typmod; @@ -2279,6 +2432,15 @@ exec_stmt_foreach_a(PLpgSQL_execstate *estate, PLpgSQL_stmt_foreach_a *stmt) (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("FOREACH expression must not be null"))); + /* + * Do as much as possible of the code below in stmt_mcontext, to avoid any + * leaks from called subroutines. We need a private stmt_mcontext since + * we'll be calling arbitrary statement code. + */ + stmt_mcontext = get_stmt_mcontext(estate); + push_stmt_mcontext(estate); + oldcontext = MemoryContextSwitchTo(stmt_mcontext); + /* check the type of the expression - must be an array */ if (!OidIsValid(get_element_type(arrtype))) ereport(ERROR, @@ -2287,9 +2449,9 @@ exec_stmt_foreach_a(PLpgSQL_execstate *estate, PLpgSQL_stmt_foreach_a *stmt) format_type_be(arrtype)))); /* - * We must copy the array, else it will disappear in exec_eval_cleanup. - * This is annoying, but cleanup will certainly happen while running the - * loop body, so we have little choice. + * We must copy the array into stmt_mcontext, else it will disappear in + * exec_eval_cleanup. 
This is annoying, but cleanup will certainly happen + * while running the loop body, so we have little choice. */ arr = DatumGetArrayTypePCopy(value); @@ -2355,6 +2517,9 @@ exec_stmt_foreach_a(PLpgSQL_execstate *estate, PLpgSQL_stmt_foreach_a *stmt) { found = true; /* looped at least once */ + /* exec_assign_value and exec_stmts must run in the main context */ + MemoryContextSwitchTo(oldcontext); + /* Assign current element/slice to the loop variable */ exec_assign_value(estate, loop_var, value, isnull, iterator_result_type, iterator_result_typmod); @@ -2413,11 +2578,16 @@ exec_stmt_foreach_a(PLpgSQL_execstate *estate, PLpgSQL_stmt_foreach_a *stmt) break; } } + + MemoryContextSwitchTo(stmt_mcontext); } + /* Restore memory context state */ + MemoryContextSwitchTo(oldcontext); + pop_stmt_mcontext(estate); + /* Release temporary memory, including the array value */ - array_free_iterator(array_iterator); - pfree(arr); + MemoryContextReset(stmt_mcontext); /* * Set the FOUND variable to indicate the result of executing the loop @@ -2465,6 +2635,13 @@ exec_stmt_exit(PLpgSQL_execstate *estate, PLpgSQL_stmt_exit *stmt) /* ---------- * exec_stmt_return Evaluate an expression and start * returning from the function. + * + * Note: in the retistuple code paths, the returned tuple is always in the + * function's main context, whereas for non-tuple data types the result may + * be in the eval_mcontext. The former case is not a memory leak since we're + * about to exit the function anyway. (If you want to change it, note that + * exec_stmt_block() knows about this behavior.) The latter case means that + * we must not do exec_eval_cleanup while unwinding the control stack. * ---------- */ static int @@ -2639,8 +2816,8 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, { TupleDesc tupdesc; int natts; - HeapTuple tuple = NULL; - bool free_tuple = false; + HeapTuple tuple; + MemoryContext oldcontext; if (!estate->retisset) ereport(ERROR, @@ -2712,17 +2889,17 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, rec->refname), errdetail("The tuple structure of a not-yet-assigned" " record is indeterminate."))); + + /* Use eval_mcontext for tuple conversion work */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); tupmap = convert_tuples_by_position(rec->tupdesc, tupdesc, gettext_noop("wrong record type supplied in RETURN NEXT")); tuple = rec->tup; - /* it might need conversion */ if (tupmap) - { tuple = do_convert_tuple(tuple, tupmap); - free_conversion_map(tupmap); - free_tuple = true; - } + tuplestore_puttuple(estate->tuple_store, tuple); + MemoryContextSwitchTo(oldcontext); } break; @@ -2730,12 +2907,15 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, { PLpgSQL_row *row = (PLpgSQL_row *) retvar; + /* Use eval_mcontext for tuple conversion work */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); tuple = make_tuple_from_row(estate, row, tupdesc); if (tuple == NULL) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("wrong record type supplied in RETURN NEXT"))); - free_tuple = true; + tuplestore_puttuple(estate->tuple_store, tuple); + MemoryContextSwitchTo(oldcontext); } break; @@ -2770,24 +2950,17 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot return non-composite value from function returning composite type"))); + /* Use eval_mcontext for tuple conversion work */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); tuple = get_tuple_from_datum(retval); - free_tuple = true; /* tuple is always freshly 
palloc'd */ - - /* it might need conversion */ retvaldesc = get_tupdesc_from_datum(retval); tupmap = convert_tuples_by_position(retvaldesc, tupdesc, gettext_noop("returned record type does not match expected record type")); if (tupmap) - { - HeapTuple newtuple; - - newtuple = do_convert_tuple(tuple, tupmap); - free_conversion_map(tupmap); - heap_freetuple(tuple); - tuple = newtuple; - } + tuple = do_convert_tuple(tuple, tupmap); + tuplestore_puttuple(estate->tuple_store, tuple); ReleaseTupleDesc(retvaldesc); - /* tuple will be stored into tuplestore below */ + MemoryContextSwitchTo(oldcontext); } else { @@ -2795,13 +2968,13 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, Datum *nulldatums; bool *nullflags; - nulldatums = (Datum *) palloc0(natts * sizeof(Datum)); - nullflags = (bool *) palloc(natts * sizeof(bool)); + nulldatums = (Datum *) + eval_mcontext_alloc0(estate, natts * sizeof(Datum)); + nullflags = (bool *) + eval_mcontext_alloc(estate, natts * sizeof(bool)); memset(nullflags, true, natts * sizeof(bool)); tuplestore_putvalues(estate->tuple_store, tupdesc, nulldatums, nullflags); - pfree(nulldatums); - pfree(nullflags); } } else @@ -2832,14 +3005,6 @@ exec_stmt_return_next(PLpgSQL_execstate *estate, errmsg("RETURN NEXT must have a parameter"))); } - if (HeapTupleIsValid(tuple)) - { - tuplestore_puttuple(estate->tuple_store, tuple); - - if (free_tuple) - heap_freetuple(tuple); - } - exec_eval_cleanup(estate); return PLPGSQL_RC_OK; @@ -2858,6 +3023,7 @@ exec_stmt_return_query(PLpgSQL_execstate *estate, Portal portal; uint64 processed = 0; TupleConversionMap *tupmap; + MemoryContext oldcontext; if (!estate->retisset) ereport(ERROR, @@ -2881,6 +3047,9 @@ exec_stmt_return_query(PLpgSQL_execstate *estate, CURSOR_OPT_PARALLEL_OK); } + /* Use eval_mcontext for tuple conversion work */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); + tupmap = convert_tuples_by_position(portal->tupDesc, estate->rettupdesc, gettext_noop("structure of query does not match function result type")); @@ -2890,6 +3059,10 @@ exec_stmt_return_query(PLpgSQL_execstate *estate, uint64 i; SPI_cursor_fetch(portal, true, 50); + + /* SPI will have changed CurrentMemoryContext */ + MemoryContextSwitchTo(get_eval_mcontext(estate)); + if (SPI_processed == 0) break; @@ -2908,12 +3081,12 @@ exec_stmt_return_query(PLpgSQL_execstate *estate, SPI_freetuptable(SPI_tuptable); } - if (tupmap) - free_conversion_map(tupmap); - SPI_freetuptable(SPI_tuptable); SPI_cursor_close(portal); + MemoryContextSwitchTo(oldcontext); + exec_eval_cleanup(estate); + estate->eval_processed = processed; exec_set_found(estate, processed != 0); @@ -2965,7 +3138,7 @@ do { \ (errcode(ERRCODE_SYNTAX_ERROR), \ errmsg("RAISE option already specified: %s", \ name))); \ - opt = pstrdup(extval); \ + opt = MemoryContextStrdup(stmt_mcontext, extval); \ } while (0) /* ---------- @@ -2985,6 +3158,7 @@ exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt) char *err_datatype = NULL; char *err_table = NULL; char *err_schema = NULL; + MemoryContext stmt_mcontext; ListCell *lc; /* RAISE with no parameters: re-throw current exception */ @@ -2999,10 +3173,13 @@ exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt) errmsg("RAISE without parameters cannot be used outside an exception handler"))); } + /* We'll need to accumulate the various strings in stmt_mcontext */ + stmt_mcontext = get_stmt_mcontext(estate); + if (stmt->condname) { err_code = plpgsql_recognize_err_condition(stmt->condname, true); - condname = 
pstrdup(stmt->condname); + condname = MemoryContextStrdup(stmt_mcontext, stmt->condname); } if (stmt->message) @@ -3010,8 +3187,13 @@ exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt) StringInfoData ds; ListCell *current_param; char *cp; + MemoryContext oldcontext; + /* build string in stmt_mcontext */ + oldcontext = MemoryContextSwitchTo(stmt_mcontext); initStringInfo(&ds); + MemoryContextSwitchTo(oldcontext); + current_param = list_head(stmt->params); for (cp = stmt->message; *cp; cp++) @@ -3064,7 +3246,6 @@ exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt) elog(ERROR, "unexpected RAISE parameter list length"); err_message = ds.data; - /* No pfree(ds.data), the pfree(err_message) does it */ } foreach(lc, stmt->options) @@ -3096,7 +3277,7 @@ exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt) errmsg("RAISE option already specified: %s", "ERRCODE"))); err_code = plpgsql_recognize_err_condition(extval, true); - condname = pstrdup(extval); + condname = MemoryContextStrdup(stmt_mcontext, extval); break; case PLPGSQL_RAISEOPTION_MESSAGE: SET_RAISE_OPTION_TEXT(err_message, "MESSAGE"); @@ -3142,7 +3323,8 @@ exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt) condname = NULL; } else - err_message = pstrdup(unpack_sql_state(err_code)); + err_message = MemoryContextStrdup(stmt_mcontext, + unpack_sql_state(err_code)); } /* @@ -3164,24 +3346,8 @@ exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt) (err_schema != NULL) ? err_generic_string(PG_DIAG_SCHEMA_NAME, err_schema) : 0)); - if (condname != NULL) - pfree(condname); - if (err_message != NULL) - pfree(err_message); - if (err_detail != NULL) - pfree(err_detail); - if (err_hint != NULL) - pfree(err_hint); - if (err_column != NULL) - pfree(err_column); - if (err_constraint != NULL) - pfree(err_constraint); - if (err_datatype != NULL) - pfree(err_datatype); - if (err_table != NULL) - pfree(err_table); - if (err_schema != NULL) - pfree(err_schema); + /* Clean up transient strings */ + MemoryContextReset(stmt_mcontext); return PLPGSQL_RC_OK; } @@ -3329,6 +3495,14 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, estate->cast_hash_context = shared_cast_context; } + /* + * We start with no stmt_mcontext; one will be created only if needed. + * That context will be a direct child of the function's main execution + * context. Additional stmt_mcontexts might be created as children of it. + */ + estate->stmt_mcontext = NULL; + estate->stmt_mcontext_parent = CurrentMemoryContext; + estate->eval_tuptable = NULL; estate->eval_processed = 0; estate->eval_lastoid = InvalidOid; @@ -3378,7 +3552,10 @@ exec_eval_cleanup(PLpgSQL_execstate *estate) SPI_freetuptable(estate->eval_tuptable); estate->eval_tuptable = NULL; - /* Clear result of exec_eval_simple_expr (but keep the econtext) */ + /* + * Clear result of exec_eval_simple_expr (but keep the econtext). This + * also clears any short-lived allocations done via get_eval_mcontext. + */ if (estate->eval_econtext != NULL) ResetExprContext(estate->eval_econtext); } @@ -3430,7 +3607,7 @@ exec_prepare_plan(PLpgSQL_execstate *estate, expr->plan = plan; /* Check to see if it's a simple expression */ - exec_simple_check_plan(expr); + exec_simple_check_plan(estate, expr); /* * Mark expression as not using a read-write param. exec_assign_value has @@ -3443,6 +3620,9 @@ exec_prepare_plan(PLpgSQL_execstate *estate, /* ---------- * exec_stmt_execsql Execute an SQL statement (possibly with INTO). 
+ * + * Note: some callers rely on this not touching stmt_mcontext. If it ever + * needs to use that, fix those callers to push/pop stmt_mcontext. * ---------- */ static int @@ -3675,6 +3855,7 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate, char *querystr; int exec_res; PreparedParamsData *ppd = NULL; + MemoryContext stmt_mcontext = get_stmt_mcontext(estate); /* * First we evaluate the string expression after the EXECUTE keyword. Its @@ -3689,8 +3870,8 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate, /* Get the C-String representation */ querystr = convert_value_to_string(estate, query, restype); - /* copy it out of the temporary context before we clean up */ - querystr = pstrdup(querystr); + /* copy it into the stmt_mcontext before we clean up */ + querystr = MemoryContextStrdup(stmt_mcontext, querystr); exec_eval_cleanup(estate); @@ -3843,12 +4024,9 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate, */ } - if (ppd) - free_params_data(ppd); - - /* Release any result from SPI_execute, as well as the querystring */ + /* Release any result from SPI_execute, as well as transient data */ SPI_freetuptable(SPI_tuptable); - pfree(querystr); + MemoryContextReset(stmt_mcontext); return PLPGSQL_RC_OK; } @@ -3892,6 +4070,7 @@ static int exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt) { PLpgSQL_var *curvar; + MemoryContext stmt_mcontext = NULL; char *curname = NULL; PLpgSQL_expr *query; Portal portal; @@ -3905,7 +4084,14 @@ exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt) curvar = (PLpgSQL_var *) (estate->datums[stmt->curvar]); if (!curvar->isnull) { + MemoryContext oldcontext; + + /* We only need stmt_mcontext to hold the cursor name string */ + stmt_mcontext = get_stmt_mcontext(estate); + oldcontext = MemoryContextSwitchTo(stmt_mcontext); curname = TextDatumGetCString(curvar->value); + MemoryContextSwitchTo(oldcontext); + if (SPI_cursor_find(curname) != NULL) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_CURSOR), @@ -3942,7 +4128,10 @@ exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt) stmt->cursor_options); /* - * If cursor variable was NULL, store the generated portal name in it + * If cursor variable was NULL, store the generated portal name in it. + * Note: exec_dynquery_with_params already reset the stmt_mcontext, so + * curname is a dangling pointer here; but testing it for nullness is + * OK. 
*/ if (curname == NULL) assign_text_var(estate, curvar, portal->name); @@ -4019,10 +4208,10 @@ exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt) if (curname == NULL) assign_text_var(estate, curvar, portal->name); - if (curname) - pfree(curname); - if (paramLI) - pfree(paramLI); + /* If we had any transient data, clean it up */ + exec_eval_cleanup(estate); + if (stmt_mcontext) + MemoryContextReset(stmt_mcontext); return PLPGSQL_RC_OK; } @@ -4036,7 +4225,7 @@ exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt) static int exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt) { - PLpgSQL_var *curvar = NULL; + PLpgSQL_var *curvar; PLpgSQL_rec *rec = NULL; PLpgSQL_row *row = NULL; long how_many = stmt->how_many; @@ -4044,6 +4233,7 @@ exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt) Portal portal; char *curname; uint64 n; + MemoryContext oldcontext; /* ---------- * Get the portal of the cursor by name @@ -4054,14 +4244,17 @@ exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("cursor variable \"%s\" is null", curvar->refname))); + + /* Use eval_mcontext for short-lived string */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); curname = TextDatumGetCString(curvar->value); + MemoryContextSwitchTo(oldcontext); portal = SPI_cursor_find(curname); if (portal == NULL) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_CURSOR), errmsg("cursor \"%s\" does not exist", curname))); - pfree(curname); /* Calculate position for FETCH_RELATIVE or FETCH_ABSOLUTE */ if (stmt->expr) @@ -4133,9 +4326,10 @@ exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt) static int exec_stmt_close(PLpgSQL_execstate *estate, PLpgSQL_stmt_close *stmt) { - PLpgSQL_var *curvar = NULL; + PLpgSQL_var *curvar; Portal portal; char *curname; + MemoryContext oldcontext; /* ---------- * Get the portal of the cursor by name @@ -4146,14 +4340,17 @@ exec_stmt_close(PLpgSQL_execstate *estate, PLpgSQL_stmt_close *stmt) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("cursor variable \"%s\" is null", curvar->refname))); + + /* Use eval_mcontext for short-lived string */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); curname = TextDatumGetCString(curvar->value); + MemoryContextSwitchTo(oldcontext); portal = SPI_cursor_find(curname); if (portal == NULL) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_CURSOR), errmsg("cursor \"%s\" does not exist", curname))); - pfree(curname); /* ---------- * And close it. @@ -4201,6 +4398,9 @@ exec_assign_expr(PLpgSQL_execstate *estate, PLpgSQL_datum *target, * exec_assign_c_string Put a C string into a text variable. * * We take a NULL pointer as signifying empty string, not SQL null. + * + * As with the underlying exec_assign_value, caller is expected to do + * exec_eval_cleanup later. 
* ---------- */ static void @@ -4208,21 +4408,25 @@ exec_assign_c_string(PLpgSQL_execstate *estate, PLpgSQL_datum *target, const char *str) { text *value; + MemoryContext oldcontext; + /* Use eval_mcontext for short-lived text value */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); if (str != NULL) value = cstring_to_text(str); else value = cstring_to_text(""); + MemoryContextSwitchTo(oldcontext); + exec_assign_value(estate, target, PointerGetDatum(value), false, TEXTOID, -1); - pfree(value); } /* ---------- * exec_assign_value Put a value into a target datum * - * Note: in some code paths, this will leak memory in the eval_econtext; + * Note: in some code paths, this will leak memory in the eval_mcontext; * we assume that will be cleaned up later by exec_eval_cleanup. We cannot * call exec_eval_cleanup here for fear of destroying the input Datum value. * ---------- @@ -4259,10 +4463,10 @@ exec_assign_value(PLpgSQL_execstate *estate, /* * If type is by-reference, copy the new value (which is - * probably in the eval_econtext) into the procedure's memory - * context. But if it's a read/write reference to an expanded - * object, no physical copy needs to happen; at most we need - * to reparent the object's memory context. + * probably in the eval_mcontext) into the procedure's main + * memory context. But if it's a read/write reference to an + * expanded object, no physical copy needs to happen; at most + * we need to reparent the object's memory context. * * If it's an array, we force the value to be stored in R/W * expanded form. This wins if the function later does, say, @@ -4402,9 +4606,9 @@ exec_assign_value(PLpgSQL_execstate *estate, * the attributes except the one we want to replace, use the * value that's in the old tuple. */ - values = palloc(sizeof(Datum) * natts); - nulls = palloc(sizeof(bool) * natts); - replaces = palloc(sizeof(bool) * natts); + values = eval_mcontext_alloc(estate, sizeof(Datum) * natts); + nulls = eval_mcontext_alloc(estate, sizeof(bool) * natts); + replaces = eval_mcontext_alloc(estate, sizeof(bool) * natts); memset(replaces, false, sizeof(bool) * natts); replaces[fno] = true; @@ -4437,10 +4641,6 @@ exec_assign_value(PLpgSQL_execstate *estate, rec->tup = newtup; rec->freetup = true; - pfree(values); - pfree(nulls); - pfree(replaces); - break; } @@ -4601,7 +4801,7 @@ exec_assign_value(PLpgSQL_execstate *estate, return; /* empty array, if any, and newarraydatum are short-lived */ - oldcontext = MemoryContextSwitchTo(estate->eval_econtext->ecxt_per_tuple_memory); + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); if (oldarrayisnull) oldarraydatum = PointerGetDatum(construct_empty_array(arrayelem->elemtypoid)); @@ -4655,7 +4855,7 @@ exec_assign_value(PLpgSQL_execstate *estate, * responsibility that the results are semantically OK. * * In some cases we have to palloc a return value, and in such cases we put - * it into the estate's short-term memory context. + * it into the estate's eval_mcontext. 
*/ static void exec_eval_datum(PLpgSQL_execstate *estate, @@ -4689,7 +4889,7 @@ exec_eval_datum(PLpgSQL_execstate *estate, elog(ERROR, "row variable has no tupdesc"); /* Make sure we have a valid type/typmod setting */ BlessTupleDesc(row->rowtupdesc); - oldcontext = MemoryContextSwitchTo(estate->eval_econtext->ecxt_per_tuple_memory); + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); tup = make_tuple_from_row(estate, row, row->rowtupdesc); if (tup == NULL) /* should not happen */ elog(ERROR, "row not compatible with its own tupdesc"); @@ -4715,7 +4915,7 @@ exec_eval_datum(PLpgSQL_execstate *estate, /* Make sure we have a valid type/typmod setting */ BlessTupleDesc(rec->tupdesc); - oldcontext = MemoryContextSwitchTo(estate->eval_econtext->ecxt_per_tuple_memory); + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); *typeid = rec->tupdesc->tdtypeid; *typetypmod = rec->tupdesc->tdtypmod; *value = heap_copy_tuple_as_datum(rec->tup, rec->tupdesc); @@ -5107,8 +5307,7 @@ exec_run_select(PLpgSQL_execstate *estate, if (*portalP == NULL) elog(ERROR, "could not open implicit cursor for query \"%s\": %s", expr->query, SPI_result_code_string(SPI_result)); - if (paramLI) - pfree(paramLI); + exec_eval_cleanup(estate); return SPI_OK_CURSOR; } @@ -5323,9 +5522,8 @@ loop_exit: * give correct results if that happens, and it's unlikely to be worth the * cycles to check. * - * Note: if pass-by-reference, the result is in the eval_econtext's - * temporary memory context. It will be freed when exec_eval_cleanup - * is done. + * Note: if pass-by-reference, the result is in the eval_mcontext. + * It will be freed when exec_eval_cleanup is done. * ---------- */ static bool @@ -5357,9 +5555,12 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate, /* * Revalidate cached plan, so that we will notice if it became stale. (We - * need to hold a refcount while using the plan, anyway.) + * need to hold a refcount while using the plan, anyway.) If replanning + * is needed, do that work in the eval_mcontext. */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); cplan = SPI_plan_get_cached_plan(expr->plan); + MemoryContextSwitchTo(oldcontext); /* * We can't get a failure here, because the number of CachedPlanSources in @@ -5411,7 +5612,7 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate, */ SPI_push(); - oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); if (!estate->readonly_func) { CommandCounterIncrement(); @@ -5449,8 +5650,7 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate, /* Assorted cleanup */ expr->expr_simple_in_use = false; - if (paramLI && paramLI != estate->paramLI) - pfree(paramLI); + econtext->ecxt_param_list_info = NULL; estate->paramLI->parserSetupArg = save_setup_arg; @@ -5498,8 +5698,8 @@ exec_eval_simple_expr(PLpgSQL_execstate *estate, * throw errors (for example "no such record field") and we do not want that * to happen in a part of the expression that might never be evaluated at * runtime. For another thing, exec_eval_datum() may return short-lived - * values stored in the estate's short-term memory context, which will not - * necessarily survive to the next SPI operation. And for a third thing, ROW + * values stored in the estate's eval_mcontext, which will not necessarily + * survive to the next SPI operation. And for a third thing, ROW * and RECFIELD datums' values depend on other datums, and we don't have a * cheap way to track that. 
Therefore, param slots for non-VAR datum types * are always reset here and then filled on-demand by plpgsql_param_fetch(). @@ -5598,7 +5798,7 @@ setup_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr) * to some trusted function. We don't want the R/W pointer to get into the * shared param list, where it could get passed to some less-trusted function. * - * Caller should pfree the result after use, if it's not NULL. + * The result, if not NULL, is in the estate's eval_mcontext. * * XXX. Could we use ParamListInfo's new paramMask to avoid creating unshared * parameter lists? @@ -5626,8 +5826,9 @@ setup_unshared_param_list(PLpgSQL_execstate *estate, PLpgSQL_expr *expr) /* initialize ParamListInfo with one entry per datum, all invalid */ paramLI = (ParamListInfo) - palloc0(offsetof(ParamListInfoData, params) + - estate->ndatums * sizeof(ParamExternData)); + eval_mcontext_alloc0(estate, + offsetof(ParamListInfoData, params) + + estate->ndatums * sizeof(ParamExternData)); paramLI->paramFetch = plpgsql_param_fetch; paramLI->paramFetchArg = (void *) estate; paramLI->parserSetup = (ParserSetupHook) plpgsql_parser_setup; @@ -5784,12 +5985,11 @@ exec_move_row(PLpgSQL_execstate *estate, /* If we have a tupdesc but no data, form an all-nulls tuple */ bool *nulls; - nulls = (bool *) palloc(tupdesc->natts * sizeof(bool)); + nulls = (bool *) + eval_mcontext_alloc(estate, tupdesc->natts * sizeof(bool)); memset(nulls, true, tupdesc->natts * sizeof(bool)); tup = heap_form_tuple(tupdesc, NULL, nulls); - - pfree(nulls); } if (tupdesc) @@ -5907,6 +6107,9 @@ exec_move_row(PLpgSQL_execstate *estate, * make_tuple_from_row Make a tuple from the values of a row object * * A NULL return indicates rowtype mismatch; caller must raise suitable error + * + * The result tuple is freshly palloc'd in caller's context. Some junk + * may be left behind in eval_mcontext, too. * ---------- */ static HeapTuple @@ -5923,8 +6126,8 @@ make_tuple_from_row(PLpgSQL_execstate *estate, if (natts != row->nfields) return NULL; - dvalues = (Datum *) palloc0(natts * sizeof(Datum)); - nulls = (bool *) palloc(natts * sizeof(bool)); + dvalues = (Datum *) eval_mcontext_alloc0(estate, natts * sizeof(Datum)); + nulls = (bool *) eval_mcontext_alloc(estate, natts * sizeof(bool)); for (i = 0; i < natts; i++) { @@ -5949,16 +6152,13 @@ make_tuple_from_row(PLpgSQL_execstate *estate, tuple = heap_form_tuple(tupdesc, dvalues, nulls); - pfree(dvalues); - pfree(nulls); - return tuple; } /* ---------- * get_tuple_from_datum extract a tuple from a composite Datum * - * Returns a freshly palloc'd HeapTuple. + * Returns a HeapTuple, freshly palloc'd in caller's context. * * Note: it's caller's responsibility to be sure value is of composite type. * ---------- @@ -6041,7 +6241,7 @@ exec_move_row_from_datum(PLpgSQL_execstate *estate, /* ---------- * convert_value_to_string Convert a non-null Datum to C string * - * Note: the result is in the estate's eval_econtext, and will be cleared + * Note: the result is in the estate's eval_mcontext, and will be cleared * by the next exec_eval_cleanup() call. The invoked output function might * leave additional cruft there as well, so just pfree'ing the result string * would not be enough to avoid memory leaks if we did not do it like this. 
@@ -6061,7 +6261,7 @@ convert_value_to_string(PLpgSQL_execstate *estate, Datum value, Oid valtype) Oid typoutput; bool typIsVarlena; - oldcontext = MemoryContextSwitchTo(estate->eval_econtext->ecxt_per_tuple_memory); + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); getTypeOutputInfo(valtype, &typoutput, &typIsVarlena); result = OidOutputFunctionCall(typoutput, value); MemoryContextSwitchTo(oldcontext); @@ -6076,7 +6276,7 @@ convert_value_to_string(PLpgSQL_execstate *estate, Datum value, Oid valtype) * unlikely that a cast operation would produce null from non-null or vice * versa, that could happen in principle. * - * Note: the estate's eval_econtext is used for temporary storage, and may + * Note: the estate's eval_mcontext is used for temporary storage, and may * also contain the result Datum if we have to do a conversion to a pass- * by-reference data type. Be sure to do an exec_eval_cleanup() call when * done with the result. @@ -6104,7 +6304,7 @@ exec_cast_value(PLpgSQL_execstate *estate, ExprContext *econtext = estate->eval_econtext; MemoryContext oldcontext; - oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); econtext->caseValue_datum = value; econtext->caseValue_isNull = *isnull; @@ -6161,10 +6361,10 @@ get_cast_hashentry(PLpgSQL_execstate *estate, /* * Since we could easily fail (no such coercion), construct a - * temporary coercion expression tree in a short-lived context, then - * if successful copy it to cast_hash_context. + * temporary coercion expression tree in the short-lived + * eval_mcontext, then if successful copy it to cast_hash_context. */ - oldcontext = MemoryContextSwitchTo(estate->eval_econtext->ecxt_per_tuple_memory); + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); /* * We use a CaseTestExpr as the base of the coercion tree, since it's @@ -6548,12 +6748,13 @@ exec_simple_check_node(Node *node) * ---------- */ static void -exec_simple_check_plan(PLpgSQL_expr *expr) +exec_simple_check_plan(PLpgSQL_execstate *estate, PLpgSQL_expr *expr) { List *plansources; CachedPlanSource *plansource; Query *query; CachedPlan *cplan; + MemoryContext oldcontext; /* * Initialize to "not simple", and remember the plan generation number we @@ -6624,10 +6825,13 @@ exec_simple_check_plan(PLpgSQL_expr *expr) /* * OK, it seems worth constructing a plan for more careful checking. + * + * Get the generic plan for the query. If replanning is needed, do that + * work in the eval_mcontext. */ - - /* Get the generic plan for the query */ + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); cplan = SPI_plan_get_cached_plan(expr->plan); + MemoryContextSwitchTo(oldcontext); /* Can't fail, because we checked for a single CachedPlanSource above */ Assert(cplan != NULL); @@ -7011,23 +7215,30 @@ assign_text_var(PLpgSQL_execstate *estate, PLpgSQL_var *var, const char *str) /* * exec_eval_using_params --- evaluate params of USING clause + * + * The result data structure is created in the stmt_mcontext, and should + * be freed by resetting that context. 
*/ static PreparedParamsData * exec_eval_using_params(PLpgSQL_execstate *estate, List *params) { PreparedParamsData *ppd; + MemoryContext stmt_mcontext = get_stmt_mcontext(estate); int nargs; int i; ListCell *lc; - ppd = (PreparedParamsData *) palloc(sizeof(PreparedParamsData)); + ppd = (PreparedParamsData *) + MemoryContextAlloc(stmt_mcontext, sizeof(PreparedParamsData)); nargs = list_length(params); ppd->nargs = nargs; - ppd->types = (Oid *) palloc(nargs * sizeof(Oid)); - ppd->values = (Datum *) palloc(nargs * sizeof(Datum)); - ppd->nulls = (char *) palloc(nargs * sizeof(char)); - ppd->freevals = (bool *) palloc(nargs * sizeof(bool)); + ppd->types = (Oid *) + MemoryContextAlloc(stmt_mcontext, nargs * sizeof(Oid)); + ppd->values = (Datum *) + MemoryContextAlloc(stmt_mcontext, nargs * sizeof(Datum)); + ppd->nulls = (char *) + MemoryContextAlloc(stmt_mcontext, nargs * sizeof(char)); i = 0; foreach(lc, params) @@ -7035,13 +7246,15 @@ exec_eval_using_params(PLpgSQL_execstate *estate, List *params) PLpgSQL_expr *param = (PLpgSQL_expr *) lfirst(lc); bool isnull; int32 ppdtypmod; + MemoryContext oldcontext; ppd->values[i] = exec_eval_expr(estate, param, &isnull, &ppd->types[i], &ppdtypmod); ppd->nulls[i] = isnull ? 'n' : ' '; - ppd->freevals[i] = false; + + oldcontext = MemoryContextSwitchTo(stmt_mcontext); if (ppd->types[i] == UNKNOWNOID) { @@ -7054,12 +7267,9 @@ exec_eval_using_params(PLpgSQL_execstate *estate, List *params) */ ppd->types[i] = TEXTOID; if (!isnull) - { ppd->values[i] = CStringGetTextDatum(DatumGetCString(ppd->values[i])); - ppd->freevals[i] = true; - } } - /* pass-by-ref non null values must be copied into plpgsql context */ + /* pass-by-ref non null values must be copied into stmt_mcontext */ else if (!isnull) { int16 typLen; @@ -7067,12 +7277,11 @@ exec_eval_using_params(PLpgSQL_execstate *estate, List *params) get_typlenbyval(ppd->types[i], &typLen, &typByVal); if (!typByVal) - { ppd->values[i] = datumCopy(ppd->values[i], typByVal, typLen); - ppd->freevals[i] = true; - } } + MemoryContextSwitchTo(oldcontext); + exec_eval_cleanup(estate); i++; @@ -7081,30 +7290,13 @@ exec_eval_using_params(PLpgSQL_execstate *estate, List *params) return ppd; } -/* - * free_params_data --- pfree all pass-by-reference values used in USING clause - */ -static void -free_params_data(PreparedParamsData *ppd) -{ - int i; - - for (i = 0; i < ppd->nargs; i++) - { - if (ppd->freevals[i]) - pfree(DatumGetPointer(ppd->values[i])); - } - - pfree(ppd->types); - pfree(ppd->values); - pfree(ppd->nulls); - pfree(ppd->freevals); - - pfree(ppd); -} - /* * Open portal for dynamic query + * + * Caution: this resets the stmt_mcontext at exit. We might eventually need + * to move that responsibility to the callers, but currently no caller needs + * to have statement-lifetime temp data that survives past this, so it's + * simpler to do it here. */ static Portal exec_dynquery_with_params(PLpgSQL_execstate *estate, @@ -7119,6 +7311,7 @@ exec_dynquery_with_params(PLpgSQL_execstate *estate, Oid restype; int32 restypmod; char *querystr; + MemoryContext stmt_mcontext = get_stmt_mcontext(estate); /* * Evaluate the string expression after the EXECUTE keyword. 
Its result is @@ -7133,8 +7326,8 @@ exec_dynquery_with_params(PLpgSQL_execstate *estate, /* Get the C-String representation */ querystr = convert_value_to_string(estate, query, restype); - /* copy it out of the temporary context before we clean up */ - querystr = pstrdup(querystr); + /* copy it into the stmt_mcontext before we clean up */ + querystr = MemoryContextStrdup(stmt_mcontext, querystr); exec_eval_cleanup(estate); @@ -7154,7 +7347,6 @@ exec_dynquery_with_params(PLpgSQL_execstate *estate, ppd->values, ppd->nulls, estate->readonly_func, cursorOptions); - free_params_data(ppd); } else { @@ -7169,7 +7361,9 @@ exec_dynquery_with_params(PLpgSQL_execstate *estate, if (portal == NULL) elog(ERROR, "could not open implicit cursor for query \"%s\": %s", querystr, SPI_result_code_string(SPI_result)); - pfree(querystr); + + /* Release transient data */ + MemoryContextReset(stmt_mcontext); return portal; } @@ -7177,6 +7371,7 @@ exec_dynquery_with_params(PLpgSQL_execstate *estate, /* * Return a formatted string with information about an expression's parameters, * or NULL if the expression does not take any parameters. + * The result is in the eval_mcontext. */ static char * format_expr_params(PLpgSQL_execstate *estate, @@ -7185,10 +7380,13 @@ format_expr_params(PLpgSQL_execstate *estate, int paramno; int dno; StringInfoData paramstr; + MemoryContext oldcontext; if (!expr->paramnos) return NULL; + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); + initStringInfo(¶mstr); paramno = 0; dno = -1; @@ -7230,12 +7428,15 @@ format_expr_params(PLpgSQL_execstate *estate, paramno++; } + MemoryContextSwitchTo(oldcontext); + return paramstr.data; } /* * Return a formatted string with information about PreparedParamsData, or NULL * if there are no parameters. + * The result is in the eval_mcontext. */ static char * format_preparedparamsdata(PLpgSQL_execstate *estate, @@ -7243,10 +7444,13 @@ format_preparedparamsdata(PLpgSQL_execstate *estate, { int paramno; StringInfoData paramstr; + MemoryContext oldcontext; if (!ppd) return NULL; + oldcontext = MemoryContextSwitchTo(get_eval_mcontext(estate)); + initStringInfo(¶mstr); for (paramno = 0; paramno < ppd->nargs; paramno++) { @@ -7272,5 +7476,7 @@ format_preparedparamsdata(PLpgSQL_execstate *estate, } } + MemoryContextSwitchTo(oldcontext); + return paramstr.data; } diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h index 140bf4badd..bfd52af3e7 100644 --- a/src/pl/plpgsql/src/plpgsql.h +++ b/src/pl/plpgsql/src/plpgsql.h @@ -814,10 +814,14 @@ typedef struct PLpgSQL_execstate /* EState to use for "simple" expression evaluation */ EState *simple_eval_estate; - /* Lookup table to use for executing type casts */ + /* lookup table to use for executing type casts */ HTAB *cast_hash; MemoryContext cast_hash_context; + /* memory context for statement-lifespan temporary values */ + MemoryContext stmt_mcontext; /* current stmt context, or NULL if none */ + MemoryContext stmt_mcontext_parent; /* parent of current context */ + /* temporary state for results from evaluation of query or expr */ SPITupleTable *eval_tuptable; uint64 eval_processed; -- cgit v1.2.3 From 2d7e591007a6f44e5e27e2b6c1098483105c0d10 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Wed, 17 Aug 2016 13:15:03 -0700 Subject: Properly re-initialize replication slot shared memory upon creation. Slot creation did not clear all fields upon creation. 
After start the memory is zeroed, but when a physical replication slot was created in the shared memory of a previously existing logical slot, catalog_xmin would not be cleared. That in turn would prevent vacuum from doing its duties. To fix initialize all the fields. To make similar future bugs less likely, zero all of ReplicationSlotPersistentData, and re-order the rest of the initialization to be in struct member order. Analysis: Andrew Gierth Reported-By: md@chewy.com Author: Michael Paquier Discussion: <20160705173502.1398.70934@wrigleys.postgresql.org> Backpatch: 9.4, where replication slots were introduced --- src/backend/replication/slot.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'src') diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 2fb7c17d7d..0b2575ee9d 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -272,12 +272,22 @@ ReplicationSlotCreate(const char *name, bool db_specific, */ Assert(!slot->in_use); Assert(slot->active_pid == 0); - slot->data.persistency = persistency; - slot->data.xmin = InvalidTransactionId; - slot->effective_xmin = InvalidTransactionId; + + /* first initialize persistent data */ + memset(&slot->data, 0, sizeof(ReplicationSlotPersistentData)); StrNCpy(NameStr(slot->data.name), name, NAMEDATALEN); slot->data.database = db_specific ? MyDatabaseId : InvalidOid; - slot->data.restart_lsn = InvalidXLogRecPtr; + slot->data.persistency = persistency; + + /* and then data only present in shared memory */ + slot->just_dirtied = false; + slot->dirty = false; + slot->effective_xmin = InvalidTransactionId; + slot->effective_catalog_xmin = InvalidTransactionId; + slot->candidate_catalog_xmin = InvalidTransactionId; + slot->candidate_xmin_lsn = InvalidXLogRecPtr; + slot->candidate_restart_valid = InvalidXLogRecPtr; + slot->candidate_restart_lsn = InvalidXLogRecPtr; /* * Create the slot on disk. We haven't actually marked the slot allocated -- cgit v1.2.3 From cf9b0fea5f6d1bcc9b2c66f5c30ecb04684a0919 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 17 Aug 2016 18:32:56 -0400 Subject: Implement regexp_match(), a simplified alternative to regexp_matches(). regexp_match() is like regexp_matches(), but it disallows the 'g' flag and in consequence does not need to return a set. Instead, it returns a simple text array value, or NULL if there's no match. Previously people usually got that behavior with a sub-select, but this way is considerably more efficient. Documentation adjusted so that regexp_match() is presented first and then regexp_matches() is introduced as a more complicated version. This is a bit historically revisionist but seems pedagogically better. Still TODO: extend contrib/citext to support this function. 
Emre Hasegeli, reviewed by David Johnston Discussion: --- doc/src/sgml/func.sgml | 155 ++++++++++++++++++++--------- src/backend/catalog/information_schema.sql | 2 +- src/backend/utils/adt/regexp.c | 137 +++++++++++++++++-------- src/include/catalog/catversion.h | 2 +- src/include/catalog/pg_proc.h | 8 +- src/include/utils/builtins.h | 2 + src/test/regress/expected/regex.out | 28 ++++++ src/test/regress/expected/strings.out | 4 +- src/test/regress/sql/regex.sql | 7 ++ 9 files changed, 252 insertions(+), 93 deletions(-) (limited to 'src') diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 426e562b03..169a385a9c 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -2036,6 +2036,23 @@ '42.5' + + + + regexp_match + + regexp_match(string text, pattern text [, flags text]) + + text[] + + Return captured substring(s) resulting from the first match of a POSIX + regular expression to the string. See + for more information. + + regexp_match('foobarbequebaz', '(bar)(beque)') + {bar,beque} + + @@ -2045,12 +2062,12 @@ setof text[] - Return all captured substrings resulting from matching a POSIX regular - expression against the string. See + Return captured substring(s) resulting from matching a POSIX regular + expression to the string. See for more information. - regexp_matches('foobarbequebaz', '(bar)(beque)') - {bar,beque} + regexp_matches('foobarbequebaz', 'ba.', 'g') + {bar}{baz} (2 rows) @@ -4112,6 +4129,9 @@ substring('foobar' from '#"o_b#"%' for '#') NULL regexp_replace + + regexp_match + regexp_matches @@ -4272,64 +4292,106 @@ regexp_replace('foobarbaz', 'b(..)', E'X\\1Y', 'g') - The regexp_matches function returns a text array of - all of the captured substrings resulting from matching a POSIX - regular expression pattern. It has the syntax - regexp_matches(string, pattern - , flags ). - The function can return no rows, one row, or multiple rows (see - the g flag below). If the pattern - does not match, the function returns no rows. If the pattern - contains no parenthesized subexpressions, then each row - returned is a single-element text array containing the substring - matching the whole pattern. If the pattern contains parenthesized - subexpressions, the function returns a text array whose - n'th element is the substring matching the - n'th parenthesized subexpression of the pattern - (not counting non-capturing parentheses; see below for - details). - The flags parameter is an optional text - string containing zero or more single-letter flags that change the - function's behavior. Flag g causes the function to find - each match in the string, not only the first one, and return a row for - each such match. Supported flags (though - not g) - are described in . + The regexp_match function returns a text array of + captured substring(s) resulting from the first match of a POSIX + regular expression pattern to a string. It has the syntax + regexp_match(string, + pattern , flags ). + If there is no match, the result is NULL. + If a match is found, and the pattern contains no + parenthesized subexpressions, then the result is a single-element text + array containing the substring matching the whole pattern. + If a match is found, and the pattern contains + parenthesized subexpressions, then the result is a text array + whose n'th element is the substring matching + the n'th parenthesized subexpression of + the pattern (not counting non-capturing + parentheses; see below for details). 
+ The flags parameter is an optional text string + containing zero or more single-letter flags that change the function's + behavior. Supported flags are described + in . Some examples: -SELECT regexp_matches('foobarbequebaz', '(bar)(beque)'); - regexp_matches ----------------- +SELECT regexp_match('foobarbequebaz', 'bar.*que'); + regexp_match +-------------- + {barbeque} +(1 row) + +SELECT regexp_match('foobarbequebaz', '(bar)(beque)'); + regexp_match +-------------- {bar,beque} (1 row) + + In the common case where you just want the whole matching substring + or NULL for no match, write something like + +SELECT (regexp_match('foobarbequebaz', 'bar.*que'))[1]; + regexp_match +-------------- + barbeque +(1 row) + + + + + The regexp_matches function returns a set of text arrays + of captured substring(s) resulting from matching a POSIX regular + expression pattern to a string. It has the same syntax as + regexp_match. + This function returns no rows if there is no match, one row if there is + a match and the g flag is not given, or N + rows if there are N matches and the g flag + is given. Each returned row is a text array containing the whole + matched substring or the substrings matching parenthesized + subexpressions of the pattern, just as described above + for regexp_match. + regexp_matches accepts all the flags shown + in , plus + the g flag which commands it to return all matches, not + just the first one. + + + + Some examples: + + SELECT regexp_matches('foo', 'not there'); + regexp_matches +---------------- +(0 rows) SELECT regexp_matches('foobarbequebazilbarfbonk', '(b[^b]+)(b[^b]+)', 'g'); - regexp_matches + regexp_matches ---------------- {bar,beque} {bazil,barf} (2 rows) - -SELECT regexp_matches('foobarbequebaz', 'barbeque'); - regexp_matches ----------------- - {barbeque} -(1 row) - - It is possible to force regexp_matches() to always - return one row by using a sub-select; this is particularly useful - in a SELECT target list when you want all rows - returned, even non-matching ones: + + + In most cases regexp_matches() should be used with + the g flag, since if you only want the first match, it's + easier and more efficient to use regexp_match(). + However, regexp_match() only exists + in PostgreSQL version 10 and up. When working in older + versions, a common trick is to place a regexp_matches() + call in a sub-select, for example: SELECT col1, (SELECT regexp_matches(col2, '(bar)(beque)')) FROM tab; - + This produces a text array if there's a match, or NULL if + not, the same as regexp_match() would do. Without the + sub-select, this query would produce no output at all for table rows + without a match, which is typically not the desired behavior. + + The regexp_split_to_table function splits a string using a POSIX @@ -4408,6 +4470,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; zero-length matches that occur at the start or end of the string or immediately after a previous match. This is contrary to the strict definition of regexp matching that is implemented by + regexp_match and regexp_matches, but is usually the most convenient behavior in practice. Other software systems such as Perl use similar definitions. @@ -5482,7 +5545,7 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); into the digits and the parts before and after them. 
We might try to do that like this: -SELECT regexp_matches('abc01234xyz', '(.*)(\d+)(.*)'); +SELECT regexp_match('abc01234xyz', '(.*)(\d+)(.*)'); Result: {abc0123,4,xyz} That didn't work: the first .* is greedy so @@ -5490,14 +5553,14 @@ SELECT regexp_matches('abc01234xyz', '(.*)(\d+)(.*)'); match at the last possible place, the last digit. We might try to fix that by making it non-greedy: -SELECT regexp_matches('abc01234xyz', '(.*?)(\d+)(.*)'); +SELECT regexp_match('abc01234xyz', '(.*?)(\d+)(.*)'); Result: {abc,0,""} That didn't work either, because now the RE as a whole is non-greedy and so it ends the overall match as soon as possible. We can get what we want by forcing the RE as a whole to be greedy: -SELECT regexp_matches('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); +SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Result: {abc,01234,xyz} Controlling the RE's overall greediness separately from its components' diff --git a/src/backend/catalog/information_schema.sql b/src/backend/catalog/information_schema.sql index 18be08fead..00550eb804 100644 --- a/src/backend/catalog/information_schema.sql +++ b/src/backend/catalog/information_schema.sql @@ -2068,7 +2068,7 @@ CREATE VIEW triggers AS -- XXX strange hacks follow CAST( CASE WHEN pg_has_role(c.relowner, 'USAGE') - THEN (SELECT m[1] FROM regexp_matches(pg_get_triggerdef(t.oid), E'.{35,} WHEN \\((.+)\\) EXECUTE PROCEDURE') AS rm(m) LIMIT 1) + THEN (regexp_match(pg_get_triggerdef(t.oid), E'.{35,} WHEN \\((.+)\\) EXECUTE PROCEDURE'))[1] ELSE null END AS character_data) AS action_condition, CAST( diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c index 5b216e0b72..bc5e34e222 100644 --- a/src/backend/utils/adt/regexp.c +++ b/src/backend/utils/adt/regexp.c @@ -47,7 +47,7 @@ typedef struct pg_re_flags bool glob; /* do it globally (for each occurrence) */ } pg_re_flags; -/* cross-call state for regexp_matches(), also regexp_split() */ +/* cross-call state for regexp_match and regexp_split functions */ typedef struct regexp_matches_ctx { text *orig_str; /* data string in original TEXT form */ @@ -57,7 +57,7 @@ typedef struct regexp_matches_ctx /* so the number of entries in match_locs is nmatches * npatterns * 2 */ int *match_locs; /* 0-based character indexes */ int next_match; /* 0-based index of next match to process */ - /* workspace for build_regexp_matches_result() */ + /* workspace for build_regexp_match_result() */ Datum *elems; /* has npatterns elements */ bool *nulls; /* has npatterns elements */ } regexp_matches_ctx; @@ -107,13 +107,12 @@ static cached_re_str re_array[MAX_CACHED_RES]; /* cached re's */ /* Local functions */ static regexp_matches_ctx *setup_regexp_matches(text *orig_str, text *pattern, - text *flags, + pg_re_flags *flags, Oid collation, - bool force_glob, bool use_subpatterns, bool ignore_degenerate); static void cleanup_regexp_matches(regexp_matches_ctx *matchctx); -static ArrayType *build_regexp_matches_result(regexp_matches_ctx *matchctx); +static ArrayType *build_regexp_match_result(regexp_matches_ctx *matchctx); static Datum build_regexp_split_result(regexp_matches_ctx *splitctx); @@ -350,7 +349,7 @@ RE_compile_and_execute(text *text_re, char *dat, int dat_len, /* - * parse_re_flags - parse the options argument of regexp_matches and friends + * parse_re_flags - parse the options argument of regexp_match and friends * * flags --- output argument, filled with desired options * opts --- TEXT object, or NULL for defaults @@ -840,9 +839,53 @@ similar_escape(PG_FUNCTION_ARGS) 
PG_RETURN_TEXT_P(result); } +/* + * regexp_match() + * Return the first substring(s) matching a pattern within a string. + */ +Datum +regexp_match(PG_FUNCTION_ARGS) +{ + text *orig_str = PG_GETARG_TEXT_PP(0); + text *pattern = PG_GETARG_TEXT_PP(1); + text *flags = PG_GETARG_TEXT_PP_IF_EXISTS(2); + pg_re_flags re_flags; + regexp_matches_ctx *matchctx; + + /* Determine options */ + parse_re_flags(&re_flags, flags); + /* User mustn't specify 'g' */ + if (re_flags.glob) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("regexp_match does not support the global option"), + errhint("Use the regexp_matches function instead."))); + + matchctx = setup_regexp_matches(orig_str, pattern, &re_flags, + PG_GET_COLLATION(), true, false); + + if (matchctx->nmatches == 0) + PG_RETURN_NULL(); + + Assert(matchctx->nmatches == 1); + + /* Create workspace that build_regexp_match_result needs */ + matchctx->elems = (Datum *) palloc(sizeof(Datum) * matchctx->npatterns); + matchctx->nulls = (bool *) palloc(sizeof(bool) * matchctx->npatterns); + + PG_RETURN_DATUM(PointerGetDatum(build_regexp_match_result(matchctx))); +} + +/* This is separate to keep the opr_sanity regression test from complaining */ +Datum +regexp_match_no_flags(PG_FUNCTION_ARGS) +{ + return regexp_match(fcinfo); +} + /* * regexp_matches() - * Return a table of matches of a pattern within a string. + * Return a table of all matches of a pattern within a string. */ Datum regexp_matches(PG_FUNCTION_ARGS) @@ -854,18 +897,22 @@ regexp_matches(PG_FUNCTION_ARGS) { text *pattern = PG_GETARG_TEXT_PP(1); text *flags = PG_GETARG_TEXT_PP_IF_EXISTS(2); + pg_re_flags re_flags; MemoryContext oldcontext; funcctx = SRF_FIRSTCALL_INIT(); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + /* Determine options */ + parse_re_flags(&re_flags, flags); + /* be sure to copy the input string into the multi-call ctx */ matchctx = setup_regexp_matches(PG_GETARG_TEXT_P_COPY(0), pattern, - flags, + &re_flags, PG_GET_COLLATION(), - false, true, false); + true, false); - /* Pre-create workspace that build_regexp_matches_result needs */ + /* Pre-create workspace that build_regexp_match_result needs */ matchctx->elems = (Datum *) palloc(sizeof(Datum) * matchctx->npatterns); matchctx->nulls = (bool *) palloc(sizeof(bool) * matchctx->npatterns); @@ -880,7 +927,7 @@ regexp_matches(PG_FUNCTION_ARGS) { ArrayType *result_ary; - result_ary = build_regexp_matches_result(matchctx); + result_ary = build_regexp_match_result(matchctx); matchctx->next_match++; SRF_RETURN_NEXT(funcctx, PointerGetDatum(result_ary)); } @@ -899,28 +946,27 @@ regexp_matches_no_flags(PG_FUNCTION_ARGS) } /* - * setup_regexp_matches --- do the initial matching for regexp_matches() - * or regexp_split() + * setup_regexp_matches --- do the initial matching for regexp_match + * and regexp_split functions * * To avoid having to re-find the compiled pattern on each call, we do * all the matching in one swoop. The returned regexp_matches_ctx contains * the locations of all the substrings matching the pattern. * - * The three bool parameters have only two patterns (one for each caller) - * but it seems clearer to distinguish the functionality this way than to - * key it all off one "is_split" flag. + * The two bool parameters have only two patterns (one for matching, one for + * splitting) but it seems clearer to distinguish the functionality this way + * than to key it all off one "is_split" flag. 
*/ static regexp_matches_ctx * -setup_regexp_matches(text *orig_str, text *pattern, text *flags, +setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags, Oid collation, - bool force_glob, bool use_subpatterns, + bool use_subpatterns, bool ignore_degenerate) { regexp_matches_ctx *matchctx = palloc0(sizeof(regexp_matches_ctx)); int orig_len; pg_wchar *wide_str; int wide_len; - pg_re_flags re_flags; regex_t *cpattern; regmatch_t *pmatch; int pmatch_len; @@ -937,21 +983,8 @@ setup_regexp_matches(text *orig_str, text *pattern, text *flags, wide_str = (pg_wchar *) palloc(sizeof(pg_wchar) * (orig_len + 1)); wide_len = pg_mb2wchar_with_len(VARDATA_ANY(orig_str), wide_str, orig_len); - /* determine options */ - parse_re_flags(&re_flags, flags); - if (force_glob) - { - /* user mustn't specify 'g' for regexp_split */ - if (re_flags.glob) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("regexp_split does not support the global option"))); - /* but we find all the matches anyway */ - re_flags.glob = true; - } - /* set up the compiled pattern */ - cpattern = RE_compile_and_cache(pattern, re_flags.cflags, collation); + cpattern = RE_compile_and_cache(pattern, re_flags->cflags, collation); /* do we want to remember subpatterns? */ if (use_subpatterns && cpattern->re_nsub > 0) @@ -970,7 +1003,7 @@ setup_regexp_matches(text *orig_str, text *pattern, text *flags, pmatch = palloc(sizeof(regmatch_t) * pmatch_len); /* the real output space (grown dynamically if needed) */ - array_len = re_flags.glob ? 256 : 32; + array_len = re_flags->glob ? 256 : 32; matchctx->match_locs = (int *) palloc(sizeof(int) * array_len); array_idx = 0; @@ -1018,7 +1051,7 @@ setup_regexp_matches(text *orig_str, text *pattern, text *flags, prev_match_end = pmatch[0].rm_eo; /* if not glob, stop after one match */ - if (!re_flags.glob) + if (!re_flags->glob) break; /* @@ -1057,10 +1090,10 @@ cleanup_regexp_matches(regexp_matches_ctx *matchctx) } /* - * build_regexp_matches_result - build output array for current match + * build_regexp_match_result - build output array for current match */ static ArrayType * -build_regexp_matches_result(regexp_matches_ctx *matchctx) +build_regexp_match_result(regexp_matches_ctx *matchctx) { Datum *elems = matchctx->elems; bool *nulls = matchctx->nulls; @@ -1114,16 +1147,27 @@ regexp_split_to_table(PG_FUNCTION_ARGS) { text *pattern = PG_GETARG_TEXT_PP(1); text *flags = PG_GETARG_TEXT_PP_IF_EXISTS(2); + pg_re_flags re_flags; MemoryContext oldcontext; funcctx = SRF_FIRSTCALL_INIT(); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + /* Determine options */ + parse_re_flags(&re_flags, flags); + /* User mustn't specify 'g' */ + if (re_flags.glob) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("regexp_split_to_table does not support the global option"))); + /* But we find all the matches anyway */ + re_flags.glob = true; + /* be sure to copy the input string into the multi-call ctx */ splitctx = setup_regexp_matches(PG_GETARG_TEXT_P_COPY(0), pattern, - flags, + &re_flags, PG_GET_COLLATION(), - true, false, true); + false, true); MemoryContextSwitchTo(oldcontext); funcctx->user_fctx = (void *) splitctx; @@ -1162,13 +1206,24 @@ Datum regexp_split_to_array(PG_FUNCTION_ARGS) { ArrayBuildState *astate = NULL; + pg_re_flags re_flags; regexp_matches_ctx *splitctx; + /* Determine options */ + parse_re_flags(&re_flags, PG_GETARG_TEXT_PP_IF_EXISTS(2)); + /* User mustn't specify 'g' */ + if (re_flags.glob) + ereport(ERROR, + 
(errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("regexp_split_to_array does not support the global option"))); + /* But we find all the matches anyway */ + re_flags.glob = true; + splitctx = setup_regexp_matches(PG_GETARG_TEXT_PP(0), PG_GETARG_TEXT_PP(1), - PG_GETARG_TEXT_PP_IF_EXISTS(2), + &re_flags, PG_GET_COLLATION(), - true, false, true); + false, true); while (splitctx->next_match <= splitctx->nmatches) { diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 82810c8fba..fb356bf3cd 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 201608161 +#define CATALOG_VERSION_NO 201608171 #endif diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index af19c1a82b..6fed7a0d19 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -1912,10 +1912,14 @@ DATA(insert OID = 2284 ( regexp_replace PGNSP PGUID 12 1 0 0 0 f f f f t f i DESCR("replace text using regexp"); DATA(insert OID = 2285 ( regexp_replace PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 25 "25 25 25 25" _null_ _null_ _null_ _null_ _null_ textregexreplace _null_ _null_ _null_ )); DESCR("replace text using regexp"); +DATA(insert OID = 3396 ( regexp_match PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1009 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_match_no_flags _null_ _null_ _null_ )); +DESCR("find first match for regexp"); +DATA(insert OID = 3397 ( regexp_match PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1009 "25 25 25" _null_ _null_ _null_ _null_ _null_ regexp_match _null_ _null_ _null_ )); +DESCR("find first match for regexp"); DATA(insert OID = 2763 ( regexp_matches PGNSP PGUID 12 1 1 0 0 f f f f t t i s 2 0 1009 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_matches_no_flags _null_ _null_ _null_ )); -DESCR("find all match groups for regexp"); +DESCR("find match(es) for regexp"); DATA(insert OID = 2764 ( regexp_matches PGNSP PGUID 12 1 10 0 0 f f f f t t i s 3 0 1009 "25 25 25" _null_ _null_ _null_ _null_ _null_ regexp_matches _null_ _null_ _null_ )); -DESCR("find all match groups for regexp"); +DESCR("find match(es) for regexp"); DATA(insert OID = 2088 ( split_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 23" _null_ _null_ _null_ _null_ _null_ split_text _null_ _null_ _null_ )); DESCR("split string by field_sep and return field_num"); DATA(insert OID = 2765 ( regexp_split_to_table PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_table_no_flags _null_ _null_ _null_ )); diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index a91be981b9..40e25c8824 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -628,6 +628,8 @@ extern Datum textregexsubstr(PG_FUNCTION_ARGS); extern Datum textregexreplace_noopt(PG_FUNCTION_ARGS); extern Datum textregexreplace(PG_FUNCTION_ARGS); extern Datum similar_escape(PG_FUNCTION_ARGS); +extern Datum regexp_match(PG_FUNCTION_ARGS); +extern Datum regexp_match_no_flags(PG_FUNCTION_ARGS); extern Datum regexp_matches(PG_FUNCTION_ARGS); extern Datum regexp_matches_no_flags(PG_FUNCTION_ARGS); extern Datum regexp_split_to_table(PG_FUNCTION_ARGS); diff --git a/src/test/regress/expected/regex.out b/src/test/regress/expected/regex.out index af097193c5..79a7fa7a84 100644 --- a/src/test/regress/expected/regex.out +++ b/src/test/regress/expected/regex.out @@ -90,6 +90,34 @@ select substring('a' from '((a)+)'); a (1 row) 
+-- Test regexp_match() +select regexp_match('abc', ''); + regexp_match +-------------- + {""} +(1 row) + +select regexp_match('abc', 'bc'); + regexp_match +-------------- + {bc} +(1 row) + +select regexp_match('abc', 'd') is null; + ?column? +---------- + t +(1 row) + +select regexp_match('abc', '(B)(c)', 'i'); + regexp_match +-------------- + {b,c} +(1 row) + +select regexp_match('abc', 'Bd', 'ig'); -- error +ERROR: regexp_match does not support the global option +HINT: Use the regexp_matches function instead. -- Test lookahead constraints select regexp_matches('ab', 'a(?=b)b*'); regexp_matches diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out index 19708c32fd..35cadb24aa 100644 --- a/src/test/regress/expected/strings.out +++ b/src/test/regress/expected/strings.out @@ -681,9 +681,9 @@ SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', ERROR: invalid regexp option: "z" -- global option meaningless for regexp_split SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'g') AS foo; -ERROR: regexp_split does not support the global option +ERROR: regexp_split_to_table does not support the global option SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'g'); -ERROR: regexp_split does not support the global option +ERROR: regexp_split_to_array does not support the global option -- change NULL-display back \pset null '' -- E021-11 position expression diff --git a/src/test/regress/sql/regex.sql b/src/test/regress/sql/regex.sql index 1028ca6dcd..1361b62570 100644 --- a/src/test/regress/sql/regex.sql +++ b/src/test/regress/sql/regex.sql @@ -25,6 +25,13 @@ select substring('asd TO foo' from ' TO (([a-z0-9._]+|"([^"]+|"")+")+)'); select substring('a' from '((a))+'); select substring('a' from '((a)+)'); +-- Test regexp_match() +select regexp_match('abc', ''); +select regexp_match('abc', 'bc'); +select regexp_match('abc', 'd') is null; +select regexp_match('abc', '(B)(c)', 'i'); +select regexp_match('abc', 'Bd', 'ig'); -- error + -- Test lookahead constraints select regexp_matches('ab', 'a(?=b)b*'); select regexp_matches('a', 'a(?=b)b*'); -- cgit v1.2.3 From 07ef035129586ca26a713c4cd15e550dfe35e643 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Wed, 17 Aug 2016 17:03:36 -0700 Subject: Fix deletion of speculatively inserted TOAST on conflict INSERT .. ON CONFLICT runs a pre-check of the possible conflicting constraints before performing the actual speculative insertion. In case the inserted tuple included TOASTed columns the ON CONFLICT condition would be handled correctly in case the conflict was caught by the pre-check, but if two transactions entered the speculative insertion phase at the same time, one would have to re-try, and the code for aborting a speculative insertion did not handle deleting the speculatively inserted TOAST datums correctly. TOAST deletion would fail with "ERROR: attempted to delete invisible tuple" as we attempted to remove the TOAST tuples using simple_heap_delete which reasoned that the given tuples should not be visible to the command that wrote them. This commit updates the heap_abort_speculative() function which aborts the conflicting tuple to use itself, via toast_delete, for deleting associated TOAST datums. Like before, the inserted toast rows are not marked as being speculative. This commit also adds a isolationtester spec test, exercising the relevant code path. 
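For illustration, the statement shape exercised by the new isolation test can be written in plain SQL as follows (the table name here is only an example; the problem only manifests when two sessions run the INSERT for the same key concurrently, so that one of them has to abort its speculative insertion):

CREATE TABLE ctoast_demo (key int PRIMARY KEY, val text);

-- run concurrently in two sessions; the value is large enough to be TOASTed
INSERT INTO ctoast_demo (key, val)
VALUES (1, (SELECT array_agg(md5(g::text))::text FROM generate_series(1, 256) g))
ON CONFLICT DO NOTHING;

Previously the losing session could fail with "ERROR: attempted to delete invisible tuple" while removing its speculatively inserted TOAST rows; with this fix both statements complete and exactly one row remains.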
Unfortunately 9.5 cannot handle two waiting sessions, and thus cannot execute this test. Reported-By: Viren Negi, Oskari Saarenmaa Author: Oskari Saarenmaa, edited a bit by me Bug: #14150 Discussion: <20160519123338.12513.20271@wrigleys.postgresql.org> Backpatch: 9.5, where ON CONFLICT was introduced --- src/backend/access/heap/heapam.c | 12 +++-- src/backend/access/heap/tuptoaster.c | 15 ++++--- src/backend/utils/time/tqual.c | 4 +- src/include/access/tuptoaster.h | 2 +- .../isolation/expected/insert-conflict-toast.out | 15 +++++++ src/test/isolation/isolation_schedule | 1 + .../isolation/specs/insert-conflict-toast.spec | 51 ++++++++++++++++++++++ 7 files changed, 87 insertions(+), 13 deletions(-) create mode 100644 src/test/isolation/expected/insert-conflict-toast.out create mode 100644 src/test/isolation/specs/insert-conflict-toast.spec (limited to 'src') diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 24bd9be5e1..c63dfa0baf 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -3335,7 +3335,7 @@ l1: Assert(!HeapTupleHasExternal(&tp)); } else if (HeapTupleHasExternal(&tp)) - toast_delete(relation, &tp); + toast_delete(relation, &tp, false); /* * Mark tuple for invalidation from system caches at next command @@ -6057,7 +6057,8 @@ heap_finish_speculative(Relation relation, HeapTuple tuple) * could deadlock with each other, which would not be acceptable. * * This is somewhat redundant with heap_delete, but we prefer to have a - * dedicated routine with stripped down requirements. + * dedicated routine with stripped down requirements. Note that this is also + * used to delete the TOAST tuples created during speculative insertion. * * This routine does not affect logical decoding as it only looks at * confirmation records. 
@@ -6101,7 +6102,7 @@ heap_abort_speculative(Relation relation, HeapTuple tuple) */ if (tp.t_data->t_choice.t_heap.t_xmin != xid) elog(ERROR, "attempted to kill a tuple inserted by another transaction"); - if (!HeapTupleHeaderIsSpeculative(tp.t_data)) + if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data))) elog(ERROR, "attempted to kill a non-speculative tuple"); Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data)); @@ -6171,7 +6172,10 @@ heap_abort_speculative(Relation relation, HeapTuple tuple) LockBuffer(buffer, BUFFER_LOCK_UNLOCK); if (HeapTupleHasExternal(&tp)) - toast_delete(relation, &tp); + { + Assert(!IsToastRelation(relation)); + toast_delete(relation, &tp, true); + } /* * Never need to mark tuple for invalidation, since catalogs don't support diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c index bbb2649371..fc4702ce6f 100644 --- a/src/backend/access/heap/tuptoaster.c +++ b/src/backend/access/heap/tuptoaster.c @@ -67,7 +67,7 @@ typedef struct toast_compress_header #define TOAST_COMPRESS_SET_RAWSIZE(ptr, len) \ (((toast_compress_header *) (ptr))->rawsize = (len)) -static void toast_delete_datum(Relation rel, Datum value); +static void toast_delete_datum(Relation rel, Datum value, bool is_speculative); static Datum toast_save_datum(Relation rel, Datum value, struct varlena * oldexternal, int options); static bool toastrel_valueid_exists(Relation toastrel, Oid valueid); @@ -461,7 +461,7 @@ toast_datum_size(Datum value) * ---------- */ void -toast_delete(Relation rel, HeapTuple oldtup) +toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) { TupleDesc tupleDesc; Form_pg_attribute *att; @@ -508,7 +508,7 @@ toast_delete(Relation rel, HeapTuple oldtup) if (toast_isnull[i]) continue; else if (VARATT_IS_EXTERNAL_ONDISK(PointerGetDatum(value))) - toast_delete_datum(rel, value); + toast_delete_datum(rel, value, is_speculative); } } } @@ -1064,7 +1064,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, if (need_delold) for (i = 0; i < numAttrs; i++) if (toast_delold[i]) - toast_delete_datum(rel, toast_oldvalues[i]); + toast_delete_datum(rel, toast_oldvalues[i], false); return result_tuple; } @@ -1656,7 +1656,7 @@ toast_save_datum(Relation rel, Datum value, * ---------- */ static void -toast_delete_datum(Relation rel, Datum value) +toast_delete_datum(Relation rel, Datum value, bool is_speculative) { struct varlena *attr = (struct varlena *) DatumGetPointer(value); struct varatt_external toast_pointer; @@ -1707,7 +1707,10 @@ toast_delete_datum(Relation rel, Datum value) /* * Have a chunk, delete it */ - simple_heap_delete(toastrel, &toasttup->t_self); + if (is_speculative) + heap_abort_speculative(toastrel, toasttup); + else + simple_heap_delete(toastrel, &toasttup->t_self); } /* diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c index d99c847000..26a3be3a61 100644 --- a/src/backend/utils/time/tqual.c +++ b/src/backend/utils/time/tqual.c @@ -418,8 +418,8 @@ HeapTupleSatisfiesToast(HeapTuple htup, Snapshot snapshot, /* * An invalid Xmin can be left behind by a speculative insertion that - * is canceled by super-deleting the tuple. We shouldn't see any of - * those in TOAST tables, but better safe than sorry. + * is canceled by super-deleting the tuple. This also applies to + * TOAST tuples created during speculative insertion. 
*/ else if (!TransactionIdIsValid(HeapTupleHeaderGetXmin(tuple))) return false; diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h index 7b5ae6245e..011f5c1906 100644 --- a/src/include/access/tuptoaster.h +++ b/src/include/access/tuptoaster.h @@ -142,7 +142,7 @@ extern HeapTuple toast_insert_or_update(Relation rel, * Called by heap_delete(). * ---------- */ -extern void toast_delete(Relation rel, HeapTuple oldtup); +extern void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative); /* ---------- * heap_tuple_fetch_attr() - diff --git a/src/test/isolation/expected/insert-conflict-toast.out b/src/test/isolation/expected/insert-conflict-toast.out new file mode 100644 index 0000000000..e86b98020f --- /dev/null +++ b/src/test/isolation/expected/insert-conflict-toast.out @@ -0,0 +1,15 @@ +Parsed test spec with 3 sessions + +starting permutation: s2insert s3insert s1commit +pg_advisory_xact_lock + + +step s2insert: + INSERT INTO ctoast (key, val) VALUES (1, ctoast_large_val()) ON CONFLICT DO NOTHING; + +step s3insert: + INSERT INTO ctoast (key, val) VALUES (1, ctoast_large_val()) ON CONFLICT DO NOTHING; + +step s1commit: COMMIT; +step s2insert: <... completed> +step s3insert: <... completed> diff --git a/src/test/isolation/isolation_schedule b/src/test/isolation/isolation_schedule index 87ab7742fc..a96a318987 100644 --- a/src/test/isolation/isolation_schedule +++ b/src/test/isolation/isolation_schedule @@ -28,6 +28,7 @@ test: insert-conflict-do-nothing test: insert-conflict-do-update test: insert-conflict-do-update-2 test: insert-conflict-do-update-3 +test: insert-conflict-toast test: delete-abort-savept test: delete-abort-savept-2 test: aborted-keyrevoke diff --git a/src/test/isolation/specs/insert-conflict-toast.spec b/src/test/isolation/specs/insert-conflict-toast.spec new file mode 100644 index 0000000000..c5e39ef9e3 --- /dev/null +++ b/src/test/isolation/specs/insert-conflict-toast.spec @@ -0,0 +1,51 @@ +# INSERT...ON CONFLICT test on table with TOAST +# +# This test verifies that speculatively inserted toast rows do not +# cause conflicts. It does so by using expression index over a +# function which acquires an advisory lock, triggering two index +# insertions to happen almost at the same time. This is not guaranteed +# to lead to a failed speculative insertion, but makes one quite +# likely. 
+ +setup +{ + CREATE TABLE ctoast (key int primary key, val text); + CREATE OR REPLACE FUNCTION ctoast_lock_func(int) RETURNS INT IMMUTABLE LANGUAGE SQL AS 'select pg_advisory_xact_lock_shared(1); select $1;'; + CREATE OR REPLACE FUNCTION ctoast_large_val() RETURNS TEXT LANGUAGE SQL AS 'select array_agg(md5(g::text))::text from generate_series(1, 256) g'; + CREATE UNIQUE INDEX ctoast_lock_idx ON ctoast (ctoast_lock_func(key)); +} + +teardown +{ + DROP TABLE ctoast; + DROP FUNCTION ctoast_lock_func(int); + DROP FUNCTION ctoast_large_val(); +} + +session "s1" +setup +{ + BEGIN ISOLATION LEVEL READ COMMITTED; + SELECT pg_advisory_xact_lock(1); +} +step "s1commit" { COMMIT; } + +session "s2" +setup +{ + SET default_transaction_isolation = 'read committed'; +} +step "s2insert" { + INSERT INTO ctoast (key, val) VALUES (1, ctoast_large_val()) ON CONFLICT DO NOTHING; +} + +session "s3" +setup +{ + SET default_transaction_isolation = 'read committed'; +} +step "s3insert" { + INSERT INTO ctoast (key, val) VALUES (1, ctoast_large_val()) ON CONFLICT DO NOTHING; +} + +permutation "s2insert" "s3insert" "s1commit" -- cgit v1.2.3 From 8d3b9cce81c173da55b9500353e5d773f8449a66 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 18 Aug 2016 13:25:31 +0300 Subject: Refactor sendAuthRequest. This way sendAuthRequest doesn't need to know the details of all the different authentication methods. This is in preparation for adding SCRAM authentication, which will add yet another authentication request message type, with different payload. Reviewed-By: Michael Paquier Discussion: --- src/backend/libpq/auth.c | 52 +++++++++++++++++------------------------------- 1 file changed, 18 insertions(+), 34 deletions(-) (limited to 'src') diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 7d8fc3e54d..fc8b99b444 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -36,7 +36,8 @@ * Global authentication functions *---------------------------------------------------------------- */ -static void sendAuthRequest(Port *port, AuthRequest areq); +static void sendAuthRequest(Port *port, AuthRequest areq, char *extradata, + int extralen); static void auth_failed(Port *port, int status, char *logdetail); static char *recv_password_packet(Port *port); static int recv_and_check_password_packet(Port *port, char **logdetail); @@ -498,7 +499,7 @@ ClientAuthentication(Port *port) case uaGSS: #ifdef ENABLE_GSS - sendAuthRequest(port, AUTH_REQ_GSS); + sendAuthRequest(port, AUTH_REQ_GSS, NULL, 0); status = pg_GSS_recvauth(port); #else Assert(false); @@ -507,7 +508,7 @@ ClientAuthentication(Port *port) case uaSSPI: #ifdef ENABLE_SSPI - sendAuthRequest(port, AUTH_REQ_SSPI); + sendAuthRequest(port, AUTH_REQ_SSPI, NULL, 0); status = pg_SSPI_recvauth(port); #else Assert(false); @@ -531,12 +532,13 @@ ClientAuthentication(Port *port) ereport(FATAL, (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION), errmsg("MD5 authentication is not supported when \"db_user_namespace\" is enabled"))); - sendAuthRequest(port, AUTH_REQ_MD5); + /* include the salt to use for computing the response */ + sendAuthRequest(port, AUTH_REQ_MD5, port->md5Salt, 4); status = recv_and_check_password_packet(port, &logdetail); break; case uaPassword: - sendAuthRequest(port, AUTH_REQ_PASSWORD); + sendAuthRequest(port, AUTH_REQ_PASSWORD, NULL, 0); status = recv_and_check_password_packet(port, &logdetail); break; @@ -583,7 +585,7 @@ ClientAuthentication(Port *port) (*ClientAuthentication_hook) (port, status); if (status == STATUS_OK) - 
sendAuthRequest(port, AUTH_REQ_OK); + sendAuthRequest(port, AUTH_REQ_OK, NULL, 0); else auth_failed(port, status, logdetail); } @@ -593,7 +595,7 @@ ClientAuthentication(Port *port) * Send an authentication request packet to the frontend. */ static void -sendAuthRequest(Port *port, AuthRequest areq) +sendAuthRequest(Port *port, AuthRequest areq, char *extradata, int extralen) { StringInfoData buf; @@ -601,28 +603,8 @@ sendAuthRequest(Port *port, AuthRequest areq) pq_beginmessage(&buf, 'R'); pq_sendint(&buf, (int32) areq, sizeof(int32)); - - /* Add the salt for encrypted passwords. */ - if (areq == AUTH_REQ_MD5) - pq_sendbytes(&buf, port->md5Salt, 4); - -#if defined(ENABLE_GSS) || defined(ENABLE_SSPI) - - /* - * Add the authentication data for the next step of the GSSAPI or SSPI - * negotiation. - */ - else if (areq == AUTH_REQ_GSS_CONT) - { - if (port->gss->outbuf.length > 0) - { - elog(DEBUG4, "sending GSS token of length %u", - (unsigned int) port->gss->outbuf.length); - - pq_sendbytes(&buf, port->gss->outbuf.value, port->gss->outbuf.length); - } - } -#endif + if (extralen > 0) + pq_sendbytes(&buf, extradata, extralen); pq_endmessage(&buf); @@ -934,7 +916,8 @@ pg_GSS_recvauth(Port *port) elog(DEBUG4, "sending GSS response token of length %u", (unsigned int) port->gss->outbuf.length); - sendAuthRequest(port, AUTH_REQ_GSS_CONT); + sendAuthRequest(port, AUTH_REQ_GSS_CONT, + port->gss->outbuf.value, port->gss->outbuf.length); gss_release_buffer(&lmin_s, &port->gss->outbuf); } @@ -1179,7 +1162,8 @@ pg_SSPI_recvauth(Port *port) port->gss->outbuf.length = outbuf.pBuffers[0].cbBuffer; port->gss->outbuf.value = outbuf.pBuffers[0].pvBuffer; - sendAuthRequest(port, AUTH_REQ_GSS_CONT); + sendAuthRequest(port, AUTH_REQ_GSS_CONT, + port->gss->outbuf.value, port->gss->outbuf.length); FreeContextBuffer(outbuf.pBuffers[0].pvBuffer); } @@ -1807,7 +1791,7 @@ pam_passwd_conv_proc(int num_msg, const struct pam_message ** msg, * let's go ask the client to send a password, which we * then stuff into PAM. */ - sendAuthRequest(pam_port_cludge, AUTH_REQ_PASSWORD); + sendAuthRequest(pam_port_cludge, AUTH_REQ_PASSWORD, NULL, 0); passwd = recv_password_packet(pam_port_cludge); if (passwd == NULL) { @@ -2137,7 +2121,7 @@ CheckLDAPAuth(Port *port) if (port->hba->ldapport == 0) port->hba->ldapport = LDAP_PORT; - sendAuthRequest(port, AUTH_REQ_PASSWORD); + sendAuthRequest(port, AUTH_REQ_PASSWORD, NULL, 0); passwd = recv_password_packet(port); if (passwd == NULL) @@ -2497,7 +2481,7 @@ CheckRADIUSAuth(Port *port) identifier = port->hba->radiusidentifier; /* Send regular password request to client, and get the response */ - sendAuthRequest(port, AUTH_REQ_PASSWORD); + sendAuthRequest(port, AUTH_REQ_PASSWORD, NULL, 0); passwd = recv_password_packet(port); if (passwd == NULL) -- cgit v1.2.3 From a79a68562240c58f21680483a8d2e137803bd48f Mon Sep 17 00:00:00 2001 From: Magnus Hagander Date: Thu, 18 Aug 2016 12:32:42 +0200 Subject: Update Windows timezone mapping from Windows 7 and 10 This adds a couple of new timezones that are present in the newer versions of Windows. It also updates comments to reference UTC rather than GMT, as this change has been made in Windows. 
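As a quick check, the Olson zone names referenced by the new entries can be verified against the server's installed tzdata with a query such as the following (assuming the tzdata shipped with the server includes these zones):

SELECT name, utc_offset
FROM pg_timezone_names
WHERE name IN ('Asia/Barnaul', 'Asia/Tomsk', 'Pacific/Bougainville');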
Michael Paquier --- src/bin/initdb/findtimezone.c | 389 ++++++++++++++++++++++++++++++------------ 1 file changed, 281 insertions(+), 108 deletions(-) (limited to 'src') diff --git a/src/bin/initdb/findtimezone.c b/src/bin/initdb/findtimezone.c index 5a443131ee..b32ba9fbee 100644 --- a/src/bin/initdb/findtimezone.c +++ b/src/bin/initdb/findtimezone.c @@ -584,7 +584,7 @@ static const struct /* * This list was built from the contents of the registry at * HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time - * Zones on Windows 2003 R2. + * Zones on Windows 10 and Windows 7. * * The zones have been matched to Olson timezones by looking at the cities * listed in the win32 display name (in the comment here) in most cases. @@ -592,417 +592,590 @@ static const struct { "Afghanistan Standard Time", "Afghanistan Daylight Time", "Asia/Kabul" - }, /* (GMT+04:30) Kabul */ + }, /* (UTC+04:30) Kabul */ { "Alaskan Standard Time", "Alaskan Daylight Time", "US/Alaska" - }, /* (GMT-09:00) Alaska */ + }, /* (UTC-09:00) Alaska */ + { + "Aleutian Standard Time", "Aleutian Daylight Time", + "US/Aleutan" + }, /* (UTC-10:00) Aleutian Islands */ + { + "Altai Standard Time", "Altai Daylight Time", + "Asia/Barnaul" + }, /* (UTC+07:00) Barnaul, Gorno-Altaysk */ { "Arab Standard Time", "Arab Daylight Time", "Asia/Kuwait" - }, /* (GMT+03:00) Kuwait, Riyadh */ + }, /* (UTC+03:00) Kuwait, Riyadh */ { "Arabian Standard Time", "Arabian Daylight Time", "Asia/Muscat" - }, /* (GMT+04:00) Abu Dhabi, Muscat */ + }, /* (UTC+04:00) Abu Dhabi, Muscat */ { "Arabic Standard Time", "Arabic Daylight Time", "Asia/Baghdad" - }, /* (GMT+03:00) Baghdad */ + }, /* (UTC+03:00) Baghdad */ { "Argentina Standard Time", "Argentina Daylight Time", "America/Buenos_Aires" - }, /* (GMT-03:00) Buenos Aires */ + }, /* (UTC-03:00) City of Buenos Aires */ { "Armenian Standard Time", "Armenian Daylight Time", "Asia/Yerevan" - }, /* (GMT+04:00) Yerevan */ + }, /* (UTC+04:00) Baku, Tbilisi, Yerevan */ + { + "Astrakhan Standard Time", "Astrakhan Daylight Time", + "Europe/Astrakhan" + }, /* (UTC+04:00) Astrakhan, Ulyanovsk */ { "Atlantic Standard Time", "Atlantic Daylight Time", "Canada/Atlantic" - }, /* (GMT-04:00) Atlantic Time (Canada) */ + }, /* (UTC-04:00) Atlantic Time (Canada) */ { "AUS Central Standard Time", "AUS Central Daylight Time", "Australia/Darwin" - }, /* (GMT+09:30) Darwin */ + }, /* (UTC+09:30) Darwin */ + { + "Aus Central W. Standard Time", "Aus Central W. 
Daylight Time", + "Australia/Eucla" + }, /* (UTC+08:45) Eucla */ { "AUS Eastern Standard Time", "AUS Eastern Daylight Time", "Australia/Canberra" - }, /* (GMT+10:00) Canberra, Melbourne, Sydney */ + }, /* (UTC+10:00) Canberra, Melbourne, Sydney */ { "Azerbaijan Standard Time", "Azerbaijan Daylight Time", "Asia/Baku" - }, /* (GMT+04:00) Baku */ + }, /* (UTC+04:00) Baku */ { "Azores Standard Time", "Azores Daylight Time", "Atlantic/Azores" - }, /* (GMT-01:00) Azores */ + }, /* (UTC-01:00) Azores */ + { + "Bahia Standard Time", "Bahia Daylight Time", + "America/Salvador" + }, /* (UTC-03:00) Salvador */ { "Bangladesh Standard Time", "Bangladesh Daylight Time", "Asia/Dhaka" - }, /* (GMT+06:00) Dhaka */ + }, /* (UTC+06:00) Dhaka */ + { + "Bougainville Standard Time", "Bougainville Daylight Time", + "Pacific/Bougainville" + }, /* (UTC+11:00) Bougainville Island */ + { + "Belarus Standard Time", "Belarus Daylight Time", + "Europe/Minsk" + }, /* (UTC+03:00) Minsk */ + { + "Cabo Verde Standard Time", "Cabo Verde Daylight Time", + "Atlantic/Cape_Verde" + }, /* (UTC-01:00) Cabo Verde Is. */ + { + "Chatham Islands Standard Time", "Chatham Islands Daylight Time", + "Pacific/Chatham" + }, /* (UTC+12:45) Chatham Islands */ { "Canada Central Standard Time", "Canada Central Daylight Time", "Canada/Saskatchewan" - }, /* (GMT-06:00) Saskatchewan */ + }, /* (UTC-06:00) Saskatchewan */ { "Cape Verde Standard Time", "Cape Verde Daylight Time", "Atlantic/Cape_Verde" - }, /* (GMT-01:00) Cape Verde Is. */ + }, /* (UTC-01:00) Cape Verde Is. */ { "Caucasus Standard Time", "Caucasus Daylight Time", "Asia/Baku" - }, /* (GMT+04:00) Baku, Tbilisi, Yerevan */ + }, /* (UTC+04:00) Yerevan */ { "Cen. Australia Standard Time", "Cen. Australia Daylight Time", "Australia/Adelaide" - }, /* (GMT+09:30) Adelaide */ + }, /* (UTC+09:30) Adelaide */ /* Central America (other than Mexico) generally does not observe DST */ { "Central America Standard Time", "Central America Daylight Time", "CST6" - }, /* (GMT-06:00) Central America */ + }, /* (UTC-06:00) Central America */ { "Central Asia Standard Time", "Central Asia Daylight Time", "Asia/Dhaka" - }, /* (GMT+06:00) Astana, Dhaka */ + }, /* (UTC+06:00) Astana */ { "Central Brazilian Standard Time", "Central Brazilian Daylight Time", "America/Cuiaba" - }, /* (GMT-04:00) Cuiaba */ + }, /* (UTC-04:00) Cuiaba */ { "Central Europe Standard Time", "Central Europe Daylight Time", "Europe/Belgrade" - }, /* (GMT+01:00) Belgrade, Bratislava, Budapest, + }, /* (UTC+01:00) Belgrade, Bratislava, Budapest, * Ljubljana, Prague */ { "Central European Standard Time", "Central European Daylight Time", "Europe/Sarajevo" - }, /* (GMT+01:00) Sarajevo, Skopje, Warsaw, + }, /* (UTC+01:00) Sarajevo, Skopje, Warsaw, * Zagreb */ { "Central Pacific Standard Time", "Central Pacific Daylight Time", "Pacific/Noumea" - }, /* (GMT+11:00) Magadan, Solomon Is., New - * Caledonia */ + }, /* (UTC+11:00) Solomon Is., New Caledonia */ { "Central Standard Time", "Central Daylight Time", "US/Central" - }, /* (GMT-06:00) Central Time (US & Canada) */ + }, /* (UTC-06:00) Central Time (US & Canada) */ { "Central Standard Time (Mexico)", "Central Daylight Time (Mexico)", "America/Mexico_City" - }, /* (GMT-06:00) Guadalajara, Mexico City, - * Monterrey - New */ + }, /* (UTC-06:00) Guadalajara, Mexico City, + * Monterrey */ { "China Standard Time", "China Daylight Time", "Asia/Hong_Kong" - }, /* (GMT+08:00) Beijing, Chongqing, Hong Kong, + }, /* (UTC+08:00) Beijing, Chongqing, Hong Kong, * Urumqi */ + { + "Cuba Standard Time", 
"Cuba Daylight Time", + "America/Havana" + }, /* (UTC-05:00) Havana */ { "Dateline Standard Time", "Dateline Daylight Time", - "Etc/GMT+12" - }, /* (GMT-12:00) International Date Line West */ + "Etc/UTC+12" + }, /* (UTC-12:00) International Date Line West */ { "E. Africa Standard Time", "E. Africa Daylight Time", "Africa/Nairobi" - }, /* (GMT+03:00) Nairobi */ + }, /* (UTC+03:00) Nairobi */ { "E. Australia Standard Time", "E. Australia Daylight Time", "Australia/Brisbane" - }, /* (GMT+10:00) Brisbane */ + }, /* (UTC+10:00) Brisbane */ { "E. Europe Standard Time", "E. Europe Daylight Time", "Europe/Bucharest" - }, /* (GMT+02:00) Bucharest */ + }, /* (UTC+02:00) E. Europe */ { "E. South America Standard Time", "E. South America Daylight Time", "America/Araguaina" - }, /* (GMT-03:00) Brasilia */ + }, /* (UTC-03:00) Brasilia */ { "Eastern Standard Time", "Eastern Daylight Time", "US/Eastern" - }, /* (GMT-05:00) Eastern Time (US & Canada) */ + }, /* (UTC-05:00) Eastern Time (US & Canada) */ + { + "Eastern Standard Time (Mexico)", "Eastern Daylight Time (Mexico)", + "America/Mexico_City" + }, /* (UTC-05:00) Chetumal */ + { + "Easter Island Standard Time", "Easter Island Daylight Time", + "Pacific/Easter" + }, /* (UTC-06:00) Easter Island */ { "Egypt Standard Time", "Egypt Daylight Time", "Africa/Cairo" - }, /* (GMT+02:00) Cairo */ + }, /* (UTC+02:00) Cairo */ { - "Ekaterinburg Standard Time", "Ekaterinburg Daylight Time", + "Ekaterinburg Standard Time (RTZ 4)", "Ekaterinburg Daylight Time", "Asia/Yekaterinburg" - }, /* (GMT+05:00) Ekaterinburg */ + }, /* (UTC+05:00) Ekaterinburg */ { "Fiji Standard Time", "Fiji Daylight Time", "Pacific/Fiji" - }, /* (GMT+12:00) Fiji, Kamchatka, Marshall Is. */ + }, /* (UTC+12:00) Fiji */ { "FLE Standard Time", "FLE Daylight Time", "Europe/Helsinki" - }, /* (GMT+02:00) Helsinki, Kyiv, Riga, Sofia, + }, /* (UTC+02:00) Helsinki, Kyiv, Riga, Sofia, * Tallinn, Vilnius */ { "Georgian Standard Time", "Georgian Daylight Time", "Asia/Tbilisi" - }, /* (GMT+03:00) Tbilisi */ + }, /* (UTC+04:00) Tbilisi */ { "GMT Standard Time", "GMT Daylight Time", "Europe/London" - }, /* (GMT) Greenwich Mean Time : Dublin, - * Edinburgh, Lisbon, London */ + }, /* (UTC) Dublin, Edinburgh, Lisbon, London */ { "Greenland Standard Time", "Greenland Daylight Time", "America/Godthab" - }, /* (GMT-03:00) Greenland */ + }, /* (UTC-03:00) Greenland */ { "Greenwich Standard Time", "Greenwich Daylight Time", "Africa/Casablanca" - }, /* (GMT) Casablanca, Monrovia */ + }, /* (UTC) Monrovia, Reykjavik */ { "GTB Standard Time", "GTB Daylight Time", "Europe/Athens" - }, /* (GMT+02:00) Athens, Istanbul, Minsk */ + }, /* (UTC+02:00) Athens, Bucharest */ + { + "Haiti Standard Time", "Haiti Daylight Time", + "US/Eastern" + }, /* (UTC-05:00) Haiti */ { "Hawaiian Standard Time", "Hawaiian Daylight Time", "US/Hawaii" - }, /* (GMT-10:00) Hawaii */ + }, /* (UTC-10:00) Hawaii */ { "India Standard Time", "India Daylight Time", "Asia/Calcutta" - }, /* (GMT+05:30) Chennai, Kolkata, Mumbai, New + }, /* (UTC+05:30) Chennai, Kolkata, Mumbai, New * Delhi */ { "Iran Standard Time", "Iran Daylight Time", "Asia/Tehran" - }, /* (GMT+03:30) Tehran */ + }, /* (UTC+03:30) Tehran */ { "Jerusalem Standard Time", "Jerusalem Daylight Time", "Asia/Jerusalem" - }, /* (GMT+02:00) Jerusalem */ + }, /* (UTC+02:00) Jerusalem */ { "Jordan Standard Time", "Jordan Daylight Time", "Asia/Amman" - }, /* (GMT+02:00) Amman */ + }, /* (UTC+02:00) Amman */ { "Kamchatka Standard Time", "Kamchatka Daylight Time", "Asia/Kamchatka" - }, /* (GMT+12:00) 
Petropavlovsk-Kamchatsky */ + }, /* (UTC+12:00) Petropavlovsk-Kamchatsky - Old */ { "Korea Standard Time", "Korea Daylight Time", "Asia/Seoul" - }, /* (GMT+09:00) Seoul */ + }, /* (UTC+09:00) Seoul */ + { + "Libya Standard Time", "Libya Daylight Time", + "Africa/Tripoli" + }, /* (UTC+02:00) Tripoli */ + { + "Line Islands Standard Time", "Line Islands Daylight Time", + "Pacific/Kiritimati" + }, /* (UTC+14:00) Kiritimati Island */ + { + "Lord Howe Standard Time", "Lord Howe Daylight Time", + "Australia/Lord_Howe" + }, /* (UTC+10:30) Lord Howe Island */ + { + "Magadan Standard Time", "Magadan Daylight Time", + "Asia/Magadan" + }, /* (UTC+10:00) Magadan */ + { + "Marquesas Standard Time", "Marquesas Daylight Time", + "Pacific/Marquesas" + }, /* (UTC-09:30) Marquesas Islands */ { "Mauritius Standard Time", "Mauritius Daylight Time", "Indian/Mauritius" - }, /* (GMT+04:00) Port Louis */ + }, /* (UTC+04:00) Port Louis */ { "Mexico Standard Time", "Mexico Daylight Time", "America/Mexico_City" - }, /* (GMT-06:00) Guadalajara, Mexico City, + }, /* (UTC-06:00) Guadalajara, Mexico City, * Monterrey */ { "Mexico Standard Time 2", "Mexico Daylight Time 2", "America/Chihuahua" - }, /* (GMT-07:00) Chihuahua, La Paz, Mazatlan */ + }, /* (UTC-07:00) Chihuahua, La Paz, Mazatlan */ { "Mid-Atlantic Standard Time", "Mid-Atlantic Daylight Time", "Atlantic/South_Georgia" - }, /* (GMT-02:00) Mid-Atlantic */ + }, /* (UTC-02:00) Mid-Atlantic - Old */ { "Middle East Standard Time", "Middle East Daylight Time", "Asia/Beirut" - }, /* (GMT+02:00) Beirut */ + }, /* (UTC+02:00) Beirut */ { "Montevideo Standard Time", "Montevideo Daylight Time", "America/Montevideo" - }, /* (GMT-03:00) Montevideo */ + }, /* (UTC-03:00) Montevideo */ { "Morocco Standard Time", "Morocco Daylight Time", "Africa/Casablanca" - }, /* (GMT) Casablanca */ + }, /* (UTC) Casablanca */ { "Mountain Standard Time", "Mountain Daylight Time", "US/Mountain" - }, /* (GMT-07:00) Mountain Time (US & Canada) */ + }, /* (UTC-07:00) Mountain Time (US & Canada) */ { "Mountain Standard Time (Mexico)", "Mountain Daylight Time (Mexico)", "America/Chihuahua" - }, /* (GMT-07:00) Chihuahua, La Paz, Mazatlan - - * New */ + }, /* (UTC-07:00) Chihuahua, La Paz, Mazatlan */ { "Myanmar Standard Time", "Myanmar Daylight Time", "Asia/Rangoon" - }, /* (GMT+06:30) Rangoon */ + }, /* (UTC+06:30) Yangon (Rangoon) */ { "N. Central Asia Standard Time", "N. 
Central Asia Daylight Time", "Asia/Novosibirsk" - }, /* (GMT+06:00) Novosibirsk */ + }, /* (UTC+06:00) Novosibirsk (RTZ 5) */ { "Namibia Standard Time", "Namibia Daylight Time", "Africa/Windhoek" - }, /* (GMT+02:00) Windhoek */ + }, /* (UTC+01:00) Windhoek */ { "Nepal Standard Time", "Nepal Daylight Time", "Asia/Katmandu" - }, /* (GMT+05:45) Kathmandu */ + }, /* (UTC+05:45) Kathmandu */ { "New Zealand Standard Time", "New Zealand Daylight Time", "Pacific/Auckland" - }, /* (GMT+12:00) Auckland, Wellington */ + }, /* (UTC+12:00) Auckland, Wellington */ { "Newfoundland Standard Time", "Newfoundland Daylight Time", "Canada/Newfoundland" - }, /* (GMT-03:30) Newfoundland */ + }, /* (UTC-03:30) Newfoundland */ + { + "Norfolk Standard Time", "Norfolk Daylight Time", + "Pacific/Norfolk" + }, /* (UTC+11:00) Norfolk Island */ { "North Asia East Standard Time", "North Asia East Daylight Time", "Asia/Irkutsk" - }, /* (GMT+08:00) Irkutsk, Ulaan Bataar */ + }, /* (UTC+08:00) Irkutsk, Ulaan Bataar */ { "North Asia Standard Time", "North Asia Daylight Time", "Asia/Krasnoyarsk" - }, /* (GMT+07:00) Krasnoyarsk */ + }, /* (UTC+07:00) Krasnoyarsk */ + { + "North Korea Standard Time", "North Korea Daylight Time", + "Asia/Pyongyang" + }, /* (UTC+08:30) Pyongyang */ { "Pacific SA Standard Time", "Pacific SA Daylight Time", "America/Santiago" - }, /* (GMT-04:00) Santiago */ + }, /* (UTC-03:00) Santiago */ { "Pacific Standard Time", "Pacific Daylight Time", "US/Pacific" - }, /* (GMT-08:00) Pacific Time (US & Canada); - * Tijuana */ + }, /* (UTC-08:00) Pacific Time (US & Canada) */ { "Pacific Standard Time (Mexico)", "Pacific Daylight Time (Mexico)", "America/Tijuana" - }, /* (GMT-08:00) Tijuana, Baja California */ + }, /* (UTC-08:00) Baja California */ { "Pakistan Standard Time", "Pakistan Daylight Time", "Asia/Karachi" - }, /* (GMT+05:00) Islamabad, Karachi */ + }, /* (UTC+05:00) Islamabad, Karachi */ { "Paraguay Standard Time", "Paraguay Daylight Time", "America/Asuncion" - }, /* (GMT-04:00) Asuncion */ + }, /* (UTC-04:00) Asuncion */ { "Romance Standard Time", "Romance Daylight Time", "Europe/Brussels" - }, /* (GMT+01:00) Brussels, Copenhagen, Madrid, + }, /* (UTC+01:00) Brussels, Copenhagen, Madrid, * Paris */ + { + "Russia TZ 1 Standard Time", "Russia TZ 1 Daylight Time", + "Europe/Kaliningrad" + }, /* (UTC+02:00) Kaliningrad (RTZ 1) */ + { + "Russia TZ 2 Standard Time", "Russia TZ 2 Daylight Time", + "Europe/Moscow" + }, /* (UTC+03:00) Moscow, St. 
Petersburg, + * Volgograd (RTZ 2) */ + { + "Russia TZ 3 Standard Time", "Russia TZ 3 Daylight Time", + "Europe/Samara" + }, /* (UTC+04:00) Izhevsk, Samara (RTZ 3) */ + { + "Russia TZ 4 Standard Time", "Russia TZ 4 Daylight Time", + "Asia/Yekaterinburg" + }, /* (UTC+05:00) Ekaterinburg (RTZ 4) */ + { + "Russia TZ 5 Standard Time", "Russia TZ 5 Daylight Time", + "Asia/Novosibirsk" + }, /* (UTC+06:00) Novosibirsk (RTZ 5) */ + { + "Russia TZ 6 Standard Time", "Russia TZ 6 Daylight Time", + "Asia/Krasnoyarsk" + }, /* (UTC+07:00) Krasnoyarsk (RTZ 6) */ + { + "Russia TZ 7 Standard Time", "Russia TZ 7 Daylight Time", + "Asia/Irkutsk" + }, /* (UTC+08:00) Irkutsk (RTZ 7) */ + { + "Russia TZ 8 Standard Time", "Russia TZ 8 Daylight Time", + "Asia/Yakutsk" + }, /* (UTC+09:00) Yakutsk (RTZ 8) */ + { + "Russia TZ 9 Standard Time", "Russia TZ 9 Daylight Time", + "Asia/Vladivostok" + }, /* (UTC+10:00) Vladivostok, Magadan + * (RTZ 9) */ + { + "Russia TZ 10 Standard Time", "Russia TZ 10 Daylight Time", + "Asia/Magadan" + }, /* (UTC+11:00) Chokurdakh (RTZ 10) */ + { + "Russia TZ 11 Standard Time", "Russia TZ 11 Daylight Time", + "Asia/Anadyr" + }, /* (UTC+12:00) Anadyr, Petropavlovsk-Kamchatsky + * (RTZ 11) */ { "Russian Standard Time", "Russian Daylight Time", "Europe/Moscow" - }, /* (GMT+03:00) Moscow, St. Petersburg, + }, /* (UTC+03:00) Moscow, St. Petersburg, * Volgograd */ { "SA Eastern Standard Time", "SA Eastern Daylight Time", "America/Buenos_Aires" - }, /* (GMT-03:00) Buenos Aires, Georgetown */ + }, /* (UTC-03:00) Cayenne, Fortaleza */ { "SA Pacific Standard Time", "SA Pacific Daylight Time", "America/Bogota" - }, /* (GMT-05:00) Bogota, Lima, Quito */ + }, /* (UTC-05:00) Bogota, Lima, Quito, Rio + * Branco */ { "SA Western Standard Time", "SA Western Daylight Time", "America/Caracas" - }, /* (GMT-04:00) Caracas, La Paz */ + }, /* (UTC-04:00) Georgetown, La Paz, Manaus, + * San Juan */ + { + "Saint Pierre Standard Time", "Saint Pierre Daylight Time", + "America/Miquelon" + }, /* (UTC-03:00) Saint Pierre and Miquelon */ { "Samoa Standard Time", "Samoa Daylight Time", - "Pacific/Midway" - }, /* (GMT-11:00) Midway Island, Samoa */ + "Pacific/Samoa" + }, /* (UTC+13:00) Samoa */ { "SE Asia Standard Time", "SE Asia Daylight Time", "Asia/Bangkok" - }, /* (GMT+07:00) Bangkok, Hanoi, Jakarta */ + }, /* (UTC+07:00) Bangkok, Hanoi, Jakarta */ { "Malay Peninsula Standard Time", "Malay Peninsula Daylight Time", "Asia/Kuala_Lumpur" - }, /* (GMT+08:00) Kuala Lumpur, Singapore */ + }, /* (UTC+08:00) Kuala Lumpur, Singapore */ + { + "Sakhalin Standard Time", "Sakhalin Daylight Time", + "Asia/Sakhalin" + }, /* (UTC+11:00) Sakhalin */ { "South Africa Standard Time", "South Africa Daylight Time", "Africa/Harare" - }, /* (GMT+02:00) Harare, Pretoria */ + }, /* (UTC+02:00) Harare, Pretoria */ { "Sri Lanka Standard Time", "Sri Lanka Daylight Time", "Asia/Colombo" - }, /* (GMT+06:00) Sri Jayawardenepura */ + }, /* (UTC+05:30) Sri Jayawardenepura */ + { + "Syria Standard Time", "Syria Daylight Time", + "Asia/Damascus" + }, /* (UTC+02:00) Damascus */ { "Taipei Standard Time", "Taipei Daylight Time", "Asia/Taipei" - }, /* (GMT+08:00) Taipei */ + }, /* (UTC+08:00) Taipei */ { "Tasmania Standard Time", "Tasmania Daylight Time", "Australia/Hobart" - }, /* (GMT+10:00) Hobart */ + }, /* (UTC+10:00) Hobart */ + { + "Tocantins Standard Time", "Tocantins Daylight Time", + "America/Araguaina" + }, /* (UTC-03:00) Araguaina */ { "Tokyo Standard Time", "Tokyo Daylight Time", "Asia/Tokyo" - }, /* (GMT+09:00) Osaka, Sapporo, Tokyo */ + }, 
/* (UTC+09:00) Osaka, Sapporo, Tokyo */ { "Tonga Standard Time", "Tonga Daylight Time", "Pacific/Tongatapu" - }, /* (GMT+13:00) Nuku'alofa */ + }, /* (UTC+13:00) Nuku'alofa */ + { + "Tomsk Standard Time", "Tomsk Daylight Time", + "Asia/Tomsk" + }, /* (UTC+07:00) Tomsk */ + { + "Transbaikal Standard Time", "Transbaikal Daylight Time", + "Asia/Chita" + }, /* (UTC+09:00) Chita */ + { + "Turkey Standard Time", "Turkey Daylight Time", + "Europe/Istanbul" + }, /* (UTC+02:00) Istanbul */ + { + "Turks and Caicos Standard Time", "Turks and Caicos Daylight Time", + "America/Grand_Turk" + }, /* (UTC-04:00) Turks and Caicos */ { "Ulaanbaatar Standard Time", "Ulaanbaatar Daylight Time", "Asia/Ulaanbaatar", - }, /* (GMT+08:00) Ulaanbaatar */ + }, /* (UTC+08:00) Ulaanbaatar */ { "US Eastern Standard Time", "US Eastern Daylight Time", "US/Eastern" - }, /* (GMT-05:00) Indiana (East) */ + }, /* (UTC-05:00) Indiana (East) */ { "US Mountain Standard Time", "US Mountain Daylight Time", "US/Arizona" - }, /* (GMT-07:00) Arizona */ + }, /* (UTC-07:00) Arizona */ { "Coordinated Universal Time", "Coordinated Universal Time", "UTC" - }, /* (GMT) Coordinated Universal Time */ + }, /* (UTC) Coordinated Universal Time */ { "UTC+12", "UTC+12", "Etc/GMT+12" - }, /* (GMT+12:00) Coordinated Universal Time+12 */ + }, /* (UTC+12:00) Coordinated Universal Time+12 */ { "UTC-02", "UTC-02", "Etc/GMT-02" - }, /* (GMT-02:00) Coordinated Universal Time-02 */ + }, /* (UTC-02:00) Coordinated Universal Time-02 */ + { + "UTC-08", "UTC-08", + "Etc/GMT-08" + }, /* (UTC-08:00) Coordinated Universal Time-08 */ + { + "UTC-09", "UTC-09", + "Etc/GMT-09" + }, /* (UTC-09:00) Coordinated Universal Time-09 */ { "UTC-11", "UTC-11", "Etc/GMT-11" - }, /* (GMT-11:00) Coordinated Universal Time-11 */ + }, /* (UTC-11:00) Coordinated Universal Time-11 */ { "Venezuela Standard Time", "Venezuela Daylight Time", "America/Caracas", - }, /* (GMT-04:30) Caracas */ + }, /* (UTC-04:30) Caracas */ { "Vladivostok Standard Time", "Vladivostok Daylight Time", "Asia/Vladivostok" - }, /* (GMT+10:00) Vladivostok */ + }, /* (UTC+10:00) Vladivostok (RTZ 9) */ { "W. Australia Standard Time", "W. Australia Daylight Time", "Australia/Perth" - }, /* (GMT+08:00) Perth */ + }, /* (UTC+08:00) Perth */ #ifdef NOT_USED /* Could not find a match for this one (just a guess). Excluded for now. */ { "W. Central Africa Standard Time", "W. Central Africa Daylight Time", "WAT" - }, /* (GMT+01:00) West Central Africa */ + }, /* (UTC+01:00) West Central Africa */ #endif { "W. Europe Standard Time", "W. Europe Daylight Time", "CET" - }, /* (GMT+01:00) Amsterdam, Berlin, Bern, Rome, + }, /* (UTC+01:00) Amsterdam, Berlin, Bern, Rome, * Stockholm, Vienna */ + { + "W. Mongolia Standard Time", "W. 
Mongolia Daylight Time", + "Asia/Hovd" + }, /* (UTC+07:00) Hovd */ { "West Asia Standard Time", "West Asia Daylight Time", "Asia/Karachi" - }, /* (GMT+05:00) Islamabad, Karachi, Tashkent */ + }, /* (UTC+05:00) Ashgabat, Tashkent */ + { + "West Bank Gaza Standard Time", "West Bank Gaza Daylight Time", + "Asia/Gaza" + }, /* (UTC+02:00) Gaza, Hebron */ { "West Pacific Standard Time", "West Pacific Daylight Time", "Pacific/Guam" - }, /* (GMT+10:00) Guam, Port Moresby */ + }, /* (UTC+10:00) Guam, Port Moresby */ { "Yakutsk Standard Time", "Yakutsk Daylight Time", "Asia/Yakutsk" - }, /* (GMT+09:00) Yakutsk */ + }, /* (UTC+09:00) Yakutsk */ { NULL, NULL, NULL } -- cgit v1.2.3 From fa878703f456b804b01b61a9d94008f57967cdd0 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 18 Aug 2016 13:41:17 +0300 Subject: Refactor RandomSalt to handle salts of different lengths. All we need is 4 bytes at the moment, for MD5 authentication. But in upcomint patches for SCRAM authentication, SCRAM will need a salt of different length. It's less scary for the caller to pass the buffer length anyway, than assume a certain-sized output buffer. Author: Michael Paquier Discussion: --- src/backend/postmaster/postmaster.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'src') diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index f5c8e9d812..05f3f14e35 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -404,7 +404,7 @@ static int initMasks(fd_set *rmask); static void report_fork_failure_to_client(Port *port, int errnum); static CAC_state canAcceptConnections(void); static long PostmasterRandom(void); -static void RandomSalt(char *md5Salt); +static void RandomSalt(char *salt, int len); static void signal_child(pid_t pid, int signal); static bool SignalSomeChildren(int signal, int targets); static void TerminateChildren(int signal); @@ -2342,7 +2342,7 @@ ConnCreate(int serverFd) * after. Else the postmaster's random sequence won't get advanced, and * all backends would end up using the same salt... */ - RandomSalt(port->md5Salt); + RandomSalt(port->md5Salt, sizeof(port->md5Salt)); /* * Allocate GSSAPI specific state struct @@ -5083,23 +5083,21 @@ StartupPacketTimeoutHandler(void) * RandomSalt */ static void -RandomSalt(char *md5Salt) +RandomSalt(char *salt, int len) { long rand; + int i; /* * We use % 255, sacrificing one possible byte value, so as to ensure that * all bits of the random() value participate in the result. While at it, * add one to avoid generating any null bytes. */ - rand = PostmasterRandom(); - md5Salt[0] = (rand % 255) + 1; - rand = PostmasterRandom(); - md5Salt[1] = (rand % 255) + 1; - rand = PostmasterRandom(); - md5Salt[2] = (rand % 255) + 1; - rand = PostmasterRandom(); - md5Salt[3] = (rand % 255) + 1; + for (i = 0; i < len; i++) + { + rand = PostmasterRandom(); + salt[i] = (rand % 255) + 1; + } } /* -- cgit v1.2.3 From 9f31e45a6d36a2564423a20601d5066939ea83c1 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Tue, 16 Aug 2016 12:00:00 -0400 Subject: Improve formatting of comments in plpgsql.h This file had some unusual comment layout. Most of the comments introducing structs ended up to the right of the screen and following the start of the struct. Some comments for struct members ended up after the member definition. Fix that by moving comments consistently before what they are describing. 
Also add missing struct tags where missing so that it is easier to tell what the struct is. --- src/pl/plpgsql/src/plpgsql.h | 417 ++++++++++++++++++++++++++----------------- 1 file changed, 255 insertions(+), 162 deletions(-) (limited to 'src') diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h index bfd52af3e7..b416e50c64 100644 --- a/src/pl/plpgsql/src/plpgsql.h +++ b/src/pl/plpgsql/src/plpgsql.h @@ -34,9 +34,8 @@ #undef _ #define _(x) dgettext(TEXTDOMAIN, x) -/* ---------- +/* * Compiler's namespace item types - * ---------- */ enum { @@ -46,9 +45,8 @@ enum PLPGSQL_NSTYPE_REC }; -/* ---------- +/* * A PLPGSQL_NSTYPE_LABEL stack entry must be one of these types - * ---------- */ enum PLpgSQL_label_types { @@ -57,9 +55,8 @@ enum PLpgSQL_label_types PLPGSQL_LABEL_OTHER /* anything else */ }; -/* ---------- +/* * Datum array node types - * ---------- */ enum { @@ -71,9 +68,8 @@ enum PLPGSQL_DTYPE_EXPR }; -/* ---------- +/* * Variants distinguished in PLpgSQL_type structs - * ---------- */ enum { @@ -83,9 +79,8 @@ enum PLPGSQL_TTYPE_PSEUDO /* other pseudotypes */ }; -/* ---------- +/* * Execution tree node types - * ---------- */ enum PLpgSQL_stmt_types { @@ -115,10 +110,8 @@ enum PLpgSQL_stmt_types PLPGSQL_STMT_PERFORM }; - -/* ---------- +/* * Execution node return codes - * ---------- */ enum { @@ -128,9 +121,8 @@ enum PLPGSQL_RC_CONTINUE }; -/* ---------- +/* * GET DIAGNOSTICS information items - * ---------- */ enum { @@ -149,9 +141,8 @@ enum PLPGSQL_GETDIAG_SCHEMA_NAME }; -/* -------- +/* * RAISE statement options - * -------- */ enum { @@ -166,9 +157,8 @@ enum PLPGSQL_RAISEOPTION_SCHEMA }; -/* -------- +/* * Behavioral modes for plpgsql variable resolution - * -------- */ typedef enum { @@ -182,9 +172,11 @@ typedef enum * Node and structure definitions **********************************************************************/ - -typedef struct -{ /* Postgres data type */ +/* + * Postgres data type + */ +typedef struct PLpgSQL_type +{ char *typname; /* (simple) name of the type */ Oid typoid; /* OID of the data type */ int ttype; /* PLPGSQL_TTYPE_ code */ @@ -197,31 +189,37 @@ typedef struct int32 atttypmod; /* typmod (taken from someplace else) */ } PLpgSQL_type; - /* + * Generic datum array item + * * PLpgSQL_datum is the common supertype for PLpgSQL_expr, PLpgSQL_var, * PLpgSQL_row, PLpgSQL_rec, PLpgSQL_recfield, and PLpgSQL_arrayelem */ -typedef struct -{ /* Generic datum array item */ +typedef struct PLpgSQL_datum +{ int dtype; int dno; } PLpgSQL_datum; /* + * Scalar or composite variable + * * The variants PLpgSQL_var, PLpgSQL_row, and PLpgSQL_rec share these * fields */ -typedef struct -{ /* Scalar or composite variable */ +typedef struct PLpgSQL_variable +{ int dtype; int dno; char *refname; int lineno; } PLpgSQL_variable; +/* + * SQL Query to plan and execute + */ typedef struct PLpgSQL_expr -{ /* SQL Query to plan and execute */ +{ int dtype; int dno; char *query; @@ -252,9 +250,11 @@ typedef struct PLpgSQL_expr LocalTransactionId expr_simple_lxid; } PLpgSQL_expr; - -typedef struct -{ /* Scalar variable */ +/* + * Scalar variable + */ +typedef struct PLpgSQL_var +{ int dtype; int dno; char *refname; @@ -273,19 +273,20 @@ typedef struct bool freeval; } PLpgSQL_var; - -typedef struct -{ /* Row variable */ +/* + * Row variable + */ +typedef struct PLpgSQL_row +{ int dtype; int dno; char *refname; int lineno; + /* Note: TupleDesc is only set up for named rowtypes, else it is NULL. 
*/ TupleDesc rowtupdesc; /* - * Note: TupleDesc is only set up for named rowtypes, else it is NULL. - * * Note: if the underlying rowtype contains a dropped column, the * corresponding fieldnames[] entry will be NULL, and there is no * corresponding var (varnos[] will be -1). @@ -295,9 +296,11 @@ typedef struct int *varnos; } PLpgSQL_row; - -typedef struct -{ /* Record variable (non-fixed structure) */ +/* + * Record variable (non-fixed structure) + */ +typedef struct PLpgSQL_rec +{ int dtype; int dno; char *refname; @@ -309,22 +312,27 @@ typedef struct bool freetupdesc; } PLpgSQL_rec; - -typedef struct -{ /* Field in record */ +/* + * Field in record + */ +typedef struct PLpgSQL_recfield +{ int dtype; int dno; char *fieldname; int recparentno; /* dno of parent record */ } PLpgSQL_recfield; - -typedef struct -{ /* Element of array variable */ +/* + * Element of array variable + */ +typedef struct PLpgSQL_arrayelem +{ int dtype; int dno; PLpgSQL_expr *subscript; int arrayparentno; /* dno of parent array variable */ + /* Remaining fields are cached info about the array variable's type */ Oid parenttypoid; /* type of array variable; 0 if not yet set */ int32 parenttypmod; /* typmod of array variable */ @@ -337,49 +345,65 @@ typedef struct char elemtypalign; /* typalign of element type */ } PLpgSQL_arrayelem; - +/* + * Item in the compilers namespace tree + */ typedef struct PLpgSQL_nsitem -{ /* Item in the compilers namespace tree */ +{ int itemtype; + /* + * For labels, itemno is a value of enum PLpgSQL_label_types. For other + * itemtypes, itemno is the associated PLpgSQL_datum's dno. + */ int itemno; - /* For labels, itemno is a value of enum PLpgSQL_label_types. */ - /* For other itemtypes, itemno is the associated PLpgSQL_datum's dno. */ struct PLpgSQL_nsitem *prev; char name[FLEXIBLE_ARRAY_MEMBER]; /* nul-terminated string */ } PLpgSQL_nsitem; - -typedef struct -{ /* Generic execution node */ +/* + * Generic execution node + */ +typedef struct PLpgSQL_stmt +{ int cmd_type; int lineno; } PLpgSQL_stmt; - +/* + * One EXCEPTION condition name + */ typedef struct PLpgSQL_condition -{ /* One EXCEPTION condition name */ +{ int sqlerrstate; /* SQLSTATE code */ char *condname; /* condition name (for debugging) */ struct PLpgSQL_condition *next; } PLpgSQL_condition; -typedef struct +/* + * EXCEPTION block + */ +typedef struct PLpgSQL_exception_block { int sqlstate_varno; int sqlerrm_varno; List *exc_list; /* List of WHEN clauses */ } PLpgSQL_exception_block; -typedef struct -{ /* One EXCEPTION ... WHEN clause */ +/* + * One EXCEPTION ... 
WHEN clause + */ +typedef struct PLpgSQL_exception +{ int lineno; PLpgSQL_condition *conditions; List *action; /* List of statements */ } PLpgSQL_exception; - -typedef struct -{ /* Block of statements */ +/* + * Block of statements + */ +typedef struct PLpgSQL_stmt_block +{ int cmd_type; int lineno; char *label; @@ -389,39 +413,52 @@ typedef struct PLpgSQL_exception_block *exceptions; } PLpgSQL_stmt_block; - -typedef struct -{ /* Assign statement */ +/* + * Assign statement + */ +typedef struct PLpgSQL_stmt_assign +{ int cmd_type; int lineno; int varno; PLpgSQL_expr *expr; } PLpgSQL_stmt_assign; -typedef struct -{ /* PERFORM statement */ +/* + * PERFORM statement + */ +typedef struct PLpgSQL_stmt_perform +{ int cmd_type; int lineno; PLpgSQL_expr *expr; } PLpgSQL_stmt_perform; -typedef struct -{ /* Get Diagnostics item */ +/* + * GET DIAGNOSTICS item + */ +typedef struct PLpgSQL_diag_item +{ int kind; /* id for diagnostic value desired */ int target; /* where to assign it */ } PLpgSQL_diag_item; -typedef struct -{ /* Get Diagnostics statement */ +/* + * GET DIAGNOSTICS statement + */ +typedef struct PLpgSQL_stmt_getdiag +{ int cmd_type; int lineno; bool is_stacked; /* STACKED or CURRENT diagnostics area? */ List *diag_items; /* List of PLpgSQL_diag_item */ } PLpgSQL_stmt_getdiag; - -typedef struct -{ /* IF statement */ +/* + * IF statement + */ +typedef struct PLpgSQL_stmt_if +{ int cmd_type; int lineno; PLpgSQL_expr *cond; /* boolean expression for THEN */ @@ -430,15 +467,20 @@ typedef struct List *else_body; /* List of statements */ } PLpgSQL_stmt_if; -typedef struct /* one ELSIF arm of IF statement */ +/* + * one ELSIF arm of IF statement + */ +typedef struct PLpgSQL_if_elsif { int lineno; PLpgSQL_expr *cond; /* boolean expression for this case */ List *stmts; /* List of statements */ } PLpgSQL_if_elsif; - -typedef struct /* CASE statement */ +/* + * CASE statement + */ +typedef struct PLpgSQL_stmt_case { int cmd_type; int lineno; @@ -449,25 +491,32 @@ typedef struct /* CASE statement */ List *else_stmts; /* List of statements */ } PLpgSQL_stmt_case; -typedef struct /* one arm of CASE statement */ +/* + * one arm of CASE statement + */ +typedef struct PLpgSQL_case_when { int lineno; PLpgSQL_expr *expr; /* boolean expression for this case */ List *stmts; /* List of statements */ } PLpgSQL_case_when; - -typedef struct -{ /* Unconditional LOOP statement */ +/* + * Unconditional LOOP statement + */ +typedef struct PLpgSQL_stmt_loop +{ int cmd_type; int lineno; char *label; List *body; /* List of statements */ } PLpgSQL_stmt_loop; - -typedef struct -{ /* WHILE cond LOOP statement */ +/* + * WHILE cond LOOP statement + */ +typedef struct PLpgSQL_stmt_while +{ int cmd_type; int lineno; char *label; @@ -475,9 +524,11 @@ typedef struct List *body; /* List of statements */ } PLpgSQL_stmt_while; - -typedef struct -{ /* FOR statement with integer loopvar */ +/* + * FOR statement with integer loopvar + */ +typedef struct PLpgSQL_stmt_fori +{ int cmd_type; int lineno; char *label; @@ -489,13 +540,12 @@ typedef struct List *body; /* List of statements */ } PLpgSQL_stmt_fori; - /* * PLpgSQL_stmt_forq represents a FOR statement running over a SQL query. * It is the common supertype of PLpgSQL_stmt_fors, PLpgSQL_stmt_forc * and PLpgSQL_dynfors. 
*/ -typedef struct +typedef struct PLpgSQL_stmt_forq { int cmd_type; int lineno; @@ -505,8 +555,11 @@ typedef struct List *body; /* List of statements */ } PLpgSQL_stmt_forq; -typedef struct -{ /* FOR statement running over SELECT */ +/* + * FOR statement running over SELECT + */ +typedef struct PLpgSQL_stmt_fors +{ int cmd_type; int lineno; char *label; @@ -517,8 +570,11 @@ typedef struct PLpgSQL_expr *query; } PLpgSQL_stmt_fors; -typedef struct -{ /* FOR statement running over cursor */ +/* + * FOR statement running over cursor + */ +typedef struct PLpgSQL_stmt_forc +{ int cmd_type; int lineno; char *label; @@ -530,8 +586,11 @@ typedef struct PLpgSQL_expr *argquery; /* cursor arguments if any */ } PLpgSQL_stmt_forc; -typedef struct -{ /* FOR statement running over EXECUTE */ +/* + * FOR statement running over EXECUTE + */ +typedef struct PLpgSQL_stmt_dynfors +{ int cmd_type; int lineno; char *label; @@ -543,9 +602,11 @@ typedef struct List *params; /* USING expressions */ } PLpgSQL_stmt_dynfors; - -typedef struct -{ /* FOREACH item in array loop */ +/* + * FOREACH item in array loop + */ +typedef struct PLpgSQL_stmt_foreach_a +{ int cmd_type; int lineno; char *label; @@ -555,9 +616,11 @@ typedef struct List *body; /* List of statements */ } PLpgSQL_stmt_foreach_a; - -typedef struct -{ /* OPEN a curvar */ +/* + * OPEN a curvar + */ +typedef struct PLpgSQL_stmt_open +{ int cmd_type; int lineno; int curvar; @@ -569,9 +632,11 @@ typedef struct List *params; /* USING expressions */ } PLpgSQL_stmt_open; - -typedef struct -{ /* FETCH or MOVE statement */ +/* + * FETCH or MOVE statement + */ +typedef struct PLpgSQL_stmt_fetch +{ int cmd_type; int lineno; PLpgSQL_rec *rec; /* target, as record or row */ @@ -584,17 +649,21 @@ typedef struct bool returns_multiple_rows; /* can return more than one row? */ } PLpgSQL_stmt_fetch; - -typedef struct -{ /* CLOSE curvar */ +/* + * CLOSE curvar + */ +typedef struct PLpgSQL_stmt_close +{ int cmd_type; int lineno; int curvar; } PLpgSQL_stmt_close; - -typedef struct -{ /* EXIT or CONTINUE statement */ +/* + * EXIT or CONTINUE statement + */ +typedef struct PLpgSQL_stmt_exit +{ int cmd_type; int lineno; bool is_exit; /* Is this an exit or a continue? 
*/ @@ -602,25 +671,33 @@ typedef struct PLpgSQL_expr *cond; } PLpgSQL_stmt_exit; - -typedef struct -{ /* RETURN statement */ +/* + * RETURN statement + */ +typedef struct PLpgSQL_stmt_return +{ int cmd_type; int lineno; PLpgSQL_expr *expr; int retvarno; } PLpgSQL_stmt_return; -typedef struct -{ /* RETURN NEXT statement */ +/* + * RETURN NEXT statement + */ +typedef struct PLpgSQL_stmt_return_next +{ int cmd_type; int lineno; PLpgSQL_expr *expr; int retvarno; } PLpgSQL_stmt_return_next; -typedef struct -{ /* RETURN QUERY statement */ +/* + * RETURN QUERY statement + */ +typedef struct PLpgSQL_stmt_return_query +{ int cmd_type; int lineno; PLpgSQL_expr *query; /* if static query */ @@ -628,8 +705,11 @@ typedef struct List *params; /* USING arguments for dynamic query */ } PLpgSQL_stmt_return_query; -typedef struct -{ /* RAISE statement */ +/* + * RAISE statement + */ +typedef struct PLpgSQL_stmt_raise +{ int cmd_type; int lineno; int elog_level; @@ -639,36 +719,47 @@ typedef struct List *options; /* list of PLpgSQL_raise_option */ } PLpgSQL_stmt_raise; -typedef struct -{ /* RAISE statement option */ +/* + * RAISE statement option + */ +typedef struct PLpgSQL_raise_option +{ int opt_type; PLpgSQL_expr *expr; } PLpgSQL_raise_option; -typedef struct -{ /* ASSERT statement */ +/* + * ASSERT statement + */ +typedef struct PLpgSQL_stmt_assert +{ int cmd_type; int lineno; PLpgSQL_expr *cond; PLpgSQL_expr *message; } PLpgSQL_stmt_assert; -typedef struct -{ /* Generic SQL statement to execute */ +/* + * Generic SQL statement to execute + */ +typedef struct PLpgSQL_stmt_execsql +{ int cmd_type; int lineno; PLpgSQL_expr *sqlstmt; - bool mod_stmt; /* is the stmt INSERT/UPDATE/DELETE? */ - /* note: mod_stmt is set when we plan the query */ + bool mod_stmt; /* is the stmt INSERT/UPDATE/DELETE? Note: + mod_stmt is set when we plan the query */ bool into; /* INTO supplied? */ bool strict; /* INTO STRICT flag */ PLpgSQL_rec *rec; /* INTO target, if record */ PLpgSQL_row *row; /* INTO target, if row */ } PLpgSQL_stmt_execsql; - -typedef struct -{ /* Dynamic SQL string to execute */ +/* + * Dynamic SQL string to execute + */ +typedef struct PLpgSQL_stmt_dynexecute +{ int cmd_type; int lineno; PLpgSQL_expr *query; /* string expression */ @@ -679,9 +770,11 @@ typedef struct List *params; /* USING expressions */ } PLpgSQL_stmt_dynexecute; - +/* + * Hash lookup key for functions + */ typedef struct PLpgSQL_func_hashkey -{ /* Hash lookup key for functions */ +{ Oid funcOid; bool isTrigger; /* true if called as a trigger */ @@ -710,6 +803,9 @@ typedef struct PLpgSQL_func_hashkey Oid argtypes[FUNC_MAX_ARGS]; } PLpgSQL_func_hashkey; +/* + * Trigger type + */ typedef enum PLpgSQL_trigtype { PLPGSQL_DML_TRIGGER, @@ -717,8 +813,11 @@ typedef enum PLpgSQL_trigtype PLPGSQL_NOT_TRIGGER } PLpgSQL_trigtype; +/* + * Complete compiled function + */ typedef struct PLpgSQL_function -{ /* Complete compiled function */ +{ char *fn_signature; Oid fn_oid; TransactionId fn_xmin; @@ -777,9 +876,11 @@ typedef struct PLpgSQL_function unsigned long use_count; } PLpgSQL_function; - +/* + * Runtime execution data + */ typedef struct PLpgSQL_execstate -{ /* Runtime execution data */ +{ PLpgSQL_function *func; /* function being executed */ Datum retval; @@ -835,7 +936,6 @@ typedef struct PLpgSQL_execstate void *plugin_info; /* reserved for use by optional plugin */ } PLpgSQL_execstate; - /* * A PLpgSQL_plugin structure represents an instrumentation plugin. 
* To instrument PL/pgSQL, a plugin library must access the rendezvous @@ -850,24 +950,23 @@ typedef struct PLpgSQL_execstate * (if the pointers are non-NULL) to give the plugin a chance to watch * what we are doing. * - * func_setup is called when we start a function, before we've initialized - * the local variables defined by the function. + * func_setup is called when we start a function, before we've initialized + * the local variables defined by the function. * - * func_beg is called when we start a function, after we've initialized - * the local variables. + * func_beg is called when we start a function, after we've initialized + * the local variables. * - * func_end is called at the end of a function. + * func_end is called at the end of a function. * - * stmt_beg and stmt_end are called before and after (respectively) each - * statement. + * stmt_beg and stmt_end are called before and after (respectively) each + * statement. * * Also, immediately before any call to func_setup, PL/pgSQL fills in the * error_callback and assign_expr fields with pointers to its own * plpgsql_exec_error_callback and exec_assign_expr functions. This is * a somewhat ad-hoc expedient to simplify life for debugger plugins. */ - -typedef struct +typedef struct PLpgSQL_plugin { /* Function pointers set up by the plugin */ void (*func_setup) (PLpgSQL_execstate *estate, PLpgSQL_function *func); @@ -882,21 +981,22 @@ typedef struct PLpgSQL_expr *expr); } PLpgSQL_plugin; +/* + * Struct types used during parsing + */ -/* Struct types used during parsing */ - -typedef struct +typedef struct PLword { char *ident; /* palloc'd converted identifier */ bool quoted; /* Was it double-quoted? */ } PLword; -typedef struct +typedef struct PLcword { List *idents; /* composite identifiers (list of String) */ } PLcword; -typedef struct +typedef struct PLwdatum { PLpgSQL_datum *datum; /* referenced variable */ char *ident; /* valid if simple name */ @@ -950,9 +1050,8 @@ extern PLpgSQL_plugin **plpgsql_plugin_ptr; * Function declarations **********************************************************************/ -/* ---------- +/* * Functions in pl_comp.c - * ---------- */ extern PLpgSQL_function *plpgsql_compile(FunctionCallInfo fcinfo, bool forValidator); @@ -983,15 +1082,13 @@ extern void plpgsql_adddatum(PLpgSQL_datum *new); extern int plpgsql_add_initdatums(int **varnos); extern void plpgsql_HashTableInit(void); -/* ---------- +/* * Functions in pl_handler.c - * ---------- */ extern void _PG_init(void); -/* ---------- +/* * Functions in pl_exec.c - * ---------- */ extern Datum plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo, @@ -1009,9 +1106,8 @@ extern void plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate, PLpgSQL_datum *datum, Oid *typeid, int32 *typmod, Oid *collation); -/* ---------- +/* * Functions for namespace handling in pl_funcs.c - * ---------- */ extern void plpgsql_ns_init(void); extern void plpgsql_ns_push(const char *label, @@ -1026,18 +1122,16 @@ extern PLpgSQL_nsitem *plpgsql_ns_lookup_label(PLpgSQL_nsitem *ns_cur, const char *name); extern PLpgSQL_nsitem *plpgsql_ns_find_nearest_loop(PLpgSQL_nsitem *ns_cur); -/* ---------- +/* * Other functions in pl_funcs.c - * ---------- */ extern const char *plpgsql_stmt_typename(PLpgSQL_stmt *stmt); extern const char *plpgsql_getdiag_kindname(int kind); extern void plpgsql_free_function_memory(PLpgSQL_function *func); extern void plpgsql_dumptree(PLpgSQL_function *func); -/* ---------- +/* * Scanner functions in pl_scanner.c - * ---------- */ 
extern int plpgsql_base_yylex(void); extern int plpgsql_yylex(void); @@ -1055,9 +1149,8 @@ extern int plpgsql_latest_lineno(void); extern void plpgsql_scanner_init(const char *str); extern void plpgsql_scanner_finish(void); -/* ---------- +/* * Externs in gram.y - * ---------- */ extern int plpgsql_yyparse(void); -- cgit v1.2.3 From 49917dbd76ba0b4179a82fcf033ef5a10b8e3488 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 18 Aug 2016 11:17:10 -0400 Subject: Improve psql's tab completion for ALTER EXTENSION foo UPDATE ... Offer a list of available versions for that extension. Formerly, since there was no special support for this, it triggered off the UPDATE keyword and offered a list of table names --- not too helpful. Jeff Janes, reviewed by Gerdan Santos Patch: --- src/bin/psql/tab-complete.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'src') diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index 8469d9ff03..d31fd25b8d 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -820,6 +820,13 @@ static const SchemaQuery Query_for_list_of_matviews = { " WHERE (%d = pg_catalog.length('%s'))"\ " AND pg_catalog.quote_ident(name)='%s'" +/* the silly-looking length condition is just to eat up the current word */ +#define Query_for_list_of_available_extension_versions_with_TO \ +" SELECT 'TO ' || pg_catalog.quote_ident(version) "\ +" FROM pg_catalog.pg_available_extension_versions "\ +" WHERE (%d = pg_catalog.length('%s'))"\ +" AND pg_catalog.quote_ident(name)='%s'" + #define Query_for_list_of_prepared_statements \ " SELECT pg_catalog.quote_ident(name) "\ " FROM pg_catalog.pg_prepared_statements "\ @@ -1414,6 +1421,20 @@ psql_completion(const char *text, int start, int end) else if (Matches3("ALTER", "EXTENSION", MatchAny)) COMPLETE_WITH_LIST4("ADD", "DROP", "UPDATE", "SET SCHEMA"); + /* ALTER EXTENSION UPDATE */ + else if (Matches4("ALTER", "EXTENSION", MatchAny, "UPDATE")) + { + completion_info_charp = prev2_wd; + COMPLETE_WITH_QUERY(Query_for_list_of_available_extension_versions_with_TO); + } + + /* ALTER EXTENSION UPDATE TO */ + else if (Matches5("ALTER", "EXTENSION", MatchAny, "UPDATE", "TO")) + { + completion_info_charp = prev3_wd; + COMPLETE_WITH_QUERY(Query_for_list_of_available_extension_versions); + } + /* ALTER FOREIGN */ else if (Matches2("ALTER", "FOREIGN")) COMPLETE_WITH_LIST2("DATA WRAPPER", "TABLE"); -- cgit v1.2.3 From 8019b5a89c3cefbaa69ab58c00281419f7e46601 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 18 Aug 2016 11:29:16 -0400 Subject: Improve psql's tab completion for \l. Offer a list of database names; formerly no help was offered. 
Ian Barwick, reviewed by Gerdan Santos Patch: <5724132E.1030804@2ndquadrant.com> --- src/bin/psql/tab-complete.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'src') diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index d31fd25b8d..1345e4ed80 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -3033,6 +3033,8 @@ psql_completion(const char *text, int start, int end) COMPLETE_WITH_QUERY(Query_for_list_of_encodings); else if (TailMatchesCS1("\\h") || TailMatchesCS1("\\help")) COMPLETE_WITH_LIST(sql_commands); + else if (TailMatchesCS1("\\l*") && !TailMatchesCS1("\\lo*")) + COMPLETE_WITH_QUERY(Query_for_list_of_databases); else if (TailMatchesCS1("\\password")) COMPLETE_WITH_QUERY(Query_for_list_of_roles); else if (TailMatchesCS1("\\pset")) -- cgit v1.2.3 From 5697522d8411135d26a5d807f9e4afa182502f64 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 18 Aug 2016 14:48:51 -0400 Subject: In plpgsql, don't try to convert int2vector or oidvector to expanded array. These types are storage-compatible with real arrays, but they don't support toasting, so of course they can't support expansion either. Per bug #14289 from Michael Overmeyer. Back-patch to 9.5 where expanded arrays were introduced. Report: <20160818174414.1529.37913@wrigleys.postgresql.org> --- src/include/utils/array.h | 2 +- src/pl/plpgsql/src/pl_comp.c | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/include/utils/array.h b/src/include/utils/array.h index b62b08c482..6164f119ba 100644 --- a/src/include/utils/array.h +++ b/src/include/utils/array.h @@ -36,7 +36,7 @@ * * The OIDVECTOR and INT2VECTOR datatypes are storage-compatible with * generic arrays, but they support only one-dimensional arrays with no - * nulls (and no null bitmap). + * nulls (and no null bitmap). They don't support being toasted, either. * * There are also some "fixed-length array" datatypes, such as NAME and * POINT. These are simply a sequence of a fixed number of items each diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index b628c2811b..38aa030303 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -2192,14 +2192,19 @@ build_datatype(HeapTuple typeTup, int32 typmod, Oid collation) /* NB: this is only used to decide whether to apply expand_array */ if (typeStruct->typtype == TYPTYPE_BASE) { - /* this test should match what get_element_type() checks */ + /* + * This test should include what get_element_type() checks. We also + * disallow non-toastable array types (i.e. oidvector and int2vector). + */ typ->typisarray = (typeStruct->typlen == -1 && - OidIsValid(typeStruct->typelem)); + OidIsValid(typeStruct->typelem) && + typeStruct->typstorage != 'p'); } else if (typeStruct->typtype == TYPTYPE_DOMAIN) { /* we can short-circuit looking up base types if it's not varlena */ typ->typisarray = (typeStruct->typlen == -1 && + typeStruct->typstorage != 'p' && OidIsValid(get_base_element_type(typeStruct->typbasetype))); } else -- cgit v1.2.3 From c5d4f40cb5e231eb2cbc533b5f094f3a4829e2ef Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 18 Aug 2016 16:04:35 -0400 Subject: Update line count totals for psql help displays. As usual, we've been pretty awful about maintaining these counts. They're not all that critical, perhaps, but let's get them right at release time. Also fix 9.5, which I notice is just as bad. 
It's probably wrong further back, but the lack of --help=foo options before 9.5 makes it too painful to count. --- src/bin/psql/help.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c index efc845414f..a69c4dd8cf 100644 --- a/src/bin/psql/help.c +++ b/src/bin/psql/help.c @@ -69,7 +69,7 @@ usage(unsigned short int pager) * Keep this line count in sync with the number of lines printed below! * Use "psql --help=options | wc" to count correctly. */ - output = PageOutput(60, pager ? &(pset.popt.topt) : NULL); + output = PageOutput(61, pager ? &(pset.popt.topt) : NULL); fprintf(output, _("psql is the PostgreSQL interactive terminal.\n\n")); fprintf(output, _("Usage:\n")); @@ -168,7 +168,7 @@ slashUsage(unsigned short int pager) * Use "psql --help=commands | wc" to count correctly. It's okay to count * the USE_READLINE line even in builds without that. */ - output = PageOutput(111, pager ? &(pset.popt.topt) : NULL); + output = PageOutput(113, pager ? &(pset.popt.topt) : NULL); fprintf(output, _("General\n")); fprintf(output, _(" \\copyright show PostgreSQL usage and distribution terms\n")); @@ -325,7 +325,7 @@ helpVariables(unsigned short int pager) * Windows builds currently print one more line than non-Windows builds. * Using the larger number is fine. */ - output = PageOutput(87, pager ? &(pset.popt.topt) : NULL); + output = PageOutput(88, pager ? &(pset.popt.topt) : NULL); fprintf(output, _("List of specially treated variables\n\n")); -- cgit v1.2.3 From 1d2e73a3dfdbd7168b323fa39879c60df6076412 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Thu, 18 Aug 2016 12:00:00 -0400 Subject: Remove obsolete replacement system() on darwin Per comment in the file, this was fixed around OS X 10.2. --- src/backend/port/Makefile | 4 -- src/backend/port/darwin/Makefile | 17 ------- src/backend/port/darwin/README | 36 -------------- src/backend/port/darwin/system.c | 104 --------------------------------------- 4 files changed, 161 deletions(-) delete mode 100644 src/backend/port/darwin/Makefile delete mode 100644 src/backend/port/darwin/README delete mode 100644 src/backend/port/darwin/system.c (limited to 'src') diff --git a/src/backend/port/Makefile b/src/backend/port/Makefile index 89549d0d2b..aba1e92fe1 100644 --- a/src/backend/port/Makefile +++ b/src/backend/port/Makefile @@ -23,9 +23,6 @@ include $(top_builddir)/src/Makefile.global OBJS = atomics.o dynloader.o pg_sema.o pg_shmem.o $(TAS) -ifeq ($(PORTNAME), darwin) -SUBDIRS += darwin -endif ifeq ($(PORTNAME), win32) SUBDIRS += win32 endif @@ -44,5 +41,4 @@ endif distclean clean: rm -f tas_cpp.s - $(MAKE) -C darwin clean $(MAKE) -C win32 clean diff --git a/src/backend/port/darwin/Makefile b/src/backend/port/darwin/Makefile deleted file mode 100644 index 9d463ffb87..0000000000 --- a/src/backend/port/darwin/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -#------------------------------------------------------------------------- -# -# Makefile-- -# Makefile for port/darwin -# -# IDENTIFICATION -# src/backend/port/darwin/Makefile -# -#------------------------------------------------------------------------- - -subdir = src/backend/port/darwin -top_builddir = ../../../.. 
-include $(top_builddir)/src/Makefile.global - -OBJS = system.o - -include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/port/darwin/README b/src/backend/port/darwin/README deleted file mode 100644 index 2d9df79683..0000000000 --- a/src/backend/port/darwin/README +++ /dev/null @@ -1,36 +0,0 @@ -src/backend/port/darwin/README - -Darwin -====== - -The file system.c included herein is taken directly from Apple's Darwin -open-source CVS archives, and is redistributed under the BSD copyright -notice it bears. (According to Apple's CVS logs, their version is -identical to the FreeBSD original.) It provides our own implementation of -the system(3) function, which ought by all rights to be identical to the -one provided in libc on Darwin machines. Nonetheless, this version works, -whereas the one that actually ships with Mac OS X 10.1 doesn't. The -shipped version appears to disconnect the calling process from any shared -memory segments it is attached to. (The symptom seen in PostgreSQL is -that a backend attempting to execute CREATE DATABASE core-dumps.) I would -love to know why there is a discrepancy between the published source and -the actual behavior --- tgl 7-Nov-2001. - -Appropriate bug reports have been filed with Apple --- see -Radar Bug#s 2767956, 2683531, 2805147. One hopes we can retire this -kluge in the not too distant future. - - -As of PostgreSQL 7.3 and Mac OS X 10.1, one should expect warnings -like these while linking the backend: - -/usr/bin/ld: warning unused multiple definitions of symbol _system -port/SUBSYS.o definition of _system in section (__TEXT,__text) -/usr/lib/libm.dylib(system.o) unused definition of _system - -These are due to overriding system() per the above-described hack. - - -The bug appears to be repaired in OS X 10.2.6 and later (possibly in -earlier 10.2.* as well, but no systems handy to check). We #ifdef out -the substitute system() definition on 10.3 and later. diff --git a/src/backend/port/darwin/system.c b/src/backend/port/darwin/system.c deleted file mode 100644 index 1cd5266929..0000000000 --- a/src/backend/port/darwin/system.c +++ /dev/null @@ -1,104 +0,0 @@ -/* - * src/backend/port/darwin/system.c - * - * only needed in OS X 10.1 and possibly early 10.2 releases */ -#include /* pgrminclude ignore */ -#if MAC_OS_X_VERSION_MAX_ALLOWED <= MAC_OS_X_VERSION_10_2 || !defined(MAC_OS_X_VERSION_10_2) - -/* - * Copyright (c) 1988, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD: src/lib/libc/stdlib/system.c,v 1.6 2000/03/16 02:14:41 jasone Exp $ - */ - -#if defined(LIBC_SCCS) && !defined(lint) -static char sccsid[] = "@(#)system.c 8.1 (Berkeley) 6/4/93"; -#endif /* LIBC_SCCS and not lint */ - -#include -#include -#include -#include -#include - -int system(const char *command); - -int -system(const char *command) -{ - pid_t pid; - int pstat; - struct sigaction ign, - intact, - quitact; - sigset_t newsigblock, - oldsigblock; - - if (!command) /* just checking... */ - return (1); - - /* - * Ignore SIGINT and SIGQUIT, block SIGCHLD. Remember to save existing - * signal dispositions. - */ - ign.sa_handler = SIG_IGN; - (void) sigemptyset(&ign.sa_mask); - ign.sa_flags = 0; - (void) sigaction(SIGINT, &ign, &intact); - (void) sigaction(SIGQUIT, &ign, &quitact); - (void) sigemptyset(&newsigblock); - (void) sigaddset(&newsigblock, SIGCHLD); - (void) sigprocmask(SIG_BLOCK, &newsigblock, &oldsigblock); - switch (pid = fork()) - { - case -1: /* error */ - break; - case 0: /* child */ - - /* - * Restore original signal dispositions and exec the command. - */ - (void) sigaction(SIGINT, &intact, NULL); - (void) sigaction(SIGQUIT, &quitact, NULL); - (void) sigprocmask(SIG_SETMASK, &oldsigblock, NULL); - execl(_PATH_BSHELL, "sh", "-c", command, (char *) NULL); - _exit(127); - default: /* parent */ - do - { - pid = wait4(pid, &pstat, 0, (struct rusage *) 0); - } while (pid == -1 && errno == EINTR); - break; - } - (void) sigaction(SIGINT, &intact, NULL); - (void) sigaction(SIGQUIT, &quitact, NULL); - (void) sigprocmask(SIG_SETMASK, &oldsigblock, NULL); - return (pid == -1 ? -1 : pstat); -} - -#endif /* OS X < 10.3 */ -- cgit v1.2.3 From 9595383bc6fc24d25970374e2eddd5ce6f977f9e Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Thu, 18 Aug 2016 17:30:14 -0700 Subject: Add alternative output for ON CONFLICT toast isolation test. On some buildfarm animals the isolationtest added in 07ef0351 failed, as the order in which processes are run after unlocking is not guaranteed. Add an alternative output for that. Discussion: <7969.1471484738@sss.pgh.pa.us> Backpatch: 9.6, like the test in the aforementioned commit --- src/test/isolation/expected/insert-conflict-toast_1.out | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 src/test/isolation/expected/insert-conflict-toast_1.out (limited to 'src') diff --git a/src/test/isolation/expected/insert-conflict-toast_1.out b/src/test/isolation/expected/insert-conflict-toast_1.out new file mode 100644 index 0000000000..18346162b7 --- /dev/null +++ b/src/test/isolation/expected/insert-conflict-toast_1.out @@ -0,0 +1,15 @@ +Parsed test spec with 3 sessions + +starting permutation: s2insert s3insert s1commit +pg_advisory_xact_lock + + +step s2insert: + INSERT INTO ctoast (key, val) VALUES (1, ctoast_large_val()) ON CONFLICT DO NOTHING; + +step s3insert: + INSERT INTO ctoast (key, val) VALUES (1, ctoast_large_val()) ON CONFLICT DO NOTHING; + +step s1commit: COMMIT; +step s3insert: <... 
completed> +step s2insert: <... completed> -- cgit v1.2.3 From 6eefd2422ef232aec2fe12465d9ec4018c63814d Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 19 Aug 2016 12:51:02 -0400 Subject: Remove typedef celt from the regex library, along with macro NOCELT. The regex library used to have a notion of a "collating element" that was distinct from a "character", but Henry Spencer never actually implemented his planned support for multi-character collating elements, and the Tcl crew ripped out most of the stubs for that years ago. The only thing left that distinguished the "celt" typedef from the "chr" typedef was that "celt" was supposed to also be able to hold the not-a-character "NOCELT" value. However, NOCELT was not used anywhere after the MCCE stub removal changes, which means there's no need for celt to be different from chr. Removing the separate typedef simplifies matters and also removes a trap for the unwary, in that celt is signed while chr may not be, so comparisons could mean different things. There's no bug there today because we restrict CHR_MAX to be less than INT_MAX, but I think there may have been such bugs before we did that, and there could be again if anyone ever decides to fool with the range of chr. This patch also removes assorted unnecessary casts to "chr" of values that are already chrs. Many of these seem to be leftover from days when the code was compatible with pre-ANSI C. --- src/backend/regex/regc_cvec.c | 6 +++--- src/backend/regex/regc_lex.c | 12 +++++------- src/backend/regex/regc_locale.c | 37 ++++++++++++++++++------------------- src/backend/regex/regcomp.c | 12 ++++++------ src/include/regex/regcustom.h | 4 +--- 5 files changed, 33 insertions(+), 38 deletions(-) (limited to 'src') diff --git a/src/backend/regex/regc_cvec.c b/src/backend/regex/regc_cvec.c index 921a7d7f92..3a9e8cfbbd 100644 --- a/src/backend/regex/regc_cvec.c +++ b/src/backend/regex/regc_cvec.c @@ -78,7 +78,7 @@ addchr(struct cvec * cv, /* character vector */ chr c) /* character to add */ { assert(cv->nchrs < cv->chrspace); - cv->chrs[cv->nchrs++] = (chr) c; + cv->chrs[cv->nchrs++] = c; } /* @@ -90,8 +90,8 @@ addrange(struct cvec * cv, /* character vector */ chr to) /* last character of range */ { assert(cv->nranges < cv->rangespace); - cv->ranges[cv->nranges * 2] = (chr) from; - cv->ranges[cv->nranges * 2 + 1] = (chr) to; + cv->ranges[cv->nranges * 2] = from; + cv->ranges[cv->nranges * 2 + 1] = to; cv->nranges++; } diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c index f62ec7dc81..cd34c8ae41 100644 --- a/src/backend/regex/regc_lex.c +++ b/src/backend/regex/regc_lex.c @@ -870,7 +870,7 @@ lexescape(struct vars * v) if (v->now == save || ((int) c > 0 && (int) c <= v->nsubexp)) { NOTE(REG_UBACKREF); - RETV(BACKREF, (chr) c); + RETV(BACKREF, c); } /* oops, doesn't look like it's a backref after all... 
*/ v->now = save; @@ -986,10 +986,8 @@ lexdigits(struct vars * v, */ static int /* 1 normal, 0 failure */ brenext(struct vars * v, - chr pc) + chr c) { - chr c = (chr) pc; - switch (c) { case CHR('*'): @@ -1153,7 +1151,7 @@ chrnamed(struct vars * v, const chr *endp, /* just past end of name */ chr lastresort) /* what to return if name lookup fails */ { - celt c; + chr c; int errsave; int e; struct cvec *cv; @@ -1165,10 +1163,10 @@ chrnamed(struct vars * v, v->err = errsave; if (e != 0) - return (chr) lastresort; + return lastresort; cv = range(v, c, c, 0); if (cv->nchrs == 0) - return (chr) lastresort; + return lastresort; return cv->chrs[0]; } diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c index 4fe62921e3..399de027cd 100644 --- a/src/backend/regex/regc_locale.c +++ b/src/backend/regex/regc_locale.c @@ -361,9 +361,9 @@ static const struct cname /* - * element - map collating-element name to celt + * element - map collating-element name to chr */ -static celt +static chr element(struct vars * v, /* context */ const chr *startp, /* points to start of name */ const chr *endp) /* points just past end of name */ @@ -401,13 +401,13 @@ element(struct vars * v, /* context */ */ static struct cvec * range(struct vars * v, /* context */ - celt a, /* range start */ - celt b, /* range end, might equal a */ + chr a, /* range start */ + chr b, /* range end, might equal a */ int cases) /* case-independent? */ { int nchrs; struct cvec *cv; - celt c, + chr c, cc; if (a != b && !before(a, b)) @@ -444,7 +444,7 @@ range(struct vars * v, /* context */ for (c = a; c <= b; c++) { - cc = pg_wc_tolower((chr) c); + cc = pg_wc_tolower(c); if (cc != c && (before(cc, a) || before(b, cc))) { @@ -455,7 +455,7 @@ range(struct vars * v, /* context */ } addchr(cv, cc); } - cc = pg_wc_toupper((chr) c); + cc = pg_wc_toupper(c); if (cc != c && (before(cc, a) || before(b, cc))) { @@ -477,10 +477,10 @@ range(struct vars * v, /* context */ } /* - * before - is celt x before celt y, for purposes of range legality? + * before - is chr x before chr y, for purposes of range legality? */ static int /* predicate */ -before(celt x, celt y) +before(chr x, chr y) { if (x < y) return 1; @@ -493,7 +493,7 @@ before(celt x, celt y) */ static struct cvec * eclass(struct vars * v, /* context */ - celt c, /* Collating element representing the + chr c, /* Collating element representing the * equivalence class. */ int cases) /* all cases? 
*/ { @@ -503,12 +503,12 @@ eclass(struct vars * v, /* context */ if ((v->cflags & REG_FAKE) && c == 'x') { cv = getcvec(v, 4, 0); - addchr(cv, (chr) 'x'); - addchr(cv, (chr) 'y'); + addchr(cv, CHR('x')); + addchr(cv, CHR('y')); if (cases) { - addchr(cv, (chr) 'X'); - addchr(cv, (chr) 'Y'); + addchr(cv, CHR('X')); + addchr(cv, CHR('Y')); } return cv; } @@ -518,7 +518,7 @@ eclass(struct vars * v, /* context */ return allcases(v, c); cv = getcvec(v, 1, 0); assert(cv != NULL); - addchr(cv, (chr) c); + addchr(cv, c); return cv; } @@ -673,15 +673,14 @@ cclass(struct vars * v, /* context */ */ static struct cvec * allcases(struct vars * v, /* context */ - chr pc) /* character to get case equivs of */ + chr c) /* character to get case equivs of */ { struct cvec *cv; - chr c = (chr) pc; chr lc, uc; - lc = pg_wc_tolower((chr) c); - uc = pg_wc_toupper((chr) c); + lc = pg_wc_tolower(c); + uc = pg_wc_toupper(c); cv = getcvec(v, 2, 0); addchr(cv, lc); diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c index cc589b0930..48d63da11d 100644 --- a/src/backend/regex/regcomp.c +++ b/src/backend/regex/regcomp.c @@ -210,10 +210,10 @@ static pg_wchar pg_wc_toupper(pg_wchar c); static pg_wchar pg_wc_tolower(pg_wchar c); /* === regc_locale.c === */ -static celt element(struct vars *, const chr *, const chr *); -static struct cvec *range(struct vars *, celt, celt, int); -static int before(celt, celt); -static struct cvec *eclass(struct vars *, celt, int); +static chr element(struct vars *, const chr *, const chr *); +static struct cvec *range(struct vars *, chr, chr, int); +static int before(chr, chr); +static struct cvec *eclass(struct vars *, chr, int); static struct cvec *cclass(struct vars *, const chr *, const chr *, int); static struct cvec *allcases(struct vars *, chr); static int cmp(const chr *, const chr *, size_t); @@ -1424,8 +1424,8 @@ brackpart(struct vars * v, struct state * lp, struct state * rp) { - celt startc; - celt endc; + chr startc; + chr endc; struct cvec *cv; const chr *startp; const chr *endp; diff --git a/src/include/regex/regcustom.h b/src/include/regex/regcustom.h index 60034daee8..459851a7f6 100644 --- a/src/include/regex/regcustom.h +++ b/src/include/regex/regcustom.h @@ -58,15 +58,13 @@ /* internal character type and related */ typedef pg_wchar chr; /* the type itself */ typedef unsigned uchr; /* unsigned type that will hold a chr */ -typedef int celt; /* type to hold chr, or NOCELT */ -#define NOCELT (-1) /* celt value which is not valid chr */ #define CHR(c) ((unsigned char) (c)) /* turn char literal into chr literal */ #define DIGITVAL(c) ((c)-'0') /* turn chr digit into its value */ #define CHRBITS 32 /* bits in a chr; must not use sizeof */ #define CHR_MIN 0x00000000 /* smallest and largest chr; the value */ #define CHR_MAX 0x7ffffffe /* CHR_MAX-CHR_MIN+1 must fit in an int, and - * CHR_MAX+1 must fit in both chr and celt */ + * CHR_MAX+1 must fit in a chr variable */ /* * Check if a chr value is in range. Ideally we'd just write this as -- cgit v1.2.3 From a859e640035680db31531ccd19a67292dd726baf Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 19 Aug 2016 13:31:10 -0400 Subject: Clean up another pre-ANSI-C-ism in regex code: get rid of pcolor typedef. pcolor was used to represent function arguments that are nominally of type color, but when using a pre-ANSI C compiler would be passed as the promoted integer type. We really don't need that anymore. 
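As a minimal sketch of the prototype point (illustrative only; the function name is invented and this is not code from the patch): once an ANSI prototype is in scope, the compiler converts each argument to the declared parameter type, so a narrow typedef such as "short color" can be passed directly, and the old convention of declaring parameters with a separate promoted type ("int pcolor") buys nothing.

    typedef short color;

    /* hypothetical function, not the real setcolor(): with a prototype
     * visible, callers may pass a color (or an int constant) and the
     * compiler converts it to the parameter type "color" itself */
    static color
    copycolor_demo(color co)
    {
        return co;
    }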
--- src/backend/regex/regc_color.c | 10 +++++----- src/backend/regex/regc_nfa.c | 8 ++++---- src/backend/regex/regcomp.c | 14 +++++++------- src/backend/regex/rege_dfa.c | 6 +++--- src/backend/regex/regexec.c | 4 ++-- src/include/regex/regguts.h | 1 - 6 files changed, 21 insertions(+), 22 deletions(-) (limited to 'src') diff --git a/src/backend/regex/regc_color.c b/src/backend/regex/regc_color.c index c495cee300..8ffc8fb797 100644 --- a/src/backend/regex/regc_color.c +++ b/src/backend/regex/regc_color.c @@ -148,7 +148,7 @@ cmtreefree(struct colormap * cm, static color /* previous color */ setcolor(struct colormap * cm, chr c, - pcolor co) + color co) { uchr uc = c; int shift; @@ -199,7 +199,7 @@ setcolor(struct colormap * cm, b = uc & BYTMASK; prev = t->tcolor[b]; - t->tcolor[b] = (color) co; + t->tcolor[b] = co; return prev; } @@ -293,7 +293,7 @@ newcolor(struct colormap * cm) */ static void freecolor(struct colormap * cm, - pcolor co) + color co) { struct colordesc *cd = &cm->cd[co]; color pco, @@ -393,7 +393,7 @@ subcolor(struct colormap * cm, chr c) */ static color newsub(struct colormap * cm, - pcolor co) + color co) { color sco; /* new subcolor */ @@ -658,7 +658,7 @@ static void rainbow(struct nfa * nfa, struct colormap * cm, int type, - pcolor but, /* COLORLESS if no exceptions */ + color but, /* COLORLESS if no exceptions */ struct state * from, struct state * to) { diff --git a/src/backend/regex/regc_nfa.c b/src/backend/regex/regc_nfa.c index cd9a3239bd..90dca5d9de 100644 --- a/src/backend/regex/regc_nfa.c +++ b/src/backend/regex/regc_nfa.c @@ -275,7 +275,7 @@ destroystate(struct nfa * nfa, static void newarc(struct nfa * nfa, int t, - pcolor co, + color co, struct state * from, struct state * to) { @@ -321,7 +321,7 @@ newarc(struct nfa * nfa, static void createarc(struct nfa * nfa, int t, - pcolor co, + color co, struct state * from, struct state * to) { @@ -334,7 +334,7 @@ createarc(struct nfa * nfa, assert(a != NULL); a->type = t; - a->co = (color) co; + a->co = co; a->to = to; a->from = from; @@ -553,7 +553,7 @@ hasnonemptyout(struct state * s) static struct arc * findarc(struct state * s, int type, - pcolor co) + color co) { struct arc *a; diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c index 48d63da11d..b211cc0a18 100644 --- a/src/backend/regex/regcomp.c +++ b/src/backend/regex/regcomp.c @@ -97,19 +97,19 @@ static chr chrnamed(struct vars *, const chr *, const chr *, chr); static void initcm(struct vars *, struct colormap *); static void freecm(struct colormap *); static void cmtreefree(struct colormap *, union tree *, int); -static color setcolor(struct colormap *, chr, pcolor); +static color setcolor(struct colormap *, chr, color); static color maxcolor(struct colormap *); static color newcolor(struct colormap *); -static void freecolor(struct colormap *, pcolor); +static void freecolor(struct colormap *, color); static color pseudocolor(struct colormap *); static color subcolor(struct colormap *, chr c); -static color newsub(struct colormap *, pcolor); +static color newsub(struct colormap *, color); static void subrange(struct vars *, chr, chr, struct state *, struct state *); static void subblock(struct vars *, chr, struct state *, struct state *); static void okcolors(struct nfa *, struct colormap *); static void colorchain(struct colormap *, struct arc *); static void uncolorchain(struct colormap *, struct arc *); -static void rainbow(struct nfa *, struct colormap *, int, pcolor, struct state *, struct state *); +static void rainbow(struct nfa *, 
struct colormap *, int, color, struct state *, struct state *); static void colorcomplement(struct nfa *, struct colormap *, int, struct state *, struct state *, struct state *); #ifdef REG_DEBUG @@ -125,13 +125,13 @@ static struct state *newfstate(struct nfa *, int flag); static void dropstate(struct nfa *, struct state *); static void freestate(struct nfa *, struct state *); static void destroystate(struct nfa *, struct state *); -static void newarc(struct nfa *, int, pcolor, struct state *, struct state *); -static void createarc(struct nfa *, int, pcolor, struct state *, struct state *); +static void newarc(struct nfa *, int, color, struct state *, struct state *); +static void createarc(struct nfa *, int, color, struct state *, struct state *); static struct arc *allocarc(struct nfa *, struct state *); static void freearc(struct nfa *, struct arc *); static void changearctarget(struct arc *, struct state *); static int hasnonemptyout(struct state *); -static struct arc *findarc(struct state *, int, pcolor); +static struct arc *findarc(struct state *, int, color); static void cparc(struct nfa *, struct arc *, struct state *, struct state *); static void sortins(struct nfa *, struct state *); static int sortins_cmp(const void *, const void *); diff --git a/src/backend/regex/rege_dfa.c b/src/backend/regex/rege_dfa.c index 7d90242ace..b98c9d3902 100644 --- a/src/backend/regex/rege_dfa.c +++ b/src/backend/regex/rege_dfa.c @@ -603,7 +603,7 @@ static struct sset * miss(struct vars * v, struct dfa * d, struct sset * css, - pcolor co, + color co, chr *cp, /* next chr */ chr *start) /* where the attempt got started */ { @@ -731,7 +731,7 @@ miss(struct vars * v, css->outs[co] = p; css->inchain[co] = p->ins; p->ins.ss = css; - p->ins.co = (color) co; + p->ins.co = co; } return p; } @@ -743,7 +743,7 @@ static int /* predicate: constraint satisfied? */ lacon(struct vars * v, struct cnfa * pcnfa, /* parent cnfa */ chr *cp, - pcolor co) /* "color" of the lookaround constraint */ + color co) /* "color" of the lookaround constraint */ { int n; struct subre *sub; diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c index 82659a0f2f..5cbfd9b151 100644 --- a/src/backend/regex/regexec.c +++ b/src/backend/regex/regexec.c @@ -159,8 +159,8 @@ static struct dfa *newdfa(struct vars *, struct cnfa *, struct colormap *, struc static void freedfa(struct dfa *); static unsigned hash(unsigned *, int); static struct sset *initialize(struct vars *, struct dfa *, chr *); -static struct sset *miss(struct vars *, struct dfa *, struct sset *, pcolor, chr *, chr *); -static int lacon(struct vars *, struct cnfa *, chr *, pcolor); +static struct sset *miss(struct vars *, struct dfa *, struct sset *, color, chr *, chr *); +static int lacon(struct vars *, struct cnfa *, chr *, color); static struct sset *getvacant(struct vars *, struct dfa *, chr *, chr *); static struct sset *pickss(struct vars *, struct dfa *, chr *, chr *); diff --git a/src/include/regex/regguts.h b/src/include/regex/regguts.h index 2ceffa6563..b0aa641cc4 100644 --- a/src/include/regex/regguts.h +++ b/src/include/regex/regguts.h @@ -149,7 +149,6 @@ * which are of much more manageable number. 
*/ typedef short color; /* colors of characters */ -typedef int pcolor; /* what color promotes to */ #define MAX_COLOR 32767 /* max color (must fit in 'color' datatype) */ #define COLORLESS (-1) /* impossible color */ -- cgit v1.2.3 From 6f79ae7fe549bed8bbd1f54ddd9b98f8f9a315f5 Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Fri, 19 Aug 2016 14:38:55 -0300 Subject: reorderbuffer: preserve errno while reporting error Clobbering errno during cleanup after an error is an oft-repeated, easy to make mistake. Deal with it here as everywhere else, by saving it aside and restoring after cleanup, before ereport'ing. In passing, add a missing errcode declaration in another ereport() call in the same file, which I noticed while skimming the file looking for similar problems. Backpatch to 9.4, where this code was introduced. --- src/backend/replication/logical/reorderbuffer.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 9594b1c671..213ce34674 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -2317,7 +2317,10 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, if (write(fd, rb->outbuf, ondisk->size) != ondisk->size) { + int save_errno = errno; + CloseTransientFile(fd); + errno = save_errno; ereport(ERROR, (errcode_for_file_access(), errmsg("could not write to data file for XID %u: %m", @@ -3070,7 +3073,8 @@ ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname) fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); if (fd < 0) ereport(ERROR, - (errmsg("could not open file \"%s\": %m", path))); + (errcode_for_file_access(), + errmsg("could not open file \"%s\": %m", path))); while (true) { -- cgit v1.2.3 From da1c91631e3577ea5818f855ebb5bd206d559006 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 19 Aug 2016 14:03:07 -0400 Subject: Speed up planner's scanning for parallel-query hazards. We need to scan the whole parse tree for parallel-unsafe functions. If there are none, we'll later need to determine whether particular subtrees contain any parallel-restricted functions. The previous coding retained no knowledge from the first scan, even though this is very wasteful in the common case where the query contains only parallel-safe functions. We can bypass all of the later scans by remembering that fact. This provides a small but measurable speed improvement when the case applies, and shouldn't cost anything when it doesn't. 
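In outline, as a rough self-contained sketch (all names here are invented for illustration; the actual implementation is in the clauses.c hunk below): do the expensive tree walk once, remember the worst hazard it found, and let every later per-expression check return immediately when that first scan already proved the whole query safe.

    #include <stdbool.h>

    typedef enum { HAZARD_SAFE, HAZARD_RESTRICTED, HAZARD_UNSAFE } Hazard;

    /* worst hazard found by the one-time whole-query scan */
    static Hazard query_max_hazard = HAZARD_SAFE;

    /* stand-in for the recursive expression walk; trivially "safe" here */
    static Hazard
    scan_for_hazards(const void *expr)
    {
        (void) expr;
        return HAZARD_SAFE;
    }

    static bool
    expr_is_parallel_safe(const void *expr)
    {
        /* common case: nothing unsafe anywhere in the query, skip the walk */
        if (query_max_hazard == HAZARD_SAFE)
            return true;
        return scan_for_hazards(expr) == HAZARD_SAFE;
    }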
Patch by me, reviewed by Robert Haas Discussion: <3740.1471538387@sss.pgh.pa.us> --- src/backend/nodes/outfuncs.c | 1 + src/backend/optimizer/path/allpaths.c | 30 ++------- src/backend/optimizer/plan/planmain.c | 9 ++- src/backend/optimizer/plan/planner.c | 43 ++++++++----- src/backend/optimizer/util/clauses.c | 115 +++++++++++++++++++++++++--------- src/backend/optimizer/util/pathnode.c | 6 +- src/backend/optimizer/util/relnode.c | 4 +- src/include/nodes/relation.h | 2 + src/include/optimizer/clauses.h | 3 +- 9 files changed, 133 insertions(+), 80 deletions(-) (limited to 'src') diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 1fab807772..50019f4164 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -2029,6 +2029,7 @@ _outPlannerGlobal(StringInfo str, const PlannerGlobal *node) WRITE_BOOL_FIELD(dependsOnRole); WRITE_BOOL_FIELD(parallelModeOK); WRITE_BOOL_FIELD(parallelModeNeeded); + WRITE_CHAR_FIELD(maxParallelHazard); } static void diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 88d833a2e8..af73792227 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -78,7 +78,6 @@ static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel, static void create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel); static void set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); -static bool function_rte_parallel_ok(RangeTblEntry *rte); static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte); static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, @@ -542,8 +541,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, if (proparallel != PROPARALLEL_SAFE) return; - if (has_parallel_hazard((Node *) rte->tablesample->args, - false)) + if (!is_parallel_safe(root, (Node *) rte->tablesample->args)) return; } @@ -596,7 +594,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, case RTE_FUNCTION: /* Check for parallel-restricted functions. */ - if (!function_rte_parallel_ok(rte)) + if (!is_parallel_safe(root, (Node *) rte->functions)) return; break; @@ -629,40 +627,20 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, * outer join clauses work correctly. It would likely break equivalence * classes, too. */ - if (has_parallel_hazard((Node *) rel->baserestrictinfo, false)) + if (!is_parallel_safe(root, (Node *) rel->baserestrictinfo)) return; /* * Likewise, if the relation's outputs are not parallel-safe, give up. * (Usually, they're just Vars, but sometimes they're not.) */ - if (has_parallel_hazard((Node *) rel->reltarget->exprs, false)) + if (!is_parallel_safe(root, (Node *) rel->reltarget->exprs)) return; /* We have a winner. */ rel->consider_parallel = true; } -/* - * Check whether a function RTE is scanning something parallel-restricted. 
- */ -static bool -function_rte_parallel_ok(RangeTblEntry *rte) -{ - ListCell *lc; - - foreach(lc, rte->functions) - { - RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc); - - Assert(IsA(rtfunc, RangeTblFunction)); - if (has_parallel_hazard(rtfunc->funcexpr, false)) - return false; - } - - return true; -} - /* * set_plain_rel_pathlist * Build access paths for a plain relation (no subquery, no inheritance) diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c index 27234ffa22..e7ae7ae8ae 100644 --- a/src/backend/optimizer/plan/planmain.c +++ b/src/backend/optimizer/plan/planmain.c @@ -71,14 +71,13 @@ query_planner(PlannerInfo *root, List *tlist, /* * If query allows parallelism in general, check whether the quals are - * parallel-restricted. There's currently no real benefit to setting - * this flag correctly because we can't yet reference subplans from - * parallel workers. But that might change someday, so set this - * correctly anyway. + * parallel-restricted. (We need not check final_rel->reltarget + * because it's empty at this point. Anything parallel-restricted in + * the query tlist will be dealt with later.) */ if (root->glob->parallelModeOK) final_rel->consider_parallel = - !has_parallel_hazard(parse->jointree->quals, false); + is_parallel_safe(root, parse->jointree->quals); /* The only path for it is a trivial Result path */ add_path(final_rel, (Path *) diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index b265628325..174210be6c 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -23,6 +23,7 @@ #include "access/sysattr.h" #include "access/xact.h" #include "catalog/pg_constraint_fn.h" +#include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "executor/executor.h" #include "executor/nodeAgg.h" @@ -241,12 +242,26 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) * time and execution time, so don't generate a parallel plan if we're in * serializable mode. */ - glob->parallelModeOK = (cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 && - IsUnderPostmaster && dynamic_shared_memory_type != DSM_IMPL_NONE && - parse->commandType == CMD_SELECT && !parse->hasModifyingCTE && - parse->utilityStmt == NULL && max_parallel_workers_per_gather > 0 && - !IsParallelWorker() && !IsolationIsSerializable() && - !has_parallel_hazard((Node *) parse, true); + if ((cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 && + IsUnderPostmaster && + dynamic_shared_memory_type != DSM_IMPL_NONE && + parse->commandType == CMD_SELECT && + parse->utilityStmt == NULL && + !parse->hasModifyingCTE && + max_parallel_workers_per_gather > 0 && + !IsParallelWorker() && + !IsolationIsSerializable()) + { + /* all the cheap tests pass, so scan the query tree */ + glob->maxParallelHazard = max_parallel_hazard(parse); + glob->parallelModeOK = (glob->maxParallelHazard != PROPARALLEL_UNSAFE); + } + else + { + /* skip the query tree scan, just assume it's unsafe */ + glob->maxParallelHazard = PROPARALLEL_UNSAFE; + glob->parallelModeOK = false; + } /* * glob->parallelModeNeeded should tell us whether it's necessary to @@ -1802,7 +1817,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, * computed by partial paths. 
*/ if (current_rel->partial_pathlist && - !has_parallel_hazard((Node *) scanjoin_target->exprs, false)) + is_parallel_safe(root, (Node *) scanjoin_target->exprs)) { /* Apply the scan/join target to each partial path */ foreach(lc, current_rel->partial_pathlist) @@ -1948,8 +1963,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, * query. */ if (current_rel->consider_parallel && - !has_parallel_hazard(parse->limitOffset, false) && - !has_parallel_hazard(parse->limitCount, false)) + is_parallel_safe(root, parse->limitOffset) && + is_parallel_safe(root, parse->limitCount)) final_rel->consider_parallel = true; /* @@ -3326,8 +3341,8 @@ create_grouping_paths(PlannerInfo *root, * target list and HAVING quals are parallel-safe. */ if (input_rel->consider_parallel && - !has_parallel_hazard((Node *) target->exprs, false) && - !has_parallel_hazard((Node *) parse->havingQual, false)) + is_parallel_safe(root, (Node *) target->exprs) && + is_parallel_safe(root, (Node *) parse->havingQual)) grouped_rel->consider_parallel = true; /* @@ -3881,8 +3896,8 @@ create_window_paths(PlannerInfo *root, * target list and active windows for non-parallel-safe constructs. */ if (input_rel->consider_parallel && - !has_parallel_hazard((Node *) output_target->exprs, false) && - !has_parallel_hazard((Node *) activeWindows, false)) + is_parallel_safe(root, (Node *) output_target->exprs) && + is_parallel_safe(root, (Node *) activeWindows)) window_rel->consider_parallel = true; /* @@ -4272,7 +4287,7 @@ create_ordered_paths(PlannerInfo *root, * target list is parallel-safe. */ if (input_rel->consider_parallel && - !has_parallel_hazard((Node *) target->exprs, false)) + is_parallel_safe(root, (Node *) target->exprs)) ordered_rel->consider_parallel = true; /* diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index a40ad40606..4496fde056 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -91,8 +91,9 @@ typedef struct typedef struct { - bool allow_restricted; -} has_parallel_hazard_arg; + char max_hazard; /* worst proparallel hazard found so far */ + char max_interesting; /* worst proparallel hazard of interest */ +} max_parallel_hazard_context; static bool contain_agg_clause_walker(Node *node, void *context); static bool get_agg_clause_costs_walker(Node *node, @@ -103,8 +104,8 @@ static bool contain_subplans_walker(Node *node, void *context); static bool contain_mutable_functions_walker(Node *node, void *context); static bool contain_volatile_functions_walker(Node *node, void *context); static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context); -static bool has_parallel_hazard_walker(Node *node, - has_parallel_hazard_arg *context); +static bool max_parallel_hazard_walker(Node *node, + max_parallel_hazard_context *context); static bool contain_nonstrict_functions_walker(Node *node, void *context); static bool contain_context_dependent_node(Node *clause); static bool contain_context_dependent_node_walker(Node *node, int *flags); @@ -1100,46 +1101,98 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context) context); } + /***************************************************************************** * Check queries for parallel unsafe and/or restricted constructs *****************************************************************************/ /* - * Check whether a node tree contains parallel hazards. 
This is used both on - * the entire query tree, to see whether the query can be parallelized at all - * (with allow_restricted = true), and also to evaluate whether a particular - * expression is safe to run within a parallel worker (with allow_restricted = - * false). We could separate these concerns into two different functions, but - * there's enough overlap that it doesn't seem worthwhile. + * max_parallel_hazard + * Find the worst parallel-hazard level in the given query + * + * Returns the worst function hazard property (the earliest in this list: + * PROPARALLEL_UNSAFE, PROPARALLEL_RESTRICTED, PROPARALLEL_SAFE) that can + * be found in the given parsetree. We use this to find out whether the query + * can be parallelized at all. The caller will also save the result in + * PlannerGlobal so as to short-circuit checks of portions of the querytree + * later, in the common case where everything is SAFE. + */ +char +max_parallel_hazard(Query *parse) +{ + max_parallel_hazard_context context; + + context.max_hazard = PROPARALLEL_SAFE; + context.max_interesting = PROPARALLEL_UNSAFE; + (void) max_parallel_hazard_walker((Node *) parse, &context); + return context.max_hazard; +} + +/* + * is_parallel_safe + * Detect whether the given expr contains only parallel-safe functions + * + * root->glob->maxParallelHazard must previously have been set to the + * result of max_parallel_hazard() on the whole query. */ bool -has_parallel_hazard(Node *node, bool allow_restricted) +is_parallel_safe(PlannerInfo *root, Node *node) { - has_parallel_hazard_arg context; + max_parallel_hazard_context context; - context.allow_restricted = allow_restricted; - return has_parallel_hazard_walker(node, &context); + /* If max_parallel_hazard found nothing unsafe, we don't need to look */ + if (root->glob->maxParallelHazard == PROPARALLEL_SAFE) + return true; + /* Else use max_parallel_hazard's search logic, but stop on RESTRICTED */ + context.max_hazard = PROPARALLEL_SAFE; + context.max_interesting = PROPARALLEL_RESTRICTED; + return !max_parallel_hazard_walker(node, &context); } +/* core logic for all parallel-hazard checks */ static bool -has_parallel_hazard_checker(Oid func_id, void *context) +max_parallel_hazard_test(char proparallel, max_parallel_hazard_context *context) { - char proparallel = func_parallel(func_id); + switch (proparallel) + { + case PROPARALLEL_SAFE: + /* nothing to see here, move along */ + break; + case PROPARALLEL_RESTRICTED: + /* increase max_hazard to RESTRICTED */ + Assert(context->max_hazard != PROPARALLEL_UNSAFE); + context->max_hazard = proparallel; + /* done if we are not expecting any unsafe functions */ + if (context->max_interesting == proparallel) + return true; + break; + case PROPARALLEL_UNSAFE: + context->max_hazard = proparallel; + /* we're always done at the first unsafe construct */ + return true; + default: + elog(ERROR, "unrecognized proparallel value \"%c\"", proparallel); + break; + } + return false; +} - if (((has_parallel_hazard_arg *) context)->allow_restricted) - return (proparallel == PROPARALLEL_UNSAFE); - else - return (proparallel != PROPARALLEL_SAFE); +/* check_functions_in_node callback */ +static bool +max_parallel_hazard_checker(Oid func_id, void *context) +{ + return max_parallel_hazard_test(func_parallel(func_id), + (max_parallel_hazard_context *) context); } static bool -has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) +max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context) { if (node == NULL) return false; /* Check 
for hazardous functions in node itself */ - if (check_functions_in_node(node, has_parallel_hazard_checker, + if (check_functions_in_node(node, max_parallel_hazard_checker, context)) return true; @@ -1156,7 +1209,7 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) */ if (IsA(node, CoerceToDomain)) { - if (!context->allow_restricted) + if (max_parallel_hazard_test(PROPARALLEL_RESTRICTED, context)) return true; } @@ -1167,7 +1220,7 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) { RestrictInfo *rinfo = (RestrictInfo *) node; - return has_parallel_hazard_walker((Node *) rinfo->clause, context); + return max_parallel_hazard_walker((Node *) rinfo->clause, context); } /* @@ -1176,13 +1229,13 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) * not worry about examining their contents; if they are unsafe, we would * have found that out while examining the whole tree before reduction of * sublinks to subplans. (Really we should not see SubLink during a - * not-allow_restricted scan, but if we do, return true.) + * max_interesting == restricted scan, but if we do, return true.) */ else if (IsA(node, SubLink) || IsA(node, SubPlan) || IsA(node, AlternativeSubPlan)) { - if (!context->allow_restricted) + if (max_parallel_hazard_test(PROPARALLEL_RESTRICTED, context)) return true; } @@ -1192,7 +1245,7 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) */ else if (IsA(node, Param)) { - if (!context->allow_restricted) + if (max_parallel_hazard_test(PROPARALLEL_RESTRICTED, context)) return true; } @@ -1207,20 +1260,24 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) /* SELECT FOR UPDATE/SHARE must be treated as unsafe */ if (query->rowMarks != NULL) + { + context->max_hazard = PROPARALLEL_UNSAFE; return true; + } /* Recurse into subselects */ return query_tree_walker(query, - has_parallel_hazard_walker, + max_parallel_hazard_walker, context, 0); } /* Recurse to check arguments */ return expression_tree_walker(node, - has_parallel_hazard_walker, + max_parallel_hazard_walker, context); } + /***************************************************************************** * Check clauses for nonstrict functions *****************************************************************************/ diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index ce7ad545a9..abb7507d8e 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -2178,7 +2178,7 @@ create_projection_path(PlannerInfo *root, pathnode->path.parallel_aware = false; pathnode->path.parallel_safe = rel->consider_parallel && subpath->parallel_safe && - !has_parallel_hazard((Node *) target->exprs, false); + is_parallel_safe(root, (Node *) target->exprs); pathnode->path.parallel_workers = subpath->parallel_workers; /* Projection does not change the sort order */ pathnode->path.pathkeys = subpath->pathkeys; @@ -2285,7 +2285,7 @@ apply_projection_to_path(PlannerInfo *root, * target expressions, then we can't. 
*/ if (IsA(path, GatherPath) && - !has_parallel_hazard((Node *) target->exprs, false)) + is_parallel_safe(root, (Node *) target->exprs)) { GatherPath *gpath = (GatherPath *) path; @@ -2306,7 +2306,7 @@ apply_projection_to_path(PlannerInfo *root, target); } else if (path->parallel_safe && - has_parallel_hazard((Node *) target->exprs, false)) + !is_parallel_safe(root, (Node *) target->exprs)) { /* * We're inserting a parallel-restricted target list into a path diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 806600ed10..deef5605b7 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -513,8 +513,8 @@ build_join_rel(PlannerInfo *root, * here. */ if (inner_rel->consider_parallel && outer_rel->consider_parallel && - !has_parallel_hazard((Node *) restrictlist, false) && - !has_parallel_hazard((Node *) joinrel->reltarget->exprs, false)) + is_parallel_safe(root, (Node *) restrictlist) && + is_parallel_safe(root, (Node *) joinrel->reltarget->exprs)) joinrel->consider_parallel = true; /* diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h index 2be8908445..fcfb0d4d0f 100644 --- a/src/include/nodes/relation.h +++ b/src/include/nodes/relation.h @@ -126,6 +126,8 @@ typedef struct PlannerGlobal bool parallelModeOK; /* parallel mode potentially OK? */ bool parallelModeNeeded; /* parallel mode actually required? */ + + char maxParallelHazard; /* worst PROPARALLEL hazard level */ } PlannerGlobal; /* macro for fetching the Plan associated with a SubPlan node */ diff --git a/src/include/optimizer/clauses.h b/src/include/optimizer/clauses.h index be7c639f7b..9abef37cb6 100644 --- a/src/include/optimizer/clauses.h +++ b/src/include/optimizer/clauses.h @@ -61,7 +61,8 @@ extern bool contain_subplans(Node *clause); extern bool contain_mutable_functions(Node *clause); extern bool contain_volatile_functions(Node *clause); extern bool contain_volatile_functions_not_nextval(Node *clause); -extern bool has_parallel_hazard(Node *node, bool allow_restricted); +extern char max_parallel_hazard(Query *parse); +extern bool is_parallel_safe(PlannerInfo *root, Node *node); extern bool contain_nonstrict_functions(Node *clause); extern bool contain_leaked_vars(Node *clause); -- cgit v1.2.3 From 65a603e90328a7a8fb3ab30ed96f24bf8eb4cf84 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 19 Aug 2016 14:35:32 -0400 Subject: Guard against parallel-restricted functions in VALUES expressions. Obvious brain fade in set_rel_consider_parallel(). Noticed it while adjusting the adjacent RTE_FUNCTION case. In 9.6, also make the code look more like what I just did in HEAD by removing the unnecessary function_rte_parallel_ok subroutine (it does nothing that expression_tree_walker wouldn't do). --- src/backend/optimizer/path/allpaths.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index af73792227..04264b4335 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -599,11 +599,9 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, break; case RTE_VALUES: - - /* - * The data for a VALUES clause is stored in the plan tree itself, - * so scanning it in a worker is fine. - */ + /* Check for parallel-restricted functions. 
*/ + if (!is_parallel_safe(root, (Node *) rte->values_lists)) + return; break; case RTE_CTE: -- cgit v1.2.3 From 8299471c37fff0b0f5a777a12f920125310c0efe Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 19 Aug 2016 17:13:47 -0400 Subject: Use LEFT JOINs in some system views in case referenced row doesn't exist. In particular, left join to pg_authid so that rows in pg_stat_activity don't disappear if the session's owning user has been dropped. Also convert a few joins to pg_database to left joins, in the same spirit, though that case might be harder to hit. We were doing this in other views already, so it was a bit inconsistent that these views didn't. Oskari Saarenmaa, with some further tweaking by me Discussion: <56E87CD8.60007@ohmu.fi> --- src/backend/catalog/system_views.sql | 20 +++++++++----------- src/include/catalog/catversion.h | 2 +- src/test/regress/expected/rules.out | 20 +++++++++----------- 3 files changed, 19 insertions(+), 23 deletions(-) (limited to 'src') diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index 4fc5d5a065..ada214274f 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -642,9 +642,9 @@ CREATE VIEW pg_stat_activity AS S.backend_xid, s.backend_xmin, S.query - FROM pg_database D, pg_stat_get_activity(NULL) AS S, pg_authid U - WHERE S.datid = D.oid AND - S.usesysid = U.oid; + FROM pg_stat_get_activity(NULL) AS S + LEFT JOIN pg_database AS D ON (S.datid = D.oid) + LEFT JOIN pg_authid AS U ON (S.usesysid = U.oid); CREATE VIEW pg_stat_replication AS SELECT @@ -664,10 +664,9 @@ CREATE VIEW pg_stat_replication AS W.replay_location, W.sync_priority, W.sync_state - FROM pg_stat_get_activity(NULL) AS S, pg_authid U, - pg_stat_get_wal_senders() AS W - WHERE S.usesysid = U.oid AND - S.pid = W.pid; + FROM pg_stat_get_activity(NULL) AS S + JOIN pg_stat_get_wal_senders() AS W ON (S.pid = W.pid) + LEFT JOIN pg_authid AS U ON (S.usesysid = U.oid); CREATE VIEW pg_stat_wal_receiver AS SELECT @@ -813,7 +812,7 @@ CREATE VIEW pg_stat_progress_vacuum AS S.param4 AS heap_blks_vacuumed, S.param5 AS index_vacuum_count, S.param6 AS max_dead_tuples, S.param7 AS num_dead_tuples FROM pg_stat_get_progress_info('VACUUM') AS S - JOIN pg_database D ON S.datid = D.oid; + LEFT JOIN pg_database D ON S.datid = D.oid; CREATE VIEW pg_user_mappings AS SELECT @@ -832,12 +831,11 @@ CREATE VIEW pg_user_mappings AS NULL END AS umoptions FROM pg_user_mapping U - LEFT JOIN pg_authid A ON (A.oid = U.umuser) JOIN - pg_foreign_server S ON (U.umserver = S.oid); + JOIN pg_foreign_server S ON (U.umserver = S.oid) + LEFT JOIN pg_authid A ON (A.oid = U.umuser); REVOKE ALL on pg_user_mapping FROM public; - CREATE VIEW pg_replication_origin_status AS SELECT * FROM pg_show_replication_origin_status(); diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index fb356bf3cd..26f6126002 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 201608171 +#define CATALOG_VERSION_NO 201608191 #endif diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 8157324fee..00700f28dc 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -1664,10 +1664,9 @@ pg_stat_activity| SELECT s.datid, s.backend_xid, s.backend_xmin, s.query - FROM pg_database d, - pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, 
wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn), - pg_authid u - WHERE ((s.datid = d.oid) AND (s.usesysid = u.oid)); + FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn) + LEFT JOIN pg_database d ON ((s.datid = d.oid))) + LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); pg_stat_all_indexes| SELECT c.oid AS relid, i.oid AS indexrelid, n.nspname AS schemaname, @@ -1776,7 +1775,7 @@ pg_stat_progress_vacuum| SELECT s.pid, s.param6 AS max_dead_tuples, s.param7 AS num_dead_tuples FROM (pg_stat_get_progress_info('VACUUM'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10) - JOIN pg_database d ON ((s.datid = d.oid))); + LEFT JOIN pg_database d ON ((s.datid = d.oid))); pg_stat_replication| SELECT s.pid, s.usesysid, u.rolname AS usename, @@ -1793,10 +1792,9 @@ pg_stat_replication| SELECT s.pid, w.replay_location, w.sync_priority, w.sync_state - FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn), - pg_authid u, - pg_stat_get_wal_senders() w(pid, state, sent_location, write_location, flush_location, replay_location, sync_priority, sync_state) - WHERE ((s.usesysid = u.oid) AND (s.pid = w.pid)); + FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn) + JOIN pg_stat_get_wal_senders() w(pid, state, sent_location, write_location, flush_location, replay_location, sync_priority, sync_state) ON ((s.pid = w.pid))) + LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); pg_stat_ssl| SELECT s.pid, s.ssl, s.sslversion AS version, @@ -2155,8 +2153,8 @@ pg_user_mappings| SELECT u.oid AS umid, ELSE NULL::text[] END AS umoptions FROM ((pg_user_mapping u - LEFT JOIN pg_authid a ON ((a.oid = u.umuser))) - JOIN pg_foreign_server s ON ((u.umserver = s.oid))); + JOIN pg_foreign_server s ON ((u.umserver = s.oid))) + LEFT JOIN pg_authid a ON ((a.oid = u.umuser))); pg_views| SELECT n.nspname AS schemaname, c.relname AS viewname, pg_get_userbyid(c.relowner) AS viewowner, -- cgit v1.2.3 From 6471045230f5d891ad724c54d406e2214f3c96d9 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 19 Aug 2016 17:32:59 -0400 Subject: Allow empty queries in pgbench. This might have been too much of a foot-gun before 9.6, but with the new commands-end-at-semicolons parsing rule, the only way to get an empty query into a script is to explicitly write an extra ";". So we may as well allow the case. 
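
To see the effect outside of pgbench: PQexec() on a command that contains nothing but whitespace or a stray semicolon reports PGRES_EMPTY_QUERY, and the patch below simply adds that status to the ones pgbench already treats as success. A minimal, self-contained libpq sketch of the same handling (the connection string and the lone ";" command are illustrative only, not pgbench code):

#include <stdio.h>
#include "libpq-fe.h"

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=postgres");  /* assumed DSN */
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    res = PQexec(conn, ";");    /* an "empty" command: just a semicolon */

    switch (PQresultStatus(res))
    {
        case PGRES_COMMAND_OK:
        case PGRES_TUPLES_OK:
        case PGRES_EMPTY_QUERY: /* harmless; now accepted by pgbench too */
            break;              /* OK */
        default:
            fprintf(stderr, "command failed: %s", PQerrorMessage(conn));
            break;
    }

    PQclear(res);
    PQfinish(conn);
    return 0;
}
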
Fabien Coelho Patch: --- src/bin/pgbench/pgbench.c | 1 + 1 file changed, 1 insertion(+) (limited to 'src') diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index 87fb006d87..8027955121 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -1898,6 +1898,7 @@ top: { case PGRES_COMMAND_OK: case PGRES_TUPLES_OK: + case PGRES_EMPTY_QUERY: break; /* OK */ default: fprintf(stderr, "client %d aborted in state %d: %s", -- cgit v1.2.3 From a00c58314745772f6c6a49b6d02a9572cd600bda Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 20 Aug 2016 15:05:25 -0400 Subject: Make initdb's suggested "pg_ctl start" command line more reliable. The original coding here was not nearly careful enough about quoting special characters, and it didn't get corner cases right for constructing the pg_ctl path either. Use join_path_components() and appendShellString() to do it honestly, so that the string will more likely work if blindly copied-and-pasted. While at it, teach appendShellString() not to quote strings that clearly don't need it, so that the output from initdb doesn't become uglier than it was before in typical cases where quoting is not needed. Ryan Murphy, reviewed by Michael Paquier and myself Discussion: --- src/bin/initdb/Makefile | 3 +++ src/bin/initdb/initdb.c | 43 ++++++++++++++++++++++++++++--------------- src/fe_utils/string_utils.c | 17 +++++++++++++++-- 3 files changed, 46 insertions(+), 17 deletions(-) (limited to 'src') diff --git a/src/bin/initdb/Makefile b/src/bin/initdb/Makefile index 094c8945c9..531cc979a4 100644 --- a/src/bin/initdb/Makefile +++ b/src/bin/initdb/Makefile @@ -18,6 +18,9 @@ include $(top_builddir)/src/Makefile.global override CPPFLAGS := -DFRONTEND -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(CPPFLAGS) +# note: we need libpq only because fe_utils does +LDFLAGS += -L$(top_builddir)/src/fe_utils -lpgfeutils $(libpq_pgport) + # use system timezone data? ifneq (,$(with_system_tzdata)) override CPPFLAGS += '-DSYSTEMTZDIR="$(with_system_tzdata)"' diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index a978bbc328..aad6ba5639 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -67,6 +67,7 @@ #include "getaddrinfo.h" #include "getopt_long.h" #include "miscadmin.h" +#include "fe_utils/string_utils.h" /* Define PG_FLUSH_DATA_WORKS if we have an implementation for pg_flush_data */ @@ -331,14 +332,6 @@ do { \ output_failed = true, output_errno = errno; \ } while (0) -#ifndef WIN32 -#define QUOTE_PATH "" -#define DIR_SEP "/" -#else -#define QUOTE_PATH "\"" -#define DIR_SEP "\\" -#endif - static char * escape_quotes(const char *src) { @@ -3359,7 +3352,8 @@ main(int argc, char *argv[]) int c; int option_index; char *effective_user; - char bin_dir[MAXPGPATH]; + PQExpBuffer start_db_cmd; + char pg_ctl_path[MAXPGPATH]; /* * Ensure that buffering behavior of stdout and stderr matches what it is @@ -3587,14 +3581,33 @@ main(int argc, char *argv[]) if (authwarning != NULL) fprintf(stderr, "%s", authwarning); - /* Get directory specification used to start this executable */ - strlcpy(bin_dir, argv[0], sizeof(bin_dir)); - get_parent_directory(bin_dir); + /* + * Build up a shell command to tell the user how to start the server + */ + start_db_cmd = createPQExpBuffer(); + + /* Get directory specification used to start initdb ... */ + strlcpy(pg_ctl_path, argv[0], sizeof(pg_ctl_path)); + canonicalize_path(pg_ctl_path); + get_parent_directory(pg_ctl_path); + /* ... 
and tag on pg_ctl instead */ + join_path_components(pg_ctl_path, pg_ctl_path, "pg_ctl"); + + /* path to pg_ctl, properly quoted */ + appendShellString(start_db_cmd, pg_ctl_path); + + /* add -D switch, with properly quoted data directory */ + appendPQExpBufferStr(start_db_cmd, " -D "); + appendShellString(start_db_cmd, pgdata_native); + + /* add suggested -l switch and "start" command */ + appendPQExpBufferStr(start_db_cmd, " -l logfile start"); printf(_("\nSuccess. You can now start the database server using:\n\n" - " %s%s%spg_ctl%s -D %s%s%s -l logfile start\n\n"), - QUOTE_PATH, bin_dir, (strlen(bin_dir) > 0) ? DIR_SEP : "", QUOTE_PATH, - QUOTE_PATH, pgdata_native, QUOTE_PATH); + " %s\n\n"), + start_db_cmd->data); + + destroyPQExpBuffer(start_db_cmd); return 0; } diff --git a/src/fe_utils/string_utils.c b/src/fe_utils/string_utils.c index 2c566b1ad7..edbc869e45 100644 --- a/src/fe_utils/string_utils.c +++ b/src/fe_utils/string_utils.c @@ -418,7 +418,7 @@ appendByteaLiteral(PQExpBuffer buf, const unsigned char *str, size_t length, /* * Append the given string to the shell command being built in the buffer, - * with suitable shell-style quoting to create exactly one argument. + * with shell-style quoting as needed to create exactly one argument. * * Forbid LF or CR characters, which have scant practical use beyond designing * security breaches. The Windows command shell is unusable as a conduit for @@ -429,8 +429,22 @@ appendByteaLiteral(PQExpBuffer buf, const unsigned char *str, size_t length, void appendShellString(PQExpBuffer buf, const char *str) { +#ifdef WIN32 + int backslash_run_length = 0; +#endif const char *p; + /* + * Don't bother with adding quotes if the string is nonempty and clearly + * contains only safe characters. + */ + if (*str != '\0' && + strspn(str, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_./:") == strlen(str)) + { + appendPQExpBufferStr(buf, str); + return; + } + #ifndef WIN32 appendPQExpBufferChar(buf, '\''); for (p = str; *p; p++) @@ -450,7 +464,6 @@ appendShellString(PQExpBuffer buf, const char *str) } appendPQExpBufferChar(buf, '\''); #else /* WIN32 */ - int backslash_run_length = 0; /* * A Windows system() argument experiences two layers of interpretation. -- cgit v1.2.3 From 04164deb7cb8e572302e2b43786fa24de3c40da3 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 20 Aug 2016 16:53:25 -0400 Subject: initdb now needs to reference libpq include files in MSVC builds. Fallout from commit a00c58314. Per buildfarm. --- src/tools/msvc/Mkvcbuild.pm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm index da4d9847fc..6746728616 100644 --- a/src/tools/msvc/Mkvcbuild.pm +++ b/src/tools/msvc/Mkvcbuild.pm @@ -50,7 +50,7 @@ my @contrib_excludes = ( # Set of variables for frontend modules my $frontend_defines = { 'initdb' => 'FRONTEND' }; -my @frontend_uselibpq = ('pg_ctl', 'pg_upgrade', 'pgbench', 'psql'); +my @frontend_uselibpq = ('pg_ctl', 'pg_upgrade', 'pgbench', 'psql', 'initdb'); my @frontend_uselibpgport = ( 'pg_archivecleanup', 'pg_test_fsync', 'pg_test_timing', 'pg_upgrade', -- cgit v1.2.3 From 9132c014290d02435999c81892fa8b0b384497d8 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Sun, 21 Aug 2016 22:05:57 -0400 Subject: Retire escapeConnectionParameter(). It is redundant with appendConnStrVal(), which became an extern function in commit 41f18f021a0882eccbeca62e2ed4b66c6b96e9c9. 
This changes the handling of out-of-memory and of certain inputs for which quoting is optional, but pg_basebackup has no need for unusual treatment thereof. --- src/bin/pg_basebackup/Makefile | 1 + src/bin/pg_basebackup/pg_basebackup.c | 69 ++--------------------------------- 2 files changed, 4 insertions(+), 66 deletions(-) (limited to 'src') diff --git a/src/bin/pg_basebackup/Makefile b/src/bin/pg_basebackup/Makefile index 585467205b..a23a83eb9b 100644 --- a/src/bin/pg_basebackup/Makefile +++ b/src/bin/pg_basebackup/Makefile @@ -17,6 +17,7 @@ top_builddir = ../../.. include $(top_builddir)/src/Makefile.global override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) +LDFLAGS += -L$(top_builddir)/src/fe_utils -lpgfeutils -lpq OBJS=receivelog.o streamutil.o $(WIN32RES) diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index ed41db8e6e..351a42068f 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -26,6 +26,7 @@ #endif #include "common/string.h" +#include "fe_utils/string_utils.h" #include "getopt_long.h" #include "libpq-fe.h" #include "pqexpbuffer.h" @@ -1392,69 +1393,6 @@ ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum) WriteRecoveryConf(); } -/* - * Escape a parameter value so that it can be used as part of a libpq - * connection string, e.g. in: - * - * application_name= - * - * The returned string is malloc'd. Return NULL on out-of-memory. - */ -static char * -escapeConnectionParameter(const char *src) -{ - bool need_quotes = false; - bool need_escaping = false; - const char *p; - char *dstbuf; - char *dst; - - /* - * First check if quoting is needed. Any quote (') or backslash (\) - * characters need to be escaped. Parameters are separated by whitespace, - * so any string containing whitespace characters need to be quoted. An - * empty string is represented by ''. - */ - if (strchr(src, '\'') != NULL || strchr(src, '\\') != NULL) - need_escaping = true; - - for (p = src; *p; p++) - { - if (isspace((unsigned char) *p)) - { - need_quotes = true; - break; - } - } - - if (*src == '\0') - return pg_strdup("''"); - - if (!need_quotes && !need_escaping) - return pg_strdup(src); /* no quoting or escaping needed */ - - /* - * Allocate a buffer large enough for the worst case that all the source - * characters need to be escaped, plus quotes. - */ - dstbuf = pg_malloc(strlen(src) * 2 + 2 + 1); - - dst = dstbuf; - if (need_quotes) - *(dst++) = '\''; - for (; *src; src++) - { - if (*src == '\'' || *src == '\\') - *(dst++) = '\\'; - *(dst++) = *src; - } - if (need_quotes) - *(dst++) = '\''; - *dst = '\0'; - - return dstbuf; -} - /* * Escape a string so that it can be used as a value in a key-value pair * a configuration file. @@ -1523,9 +1461,8 @@ GenerateRecoveryConf(PGconn *conn) * Write "keyword=value" pieces, the value string is escaped and/or * quoted if necessary. */ - escaped = escapeConnectionParameter(option->val); - appendPQExpBuffer(&conninfo_buf, "%s=%s", option->keyword, escaped); - free(escaped); + appendPQExpBuffer(&conninfo_buf, "%s=", option->keyword); + appendConnStrVal(&conninfo_buf, option->val); } /* -- cgit v1.2.3 From 234309fa87739f7a3ac99de815d181b50f2542e7 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 22 Aug 2016 08:01:12 -0400 Subject: initdb now needs submake-libpq and submake-libpgfeutils. More fallout from commit a00c58314. Pointed out by Michael Paquier. 
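
For reference, the GenerateRecoveryConf() hunk above now leans on appendConnStrVal() from fe_utils to quote each connection-option value instead of hand-rolled escaping. A condensed sketch of that pattern; the helper name and the keyword/value pairs in the comment are made up, only the quoting calls are the point:

#include "postgres_fe.h"
#include "pqexpbuffer.h"
#include "fe_utils/string_utils.h"

static void
append_conninfo_option(PQExpBuffer buf, const char *keyword, const char *val)
{
    if (buf->len > 0)
        appendPQExpBufferChar(buf, ' ');
    appendPQExpBuffer(buf, "%s=", keyword);
    appendConnStrVal(buf, val);     /* quotes/escapes only when needed */
}

/*
 * append_conninfo_option(buf, "host", "db1.example.com");
 * append_conninfo_option(buf, "application_name", "nightly backup");
 * would yield: host=db1.example.com application_name='nightly backup'
 */
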
--- src/bin/initdb/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/bin/initdb/Makefile b/src/bin/initdb/Makefile index 531cc979a4..394eae0875 100644 --- a/src/bin/initdb/Makefile +++ b/src/bin/initdb/Makefile @@ -30,7 +30,7 @@ OBJS= initdb.o findtimezone.o localtime.o encnames.o $(WIN32RES) all: initdb -initdb: $(OBJS) | submake-libpgport +initdb: $(OBJS) | submake-libpq submake-libpgport submake-libpgfeutils $(CC) $(CFLAGS) $(OBJS) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X) # We used to pull in all of libpq to get encnames.c, but that -- cgit v1.2.3 From f9472d72561d285e8c138f3e1276f3110f55e515 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Mon, 22 Aug 2016 12:00:00 -0400 Subject: Run select_parallel test by itself Remove the plpgsql wrapping that hides the context. So now the test will fail if the work doesn't actually happen in a parallel worker. Run the test in its own test group to ensure it won't run out of resources for that. --- src/test/regress/expected/select_parallel.out | 14 ++++---------- src/test/regress/parallel_schedule | 5 ++++- src/test/regress/sql/select_parallel.sql | 10 ++-------- 3 files changed, 10 insertions(+), 19 deletions(-) (limited to 'src') diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out index 2286fafab3..18e21b7f13 100644 --- a/src/test/regress/expected/select_parallel.out +++ b/src/test/regress/expected/select_parallel.out @@ -111,14 +111,8 @@ explain (costs off) Index Cond: (unique1 = 1) (5 rows) -do $$begin - -- Provoke error, possibly in worker. If this error happens to occur in - -- the worker, there will be a CONTEXT line which must be hidden. - perform stringu1::int2 from tenk1 where unique1 = 1; - exception - when others then - raise 'SQLERRM: %', sqlerrm; -end$$; -ERROR: SQLERRM: invalid input syntax for integer: "BAAAAA" -CONTEXT: PL/pgSQL function inline_code_block line 7 at RAISE +-- provoke error in worker +select stringu1::int2 from tenk1 where unique1 = 1; +ERROR: invalid input syntax for integer: "BAAAAA" +CONTEXT: parallel worker rollback; diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule index 3815182fe7..1cb5dfc336 100644 --- a/src/test/regress/parallel_schedule +++ b/src/test/regress/parallel_schedule @@ -92,7 +92,10 @@ test: brin gin gist spgist privileges init_privs security_label collate matview test: alter_generic alter_operator misc psql async dbsize misc_functions # rules cannot run concurrently with any test that creates a view -test: rules psql_crosstab select_parallel amutils +test: rules psql_crosstab amutils + +# run by itself so it can run parallel workers +test: select_parallel # ---------- # Another group of parallel tests diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql index 38d3166742..8b4090f2ec 100644 --- a/src/test/regress/sql/select_parallel.sql +++ b/src/test/regress/sql/select_parallel.sql @@ -44,13 +44,7 @@ set force_parallel_mode=1; explain (costs off) select stringu1::int2 from tenk1 where unique1 = 1; -do $$begin - -- Provoke error, possibly in worker. If this error happens to occur in - -- the worker, there will be a CONTEXT line which must be hidden. 
- perform stringu1::int2 from tenk1 where unique1 = 1; - exception - when others then - raise 'SQLERRM: %', sqlerrm; -end$$; +-- provoke error in worker +select stringu1::int2 from tenk1 where unique1 = 1; rollback; -- cgit v1.2.3 From af5743851d7d526fadfeb9726e2b3d8b1fc5026d Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Mon, 22 Aug 2016 11:52:43 -0400 Subject: Improve header comment for LockHasWaitersRelation. Dimitry Ivanov spotted a typo, and I added a bit of wordsmithing. --- src/backend/storage/lmgr/lmgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index 7b08555b07..eeedc38251 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -268,8 +268,8 @@ UnlockRelation(Relation relation, LOCKMODE lockmode) /* * LockHasWaitersRelation * - * This is a functiion to check if someone else is waiting on a - * lock, we are currently holding. + * This is a function to check whether someone else is waiting for a + * lock which we are currently holding. */ bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode) -- cgit v1.2.3 From 008c4135ccf67e74239a17a85f912d1a51b6349e Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Mon, 22 Aug 2016 15:22:11 -0400 Subject: Fix possible sorting error when aborting use of abbreviated keys. Due to an error in the abbreviated key abort logic, the most recently processed SortTuple could be incorrectly marked NULL, resulting in an incorrect final sort order. In the worst case, this could result in a corrupt btree index, which would need to be rebuild using REINDEX. However, abbrevation doesn't abort very often, not all data types use it, and only one tuple would end up in the wrong place, so the practical impact of this mistake may be somewhat limited. Report and patch by Peter Geoghegan. --- src/backend/utils/sort/tuplesort.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 510565c339..ae384a8546 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -1443,7 +1443,7 @@ tuplesort_putindextuplevalues(Tuplesortstate *state, Relation rel, mtup->datum1 = index_getattr(tuple, 1, RelationGetDescr(state->indexRel), - &stup.isnull1); + &mtup->isnull1); } } @@ -4271,7 +4271,7 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup) mtup->datum1 = heap_getattr(tuple, state->indexInfo->ii_KeyAttrNumbers[0], state->tupDesc, - &stup->isnull1); + &mtup->isnull1); } } } @@ -4588,7 +4588,7 @@ copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup) mtup->datum1 = index_getattr(tuple, 1, RelationGetDescr(state->indexRel), - &stup->isnull1); + &mtup->isnull1); } } } -- cgit v1.2.3 From 7b405b3e04779fc0a026c9c6ac3e06194948b253 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 23 Aug 2016 09:39:54 -0400 Subject: Refactor some network.c code to create cidr_set_masklen_internal(). Merge several copies of "copy an inet value and adjust the mask length" code to create a single, conveniently C-callable function. This function is exported for future use by inet SPGiST support, but it's good cleanup anyway since we had three slightly-different-for-no-good-reason copies. 
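
The consolidated helper (in the diff below) works by copying only the address bytes covered by the mask and zeroing everything past the mask length. That arithmetic in isolation, shown on a plain byte array rather than the inet struct (the function name and the 10.1.x.x address in the comment are just for illustration):

#include <string.h>

static void
trim_to_masklen(unsigned char *dst, const unsigned char *addr,
                int addrsize, int bits)
{
    memset(dst, 0, addrsize);       /* bytes beyond the mask stay zero */

    if (bits > 0)
    {
        /* copy the bytes touched by the mask ... */
        memcpy(dst, addr, (bits + 7) / 8);

        /* ... and clear unwanted low-order bits of a partial final byte */
        if (bits % 8)
            dst[bits / 8] &= ~(0xFF >> (bits % 8));
    }
}

/* e.g. trim_to_masklen(dst, addr, 4, 20) turns 10.1.255.255 into 10.1.240.0 */
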
(Extracted from a larger patch, to separate new code from refactoring of old code) Emre Hasegeli --- src/backend/utils/adt/network.c | 109 +++++++++++----------------------------- src/include/utils/inet.h | 5 +- 2 files changed, 33 insertions(+), 81 deletions(-) (limited to 'src') diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c index 1f8469a2cb..3f6987af04 100644 --- a/src/backend/utils/adt/network.c +++ b/src/backend/utils/adt/network.c @@ -268,11 +268,7 @@ Datum inet_to_cidr(PG_FUNCTION_ARGS) { inet *src = PG_GETARG_INET_PP(0); - inet *dst; int bits; - int byte; - int nbits; - int maxbytes; bits = ip_bits(src); @@ -280,29 +276,7 @@ inet_to_cidr(PG_FUNCTION_ARGS) if ((bits < 0) || (bits > ip_maxbits(src))) elog(ERROR, "invalid inet bit length: %d", bits); - /* clone the original data */ - dst = (inet *) palloc(VARSIZE_ANY(src)); - memcpy(dst, src, VARSIZE_ANY(src)); - - /* zero out any bits to the right of the netmask */ - byte = bits / 8; - - nbits = bits % 8; - /* clear the first byte, this might be a partial byte */ - if (nbits != 0) - { - ip_addr(dst)[byte] &= ~(0xFF >> nbits); - byte++; - } - /* clear remaining bytes */ - maxbytes = ip_addrsize(dst); - while (byte < maxbytes) - { - ip_addr(dst)[byte] = 0; - byte++; - } - - PG_RETURN_INET_P(dst); + PG_RETURN_INET_P(cidr_set_masklen_internal(src, bits)); } Datum @@ -334,10 +308,6 @@ cidr_set_masklen(PG_FUNCTION_ARGS) { inet *src = PG_GETARG_INET_PP(0); int bits = PG_GETARG_INT32(1); - inet *dst; - int byte; - int nbits; - int maxbytes; if (bits == -1) bits = ip_maxbits(src); @@ -347,31 +317,36 @@ cidr_set_masklen(PG_FUNCTION_ARGS) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid mask length: %d", bits))); - /* clone the original data */ - dst = (inet *) palloc(VARSIZE_ANY(src)); - memcpy(dst, src, VARSIZE_ANY(src)); + PG_RETURN_INET_P(cidr_set_masklen_internal(src, bits)); +} - ip_bits(dst) = bits; +/* + * Copy src and set mask length to 'bits' (which must be valid for the family) + */ +inet * +cidr_set_masklen_internal(const inet *src, int bits) +{ + inet *dst = (inet *) palloc0(sizeof(inet)); - /* zero out any bits to the right of the new netmask */ - byte = bits / 8; + ip_family(dst) = ip_family(src); + ip_bits(dst) = bits; - nbits = bits % 8; - /* clear the first byte, this might be a partial byte */ - if (nbits != 0) + if (bits > 0) { - ip_addr(dst)[byte] &= ~(0xFF >> nbits); - byte++; - } - /* clear remaining bytes */ - maxbytes = ip_addrsize(dst); - while (byte < maxbytes) - { - ip_addr(dst)[byte] = 0; - byte++; + Assert(bits <= ip_maxbits(dst)); + + /* Clone appropriate bytes of the address, leaving the rest 0 */ + memcpy(ip_addr(dst), ip_addr(src), (bits + 7) / 8); + + /* Clear any unwanted bits in the last partial byte */ + if (bits % 8) + ip_addr(dst)[bits / 8] &= ~(0xFF >> (bits % 8)); } - PG_RETURN_INET_P(dst); + /* Set varlena header correctly */ + SET_INET_VARSIZE(dst); + + return dst; } /* @@ -719,11 +694,7 @@ network_broadcast(PG_FUNCTION_ARGS) /* make sure any unused bits are zeroed */ dst = (inet *) palloc0(sizeof(inet)); - if (ip_family(ip) == PGSQL_AF_INET) - maxbytes = 4; - else - maxbytes = 16; - + maxbytes = ip_addrsize(ip); bits = ip_bits(ip); a = ip_addr(ip); b = ip_addr(dst); @@ -853,11 +824,7 @@ network_hostmask(PG_FUNCTION_ARGS) /* make sure any unused bits are zeroed */ dst = (inet *) palloc0(sizeof(inet)); - if (ip_family(ip) == PGSQL_AF_INET) - maxbytes = 4; - else - maxbytes = 16; - + maxbytes = ip_addrsize(ip); bits = ip_maxbits(ip) - ip_bits(ip); b = 
ip_addr(dst); @@ -907,8 +874,7 @@ Datum inet_merge(PG_FUNCTION_ARGS) { inet *a1 = PG_GETARG_INET_PP(0), - *a2 = PG_GETARG_INET_PP(1), - *result; + *a2 = PG_GETARG_INET_PP(1); int commonbits; if (ip_family(a1) != ip_family(a2)) @@ -919,24 +885,7 @@ inet_merge(PG_FUNCTION_ARGS) commonbits = bitncommon(ip_addr(a1), ip_addr(a2), Min(ip_bits(a1), ip_bits(a2))); - /* Make sure any unused bits are zeroed. */ - result = (inet *) palloc0(sizeof(inet)); - - ip_family(result) = ip_family(a1); - ip_bits(result) = commonbits; - - /* Clone appropriate bytes of the address. */ - if (commonbits > 0) - memcpy(ip_addr(result), ip_addr(a1), (commonbits + 7) / 8); - - /* Clean any unwanted bits in the last partial byte. */ - if (commonbits % 8 != 0) - ip_addr(result)[commonbits / 8] &= ~(0xFF >> (commonbits % 8)); - - /* Set varlena header correctly. */ - SET_INET_VARSIZE(result); - - PG_RETURN_INET_P(result); + PG_RETURN_INET_P(cidr_set_masklen_internal(a1, commonbits)); } /* diff --git a/src/include/utils/inet.h b/src/include/utils/inet.h index 2fe3ca8c3c..dfa0b9f711 100644 --- a/src/include/utils/inet.h +++ b/src/include/utils/inet.h @@ -28,10 +28,12 @@ typedef struct } inet_struct; /* + * We use these values for the "family" field. + * * Referencing all of the non-AF_INET types to AF_INET lets us work on * machines which may not have the appropriate address family (like * inet6 addresses when AF_INET6 isn't present) but doesn't cause a - * dump/reload requirement. Existing databases used AF_INET for the family + * dump/reload requirement. Pre-7.4 databases used AF_INET for the family * type on disk. */ #define PGSQL_AF_INET (AF_INET + 0) @@ -117,6 +119,7 @@ typedef struct macaddr /* * Support functions in network.c */ +extern inet *cidr_set_masklen_internal(const inet *src, int bits); extern int bitncmp(const unsigned char *l, const unsigned char *r, int n); extern int bitncommon(const unsigned char *l, const unsigned char *r, int n); -- cgit v1.2.3 From ff36700c3ba2180047b4103de440ffaa34889b72 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Tue, 23 Aug 2016 10:05:13 -0400 Subject: Remove duplicate word from comment. Erik Rijkers --- src/backend/storage/lmgr/lmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index eeedc38251..cbee20e9bf 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -33,7 +33,7 @@ * constraint violations. It's theoretically possible that a backend sees a * tuple that was speculatively inserted by another backend, but before it has * started waiting on the token, the other backend completes its insertion, - * and then then performs 2^32 unrelated insertions. And after all that, the + * and then performs 2^32 unrelated insertions. And after all that, the * first backend finally calls SpeculativeInsertionLockAcquire(), with the * intention of waiting for the first insertion to complete, but ends up * waiting for the latest unrelated insertion instead. Even then, nothing -- cgit v1.2.3 From 86f31695f3b54211226949de519063bbf248e8c4 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Tue, 23 Aug 2016 10:30:52 -0400 Subject: Add txid_current_ifassigned(). Add a variant of txid_current() that returns NULL if no transaction ID is assigned. This version can be used even on a standby server, although it will always return NULL since no transaction IDs can be assigned during recovery. Craig Ringer, per suggestion from Jim Nasby. 
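
At the C level the new SQL function is a thin wrapper around an existing backend idiom: GetTopTransactionIdIfAny() reports InvalidTransactionId when no xid has been assigned, whereas GetTopTransactionId() would force one to be assigned. A sketch of that idiom in hypothetical extension code (the helper name is invented):

#include "postgres.h"
#include "access/transam.h"
#include "access/xact.h"

/* true if the current top-level transaction already has an xid assigned */
static bool
current_xact_has_xid(void)
{
    /* unlike GetTopTransactionId(), this never assigns an xid as a side effect */
    return TransactionIdIsValid(GetTopTransactionIdIfAny());
}
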
Reviewed by Petr Jelinek and by me. --- doc/src/sgml/func.sgml | 9 +++++++++ src/backend/utils/adt/txid.c | 21 +++++++++++++++++++++ src/include/catalog/pg_proc.h | 2 ++ src/include/utils/builtins.h | 1 + src/test/regress/expected/txid.out | 16 ++++++++++++++++ src/test/regress/sql/txid.sql | 7 +++++++ 6 files changed, 56 insertions(+) (limited to 'src') diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 169a385a9c..6355300d9d 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -17119,6 +17119,10 @@ SELECT collation for ('foo' COLLATE "de_DE"); txid_current + + txid_current_if_assigned + + txid_current_snapshot @@ -17159,6 +17163,11 @@ SELECT collation for ('foo' COLLATE "de_DE"); bigint get current transaction ID, assigning a new one if the current transaction does not have one + + txid_current_if_assigned() + bigint + same as txid_current() but returns null instead of assigning an xid if none is already assigned + txid_current_snapshot() txid_snapshot diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c index c2069a9923..276075e293 100644 --- a/src/backend/utils/adt/txid.c +++ b/src/backend/utils/adt/txid.c @@ -376,6 +376,27 @@ txid_current(PG_FUNCTION_ARGS) PG_RETURN_INT64(val); } +/* + * Same as txid_current() but doesn't assign a new xid if there isn't one + * yet. + */ +Datum +txid_current_if_assigned(PG_FUNCTION_ARGS) +{ + txid val; + TxidEpoch state; + TransactionId topxid = GetTopTransactionIdIfAny(); + + if (topxid == InvalidTransactionId) + PG_RETURN_NULL(); + + load_xid_epoch(&state); + + val = convert_xid(topxid, &state); + + PG_RETURN_INT64(val); +} + /* * txid_current_snapshot() returns txid_snapshot * diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index 6fed7a0d19..050a98c397 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -4904,6 +4904,8 @@ DATA(insert OID = 2942 ( txid_snapshot_send PGNSP PGUID 12 1 0 0 0 f f f f t DESCR("I/O"); DATA(insert OID = 2943 ( txid_current PGNSP PGUID 12 1 0 0 0 f f f f t f s u 0 0 20 "" _null_ _null_ _null_ _null_ _null_ txid_current _null_ _null_ _null_ )); DESCR("get current transaction ID"); +DATA(insert OID = 3348 ( txid_current_if_assigned PGNSP PGUID 12 1 0 0 0 f f f f t f s u 0 0 20 "" _null_ _null_ _null_ _null_ _null_ txid_current_if_assigned _null_ _null_ _null_ )); +DESCR("get current transaction ID"); DATA(insert OID = 2944 ( txid_current_snapshot PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 2970 "" _null_ _null_ _null_ _null_ _null_ txid_current_snapshot _null_ _null_ _null_ )); DESCR("get current snapshot"); DATA(insert OID = 2945 ( txid_snapshot_xmin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 20 "2970" _null_ _null_ _null_ _null_ _null_ txid_snapshot_xmin _null_ _null_ _null_ )); diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index 40e25c8824..2ae212a9c3 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -1221,6 +1221,7 @@ extern Datum txid_snapshot_out(PG_FUNCTION_ARGS); extern Datum txid_snapshot_recv(PG_FUNCTION_ARGS); extern Datum txid_snapshot_send(PG_FUNCTION_ARGS); extern Datum txid_current(PG_FUNCTION_ARGS); +extern Datum txid_current_if_assigned(PG_FUNCTION_ARGS); extern Datum txid_current_snapshot(PG_FUNCTION_ARGS); extern Datum txid_snapshot_xmin(PG_FUNCTION_ARGS); extern Datum txid_snapshot_xmax(PG_FUNCTION_ARGS); diff --git a/src/test/regress/expected/txid.out b/src/test/regress/expected/txid.out index ddd217eb10..802ccb949f 100644 --- 
a/src/test/regress/expected/txid.out +++ b/src/test/regress/expected/txid.out @@ -238,3 +238,19 @@ SELECT txid_snapshot '1:9223372036854775808:3'; ERROR: invalid input syntax for type txid_snapshot: "1:9223372036854775808:3" LINE 1: SELECT txid_snapshot '1:9223372036854775808:3'; ^ +-- test txid_current_if_assigned +BEGIN; +SELECT txid_current_if_assigned() IS NULL; + ?column? +---------- + t +(1 row) + +SELECT txid_current() \gset +SELECT txid_current_if_assigned() IS NOT DISTINCT FROM BIGINT :'txid_current'; + ?column? +---------- + t +(1 row) + +COMMIT; diff --git a/src/test/regress/sql/txid.sql b/src/test/regress/sql/txid.sql index b6650b922e..4aefd9e64d 100644 --- a/src/test/regress/sql/txid.sql +++ b/src/test/regress/sql/txid.sql @@ -52,3 +52,10 @@ select txid_visible_in_snapshot('1000100010001015', '1000100010001000:1000100010 -- test 64bit overflow SELECT txid_snapshot '1:9223372036854775807:3'; SELECT txid_snapshot '1:9223372036854775808:3'; + +-- test txid_current_if_assigned +BEGIN; +SELECT txid_current_if_assigned() IS NULL; +SELECT txid_current() \gset +SELECT txid_current_if_assigned() IS NOT DISTINCT FROM BIGINT :'txid_current'; +COMMIT; -- cgit v1.2.3 From d2ddee63b43b27d6c6af169342af10db19bd3a1a Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 23 Aug 2016 12:10:25 -0400 Subject: Improve SP-GiST opclass API to better support unlabeled nodes. Previously, the spgSplitTuple action could only create a new upper tuple containing a single labeled node. This made it useless for opclasses that prefer to work with fixed sets of nodes (labeled or otherwise), which meant that restrictive prefixes could not be used with such node definitions. Change the output field set for the choose() method to allow it to specify any valid node set for the new upper tuple, and to specify which of these nodes to place the modified lower tuple in. In addition to its primary use for fixed node sets, this feature could allow existing opclasses that use variable node sets to skip a separate spgAddNode action when splitting a tuple, by setting up the node needed for the incoming value as part of the spgSplitTuple action. However, care would have to be taken to add the extra node only when it would not make the tuple bigger than before. (spgAddNode can enlarge the tuple, spgSplitTuple can't.) This is a prerequisite for an upcoming SP-GiST inet opclass, but is being committed separately to increase the visibility of the API change. In passing, improve the documentation about the traverse-values feature that was added by commit ccd6eb49a. Emre Hasegeli, with cosmetic adjustments and documentation rework by me Discussion: --- doc/src/sgml/spgist.sgml | 115 ++++++++++++++++++-------------- src/backend/access/spgist/spgdoinsert.c | 39 +++++++++-- src/backend/access/spgist/spgtextproc.c | 12 +++- src/include/access/spgist.h | 12 ++-- 4 files changed, 115 insertions(+), 63 deletions(-) (limited to 'src') diff --git a/doc/src/sgml/spgist.sgml b/doc/src/sgml/spgist.sgml index f40c790612..dfa62adb55 100644 --- a/doc/src/sgml/spgist.sgml +++ b/doc/src/sgml/spgist.sgml @@ -114,7 +114,7 @@ box_ops - box + box << &< @@ -183,11 +183,14 @@ Inner tuples are more complex, since they are branching points in the search tree. Each inner tuple contains a set of one or more nodes, which represent groups of similar leaf values. - A node contains a downlink that leads to either another, lower-level inner - tuple, or a short list of leaf tuples that all lie on the same index page. 
- Each node has a label that describes it; for example, + A node contains a downlink that leads either to another, lower-level inner + tuple, or to a short list of leaf tuples that all lie on the same index page. + Each node normally has a label that describes it; for example, in a radix tree the node label could be the next character of the string - value. Optionally, an inner tuple can have a prefix value + value. (Alternatively, an operator class can omit the node labels, if it + works with a fixed set of nodes for all inner tuples; + see .) + Optionally, an inner tuple can have a prefix value that describes all its members. In a radix tree this could be the common prefix of the represented strings. The prefix value is not necessarily really a prefix, but can be any data needed by the operator class; @@ -202,7 +205,8 @@ tuple, so the SP-GiST core provides the possibility for operator classes to manage level counting while descending the tree. There is also support for incrementally reconstructing the represented - value when that is needed. + value when that is needed, and for passing down additional data (called + traverse values) during a tree descent. @@ -343,10 +347,13 @@ typedef struct spgChooseOut } addNode; struct /* results for spgSplitTuple */ { - /* Info to form new inner tuple with one node */ + /* Info to form new upper-level inner tuple with one child tuple */ bool prefixHasPrefix; /* tuple should have a prefix? */ Datum prefixPrefixDatum; /* if so, its value */ - Datum nodeLabel; /* node's label */ + int prefixNNodes; /* number of nodes */ + Datum *prefixNodeLabels; /* their labels (or NULL for + * no labels) */ + int childNodeN; /* which node gets child tuple */ /* Info to form new lower-level inner tuple with all old nodes */ bool postfixHasPrefix; /* tuple should have a prefix? */ @@ -416,29 +423,33 @@ typedef struct spgChooseOut set resultType to spgSplitTuple. This action moves all the existing nodes into a new lower-level inner tuple, and replaces the existing inner tuple with a tuple - having a single node that links to the new lower-level inner tuple. + having a single downlink pointing to the new lower-level inner tuple. Set prefixHasPrefix to indicate whether the new upper tuple should have a prefix, and if so set prefixPrefixDatum to the prefix value. This new prefix value must be sufficiently less restrictive than the original - to accept the new value to be indexed, and it should be no longer - than the original prefix. - Set nodeLabel to the label to be used for the - node that will point to the new lower-level inner tuple. + to accept the new value to be indexed. + Set prefixNNodes to the number of nodes needed in the + new tuple, and set prefixNodeLabels to a palloc'd array + holding their labels, or to NULL if node labels are not required. + Note that the total size of the new upper tuple must be no more + than the total size of the tuple it is replacing; this constrains + the lengths of the new prefix and new labels. + Set childNodeN to the index (from zero) of the node + that will downlink to the new lower-level inner tuple. Set postfixHasPrefix to indicate whether the new lower-level inner tuple should have a prefix, and if so set postfixPrefixDatum to the prefix value. The - combination of these two prefixes and the additional label must - have the same meaning as the original prefix, because there is - no opportunity to alter the node labels that are moved to the new - lower-level tuple, nor to change any child index entries. 
+ combination of these two prefixes and the downlink node's label + (if any) must have the same meaning as the original prefix, because + there is no opportunity to alter the node labels that are moved to + the new lower-level tuple, nor to change any child index entries. After the node has been split, the choose function will be called again with the replacement inner tuple. - That call will usually result in an spgAddNode result, - since presumably the node label added in the split step will not - match the new value; so after that, there will be a third call - that finally returns spgMatchNode and allows the - insertion to descend to the leaf level. + That call may return an spgAddNode result, if no suitable + node was created by the spgSplitTuple action. Eventually + choose must return spgMatchNode to + allow the insertion to descend to the next level. @@ -492,9 +503,8 @@ typedef struct spgPickSplitOut prefixDatum to the prefix value. Set nNodes to indicate the number of nodes that the new inner tuple will contain, and - set nodeLabels to an array of their label values. - (If the nodes do not require labels, set nodeLabels - to NULL; see for details.) + set nodeLabels to an array of their label values, + or to NULL if node labels are not required. Set mapTuplesToNodes to an array that gives the index (from zero) of the node that each leaf tuple should be assigned to. Set leafTupleDatums to an array of the values to @@ -561,7 +571,7 @@ typedef struct spgInnerConsistentIn Datum reconstructedValue; /* value reconstructed at parent */ void *traversalValue; /* opclass-specific traverse value */ - MemoryContext traversalMemoryContext; + MemoryContext traversalMemoryContext; /* put new traverse values here */ int level; /* current level (counting from zero) */ bool returnData; /* original data must be returned? */ @@ -580,7 +590,6 @@ typedef struct spgInnerConsistentOut int *levelAdds; /* increment level by this much for each */ Datum *reconstructedValues; /* associated reconstructed values */ void **traversalValues; /* opclass-specific traverse values */ - } spgInnerConsistentOut; @@ -599,6 +608,11 @@ typedef struct spgInnerConsistentOut parent tuple; it is (Datum) 0 at the root level or if the inner_consistent function did not provide a value at the parent level. + traversalValue is a pointer to any traverse data + passed down from the previous call of inner_consistent + on the parent index tuple, or NULL at the root level. + traversalMemoryContext is the memory context in which + to store output traverse values (see below). level is the current inner tuple's level, starting at zero for the root level. returnData is true if reconstructed data is @@ -615,9 +629,6 @@ typedef struct spgInnerConsistentOut inner tuple, and nodeLabels is an array of their label values, or NULL if the nodes do not have labels. - traversalValue is a pointer to data that - inner_consistent gets when called on child nodes from an - outer call of inner_consistent on parent nodes. @@ -633,17 +644,19 @@ typedef struct spgInnerConsistentOut reconstructedValues to an array of the values reconstructed for each child node to be visited; otherwise, leave reconstructedValues as NULL. + If it is desired to pass down additional out-of-band information + (traverse values) to lower levels of the tree search, + set traversalValues to an array of the appropriate + traverse values, one for each child node to be visited; otherwise, + leave traversalValues as NULL. 
Note that the inner_consistent function is responsible for palloc'ing the - nodeNumbers, levelAdds and - reconstructedValues arrays. - Sometimes accumulating some information is needed, while - descending from parent to child node was happened. In this case - traversalValues array keeps pointers to - specific data you need to accumulate for every child node. - Memory for traversalValues should be allocated in - the default context, but each element of it should be allocated in - traversalMemoryContext. + nodeNumbers, levelAdds, + reconstructedValues, and + traversalValues arrays in the current memory context. + However, any output traverse values pointed to by + the traversalValues array should be allocated + in traversalMemoryContext. @@ -670,8 +683,8 @@ typedef struct spgLeafConsistentIn ScanKey scankeys; /* array of operators and comparison values */ int nkeys; /* length of array */ - void *traversalValue; /* opclass-specific traverse value */ Datum reconstructedValue; /* value reconstructed at parent */ + void *traversalValue; /* opclass-specific traverse value */ int level; /* current level (counting from zero) */ bool returnData; /* original data must be returned? */ @@ -700,6 +713,9 @@ typedef struct spgLeafConsistentOut parent tuple; it is (Datum) 0 at the root level or if the inner_consistent function did not provide a value at the parent level. + traversalValue is a pointer to any traverse data + passed down from the previous call of inner_consistent + on the parent index tuple, or NULL at the root level. level is the current leaf tuple's level, starting at zero for the root level. returnData is true if reconstructed data is @@ -797,7 +813,10 @@ typedef struct spgLeafConsistentOut point. In such a case the code typically works with the nodes by number, and there is no need for explicit node labels. To suppress node labels (and thereby save some space), the picksplit - function can return NULL for the nodeLabels array. + function can return NULL for the nodeLabels array, + and likewise the choose function can return NULL for + the prefixNodeLabels array during + a spgSplitTuple action. This will in turn result in nodeLabels being NULL during subsequent calls to choose and inner_consistent. In principle, node labels could be used for some inner tuples and omitted @@ -807,10 +826,7 @@ typedef struct spgLeafConsistentOut When working with an inner tuple having unlabeled nodes, it is an error for choose to return spgAddNode, since the set - of nodes is supposed to be fixed in such cases. Also, there is no - provision for generating an unlabeled node in spgSplitTuple - actions, since it is expected that an spgAddNode action will - be needed as well. + of nodes is supposed to be fixed in such cases. @@ -859,11 +875,10 @@ typedef struct spgLeafConsistentOut The PostgreSQL source distribution includes - several examples of index operator classes for - SP-GiST. The core system currently provides radix - trees over text columns and two types of trees over points: quad-tree and - k-d tree. Look into src/backend/access/spgist/ to see the - code. + several examples of index operator classes for SP-GiST, + as described in . Look + into src/backend/access/spgist/ + and src/backend/utils/adt/ to see the code. 
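
To make the new spgSplitTuple output fields concrete, here is how a choose() function for a hypothetical operator class that always uses a fixed set of four unlabeled nodes (say, a quadrant-style tree) might fill them in. emit_split(), new_prefix, and which_quadrant are invented names, and a real implementation must still ensure the new upper tuple fits in the space of the tuple it replaces:

#include "postgres.h"
#include "access/spgist.h"

static void
emit_split(spgChooseOut *out, Datum new_prefix, int which_quadrant)
{
    out->resultType = spgSplitTuple;

    /* new upper-level tuple: a less restrictive prefix, four unlabeled nodes */
    out->result.splitTuple.prefixHasPrefix = true;
    out->result.splitTuple.prefixPrefixDatum = new_prefix;
    out->result.splitTuple.prefixNNodes = 4;
    out->result.splitTuple.prefixNodeLabels = NULL;      /* no node labels */
    out->result.splitTuple.childNodeN = which_quadrant;  /* downlink to child */

    /* new lower-level tuple keeps all the old nodes and needs no extra prefix */
    out->result.splitTuple.postfixHasPrefix = false;
}
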
diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index f090ca528b..6fc04b224d 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -1705,17 +1705,40 @@ spgSplitNodeAction(Relation index, SpGistState *state, /* Should not be applied to nulls */ Assert(!SpGistPageStoresNulls(current->page)); + /* Check opclass gave us sane values */ + if (out->result.splitTuple.prefixNNodes <= 0 || + out->result.splitTuple.prefixNNodes > SGITMAXNNODES) + elog(ERROR, "invalid number of prefix nodes: %d", + out->result.splitTuple.prefixNNodes); + if (out->result.splitTuple.childNodeN < 0 || + out->result.splitTuple.childNodeN >= + out->result.splitTuple.prefixNNodes) + elog(ERROR, "invalid child node number: %d", + out->result.splitTuple.childNodeN); + /* - * Construct new prefix tuple, containing a single node with the specified - * label. (We'll update the node's downlink to point to the new postfix - * tuple, below.) + * Construct new prefix tuple with requested number of nodes. We'll fill + * in the childNodeN'th node's downlink below. */ - node = spgFormNodeTuple(state, out->result.splitTuple.nodeLabel, false); + nodes = (SpGistNodeTuple *) palloc(sizeof(SpGistNodeTuple) * + out->result.splitTuple.prefixNNodes); + + for (i = 0; i < out->result.splitTuple.prefixNNodes; i++) + { + Datum label = (Datum) 0; + bool labelisnull; + + labelisnull = (out->result.splitTuple.prefixNodeLabels == NULL); + if (!labelisnull) + label = out->result.splitTuple.prefixNodeLabels[i]; + nodes[i] = spgFormNodeTuple(state, label, labelisnull); + } prefixTuple = spgFormInnerTuple(state, out->result.splitTuple.prefixHasPrefix, out->result.splitTuple.prefixPrefixDatum, - 1, &node); + out->result.splitTuple.prefixNNodes, + nodes); /* it must fit in the space that innerTuple now occupies */ if (prefixTuple->size > innerTuple->size) @@ -1807,10 +1830,12 @@ spgSplitNodeAction(Relation index, SpGistState *state, * the postfix tuple first.) We have to update the local copy of the * prefixTuple too, because that's what will be written to WAL. 
*/ - spgUpdateNodeLink(prefixTuple, 0, postfixBlkno, postfixOffset); + spgUpdateNodeLink(prefixTuple, out->result.splitTuple.childNodeN, + postfixBlkno, postfixOffset); prefixTuple = (SpGistInnerTuple) PageGetItem(current->page, PageGetItemId(current->page, current->offnum)); - spgUpdateNodeLink(prefixTuple, 0, postfixBlkno, postfixOffset); + spgUpdateNodeLink(prefixTuple, out->result.splitTuple.childNodeN, + postfixBlkno, postfixOffset); MarkBufferDirty(current->buffer); diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c index e0d8f30ef1..852a9b00fa 100644 --- a/src/backend/access/spgist/spgtextproc.c +++ b/src/backend/access/spgist/spgtextproc.c @@ -212,9 +212,14 @@ spg_text_choose(PG_FUNCTION_ARGS) out->result.splitTuple.prefixPrefixDatum = formTextDatum(prefixStr, commonLen); } - out->result.splitTuple.nodeLabel = + out->result.splitTuple.prefixNNodes = 1; + out->result.splitTuple.prefixNodeLabels = + (Datum *) palloc(sizeof(Datum)); + out->result.splitTuple.prefixNodeLabels[0] = Int16GetDatum(*(unsigned char *) (prefixStr + commonLen)); + out->result.splitTuple.childNodeN = 0; + if (prefixSize - commonLen == 1) { out->result.splitTuple.postfixHasPrefix = false; @@ -280,7 +285,10 @@ spg_text_choose(PG_FUNCTION_ARGS) out->resultType = spgSplitTuple; out->result.splitTuple.prefixHasPrefix = in->hasPrefix; out->result.splitTuple.prefixPrefixDatum = in->prefixDatum; - out->result.splitTuple.nodeLabel = Int16GetDatum(-2); + out->result.splitTuple.prefixNNodes = 1; + out->result.splitTuple.prefixNodeLabels = (Datum *) palloc(sizeof(Datum)); + out->result.splitTuple.prefixNodeLabels[0] = Int16GetDatum(-2); + out->result.splitTuple.childNodeN = 0; out->result.splitTuple.postfixHasPrefix = false; } else diff --git a/src/include/access/spgist.h b/src/include/access/spgist.h index f39a2d6938..a953a5a401 100644 --- a/src/include/access/spgist.h +++ b/src/include/access/spgist.h @@ -90,10 +90,13 @@ typedef struct spgChooseOut } addNode; struct /* results for spgSplitTuple */ { - /* Info to form new inner tuple with one node */ + /* Info to form new upper-level inner tuple with one child tuple */ bool prefixHasPrefix; /* tuple should have a prefix? */ Datum prefixPrefixDatum; /* if so, its value */ - Datum nodeLabel; /* node's label */ + int prefixNNodes; /* number of nodes */ + Datum *prefixNodeLabels; /* their labels (or NULL for + * no labels) */ + int childNodeN; /* which node gets child tuple */ /* Info to form new lower-level inner tuple with all old nodes */ bool postfixHasPrefix; /* tuple should have a prefix? */ @@ -134,7 +137,8 @@ typedef struct spgInnerConsistentIn Datum reconstructedValue; /* value reconstructed at parent */ void *traversalValue; /* opclass-specific traverse value */ - MemoryContext traversalMemoryContext; + MemoryContext traversalMemoryContext; /* put new traverse values + * here */ int level; /* current level (counting from zero) */ bool returnData; /* original data must be returned? */ @@ -163,8 +167,8 @@ typedef struct spgLeafConsistentIn ScanKey scankeys; /* array of operators and comparison values */ int nkeys; /* length of array */ - void *traversalValue; /* opclass-specific traverse value */ Datum reconstructedValue; /* value reconstructed at parent */ + void *traversalValue; /* opclass-specific traverse value */ int level; /* current level (counting from zero) */ bool returnData; /* original data must be returned? 
*/ -- cgit v1.2.3 From 19998730aea97137e2516af0516c683a1261ba1f Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Tue, 23 Aug 2016 13:44:18 -0400 Subject: Remove duplicate function prototype. Kyotaro Horiguchi --- src/backend/libpq/pqcomm.c | 1 - 1 file changed, 1 deletion(-) (limited to 'src') diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index ba42753c06..90b6946b38 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -145,7 +145,6 @@ static void socket_startcopyout(void); static void socket_endcopyout(bool errorAbort); static int internal_putbytes(const char *s, size_t len); static int internal_flush(void); -static void socket_set_nonblocking(bool nonblocking); #ifdef HAVE_UNIX_SOCKETS static int Lock_AF_UNIX(char *unixSocketDir, char *unixSocketPath); -- cgit v1.2.3 From 0fda682e542c9acd368588e50a1993fecd3b73e2 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Tue, 23 Aug 2016 14:32:23 -0400 Subject: Extend dsm API with a new function dsm_unpin_segment. If you have previously pinned a segment and decide that you don't actually want to keep it around until shutdown, this new API lets you remove the pin. This is pretty trivial except on Windows, where it requires closing the duplicate handle that was used to implement the pin. Thomas Munro and Amit Kapila, reviewed by Amit Kapila and by me. --- src/backend/storage/ipc/dsm.c | 117 +++++++++++++++++++++++++++++++++++-- src/backend/storage/ipc/dsm_impl.c | 57 +++++++++++++++++- src/include/storage/dsm.h | 1 + src/include/storage/dsm_impl.h | 4 +- 4 files changed, 169 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/backend/storage/ipc/dsm.c b/src/backend/storage/ipc/dsm.c index 47f2bea0be..d8066647a0 100644 --- a/src/backend/storage/ipc/dsm.c +++ b/src/backend/storage/ipc/dsm.c @@ -82,6 +82,8 @@ typedef struct dsm_control_item { dsm_handle handle; uint32 refcnt; /* 2+ = active, 1 = moribund, 0 = gone */ + void *impl_private_pm_handle; /* only needed on Windows */ + bool pinned; } dsm_control_item; /* Layout of the dynamic shared memory control segment. */ @@ -491,6 +493,8 @@ dsm_create(Size size, int flags) dsm_control->item[i].handle = seg->handle; /* refcnt of 1 triggers destruction, so start at 2 */ dsm_control->item[i].refcnt = 2; + dsm_control->item[i].impl_private_pm_handle = NULL; + dsm_control->item[i].pinned = false; seg->control_slot = i; LWLockRelease(DynamicSharedMemoryControlLock); return seg; @@ -520,6 +524,8 @@ dsm_create(Size size, int flags) dsm_control->item[nitems].handle = seg->handle; /* refcnt of 1 triggers destruction, so start at 2 */ dsm_control->item[nitems].refcnt = 2; + dsm_control->item[nitems].impl_private_pm_handle = NULL; + dsm_control->item[nitems].pinned = false; seg->control_slot = nitems; dsm_control->nitems++; LWLockRelease(DynamicSharedMemoryControlLock); @@ -760,6 +766,9 @@ dsm_detach(dsm_segment *seg) /* If new reference count is 1, try to destroy the segment. */ if (refcnt == 1) { + /* A pinned segment should never reach 1. */ + Assert(!dsm_control->item[control_slot].pinned); + /* * If we fail to destroy the segment here, or are killed before we * finish doing so, the reference count will remain at 1, which @@ -830,11 +839,11 @@ dsm_unpin_mapping(dsm_segment *seg) } /* - * Keep a dynamic shared memory segment until postmaster shutdown. + * Keep a dynamic shared memory segment until postmaster shutdown, or until + * dsm_unpin_segment is called. 
* - * This function should not be called more than once per segment; - * on Windows, doing so will create unnecessary handles which will - * consume system resources to no benefit. + * This function should not be called more than once per segment, unless the + * segment is explicitly unpinned with dsm_unpin_segment in between calls. * * Note that this function does not arrange for the current process to * keep the segment mapped indefinitely; if that behavior is desired, @@ -844,16 +853,112 @@ dsm_unpin_mapping(dsm_segment *seg) void dsm_pin_segment(dsm_segment *seg) { + void *handle; + /* * Bump reference count for this segment in shared memory. This will * ensure that even if there is no session which is attached to this - * segment, it will remain until postmaster shutdown. + * segment, it will remain until postmaster shutdown or an explicit call + * to unpin. */ LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE); + if (dsm_control->item[seg->control_slot].pinned) + elog(ERROR, "cannot pin a segment that is already pinned"); + dsm_impl_pin_segment(seg->handle, seg->impl_private, &handle); + dsm_control->item[seg->control_slot].pinned = true; dsm_control->item[seg->control_slot].refcnt++; + dsm_control->item[seg->control_slot].impl_private_pm_handle = handle; LWLockRelease(DynamicSharedMemoryControlLock); +} + +/* + * Unpin a dynamic shared memory segment that was previously pinned with + * dsm_pin_segment. This function should not be called unless dsm_pin_segment + * was previously called for this segment. + * + * The argument is a dsm_handle rather than a dsm_segment in case you want + * to unpin a segment to which you haven't attached. This turns out to be + * useful if, for example, a reference to one shared memory segment is stored + * within another shared memory segment. You might want to unpin the + * referenced segment before destroying the referencing segment. + */ +void +dsm_unpin_segment(dsm_handle handle) +{ + uint32 control_slot = INVALID_CONTROL_SLOT; + bool destroy = false; + uint32 i; - dsm_impl_pin_segment(seg->handle, seg->impl_private); + /* Find the control slot for the given handle. */ + LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE); + for (i = 0; i < dsm_control->nitems; ++i) + { + /* Skip unused slots. */ + if (dsm_control->item[i].refcnt == 0) + continue; + + /* If we've found our handle, we can stop searching. */ + if (dsm_control->item[i].handle == handle) + { + control_slot = i; + break; + } + } + + /* + * We should definitely have found the slot, and it should not already be + * in the process of going away, because this function should only be + * called on a segment which is pinned. + */ + if (control_slot == INVALID_CONTROL_SLOT) + elog(ERROR, "cannot unpin unknown segment handle"); + if (!dsm_control->item[control_slot].pinned) + elog(ERROR, "cannot unpin a segment that is not pinned"); + Assert(dsm_control->item[control_slot].refcnt > 1); + + /* + * Allow implementation-specific code to run. We have to do this before + * releasing the lock, because impl_private_pm_handle may get modified by + * dsm_impl_unpin_segment. + */ + dsm_impl_unpin_segment(handle, + &dsm_control->item[control_slot].impl_private_pm_handle); + + /* Note that 1 means no references (0 means unused slot). */ + if (--dsm_control->item[control_slot].refcnt == 1) + destroy = true; + dsm_control->item[control_slot].pinned = false; + + /* Now we can release the lock. 
*/ + LWLockRelease(DynamicSharedMemoryControlLock); + + /* Clean up resources if that was the last reference. */ + if (destroy) + { + void *junk_impl_private = NULL; + void *junk_mapped_address = NULL; + Size junk_mapped_size = 0; + + /* + * For an explanation of how error handling works in this case, see + * comments in dsm_detach. Note that if we reach this point, the + * current process certainly does not have the segment mapped, because + * if it did, the reference count would have still been greater than 1 + * even after releasing the reference count held by the pin. The fact + * that there can't be a dsm_segment for this handle makes it OK to + * pass the mapped size, mapped address, and private data as NULL + * here. + */ + if (dsm_impl_op(DSM_OP_DESTROY, handle, 0, &junk_impl_private, + &junk_mapped_address, &junk_mapped_size, WARNING)) + { + LWLockAcquire(DynamicSharedMemoryControlLock, LW_EXCLUSIVE); + Assert(dsm_control->item[control_slot].handle == handle); + Assert(dsm_control->item[control_slot].refcnt == 1); + dsm_control->item[control_slot].refcnt = 0; + LWLockRelease(DynamicSharedMemoryControlLock); + } + } } /* diff --git a/src/backend/storage/ipc/dsm_impl.c b/src/backend/storage/ipc/dsm_impl.c index 173b982219..c07a5c6b15 100644 --- a/src/backend/storage/ipc/dsm_impl.c +++ b/src/backend/storage/ipc/dsm_impl.c @@ -987,8 +987,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, #endif /* - * Implementation-specific actions that must be performed when a segment - * is to be preserved until postmaster shutdown. + * Implementation-specific actions that must be performed when a segment is to + * be preserved even when no backend has it attached. * * Except on Windows, we don't need to do anything at all. But since Windows * cleans up segments automatically when no references remain, we duplicate @@ -996,7 +996,8 @@ dsm_impl_mmap(dsm_op op, dsm_handle handle, Size request_size, * do anything to receive the handle; Windows transfers it automatically. */ void -dsm_impl_pin_segment(dsm_handle handle, void *impl_private) +dsm_impl_pin_segment(dsm_handle handle, void *impl_private, + void **impl_private_pm_handle) { switch (dynamic_shared_memory_type) { @@ -1018,6 +1019,56 @@ dsm_impl_pin_segment(dsm_handle handle, void *impl_private) errmsg("could not duplicate handle for \"%s\": %m", name))); } + + /* + * Here, we remember the handle that we created in the + * postmaster process. This handle isn't actually usable in + * any process other than the postmaster, but that doesn't + * matter. We're just holding onto it so that, if the segment + * is unpinned, dsm_impl_unpin_segment can close it. + */ + *impl_private_pm_handle = hmap; + break; + } +#endif + default: + break; + } +} + +/* + * Implementation-specific actions that must be performed when a segment is no + * longer to be preserved, so that it will be cleaned up when all backends + * have detached from it. + * + * Except on Windows, we don't need to do anything at all. For Windows, we + * close the extra handle that dsm_impl_pin_segment created in the + * postmaster's process space. 
+ */ +void +dsm_impl_unpin_segment(dsm_handle handle, void **impl_private) +{ + switch (dynamic_shared_memory_type) + { +#ifdef USE_DSM_WINDOWS + case DSM_IMPL_WINDOWS: + { + if (*impl_private && + !DuplicateHandle(PostmasterHandle, *impl_private, + NULL, NULL, 0, FALSE, + DUPLICATE_CLOSE_SOURCE)) + { + char name[64]; + + snprintf(name, 64, "%s.%u", SEGMENT_NAME_PREFIX, handle); + _dosmaperr(GetLastError()); + ereport(ERROR, + (errcode_for_dynamic_shared_memory(), + errmsg("could not duplicate handle for \"%s\": %m", + name))); + } + + *impl_private = NULL; break; } #endif diff --git a/src/include/storage/dsm.h b/src/include/storage/dsm.h index 86ede7a7e7..8be7c9aeeb 100644 --- a/src/include/storage/dsm.h +++ b/src/include/storage/dsm.h @@ -41,6 +41,7 @@ extern void dsm_detach(dsm_segment *seg); extern void dsm_pin_mapping(dsm_segment *seg); extern void dsm_unpin_mapping(dsm_segment *seg); extern void dsm_pin_segment(dsm_segment *seg); +extern void dsm_unpin_segment(dsm_handle h); extern dsm_segment *dsm_find_mapping(dsm_handle h); /* Informational functions. */ diff --git a/src/include/storage/dsm_impl.h b/src/include/storage/dsm_impl.h index ec05e22a6b..e44b477075 100644 --- a/src/include/storage/dsm_impl.h +++ b/src/include/storage/dsm_impl.h @@ -73,6 +73,8 @@ extern bool dsm_impl_op(dsm_op op, dsm_handle handle, Size request_size, extern bool dsm_impl_can_resize(void); /* Implementation-dependent actions required to keep segment until shutdown. */ -extern void dsm_impl_pin_segment(dsm_handle handle, void *impl_private); +extern void dsm_impl_pin_segment(dsm_handle handle, void *impl_private, + void **impl_private_pm_handle); +extern void dsm_impl_unpin_segment(dsm_handle handle, void **impl_private); #endif /* DSM_IMPL_H */ -- cgit v1.2.3 From 77e2906821e2aec3c0807866a84c2934feeac8be Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 23 Aug 2016 15:16:21 -0400 Subject: Create an SP-GiST opclass for inet/cidr. This seems to offer significantly better search performance than the existing GiST opclass for inet/cidr, at least on data with a wide mix of network mask lengths. (That may suggest that the data splitting heuristics in the GiST opclass could be improved.) 
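As a usage illustration for the dsm_unpin_segment() API added by the commit above, consider the following hedged sketch; it is not taken from the patch. The segment size, flags, and the decision points are arbitrary, while dsm_create, dsm_segment_handle, dsm_pin_segment, dsm_detach, and dsm_unpin_segment are the functions declared in storage/dsm.h.

    #include "postgres.h"
    #include "storage/dsm.h"

    /*
     * Illustrative only: create a segment that should outlive every backend,
     * then later decide it need not survive until postmaster shutdown.
     */
    static void
    example_pin_then_unpin(void)
    {
        dsm_segment *seg;
        dsm_handle   handle;

        seg = dsm_create(8192, 0);          /* size and flags are arbitrary here */
        handle = dsm_segment_handle(seg);

        /* Keep the segment alive even after every backend detaches ... */
        dsm_pin_segment(seg);
        dsm_detach(seg);

        /*
         * ... and later, possibly from a different backend, remove that pin.
         * If nothing is attached at this point, the segment is destroyed.
         */
        dsm_unpin_segment(handle);
    }

Taking a dsm_handle rather than a dsm_segment is what lets a process that never attached the segment remove the pin, as the commit's comments explain.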
Emre Hasegeli, with mostly-cosmetic adjustments by me Discussion: --- doc/src/sgml/spgist.sgml | 17 + src/backend/utils/adt/Makefile | 2 +- src/backend/utils/adt/network_spgist.c | 708 +++++++++++++++++++++++++++++++ src/include/catalog/catversion.h | 2 +- src/include/catalog/pg_amop.h | 15 + src/include/catalog/pg_amproc.h | 5 + src/include/catalog/pg_opclass.h | 1 + src/include/catalog/pg_opfamily.h | 1 + src/include/catalog/pg_proc.h | 12 + src/include/utils/inet.h | 9 + src/test/regress/expected/inet.out | 148 +++++++ src/test/regress/expected/opr_sanity.out | 11 +- src/test/regress/sql/inet.sql | 23 + 13 files changed, 951 insertions(+), 3 deletions(-) create mode 100644 src/backend/utils/adt/network_spgist.c (limited to 'src') diff --git a/doc/src/sgml/spgist.sgml b/doc/src/sgml/spgist.sgml index dfa62adb55..d60aa23f33 100644 --- a/doc/src/sgml/spgist.sgml +++ b/doc/src/sgml/spgist.sgml @@ -145,6 +145,23 @@ ~>~ + + inet_ops + inet, cidr + + && + >> + >>= + > + >= + <> + << + <<= + < + <= + = + + diff --git a/src/backend/utils/adt/Makefile b/src/backend/utils/adt/Makefile index b9e217ae49..0f512753e4 100644 --- a/src/backend/utils/adt/Makefile +++ b/src/backend/utils/adt/Makefile @@ -17,7 +17,7 @@ OBJS = acl.o amutils.o arrayfuncs.o array_expanded.o array_selfuncs.o \ geo_ops.o geo_selfuncs.o geo_spgist.o inet_cidr_ntop.o inet_net_pton.o \ int.o int8.o json.o jsonb.o jsonb_gin.o jsonb_op.o jsonb_util.o \ jsonfuncs.o like.o lockfuncs.o mac.o misc.o nabstime.o name.o \ - network.o network_gist.o network_selfuncs.o \ + network.o network_gist.o network_selfuncs.o network_spgist.o \ numeric.o numutils.o oid.o oracle_compat.o \ orderedsetaggs.o pg_locale.o pg_lsn.o pg_upgrade_support.o \ pgstatfuncs.o \ diff --git a/src/backend/utils/adt/network_spgist.c b/src/backend/utils/adt/network_spgist.c new file mode 100644 index 0000000000..708ae899ac --- /dev/null +++ b/src/backend/utils/adt/network_spgist.c @@ -0,0 +1,708 @@ +/*------------------------------------------------------------------------- + * + * network_spgist.c + * SP-GiST support for network types. + * + * We split inet index entries first by address family (IPv4 or IPv6). + * If the entries below a given inner tuple are all of the same family, + * we identify their common prefix and split by the next bit of the address, + * and by whether their masklens exceed the length of the common prefix. + * + * An inner tuple that has both IPv4 and IPv6 children has a null prefix + * and exactly two nodes, the first being for IPv4 and the second for IPv6. + * + * Otherwise, the prefix is a CIDR value representing the common prefix, + * and there are exactly four nodes. Node numbers 0 and 1 are for addresses + * with the same masklen as the prefix, while node numbers 2 and 3 are for + * addresses with larger masklen. (We do not allow a tuple to contain + * entries with masklen smaller than its prefix's.) Node numbers 0 and 1 + * are distinguished by the next bit of the address after the common prefix, + * and likewise for node numbers 2 and 3. If there are no more bits in + * the address family, everything goes into node 0 (which will probably + * lead to creating an allTheSame tuple). 
+ * + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * src/backend/utils/adt/network_spgist.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/spgist.h" +#include "catalog/pg_type.h" +#include "utils/inet.h" + + +static int inet_spg_node_number(const inet *val, int commonbits); +static int inet_spg_consistent_bitmap(const inet *prefix, int nkeys, + ScanKey scankeys, bool leaf); + +/* + * The SP-GiST configuration function + */ +Datum +inet_spg_config(PG_FUNCTION_ARGS) +{ + /* spgConfigIn *cfgin = (spgConfigIn *) PG_GETARG_POINTER(0); */ + spgConfigOut *cfg = (spgConfigOut *) PG_GETARG_POINTER(1); + + cfg->prefixType = CIDROID; + cfg->labelType = VOIDOID; + cfg->canReturnData = true; + cfg->longValuesOK = false; + + PG_RETURN_VOID(); +} + +/* + * The SP-GiST choose function + */ +Datum +inet_spg_choose(PG_FUNCTION_ARGS) +{ + spgChooseIn *in = (spgChooseIn *) PG_GETARG_POINTER(0); + spgChooseOut *out = (spgChooseOut *) PG_GETARG_POINTER(1); + inet *val = DatumGetInetPP(in->datum), + *prefix; + int commonbits; + + /* + * If we're looking at a tuple that splits by address family, choose the + * appropriate subnode. + */ + if (!in->hasPrefix) + { + /* allTheSame isn't possible for such a tuple */ + Assert(!in->allTheSame); + Assert(in->nNodes == 2); + + out->resultType = spgMatchNode; + out->result.matchNode.nodeN = (ip_family(val) == PGSQL_AF_INET) ? 0 : 1; + out->result.matchNode.restDatum = InetPGetDatum(val); + + PG_RETURN_VOID(); + } + + /* Else it must split by prefix */ + Assert(in->nNodes == 4 || in->allTheSame); + + prefix = DatumGetInetPP(in->prefixDatum); + commonbits = ip_bits(prefix); + + /* + * We cannot put addresses from different families under the same inner + * node, so we have to split if the new value's family is different. + */ + if (ip_family(val) != ip_family(prefix)) + { + /* Set up 2-node tuple */ + out->resultType = spgSplitTuple; + out->result.splitTuple.prefixHasPrefix = false; + out->result.splitTuple.prefixNNodes = 2; + out->result.splitTuple.prefixNodeLabels = NULL; + + /* Identify which node the existing data goes into */ + out->result.splitTuple.childNodeN = + (ip_family(prefix) == PGSQL_AF_INET) ? 0 : 1; + + out->result.splitTuple.postfixHasPrefix = true; + out->result.splitTuple.postfixPrefixDatum = InetPGetDatum(prefix); + + PG_RETURN_VOID(); + } + + /* + * If the new value does not match the existing prefix, we have to split. + */ + if (ip_bits(val) < commonbits || + bitncmp(ip_addr(prefix), ip_addr(val), commonbits) != 0) + { + /* Determine new prefix length for the split tuple */ + commonbits = bitncommon(ip_addr(prefix), ip_addr(val), + Min(ip_bits(val), commonbits)); + + /* Set up 4-node tuple */ + out->resultType = spgSplitTuple; + out->result.splitTuple.prefixHasPrefix = true; + out->result.splitTuple.prefixPrefixDatum = + InetPGetDatum(cidr_set_masklen_internal(val, commonbits)); + out->result.splitTuple.prefixNNodes = 4; + out->result.splitTuple.prefixNodeLabels = NULL; + + /* Identify which node the existing data goes into */ + out->result.splitTuple.childNodeN = + inet_spg_node_number(prefix, commonbits); + + out->result.splitTuple.postfixHasPrefix = true; + out->result.splitTuple.postfixPrefixDatum = InetPGetDatum(prefix); + + PG_RETURN_VOID(); + } + + /* + * All OK, choose the node to descend into. 
(If this tuple is marked + * allTheSame, the core code will ignore our choice of nodeN; but we need + * not account for that case explicitly here.) + */ + out->resultType = spgMatchNode; + out->result.matchNode.nodeN = inet_spg_node_number(val, commonbits); + out->result.matchNode.restDatum = InetPGetDatum(val); + + PG_RETURN_VOID(); +} + +/* + * The GiST PickSplit method + */ +Datum +inet_spg_picksplit(PG_FUNCTION_ARGS) +{ + spgPickSplitIn *in = (spgPickSplitIn *) PG_GETARG_POINTER(0); + spgPickSplitOut *out = (spgPickSplitOut *) PG_GETARG_POINTER(1); + inet *prefix, + *tmp; + int i, + commonbits; + bool differentFamilies = false; + + /* Initialize the prefix with the first item */ + prefix = DatumGetInetPP(in->datums[0]); + commonbits = ip_bits(prefix); + + /* Examine remaining items to discover minimum common prefix length */ + for (i = 1; i < in->nTuples; i++) + { + tmp = DatumGetInetPP(in->datums[i]); + + if (ip_family(tmp) != ip_family(prefix)) + { + differentFamilies = true; + break; + } + + if (ip_bits(tmp) < commonbits) + commonbits = ip_bits(tmp); + commonbits = bitncommon(ip_addr(prefix), ip_addr(tmp), commonbits); + if (commonbits == 0) + break; + } + + /* Don't need labels; allocate output arrays */ + out->nodeLabels = NULL; + out->mapTuplesToNodes = (int *) palloc(sizeof(int) * in->nTuples); + out->leafTupleDatums = (Datum *) palloc(sizeof(Datum) * in->nTuples); + + if (differentFamilies) + { + /* Set up 2-node tuple */ + out->hasPrefix = false; + out->nNodes = 2; + + for (i = 0; i < in->nTuples; i++) + { + tmp = DatumGetInetPP(in->datums[i]); + out->mapTuplesToNodes[i] = + (ip_family(tmp) == PGSQL_AF_INET) ? 0 : 1; + out->leafTupleDatums[i] = InetPGetDatum(tmp); + } + } + else + { + /* Set up 4-node tuple */ + out->hasPrefix = true; + out->prefixDatum = + InetPGetDatum(cidr_set_masklen_internal(prefix, commonbits)); + out->nNodes = 4; + + for (i = 0; i < in->nTuples; i++) + { + tmp = DatumGetInetPP(in->datums[i]); + out->mapTuplesToNodes[i] = inet_spg_node_number(tmp, commonbits); + out->leafTupleDatums[i] = InetPGetDatum(tmp); + } + } + + PG_RETURN_VOID(); +} + +/* + * The SP-GiST query consistency check for inner tuples + */ +Datum +inet_spg_inner_consistent(PG_FUNCTION_ARGS) +{ + spgInnerConsistentIn *in = (spgInnerConsistentIn *) PG_GETARG_POINTER(0); + spgInnerConsistentOut *out = (spgInnerConsistentOut *) PG_GETARG_POINTER(1); + int i; + int which; + + if (!in->hasPrefix) + { + Assert(!in->allTheSame); + Assert(in->nNodes == 2); + + /* Identify which child nodes need to be visited */ + which = 1 | (1 << 1); + + for (i = 0; i < in->nkeys; i++) + { + StrategyNumber strategy = in->scankeys[i].sk_strategy; + inet *argument = DatumGetInetPP(in->scankeys[i].sk_argument); + + switch (strategy) + { + case RTLessStrategyNumber: + case RTLessEqualStrategyNumber: + if (ip_family(argument) == PGSQL_AF_INET) + which &= 1; + break; + + case RTGreaterEqualStrategyNumber: + case RTGreaterStrategyNumber: + if (ip_family(argument) == PGSQL_AF_INET6) + which &= (1 << 1); + break; + + case RTNotEqualStrategyNumber: + break; + + default: + /* all other ops can only match addrs of same family */ + if (ip_family(argument) == PGSQL_AF_INET) + which &= 1; + else + which &= (1 << 1); + break; + } + } + } + else if (!in->allTheSame) + { + Assert(in->nNodes == 4); + + /* Identify which child nodes need to be visited */ + which = inet_spg_consistent_bitmap(DatumGetInetPP(in->prefixDatum), + in->nkeys, in->scankeys, false); + } + else + { + /* Must visit all nodes; we assume there are less than 32 
of 'em */ + which = ~0; + } + + out->nNodes = 0; + + if (which) + { + out->nodeNumbers = (int *) palloc(sizeof(int) * in->nNodes); + + for (i = 0; i < in->nNodes; i++) + { + if (which & (1 << i)) + { + out->nodeNumbers[out->nNodes] = i; + out->nNodes++; + } + } + } + + PG_RETURN_VOID(); +} + +/* + * The SP-GiST query consistency check for leaf tuples + */ +Datum +inet_spg_leaf_consistent(PG_FUNCTION_ARGS) +{ + spgLeafConsistentIn *in = (spgLeafConsistentIn *) PG_GETARG_POINTER(0); + spgLeafConsistentOut *out = (spgLeafConsistentOut *) PG_GETARG_POINTER(1); + inet *leaf = DatumGetInetPP(in->leafDatum); + + /* All tests are exact. */ + out->recheck = false; + + /* Leaf is what it is... */ + out->leafValue = InetPGetDatum(leaf); + + /* Use common code to apply the tests. */ + PG_RETURN_BOOL(inet_spg_consistent_bitmap(leaf, in->nkeys, in->scankeys, + true)); +} + +/* + * Calculate node number (within a 4-node, single-family inner index tuple) + * + * The value must have the same family as the node's prefix, and + * commonbits is the mask length of the prefix. We use even or odd + * nodes according to the next address bit after the commonbits, + * and low or high nodes according to whether the value's mask length + * is larger than commonbits. + */ +static int +inet_spg_node_number(const inet *val, int commonbits) +{ + int nodeN = 0; + + if (commonbits < ip_maxbits(val) && + ip_addr(val)[commonbits / 8] & (1 << (7 - commonbits % 8))) + nodeN |= 1; + if (commonbits < ip_bits(val)) + nodeN |= 2; + + return nodeN; +} + +/* + * Calculate bitmap of node numbers that are consistent with the query + * + * This can be used either at a 4-way inner tuple, or at a leaf tuple. + * In the latter case, we should return a boolean result (0 or 1) + * not a bitmap. + * + * This definition is pretty odd, but the inner and leaf consistency checks + * are mostly common and it seems best to keep them in one function. + */ +static int +inet_spg_consistent_bitmap(const inet *prefix, int nkeys, ScanKey scankeys, + bool leaf) +{ + int bitmap; + int commonbits, + i; + + /* Initialize result to allow visiting all children */ + if (leaf) + bitmap = 1; + else + bitmap = 1 | (1 << 1) | (1 << 2) | (1 << 3); + + commonbits = ip_bits(prefix); + + for (i = 0; i < nkeys; i++) + { + inet *argument = DatumGetInetPP(scankeys[i].sk_argument); + StrategyNumber strategy = scankeys[i].sk_strategy; + int order; + + /* + * Check 0: different families + * + * Matching families do not help any of the strategies. + */ + if (ip_family(argument) != ip_family(prefix)) + { + switch (strategy) + { + case RTLessStrategyNumber: + case RTLessEqualStrategyNumber: + if (ip_family(argument) < ip_family(prefix)) + bitmap = 0; + break; + + case RTGreaterEqualStrategyNumber: + case RTGreaterStrategyNumber: + if (ip_family(argument) > ip_family(prefix)) + bitmap = 0; + break; + + case RTNotEqualStrategyNumber: + break; + + default: + /* For all other cases, we can be sure there is no match */ + bitmap = 0; + break; + } + + if (!bitmap) + break; + + /* Other checks make no sense with different families. */ + continue; + } + + /* + * Check 1: network bit count + * + * Network bit count (ip_bits) helps to check leaves for sub network + * and sup network operators. At non-leaf nodes, we know every child + * value has greater ip_bits, so we can avoid descending in some cases + * too. 
+ * + * This check is less expensive than checking the address bits, so we + * are doing this before, but it has to be done after for the basic + * comparison strategies, because ip_bits only affect their results + * when the common network bits are the same. + */ + switch (strategy) + { + case RTSubStrategyNumber: + if (commonbits <= ip_bits(argument)) + bitmap &= (1 << 2) | (1 << 3); + break; + + case RTSubEqualStrategyNumber: + if (commonbits < ip_bits(argument)) + bitmap &= (1 << 2) | (1 << 3); + break; + + case RTSuperStrategyNumber: + if (commonbits == ip_bits(argument) - 1) + bitmap &= 1 | (1 << 1); + else if (commonbits >= ip_bits(argument)) + bitmap = 0; + break; + + case RTSuperEqualStrategyNumber: + if (commonbits == ip_bits(argument)) + bitmap &= 1 | (1 << 1); + else if (commonbits > ip_bits(argument)) + bitmap = 0; + break; + + case RTEqualStrategyNumber: + if (commonbits < ip_bits(argument)) + bitmap &= (1 << 2) | (1 << 3); + else if (commonbits == ip_bits(argument)) + bitmap &= 1 | (1 << 1); + else + bitmap = 0; + break; + } + + if (!bitmap) + break; + + /* + * Check 2: common network bits + * + * Compare available common prefix bits to the query, but not beyond + * either the query's netmask or the minimum netmask among the + * represented values. If these bits don't match the query, we can + * eliminate some cases. + */ + order = bitncmp(ip_addr(prefix), ip_addr(argument), + Min(commonbits, ip_bits(argument))); + + if (order != 0) + { + switch (strategy) + { + case RTLessStrategyNumber: + case RTLessEqualStrategyNumber: + if (order > 0) + bitmap = 0; + break; + + case RTGreaterEqualStrategyNumber: + case RTGreaterStrategyNumber: + if (order < 0) + bitmap = 0; + break; + + case RTNotEqualStrategyNumber: + break; + + default: + /* For all other cases, we can be sure there is no match */ + bitmap = 0; + break; + } + + if (!bitmap) + break; + + /* + * Remaining checks make no sense when common bits don't match. + */ + continue; + } + + /* + * Check 3: next network bit + * + * We can filter out branch 2 or 3 using the next network bit of the + * argument, if it is available. + * + * This check matters for the performance of the search. The results + * would be correct without it. + */ + if (bitmap & ((1 << 2) | (1 << 3)) && + commonbits < ip_bits(argument)) + { + int nextbit; + + nextbit = ip_addr(argument)[commonbits / 8] & + (1 << (7 - commonbits % 8)); + + switch (strategy) + { + case RTLessStrategyNumber: + case RTLessEqualStrategyNumber: + if (!nextbit) + bitmap &= 1 | (1 << 1) | (1 << 2); + break; + + case RTGreaterEqualStrategyNumber: + case RTGreaterStrategyNumber: + if (nextbit) + bitmap &= 1 | (1 << 1) | (1 << 3); + break; + + case RTNotEqualStrategyNumber: + break; + + default: + if (!nextbit) + bitmap &= 1 | (1 << 1) | (1 << 2); + else + bitmap &= 1 | (1 << 1) | (1 << 3); + break; + } + + if (!bitmap) + break; + } + + /* + * Remaining checks are only for the basic comparison strategies. This + * test relies on the strategy number ordering defined in stratnum.h. + */ + if (strategy < RTEqualStrategyNumber || + strategy > RTGreaterEqualStrategyNumber) + continue; + + /* + * Check 4: network bit count + * + * At this point, we know that the common network bits of the prefix + * and the argument are the same, so we can go forward and check the + * ip_bits. 
+ */ + switch (strategy) + { + case RTLessStrategyNumber: + case RTLessEqualStrategyNumber: + if (commonbits == ip_bits(argument)) + bitmap &= 1 | (1 << 1); + else if (commonbits > ip_bits(argument)) + bitmap = 0; + break; + + case RTGreaterEqualStrategyNumber: + case RTGreaterStrategyNumber: + if (commonbits < ip_bits(argument)) + bitmap &= (1 << 2) | (1 << 3); + break; + } + + if (!bitmap) + break; + + /* Remaining checks don't make sense with different ip_bits. */ + if (commonbits != ip_bits(argument)) + continue; + + /* + * Check 5: next host bit + * + * We can filter out branch 0 or 1 using the next host bit of the + * argument, if it is available. + * + * This check matters for the performance of the search. The results + * would be correct without it. There is no point in running it for + * leafs as we have to check the whole address on the next step. + */ + if (!leaf && bitmap & (1 | (1 << 1)) && + commonbits < ip_maxbits(argument)) + { + int nextbit; + + nextbit = ip_addr(argument)[commonbits / 8] & + (1 << (7 - commonbits % 8)); + + switch (strategy) + { + case RTLessStrategyNumber: + case RTLessEqualStrategyNumber: + if (!nextbit) + bitmap &= 1 | (1 << 2) | (1 << 3); + break; + + case RTGreaterEqualStrategyNumber: + case RTGreaterStrategyNumber: + if (nextbit) + bitmap &= (1 << 1) | (1 << 2) | (1 << 3); + break; + + case RTNotEqualStrategyNumber: + break; + + default: + if (!nextbit) + bitmap &= 1 | (1 << 2) | (1 << 3); + else + bitmap &= (1 << 1) | (1 << 2) | (1 << 3); + break; + } + + if (!bitmap) + break; + } + + /* + * Check 6: whole address + * + * This is the last check for correctness of the basic comparison + * strategies. It's only appropriate at leaf entries. + */ + if (leaf) + { + /* Redo ordering comparison using all address bits */ + order = bitncmp(ip_addr(prefix), ip_addr(argument), + ip_maxbits(prefix)); + + switch (strategy) + { + case RTLessStrategyNumber: + if (order >= 0) + bitmap = 0; + break; + + case RTLessEqualStrategyNumber: + if (order > 0) + bitmap = 0; + break; + + case RTEqualStrategyNumber: + if (order != 0) + bitmap = 0; + break; + + case RTGreaterEqualStrategyNumber: + if (order < 0) + bitmap = 0; + break; + + case RTGreaterStrategyNumber: + if (order <= 0) + bitmap = 0; + break; + + case RTNotEqualStrategyNumber: + if (order == 0) + bitmap = 0; + break; + } + + if (!bitmap) + break; + } + } + + return bitmap; +} diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 26f6126002..c04edadbf0 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 201608191 +#define CATALOG_VERSION_NO 201608231 #endif diff --git a/src/include/catalog/pg_amop.h b/src/include/catalog/pg_amop.h index a15b0ec309..917ed46b71 100644 --- a/src/include/catalog/pg_amop.h +++ b/src/include/catalog/pg_amop.h @@ -863,6 +863,21 @@ DATA(insert ( 3550 869 869 25 s 932 783 0 )); DATA(insert ( 3550 869 869 26 s 933 783 0 )); DATA(insert ( 3550 869 869 27 s 934 783 0 )); +/* + * SP-GiST inet_ops + */ +DATA(insert ( 3794 869 869 3 s 3552 4000 0 )); +DATA(insert ( 3794 869 869 18 s 1201 4000 0 )); +DATA(insert ( 3794 869 869 19 s 1202 4000 0 )); +DATA(insert ( 3794 869 869 20 s 1203 4000 0 )); +DATA(insert ( 3794 869 869 21 s 1204 4000 0 )); +DATA(insert ( 3794 869 869 22 s 1205 4000 0 )); +DATA(insert ( 3794 869 869 23 s 1206 4000 0 )); +DATA(insert ( 3794 869 869 24 s 931 4000 0 )); +DATA(insert ( 3794 869 869 25 s 932 4000 0 )); +DATA(insert ( 3794 869 869 26 
s 933 4000 0 )); +DATA(insert ( 3794 869 869 27 s 934 4000 0 )); + /* BRIN opclasses */ /* minmax bytea */ DATA(insert ( 4064 17 17 1 s 1957 3580 0 )); diff --git a/src/include/catalog/pg_amproc.h b/src/include/catalog/pg_amproc.h index 00320b4c33..0cbb416392 100644 --- a/src/include/catalog/pg_amproc.h +++ b/src/include/catalog/pg_amproc.h @@ -428,6 +428,11 @@ DATA(insert ( 3474 3831 3831 2 3470 )); DATA(insert ( 3474 3831 3831 3 3471 )); DATA(insert ( 3474 3831 3831 4 3472 )); DATA(insert ( 3474 3831 3831 5 3473 )); +DATA(insert ( 3794 869 869 1 3795 )); +DATA(insert ( 3794 869 869 2 3796 )); +DATA(insert ( 3794 869 869 3 3797 )); +DATA(insert ( 3794 869 869 4 3798 )); +DATA(insert ( 3794 869 869 5 3799 )); DATA(insert ( 4015 600 600 1 4018 )); DATA(insert ( 4015 600 600 2 4019 )); DATA(insert ( 4015 600 600 3 4020 )); diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h index 6c82d94600..f40b06112b 100644 --- a/src/include/catalog/pg_opclass.h +++ b/src/include/catalog/pg_opclass.h @@ -113,6 +113,7 @@ DATA(insert ( 405 float8_ops PGNSP PGUID 1971 701 t 0 )); DATA(insert ( 403 inet_ops PGNSP PGUID 1974 869 t 0 )); DATA(insert ( 405 inet_ops PGNSP PGUID 1975 869 t 0 )); DATA(insert ( 783 inet_ops PGNSP PGUID 3550 869 f 0 )); +DATA(insert ( 4000 inet_ops PGNSP PGUID 3794 869 t 0 )); DATA(insert OID = 1979 ( 403 int2_ops PGNSP PGUID 1976 21 t 0 )); #define INT2_BTREE_OPS_OID 1979 DATA(insert ( 405 int2_ops PGNSP PGUID 1977 21 t 0 )); diff --git a/src/include/catalog/pg_opfamily.h b/src/include/catalog/pg_opfamily.h index 1499a502f3..ac6b304787 100644 --- a/src/include/catalog/pg_opfamily.h +++ b/src/include/catalog/pg_opfamily.h @@ -79,6 +79,7 @@ DATA(insert OID = 1974 ( 403 network_ops PGNSP PGUID )); #define NETWORK_BTREE_FAM_OID 1974 DATA(insert OID = 1975 ( 405 network_ops PGNSP PGUID )); DATA(insert OID = 3550 ( 783 network_ops PGNSP PGUID )); +DATA(insert OID = 3794 ( 4000 network_ops PGNSP PGUID )); DATA(insert OID = 1976 ( 403 integer_ops PGNSP PGUID )); #define INTEGER_BTREE_FAM_OID 1976 DATA(insert OID = 1977 ( 405 integer_ops PGNSP PGUID )); diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index 050a98c397..e2d08babda 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -2212,6 +2212,18 @@ DESCR("GiST support"); DATA(insert OID = 3559 ( inet_gist_same PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "869 869 2281" _null_ _null_ _null_ _null_ _null_ inet_gist_same _null_ _null_ _null_ )); DESCR("GiST support"); +/* SP-GiST support for inet and cidr */ +DATA(insert OID = 3795 ( inet_spg_config PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_spg_config _null_ _null_ _null_ )); +DESCR("SP-GiST support"); +DATA(insert OID = 3796 ( inet_spg_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_spg_choose _null_ _null_ _null_ )); +DESCR("SP-GiST support"); +DATA(insert OID = 3797 ( inet_spg_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_spg_picksplit _null_ _null_ _null_ )); +DESCR("SP-GiST support"); +DATA(insert OID = 3798 ( inet_spg_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ inet_spg_inner_consistent _null_ _null_ _null_ )); +DESCR("SP-GiST support"); +DATA(insert OID = 3799 ( inet_spg_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2281 2281" _null_ 
_null_ _null_ _null_ _null_ inet_spg_leaf_consistent _null_ _null_ _null_ )); +DESCR("SP-GiST support"); + /* Selectivity estimation for inet and cidr */ DATA(insert OID = 3560 ( networksel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ networksel _null_ _null_ _null_ )); DESCR("restriction selectivity for network operators"); diff --git a/src/include/utils/inet.h b/src/include/utils/inet.h index dfa0b9f711..9fd954da7d 100644 --- a/src/include/utils/inet.h +++ b/src/include/utils/inet.h @@ -135,6 +135,15 @@ extern Datum inet_gist_penalty(PG_FUNCTION_ARGS); extern Datum inet_gist_picksplit(PG_FUNCTION_ARGS); extern Datum inet_gist_same(PG_FUNCTION_ARGS); +/* + * SP-GiST support functions in network_spgist.c + */ +extern Datum inet_spg_config(PG_FUNCTION_ARGS); +extern Datum inet_spg_choose(PG_FUNCTION_ARGS); +extern Datum inet_spg_picksplit(PG_FUNCTION_ARGS); +extern Datum inet_spg_inner_consistent(PG_FUNCTION_ARGS); +extern Datum inet_spg_leaf_consistent(PG_FUNCTION_ARGS); + /* * Estimation functions in network_selfuncs.c */ diff --git a/src/test/regress/expected/inet.out b/src/test/regress/expected/inet.out index 9447e03ab5..be9427eb6b 100644 --- a/src/test/regress/expected/inet.out +++ b/src/test/regress/expected/inet.out @@ -411,6 +411,154 @@ SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; SET enable_seqscan TO on; DROP INDEX inet_idx2; +-- check that spgist index works correctly +CREATE INDEX inet_idx3 ON inet_tbl using spgist (i); +SET enable_seqscan TO off; +SELECT * FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(3 rows) + +SELECT * FROM inet_tbl WHERE i <<= '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(6 rows) + +SELECT * FROM inet_tbl WHERE i && '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(6 rows) + +SELECT * FROM inet_tbl WHERE i >>= '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 +(3 rows) + +SELECT * FROM inet_tbl WHERE i >> '192.168.1.0/24'::cidr ORDER BY i; + c | i +---+--- +(0 rows) + +SELECT * FROM inet_tbl WHERE i < '192.168.1.0/24'::cidr ORDER BY i; + c | i +-------------+------------- + 10.0.0.0/8 | 9.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/32 | 10.1.2.3 + 10.0.0.0/8 | 11.1.2.3/8 +(8 rows) + +SELECT * FROM inet_tbl WHERE i <= '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+---------------- + 10.0.0.0/8 | 9.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/32 | 10.1.2.3 + 10.0.0.0/8 | 11.1.2.3/8 + 192.168.1.0/24 | 192.168.1.0/24 +(9 rows) + +SELECT * FROM inet_tbl WHERE i = '192.168.1.0/24'::cidr 
ORDER BY i; + c | i +----------------+---------------- + 192.168.1.0/24 | 192.168.1.0/24 +(1 row) + +SELECT * FROM inet_tbl WHERE i >= '192.168.1.0/24'::cidr ORDER BY i; + c | i +--------------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff +(9 rows) + +SELECT * FROM inet_tbl WHERE i > '192.168.1.0/24'::cidr ORDER BY i; + c | i +--------------------+------------------ + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff +(8 rows) + +SELECT * FROM inet_tbl WHERE i <> '192.168.1.0/24'::cidr ORDER BY i; + c | i +--------------------+------------------ + 10.0.0.0/8 | 9.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/32 | 10.1.2.3 + 10.0.0.0/8 | 11.1.2.3/8 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff +(16 rows) + +-- test index-only scans +EXPLAIN (COSTS OFF) +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + QUERY PLAN +--------------------------------------------------- + Sort + Sort Key: i + -> Index Only Scan using inet_idx3 on inet_tbl + Index Cond: (i << '192.168.1.0/24'::inet) +(4 rows) + +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + i +------------------ + 192.168.1.0/25 + 192.168.1.255/25 + 192.168.1.226 +(3 rows) + +SET enable_seqscan TO on; +DROP INDEX inet_idx3; -- simple tests of inet boolean and arithmetic operators SELECT i, ~i AS "~i" FROM inet_tbl; i | ~i diff --git a/src/test/regress/expected/opr_sanity.out b/src/test/regress/expected/opr_sanity.out index 826441442b..0bcec136c5 100644 --- a/src/test/regress/expected/opr_sanity.out +++ b/src/test/regress/expected/opr_sanity.out @@ -1819,7 +1819,16 @@ ORDER BY 1, 2, 3; 4000 | 15 | > 4000 | 16 | @> 4000 | 18 | = -(112 rows) + 4000 | 19 | <> + 4000 | 20 | < + 4000 | 21 | <= + 4000 | 22 | > + 4000 | 23 | >= + 4000 | 24 | << + 4000 | 25 | <<= + 4000 | 26 | >> + 4000 | 27 | >>= +(121 rows) -- Check that all opclass search operators have selectivity estimators. 
-- This is not absolutely required, but it seems a reasonable thing diff --git a/src/test/regress/sql/inet.sql b/src/test/regress/sql/inet.sql index 007741e935..880e115360 100644 --- a/src/test/regress/sql/inet.sql +++ b/src/test/regress/sql/inet.sql @@ -93,6 +93,29 @@ SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; SET enable_seqscan TO on; DROP INDEX inet_idx2; +-- check that spgist index works correctly +CREATE INDEX inet_idx3 ON inet_tbl using spgist (i); +SET enable_seqscan TO off; +SELECT * FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i <<= '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i && '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i >>= '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i >> '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i < '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i <= '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i = '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i >= '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i > '192.168.1.0/24'::cidr ORDER BY i; +SELECT * FROM inet_tbl WHERE i <> '192.168.1.0/24'::cidr ORDER BY i; + +-- test index-only scans +EXPLAIN (COSTS OFF) +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + +SET enable_seqscan TO on; +DROP INDEX inet_idx3; + -- simple tests of inet boolean and arithmetic operators SELECT i, ~i AS "~i" FROM inet_tbl; SELECT i, c, i & c AS "and" FROM inet_tbl; -- cgit v1.2.3 From 32909a57f9fb131eab8971a6d9845b55bbcb9091 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 23 Aug 2016 16:25:35 -0400 Subject: Fix network_spgist.c build failures from missing AF_INET definition. AF_INET is apparently defined in something that's pulled in automatically on Linux, but the buildfarm says that's not true everywhere. Comparing to network_gist.c suggests that including ought to fix it, and the POSIX standard concurs. --- src/backend/utils/adt/network_spgist.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'src') diff --git a/src/backend/utils/adt/network_spgist.c b/src/backend/utils/adt/network_spgist.c index 708ae899ac..a198a83973 100644 --- a/src/backend/utils/adt/network_spgist.c +++ b/src/backend/utils/adt/network_spgist.c @@ -31,6 +31,8 @@ */ #include "postgres.h" +#include + #include "access/spgist.h" #include "catalog/pg_type.h" #include "utils/inet.h" -- cgit v1.2.3 From 71e006f031310f77ab72881c47a7d8f41df748bb Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 23 Aug 2016 23:21:10 -0400 Subject: Suppress compiler warnings in non-cassert builds. With Asserts off, these variables are set but never used, resulting in warnings from pickier compilers. Fix that with our standard solution. Per report from Jeff Janes. 
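The "standard solution" referred to here is the PG_USED_FOR_ASSERTS_ONLY marker applied in the diff below. A minimal, invented illustration of the idiom follows (the function and variable names are hypothetical; the macro comes from c.h and marks the variable as potentially unused when assertions are compiled out):

    #include "postgres.h"

    /*
     * Invented example: "nkeys" is assigned but, with assertions disabled,
     * never read, so it is decorated to silence set-but-not-used warnings.
     */
    static void
    sanity_check_key_count(int reported_nkeys, int actual_nkeys)
    {
        int         nkeys PG_USED_FOR_ASSERTS_ONLY;

        nkeys = actual_nkeys;
        Assert(nkeys == reported_nkeys);
    }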
--- src/backend/access/gist/gistutil.c | 2 +- src/backend/utils/adt/amutils.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index 26d4a64694..887c58b71c 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -852,7 +852,7 @@ gistproperty(Oid index_oid, int attno, bool *res, bool *isnull) { HeapTuple tuple; - Form_pg_index rd_index; + Form_pg_index rd_index PG_USED_FOR_ASSERTS_ONLY; Form_pg_opclass rd_opclass; Datum datum; bool disnull; diff --git a/src/backend/utils/adt/amutils.c b/src/backend/utils/adt/amutils.c index ad5e45674b..f4844d1506 100644 --- a/src/backend/utils/adt/amutils.c +++ b/src/backend/utils/adt/amutils.c @@ -115,7 +115,7 @@ test_indoption(Oid relid, int attno, bool guard, bool *res) { HeapTuple tuple; - Form_pg_index rd_index; + Form_pg_index rd_index PG_USED_FOR_ASSERTS_ONLY; Datum datum; bool isnull; int2vector *indoption; -- cgit v1.2.3 From b6418a0919c2c161cf2ed65bc930a06c9c2d85da Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Tue, 23 Aug 2016 23:40:38 -0400 Subject: Build libpgfeutils before pg_isready. Every program having -lpgfeutils in LDFLAGS must have this dependency, whether or not the program uses a libpgfeutils symbol. Back-patch to 9.6, where libpgfeutils was introduced. --- src/bin/scripts/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/bin/scripts/Makefile b/src/bin/scripts/Makefile index 8c107b1ba4..c6ad4e7552 100644 --- a/src/bin/scripts/Makefile +++ b/src/bin/scripts/Makefile @@ -35,7 +35,7 @@ dropuser: dropuser.o common.o | submake-libpq submake-libpgport submake-libpgfeu clusterdb: clusterdb.o common.o | submake-libpq submake-libpgport submake-libpgfeutils vacuumdb: vacuumdb.o common.o | submake-libpq submake-libpgport submake-libpgfeutils reindexdb: reindexdb.o common.o | submake-libpq submake-libpgport submake-libpgfeutils -pg_isready: pg_isready.o common.o | submake-libpq submake-libpgport +pg_isready: pg_isready.o common.o | submake-libpq submake-libpgport submake-libpgfeutils install: all installdirs $(INSTALL_PROGRAM) createdb$(X) '$(DESTDIR)$(bindir)'/createdb$(X) -- cgit v1.2.3 From 03951987283b99d5002227ab89f896bab772b7ec Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Tue, 23 Aug 2016 23:40:38 -0400 Subject: Build libpgfeutils before src/bin/pg_basebackup programs. Oversight in commit 9132c014290d02435999c81892fa8b0b384497d8. 
--- src/bin/pg_basebackup/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/bin/pg_basebackup/Makefile b/src/bin/pg_basebackup/Makefile index a23a83eb9b..fa1ce8b24d 100644 --- a/src/bin/pg_basebackup/Makefile +++ b/src/bin/pg_basebackup/Makefile @@ -23,13 +23,13 @@ OBJS=receivelog.o streamutil.o $(WIN32RES) all: pg_basebackup pg_receivexlog pg_recvlogical -pg_basebackup: pg_basebackup.o $(OBJS) | submake-libpq submake-libpgport +pg_basebackup: pg_basebackup.o $(OBJS) | submake-libpq submake-libpgport submake-libpgfeutils $(CC) $(CFLAGS) pg_basebackup.o $(OBJS) $(libpq_pgport) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X) -pg_receivexlog: pg_receivexlog.o $(OBJS) | submake-libpq submake-libpgport +pg_receivexlog: pg_receivexlog.o $(OBJS) | submake-libpq submake-libpgport submake-libpgfeutils $(CC) $(CFLAGS) pg_receivexlog.o $(OBJS) $(libpq_pgport) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X) -pg_recvlogical: pg_recvlogical.o $(OBJS) | submake-libpq submake-libpgport +pg_recvlogical: pg_recvlogical.o $(OBJS) | submake-libpq submake-libpgport submake-libpgfeutils $(CC) $(CFLAGS) pg_recvlogical.o $(OBJS) $(libpq_pgport) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@$(X) install: all installdirs -- cgit v1.2.3 From 5cd3864075622b203d530f1a710818777859304e Mon Sep 17 00:00:00 2001 From: Kevin Grittner Date: Wed, 24 Aug 2016 13:17:21 -0500 Subject: Remove unnecessary #include. Accidentally added in 8b65cf4c5edabdcae45ceaef7b9ac236879aae50. Pointed out by Álvaro Herrera --- src/include/storage/bufmgr.h | 1 - 1 file changed, 1 deletion(-) (limited to 'src') diff --git a/src/include/storage/bufmgr.h b/src/include/storage/bufmgr.h index fcd0c75b1c..7b6ba96000 100644 --- a/src/include/storage/bufmgr.h +++ b/src/include/storage/bufmgr.h @@ -14,7 +14,6 @@ #ifndef BUFMGR_H #define BUFMGR_H -#include "catalog/catalog.h" #include "storage/block.h" #include "storage/buf.h" #include "storage/bufpage.h" -- cgit v1.2.3 From 2c00fad2864350508f666da1a2c04e0cbe9cbf58 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 24 Aug 2016 14:37:50 -0400 Subject: Fix improper repetition of previous results from a hashed aggregate. ExecReScanAgg's check for whether it could re-use a previously calculated hashtable neglected the possibility that the Agg node might reference PARAM_EXEC Params that are not referenced by its input plan node. That's okay if the Params are in upper tlist or qual expressions; but if one appears in aggregate input expressions, then the hashtable contents need to be recomputed when the Param's value changes. To avoid unnecessary performance degradation in the case of a Param that isn't within an aggregate input, add logic to the planner to determine which Params are within aggregate inputs. This requires a new field in struct Agg, but fortunately we never write plans to disk, so this isn't an initdb-forcing change. Per report from Jeevan Chalke. This has been broken since forever, so back-patch to all supported branches. 
Andrew Gierth, with minor adjustments by me Report: --- src/backend/executor/nodeAgg.c | 10 +++-- src/backend/nodes/copyfuncs.c | 1 + src/backend/nodes/outfuncs.c | 1 + src/backend/nodes/readfuncs.c | 1 + src/backend/optimizer/plan/createplan.c | 1 + src/backend/optimizer/plan/subselect.c | 48 +++++++++++++++++++- src/include/nodes/plannodes.h | 3 +- src/test/regress/expected/aggregates.out | 75 ++++++++++++++++++++++++++++++++ src/test/regress/sql/aggregates.sql | 22 ++++++++++ 9 files changed, 156 insertions(+), 6 deletions(-) (limited to 'src') diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 1ec2515090..ce2fc281a4 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -3425,11 +3425,13 @@ ExecReScanAgg(AggState *node) return; /* - * If we do have the hash table and the subplan does not have any - * parameter changes, then we can just rescan the existing hash table; - * no need to build it again. + * If we do have the hash table, and the subplan does not have any + * parameter changes, and none of our own parameter changes affect + * input expressions of the aggregated functions, then we can just + * rescan the existing hash table; no need to build it again. */ - if (outerPlan->chgParam == NULL) + if (outerPlan->chgParam == NULL && + !bms_overlap(node->ss.ps.chgParam, aggnode->aggParams)) { ResetTupleHashIterator(node->hashtable, &node->hashiter); return; diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index c7a06442ba..1877fb45e5 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -877,6 +877,7 @@ _copyAgg(const Agg *from) COPY_POINTER_FIELD(grpOperators, from->numCols * sizeof(Oid)); } COPY_SCALAR_FIELD(numGroups); + COPY_BITMAPSET_FIELD(aggParams); COPY_NODE_FIELD(groupingSets); COPY_NODE_FIELD(chain); diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 50019f4164..29b7712584 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -716,6 +716,7 @@ _outAgg(StringInfo str, const Agg *node) appendStringInfo(str, " %u", node->grpOperators[i]); WRITE_LONG_FIELD(numGroups); + WRITE_BITMAPSET_FIELD(aggParams); WRITE_NODE_FIELD(groupingSets); WRITE_NODE_FIELD(chain); } diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index c83063e219..6f9a81e3e3 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -2007,6 +2007,7 @@ _readAgg(void) READ_ATTRNUMBER_ARRAY(grpColIdx, local_node->numCols); READ_OID_ARRAY(grpOperators, local_node->numCols); READ_LONG_FIELD(numGroups); + READ_BITMAPSET_FIELD(aggParams); READ_NODE_FIELD(groupingSets); READ_NODE_FIELD(chain); diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 54d601fc47..47158f6468 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -5664,6 +5664,7 @@ make_agg(List *tlist, List *qual, node->grpColIdx = grpColIdx; node->grpOperators = grpOperators; node->numGroups = numGroups; + node->aggParams = NULL; /* SS_finalize_plan() will fill this */ node->groupingSets = groupingSets; node->chain = chain; diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index a46cc10820..6edefb1138 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -82,6 +82,7 @@ static Bitmapset *finalize_plan(PlannerInfo *root, Bitmapset *valid_params, Bitmapset *scan_params); 
static bool finalize_primnode(Node *node, finalize_primnode_context *context); +static bool finalize_agg_primnode(Node *node, finalize_primnode_context *context); /* @@ -2652,6 +2653,29 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, locally_added_param); break; + case T_Agg: + { + Agg *agg = (Agg *) plan; + + /* + * AGG_HASHED plans need to know which Params are referenced + * in aggregate calls. Do a separate scan to identify them. + */ + if (agg->aggstrategy == AGG_HASHED) + { + finalize_primnode_context aggcontext; + + aggcontext.root = root; + aggcontext.paramids = NULL; + finalize_agg_primnode((Node *) agg->plan.targetlist, + &aggcontext); + finalize_agg_primnode((Node *) agg->plan.qual, + &aggcontext); + agg->aggParams = aggcontext.paramids; + } + } + break; + case T_WindowAgg: finalize_primnode(((WindowAgg *) plan)->startOffset, &context); @@ -2660,7 +2684,6 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, break; case T_Hash: - case T_Agg: case T_Material: case T_Sort: case T_Unique: @@ -2811,6 +2834,29 @@ finalize_primnode(Node *node, finalize_primnode_context *context) (void *) context); } +/* + * finalize_agg_primnode: find all Aggref nodes in the given expression tree, + * and add IDs of all PARAM_EXEC params appearing within their aggregated + * arguments to the result set. + */ +static bool +finalize_agg_primnode(Node *node, finalize_primnode_context *context) +{ + if (node == NULL) + return false; + if (IsA(node, Aggref)) + { + Aggref *agg = (Aggref *) node; + + /* we should not consider the direct arguments, if any */ + finalize_primnode((Node *) agg->args, context); + finalize_primnode((Node *) agg->aggfilter, context); + return false; /* there can't be any Aggrefs below here */ + } + return expression_tree_walker(node, finalize_agg_primnode, + (void *) context); +} + /* * SS_make_initplan_output_param - make a Param for an initPlan's output * diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index 369179f291..bc5463b8f7 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -715,7 +715,8 @@ typedef struct Agg AttrNumber *grpColIdx; /* their indexes in the target list */ Oid *grpOperators; /* equality operators to compare with */ long numGroups; /* estimated number of groups in input */ - /* Note: the planner only provides numGroups in AGG_HASHED case */ + Bitmapset *aggParams; /* IDs of Params used in Aggref inputs */ + /* Note: planner provides numGroups & aggParams only in AGG_HASHED case */ List *groupingSets; /* grouping sets to use */ List *chain; /* chained Agg/Sort nodes */ } Agg; diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out index 14646c6397..45208a6da6 100644 --- a/src/test/regress/expected/aggregates.out +++ b/src/test/regress/expected/aggregates.out @@ -366,6 +366,81 @@ from tenk1 o; 9999 (1 row) +-- Test handling of Params within aggregate arguments in hashed aggregation. +-- Per bug report from Jeevan Chalke. 
+explain (verbose, costs off) +select s1, s2, sm +from generate_series(1, 3) s1, + lateral (select s2, sum(s1 + s2) sm + from generate_series(1, 3) s2 group by s2) ss +order by 1, 2; + QUERY PLAN +------------------------------------------------------------------ + Sort + Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2))) + Sort Key: s1.s1, s2.s2 + -> Nested Loop + Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2))) + -> Function Scan on pg_catalog.generate_series s1 + Output: s1.s1 + Function Call: generate_series(1, 3) + -> HashAggregate + Output: s2.s2, sum((s1.s1 + s2.s2)) + Group Key: s2.s2 + -> Function Scan on pg_catalog.generate_series s2 + Output: s2.s2 + Function Call: generate_series(1, 3) +(14 rows) + +select s1, s2, sm +from generate_series(1, 3) s1, + lateral (select s2, sum(s1 + s2) sm + from generate_series(1, 3) s2 group by s2) ss +order by 1, 2; + s1 | s2 | sm +----+----+---- + 1 | 1 | 2 + 1 | 2 | 3 + 1 | 3 | 4 + 2 | 1 | 3 + 2 | 2 | 4 + 2 | 3 | 5 + 3 | 1 | 4 + 3 | 2 | 5 + 3 | 3 | 6 +(9 rows) + +explain (verbose, costs off) +select array(select sum(x+y) s + from generate_series(1,3) y group by y order by s) + from generate_series(1,3) x; + QUERY PLAN +------------------------------------------------------------------- + Function Scan on pg_catalog.generate_series x + Output: (SubPlan 1) + Function Call: generate_series(1, 3) + SubPlan 1 + -> Sort + Output: (sum((x.x + y.y))), y.y + Sort Key: (sum((x.x + y.y))) + -> HashAggregate + Output: sum((x.x + y.y)), y.y + Group Key: y.y + -> Function Scan on pg_catalog.generate_series y + Output: y.y + Function Call: generate_series(1, 3) +(13 rows) + +select array(select sum(x+y) s + from generate_series(1,3) y group by y order by s) + from generate_series(1,3) x; + array +--------- + {2,3,4} + {3,4,5} + {4,5,6} +(3 rows) + -- -- test for bitwise integer aggregates -- diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql index 9983ff3a89..430ac49385 100644 --- a/src/test/regress/sql/aggregates.sql +++ b/src/test/regress/sql/aggregates.sql @@ -98,6 +98,28 @@ select (select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1))) from tenk1 o; +-- Test handling of Params within aggregate arguments in hashed aggregation. +-- Per bug report from Jeevan Chalke. +explain (verbose, costs off) +select s1, s2, sm +from generate_series(1, 3) s1, + lateral (select s2, sum(s1 + s2) sm + from generate_series(1, 3) s2 group by s2) ss +order by 1, 2; +select s1, s2, sm +from generate_series(1, 3) s1, + lateral (select s2, sum(s1 + s2) sm + from generate_series(1, 3) s2 group by s2) ss +order by 1, 2; + +explain (verbose, costs off) +select array(select sum(x+y) s + from generate_series(1,3) y group by y order by s) + from generate_series(1,3) x; +select array(select sum(x+y) s + from generate_series(1,3) y group by y order by s) + from generate_series(1,3) x; + -- -- test for bitwise integer aggregates -- -- cgit v1.2.3 From ae4760d667c71924932ab32e14996b5be1831fc6 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 24 Aug 2016 22:20:01 -0400 Subject: Fix small query-lifespan memory leak in bulk updates. When there is an identifiable REPLICA IDENTITY index on the target table, heap_update leaks the id_attrs bitmapset. That's not many bytes, but it adds up over enough rows, since the code typically runs in a query-lifespan context. Bug introduced in commit e55704d8b, which did a rather poor job of cloning the existing use-pattern for RelationGetIndexAttrBitmap(). Per bug #14293 from Zhou Digoal. 
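[Editorial illustration only; the table and index names below are invented and do not appear in the patch.] To make the "adds up over enough rows" point concrete: with an explicitly chosen replica identity index, each row visited by a bulk UPDATE leaked one id_attrs bitmapset into the query-lifespan context before this fix, for example:

    -- hypothetical setup, assuming these names
    CREATE TABLE t (id int PRIMARY KEY, payload text);
    CREATE UNIQUE INDEX t_id_idx ON t (id);
    ALTER TABLE t REPLICA IDENTITY USING INDEX t_id_idx;

    -- before the fix, every row touched here left a small bitmapset
    -- allocated until the end of the statement
    UPDATE t SET payload = payload || 'x';

The fix simply frees id_attrs alongside hot_attrs and key_attrs on both exit paths of heap_update, as the diff below shows.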
Back-patch to 9.4 where the bug was introduced. Report: <20160824114320.15676.45171@wrigleys.postgresql.org> --- src/backend/access/heap/heapam.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'src') diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index c63dfa0baf..6a27ef4140 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -3802,6 +3802,7 @@ l2: ReleaseBuffer(vmbuffer); bms_free(hot_attrs); bms_free(key_attrs); + bms_free(id_attrs); return result; } @@ -4268,6 +4269,7 @@ l2: bms_free(hot_attrs); bms_free(key_attrs); + bms_free(id_attrs); return HeapTupleMayBeUpdated; } -- cgit v1.2.3 From 2533ff0aa518d4d31391db279cf08e538fae5931 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 25 Aug 2016 09:57:09 -0400 Subject: Fix instability in parallel regression tests. Commit f0c7b789a added a test case in case.sql that creates and then drops both an '=' operator and the type it's for. Given the right timing, that can cause a "cache lookup failed for type" failure in concurrent sessions, which see the '=' operator as a potential match for '=' in a query, but then the type is gone by the time they inquire into its properties. It might be nice to make that behavior more robust someday, but as a back-patchable solution, adjust the new test case so that the operator is never visible to other sessions. Like the previous commit, back-patch to all supported branches. Discussion: <5983.1471371667@sss.pgh.pa.us> --- src/test/regress/expected/case.out | 9 ++++----- src/test/regress/sql/case.sql | 11 ++++++----- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/test/regress/expected/case.out b/src/test/regress/expected/case.out index 35b6476e50..5f6aa16d31 100644 --- a/src/test/regress/expected/case.out +++ b/src/test/regress/expected/case.out @@ -305,6 +305,9 @@ SELECT * FROM CASE_TBL; -- the isNull flag for the case test value incorrectly became true, causing -- the third WHEN-clause not to match. The volatile function calls are needed -- to prevent constant-folding in the planner, which would hide the bug. +-- Wrap this in a single transaction so the transient '=' operator doesn't +-- cause problems in concurrent sessions +BEGIN; CREATE FUNCTION vol(text) returns text as 'begin return $1; end' language plpgsql volatile; SELECT CASE @@ -335,13 +338,9 @@ SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' is not foo (1 row) +ROLLBACK; -- -- Clean up -- DROP TABLE CASE_TBL; DROP TABLE CASE2_TBL; -DROP OPERATOR = (foodomain, foodomain); -DROP FUNCTION inline_eq(foodomain, foodomain); -DROP FUNCTION volfoo(text); -DROP DOMAIN foodomain; -DROP FUNCTION vol(text); diff --git a/src/test/regress/sql/case.sql b/src/test/regress/sql/case.sql index b2377e4610..c860fae258 100644 --- a/src/test/regress/sql/case.sql +++ b/src/test/regress/sql/case.sql @@ -167,6 +167,10 @@ SELECT * FROM CASE_TBL; -- the third WHEN-clause not to match. The volatile function calls are needed -- to prevent constant-folding in the planner, which would hide the bug. 
+-- Wrap this in a single transaction so the transient '=' operator doesn't +-- cause problems in concurrent sessions +BEGIN; + CREATE FUNCTION vol(text) returns text as 'begin return $1; end' language plpgsql volatile; @@ -194,14 +198,11 @@ CREATE OPERATOR = (procedure = inline_eq, SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' END; +ROLLBACK; + -- -- Clean up -- DROP TABLE CASE_TBL; DROP TABLE CASE2_TBL; -DROP OPERATOR = (foodomain, foodomain); -DROP FUNCTION inline_eq(foodomain, foodomain); -DROP FUNCTION volfoo(text); -DROP DOMAIN foodomain; -DROP FUNCTION vol(text); -- cgit v1.2.3 From fbf28b6b52c269188262a87247adb2c359acd6c5 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 26 Aug 2016 10:07:28 -0400 Subject: Fix logic for adding "parallel worker" context line to worker errors. The previous coding here was capable of adding a "parallel worker" context line to errors that were not, in fact, returned from a parallel worker. Instead of using an errcontext callback to add that annotation, just paste it onto the message by hand; this looks uglier but is more reliable. Discussion: <19757.1472151987@sss.pgh.pa.us> --- src/backend/access/transam/parallel.c | 54 +++++++++++++++++------------------ 1 file changed, 27 insertions(+), 27 deletions(-) (limited to 'src') diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index a47eba647b..ec6e1c5e6d 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -108,7 +108,6 @@ static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list); /* Private functions. */ static void HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg); -static void ParallelErrorContext(void *arg); static void ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc); static void ParallelWorkerMain(Datum main_arg); static void WaitForParallelWorkersToExit(ParallelContext *pcxt); @@ -788,30 +787,43 @@ HandleParallelMessage(ParallelContext *pcxt, int i, StringInfo msg) case 'N': /* NoticeResponse */ { ErrorData edata; - ErrorContextCallback errctx; ErrorContextCallback *save_error_context_stack; - /* - * Rethrow the error using the error context callbacks that - * were in effect when the context was created, not the - * current ones. - */ - save_error_context_stack = error_context_stack; - errctx.callback = ParallelErrorContext; - errctx.arg = NULL; - errctx.previous = pcxt->error_context_stack; - error_context_stack = &errctx; - /* Parse ErrorResponse or NoticeResponse. */ pq_parse_errornotice(msg, &edata); /* Death of a worker isn't enough justification for suicide. */ edata.elevel = Min(edata.elevel, ERROR); - /* Rethrow error or notice. */ + /* + * If desired, add a context line to show that this is a + * message propagated from a parallel worker. Otherwise, it + * can sometimes be confusing to understand what actually + * happened. (We don't do this in FORCE_PARALLEL_REGRESS mode + * because it causes test-result instability depending on + * whether a parallel worker is actually used or not.) + */ + if (force_parallel_mode != FORCE_PARALLEL_REGRESS) + { + if (edata.context) + edata.context = psprintf("%s\n%s", edata.context, + _("parallel worker")); + else + edata.context = pstrdup(_("parallel worker")); + } + + /* + * Context beyond that should use the error context callbacks + * that were in effect when the ParallelContext was created, + * not the current ones. 
+ */ + save_error_context_stack = error_context_stack; + error_context_stack = pcxt->error_context_stack; + + /* Rethrow error or print notice. */ ThrowErrorData(&edata); - /* Restore previous context. */ + /* Not an error, so restore previous context stack. */ error_context_stack = save_error_context_stack; break; @@ -1112,18 +1124,6 @@ ParallelExtensionTrampoline(dsm_segment *seg, shm_toc *toc) entrypt(seg, toc); } -/* - * Give the user a hint that this is a message propagated from a parallel - * worker. Otherwise, it can sometimes be confusing to understand what - * actually happened. - */ -static void -ParallelErrorContext(void *arg) -{ - if (force_parallel_mode != FORCE_PARALLEL_REGRESS) - errcontext("parallel worker"); -} - /* * Update shared memory with the ending location of the last WAL record we * wrote, if it's greater than the value already stored there. -- cgit v1.2.3 From 8529036b53298c0555670b4a81ed7349c44aeeb4 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 26 Aug 2016 14:15:47 -0400 Subject: Fix assorted small bugs in ThrowErrorData(). Copy the palloc'd strings into the correct context, ie ErrorContext not wherever the source ErrorData is. This would be a large bug, except that it appears that all catchers of thrown errors do either EmitErrorReport or CopyErrorData before doing anything that would cause transient memory contexts to be cleaned up. Still, it's wrong and it will bite somebody someday. Fix failure to copy cursorpos and internalpos. Utter the appropriate incantations involving recursion_depth, so that we'll behave sanely if we get an error inside pstrdup. (In general, the body of this function ought to act like, eg, errdetail().) Per code reading induced by Jakob Egger's report. --- src/backend/utils/error/elog.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) (limited to 'src') diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index 78d441d198..86e0cd9315 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -1601,7 +1601,10 @@ FlushErrorState(void) /* * ThrowErrorData --- report an error described by an ErrorData structure * - * This is intended to be used to re-report errors originally thrown by + * This is somewhat like ReThrowError, but it allows elevels besides ERROR, + * and the boolean flags such as output_to_server are computed via the + * default rules rather than being copied from the given ErrorData. + * This is primarily used to re-report errors originally reported by * background worker processes and then propagated (with or without * modification) to the backend responsible for them. */ @@ -1613,13 +1616,14 @@ ThrowErrorData(ErrorData *edata) if (!errstart(edata->elevel, edata->filename, edata->lineno, edata->funcname, NULL)) - return; + return; /* error is not to be reported at all */ newedata = &errordata[errordata_stack_depth]; - oldcontext = MemoryContextSwitchTo(edata->assoc_context); + recursion_depth++; + oldcontext = MemoryContextSwitchTo(newedata->assoc_context); - /* Copy the supplied fields to the error stack. */ - if (edata->sqlerrcode > 0) + /* Copy the supplied fields to the error stack entry. 
*/ + if (edata->sqlerrcode != 0) newedata->sqlerrcode = edata->sqlerrcode; if (edata->message) newedata->message = pstrdup(edata->message); @@ -1631,6 +1635,7 @@ ThrowErrorData(ErrorData *edata) newedata->hint = pstrdup(edata->hint); if (edata->context) newedata->context = pstrdup(edata->context); + /* assume message_id is not available */ if (edata->schema_name) newedata->schema_name = pstrdup(edata->schema_name); if (edata->table_name) @@ -1641,11 +1646,15 @@ ThrowErrorData(ErrorData *edata) newedata->datatype_name = pstrdup(edata->datatype_name); if (edata->constraint_name) newedata->constraint_name = pstrdup(edata->constraint_name); + newedata->cursorpos = edata->cursorpos; + newedata->internalpos = edata->internalpos; if (edata->internalquery) newedata->internalquery = pstrdup(edata->internalquery); MemoryContextSwitchTo(oldcontext); + recursion_depth--; + /* Process the error. */ errfinish(0); } -- cgit v1.2.3 From 45a36e68539dcd7095a257b49f6f38ae77dec30d Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 26 Aug 2016 14:19:03 -0400 Subject: Put static forward declarations in elog.c back into same order as code. The guiding principle for the last few patches in this area apparently involved throwing darts. Cosmetic only, but back-patch to 9.6 because there is no reason for 9.6 and HEAD to diverge yet in this file. --- src/backend/utils/error/elog.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'src') diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index 86e0cd9315..03c4a39761 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -79,11 +79,10 @@ #include "utils/ps_status.h" +/* In this module, access gettext() via err_gettext() */ #undef _ #define _(x) err_gettext(x) -static const char *err_gettext(const char *str) pg_attribute_format_arg(1); -static void set_errdata_field(MemoryContextData *cxt, char **ptr, const char *str); /* Global variables */ ErrorContextCallback *error_context_stack = NULL; @@ -129,10 +128,9 @@ static int syslog_facility = LOG_LOCAL0; static void write_syslog(int level, const char *line); #endif -static void write_console(const char *line, int len); - #ifdef WIN32 extern char *event_source; + static void write_eventlog(int level, const char *line, int len); #endif @@ -149,7 +147,6 @@ static int recursion_depth = 0; /* to detect actual recursion */ * Saved timeval and buffers for formatted timestamps that might be used by * both log_line_prefix and csv logs. 
*/ - static struct timeval saved_timeval; static bool saved_timeval_set = false; @@ -169,9 +166,16 @@ static char formatted_log_time[FORMATTED_TS_LEN]; } while (0) +static const char *err_gettext(const char *str) pg_attribute_format_arg(1); +static void set_errdata_field(MemoryContextData *cxt, char **ptr, const char *str); +static void write_console(const char *line, int len); +static void setup_formatted_log_time(void); +static void setup_formatted_start_time(void); static const char *process_log_prefix_padding(const char *p, int *padding); static void log_line_prefix(StringInfo buf, ErrorData *edata); +static void write_csvlog(ErrorData *edata); static void send_message_to_server_log(ErrorData *edata); +static void write_pipe_chunks(char *data, int len, int dest); static void send_message_to_frontend(ErrorData *edata); static char *expand_fmt_string(const char *fmt, ErrorData *edata); static const char *useful_strerror(int errnum); @@ -179,10 +183,6 @@ static const char *get_errno_symbol(int errnum); static const char *error_severity(int elevel); static void append_with_tabs(StringInfo buf, const char *str); static bool is_log_level_output(int elevel, int log_min_level); -static void write_pipe_chunks(char *data, int len, int dest); -static void write_csvlog(ErrorData *edata); -static void setup_formatted_log_time(void); -static void setup_formatted_start_time(void); /* -- cgit v1.2.3 From 78dcd027e8f7ed213f69da932853dc4b7cb9cb44 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 26 Aug 2016 15:04:05 -0400 Subject: Fix potential memory leakage from HandleParallelMessages(). HandleParallelMessages leaked memory into the caller's context. Since it's called from ProcessInterrupts, there is basically zero certainty as to what CurrentMemoryContext is, which means we could be leaking into long-lived contexts. Over the processing of many worker messages that would grow to be a problem. Things could be even worse than just a leak, if we happened to service the interrupt while ErrorContext is current: elog.c thinks it can reset that on its own whim, possibly yanking storage out from under HandleParallelMessages. Give HandleParallelMessages its own dedicated context instead, which we can reset during each call to ensure there's no accumulation of wasted memory. Discussion: <16610.1472222135@sss.pgh.pa.us> --- src/backend/access/transam/parallel.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'src') diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index ec6e1c5e6d..949bfb8b3e 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -702,6 +702,9 @@ void HandleParallelMessages(void) { dlist_iter iter; + MemoryContext oldcontext; + + static MemoryContext hpm_context = NULL; /* * This is invoked from ProcessInterrupts(), and since some of the @@ -712,6 +715,23 @@ HandleParallelMessages(void) */ HOLD_INTERRUPTS(); + /* + * Moreover, CurrentMemoryContext might be pointing almost anywhere. We + * don't want to risk leaking data into long-lived contexts, so let's do + * our work here in a private context that we can reset on each use. + */ + if (hpm_context == NULL) /* first time through? */ + hpm_context = AllocSetContextCreate(TopMemoryContext, + "HandleParallelMessages context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + else + MemoryContextReset(hpm_context); + + oldcontext = MemoryContextSwitchTo(hpm_context); + + /* OK to process messages. 
Reset the flag saying there are more to do. */ ParallelMessagePending = false; dlist_foreach(iter, &pcxt_list) @@ -758,6 +778,11 @@ HandleParallelMessages(void) } } + MemoryContextSwitchTo(oldcontext); + + /* Might as well clear the context on our way out */ + MemoryContextReset(hpm_context); + RESUME_INTERRUPTS(); } -- cgit v1.2.3 From 26fa446da64716f12ab3a623434c644fcb344b2e Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 26 Aug 2016 16:20:17 -0400 Subject: Add a nonlocalized version of the severity field to client error messages. This has been requested a few times, but the use-case for it was never entirely clear. The reason for adding it now is that transmission of error reports from parallel workers fails when NLS is active, because pq_parse_errornotice() wrongly assumes that the existing severity field is nonlocalized. There are other ways we could have fixed that, but the other options were basically kluges, whereas this way provides something that's at least arguably a useful feature along with the bug fix. Per report from Jakob Egger. Back-patch into 9.6, because otherwise parallel query is essentially unusable in non-English locales. The problem exists in 9.5 as well, but we don't want to risk changing on-the-wire behavior in 9.5 (even though the possibility of new error fields is specifically called out in the protocol document). It may be sufficient to leave the issue unfixed in 9.5, given the very limited usefulness of pq_parse_errornotice in that version. Discussion: --- doc/src/sgml/libpq.sgml | 16 ++++++++++++++++ doc/src/sgml/protocol.sgml | 19 +++++++++++++++++++ src/backend/libpq/pqmq.c | 26 +++++++++++++++++++++----- src/backend/utils/error/elog.c | 33 ++++++++++++++++++++------------- src/include/postgres_ext.h | 1 + src/interfaces/libpq/fe-exec.c | 1 + 6 files changed, 78 insertions(+), 18 deletions(-) (limited to 'src') diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index f22e3da047..2f9350b10e 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -2767,6 +2767,22 @@ char *PQresultErrorField(const PGresult *res, int fieldcode); + + PG_DIAG_SEVERITY_NONLOCALIZED + + + The severity; the field contents are ERROR, + FATAL, or PANIC (in an error message), + or WARNING, NOTICE, DEBUG, + INFO, or LOG (in a notice message). + This is identical to the PG_DIAG_SEVERITY field except + that the contents are never localized. This is present only in + reports generated by PostgreSQL versions 9.6 + and later. + + + + PG_DIAG_SQLSTATE diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml index 9c96d8fc44..68b0941029 100644 --- a/doc/src/sgml/protocol.sgml +++ b/doc/src/sgml/protocol.sgml @@ -4882,6 +4882,25 @@ message. + + +V + + + + Severity: the field contents are + ERROR, FATAL, or + PANIC (in an error message), or + WARNING, NOTICE, DEBUG, + INFO, or LOG (in a notice message). + This is identical to the S field except + that the contents are never localized. This is present only in + messages generated by PostgreSQL versions 9.6 + and later. 
+ + + + C diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c index 921242fbc4..bfe66c6c44 100644 --- a/src/backend/libpq/pqmq.c +++ b/src/backend/libpq/pqmq.c @@ -237,10 +237,26 @@ pq_parse_errornotice(StringInfo msg, ErrorData *edata) switch (code) { case PG_DIAG_SEVERITY: + /* ignore, trusting we'll get a nonlocalized version */ + break; + case PG_DIAG_SEVERITY_NONLOCALIZED: if (strcmp(value, "DEBUG") == 0) - edata->elevel = DEBUG1; /* or some other DEBUG level */ + { + /* + * We can't reconstruct the exact DEBUG level, but + * presumably it was >= client_min_messages, so select + * DEBUG1 to ensure we'll pass it on to the client. + */ + edata->elevel = DEBUG1; + } else if (strcmp(value, "LOG") == 0) - edata->elevel = LOG; /* can't be COMMERROR */ + { + /* + * It can't be LOG_SERVER_ONLY, or the worker wouldn't + * have sent it to us; so LOG is the correct value. + */ + edata->elevel = LOG; + } else if (strcmp(value, "INFO") == 0) edata->elevel = INFO; else if (strcmp(value, "NOTICE") == 0) @@ -254,11 +270,11 @@ pq_parse_errornotice(StringInfo msg, ErrorData *edata) else if (strcmp(value, "PANIC") == 0) edata->elevel = PANIC; else - elog(ERROR, "unknown error severity"); + elog(ERROR, "unrecognized error severity: \"%s\"", value); break; case PG_DIAG_SQLSTATE: if (strlen(value) != 5) - elog(ERROR, "malformed sql state"); + elog(ERROR, "invalid SQLSTATE: \"%s\"", value); edata->sqlerrcode = MAKE_SQLSTATE(value[0], value[1], value[2], value[3], value[4]); break; @@ -308,7 +324,7 @@ pq_parse_errornotice(StringInfo msg, ErrorData *edata) edata->funcname = pstrdup(value); break; default: - elog(ERROR, "unknown error field: %d", (int) code); + elog(ERROR, "unrecognized error field code: %d", (int) code); break; } } diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index 03c4a39761..224ee7801c 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -2753,7 +2753,7 @@ write_csvlog(ErrorData *edata) appendStringInfoChar(&buf, ','); /* Error severity */ - appendStringInfoString(&buf, error_severity(edata->elevel)); + appendStringInfoString(&buf, _(error_severity(edata->elevel))); appendStringInfoChar(&buf, ','); /* SQL state code */ @@ -2870,7 +2870,7 @@ send_message_to_server_log(ErrorData *edata) formatted_log_time[0] = '\0'; log_line_prefix(&buf, edata); - appendStringInfo(&buf, "%s: ", error_severity(edata->elevel)); + appendStringInfo(&buf, "%s: ", _(error_severity(edata->elevel))); if (Log_error_verbosity >= PGERROR_VERBOSE) appendStringInfo(&buf, "%s: ", unpack_sql_state(edata->sqlerrcode)); @@ -3153,12 +3153,16 @@ send_message_to_frontend(ErrorData *edata) if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3) { /* New style with separate fields */ + const char *sev; char tbuf[12]; int ssval; int i; + sev = error_severity(edata->elevel); pq_sendbyte(&msgbuf, PG_DIAG_SEVERITY); - err_sendstring(&msgbuf, error_severity(edata->elevel)); + err_sendstring(&msgbuf, _(sev)); + pq_sendbyte(&msgbuf, PG_DIAG_SEVERITY_NONLOCALIZED); + err_sendstring(&msgbuf, sev); /* unpack MAKE_SQLSTATE code */ ssval = edata->sqlerrcode; @@ -3277,7 +3281,7 @@ send_message_to_frontend(ErrorData *edata) initStringInfo(&buf); - appendStringInfo(&buf, "%s: ", error_severity(edata->elevel)); + appendStringInfo(&buf, "%s: ", _(error_severity(edata->elevel))); if (edata->show_funcname && edata->funcname) appendStringInfo(&buf, "%s: ", edata->funcname); @@ -3587,7 +3591,10 @@ get_errno_symbol(int errnum) /* - * error_severity --- get localized string representing 
elevel + * error_severity --- get string representing elevel + * + * The string is not localized here, but we mark the strings for translation + * so that callers can invoke _() on the result. */ static const char * error_severity(int elevel) @@ -3601,29 +3608,29 @@ error_severity(int elevel) case DEBUG3: case DEBUG4: case DEBUG5: - prefix = _("DEBUG"); + prefix = gettext_noop("DEBUG"); break; case LOG: case LOG_SERVER_ONLY: - prefix = _("LOG"); + prefix = gettext_noop("LOG"); break; case INFO: - prefix = _("INFO"); + prefix = gettext_noop("INFO"); break; case NOTICE: - prefix = _("NOTICE"); + prefix = gettext_noop("NOTICE"); break; case WARNING: - prefix = _("WARNING"); + prefix = gettext_noop("WARNING"); break; case ERROR: - prefix = _("ERROR"); + prefix = gettext_noop("ERROR"); break; case FATAL: - prefix = _("FATAL"); + prefix = gettext_noop("FATAL"); break; case PANIC: - prefix = _("PANIC"); + prefix = gettext_noop("PANIC"); break; default: prefix = "???"; diff --git a/src/include/postgres_ext.h b/src/include/postgres_ext.h index 74c344c704..ae2f087798 100644 --- a/src/include/postgres_ext.h +++ b/src/include/postgres_ext.h @@ -49,6 +49,7 @@ typedef PG_INT64_TYPE pg_int64; * applications. */ #define PG_DIAG_SEVERITY 'S' +#define PG_DIAG_SEVERITY_NONLOCALIZED 'V' #define PG_DIAG_SQLSTATE 'C' #define PG_DIAG_MESSAGE_PRIMARY 'M' #define PG_DIAG_MESSAGE_DETAIL 'D' diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c index d1b91c841c..a9ba54628f 100644 --- a/src/interfaces/libpq/fe-exec.c +++ b/src/interfaces/libpq/fe-exec.c @@ -824,6 +824,7 @@ pqInternalNotice(const PGNoticeHooks *hooks, const char *fmt,...) */ pqSaveMessageField(res, PG_DIAG_MESSAGE_PRIMARY, msgBuf); pqSaveMessageField(res, PG_DIAG_SEVERITY, libpq_gettext("NOTICE")); + pqSaveMessageField(res, PG_DIAG_SEVERITY_NONLOCALIZED, "NOTICE"); /* XXX should provide a SQLSTATE too? */ /* -- cgit v1.2.3 From ea268cdc9a2631da4a5748b00059a9fd43470d0e Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 27 Aug 2016 17:50:38 -0400 Subject: Add macros to make AllocSetContextCreate() calls simpler and safer. I found that half a dozen (nearly 5%) of our AllocSetContextCreate calls had typos in the context-sizing parameters. While none of these led to especially significant problems, they did create minor inefficiencies, and it's now clear that expecting people to copy-and-paste those calls accurately is not a great idea. Let's reduce the risk of future errors by introducing single macros that encapsulate the common use-cases. Three such macros are enough to cover all but two special-purpose contexts; those two calls can be left as-is, I think. While this patch doesn't in itself improve matters for third-party extensions, it doesn't break anything for them either, and they can gradually adopt the simplified notation over time. In passing, change TopMemoryContext to use the default allocation parameters. Formerly it could only be extended 8K at a time. That was probably reasonable when this code was written; but nowadays we create many more contexts than we did then, so that it's not unusual to have a couple hundred K in TopMemoryContext, even without considering various dubious code that sticks other things there. There seems no good reason not to let it use growing blocks like most other contexts. Back-patch to 9.6, mostly because that's still close enough to HEAD that it's easy to do so, and keeping the branches in sync can be expected to avoid some future back-patching pain. 
The bugs fixed by these changes don't seem to be significant enough to justify fixing them further back. Discussion: <21072.1472321324@sss.pgh.pa.us> --- contrib/bloom/blinsert.c | 8 ++---- contrib/dblink/dblink.c | 4 +-- contrib/file_fdw/file_fdw.c | 4 +-- contrib/pg_trgm/trgm_regexp.c | 4 +-- contrib/postgres_fdw/postgres_fdw.c | 20 ++++---------- contrib/sepgsql/uavc.c | 6 ++--- contrib/test_decoding/test_decoding.c | 4 +-- src/backend/access/brin/brin.c | 16 +++-------- src/backend/access/brin/brin_tuple.c | 4 +-- src/backend/access/common/printtup.c | 4 +-- src/backend/access/gin/ginbtree.c | 4 +-- src/backend/access/gin/ginfast.c | 4 +-- src/backend/access/gin/gininsert.c | 12 +++------ src/backend/access/gin/ginscan.c | 8 ++---- src/backend/access/gin/ginvacuum.c | 4 +-- src/backend/access/gin/ginxlog.c | 5 ++-- src/backend/access/gist/gist.c | 8 ++---- src/backend/access/gist/gistscan.c | 8 ++---- src/backend/access/heap/rewriteheap.c | 4 +-- src/backend/access/nbtree/nbtree.c | 4 +-- src/backend/access/nbtree/nbtutils.c | 6 ++--- src/backend/access/spgist/spginsert.c | 8 ++---- src/backend/access/spgist/spgscan.c | 4 +-- src/backend/access/spgist/spgxlog.c | 4 +-- src/backend/access/transam/multixact.c | 6 ++--- src/backend/access/transam/parallel.c | 12 +++------ src/backend/access/transam/xact.c | 8 ++---- src/backend/access/transam/xlog.c | 4 +-- src/backend/access/transam/xloginsert.c | 4 +-- src/backend/bootstrap/bootstrap.c | 4 +-- src/backend/catalog/objectaddress.c | 4 +-- src/backend/commands/analyze.c | 12 +++------ src/backend/commands/cluster.c | 4 +-- src/backend/commands/copy.c | 8 ++---- src/backend/commands/event_trigger.c | 8 ++---- src/backend/commands/indexcmds.c | 4 +-- src/backend/commands/policy.c | 4 +-- src/backend/commands/trigger.c | 8 ++---- src/backend/commands/vacuum.c | 4 +-- src/backend/executor/execUtils.c | 12 +++------ src/backend/executor/functions.c | 4 +-- src/backend/executor/nodeFunctionscan.c | 4 +-- src/backend/executor/nodeHash.c | 8 ++---- src/backend/executor/nodeRecursiveunion.c | 8 ++---- src/backend/executor/nodeSetOp.c | 8 ++---- src/backend/executor/nodeSubplan.c | 8 ++---- src/backend/executor/nodeUnique.c | 4 +-- src/backend/executor/nodeWindowAgg.c | 18 +++++-------- src/backend/executor/spi.c | 23 +++++----------- src/backend/executor/tqueue.c | 4 +-- src/backend/libpq/be-fsstubs.c | 4 +-- src/backend/libpq/hba.c | 14 +++------- src/backend/optimizer/geqo/geqo_eval.c | 4 +-- src/backend/optimizer/util/clauses.c | 8 ++---- src/backend/postmaster/autovacuum.c | 24 +++++------------ src/backend/postmaster/bgwriter.c | 4 +-- src/backend/postmaster/checkpointer.c | 4 +-- src/backend/postmaster/pgstat.c | 4 +-- src/backend/postmaster/postmaster.c | 4 +-- src/backend/postmaster/walwriter.c | 4 +-- src/backend/replication/logical/logical.c | 6 ++--- src/backend/replication/logical/reorderbuffer.c | 6 ++--- src/backend/replication/logical/snapbuild.c | 4 +-- src/backend/replication/walsender.c | 4 +-- src/backend/storage/buffer/localbuf.c | 4 +-- src/backend/storage/file/reinit.c | 4 +-- src/backend/storage/lmgr/lwlock.c | 4 +-- src/backend/storage/smgr/md.c | 10 +++---- src/backend/tcop/postgres.c | 8 ++---- src/backend/tsearch/spell.c | 4 +-- src/backend/utils/adt/array_expanded.c | 4 +-- src/backend/utils/adt/arrayfuncs.c | 8 ++---- src/backend/utils/adt/jsonfuncs.c | 16 +++-------- src/backend/utils/adt/xml.c | 6 ++--- src/backend/utils/cache/catcache.c | 4 +-- src/backend/utils/cache/evtcache.c | 4 +-- 
src/backend/utils/cache/plancache.c | 35 ++++++++----------------- src/backend/utils/cache/relcache.c | 18 +++---------- src/backend/utils/cache/ts_cache.c | 4 +-- src/backend/utils/cache/typcache.c | 8 ++---- src/backend/utils/fmgr/funcapi.c | 4 +-- src/backend/utils/hash/dynahash.c | 4 +-- src/backend/utils/init/postinit.c | 4 +-- src/backend/utils/misc/guc-file.l | 4 +-- src/backend/utils/misc/tzparser.c | 4 +-- src/backend/utils/mmgr/aset.c | 6 ++++- src/backend/utils/mmgr/mcxt.c | 11 +++----- src/backend/utils/mmgr/portalmem.c | 12 +++------ src/backend/utils/sort/tuplesort.c | 8 ++---- src/include/utils/memutils.h | 14 +++++++++- src/pl/plperl/plperl.c | 12 +++------ src/pl/plpgsql/src/pl_comp.c | 10 +++---- src/pl/plpgsql/src/pl_exec.c | 8 ++---- src/pl/plpython/plpy_cursorobject.c | 8 ++---- src/pl/plpython/plpy_main.c | 8 ++---- src/pl/plpython/plpy_procedure.c | 4 +-- src/pl/plpython/plpy_spi.c | 8 ++---- src/pl/plpython/plpy_typeio.c | 8 ++---- src/pl/tcl/pltcl.c | 4 +-- 99 files changed, 206 insertions(+), 522 deletions(-) (limited to 'src') diff --git a/contrib/bloom/blinsert.c b/contrib/bloom/blinsert.c index 78eec5c67e..0946aa29ec 100644 --- a/contrib/bloom/blinsert.c +++ b/contrib/bloom/blinsert.c @@ -130,9 +130,7 @@ blbuild(Relation heap, Relation index, IndexInfo *indexInfo) initBloomState(&buildstate.blstate, index); buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext, "Bloom build temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); initCachedPage(&buildstate); /* Do the heap scan */ @@ -204,9 +202,7 @@ blinsert(Relation index, Datum *values, bool *isnull, insertCtx = AllocSetContextCreate(CurrentMemoryContext, "Bloom insert temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldCtx = MemoryContextSwitchTo(insertCtx); diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 9c8e308358..d4f9090f06 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -980,9 +980,7 @@ materializeQueryResult(FunctionCallInfo fcinfo, /* Create short-lived memory context for data conversions */ sinfo.tmpcontext = AllocSetContextCreate(CurrentMemoryContext, "dblink temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* execute query, collecting any tuples into the tuplestore */ res = storeQueryResult(&sinfo, conn, sql); diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index c0491318c0..b42de873e0 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -1061,9 +1061,7 @@ file_acquire_sample_rows(Relation onerel, int elevel, */ tupcontext = AllocSetContextCreate(CurrentMemoryContext, "file_fdw temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* Prepare for sampling rows */ reservoir_init_selection_state(&rstate, targrows); diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c index 3f09a9c718..005701fcd9 100644 --- a/contrib/pg_trgm/trgm_regexp.c +++ b/contrib/pg_trgm/trgm_regexp.c @@ -529,9 +529,7 @@ createTrgmNFA(text *text_re, Oid collation, */ tmpcontext = AllocSetContextCreate(CurrentMemoryContext, "createTrgmNFA temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcontext = 
MemoryContextSwitchTo(tmpcontext); /* diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index b92f29958f..daf0438532 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -1315,14 +1315,10 @@ postgresBeginForeignScan(ForeignScanState *node, int eflags) /* Create contexts for batches of tuples and per-tuple temp workspace. */ fsstate->batch_cxt = AllocSetContextCreate(estate->es_query_cxt, "postgres_fdw tuple data", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); fsstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt, "postgres_fdw temporary data", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); /* * Get info we'll need for converting data fetched from the foreign server @@ -1695,9 +1691,7 @@ postgresBeginForeignModify(ModifyTableState *mtstate, /* Create context for per-tuple temp workspace. */ fmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt, "postgres_fdw temporary data", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); /* Prepare for input conversion of RETURNING results. */ if (fmstate->has_returning) @@ -2294,9 +2288,7 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags) /* Create context for per-tuple temp workspace. */ dmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt, "postgres_fdw temporary data", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); /* Prepare for input conversion of RETURNING results. */ if (dmstate->has_returning) @@ -3481,9 +3473,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel, astate.anl_cxt = CurrentMemoryContext; astate.temp_cxt = AllocSetContextCreate(CurrentMemoryContext, "postgres_fdw temporary data", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); /* * Get the connection to use. 
We do the remote access as the table's diff --git a/contrib/sepgsql/uavc.c b/contrib/sepgsql/uavc.c index 10fa9a0b0b..6e358dbef7 100644 --- a/contrib/sepgsql/uavc.c +++ b/contrib/sepgsql/uavc.c @@ -498,13 +498,11 @@ sepgsql_avc_init(void) int rc; /* - * All the avc stuff shall be allocated on avc_mem_cxt + * All the avc stuff shall be allocated in avc_mem_cxt */ avc_mem_cxt = AllocSetContextCreate(TopMemoryContext, "userspace access vector cache", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); memset(avc_slots, 0, sizeof(avc_slots)); avc_num_caches = 0; avc_lru_hint = 0; diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index c3508f0e13..949e9a78d9 100644 --- a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -102,9 +102,7 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, data = palloc0(sizeof(TestDecodingData)); data->context = AllocSetContextCreate(ctx->context, "text conversion context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); data->include_xids = true; data->include_timestamp = false; data->skip_empty_xacts = false; diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index b194d33cc5..1b45a4c901 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -165,9 +165,7 @@ brininsert(Relation idxRel, Datum *values, bool *nulls, bdesc = brin_build_desc(idxRel); tupcxt = AllocSetContextCreate(CurrentMemoryContext, "brininsert cxt", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(tupcxt); } @@ -347,9 +345,7 @@ bringetbitmap(IndexScanDesc scan, TIDBitmap *tbm) */ perRangeCxt = AllocSetContextCreate(CurrentMemoryContext, "bringetbitmap cxt", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(perRangeCxt); /* @@ -856,9 +852,7 @@ brin_build_desc(Relation rel) cxt = AllocSetContextCreate(CurrentMemoryContext, "brin desc cxt", - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(cxt); tupdesc = RelationGetDescr(rel); @@ -1169,9 +1163,7 @@ union_tuples(BrinDesc *bdesc, BrinMemTuple *a, BrinTuple *b) /* Use our own memory context to avoid retail pfree */ cxt = AllocSetContextCreate(CurrentMemoryContext, "brin union", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(cxt); db = brin_deform_tuple(bdesc, b); MemoryContextSwitchTo(oldcxt); diff --git a/src/backend/access/brin/brin_tuple.c b/src/backend/access/brin/brin_tuple.c index 64b8264959..3caec14d17 100644 --- a/src/backend/access/brin/brin_tuple.c +++ b/src/backend/access/brin/brin_tuple.c @@ -367,9 +367,7 @@ brin_new_memtuple(BrinDesc *brdesc) dtup->bt_context = AllocSetContextCreate(CurrentMemoryContext, "brin dtuple", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); return dtup; } diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c index d9664aa6c6..d213af9074 100644 --- a/src/backend/access/common/printtup.c +++ b/src/backend/access/common/printtup.c @@ -135,9 +135,7 @@ 
printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo) */ myState->tmpcontext = AllocSetContextCreate(CurrentMemoryContext, "printtup", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3) { diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c index fa383719e6..a0afec4f3c 100644 --- a/src/backend/access/gin/ginbtree.c +++ b/src/backend/access/gin/ginbtree.c @@ -348,9 +348,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, */ tmpCxt = AllocSetContextCreate(CurrentMemoryContext, "ginPlaceToPage temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldCxt = MemoryContextSwitchTo(tmpCxt); if (GinPageIsData(page)) diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index 59a63f28d0..6b709dbdb3 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -808,9 +808,7 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, */ opCtx = AllocSetContextCreate(CurrentMemoryContext, "GIN insert cleanup temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldCtx = MemoryContextSwitchTo(opCtx); diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 9f784bf48d..4e09f76eb2 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -372,9 +372,7 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo) */ buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext, "Gin build temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * create a temporary memory context that is used for calling @@ -382,9 +380,7 @@ ginbuild(Relation heap, Relation index, IndexInfo *indexInfo) */ buildstate.funcCtx = AllocSetContextCreate(CurrentMemoryContext, "Gin build temporary context for user-defined function", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); buildstate.accum.ginstate = &buildstate.ginstate; ginInitBA(&buildstate.accum); @@ -495,9 +491,7 @@ gininsert(Relation index, Datum *values, bool *isnull, insertCtx = AllocSetContextCreate(CurrentMemoryContext, "Gin insert temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldCtx = MemoryContextSwitchTo(insertCtx); diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c index c449c1cbc0..bfa86b521d 100644 --- a/src/backend/access/gin/ginscan.c +++ b/src/backend/access/gin/ginscan.c @@ -38,14 +38,10 @@ ginbeginscan(Relation rel, int nkeys, int norderbys) so->nkeys = 0; so->tempCtx = AllocSetContextCreate(CurrentMemoryContext, "Gin scan temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); so->keyCtx = AllocSetContextCreate(CurrentMemoryContext, "Gin scan key context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); initGinState(&so->ginstate, scan->indexRelation); scan->opaque = so; diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index c258478f23..2685a1c373 100644 --- 
a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -526,9 +526,7 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, gvs.tmpCxt = AllocSetContextCreate(CurrentMemoryContext, "Gin vacuum temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); gvs.index = index; gvs.callback = callback; gvs.callback_state = callback_state; diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index b4d310f337..a40f1683dd 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -749,13 +749,12 @@ gin_xlog_startup(void) { opCtx = AllocSetContextCreate(CurrentMemoryContext, "GIN recovery temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } void gin_xlog_cleanup(void) { MemoryContextDelete(opCtx); + opCtx = NULL; } diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 9a417ca2f4..f7f44b49aa 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -105,9 +105,7 @@ createTempGistContext(void) { return AllocSetContextCreate(CurrentMemoryContext, "GiST temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } /* @@ -1411,9 +1409,7 @@ initGISTstate(Relation index) /* Create the memory context that will hold the GISTSTATE */ scanCxt = AllocSetContextCreate(CurrentMemoryContext, "GiST scan context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldCxt = MemoryContextSwitchTo(scanCxt); /* Create and fill in the GISTSTATE */ diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index 6f07cd8d46..ba611ee490 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -140,9 +140,7 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys, /* second time through */ so->queueCxt = AllocSetContextCreate(so->giststate->scanCxt, "GiST queue context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); first_time = false; } else @@ -180,9 +178,7 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys, so->pageDataCxt = AllocSetContextCreate(so->giststate->scanCxt, "GiST page data context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } /* create new, empty RBTree for search queue */ diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index f9ce9861e2..17584ba3ed 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -258,9 +258,7 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm */ rw_cxt = AllocSetContextCreate(CurrentMemoryContext, "Table rewrite", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); old_cxt = MemoryContextSwitchTo(rw_cxt); /* Create and fill in the state struct */ diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 4668c5ee59..128744c5b7 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -763,9 +763,7 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, /* Create a temporary memory context to run 
_bt_pagedel in */ vstate.pagedelcontext = AllocSetContextCreate(CurrentMemoryContext, "_bt_pagedel", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * The outer loop iterates over all index pages except the metapage, in diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 5d335c7f97..063c988dc1 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -232,10 +232,8 @@ _bt_preprocess_array_keys(IndexScanDesc scan) */ if (so->arrayContext == NULL) so->arrayContext = AllocSetContextCreate(CurrentMemoryContext, - "BTree Array Context", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + "BTree array context", + ALLOCSET_SMALL_SIZES); else MemoryContextReset(so->arrayContext); diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c index 44fd644e42..01c8d213f5 100644 --- a/src/backend/access/spgist/spginsert.c +++ b/src/backend/access/spgist/spginsert.c @@ -134,9 +134,7 @@ spgbuild(Relation heap, Relation index, IndexInfo *indexInfo) buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext, "SP-GiST build temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); reltuples = IndexBuildHeapScan(heap, index, indexInfo, true, spgistBuildCallback, (void *) &buildstate); @@ -213,9 +211,7 @@ spginsert(Relation index, Datum *values, bool *isnull, insertCtx = AllocSetContextCreate(CurrentMemoryContext, "SP-GiST insert temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldCtx = MemoryContextSwitchTo(insertCtx); initSpGistState(&spgstate, index); diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index 6f9e223f43..307c6a4ab5 100644 --- a/src/backend/access/spgist/spgscan.c +++ b/src/backend/access/spgist/spgscan.c @@ -193,9 +193,7 @@ spgbeginscan(Relation rel, int keysz, int orderbysz) initSpGistState(&so->state, scan->indexRelation); so->tempCxt = AllocSetContextCreate(CurrentMemoryContext, "SP-GiST search temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* Set up indexTupDesc and xs_itupdesc in case it's an index-only scan */ so->indexTupDesc = scan->xs_itupdesc = RelationGetDescr(rel); diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index 01a4e0f252..e016cdb4d3 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -1014,9 +1014,7 @@ spg_xlog_startup(void) { opCtx = AllocSetContextCreate(CurrentMemoryContext, "SP-GiST temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } void diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 0c8c17af33..e9588a7f69 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -1570,10 +1570,8 @@ mXactCachePut(MultiXactId multi, int nmembers, MultiXactMember *members) /* The cache only lives as long as the current transaction */ debug_elog2(DEBUG2, "CachePut: initializing memory context"); MXactContext = AllocSetContextCreate(TopTransactionContext, - "MultiXact Cache Context", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - 
ALLOCSET_SMALL_MAXSIZE); + "MultiXact cache context", + ALLOCSET_SMALL_SIZES); } entry = (mXactCacheEnt *) diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index 949bfb8b3e..cde0ed300f 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -722,10 +722,8 @@ HandleParallelMessages(void) */ if (hpm_context == NULL) /* first time through? */ hpm_context = AllocSetContextCreate(TopMemoryContext, - "HandleParallelMessages context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "HandleParallelMessages", + ALLOCSET_DEFAULT_SIZES); else MemoryContextReset(hpm_context); @@ -962,10 +960,8 @@ ParallelWorkerMain(Datum main_arg) Assert(CurrentResourceOwner == NULL); CurrentResourceOwner = ResourceOwnerCreate(NULL, "parallel toplevel"); CurrentMemoryContext = AllocSetContextCreate(TopMemoryContext, - "parallel worker", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "Parallel worker", + ALLOCSET_DEFAULT_SIZES); /* * Now that we have a resource owner, we can attach to the dynamic shared diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 23f36ead7e..e11b229792 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -1018,9 +1018,7 @@ AtStart_Memory(void) TopTransactionContext = AllocSetContextCreate(TopMemoryContext, "TopTransactionContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * In a top-level transaction, CurTransactionContext is the same as @@ -1078,9 +1076,7 @@ AtSubStart_Memory(void) */ CurTransactionContext = AllocSetContextCreate(CurTransactionContext, "CurTransactionContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); s->curTransactionContext = CurTransactionContext; /* Make the CurTransactionContext active. 
*/ diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index f13f9c1fa5..acd95aa740 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -4663,9 +4663,7 @@ XLOGShmemInit(void) { walDebugCxt = AllocSetContextCreate(TopMemoryContext, "WAL Debug", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextAllowInCriticalSection(walDebugCxt, true); } #endif diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index c37003a24c..3cd273b19f 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -997,9 +997,7 @@ InitXLogInsert(void) { xloginsert_cxt = AllocSetContextCreate(TopMemoryContext, "WAL record construction", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } if (registered_buffers == NULL) diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c index e518e178bb..8feeae05df 100644 --- a/src/backend/bootstrap/bootstrap.c +++ b/src/backend/bootstrap/bootstrap.c @@ -1069,9 +1069,7 @@ index_register(Oid heap, if (nogc == NULL) nogc = AllocSetContextCreate(NULL, "BootstrapNoGC", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(nogc); diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 8068b82eab..9aa81748ba 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -4747,9 +4747,7 @@ strlist_to_textarray(List *list) memcxt = AllocSetContextCreate(CurrentMemoryContext, "strlist to array", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(memcxt); datums = palloc(sizeof(text *) * list_length(list)); diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 9ac71220a2..c617abb223 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -332,9 +332,7 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params, */ anl_context = AllocSetContextCreate(CurrentMemoryContext, "Analyze", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); caller_context = MemoryContextSwitchTo(anl_context); /* @@ -504,9 +502,7 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params, col_context = AllocSetContextCreate(anl_context, "Analyze Column", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); old_context = MemoryContextSwitchTo(col_context); for (i = 0; i < attr_cnt; i++) @@ -688,9 +684,7 @@ compute_index_stats(Relation onerel, double totalrows, ind_context = AllocSetContextCreate(anl_context, "Analyze Index", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); old_context = MemoryContextSwitchTo(ind_context); for (ind = 0; ind < nindexes; ind++) diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 43bbd90591..dc1f79f594 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -204,9 +204,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) */ cluster_context = AllocSetContextCreate(PortalContext, "Cluster", - ALLOCSET_DEFAULT_MINSIZE, - 
ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Build the list of relations to cluster. Note that this lives in diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index f45b3304ae..5947e72093 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -1340,9 +1340,7 @@ BeginCopy(bool is_from, */ cstate->copycontext = AllocSetContextCreate(CurrentMemoryContext, "COPY", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcontext = MemoryContextSwitchTo(cstate->copycontext); @@ -1895,9 +1893,7 @@ CopyTo(CopyState cstate) */ cstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext, "COPY TO", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); if (cstate->binary) { diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index 50c89b827b..ac4c4ecbe7 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -1018,9 +1018,7 @@ EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata) */ context = AllocSetContextCreate(CurrentMemoryContext, "event trigger context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcontext = MemoryContextSwitchTo(context); /* Call each event trigger. */ @@ -1226,9 +1224,7 @@ EventTriggerBeginCompleteQuery(void) cxt = AllocSetContextCreate(TopMemoryContext, "event trigger state", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); state = MemoryContextAlloc(cxt, sizeof(EventTriggerQueryState)); state->cxt = cxt; slist_init(&(state->SQLDropList)); diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index d14d540b26..85817c6530 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -1903,9 +1903,7 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind, */ private_context = AllocSetContextCreate(PortalContext, "ReindexMultipleTables", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_SMALL_SIZES); /* * Define the search keys to find the objects to reindex. For a schema, we diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index bc2e4af82a..d694cf80be 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -201,9 +201,7 @@ RelationBuildRowSecurity(Relation relation) */ rscxt = AllocSetContextCreate(CacheMemoryContext, "row security descriptor", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); /* * Since rscxt lives under CacheMemoryContext, it is long-lived. Use a diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 99a659a102..9de22a13d7 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -3339,9 +3339,7 @@ afterTriggerAddEvent(AfterTriggerEventList *events, afterTriggers.event_cxt = AllocSetContextCreate(TopTransactionContext, "AfterTriggerEvents", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Chunk size starts at 1KB and is allowed to increase up to 1MB. 
@@ -3780,9 +3778,7 @@ afterTriggerInvokeEvents(AfterTriggerEventList *events, per_tuple_context = AllocSetContextCreate(CurrentMemoryContext, "AfterTriggerTupleContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); for_each_chunk(chunk, *events) { diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 0563e63474..58bbf5548b 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -209,9 +209,7 @@ vacuum(int options, RangeVar *relation, Oid relid, VacuumParams *params, */ vac_context = AllocSetContextCreate(PortalContext, "Vacuum", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * If caller didn't give us a buffer strategy object, make one in the diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index e937cf8e7e..a3bcb100da 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -80,9 +80,7 @@ CreateExecutorState(void) */ qcontext = AllocSetContextCreate(CurrentMemoryContext, "ExecutorState", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Make the EState node within the per-query context. This way, we don't @@ -229,9 +227,7 @@ CreateExprContext(EState *estate) econtext->ecxt_per_tuple_memory = AllocSetContextCreate(estate->es_query_cxt, "ExprContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); econtext->ecxt_param_exec_vals = estate->es_param_exec_vals; econtext->ecxt_param_list_info = estate->es_param_list_info; @@ -300,9 +296,7 @@ CreateStandaloneExprContext(void) econtext->ecxt_per_tuple_memory = AllocSetContextCreate(CurrentMemoryContext, "ExprContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); econtext->ecxt_param_exec_vals = NULL; econtext->ecxt_param_list_info = NULL; diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index e02fba5232..470db5bb4a 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -600,9 +600,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) */ fcontext = AllocSetContextCreate(finfo->fn_mcxt, "SQL function data", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcontext = MemoryContextSwitchTo(fcontext); diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c index a03f6e73fd..5a0f324de0 100644 --- a/src/backend/executor/nodeFunctionscan.c +++ b/src/backend/executor/nodeFunctionscan.c @@ -508,9 +508,7 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags) */ scanstate->argcontext = AllocSetContextCreate(CurrentMemoryContext, "Table function arguments", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); return scanstate; } diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 9ed09a7b0c..6375d9bfda 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -344,15 +344,11 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) */ hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext, "HashTableContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - 
ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt, "HashBatchContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* Allocate data that will live for the life of the hashjoin */ diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c index e76405a56e..39be191c45 100644 --- a/src/backend/executor/nodeRecursiveunion.c +++ b/src/backend/executor/nodeRecursiveunion.c @@ -200,15 +200,11 @@ ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags) rustate->tempContext = AllocSetContextCreate(CurrentMemoryContext, "RecursiveUnion", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); rustate->tableContext = AllocSetContextCreate(CurrentMemoryContext, "RecursiveUnion hash table", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } /* diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index 2d81d46927..633580b436 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -507,9 +507,7 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags) setopstate->tempContext = AllocSetContextCreate(CurrentMemoryContext, "SetOp", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * If hashing, we also need a longer-lived context to store the hash @@ -520,9 +518,7 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags) setopstate->tableContext = AllocSetContextCreate(CurrentMemoryContext, "SetOp hash table", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Tuple table initialization diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index e503494edd..2cf169f956 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -776,16 +776,12 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) sstate->hashtablecxt = AllocSetContextCreate(CurrentMemoryContext, "Subplan HashTable Context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* and a small one for the hash tables to use as temp storage */ sstate->hashtempcxt = AllocSetContextCreate(CurrentMemoryContext, "Subplan HashTable Temp Context", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); /* and a short-lived exprcontext for function evaluation */ sstate->innerecontext = CreateExprContext(estate); /* Silly little array of column numbers 1..n */ diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index 4caae34b97..f45c79232d 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -133,9 +133,7 @@ ExecInitUnique(Unique *node, EState *estate, int eflags) uniquestate->tempContext = AllocSetContextCreate(CurrentMemoryContext, "Unique", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Tuple table initialization diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index d4c88a1f0e..371548ceb3 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -1801,10 +1801,8 @@ 
ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) /* Create long-lived context for storage of partition-local memory etc */ winstate->partcontext = AllocSetContextCreate(CurrentMemoryContext, - "WindowAgg_Partition", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "WindowAgg Partition", + ALLOCSET_DEFAULT_SIZES); /* * Create mid-lived context for aggregate trans values etc. @@ -1814,10 +1812,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) */ winstate->aggcontext = AllocSetContextCreate(CurrentMemoryContext, - "WindowAgg_Aggregates", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "WindowAgg Aggregates", + ALLOCSET_DEFAULT_SIZES); /* * tuple table initialization @@ -2321,10 +2317,8 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, if (OidIsValid(invtransfn_oid)) peraggstate->aggcontext = AllocSetContextCreate(CurrentMemoryContext, - "WindowAgg_AggregatePrivate", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "WindowAgg Per Aggregate", + ALLOCSET_DEFAULT_SIZES); else peraggstate->aggcontext = winstate->aggcontext; diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 38de18006d..38767ae4ce 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -142,14 +142,10 @@ SPI_connect(void) */ _SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext, "SPI Proc", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); _SPI_current->execCxt = AllocSetContextCreate(TopTransactionContext, "SPI Exec", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* ... and switch to procedure's context */ _SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt); @@ -1744,9 +1740,7 @@ spi_dest_startup(DestReceiver *self, int operation, TupleDesc typeinfo) tuptabcxt = AllocSetContextCreate(CurrentMemoryContext, "SPI TupTable", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(tuptabcxt); _SPI_current->tuptable = tuptable = (SPITupleTable *) @@ -2615,14 +2609,11 @@ _SPI_make_plan_non_temp(SPIPlanPtr plan) /* * Create a memory context for the plan, underneath the procedure context. - * We don't expect the plan to be very large, so use smaller-than-default - * alloc parameters. + * We don't expect the plan to be very large. 
*/ plancxt = AllocSetContextCreate(parentcxt, "SPI Plan", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(plancxt); /* Copy the SPI_plan struct and subsidiary data into the new context */ @@ -2689,9 +2680,7 @@ _SPI_save_plan(SPIPlanPtr plan) */ plancxt = AllocSetContextCreate(CurrentMemoryContext, "SPI Plan", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(plancxt); /* Copy the SPI plan into its own context */ diff --git a/src/backend/executor/tqueue.c b/src/backend/executor/tqueue.c index 58d0eeaf0b..344e623c94 100644 --- a/src/backend/executor/tqueue.c +++ b/src/backend/executor/tqueue.c @@ -281,9 +281,7 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self) tqueue->tmpcontext = AllocSetContextCreate(tqueue->mycontext, "tqueue sender temp context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcontext = MemoryContextSwitchTo(tqueue->tmpcontext); } diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c index b64ef8b5c4..764f602aaa 100644 --- a/src/backend/libpq/be-fsstubs.c +++ b/src/backend/libpq/be-fsstubs.c @@ -79,9 +79,7 @@ static MemoryContext fscxt = NULL; if (fscxt == NULL) \ fscxt = AllocSetContextCreate(TopMemoryContext, \ "Filesystem", \ - ALLOCSET_DEFAULT_MINSIZE, \ - ALLOCSET_DEFAULT_INITSIZE, \ - ALLOCSET_DEFAULT_MAXSIZE); \ + ALLOCSET_DEFAULT_SIZES); \ } while (0) diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index 1b4bbce42d..d612c11159 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -387,10 +387,8 @@ tokenize_file(const char *filename, FILE *file, MemoryContext oldcxt; linecxt = AllocSetContextCreate(CurrentMemoryContext, - "tokenize file cxt", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "tokenize_file", + ALLOCSET_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(linecxt); *lines = *line_nums = NIL; @@ -1817,9 +1815,7 @@ load_hba(void) Assert(PostmasterContext); hbacxt = AllocSetContextCreate(PostmasterContext, "hba parser context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(hbacxt); forthree(line, hba_lines, line_num, hba_line_nums, raw_line, hba_raw_lines) { @@ -2195,9 +2191,7 @@ load_ident(void) Assert(PostmasterContext); ident_context = AllocSetContextCreate(PostmasterContext, "ident parser context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(ident_context); forboth(line_cell, ident_lines, num_cell, ident_line_nums) { diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c index 88acebc1f2..fb2ab77422 100644 --- a/src/backend/optimizer/geqo/geqo_eval.c +++ b/src/backend/optimizer/geqo/geqo_eval.c @@ -74,9 +74,7 @@ geqo_eval(PlannerInfo *root, Gene *tour, int num_gene) */ mycontext = AllocSetContextCreate(CurrentMemoryContext, "GEQO", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(mycontext); /* diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 4496fde056..e1baf71e38 100644 --- a/src/backend/optimizer/util/clauses.c +++ 
b/src/backend/optimizer/util/clauses.c @@ -4378,9 +4378,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, */ mycxt = AllocSetContextCreate(CurrentMemoryContext, "inline_function", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(mycxt); /* Fetch the function body */ @@ -4896,9 +4894,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) */ mycxt = AllocSetContextCreate(CurrentMemoryContext, "inline_set_returning_function", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(mycxt); /* diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 3768f50bcf..1a92ca1deb 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -462,9 +462,7 @@ AutoVacLauncherMain(int argc, char *argv[]) */ AutovacMemCxt = AllocSetContextCreate(TopMemoryContext, "Autovacuum Launcher", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(AutovacMemCxt); /* @@ -894,14 +892,10 @@ rebuild_database_list(Oid newdb) newcxt = AllocSetContextCreate(AutovacMemCxt, "AV dblist", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); tmpcxt = AllocSetContextCreate(newcxt, "tmp AV dblist", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(tmpcxt); /* @@ -1111,9 +1105,7 @@ do_start_worker(void) */ tmpcxt = AllocSetContextCreate(CurrentMemoryContext, "Start worker tmp cxt", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = MemoryContextSwitchTo(tmpcxt); /* use fresh stats */ @@ -1911,9 +1903,7 @@ do_autovacuum(void) */ AutovacMemCxt = AllocSetContextCreate(TopMemoryContext, "AV worker", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(AutovacMemCxt); /* @@ -2183,9 +2173,7 @@ do_autovacuum(void) */ PortalContext = AllocSetContextCreate(AutovacMemCxt, "Autovacuum Portal", - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Perform operations on collected tables. 
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c index 00f03d8acb..10020349a2 100644 --- a/src/backend/postmaster/bgwriter.c +++ b/src/backend/postmaster/bgwriter.c @@ -160,9 +160,7 @@ BackgroundWriterMain(void) */ bgwriter_context = AllocSetContextCreate(TopMemoryContext, "Background Writer", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(bgwriter_context); WritebackContextInit(&wb_context, &bgwriter_flush_after); diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index 8d4b3539b1..d702a4864d 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -245,9 +245,7 @@ CheckpointerMain(void) */ checkpointer_context = AllocSetContextCreate(TopMemoryContext, "Checkpointer", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(checkpointer_context); /* diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index 8fa9edbf72..2f99aea791 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -4792,9 +4792,7 @@ pgstat_setup_memcxt(void) if (!pgStatLocalContext) pgStatLocalContext = AllocSetContextCreate(TopMemoryContext, "Statistics snapshot", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); } diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 05f3f14e35..a28e215e2d 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -583,9 +583,7 @@ PostmasterMain(int argc, char *argv[]) */ PostmasterContext = AllocSetContextCreate(TopMemoryContext, "Postmaster", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(PostmasterContext); /* Initialize paths to installation files */ diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c index 228190a836..11ec56aebb 100644 --- a/src/backend/postmaster/walwriter.c +++ b/src/backend/postmaster/walwriter.c @@ -142,9 +142,7 @@ WalWriterMain(void) */ walwriter_context = AllocSetContextCreate(TopMemoryContext, "Wal Writer", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(walwriter_context); /* diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index ecf9a03318..1512be5322 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -127,10 +127,8 @@ StartupDecodingContext(List *output_plugin_options, slot = MyReplicationSlot; context = AllocSetContextCreate(CurrentMemoryContext, - "Logical Decoding Context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "Logical decoding context", + ALLOCSET_DEFAULT_SIZES); old_context = MemoryContextSwitchTo(context); ctx = palloc0(sizeof(LogicalDecodingContext)); diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 213ce34674..43b584cf7e 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -232,9 +232,7 @@ ReorderBufferAllocate(void) /* allocate memory in own context, to have better accountability */ new_ctx = 
AllocSetContextCreate(CurrentMemoryContext, "ReorderBuffer", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); buffer = (ReorderBuffer *) MemoryContextAlloc(new_ctx, sizeof(ReorderBuffer)); @@ -2317,7 +2315,7 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn, if (write(fd, rb->outbuf, ondisk->size) != ondisk->size) { - int save_errno = errno; + int save_errno = errno; CloseTransientFile(fd); errno = save_errno; diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index b5fa3dbbc0..8b59fc5a16 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -289,9 +289,7 @@ AllocateSnapshotBuilder(ReorderBuffer *reorder, /* allocate memory in own context, to have better accountability */ context = AllocSetContextCreate(CurrentMemoryContext, "snapshot builder context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcontext = MemoryContextSwitchTo(context); builder = palloc0(sizeof(SnapBuild)); diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index a0dba194a6..1ea2a5cfdf 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -1309,9 +1309,7 @@ exec_replication_command(const char *cmd_string) cmd_context = AllocSetContextCreate(CurrentMemoryContext, "Replication command context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); old_context = MemoryContextSwitchTo(cmd_context); replication_scanner_init(cmd_string); diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 53981794b9..ca2388789d 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -511,9 +511,7 @@ GetLocalBufferStorage(void) LocalBufferContext = AllocSetContextCreate(TopMemoryContext, "LocalBufferContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* Start with a 16-buffer request; subsequent ones double each time */ num_bufs = Max(num_bufs_in_block * 2, 16); diff --git a/src/backend/storage/file/reinit.c b/src/backend/storage/file/reinit.c index 7e8138b42a..6b98131e54 100644 --- a/src/backend/storage/file/reinit.c +++ b/src/backend/storage/file/reinit.c @@ -65,9 +65,7 @@ ResetUnloggedRelations(int op) */ tmpctx = AllocSetContextCreate(CurrentMemoryContext, "ResetUnloggedRelations", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldctx = MemoryContextSwitchTo(tmpctx); /* diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 303e99c65b..53b45d72fe 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -285,9 +285,7 @@ init_lwlock_stats(void) */ lwlock_stats_cxt = AllocSetContextCreate(TopMemoryContext, "LWLock stats", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true); MemSet(&ctl, 0, sizeof(ctl)); diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index f329d1538c..1287142918 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -208,9 +208,7 @@ mdinit(void) { MdCxt = 
AllocSetContextCreate(TopMemoryContext, "MdSmgr", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Create pending-operations hashtable if we need it. Currently, we need @@ -231,10 +229,8 @@ mdinit(void) * practice. */ pendingOpsCxt = AllocSetContextCreate(MdCxt, - "Pending Ops Context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "Pending ops context", + ALLOCSET_DEFAULT_SIZES); MemoryContextAllowInCriticalSection(pendingOpsCxt, true); MemSet(&hash_ctl, 0, sizeof(hash_ctl)); diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index b185c1b5eb..98ccbbb4d1 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -1253,9 +1253,7 @@ exec_parse_message(const char *query_string, /* string to execute */ unnamed_stmt_context = AllocSetContextCreate(MessageContext, "unnamed prepared statement", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcontext = MemoryContextSwitchTo(unnamed_stmt_context); } @@ -3794,9 +3792,7 @@ PostgresMain(int argc, char *argv[], */ MessageContext = AllocSetContextCreate(TopMemoryContext, "MessageContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Remember stand-alone backend startup time diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c index 821f611e4f..9c7ba85eb5 100644 --- a/src/backend/tsearch/spell.c +++ b/src/backend/tsearch/spell.c @@ -92,9 +92,7 @@ NIStartBuild(IspellDict *Conf) */ Conf->buildCxt = AllocSetContextCreate(CurTransactionContext, "Ispell dictionary init context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } /* diff --git a/src/backend/utils/adt/array_expanded.c b/src/backend/utils/adt/array_expanded.c index 7dd7e3fbcb..94eb19d45d 100644 --- a/src/backend/utils/adt/array_expanded.c +++ b/src/backend/utils/adt/array_expanded.c @@ -63,9 +63,7 @@ expand_array(Datum arraydatum, MemoryContext parentcontext, */ objcxt = AllocSetContextCreate(parentcontext, "expanded array", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_START_SMALL_SIZES); /* Set up expanded array header */ eah = (ExpandedArrayHeader *) diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c index 8fbd850146..1db7bf0a35 100644 --- a/src/backend/utils/adt/arrayfuncs.c +++ b/src/backend/utils/adt/arrayfuncs.c @@ -4957,9 +4957,7 @@ initArrayResult(Oid element_type, MemoryContext rcontext, bool subcontext) if (subcontext) arr_context = AllocSetContextCreate(rcontext, "accumArrayResult", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); astate = (ArrayBuildState *) MemoryContextAlloc(arr_context, sizeof(ArrayBuildState)); @@ -5161,9 +5159,7 @@ initArrayResultArr(Oid array_type, Oid element_type, MemoryContext rcontext, if (subcontext) arr_context = AllocSetContextCreate(rcontext, "accumArrayResultArr", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* Note we initialize all fields to zero */ astate = (ArrayBuildStateArr *) diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index a80a20ecee..996007d483 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ 
b/src/backend/utils/adt/jsonfuncs.c @@ -1503,9 +1503,7 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text) tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "jsonb_each temporary cxt", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); it = JsonbIteratorInit(&jb->root); @@ -1641,9 +1639,7 @@ each_worker(FunctionCallInfo fcinfo, bool as_text) state->lex = lex; state->tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "json_each temporary cxt", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); pg_parse_json(lex, sem); @@ -1822,9 +1818,7 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "jsonb_array_elements temporary cxt", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); it = JsonbIteratorInit(&jb->root); @@ -1962,9 +1956,7 @@ elements_worker(FunctionCallInfo fcinfo, const char *funcname, bool as_text) state->lex = lex; state->tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "json_array_elements temporary cxt", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); pg_parse_json(lex, sem); diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 7ed5bcb93d..b144920ec6 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -1455,10 +1455,8 @@ xml_memory_init(void) /* Create memory context if not there already */ if (LibxmlContext == NULL) LibxmlContext = AllocSetContextCreate(TopMemoryContext, - "LibxmlContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "Libxml context", + ALLOCSET_DEFAULT_SIZES); /* Re-establish the callbacks even if already set */ xmlMemSetup(xml_pfree, xml_palloc, xml_repalloc, xml_pstrdup); diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index e929616c97..db7099fc0e 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -536,9 +536,7 @@ CreateCacheMemoryContext(void) if (!CacheMemoryContext) CacheMemoryContext = AllocSetContextCreate(TopMemoryContext, "CacheMemoryContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c index 6fc1df880b..8a620a51c8 100644 --- a/src/backend/utils/cache/evtcache.c +++ b/src/backend/utils/cache/evtcache.c @@ -105,9 +105,7 @@ BuildEventTriggerCache(void) EventTriggerCacheContext = AllocSetContextCreate(CacheMemoryContext, "EventTriggerCache", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); CacheRegisterSyscacheCallback(EVENTTRIGGEROID, InvalidateEventCacheCallback, (Datum) 0); diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index f42a62d500..c96a86500a 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -159,15 +159,13 @@ CreateCachedPlan(Node *raw_parse_tree, /* * Make a dedicated memory context for the CachedPlanSource and its * permanent subsidiary data. It's probably not going to be large, but - * just in case, use the default maxsize parameter. 
Initially it's a - * child of the caller's context (which we assume to be transient), so - * that it will be cleaned up on error. + * just in case, allow it to grow large. Initially it's a child of the + * caller's context (which we assume to be transient), so that it will be + * cleaned up on error. */ source_context = AllocSetContextCreate(CurrentMemoryContext, "CachedPlanSource", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_START_SMALL_SIZES); /* * Create and fill the CachedPlanSource struct within the new context. @@ -359,9 +357,7 @@ CompleteCachedPlan(CachedPlanSource *plansource, /* Again, it's a good bet the querytree_context can be small */ querytree_context = AllocSetContextCreate(source_context, "CachedPlanQuery", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_START_SMALL_SIZES); MemoryContextSwitchTo(querytree_context); querytree_list = (List *) copyObject(querytree_list); } @@ -733,9 +729,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource) */ querytree_context = AllocSetContextCreate(CurrentMemoryContext, "CachedPlanQuery", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_START_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(querytree_context); qlist = (List *) copyObject(tlist); @@ -955,17 +949,14 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist, /* * Normally we make a dedicated memory context for the CachedPlan and its * subsidiary data. (It's probably not going to be large, but just in - * case, use the default maxsize parameter. It's transient for the - * moment.) But for a one-shot plan, we just leave it in the caller's - * memory context. + * case, allow it to grow large. It's transient for the moment.) But for + * a one-shot plan, we just leave it in the caller's memory context. */ if (!plansource->is_oneshot) { plan_context = AllocSetContextCreate(CurrentMemoryContext, "CachedPlan", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_START_SMALL_SIZES); /* * Copy plan into the new context. @@ -1351,9 +1342,7 @@ CopyCachedPlan(CachedPlanSource *plansource) source_context = AllocSetContextCreate(CurrentMemoryContext, "CachedPlanSource", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_START_SMALL_SIZES); oldcxt = MemoryContextSwitchTo(source_context); @@ -1384,9 +1373,7 @@ CopyCachedPlan(CachedPlanSource *plansource) querytree_context = AllocSetContextCreate(source_context, "CachedPlanQuery", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_START_SMALL_SIZES); MemoryContextSwitchTo(querytree_context); newsource->query_list = (List *) copyObject(plansource->query_list); newsource->relationOids = (List *) copyObject(plansource->relationOids); diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 8d2ad018bb..79e0b1ff48 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -659,14 +659,11 @@ RelationBuildRuleLock(Relation relation) int maxlocks; /* - * Make the private context. Parameters are set on the assumption that - * it'll probably not contain much data. + * Make the private context. Assume it'll not contain much data. 
*/ rulescxt = AllocSetContextCreate(CacheMemoryContext, RelationGetRelationName(relation), - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); relation->rd_rulescxt = rulescxt; /* @@ -1248,15 +1245,10 @@ RelationInitIndexAccessInfo(Relation relation) * Make the private context to hold index access info. The reason we need * a context, and not just a couple of pallocs, is so that we won't leak * any subsidiary info attached to fmgr lookup records. - * - * Context parameters are set on the assumption that it'll probably not - * contain much data. */ indexcxt = AllocSetContextCreate(CacheMemoryContext, RelationGetRelationName(relation), - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); relation->rd_indexcxt = indexcxt; /* @@ -4948,9 +4940,7 @@ load_relcache_init_file(bool shared) */ indexcxt = AllocSetContextCreate(CacheMemoryContext, RelationGetRelationName(rel), - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); rel->rd_indexcxt = indexcxt; /* diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c index 5e4de431dd..50f17438fb 100644 --- a/src/backend/utils/cache/ts_cache.c +++ b/src/backend/utils/cache/ts_cache.c @@ -295,9 +295,7 @@ lookup_ts_dictionary_cache(Oid dictId) /* Create private memory context the first time through */ saveCtx = AllocSetContextCreate(CacheMemoryContext, NameStr(dict->dictname), - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); } else { diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c index ea6f787a52..9150fe832f 100644 --- a/src/backend/utils/cache/typcache.c +++ b/src/backend/utils/cache/typcache.c @@ -756,9 +756,7 @@ load_domaintype_info(TypeCacheEntry *typentry) cxt = AllocSetContextCreate(CurrentMemoryContext, "Domain constraints", - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); dcc = (DomainConstraintCache *) MemoryContextAlloc(cxt, sizeof(DomainConstraintCache)); dcc->constraints = NIL; @@ -841,9 +839,7 @@ load_domaintype_info(TypeCacheEntry *typentry) cxt = AllocSetContextCreate(CurrentMemoryContext, "Domain constraints", - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); dcc = (DomainConstraintCache *) MemoryContextAlloc(cxt, sizeof(DomainConstraintCache)); dcc->constraints = NIL; diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c index 5d179ae8a8..5d49fe5b50 100644 --- a/src/backend/utils/fmgr/funcapi.c +++ b/src/backend/utils/fmgr/funcapi.c @@ -73,9 +73,7 @@ init_MultiFuncCall(PG_FUNCTION_ARGS) */ multi_call_ctx = AllocSetContextCreate(fcinfo->flinfo->fn_mcxt, "SRF multi-call context", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); /* * Allocate suitably long-lived space and zero it diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index d35052aea6..bb835ba946 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -327,9 +327,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags) CurrentDynaHashCxt = TopMemoryContext; CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt, tabname, - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + 
ALLOCSET_DEFAULT_SIZES); } /* Initialize the hash header, plus a copy of the table name */ diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index d17197267e..824d5abf11 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -201,9 +201,7 @@ PerformAuthentication(Port *port) if (PostmasterContext == NULL) PostmasterContext = AllocSetContextCreate(TopMemoryContext, "Postmaster", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); if (!load_hba()) { diff --git a/src/backend/utils/misc/guc-file.l b/src/backend/utils/misc/guc-file.l index 48052f9320..dae5015a32 100644 --- a/src/backend/utils/misc/guc-file.l +++ b/src/backend/utils/misc/guc-file.l @@ -145,9 +145,7 @@ ProcessConfigFile(GucContext context) */ config_cxt = AllocSetContextCreate(CurrentMemoryContext, "config file processing", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); caller_cxt = MemoryContextSwitchTo(config_cxt); /* diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c index a960343baa..a053e22439 100644 --- a/src/backend/utils/misc/tzparser.c +++ b/src/backend/utils/misc/tzparser.c @@ -450,9 +450,7 @@ load_tzoffsets(const char *filename) */ tmpContext = AllocSetContextCreate(CurrentMemoryContext, "TZParserMemory", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); oldContext = MemoryContextSwitchTo(tmpContext); /* Initialize array at a reasonable size */ diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c index d26991ed23..43c85234ce 100644 --- a/src/backend/utils/mmgr/aset.c +++ b/src/backend/utils/mmgr/aset.c @@ -427,10 +427,14 @@ randomize_mem(char *ptr, size_t size) * Create a new AllocSet context. * * parent: parent context, or NULL if top-level context - * name: name of context (for debugging --- string will be copied) + * name: name of context (for debugging only, need not be unique) * minContextSize: minimum context size * initBlockSize: initial allocation block size * maxBlockSize: maximum allocation block size + * + * Notes: the name string will be copied into context-lifespan storage. + * Most callers should abstract the context size parameters using a macro + * such as ALLOCSET_DEFAULT_SIZES. */ MemoryContext AllocSetContextCreate(MemoryContext parent, diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index 6b7894213c..5cf388f9d6 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -91,16 +91,13 @@ MemoryContextInit(void) AssertState(TopMemoryContext == NULL); /* - * Initialize TopMemoryContext as an AllocSetContext with slow growth rate - * --- we don't really expect much to be allocated in it. - * - * (There is special-case code in MemoryContextCreate() for this call.) + * First, initialize TopMemoryContext, which will hold the MemoryContext + * nodes for all other contexts. (There is special-case code in + * MemoryContextCreate() to handle this call.) 
*/ TopMemoryContext = AllocSetContextCreate((MemoryContext) NULL, "TopMemoryContext", - 0, - 8 * 1024, - 8 * 1024); + ALLOCSET_DEFAULT_SIZES); /* * Not having any other place to point CurrentMemoryContext, make it point diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 425cae12ea..8286800380 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -108,9 +108,7 @@ EnablePortalManager(void) PortalMemory = AllocSetContextCreate(TopMemoryContext, "PortalMemory", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); ctl.keysize = MAX_PORTALNAME_LEN; ctl.entrysize = sizeof(PortalHashEnt); @@ -221,9 +219,7 @@ CreatePortal(const char *name, bool allowDup, bool dupSilent) /* initialize portal heap context; typically it won't store much */ portal->heap = AllocSetContextCreate(PortalMemory, "PortalHeapMemory", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); /* create a resource owner for the portal */ portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner, @@ -361,9 +357,7 @@ PortalCreateHoldStore(Portal portal) portal->holdContext = AllocSetContextCreate(PortalMemory, "PortalHoldContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Create the tuple store, selecting cross-transaction temp files, and diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index ae384a8546..c8fbcf8fcc 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -654,9 +654,7 @@ tuplesort_begin_common(int workMem, bool randomAccess) */ sortcontext = AllocSetContextCreate(CurrentMemoryContext, "TupleSort main", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Caller tuple (e.g. IndexTuple) memory context. @@ -669,9 +667,7 @@ tuplesort_begin_common(int workMem, bool randomAccess) */ tuplecontext = AllocSetContextCreate(sortcontext, "Caller tuples", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); /* * Make the Tuplesortstate within the per-sort context. This way, we diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h index ae07705b6b..e6334a2038 100644 --- a/src/include/utils/memutils.h +++ b/src/include/utils/memutils.h @@ -142,14 +142,26 @@ extern MemoryContext AllocSetContextCreate(MemoryContext parent, #define ALLOCSET_DEFAULT_MINSIZE 0 #define ALLOCSET_DEFAULT_INITSIZE (8 * 1024) #define ALLOCSET_DEFAULT_MAXSIZE (8 * 1024 * 1024) +#define ALLOCSET_DEFAULT_SIZES \ + ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE /* - * Recommended alloc parameters for "small" contexts that are not expected + * Recommended alloc parameters for "small" contexts that are never expected * to contain much data (for example, a context to contain a query plan). */ #define ALLOCSET_SMALL_MINSIZE 0 #define ALLOCSET_SMALL_INITSIZE (1 * 1024) #define ALLOCSET_SMALL_MAXSIZE (8 * 1024) +#define ALLOCSET_SMALL_SIZES \ + ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_SMALL_MAXSIZE + +/* + * Recommended alloc parameters for contexts that should start out small, + * but might sometimes grow big. 
+ */ +#define ALLOCSET_START_SMALL_SIZES \ + ALLOCSET_SMALL_MINSIZE, ALLOCSET_SMALL_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE + /* * Threshold above which a request in an AllocSet context is certain to be diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c index 82bde6e442..2cd761496d 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -3205,9 +3205,7 @@ plperl_return_next(SV *sv) current_call_data->tmp_cxt = AllocSetContextCreate(CurrentMemoryContext, "PL/Perl return_next temporary cxt", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } old_cxt = MemoryContextSwitchTo(current_call_data->tmp_cxt); @@ -3460,9 +3458,7 @@ plperl_spi_prepare(char *query, int argc, SV **argv) ************************************************************/ plan_cxt = AllocSetContextCreate(TopMemoryContext, "PL/Perl spi_prepare query", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); MemoryContextSwitchTo(plan_cxt); qdesc = (plperl_query_desc *) palloc0(sizeof(plperl_query_desc)); snprintf(qdesc->qname, sizeof(qdesc->qname), "%p", qdesc); @@ -3479,9 +3475,7 @@ plperl_spi_prepare(char *query, int argc, SV **argv) ************************************************************/ work_cxt = AllocSetContextCreate(CurrentMemoryContext, "PL/Perl spi_prepare workspace", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemoryContextSwitchTo(work_cxt); /************************************************************ diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index 38aa030303..4ceb402c92 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -340,9 +340,7 @@ do_compile(FunctionCallInfo fcinfo, */ func_cxt = AllocSetContextCreate(TopMemoryContext, "PL/pgSQL function context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); plpgsql_compile_tmp_cxt = MemoryContextSwitchTo(func_cxt); function->fn_signature = format_procedure(fcinfo->flinfo->fn_oid); @@ -829,10 +827,8 @@ plpgsql_compile_inline(char *proc_source) * its own memory context, so it can be reclaimed easily. 
*/ func_cxt = AllocSetContextCreate(CurrentMemoryContext, - "PL/pgSQL function context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + "PL/pgSQL inline code context", + ALLOCSET_DEFAULT_SIZES); plpgsql_compile_tmp_cxt = MemoryContextSwitchTo(func_cxt); function->fn_signature = pstrdup(func_name); diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index f9b3b22d08..2f8b6ff2f2 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -1092,9 +1092,7 @@ get_stmt_mcontext(PLpgSQL_execstate *estate) estate->stmt_mcontext = AllocSetContextCreate(estate->stmt_mcontext_parent, "PLpgSQL per-statement data", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); } return estate->stmt_mcontext; } @@ -3479,9 +3477,7 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate, { shared_cast_context = AllocSetContextCreate(TopMemoryContext, "PLpgSQL cast info", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(plpgsql_CastHashKey); ctl.entrysize = sizeof(plpgsql_CastHashEntry); diff --git a/src/pl/plpython/plpy_cursorobject.c b/src/pl/plpython/plpy_cursorobject.c index 44ba76e765..0e17a03ce7 100644 --- a/src/pl/plpython/plpy_cursorobject.c +++ b/src/pl/plpython/plpy_cursorobject.c @@ -116,9 +116,7 @@ PLy_cursor_query(const char *query) cursor->closed = false; cursor->mcxt = AllocSetContextCreate(TopMemoryContext, "PL/Python cursor context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); PLy_typeinfo_init(&cursor->result, cursor->mcxt); oldcontext = CurrentMemoryContext; @@ -210,9 +208,7 @@ PLy_cursor_plan(PyObject *ob, PyObject *args) cursor->closed = false; cursor->mcxt = AllocSetContextCreate(TopMemoryContext, "PL/Python cursor context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); PLy_typeinfo_init(&cursor->result, cursor->mcxt); oldcontext = CurrentMemoryContext; diff --git a/src/pl/plpython/plpy_main.c b/src/pl/plpython/plpy_main.c index f95039406a..860b804e54 100644 --- a/src/pl/plpython/plpy_main.c +++ b/src/pl/plpython/plpy_main.c @@ -315,9 +315,7 @@ plpython_inline_handler(PG_FUNCTION_ARGS) MemSet(&proc, 0, sizeof(PLyProcedure)); proc.mcxt = AllocSetContextCreate(TopMemoryContext, "__plpython_inline_block", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); proc.pyname = MemoryContextStrdup(proc.mcxt, "__plpython_inline_block"); proc.langid = codeblock->langOid; proc.result.out.d.typoid = VOIDOID; @@ -416,9 +414,7 @@ PLy_get_scratch_context(PLyExecutionContext *context) context->scratch_ctx = AllocSetContextCreate(TopTransactionContext, "PL/Python scratch context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); return context->scratch_ctx; } diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c index 70b75f5d95..2b249b029d 100644 --- a/src/pl/plpython/plpy_procedure.c +++ b/src/pl/plpython/plpy_procedure.c @@ -167,9 +167,7 @@ PLy_procedure_create(HeapTuple procTup, Oid fn_oid, bool is_trigger) cxt = AllocSetContextCreate(TopMemoryContext, procName, - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcxt = 
MemoryContextSwitchTo(cxt); diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c index 1e965cf85f..09ee06d9e8 100644 --- a/src/pl/plpython/plpy_spi.c +++ b/src/pl/plpython/plpy_spi.c @@ -66,9 +66,7 @@ PLy_spi_prepare(PyObject *self, PyObject *args) plan->mcxt = AllocSetContextCreate(TopMemoryContext, "PL/Python plan context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); oldcontext = MemoryContextSwitchTo(plan->mcxt); nargs = list ? PySequence_Length(list) : 0; @@ -413,9 +411,7 @@ PLy_spi_execute_fetch_result(SPITupleTable *tuptable, uint64 rows, int status) cxt = AllocSetContextCreate(CurrentMemoryContext, "PL/Python temp context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); PLy_typeinfo_init(&args, cxt); oldcontext = CurrentMemoryContext; diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c index 7ad7a4400a..70f2e6d20f 100644 --- a/src/pl/plpython/plpy_typeio.c +++ b/src/pl/plpython/plpy_typeio.c @@ -756,9 +756,7 @@ PLyObject_ToComposite(PLyObToDatum *arg, int32 typmod, PyObject *plrv) /* Create a dummy PLyTypeInfo */ cxt = AllocSetContextCreate(CurrentMemoryContext, "PL/Python temp context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemSet(&info, 0, sizeof(PLyTypeInfo)); PLy_typeinfo_init(&info, cxt); /* Mark it as needing output routines lookup */ @@ -923,9 +921,7 @@ PLyString_ToComposite(PLyTypeInfo *info, TupleDesc desc, PyObject *string) /* Create a dummy PLyTypeInfo */ cxt = AllocSetContextCreate(CurrentMemoryContext, "PL/Python temp context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_SIZES); MemSet(&locinfo, 0, sizeof(PLyTypeInfo)); PLy_typeinfo_init(&locinfo, cxt); diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c index 6ee4153ae6..2a335aa219 100644 --- a/src/pl/tcl/pltcl.c +++ b/src/pl/tcl/pltcl.c @@ -2331,9 +2331,7 @@ pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp, ************************************************************/ plan_cxt = AllocSetContextCreate(TopMemoryContext, "PL/TCL spi_prepare query", - ALLOCSET_SMALL_MINSIZE, - ALLOCSET_SMALL_INITSIZE, - ALLOCSET_SMALL_MAXSIZE); + ALLOCSET_SMALL_SIZES); MemoryContextSwitchTo(plan_cxt); qdesc = (pltcl_query_desc *) palloc0(sizeof(pltcl_query_desc)); snprintf(qdesc->qname, sizeof(qdesc->qname), "%p", qdesc); -- cgit v1.2.3 From b899ccbb49cbcf8431b3af43fcf3faf91e6a28c6 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 28 Aug 2016 17:44:29 -0400 Subject: Fix stray reference to the old genbki.sh script. Per Tomas Vondra. --- src/include/catalog/pg_foreign_table.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src') diff --git a/src/include/catalog/pg_foreign_table.h b/src/include/catalog/pg_foreign_table.h index e7b925b7c9..34690f3808 100644 --- a/src/include/catalog/pg_foreign_table.h +++ b/src/include/catalog/pg_foreign_table.h @@ -9,7 +9,7 @@ * src/include/catalog/pg_foreign_table.h * * NOTES - * the genbki.sh script reads this file and generates .bki + * the genbki.pl script reads this file and generates .bki * information from the DATA() statements. 
* *------------------------------------------------------------------------- -- cgit v1.2.3 From bab7823a49bb210b8920ae59e5126d27e4d63833 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Mon, 29 Aug 2016 14:34:58 +0900 Subject: Fix pg_xlogdump so that it handles cross-page XLP_FIRST_IS_CONTRECORD record. Previously pg_xlogdump failed to dump the contents of the WAL file if the file starts with the continuation WAL record which spans more than one pages. Since pg_xlogdump assumed that the continuation record always fits on a page, it could not find the valid WAL record to start reading from in that case. This patch changes pg_xlogdump so that it can handle a continuation WAL record which crosses a page boundary and find the valid record to start reading from. Back-patch to 9.3 where pg_xlogdump was introduced. Author: Pavan Deolasee Reviewed-By: Michael Paquier and Craig Ringer Discussion: CABOikdPsPByMiG6J01DKq6om2+BNkxHTPkOyqHM2a4oYwGKsqQ@mail.gmail.com --- src/backend/access/transam/xlogreader.c | 91 +++++++++++++++++++++++---------- 1 file changed, 64 insertions(+), 27 deletions(-) (limited to 'src') diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index dcf747c633..f2da505892 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -866,46 +866,83 @@ XLogRecPtr XLogFindNextRecord(XLogReaderState *state, XLogRecPtr RecPtr) { XLogReaderState saved_state = *state; - XLogRecPtr targetPagePtr; XLogRecPtr tmpRecPtr; - int targetRecOff; XLogRecPtr found = InvalidXLogRecPtr; - uint32 pageHeaderSize; XLogPageHeader header; - int readLen; char *errormsg; Assert(!XLogRecPtrIsInvalid(RecPtr)); - targetRecOff = RecPtr % XLOG_BLCKSZ; + /* + * skip over potential continuation data, keeping in mind that it may span + * multiple pages + */ + tmpRecPtr = RecPtr; + while (true) + { + XLogRecPtr targetPagePtr; + int targetRecOff; + uint32 pageHeaderSize; + int readLen; - /* scroll back to page boundary */ - targetPagePtr = RecPtr - targetRecOff; + /* + * Compute targetRecOff. It should typically be equal or greater than + * short page-header since a valid record can't start anywhere before + * that, except when caller has explicitly specified the offset that + * falls somewhere there or when we are skipping multi-page + * continuation record. 
It doesn't matter though because + * ReadPageInternal() is prepared to handle that and will read at least + * short page-header worth of data + */ + targetRecOff = tmpRecPtr % XLOG_BLCKSZ; - /* Read the page containing the record */ - readLen = ReadPageInternal(state, targetPagePtr, targetRecOff); - if (readLen < 0) - goto err; + /* scroll back to page boundary */ + targetPagePtr = tmpRecPtr - targetRecOff; - header = (XLogPageHeader) state->readBuf; + /* Read the page containing the record */ + readLen = ReadPageInternal(state, targetPagePtr, targetRecOff); + if (readLen < 0) + goto err; - pageHeaderSize = XLogPageHeaderSize(header); + header = (XLogPageHeader) state->readBuf; - /* make sure we have enough data for the page header */ - readLen = ReadPageInternal(state, targetPagePtr, pageHeaderSize); - if (readLen < 0) - goto err; + pageHeaderSize = XLogPageHeaderSize(header); - /* skip over potential continuation data */ - if (header->xlp_info & XLP_FIRST_IS_CONTRECORD) - { - /* record headers are MAXALIGN'ed */ - tmpRecPtr = targetPagePtr + pageHeaderSize - + MAXALIGN(header->xlp_rem_len); - } - else - { - tmpRecPtr = targetPagePtr + pageHeaderSize; + /* make sure we have enough data for the page header */ + readLen = ReadPageInternal(state, targetPagePtr, pageHeaderSize); + if (readLen < 0) + goto err; + + /* skip over potential continuation data */ + if (header->xlp_info & XLP_FIRST_IS_CONTRECORD) + { + /* + * If the length of the remaining continuation data is more than + * what can fit in this page, the continuation record crosses over + * this page. Read the next page and try again. xlp_rem_len in the + * next page header will contain the remaining length of the + * continuation data + * + * Note that record headers are MAXALIGN'ed + */ + if (MAXALIGN(header->xlp_rem_len) > (XLOG_BLCKSZ - pageHeaderSize)) + tmpRecPtr = targetPagePtr + XLOG_BLCKSZ; + else + { + /* + * The previous continuation record ends in this page. Set + * tmpRecPtr to point to the first valid record + */ + tmpRecPtr = targetPagePtr + pageHeaderSize + + MAXALIGN(header->xlp_rem_len); + break; + } + } + else + { + tmpRecPtr = targetPagePtr + pageHeaderSize; + break; + } } /* -- cgit v1.2.3 From bd082231edbaf25626a023913394b611fe7928e8 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Mon, 29 Aug 2016 16:06:40 +0900 Subject: Fix typos in comments. --- src/backend/access/brin/brin_inclusion.c | 2 +- src/timezone/localtime.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/backend/access/brin/brin_inclusion.c b/src/backend/access/brin/brin_inclusion.c index 0ae7a72996..c647be0ced 100644 --- a/src/backend/access/brin/brin_inclusion.c +++ b/src/backend/access/brin/brin_inclusion.c @@ -431,7 +431,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS) * It is straightforward to support the equality strategies with * the contains operator. Generally, inequality strategies do not * make much sense for the types which will be used with the - * inclusion BRIN family of opclasses, but is is possible to + * inclusion BRIN family of opclasses, but is possible to * implement them with logical negation of the left-of and * right-of operators. 
* diff --git a/src/timezone/localtime.c b/src/timezone/localtime.c index a14215d6bd..d004e5ebe2 100644 --- a/src/timezone/localtime.c +++ b/src/timezone/localtime.c @@ -489,7 +489,7 @@ tzloadbody(char const * name, char *canonname, struct state * sp, bool doextend, } /* - * If type 0 is is unused in transitions, it's the type to use for early + * If type 0 is unused in transitions, it's the type to use for early * times. */ for (i = 0; i < sp->timecnt; ++i) -- cgit v1.2.3 From 49340627f9821e447f135455d942f7d5e96cae6d Mon Sep 17 00:00:00 2001 From: Simon Riggs Date: Mon, 29 Aug 2016 12:16:18 +0100 Subject: Fix pg_receivexlog --synchronous Make pg_receivexlog work correctly with --synchronous without slots Backpatch to 9.5 Gabriele Bartolini, reviewed by Michael Paquier and Simon Riggs --- src/bin/pg_basebackup/receivelog.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'src') diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c index 595213f042..062730b6b4 100644 --- a/src/bin/pg_basebackup/receivelog.c +++ b/src/bin/pg_basebackup/receivelog.c @@ -503,26 +503,28 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream) if (!CheckServerVersionForStreaming(conn)) return false; + /* + * Decide whether we want to report the flush position. If we report + * the flush position, the primary will know what WAL we'll + * possibly re-request, and it can then remove older WAL safely. + * We must always do that when we are using slots. + * + * Reporting the flush position makes one eligible as a synchronous + * replica. People shouldn't include generic names in + * synchronous_standby_names, but we've protected them against it so + * far, so let's continue to do so unless specifically requested. + */ if (replication_slot != NULL) { - /* - * Report the flush position, so the primary can know what WAL we'll - * possibly re-request, and remove older WAL safely. - * - * We only report it when a slot has explicitly been used, because - * reporting the flush position makes one eligible as a synchronous - * replica. People shouldn't include generic names in - * synchronous_standby_names, but we've protected them against it so - * far, so let's continue to do so in the situations when possible. If - * they've got a slot, though, we need to report the flush position, - * so that the master can remove WAL. - */ reportFlushPosition = true; sprintf(slotcmd, "SLOT \"%s\" ", replication_slot); } else { - reportFlushPosition = false; + if (stream->synchronous) + reportFlushPosition = true; + else + reportFlushPosition = false; slotcmd[0] = 0; } -- cgit v1.2.3 From cf34fdbbe1452b9e19c0956bc48494889e1b2777 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 29 Aug 2016 09:29:26 -0400 Subject: Make AllocSetContextCreate throw an error for bad context-size parameters. The previous behavior was to silently change them to something valid. That obscured the bugs fixed in commit ea268cdc9, and generally seems less useful than complaining. Unlike the previous commit, though, we'll do this in HEAD only --- it's a bit too late to be possibly breaking third-party code in 9.6. 
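For callers, the practical upshot (together with the ALLOCSET_*_SIZES convenience macros used by the preceding commits) looks roughly like the sketch below; the helper and context name are illustrative only, not code taken from the patch.

#include "postgres.h"
#include "utils/memutils.h"

/* Illustrative helper showing the preferred calling style. */
static MemoryContext
make_example_context(MemoryContext parent)
{
	/*
	 * ALLOCSET_DEFAULT_SIZES expands to the three default size parameters
	 * (min, init, max), so callers cannot pass an inconsistent combination.
	 * Callers that still spell the sizes out now get elog(ERROR) for a
	 * non-MAXALIGN'ed or sub-1kB block size instead of a silent fix-up.
	 */
	return AllocSetContextCreate(parent,
								 "example context",
								 ALLOCSET_DEFAULT_SIZES);
}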
Discussion: --- src/backend/utils/mmgr/aset.c | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) (limited to 'src') diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c index 43c85234ce..f44e46767b 100644 --- a/src/backend/utils/mmgr/aset.c +++ b/src/backend/utils/mmgr/aset.c @@ -445,6 +445,26 @@ AllocSetContextCreate(MemoryContext parent, { AllocSet set; + /* + * First, validate allocation parameters. (If we're going to throw an + * error, we should do so before the context is created, not after.) We + * somewhat arbitrarily enforce a minimum 1K block size. + */ + if (initBlockSize != MAXALIGN(initBlockSize) || + initBlockSize < 1024) + elog(ERROR, "invalid initBlockSize for memory context: %zu", + initBlockSize); + if (maxBlockSize != MAXALIGN(maxBlockSize) || + maxBlockSize < initBlockSize || + !AllocHugeSizeIsValid(maxBlockSize)) /* must be safe to double */ + elog(ERROR, "invalid maxBlockSize for memory context: %zu", + maxBlockSize); + if (minContextSize != 0 && + (minContextSize != MAXALIGN(minContextSize) || + minContextSize <= ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ)) + elog(ERROR, "invalid minContextSize for memory context: %zu", + minContextSize); + /* Do the type-independent part of context creation */ set = (AllocSet) MemoryContextCreate(T_AllocSetContext, sizeof(AllocSetContext), @@ -452,18 +472,7 @@ AllocSetContextCreate(MemoryContext parent, parent, name); - /* - * Make sure alloc parameters are reasonable, and save them. - * - * We somewhat arbitrarily enforce a minimum 1K block size. - */ - initBlockSize = MAXALIGN(initBlockSize); - if (initBlockSize < 1024) - initBlockSize = 1024; - maxBlockSize = MAXALIGN(maxBlockSize); - if (maxBlockSize < initBlockSize) - maxBlockSize = initBlockSize; - Assert(AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */ + /* Save allocation parameters */ set->initBlockSize = initBlockSize; set->maxBlockSize = maxBlockSize; set->nextBlockSize = initBlockSize; @@ -495,9 +504,9 @@ AllocSetContextCreate(MemoryContext parent, /* * Grab always-allocated space, if requested */ - if (minContextSize > ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ) + if (minContextSize > 0) { - Size blksize = MAXALIGN(minContextSize); + Size blksize = minContextSize; AllocBlock block; block = (AllocBlock) malloc(blksize); -- cgit v1.2.3 From 9b7cd59af1afcfbd786921d5cf73befb5fefa2f7 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 29 Aug 2016 20:16:02 +0300 Subject: Remove support for OpenSSL versions older than 0.9.8. OpenSSL officially only supports 1.0.1 and newer. Some OS distributions still provide patches for 0.9.8, but anything older than that is not interesting anymore. Let's simplify things by removing compatibility code. Andreas Karlsson, with small changes by me. 
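As context for the deletions below: the compatibility code being removed was keyed off OPENSSL_VERSION_NUMBER preprocessor guards. A minimal sketch of that pattern, assuming 0.9.8 as the new floor (the digest-lookup helper is illustrative, not code from the patch):

#include <openssl/opensslv.h>
#include <openssl/evp.h>

/* 0.9.8 is now the hard floor; refuse to build against anything older. */
#if OPENSSL_VERSION_NUMBER < 0x00908000L
#error "OpenSSL 0.9.8 or newer is required"
#endif

/*
 * With 0.9.8 as the minimum, SHA-2 digests are always provided by
 * libcrypto, so a plain EVP lookup suffices and the bundled fallback
 * implementations can be dropped.
 */
static const EVP_MD *
lookup_digest(const char *name)
{
	return EVP_get_digestbyname(name);
}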
--- contrib/pgcrypto/openssl.c | 152 +------------------------------ doc/src/sgml/installation.sgml | 39 +++----- doc/src/sgml/libpq.sgml | 3 +- doc/src/sgml/pgcrypto.sgml | 18 +--- src/backend/libpq/be-secure-openssl.c | 8 +- src/interfaces/libpq/fe-secure-openssl.c | 4 - src/interfaces/libpq/libpq-int.h | 2 +- 7 files changed, 20 insertions(+), 206 deletions(-) (limited to 'src') diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c index 976af70591..ffab5d2bb0 100644 --- a/contrib/pgcrypto/openssl.c +++ b/contrib/pgcrypto/openssl.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -46,155 +47,6 @@ #define MAX_KEY (512/8) #define MAX_IV (128/8) -/* - * Compatibility with OpenSSL 0.9.6 - * - * It needs AES and newer DES and digest API. - */ -#if OPENSSL_VERSION_NUMBER >= 0x00907000L - -/* - * Nothing needed for OpenSSL 0.9.7+ - */ - -#include -#else /* old OPENSSL */ - -/* - * Emulate OpenSSL AES. - */ - -#include "rijndael.c" - -#define AES_ENCRYPT 1 -#define AES_DECRYPT 0 -#define AES_KEY rijndael_ctx - -static int -AES_set_encrypt_key(const uint8 *key, int kbits, AES_KEY *ctx) -{ - aes_set_key(ctx, key, kbits, 1); - return 0; -} - -static int -AES_set_decrypt_key(const uint8 *key, int kbits, AES_KEY *ctx) -{ - aes_set_key(ctx, key, kbits, 0); - return 0; -} - -static void -AES_ecb_encrypt(const uint8 *src, uint8 *dst, AES_KEY *ctx, int enc) -{ - memcpy(dst, src, 16); - if (enc) - aes_ecb_encrypt(ctx, dst, 16); - else - aes_ecb_decrypt(ctx, dst, 16); -} - -static void -AES_cbc_encrypt(const uint8 *src, uint8 *dst, int len, AES_KEY *ctx, uint8 *iv, int enc) -{ - memcpy(dst, src, len); - if (enc) - { - aes_cbc_encrypt(ctx, iv, dst, len); - memcpy(iv, dst + len - 16, 16); - } - else - { - aes_cbc_decrypt(ctx, iv, dst, len); - memcpy(iv, src + len - 16, 16); - } -} - -/* - * Emulate DES_* API - */ - -#define DES_key_schedule des_key_schedule -#define DES_cblock des_cblock -#define DES_set_key(k, ks) \ - des_set_key((k), *(ks)) -#define DES_ecb_encrypt(i, o, k, e) \ - des_ecb_encrypt((i), (o), *(k), (e)) -#define DES_ncbc_encrypt(i, o, l, k, iv, e) \ - des_ncbc_encrypt((i), (o), (l), *(k), (iv), (e)) -#define DES_ecb3_encrypt(i, o, k1, k2, k3, e) \ - des_ecb3_encrypt((des_cblock *)(i), (des_cblock *)(o), \ - *(k1), *(k2), *(k3), (e)) -#define DES_ede3_cbc_encrypt(i, o, l, k1, k2, k3, iv, e) \ - des_ede3_cbc_encrypt((i), (o), \ - (l), *(k1), *(k2), *(k3), (iv), (e)) - -/* - * Emulate newer digest API. 
- */ - -static void -EVP_MD_CTX_init(EVP_MD_CTX *ctx) -{ - memset(ctx, 0, sizeof(*ctx)); -} - -static int -EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx) -{ - px_memset(ctx, 0, sizeof(*ctx)); - return 1; -} - -static int -EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *md, void *engine) -{ - EVP_DigestInit(ctx, md); - return 1; -} - -static int -EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *res, unsigned int *len) -{ - EVP_DigestFinal(ctx, res, len); - return 1; -} -#endif /* old OpenSSL */ - -/* - * Provide SHA2 for older OpenSSL < 0.9.8 - */ -#if OPENSSL_VERSION_NUMBER < 0x00908000L - -#include "sha2.c" -#include "internal-sha2.c" - -typedef void (*init_f) (PX_MD *md); - -static int -compat_find_digest(const char *name, PX_MD **res) -{ - init_f init = NULL; - - if (pg_strcasecmp(name, "sha224") == 0) - init = init_sha224; - else if (pg_strcasecmp(name, "sha256") == 0) - init = init_sha256; - else if (pg_strcasecmp(name, "sha384") == 0) - init = init_sha384; - else if (pg_strcasecmp(name, "sha512") == 0) - init = init_sha512; - else - return PXE_NO_HASH; - - *res = px_alloc(sizeof(PX_MD)); - init(*res); - return 0; -} -#else -#define compat_find_digest(name, res) (PXE_NO_HASH) -#endif - /* * Hashes */ @@ -275,7 +127,7 @@ px_find_digest(const char *name, PX_MD **res) md = EVP_get_digestbyname(name); if (md == NULL) - return compat_find_digest(name, res); + return PXE_NO_HASH; digest = px_alloc(sizeof(*digest)); digest->algo = md; diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index a9968756e6..14a6d57aea 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -252,10 +252,17 @@ su - postgres - You need Kerberos, OpenSSL, - OpenLDAP, and/or - PAM, if you want to support authentication or - encryption using those services. + You need OpenSSL, if you want to support + encrypted client connections. The minimum required version is + 0.9.8. + + + + + + You need Kerberos, OpenLDAP, + and/or PAM, if you want to support authentication + using those services. @@ -2826,30 +2833,6 @@ MANPATH=/usr/lib/scohelp/%L/man:/usr/dt/man:/usr/man:/usr/share/man:scohelp:/usr - - Problems with OpenSSL - - - When you build PostgreSQL with OpenSSL support you might get - compilation errors in the following files: - - src/backend/libpq/crypt.c - src/backend/libpq/password.c - src/interfaces/libpq/fe-auth.c - src/interfaces/libpq/fe-connect.c - - - This is because of a namespace conflict between the standard - /usr/include/crypt.h header and the header - files provided by OpenSSL. - - - - Upgrading your OpenSSL installation to version 0.9.6a fixes this - problem. Solaris 9 and above has a newer version of OpenSSL. - - - configure Complains About a Failed Test Program diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index 2f9350b10e..4e34f00e44 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -1238,8 +1238,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname If set to 1 (default), data sent over SSL connections will be - compressed (this requires OpenSSL version - 0.9.8 or later). + compressed. If set to 0, compression will be disabled (this requires OpenSSL 1.0.0 or later). 
This parameter is ignored if a connection without SSL is made, diff --git a/doc/src/sgml/pgcrypto.sgml b/doc/src/sgml/pgcrypto.sgml index c4cefde4f7..bf514aacf3 100644 --- a/doc/src/sgml/pgcrypto.sgml +++ b/doc/src/sgml/pgcrypto.sgml @@ -1184,12 +1184,12 @@ gen_random_uuid() returns uuid SHA224/256/384/512 yes - yes (Note 1) + yes Other digest algorithms no - yes (Note 2) + yes (Note 1) Blowfish @@ -1199,7 +1199,7 @@ gen_random_uuid() returns uuid AES yes - yes (Note 3) + yes DES/3DES/CAST5 @@ -1230,12 +1230,6 @@ gen_random_uuid() returns uuid - - - SHA2 algorithms were added to OpenSSL in version 0.9.8. For - older versions, pgcrypto will use built-in code. - - Any digest algorithm OpenSSL supports is automatically picked up. @@ -1243,12 +1237,6 @@ gen_random_uuid() returns uuid explicitly. - - - AES is included in OpenSSL since version 0.9.7. For - older versions, pgcrypto will use built-in code. - - diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index f6adb155c6..e5f434ca17 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -53,10 +53,8 @@ #include #include -#if SSLEAY_VERSION_NUMBER >= 0x0907000L #include -#endif -#if (OPENSSL_VERSION_NUMBER >= 0x0090800fL) && !defined(OPENSSL_NO_ECDH) +#ifndef OPENSSL_NO_ECDH #include #endif @@ -166,9 +164,7 @@ be_tls_init(void) if (!SSL_context) { -#if SSLEAY_VERSION_NUMBER >= 0x0907000L OPENSSL_config(NULL); -#endif SSL_library_init(); SSL_load_error_strings(); @@ -978,7 +974,7 @@ info_cb(const SSL *ssl, int type, int args) static void initialize_ecdh(void) { -#if (OPENSSL_VERSION_NUMBER >= 0x0090800fL) && !defined(OPENSSL_NO_ECDH) +#ifndef OPENSSL_NO_ECDH EC_KEY *ecdh; int nid; diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c index f6ce1c7a13..d8716128ec 100644 --- a/src/interfaces/libpq/fe-secure-openssl.c +++ b/src/interfaces/libpq/fe-secure-openssl.c @@ -54,9 +54,7 @@ #endif #include -#if (SSLEAY_VERSION_NUMBER >= 0x00907000L) #include -#endif #ifdef USE_SSL_ENGINE #include #endif @@ -848,9 +846,7 @@ pgtls_init(PGconn *conn) { if (pq_init_ssl_lib) { -#if SSLEAY_VERSION_NUMBER >= 0x00907000L OPENSSL_config(NULL); -#endif SSL_library_init(); SSL_load_error_strings(); } diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index 1183323a44..a94ead04ff 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -77,7 +77,7 @@ typedef struct #include #include -#if (SSLEAY_VERSION_NUMBER >= 0x00907000L) && !defined(OPENSSL_NO_ENGINE) +#ifndef OPENSSL_NO_ENGINE #define USE_SSL_ENGINE #endif #endif /* USE_OPENSSL */ -- cgit v1.2.3 From 8e1e3f958fb3749fe01e9f2473f4554859c685a8 Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Mon, 29 Aug 2016 18:48:02 -0300 Subject: Split hash.h → hash_xlog.h Since the hash AM is going to be revamped to have WAL, this is a good opportunity to clean up the include file a little bit to avoid including a lot of extra stuff in the future. 
Author: Amit Kapila --- src/backend/access/hash/hash.c | 1 + src/backend/access/rmgrdesc/hashdesc.c | 2 +- src/backend/access/transam/rmgr.c | 2 +- src/bin/pg_xlogdump/rmgrdesc.c | 2 +- src/include/access/hash.h | 6 ------ src/include/access/hash_xlog.h | 25 +++++++++++++++++++++++++ 6 files changed, 29 insertions(+), 9 deletions(-) create mode 100644 src/include/access/hash_xlog.h (limited to 'src') diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index 07496f8156..e3b1eef246 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -19,6 +19,7 @@ #include "postgres.h" #include "access/hash.h" +#include "access/hash_xlog.h" #include "access/relscan.h" #include "catalog/index.h" #include "commands/vacuum.h" diff --git a/src/backend/access/rmgrdesc/hashdesc.c b/src/backend/access/rmgrdesc/hashdesc.c index d37c9b1aae..12e1818fba 100644 --- a/src/backend/access/rmgrdesc/hashdesc.c +++ b/src/backend/access/rmgrdesc/hashdesc.c @@ -14,7 +14,7 @@ */ #include "postgres.h" -#include "access/hash.h" +#include "access/hash_xlog.h" void hash_desc(StringInfo buf, XLogReaderState *record) diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c index 31c5fd165c..9bb136218d 100644 --- a/src/backend/access/transam/rmgr.c +++ b/src/backend/access/transam/rmgr.c @@ -12,7 +12,7 @@ #include "access/gin.h" #include "access/gist_private.h" #include "access/generic_xlog.h" -#include "access/hash.h" +#include "access/hash_xlog.h" #include "access/heapam_xlog.h" #include "access/brin_xlog.h" #include "access/multixact.h" diff --git a/src/bin/pg_xlogdump/rmgrdesc.c b/src/bin/pg_xlogdump/rmgrdesc.c index 017b9c5b34..8fe20ce97e 100644 --- a/src/bin/pg_xlogdump/rmgrdesc.c +++ b/src/bin/pg_xlogdump/rmgrdesc.c @@ -14,7 +14,7 @@ #include "access/generic_xlog.h" #include "access/gin.h" #include "access/gist_private.h" -#include "access/hash.h" +#include "access/hash_xlog.h" #include "access/heapam_xlog.h" #include "access/multixact.h" #include "access/nbtree.h" diff --git a/src/include/access/hash.h b/src/include/access/hash.h index ce314180e6..d9df904555 100644 --- a/src/include/access/hash.h +++ b/src/include/access/hash.h @@ -20,7 +20,6 @@ #include "access/amapi.h" #include "access/itup.h" #include "access/sdir.h" -#include "access/xlogreader.h" #include "fmgr.h" #include "lib/stringinfo.h" #include "storage/bufmgr.h" @@ -365,9 +364,4 @@ extern bool _hash_convert_tuple(Relation index, extern OffsetNumber _hash_binsearch(Page page, uint32 hash_value); extern OffsetNumber _hash_binsearch_last(Page page, uint32 hash_value); -/* hash.c */ -extern void hash_redo(XLogReaderState *record); -extern void hash_desc(StringInfo buf, XLogReaderState *record); -extern const char *hash_identify(uint8 info); - #endif /* HASH_H */ diff --git a/src/include/access/hash_xlog.h b/src/include/access/hash_xlog.h new file mode 100644 index 0000000000..5f941a9dfc --- /dev/null +++ b/src/include/access/hash_xlog.h @@ -0,0 +1,25 @@ +/*------------------------------------------------------------------------- + * + * hash_xlog.h + * header file for Postgres hash AM implementation + * + * + * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/access/hash_xlog.h + * + *------------------------------------------------------------------------- + */ +#ifndef HASH_XLOG_H +#define HASH_XLOG_H + +#include "access/hash.h" +#include "access/xlogreader.h" + + +extern 
void hash_redo(XLogReaderState *record); +extern void hash_desc(StringInfo buf, XLogReaderState *record); +extern const char *hash_identify(uint8 info); + +#endif /* HASH_XLOG_H */ -- cgit v1.2.3 From 37f6fd1eaab698983ca1fb2a036d52381347ac71 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 30 Aug 2016 15:25:01 -0400 Subject: Fix initdb misbehavior when user mis-enters superuser password. While testing simple_prompt() revisions, I happened to notice that current initdb behaves rather badly when --pwprompt is specified and the user miskeys the second password. It complains about the mismatch, does "rm -rf" on the data directory, and exits. The problem is that since commit c4a8812cf, there's a standalone backend sitting waiting for commands at that point. It gets unhappy about its datadir having gone away, and spews a PANIC message at the user, which is not nice. (And the shell then adds to the mess with meaningless bleating about a core dump...) We don't really want that sort of thing to happen unless there's an internal failure in initdb, which this surely is not. The best fix seems to be to move the collection of the password earlier, so that it's done essentially as part of argument collection, rather than at the rather ad-hoc time it was done before. Back-patch to 9.6 where the problem was introduced. --- src/bin/initdb/initdb.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'src') diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index aad6ba5639..54d338d013 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -135,6 +135,7 @@ static const char *default_text_search_config = ""; static char *username = ""; static bool pwprompt = false; static char *pwfilename = NULL; +static char *superuser_password = NULL; static const char *authmethodhost = ""; static const char *authmethodlocal = ""; static bool debug = false; @@ -255,7 +256,7 @@ static void test_config_settings(void); static void setup_config(void); static void bootstrap_template1(void); static void setup_auth(FILE *cmdfd); -static void get_set_pwd(FILE *cmdfd); +static void get_su_pwd(void); static void setup_depend(FILE *cmdfd); static void setup_sysviews(FILE *cmdfd); static void setup_description(FILE *cmdfd); @@ -1544,13 +1545,17 @@ setup_auth(FILE *cmdfd) for (line = pg_authid_setup; *line != NULL; line++) PG_CMD_PUTS(*line); + + if (superuser_password) + PG_CMD_PRINTF2("ALTER USER \"%s\" WITH PASSWORD E'%s';\n\n", + username, escape_quotes(superuser_password)); } /* - * get the superuser password if required, and call postgres to set it + * get the superuser password if required */ static void -get_set_pwd(FILE *cmdfd) +get_su_pwd(void) { char *pwd1, *pwd2; @@ -1560,6 +1565,8 @@ get_set_pwd(FILE *cmdfd) /* * Read password from terminal */ + printf("\n"); + fflush(stdout); pwd1 = simple_prompt("Enter new superuser password: ", 100, false); pwd2 = simple_prompt("Enter it again: ", 100, false); if (strcmp(pwd1, pwd2) != 0) @@ -1609,10 +1616,7 @@ get_set_pwd(FILE *cmdfd) } - PG_CMD_PRINTF2("ALTER USER \"%s\" WITH PASSWORD E'%s';\n\n", - username, escape_quotes(pwd1)); - - free(pwd1); + superuser_password = pwd1; } /* @@ -3279,8 +3283,6 @@ initialize_data_directory(void) PG_CMD_OPEN; setup_auth(cmdfd); - if (pwprompt || pwfilename) - get_set_pwd(cmdfd); setup_depend(cmdfd); @@ -3569,6 +3571,9 @@ main(int argc, char *argv[]) else printf(_("Data page checksums are disabled.\n")); + if (pwprompt || pwfilename) + get_su_pwd(); + printf("\n"); 
initialize_data_directory(); -- cgit v1.2.3 From 9daec77e165de461fca9d5bc3ece86a91aba5804 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 30 Aug 2016 17:02:02 -0400 Subject: Simplify correct use of simple_prompt(). The previous API for this function had it returning a malloc'd string. That meant that callers had to check for NULL return, which few of them were doing, and it also meant that callers had to remember to free() the string later, which required extra logic in most cases. Instead, make simple_prompt() write into a buffer supplied by the caller. Anywhere that the maximum required input length is reasonably small, which is almost all of the callers, we can just use a local or static array as the buffer instead of dealing with malloc/free. A fair number of callers used "pointer == NULL" as a proxy for "haven't requested the password yet". Maintaining the same behavior requires adding a separate boolean flag for that, which adds back some of the complexity we save by removing free()s. Nonetheless, this nets out at a small reduction in overall code size, and considerably less code than we would have had if we'd added the missing NULL-return checks everywhere they were needed. In passing, clean up the API comment for simple_prompt() and get rid of a very-unnecessary malloc/free in its Windows code path. This is nominally a bug fix, but it does not seem worth back-patching, because the actual risk of an OOM failure in any of these places seems pretty tiny, and all of them are client-side not server-side anyway. This patch is by me, but it owes a great deal to Michael Paquier who identified the problem and drafted a patch for fixing it the other way. Discussion: --- contrib/oid2name/oid2name.c | 13 ++++++----- contrib/vacuumlo/vacuumlo.c | 17 +++++++++------ src/bin/initdb/initdb.c | 23 ++++++++------------ src/bin/pg_basebackup/nls.mk | 1 + src/bin/pg_basebackup/streamutil.c | 14 ++++++------ src/bin/pg_dump/pg_backup_db.c | 35 ++++++++++++------------------ src/bin/pg_dump/pg_dumpall.c | 17 +++++++++------ src/bin/pgbench/pgbench.c | 10 +++++---- src/bin/psql/command.c | 25 +++++++++++----------- src/bin/psql/startup.c | 16 ++++++++------ src/bin/scripts/common.c | 34 +++++++++++------------------ src/bin/scripts/createuser.c | 21 +++++++++++------- src/bin/scripts/dropuser.c | 7 +++++- src/include/port.h | 3 ++- src/port/sprompt.c | 44 +++++++++++++++----------------------- 15 files changed, 138 insertions(+), 142 deletions(-) (limited to 'src') diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c index e5eeec21c1..5a2aa1dd0e 100644 --- a/contrib/oid2name/oid2name.c +++ b/contrib/oid2name/oid2name.c @@ -261,7 +261,8 @@ PGconn * sql_conn(struct options * my_opts) { PGconn *conn; - char *password = NULL; + bool have_password = false; + char password[100]; bool new_pass; /* @@ -282,7 +283,7 @@ sql_conn(struct options * my_opts) keywords[2] = "user"; values[2] = my_opts->username; keywords[3] = "password"; - values[3] = password; + values[3] = have_password ? 
password : NULL; keywords[4] = "dbname"; values[4] = my_opts->dbname; keywords[5] = "fallback_application_name"; @@ -302,17 +303,15 @@ sql_conn(struct options * my_opts) if (PQstatus(conn) == CONNECTION_BAD && PQconnectionNeedsPassword(conn) && - password == NULL) + !have_password) { PQfinish(conn); - password = simple_prompt("Password: ", 100, false); + simple_prompt("Password: ", password, sizeof(password), false); + have_password = true; new_pass = true; } } while (new_pass); - if (password) - free(password); - /* check to see that the backend connection was successfully made */ if (PQstatus(conn) == CONNECTION_BAD) { diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c index 769c805a84..0a9328dc0e 100644 --- a/contrib/vacuumlo/vacuumlo.c +++ b/contrib/vacuumlo/vacuumlo.c @@ -65,13 +65,17 @@ vacuumlo(const char *database, const struct _param * param) long matched; long deleted; int i; - static char *password = NULL; bool new_pass; bool success = true; + static bool have_password = false; + static char password[100]; /* Note: password can be carried over from a previous call */ - if (param->pg_prompt == TRI_YES && password == NULL) - password = simple_prompt("Password: ", 100, false); + if (param->pg_prompt == TRI_YES && !have_password) + { + simple_prompt("Password: ", password, sizeof(password), false); + have_password = true; + } /* * Start the connection. Loop until we have a password if requested by @@ -91,7 +95,7 @@ vacuumlo(const char *database, const struct _param * param) keywords[2] = "user"; values[2] = param->pg_user; keywords[3] = "password"; - values[3] = password; + values[3] = have_password ? password : NULL; keywords[4] = "dbname"; values[4] = database; keywords[5] = "fallback_application_name"; @@ -110,11 +114,12 @@ vacuumlo(const char *database, const struct _param * param) if (PQstatus(conn) == CONNECTION_BAD && PQconnectionNeedsPassword(conn) && - password == NULL && + !have_password && param->pg_prompt != TRI_NO) { PQfinish(conn); - password = simple_prompt("Password: ", 100, false); + simple_prompt("Password: ", password, sizeof(password), false); + have_password = true; new_pass = true; } } while (new_pass); diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 54d338d013..94074928cb 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -1557,8 +1557,8 @@ setup_auth(FILE *cmdfd) static void get_su_pwd(void) { - char *pwd1, - *pwd2; + char pwd1[100]; + char pwd2[100]; if (pwprompt) { @@ -1567,14 +1567,13 @@ get_su_pwd(void) */ printf("\n"); fflush(stdout); - pwd1 = simple_prompt("Enter new superuser password: ", 100, false); - pwd2 = simple_prompt("Enter it again: ", 100, false); + simple_prompt("Enter new superuser password: ", pwd1, sizeof(pwd1), false); + simple_prompt("Enter it again: ", pwd2, sizeof(pwd2), false); if (strcmp(pwd1, pwd2) != 0) { fprintf(stderr, _("Passwords didn't match.\n")); exit_nicely(); } - free(pwd2); } else { @@ -1587,7 +1586,6 @@ get_su_pwd(void) * for now. 
*/ FILE *pwf = fopen(pwfilename, "r"); - char pwdbuf[MAXPGPATH]; int i; if (!pwf) @@ -1596,7 +1594,7 @@ get_su_pwd(void) progname, pwfilename, strerror(errno)); exit_nicely(); } - if (!fgets(pwdbuf, sizeof(pwdbuf), pwf)) + if (!fgets(pwd1, sizeof(pwd1), pwf)) { if (ferror(pwf)) fprintf(stderr, _("%s: could not read password from file \"%s\": %s\n"), @@ -1608,15 +1606,12 @@ get_su_pwd(void) } fclose(pwf); - i = strlen(pwdbuf); - while (i > 0 && (pwdbuf[i - 1] == '\r' || pwdbuf[i - 1] == '\n')) - pwdbuf[--i] = '\0'; - - pwd1 = pg_strdup(pwdbuf); - + i = strlen(pwd1); + while (i > 0 && (pwd1[i - 1] == '\r' || pwd1[i - 1] == '\n')) + pwd1[--i] = '\0'; } - superuser_password = pwd1; + superuser_password = pg_strdup(pwd1); } /* diff --git a/src/bin/pg_basebackup/nls.mk b/src/bin/pg_basebackup/nls.mk index ec466dcaa2..a34ca3d268 100644 --- a/src/bin/pg_basebackup/nls.mk +++ b/src/bin/pg_basebackup/nls.mk @@ -2,3 +2,4 @@ CATALOG_NAME = pg_basebackup AVAIL_LANGUAGES = de es fr it ko pl pt_BR ru zh_CN GETTEXT_FILES = pg_basebackup.c pg_receivexlog.c pg_recvlogical.c receivelog.c streamutil.c ../../common/fe_memutils.c +GETTEXT_TRIGGERS = simple_prompt diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c index 72d8657004..595eaff46a 100644 --- a/src/bin/pg_basebackup/streamutil.c +++ b/src/bin/pg_basebackup/streamutil.c @@ -41,7 +41,8 @@ char *dbport = NULL; char *replication_slot = NULL; char *dbname = NULL; int dbgetpassword = 0; /* 0=auto, -1=never, 1=always */ -static char *dbpassword = NULL; +static bool have_password = false; +static char password[100]; PGconn *conn = NULL; /* @@ -141,24 +142,23 @@ GetConnection(void) } /* If -W was given, force prompt for password, but only the first time */ - need_password = (dbgetpassword == 1 && dbpassword == NULL); + need_password = (dbgetpassword == 1 && !have_password); do { /* Get a new password if appropriate */ if (need_password) { - if (dbpassword) - free(dbpassword); - dbpassword = simple_prompt(_("Password: "), 100, false); + simple_prompt("Password: ", password, sizeof(password), false); + have_password = true; need_password = false; } /* Use (or reuse, on a subsequent connection) password if we have it */ - if (dbpassword) + if (have_password) { keywords[i] = "password"; - values[i] = dbpassword; + values[i] = password; } else { diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c index d2a3de3c5d..3b9cd89b4a 100644 --- a/src/bin/pg_dump/pg_backup_db.c +++ b/src/bin/pg_dump/pg_backup_db.c @@ -134,6 +134,7 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser) const char *newdb; const char *newuser; char *password; + char passbuf[100]; bool new_pass; if (!reqdb) @@ -149,13 +150,12 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser) ahlog(AH, 1, "connecting to database \"%s\" as user \"%s\"\n", newdb, newuser); - password = AH->savedPassword ? 
pg_strdup(AH->savedPassword) : NULL; + password = AH->savedPassword; if (AH->promptPassword == TRI_YES && password == NULL) { - password = simple_prompt("Password: ", 100, false); - if (password == NULL) - exit_horribly(modulename, "out of memory\n"); + simple_prompt("Password: ", passbuf, sizeof(passbuf), false); + password = passbuf; } initPQExpBuffer(&connstr); @@ -201,16 +201,14 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser) fprintf(stderr, "Connecting to %s as %s\n", newdb, newuser); - if (password) - free(password); - if (AH->promptPassword != TRI_NO) - password = simple_prompt("Password: ", 100, false); + { + simple_prompt("Password: ", passbuf, sizeof(passbuf), false); + password = passbuf; + } else exit_horribly(modulename, "connection needs password\n"); - if (password == NULL) - exit_horribly(modulename, "out of memory\n"); new_pass = true; } } while (new_pass); @@ -225,8 +223,6 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser) free(AH->savedPassword); AH->savedPassword = pg_strdup(PQpass(newConn)); } - if (password) - free(password); termPQExpBuffer(&connstr); @@ -258,18 +254,18 @@ ConnectDatabase(Archive *AHX, { ArchiveHandle *AH = (ArchiveHandle *) AHX; char *password; + char passbuf[100]; bool new_pass; if (AH->connection) exit_horribly(modulename, "already connected to a database\n"); - password = AH->savedPassword ? pg_strdup(AH->savedPassword) : NULL; + password = AH->savedPassword; if (prompt_password == TRI_YES && password == NULL) { - password = simple_prompt("Password: ", 100, false); - if (password == NULL) - exit_horribly(modulename, "out of memory\n"); + simple_prompt("Password: ", passbuf, sizeof(passbuf), false); + password = passbuf; } AH->promptPassword = prompt_password; @@ -309,9 +305,8 @@ ConnectDatabase(Archive *AHX, prompt_password != TRI_NO) { PQfinish(AH->connection); - password = simple_prompt("Password: ", 100, false); - if (password == NULL) - exit_horribly(modulename, "out of memory\n"); + simple_prompt("Password: ", passbuf, sizeof(passbuf), false); + password = passbuf; new_pass = true; } } while (new_pass); @@ -332,8 +327,6 @@ ConnectDatabase(Archive *AHX, free(AH->savedPassword); AH->savedPassword = pg_strdup(PQpass(AH->connection)); } - if (password) - free(password); /* check for version mismatch */ _check_database_version(AH); diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 54a9f48200..b5efb46019 10
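A minimal, self-contained sketch of the caller-supplied-buffer convention that the simple_prompt() commit message above describes; the stub implementation, buffer size, and flag name are illustrative only, not the real src/port/sprompt.c code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static bool have_password = false;
static char password[100];		/* caller-owned storage; no malloc/free */

/*
 * Toy stand-in for the post-patch simple_prompt(): it fills a buffer the
 * caller supplies instead of returning malloc'd memory.  The real function
 * also suppresses echo and reads from the terminal; that is omitted here.
 */
static void
simple_prompt(const char *prompt, char *destination, size_t destlen, bool echo)
{
	(void) echo;
	fputs(prompt, stdout);
	fflush(stdout);
	if (fgets(destination, (int) destlen, stdin) == NULL)
		destination[0] = '\0';
	destination[strcspn(destination, "\r\n")] = '\0';
}

int
main(void)
{
	/* a boolean flag replaces the old "pointer is still NULL" test */
	if (!have_password)
	{
		simple_prompt("Password: ", password, sizeof(password), false);
		have_password = true;
	}
	printf("read %zu characters\n", strlen(password));
	return 0;
}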