author     Tom Lane    2021-10-31 23:13:48 +0000
committer  Tom Lane    2021-10-31 23:13:48 +0000
commit     5dd067430b9d888196b7d69de7faba9db9bb006d (patch)
tree       02b19f4d6419cdf4ef294a4e691021be196b840e /contrib
parent     91455f7c6d594ec31556ebc27acab841ed1ae208 (diff)
Don't try to read a multi-GB pg_stat_statements file in one call.
Windows fails on a request to read() more than INT_MAX bytes, and perhaps
other platforms could have similar issues. Let's adjust this code to read
at most 1GB per call.

(One would not have thought the file could get that big, but now we have a
field report of trouble, so it can. We likely ought to add some mechanism
to limit the size of the query-texts file separately from the size of the
hash table. That is not this patch, though.)

Per bug #17254 from Yusuke Egashira. It's been like this for a while, so
back-patch to all supported branches.

Discussion: https://postgr.es/m/17254-a926c89dc03375c2@postgresql.org
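For illustration, here is a minimal standalone POSIX C sketch of the same
chunked-read pattern, outside the PostgreSQL tree. The function name
read_file_chunked, the MAX_CHUNK constant, and the demo main() are
inventions for this example, not part of the patch; only the 1GB per-call
cap and the treatment of a short read with errno unset are taken from the
commit below.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

/* Illustrative cap: 1GB per read() call, safely below INT_MAX. */
#define MAX_CHUNK ((size_t) 1024 * 1024 * 1024)

/*
 * Read an entire file into a malloc'd buffer without ever asking read()
 * for more than MAX_CHUNK bytes at once.  Returns NULL on error, or on a
 * short read with errno unset (which here, as in the patch, is taken to
 * mean the file shrank underneath us while we were reading it).
 */
static char *
read_file_chunked(const char *path, size_t *size_out)
{
    struct stat st;
    char       *buf;
    size_t      nread = 0;
    int         fd;

    fd = open(path, O_RDONLY);
    if (fd < 0)
        return NULL;
    if (fstat(fd, &st) != 0)
    {
        close(fd);
        return NULL;
    }
    buf = malloc((size_t) st.st_size + 1);
    if (buf == NULL)
    {
        close(fd);
        return NULL;
    }
    while (nread < (size_t) st.st_size)
    {
        size_t      toread = (size_t) st.st_size - nread;
        ssize_t     got;

        if (toread > MAX_CHUNK)
            toread = MAX_CHUNK;
        errno = 0;
        got = read(fd, buf + nread, toread);
        if (got != (ssize_t) toread)
        {
            /* errno == 0: concurrent truncation; nonzero: real I/O error */
            free(buf);
            close(fd);
            return NULL;
        }
        nread += (size_t) got;
    }
    close(fd);
    buf[nread] = '\0';
    *size_out = nread;
    return buf;
}

int
main(int argc, char **argv)
{
    size_t      size;
    char       *data;

    if (argc != 2)
        return 1;
    data = read_file_chunked(argv[1], &size);
    if (data == NULL)
    {
        fprintf(stderr, "could not read %s\n", argv[1]);
        return 1;
    }
    printf("read %zu bytes\n", size);
    free(data);
    return 0;
}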
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.c | 45
1 file changed, 29 insertions(+), 16 deletions(-)
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index cc9efab2431..d3076adb715 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -1906,6 +1906,7 @@ qtext_load_file(Size *buffer_size)
     char       *buf;
     int         fd;
     struct stat stat;
+    Size        nread;
 
     fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY);
     if (fd < 0)
@@ -1946,28 +1947,40 @@ qtext_load_file(Size *buffer_size)
     }
 
     /*
-     * OK, slurp in the file.  If we get a short read and errno doesn't get
-     * set, the reason is probably that garbage collection truncated the file
-     * since we did the fstat(), so we don't log a complaint --- but we don't
-     * return the data, either, since it's most likely corrupt due to
-     * concurrent writes from garbage collection.
+     * OK, slurp in the file.  Windows fails if we try to read more than
+     * INT_MAX bytes at once, and other platforms might not like that either,
+     * so read a very large file in 1GB segments.
      */
-    errno = 0;
-    if (read(fd, buf, stat.st_size) != stat.st_size)
+    nread = 0;
+    while (nread < stat.st_size)
     {
-        if (errno)
-            ereport(LOG,
-                    (errcode_for_file_access(),
-                     errmsg("could not read pg_stat_statement file \"%s\": %m",
-                            PGSS_TEXT_FILE)));
-        free(buf);
-        CloseTransientFile(fd);
-        return NULL;
+        int         toread = Min(1024 * 1024 * 1024, stat.st_size - nread);
+
+        /*
+         * If we get a short read and errno doesn't get set, the reason is
+         * probably that garbage collection truncated the file since we did
+         * the fstat(), so we don't log a complaint --- but we don't return
+         * the data, either, since it's most likely corrupt due to concurrent
+         * writes from garbage collection.
+         */
+        errno = 0;
+        if (read(fd, buf + nread, toread) != toread)
+        {
+            if (errno)
+                ereport(LOG,
+                        (errcode_for_file_access(),
+                         errmsg("could not read pg_stat_statement file \"%s\": %m",
+                                PGSS_TEXT_FILE)));
+            free(buf);
+            CloseTransientFile(fd);
+            return NULL;
+        }
+        nread += toread;
     }
 
     CloseTransientFile(fd);
 
-    *buffer_size = stat.st_size;
+    *buffer_size = nread;
     return buf;
 }
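A note on the design, inferred from the diff rather than stated in the
commit: the per-call cap is an int (toread) set with Min(1024 * 1024 * 1024,
stat.st_size - nread), a round number comfortably below INT_MAX, so each
read() request always fits in a signed int; the running total nread is a
Size, so the loop can handle query-texts files larger than 4GB on 64-bit
builds. A short read that leaves errno at zero is treated as concurrent
truncation by garbage collection, and the partial buffer is discarded
rather than returned.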