-rw-r--r--	src/bin/pg_dump/pg_backup.h          |  1
-rw-r--r--	src/bin/pg_dump/pg_backup_archiver.c | 83
-rw-r--r--	src/bin/pg_dump/pg_backup_archiver.h |  6
-rw-r--r--	src/bin/pg_dump/pg_dump.c            | 46
4 files changed, 120 insertions, 16 deletions
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 658986de6f8..781f8fa1cc9 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -285,6 +285,7 @@ typedef int DumpId;
  * Function pointer prototypes for assorted callback methods.
  */
 
+typedef char *(*DefnDumperPtr) (Archive *AH, const void *userArg);
 typedef int (*DataDumperPtr) (Archive *AH, const void *userArg);
 typedef void (*SetupWorkerPtrType) (Archive *AH);
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 1d131e5a57d..2f902494e64 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -1266,6 +1266,9 @@ ArchiveEntry(Archive *AHX, CatalogId catalogId, DumpId dumpId,
 	newToc->dataDumperArg = opts->dumpArg;
 	newToc->hadDumper = opts->dumpFn ? true : false;
 
+	newToc->defnDumper = opts->defnFn;
+	newToc->defnDumperArg = opts->defnArg;
+
 	newToc->formatData = NULL;
 	newToc->dataLength = 0;
@@ -2621,7 +2624,45 @@ WriteToc(ArchiveHandle *AH)
 		WriteStr(AH, te->tag);
 		WriteStr(AH, te->desc);
 		WriteInt(AH, te->section);
-		WriteStr(AH, te->defn);
+
+		if (te->defnLen)
+		{
+			/*
+			 * defnLen should only be set for the custom format's second call
+			 * to WriteToc(), which rewrites the TOC in place to update data
+			 * offsets.  Instead of calling the defnDumper a second time
+			 * (which could involve re-executing queries), just skip
+			 * rewriting the definition and seek past it.  While regenerating
+			 * the definition should theoretically produce the same result as
+			 * before, it's expensive and feels risky.
+			 *
+			 * The custom format only calls WriteToc() a second time if
+			 * fseeko() is usable (see _CloseArchive() in
+			 * pg_backup_custom.c), so we can safely use it without checking.
+			 * For other formats, we fail because one of our assumptions must
+			 * no longer hold true.
+			 *
+			 * XXX This is a layering violation, but the alternative is an
+			 * awkward and complicated callback infrastructure for this
+			 * special case.  This might be worth revisiting in the future.
+			 */
+			if (AH->format != archCustom)
+				pg_fatal("unexpected TOC entry in WriteToc(): %d %s %s",
+						 te->dumpId, te->desc, te->tag);
+
+			if (fseeko(AH->FH, te->defnLen, SEEK_CUR) != 0)
+				pg_fatal("error during file seek: %m");
+		}
+		else if (te->defnDumper)
+		{
+			char	   *defn = te->defnDumper((Archive *) AH, te->defnDumperArg);
+
+			te->defnLen = WriteStr(AH, defn);
+			pg_free(defn);
+		}
+		else
+			WriteStr(AH, te->defn);
+
 		WriteStr(AH, te->dropStmt);
 		WriteStr(AH, te->copyStmt);
 		WriteStr(AH, te->namespace);
@@ -3849,7 +3890,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx)
 	/*
 	 * Actually print the definition.  Normally we can just print the defn
-	 * string if any, but we have three special cases:
+	 * string if any, but we have four special cases:
 	 *
 	 * 1. A crude hack for suppressing AUTHORIZATION clause that old pg_dump
 	 * versions put into CREATE SCHEMA.  Don't mutate the variant for schema
@@ -3862,6 +3903,11 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx)
 	 * 3. ACL LARGE OBJECTS entries need special processing because they
 	 * contain only one copy of the ACL GRANT/REVOKE commands, which we must
 	 * apply to each large object listed in the associated BLOB METADATA.
+	 *
+	 * 4. Entries with a defnDumper need to call it to generate the
+	 * definition.  This is primarily intended to provide a way to save
+	 * memory for objects that would otherwise need a lot of it (e.g.,
+	 * statistics data).
 	 */
 	if (ropt->noOwner && strcmp(te->desc, "SCHEMA") == 0 &&
 		strncmp(te->defn, "--", 2) != 0)
@@ -3877,6 +3923,39 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx)
 	{
 		IssueACLPerBlob(AH, te);
 	}
+	else if (te->defnLen && AH->format != archTar)
+	{
+		/*
+		 * If defnLen is set, the defnDumper has already been called for this
+		 * TOC entry.  We don't normally expect a defnDumper to be called for
+		 * a TOC entry a second time in _printTocEntry(), but there's an
+		 * exception.  The tar format first calls WriteToc(), which scans the
+		 * entire TOC, and then it later calls RestoreArchive() to generate
+		 * restore.sql, which scans the TOC again.  There doesn't appear to
+		 * be a good way to prevent a second defnDumper call in this case
+		 * without storing the definition in memory, which defeats the
+		 * purpose.  This second defnDumper invocation should generate the
+		 * same output as the first, but even if it doesn't, the worst-case
+		 * scenario is that restore.sql might have different statistics data
+		 * than the archive.
+		 *
+		 * In all other cases, encountering a TOC entry a second time in
+		 * _printTocEntry() is unexpected, so we fail because one of our
+		 * assumptions must no longer hold true.
+		 *
+		 * XXX This is a layering violation, but the alternative is an
+		 * awkward and complicated callback infrastructure for this special
+		 * case.  This might be worth revisiting in the future.
+		 */
+		pg_fatal("unexpected TOC entry in _printTocEntry(): %d %s %s",
+				 te->dumpId, te->desc, te->tag);
+	}
+	else if (te->defnDumper)
+	{
+		char	   *defn = te->defnDumper((Archive *) AH, te->defnDumperArg);
+
+		te->defnLen = ahprintf(AH, "%s\n\n", defn);
+		pg_free(defn);
+	}
 	else if (te->defn && strlen(te->defn) > 0)
 	{
 		ahprintf(AH, "%s\n\n", te->defn);
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index a2064f471ed..b7ebc2b39cd 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -368,6 +368,10 @@ struct _tocEntry
 	const void *dataDumperArg;	/* Arg for above routine */
 	void	   *formatData;		/* TOC Entry data specific to file format */
 
+	DefnDumperPtr defnDumper;	/* routine to dump definition statement */
+	const void *defnDumperArg;	/* arg for above routine */
+	size_t		defnLen;		/* length of dumped definition */
+
 	/* working state while dumping/restoring */
 	pgoff_t		dataLength;		/* item's data size; 0 if none or unknown */
 	int			reqs;			/* do we need schema and/or data of object
@@ -407,6 +411,8 @@ typedef struct _archiveOpts
 	int			nDeps;
 	DataDumperPtr dumpFn;
 	const void *dumpArg;
+	DefnDumperPtr defnFn;
+	const void *defnArg;
 } ArchiveOpts;
 #define ARCHIVE_OPTS(...) &(ArchiveOpts){__VA_ARGS__}
 /* Called to add a TOC entry */
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 9436b25290d..e553cc84934 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -10554,17 +10554,21 @@ appendNamedArgument(PQExpBuffer out, Archive *fout, const char *argname,
 }
 
 /*
- * dumpRelationStats --
+ * dumpRelationStats_dumper --
  *
- * Dump command to import stats into the relation on the new database.
+ * Generate command to import stats into the relation on the new database.
+ * This routine is called by the Archiver when it wants the statistics to be
+ * dumped.
  */
-static void
-dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
+static char *
+dumpRelationStats_dumper(Archive *fout, const void *userArg)
 {
+	const RelStatsInfo *rsinfo = (RelStatsInfo *) userArg;
 	const DumpableObject *dobj = &rsinfo->dobj;
 	PGresult   *res;
 	PQExpBuffer query;
-	PQExpBuffer out;
+	PQExpBufferData out_data;
+	PQExpBuffer out = &out_data;
 	int			i_attname;
 	int			i_inherited;
 	int			i_null_frac;
@@ -10581,10 +10585,6 @@ dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
 	int			i_range_empty_frac;
 	int			i_range_bounds_histogram;
 
-	/* nothing to do if we are not dumping statistics */
-	if (!fout->dopt->dumpStatistics)
-		return;
-
 	query = createPQExpBuffer();
 	if (!fout->is_prepared[PREPQUERY_GETATTRIBUTESTATS])
 	{
@@ -10620,7 +10620,7 @@ dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
 		resetPQExpBuffer(query);
 	}
 
-	out = createPQExpBuffer();
+	initPQExpBuffer(out);
 
 	/* restore relation stats */
 	appendPQExpBufferStr(out, "SELECT * FROM pg_catalog.pg_restore_relation_stats(\n");
@@ -10764,17 +10764,35 @@ dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
 
 	PQclear(res);
 
+	destroyPQExpBuffer(query);
+
+	return out->data;
+}
+
+/*
+ * dumpRelationStats --
+ *
+ * Make an ArchiveEntry for the relation statistics.  The Archiver will take
+ * care of gathering the statistics and generating the restore commands when
+ * they are needed.
+ */
+static void
+dumpRelationStats(Archive *fout, const RelStatsInfo *rsinfo)
+{
+	const DumpableObject *dobj = &rsinfo->dobj;
+
+	/* nothing to do if we are not dumping statistics */
+	if (!fout->dopt->dumpStatistics)
+		return;
+
 	ArchiveEntry(fout, nilCatalogId, createDumpId(),
 				 ARCHIVE_OPTS(.tag = dobj->name,
 							  .namespace = dobj->namespace->dobj.name,
 							  .description = "STATISTICS DATA",
 							  .section = rsinfo->section,
-							  .createStmt = out->data,
+							  .defnFn = dumpRelationStats_dumper,
+							  .defnArg = rsinfo,
 							  .deps = dobj->dependencies,
 							  .nDeps = dobj->nDeps));
-
-	destroyPQExpBuffer(out);
-	destroyPQExpBuffer(query);
 }
 
 /*
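
For illustration only (not part of the commit): a minimal sketch of how another pg_dump object type could use the deferred-definition hook added above. Only DefnDumperPtr, the .defnFn/.defnArg ArchiveEntry options, and the fact that the archiver pg_free()s the returned string come from this patch; the FooInfo type and the dumpFoo* names are hypothetical placeholders.

/*
 * Hypothetical sketch: defer building an object's definition until the
 * archiver requests it, mirroring dumpRelationStats_dumper() above.
 * FooInfo and the function names are invented for illustration.
 */
static char *
dumpFoo_dumper(Archive *fout, const void *userArg)
{
	const FooInfo *finfo = (const FooInfo *) userArg;
	PQExpBufferData buf;

	initPQExpBuffer(&buf);

	/* Build the (potentially large) command only when it is needed. */
	appendPQExpBuffer(&buf, "CREATE FOO %s;\n", fmtId(finfo->dobj.name));

	/* The archiver takes ownership and releases this string with pg_free(). */
	return buf.data;
}

static void
dumpFoo(Archive *fout, const FooInfo *finfo)
{
	/* Register the callback instead of passing a prebuilt .createStmt. */
	ArchiveEntry(fout, nilCatalogId, createDumpId(),
				 ARCHIVE_OPTS(.tag = finfo->dobj.name,
							  .description = "FOO",
							  .section = SECTION_PRE_DATA,
							  .defnFn = dumpFoo_dumper,
							  .defnArg = finfo));
}

As with the statistics entries, the point of this pattern is that the definition no longer has to be held in memory for every object between dumpFoo() and the moment the archiver actually writes the TOC entry.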