Diffstat (limited to 'src/bin')
-rw-r--r--   src/bin/pg_dump/pg_backup_archiver.c |  15
-rw-r--r--   src/bin/pg_dump/pg_dump.c            | 113
-rw-r--r--   src/bin/pg_dump/pg_dumpall.c         |   2
-rw-r--r--   src/bin/pg_dump/t/002_pg_dump.pl     |   4
-rw-r--r--   src/bin/psql/command.c               |   7
-rw-r--r--   src/bin/psql/help.c                  |   2
-rw-r--r--   src/bin/psql/tab-complete.in.c       |  12
7 files changed, 142 insertions, 13 deletions
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 197c1295d93..30e0da31aa3 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -31,6 +31,8 @@
 #endif
 
 #include "catalog/pg_class_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
+#include "catalog/pg_shdepend_d.h"
 #include "common/string.h"
 #include "compress_io.h"
 #include "dumputils.h"
@@ -2974,6 +2976,19 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
 	int			res = REQ_SCHEMA | REQ_DATA;
 	RestoreOptions *ropt = AH->public.ropt;
 
+	/*
+	 * For binary upgrade mode, dump pg_largeobject_metadata and the
+	 * associated pg_shdepend rows. This is faster to restore than the
+	 * equivalent set of large object commands. We can only do this for
+	 * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+	 * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+	 */
+	if (ropt->binary_upgrade && AH->public.remoteVersion >= 120000 &&
+		strcmp(te->desc, "TABLE DATA") == 0 &&
+		(te->catalogId.oid == LargeObjectMetadataRelationId ||
+		 te->catalogId.oid == SharedDependRelationId))
+		return REQ_DATA;
+
 	/* These items are treated specially */
 	if (strcmp(te->desc, "ENCODING") == 0 ||
 		strcmp(te->desc, "STDSTRINGS") == 0 ||
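The archiver hunk above forces a data-only restore of those two catalogs in
binary-upgrade mode. To make the claimed speedup concrete: restoring large
object metadata used to mean one group of commands per large object, while the
new path bulk-loads the catalog rows. A rough sketch of the two dump shapes
(OIDs, role names, and privileges hypothetical):

    -- old per-object form, one batch per large object:
    SELECT pg_catalog.lo_create('16402');
    ALTER LARGE OBJECT 16402 OWNER TO alice;
    GRANT SELECT ON LARGE OBJECT 16402 TO bob;

    -- new bulk form, two COPY statements regardless of the number of LOs:
    COPY pg_catalog.pg_largeobject_metadata (oid, lomowner, lomacl) FROM stdin;
    COPY pg_catalog.pg_shdepend (dbid, classid, objid, objsubid,
                                 refclassid, refobjid, deptype) FROM stdin;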
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 1937997ea67..604fc109416 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -49,8 +49,10 @@
 #include "catalog/pg_class_d.h"
 #include "catalog/pg_default_acl_d.h"
 #include "catalog/pg_largeobject_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
 #include "catalog/pg_proc_d.h"
 #include "catalog/pg_publication_d.h"
+#include "catalog/pg_shdepend_d.h"
 #include "catalog/pg_subscription_d.h"
 #include "catalog/pg_type_d.h"
 #include "common/connect.h"
@@ -209,6 +211,12 @@ static int	nbinaryUpgradeClassOids = 0;
 static SequenceItem *sequences = NULL;
 static int	nsequences = 0;
 
+/*
+ * For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use
+ * as a dependency for pg_shdepend and any large object comments/seclabels.
+ */
+static DumpId lo_metadata_dumpId;
+
 /* Maximum number of relations to fetch in a fetchAttributeStats() call. */
 #define MAX_ATTR_STATS_RELS 64
 
@@ -1086,6 +1094,36 @@ main(int argc, char **argv)
 		getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
 
 	/*
+	 * For binary upgrade mode, dump pg_largeobject_metadata and the
+	 * associated pg_shdepend rows. This is faster to restore than the
+	 * equivalent set of large object commands. We can only do this for
+	 * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+	 * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+	 */
+	if (dopt.binary_upgrade && fout->remoteVersion >= 120000)
+	{
+		TableInfo  *lo_metadata = findTableByOid(LargeObjectMetadataRelationId);
+		TableInfo  *shdepend = findTableByOid(SharedDependRelationId);
+
+		makeTableDataInfo(&dopt, lo_metadata);
+		makeTableDataInfo(&dopt, shdepend);
+
+		/*
+		 * Save pg_largeobject_metadata's dump ID for use as a dependency for
+		 * pg_shdepend and any large object comments/seclabels.
+		 */
+		lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId;
+		addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId);
+
+		/*
+		 * Only dump large object shdepend rows for this database.
+		 */
+		shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
+			"AND dbid = (SELECT oid FROM pg_database "
+			"            WHERE datname = current_database())";
+	}
+
+	/*
 	 * In binary-upgrade mode, we do not have to worry about the actual LO
 	 * data or the associated metadata that resides in the pg_largeobject and
 	 * pg_largeobject_metadata tables, respectively.
@@ -3924,10 +3962,37 @@ getLOs(Archive *fout)
 		 * as it will be copied by pg_upgrade, which simply copies the
 		 * pg_largeobject table. We *do* however dump out anything but the
 		 * data, as pg_upgrade copies just pg_largeobject, but not
-		 * pg_largeobject_metadata, after the dump is restored.
+		 * pg_largeobject_metadata, after the dump is restored. In versions
+		 * before v12, this is done via proper large object commands. In
+		 * newer versions, we dump the content of pg_largeobject_metadata and
+		 * any associated pg_shdepend rows, which is faster to restore. (On
+		 * <v12, pg_largeobject_metadata was created WITH OIDS, so the OID
+		 * column is hidden and won't be dumped.)
 		 */
 		if (dopt->binary_upgrade)
-			loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+		{
+			if (fout->remoteVersion >= 120000)
+			{
+				/*
+				 * We should've saved pg_largeobject_metadata's dump ID before
+				 * this point.
+				 */
+				Assert(lo_metadata_dumpId);
+
+				loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION);
+
+				/*
+				 * Mark the large object as dependent on
+				 * pg_largeobject_metadata so that any large object
+				 * comments/seclabels are dumped after it.
+				 */
+				loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
+				loinfo->dobj.dependencies[0] = lo_metadata_dumpId;
+				loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1;
+			}
+			else
+				loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+		}
 
 		/*
 		 * Create a "BLOBS" data item for the group, too. This is just a
@@ -9039,8 +9104,20 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
 		if (tbinfo->relkind == RELKIND_SEQUENCE)
 			continue;
 
-		/* Don't bother with uninteresting tables, either */
-		if (!tbinfo->interesting)
+		/*
+		 * Don't bother with uninteresting tables, either. For binary
+		 * upgrades, this is bypassed for pg_largeobject_metadata and
+		 * pg_shdepend so that the column names are collected for the
+		 * corresponding COPY commands. Restoring the data for those catalogs
+		 * is faster than restoring the equivalent set of large object
+		 * commands. We can only do this for upgrades from v12 and newer; in
+		 * older versions, pg_largeobject_metadata was created WITH OIDS, so
+		 * the OID column is hidden and won't be dumped.
+		 */
+		if (!tbinfo->interesting &&
+			!(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+			  (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+			   tbinfo->dobj.catId.oid == SharedDependRelationId)))
 			continue;
 
 		/* OK, we need info for this table */
@@ -9244,7 +9321,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
 			pg_fatal("unrecognized table OID %u", attrelid);
 		/* cross-check that we only got requested tables */
 		if (tbinfo->relkind == RELKIND_SEQUENCE ||
-			!tbinfo->interesting)
+			(!tbinfo->interesting &&
+			 !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+			   (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+				tbinfo->dobj.catId.oid == SharedDependRelationId))))
 			pg_fatal("unexpected column data for table \"%s\"",
 					 tbinfo->dobj.name);
@@ -12583,8 +12663,13 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
 	for (i = 0; i < tyinfo->nDomChecks; i++)
 	{
 		ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
-		PQExpBuffer conprefix = createPQExpBuffer();
+		PQExpBuffer conprefix;
+
+		/* but only if the constraint itself was dumped here */
+		if (domcheck->separate)
+			continue;
 
+		conprefix = createPQExpBuffer();
 		appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
 						  fmtId(domcheck->dobj.name));
@@ -18488,6 +18573,22 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
 									  .section = SECTION_POST_DATA,
 									  .createStmt = q->data,
 									  .dropStmt = delq->data));
+
+			if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+			{
+				PQExpBuffer conprefix = createPQExpBuffer();
+				char	   *qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
+
+				appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
+								  fmtId(coninfo->dobj.name));
+
+				dumpComment(fout, conprefix->data, qtypname,
+							tyinfo->dobj.namespace->dobj.name,
+							tyinfo->rolname,
+							coninfo->dobj.catId, 0, tyinfo->dobj.dumpId);
+				destroyPQExpBuffer(conprefix);
+				free(qtypname);
+			}
 		}
 	}
 	else
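Two things worth illustrating from the pg_dump.c changes. First, the
filtercond string above restricts the pg_shdepend dump to this database's
large object rows; it is ordinary SQL and can be previewed directly against
the source cluster:

    SELECT * FROM pg_shdepend
    WHERE classid = 'pg_largeobject'::regclass
      AND dbid = (SELECT oid FROM pg_database
                  WHERE datname = current_database());

Second, the dumpDomain()/dumpConstraint() hunks fix comments on
separately-dumped domain constraints. A minimal scenario that exercises the
new code path (object names hypothetical):

    CREATE DOMAIN positive_int AS integer;
    ALTER DOMAIN positive_int ADD CONSTRAINT positive_chk
        CHECK (VALUE > 0) NOT VALID;
    COMMENT ON CONSTRAINT positive_chk ON DOMAIN positive_int
        IS 'values must be positive';

Because the NOT VALID constraint is dumped as a separate ALTER DOMAIN ... ADD
CONSTRAINT, its comment is now emitted after that command rather than with the
CREATE DOMAIN, where it would reference a constraint that does not exist yet.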
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 3cbcad65c5f..100317b1aa9 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -632,7 +632,7 @@ main(int argc, char *argv[])
 		fprintf(OPF, "SET escape_string_warning = off;\n");
 	fprintf(OPF, "\n");
 
-	if (!data_only)
+	if (!data_only && !statistics_only && !no_schema)
 	{
 		/*
 		 * If asked to --clean, do that first.  We can avoid detailed
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 2485d8f360e..d8330e2bd17 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -1087,6 +1087,7 @@ my %tests = (
 			test_schema_plus_large_objects => 1,
 		},
 		unlike => {
+			binary_upgrade => 1,
 			no_large_objects => 1,
 			no_owner => 1,
 			schema_only => 1,
@@ -1605,6 +1606,7 @@ my %tests = (
 			test_schema_plus_large_objects => 1,
 		},
 		unlike => {
+			binary_upgrade => 1,
 			schema_only => 1,
 			schema_only_with_statistics => 1,
 			no_large_objects => 1,
@@ -4612,9 +4614,9 @@ my %tests = (
 			no_schema => 1,
 			section_data => 1,
 			test_schema_plus_large_objects => 1,
-			binary_upgrade => 1,
 		},
 		unlike => {
+			binary_upgrade => 1,
 			no_large_objects => 1,
 			no_privs => 1,
 			schema_only => 1,
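For the pg_dumpall change: the `if (!data_only)` block guards the globals
preamble (the optional --clean commands plus role and tablespace DDL). With
the extra conditions, a statistics-only or no-schema run now suppresses output
like the following, matching the existing data-only behavior (names
hypothetical):

    CREATE ROLE alice;
    ALTER ROLE alice WITH LOGIN CREATEDB;
    CREATE TABLESPACE ts1 OWNER alice LOCATION '/srv/pgdata/ts1';

The 002_pg_dump.pl edits track the large object change above: large object
DDL, data, and privileges are now expected to be absent from binary_upgrade
dumps, so those runs move from the like lists to the unlike lists.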
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 0a55901b14e..0e00d73487c 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -4481,6 +4481,7 @@ SyncVariables(void)
 	char		vbuf[32];
 	const char *server_version;
 	char	   *service_name;
+	char	   *service_file;
 
 	/* get stuff from connection */
 	pset.encoding = PQclientEncoding(pset.db);
@@ -4500,6 +4501,11 @@ SyncVariables(void)
 	if (service_name)
 		pg_free(service_name);
 
+	service_file = get_conninfo_value("servicefile");
+	SetVariable(pset.vars, "SERVICEFILE", service_file);
+	if (service_file)
+		pg_free(service_file);
+
 	/* this bit should match connection_warnings(): */
 	/* Try to get full text form of version, might include "devel" etc */
 	server_version = PQparameterStatus(pset.db, "server_version");
@@ -4529,6 +4535,7 @@ UnsyncVariables(void)
 {
 	SetVariable(pset.vars, "DBNAME", NULL);
 	SetVariable(pset.vars, "SERVICE", NULL);
+	SetVariable(pset.vars, "SERVICEFILE", NULL);
 	SetVariable(pset.vars, "USER", NULL);
 	SetVariable(pset.vars, "HOST", NULL);
 	SetVariable(pset.vars, "PORT", NULL);
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index a2e009ab9be..8c62729a0d1 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -748,7 +748,7 @@ void
 print_copyright(void)
 {
 	puts("PostgreSQL Database Management System\n"
-		 "(formerly known as Postgres, then as Postgres95)\n\n"
+		 "(also known as Postgres, formerly known as Postgres95)\n\n"
 		 "Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group\n\n"
 		 "Portions Copyright (c) 1994, The Regents of the University of California\n\n"
 		 "Permission to use, copy, modify, and distribute this software and its\n"
diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c
index 6872653c6c8..37524364290 100644
--- a/src/bin/psql/tab-complete.in.c
+++ b/src/bin/psql/tab-complete.in.c
@@ -4619,10 +4619,14 @@ match_previous_words(int pattern_id,
 	else if (Matches("ALTER", "DEFAULT", "PRIVILEGES", MatchAnyN, "TO", MatchAny))
 		COMPLETE_WITH("WITH GRANT OPTION");
 	/* Complete "GRANT/REVOKE ... ON * *" with TO/FROM */
-	else if (Matches("GRANT", MatchAnyN, "ON", MatchAny, MatchAny))
-		COMPLETE_WITH("TO");
-	else if (Matches("REVOKE", MatchAnyN, "ON", MatchAny, MatchAny))
-		COMPLETE_WITH("FROM");
+	else if (Matches("GRANT|REVOKE", MatchAnyN, "ON", MatchAny, MatchAny) &&
+			 !TailMatches("FOREIGN", "SERVER") && !TailMatches("LARGE", "OBJECT"))
+	{
+		if (Matches("GRANT", MatchAnyN, "ON", MatchAny, MatchAny))
+			COMPLETE_WITH("TO");
+		else
+			COMPLETE_WITH("FROM");
+	}
 	/* Complete "GRANT/REVOKE * ON ALL * IN SCHEMA *" with TO/FROM */
 	else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", "ALL", MatchAny, "IN", "SCHEMA", MatchAny) ||
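On the psql side, SERVICEFILE joins SERVICE as a variable that is
resynchronized on every connection and cleared when the connection goes away.
A quick interactive sketch, assuming the connection was made through a
pg_service.conf entry (service name, database name, and path hypothetical):

    $ psql "service=myapp"
    mydb=> \echo :SERVICEFILE
    /home/alice/.pg_service.conf

The tab-completion hunk fixes a related papercut: because "LARGE OBJECT" and
"FOREIGN SERVER" are two words, typing GRANT SELECT ON LARGE OBJECT and
pressing Tab used to offer TO before any OID had been entered; both word pairs
are now excluded from the generic TO/FROM completion.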