Fix pg_upgrade for oid removal.
author	Andres Freund <andres@anarazel.de>
Mon, 26 Nov 2018 22:20:36 +0000 (14:20 -0800)
committer	Andres Freund <andres@anarazel.de>
Mon, 26 Nov 2018 22:37:08 +0000 (14:37 -0800)
pg_upgrade previously copied pg_largeobject_metadata over from the old
cluster. That doesn't work anymore, because the table had oids before
578b229718 removed them. I missed that.

As most pieces of metadata for large objects were already dumped as
DDL (except for comments, which pg_upgrade's copy of
pg_largeobject_metadata overwrote), it seems reasonable to just also
dump grants for large objects, as shown below.  If we ever consider
this a relevant performance problem, we'd need to fix the rest of the
already-emitted DDL too.
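
For illustration, the per-blob DDL restored by pg_upgrade now looks
roughly like the following (the OID, role name, and comment text here
are made up):

    SELECT pg_catalog.lo_create('16392');
    ALTER LARGE OBJECT 16392 OWNER TO joe;
    GRANT SELECT ON LARGE OBJECT 16392 TO PUBLIC;
    COMMENT ON LARGE OBJECT 16392 IS 'some comment';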

There's still an open discussion about whether we'll want to force a
specific ordering for the dumped objects, as pg_largeobject_metadata
currently can end up ordered differently before and after pg_upgrade,
which makes automated testing a bit harder.
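
As a hypothetical illustration, a test that diffs large object
metadata across an upgrade would have to impose the ordering itself,
e.g.:

    SELECT oid, lomowner
    FROM pg_catalog.pg_largeobject_metadata
    ORDER BY oid;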

Reported-By: Andrew Dunstan
Author: Andres Freund
Discussion: https://postgr.es/m/91a8a980-41bc-412b-fba2-2ba71a141c2b@2ndQuadrant.com

src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/t/002_pg_dump.pl
src/bin/pg_upgrade/info.c
src/bin/pg_upgrade/pg_upgrade.c

index 9c4e1dc32a6e30b61fbf3f7584fa4db58ce22482..6a0fcdd4c3cc0dc8c6841400c5d168e93a7f19e2 100644
@@ -2866,8 +2866,8 @@ dumpDatabase(Archive *fout)
                     NULL, NULL);
 
    /*
-    * pg_largeobject and pg_largeobject_metadata come from the old system
-    * intact, so set their relfrozenxids and relminmxids.
+    * pg_largeobject comes from the old system intact, so set its
+    * relfrozenxid and relminmxid.
     */
    if (dopt->binary_upgrade)
    {
@@ -2912,47 +2912,6 @@ dumpDatabase(Archive *fout)
 
        PQclear(lo_res);
 
-       /*
-        * pg_largeobject_metadata
-        */
-       if (fout->remoteVersion >= 90000)
-       {
-           resetPQExpBuffer(loFrozenQry);
-           resetPQExpBuffer(loOutQry);
-
-           if (fout->remoteVersion >= 90300)
-               appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, relminmxid\n"
-                                 "FROM pg_catalog.pg_class\n"
-                                 "WHERE oid = %u;\n",
-                                 LargeObjectMetadataRelationId);
-           else
-               appendPQExpBuffer(loFrozenQry, "SELECT relfrozenxid, 0 AS relminmxid\n"
-                                 "FROM pg_catalog.pg_class\n"
-                                 "WHERE oid = %u;\n",
-                                 LargeObjectMetadataRelationId);
-
-           lo_res = ExecuteSqlQueryForSingleRow(fout, loFrozenQry->data);
-
-           i_relfrozenxid = PQfnumber(lo_res, "relfrozenxid");
-           i_relminmxid = PQfnumber(lo_res, "relminmxid");
-
-           appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, set pg_largeobject_metadata relfrozenxid and relminmxid\n");
-           appendPQExpBuffer(loOutQry, "UPDATE pg_catalog.pg_class\n"
-                             "SET relfrozenxid = '%u', relminmxid = '%u'\n"
-                             "WHERE oid = %u;\n",
-                             atooid(PQgetvalue(lo_res, 0, i_relfrozenxid)),
-                             atooid(PQgetvalue(lo_res, 0, i_relminmxid)),
-                             LargeObjectMetadataRelationId);
-           ArchiveEntry(fout, nilCatalogId, createDumpId(),
-                        "pg_largeobject_metadata", NULL, NULL, "",
-                        "pg_largeobject_metadata", SECTION_PRE_DATA,
-                        loOutQry->data, "", NULL,
-                        NULL, 0,
-                        NULL, NULL);
-
-           PQclear(lo_res);
-       }
-
        destroyPQExpBuffer(loFrozenQry);
        destroyPQExpBuffer(loOutQry);
    }
@@ -3266,18 +3225,14 @@ getBlobs(Archive *fout)
            binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
 
        /*
-        * In binary-upgrade mode for blobs, we do *not* dump out the data or
-        * the ACLs, should any exist.  The data and ACL (if any) will be
-        * copied by pg_upgrade, which simply copies the pg_largeobject and
-        * pg_largeobject_metadata tables.
-        *
-        * We *do* dump out the definition of the blob because we need that to
-        * make the restoration of the comments, and anything else, work since
-        * pg_upgrade copies the files behind pg_largeobject and
-        * pg_largeobject_metadata after the dump is restored.
+        * In binary-upgrade mode for blobs, we do *not* dump out the blob
+        * data, as it will be copied by pg_upgrade, which simply copies the
+        * pg_largeobject table. We *do*, however, dump out everything but the
+        * data, as pg_upgrade copies just pg_largeobject, but not
+        * pg_largeobject_metadata, after the dump is restored.
         */
        if (dopt->binary_upgrade)
-           binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
+           binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA;
    }
 
    /*
index 8d66fd2ee7b680e159377464944659bb7028fcd7..2afd950591bdefd2ce34c147057db55645c5c2f7 100644
@@ -2791,9 +2791,9 @@ my %tests = (
            data_only              => 1,
            section_pre_data       => 1,
            test_schema_plus_blobs => 1,
+           binary_upgrade         => 1,
        },
        unlike => {
-           binary_upgrade => 1,
            no_blobs       => 1,
            no_privs       => 1,
            schema_only    => 1,
index fd0b44c3ce936207e5589057d3b7ec0c463773a0..95a00a491329cfbe940dc57279590e68804f19b9 100644
@@ -441,8 +441,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
     *
     * pg_largeobject contains user data that does not appear in pg_dump
     * output, so we have to copy that system table.  It's easiest to do that
-    * by treating it as a user table.  Likewise for pg_largeobject_metadata,
-    * if it exists.
+    * by treating it as a user table.
     */
    snprintf(query + strlen(query), sizeof(query) - strlen(query),
             "WITH regular_heap (reloid, indtable, toastheap) AS ( "
@@ -458,10 +457,8 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
             "                        'binary_upgrade', 'pg_toast') AND "
             "      c.oid >= %u::pg_catalog.oid) OR "
             "     (n.nspname = 'pg_catalog' AND "
-            "      relname IN ('pg_largeobject'%s) ))), ",
-            FirstNormalObjectId,
-            (GET_MAJOR_VERSION(old_cluster.major_version) >= 900) ?
-            ", 'pg_largeobject_metadata'" : "");
+            "      relname IN ('pg_largeobject') ))), ",
+            FirstNormalObjectId);
 
    /*
     * Add a CTE that collects OIDs of toast tables belonging to the tables
index b777f9d651bc05cd72a45b25c7bb7e5914ee4e93..47119dc42d4489481ba34364705ca875172d4b75 100644
@@ -28,8 +28,9 @@
  * We control all assignments of pg_enum.oid because these oids are stored
  * in user tables as enum values.
  *
- * We control all assignments of pg_authid.oid because these oids are stored
- * in pg_largeobject_metadata.
+ * We control all assignments of pg_authid.oid for historical reasons (the
+ * oids used to be stored in pg_largeobject_metadata, which is now copied via
+ * SQL commands); that might change at some point in the future.
  */