Change executor to just Assert that table locks were already obtained.
author: Tom Lane <tgl@sss.pgh.pa.us>
Wed, 3 Oct 2018 20:05:05 +0000 (16:05 -0400)
committer: Tom Lane <tgl@sss.pgh.pa.us>
Wed, 3 Oct 2018 20:05:12 +0000 (16:05 -0400)
Instead of locking tables during executor startup, just Assert that
suitable locks were obtained already during the parse/plan pipeline
(or re-obtained by the plan cache).  This must be so, else we have a
hazard that concurrent DDL has invalidated the plan.

This is pretty inefficient as well as undercommented, but it's all going
to go away shortly, so I didn't try hard.  This commit is just another
attempt to use the buildfarm to see if we've missed anything in the plan
to simplify the executor's table management.

Note that the change needed here in relation_open() exposes that
parallel workers now really are accessing tables without holding any
lock of their own, whereas they were not doing that before this commit.
This does not give me a warm fuzzy feeling about that aspect of parallel
query; it does not seem like a good design, and we now know that it's
had exactly no actual testing.  I think that we should modify parallel
query so that that change can be reverted.

Discussion: https://postgr.es/m/468c85d9-540e-66a2-1dde-fec2b741e688@lab.ntt.co.jp

src/backend/access/heap/heapam.c
src/backend/executor/execMain.c
src/backend/executor/execUtils.c

index 5f1a69ca53c6ea8f6b4a13bf4d6d8705cd22555b..727d6776e13f6b03967fe9bb386c1084c3dc01ab 100644 (file)
@@ -1140,9 +1140,13 @@ relation_open(Oid relationId, LOCKMODE lockmode)
        /*
         * If we didn't get the lock ourselves, assert that caller holds one,
         * except in bootstrap mode where no locks are used.
+        *
+        * Also, parallel workers currently assume that their parent holds locks
+        * for tables used in the parallel query (a mighty shaky assumption).
         */
        Assert(lockmode != NoLock ||
                   IsBootstrapProcessingMode() ||
+                  IsParallelWorker() ||
                   CheckRelationLockedByMe(r, AccessShareLock, true));
 
        /* Make note that we've accessed a temporary relation */
index 1e97ff5643fb601c7f50e25980e6775ae290a0ef..9569d2fa42300f85d6b07a0f1c66f9e3381ff66b 100644 (file)
@@ -849,8 +849,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
                        Relation        resultRelation;
 
                        resultRelationOid = getrelid(resultRelationIndex, rangeTable);
-                       Assert(rt_fetch(resultRelationIndex, rangeTable)->rellockmode == RowExclusiveLock);
-                       resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
+                       resultRelation = heap_open(resultRelationOid, NoLock);
+                       Assert(CheckRelationLockedByMe(resultRelation, RowExclusiveLock, true));
 
                        InitResultRelInfo(resultRelInfo,
                                                          resultRelation,
@@ -890,8 +890,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
                                Relation        resultRelDesc;
 
                                resultRelOid = getrelid(resultRelIndex, rangeTable);
-                               Assert(rt_fetch(resultRelIndex, rangeTable)->rellockmode == RowExclusiveLock);
-                               resultRelDesc = heap_open(resultRelOid, RowExclusiveLock);
+                               resultRelDesc = heap_open(resultRelOid, NoLock);
+                               Assert(CheckRelationLockedByMe(resultRelDesc, RowExclusiveLock, true));
                                InitResultRelInfo(resultRelInfo,
                                                                  resultRelDesc,
                                                                  lfirst_int(l),
@@ -903,7 +903,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
                        estate->es_root_result_relations = resultRelInfos;
                        estate->es_num_root_result_relations = num_roots;
 
-                       /* Simply lock the rest of them. */
+                       /* Simply check the rest of them are locked. */
+#ifdef USE_ASSERT_CHECKING
                        foreach(l, plannedstmt->nonleafResultRelations)
                        {
                                Index           resultRelIndex = lfirst_int(l);
@@ -912,11 +913,15 @@ InitPlan(QueryDesc *queryDesc, int eflags)
                                if (!list_member_int(plannedstmt->rootResultRelations,
                                                                         resultRelIndex))
                                {
-                                       Assert(rt_fetch(resultRelIndex, rangeTable)->rellockmode == RowExclusiveLock);
-                                       LockRelationOid(getrelid(resultRelIndex, rangeTable),
-                                                                       RowExclusiveLock);
+                                       Relation        resultRelDesc;
+
+                                       resultRelDesc = heap_open(getrelid(resultRelIndex, rangeTable),
+                                                                                         NoLock);
+                                       Assert(CheckRelationLockedByMe(resultRelDesc, RowExclusiveLock, true));
+                                       heap_close(resultRelDesc, NoLock);
                                }
                        }
+#endif
                }
        }
        else
@@ -945,7 +950,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
        {
                PlanRowMark *rc = (PlanRowMark *) lfirst(l);
                Oid                     relid;
-               LOCKMODE        rellockmode;
                Relation        relation;
                ExecRowMark *erm;
 
@@ -963,8 +967,10 @@ InitPlan(QueryDesc *queryDesc, int eflags)
                        case ROW_MARK_SHARE:
                        case ROW_MARK_KEYSHARE:
                        case ROW_MARK_REFERENCE:
-                               rellockmode = rt_fetch(rc->rti, rangeTable)->rellockmode;
-                               relation = heap_open(relid, rellockmode);
+                               relation = heap_open(relid, NoLock);
+                               Assert(CheckRelationLockedByMe(relation,
+                                                                                          rt_fetch(rc->rti, rangeTable)->rellockmode,
+                                                                                          true));
                                break;
                        case ROW_MARK_COPY:
                                /* no physical table access is required */
index a4058c0f61dd9afb3d24a554666e6b1030d765de..ba93b401046fa3f90e08a638004783a2b094f291 100644 (file)
@@ -42,6 +42,7 @@
 
 #include "postgres.h"
 
+#include "access/parallel.h"
 #include "access/relscan.h"
 #include "access/transam.h"
 #include "executor/executor.h"
@@ -648,12 +649,14 @@ ExecOpenScanRelation(EState *estate, Index scanrelid, int eflags)
 {
        Relation        rel;
        Oid                     reloid;
-       LOCKMODE        lockmode;
 
-       /* Open the relation and acquire lock as needed */
+       /* Open the relation and verify lock was obtained upstream */
        reloid = getrelid(scanrelid, estate->es_range_table);
-       lockmode = rt_fetch(scanrelid, estate->es_range_table)->rellockmode;
-       rel = heap_open(reloid, lockmode);
+       rel = heap_open(reloid, NoLock);
+       Assert(IsParallelWorker() ||
+                  CheckRelationLockedByMe(rel,
+                                                                  rt_fetch(scanrelid, estate->es_range_table)->rellockmode,
+                                                                  true));
 
        /*
         * Complain if we're attempting a scan of an unscannable relation, except