Diffstat (limited to 'src')
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c | 16 +++++++++++++++-
1 file changed, 15 insertions, 1 deletion
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 6b48a6d8350..3d26d96cadd 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -185,9 +185,22 @@ BitmapHeapNext(BitmapHeapScanState *node)
 	 */
 	if (!scan)
 	{
-		bool		need_tuples = false;
+		bool		need_tuples = true;
 
+		/*
+		 * Unfortunately it turns out that the below optimization does not
+		 * take the removal of TIDs by a concurrent vacuum into
+		 * account. The concurrent vacuum can remove dead TIDs and make
+		 * pages ALL_VISIBLE while those dead TIDs are referenced in the
+		 * bitmap. This would lead to a !need_tuples scan returning too
+		 * many tuples.
+		 *
+		 * In the back-branches, we therefore simply disable the
+		 * optimization. Removing all the relevant code would be too
+		 * invasive (and a major backpatching pain).
+		 */
+#ifdef NOT_ANYMORE
 		/*
 		 * We can potentially skip fetching heap pages if we do not need
 		 * any columns of the table, either for checking non-indexable
 		 * quals or for returning data.  This test is a bit simplistic, as
@@ -197,6 +210,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
 		 */
 		need_tuples = (node->ss.ps.plan->qual != NIL ||
 					   node->ss.ps.plan->targetlist != NIL);
+#endif
 
 		scan = table_beginscan_bm(node->ss.ss_currentRelation,
 								  node->ss.ps.state->es_snapshot,
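To illustrate the race described in the new comment, here is a minimal conceptual sketch, not PostgreSQL source: the helper names (page_is_all_visible, bitmap_ntids, return_empty_tuples, fetch_and_check_tuples) are hypothetical stand-ins for the real machinery, used only to show where the skip-fetch path over-counts when a concurrent vacuum intervenes.

/*
 * Conceptual sketch only -- not PostgreSQL source.  Helper names are
 * hypothetical stand-ins.
 */
static void
scan_one_bitmap_page(BlockNumber blkno, TIDBitmap *bitmap, bool need_tuples)
{
	if (!need_tuples && page_is_all_visible(blkno))
	{
		/*
		 * Skip-fetch path: trust the visibility map and emit one empty
		 * tuple per TID in the bitmap, without reading the heap page.
		 *
		 * Race: the bitmap was built from the index earlier.  A
		 * concurrent vacuum may since have removed dead TIDs from the
		 * page and then marked it ALL_VISIBLE, so the bitmap still
		 * counts TIDs that no longer exist -- the scan returns too many
		 * tuples.
		 */
		return_empty_tuples(bitmap_ntids(bitmap, blkno));
	}
	else
	{
		/*
		 * Fetch path: reading the page verifies each TID against the
		 * actual tuples, so removed TIDs are filtered out here.
		 */
		fetch_and_check_tuples(blkno, bitmap);
	}
}

Only the branch that trusts the bitmap's TID count without visiting the page is affected; the fetch path rechecks every TID against the heap page. That is why the back-branch fix can simply force need_tuples = true and dead-code the optimization behind #ifdef NOT_ANYMORE rather than rip out all the supporting code.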