From 6a4fe1127c5a0ea1515589e416aa29e088170c0e Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Fri, 16 Dec 2016 09:52:04 -0500 Subject: [PATCH] Fix more hash index bugs around marking buffers dirty. In _hash_freeovflpage(), if we're freeing the overflow page that immediately follows the page to which tuples are being moved (the confusingly-named "write buffer"), don't forget to mark that page dirty after updating its hasho_nextblkno. In _hash_squeezebucket(), it's not necessary to mark the primary bucket page dirty if there are no overflow pages, because there's nothing to squeeze in that case. Amit Kapila, with help from Kuntal Ghosh and Dilip Kumar, after an initial trouble report by Jeff Janes. --- src/backend/access/hash/hashovfl.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index 8fbf49461d1..5f1513bb43c 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -452,6 +452,11 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf, MarkBufferDirty(prevbuf); _hash_relbuf(rel, prevbuf); } + else + { + /* ensure to mark prevbuf as dirty */ + wbuf_dirty = true; + } } /* write and unlock the write buffer */ @@ -643,7 +648,7 @@ _hash_squeezebucket(Relation rel, */ if (!BlockNumberIsValid(wopaque->hasho_nextblkno)) { - _hash_chgbufaccess(rel, wbuf, HASH_WRITE, HASH_NOLOCK); + _hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK); return; } -- 2.39.5