summary refs log tree commit diff
path: root/src/backend/access
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/access')
-rw-r--r--src/backend/access/common/reloptions.c10
-rw-r--r--src/backend/access/heap/heapam.c1
-rw-r--r--src/backend/access/heap/vacuumlazy.c43
3 files changed, 53 insertions(+), 1 deletion(-)
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 14c23101ad..dfa802416f 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -314,6 +314,14 @@ static relopt_int intRelOpts[] =
},
{
{
+ "autovacuum_freeze_strategy_threshold",
+ "Table size at which VACUUM freezes using eager strategy, in megabytes.",
+ RELOPT_KIND_HEAP | RELOPT_KIND_TOAST,
+ ShareUpdateExclusiveLock
+ }, -1, 0, MAX_KILOBYTES
+ },
+ {
+ {
"log_autovacuum_min_duration",
"Sets the minimum execution time above which autovacuum actions will be logged",
RELOPT_KIND_HEAP | RELOPT_KIND_TOAST,
@@ -1863,6 +1871,8 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind)
offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, multixact_freeze_max_age)},
{"autovacuum_multixact_freeze_table_age", RELOPT_TYPE_INT,
offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, multixact_freeze_table_age)},
+ {"autovacuum_freeze_strategy_threshold", RELOPT_TYPE_INT,
+ offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, freeze_strategy_threshold)},
{"log_autovacuum_min_duration", RELOPT_TYPE_INT,
offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, log_min_duration)},
{"toast_tuple_target", RELOPT_TYPE_INT,
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index e6024a980b..fec041d6cd 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -7057,6 +7057,7 @@ heap_freeze_tuple(HeapTupleHeader tuple,
cutoffs.OldestMxact = MultiXactCutoff;
cutoffs.FreezeLimit = FreezeLimit;
cutoffs.MultiXactCutoff = MultiXactCutoff;
+ cutoffs.freeze_strategy_threshold_pages = 0;
pagefrz.freeze_required = true;
pagefrz.FreezePageRelfrozenXid = FreezeLimit;
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 8f14cf85f3..e3f86493b9 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -153,6 +153,8 @@ typedef struct LVRelState
bool aggressive;
/* Use visibility map to skip? (disabled by DISABLE_PAGE_SKIPPING) */
bool skipwithvm;
+ /* Eagerly freeze pages that are eligible to become all-frozen? */
+ bool eager_freeze_strategy;
/* Wraparound failsafe has been triggered? */
bool failsafe_active;
/* Consider index vacuuming bypass optimization? */
@@ -243,6 +245,7 @@ typedef struct LVSavedErrInfo
/* non-export function prototypes */
static void lazy_scan_heap(LVRelState *vacrel);
+static void lazy_scan_strategy(LVRelState *vacrel);
static BlockNumber lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer,
BlockNumber next_block,
bool *next_unskippable_allvis,
@@ -472,6 +475,10 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
vacrel->skipwithvm = skipwithvm;
+ /*
+ * Now determine VACUUM's freezing strategy
+ */
+ lazy_scan_strategy(vacrel);
if (verbose)
{
if (vacrel->aggressive)
@@ -1268,6 +1275,38 @@ lazy_scan_heap(LVRelState *vacrel)
}
/*
+ * lazy_scan_strategy() -- Determine freezing strategy.
+ *
+ * Our lazy freezing strategy is useful when putting off the work of freezing
+ * totally avoids freezing that turns out to have been wasted effort later on.
+ * Our eager freezing strategy is useful with larger tables that experience
+ * continual growth, where freezing pages proactively is needed just to avoid
+ * falling behind on freezing (eagerness is also likely to be cheaper in the
+ * short/medium term for such tables, but the long term picture matters most).
+ */
+static void
+lazy_scan_strategy(LVRelState *vacrel)
+{
+ BlockNumber rel_pages = vacrel->rel_pages;
+
+ /*
+ * Decide freezing strategy.
+ *
+ * The eager freezing strategy is used whenever rel_pages exceeds a
+ * threshold controlled by the freeze_strategy_threshold GUC/reloption.
+ *
+ * Also freeze eagerly with an unlogged or temp table, where the total
+ * cost of freezing pages is mostly just the cycles needed to prepare a
+ * set of freeze plans. Executing the freeze plans adds very little cost.
+ * Dirtying extra pages isn't a concern, either; VACUUM will definitely
+ * set PD_ALL_VISIBLE on affected pages, regardless of freezing strategy.
+ */
+ vacrel->eager_freeze_strategy =
+ (rel_pages > vacrel->cutoffs.freeze_strategy_threshold_pages ||
+ !RelationIsPermanent(vacrel->rel));
+}
+
+/*
* lazy_scan_skip() -- set up range of skippable blocks using visibility map.
*
* lazy_scan_heap() calls here every time it needs to set up a new range of
@@ -1795,10 +1834,12 @@ retry:
* one XID/MXID from before FreezeLimit/MultiXactCutoff is present. Also
* freeze when pruning generated an FPI, if doing so means that we set the
* page all-frozen afterwards (might not happen until final heap pass).
+ * When ongoing VACUUM opted to use the eager freezing strategy we freeze
+ * any page that will thereby become all-frozen in the visibility map.
*/
if (pagefrz.freeze_required || tuples_frozen == 0 ||
(prunestate->all_visible && prunestate->all_frozen &&
- fpi_before != pgWalUsage.wal_fpi))
+ (fpi_before != pgWalUsage.wal_fpi || vacrel->eager_freeze_strategy)))
{
/*
* We're freezing the page. Our final NewRelfrozenXid doesn't need to