TransactionId latestRemovedXid;
bool lock_waiter_detected;
+ /* Statistics about indexes (one entry per index of the target relation) */
+ IndexBulkDeleteResult **indstats;
+ int nindexes;
+
/* Used for error callback */
char *indname;
BlockNumber blkno; /* used only for heap operations */
static bool lazy_check_needs_freeze(Buffer buf, bool *hastup,
LVRelStats *vacrelstats);
static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
- IndexBulkDeleteResult **stats,
LVRelStats *vacrelstats, LVParallelState *lps,
int nindexes);
static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
static bool heap_page_is_all_visible(Relation rel, Buffer buf,
LVRelStats *vacrelstats,
TransactionId *visibility_cutoff_xid, bool *all_frozen);
-static void lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
- LVRelStats *vacrelstats, LVParallelState *lps,
- int nindexes);
-static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
- LVShared *lvshared, LVDeadTuples *dead_tuples,
- int nindexes, LVRelStats *vacrelstats);
-static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
- LVRelStats *vacrelstats, LVParallelState *lps,
- int nindexes);
+static void lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
+ LVParallelState *lps, int nindexes);
+static void parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
+ LVDeadTuples *dead_tuples, int nindexes,
+ LVRelStats *vacrelstats);
+static void vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
+ LVParallelState *lps, int nindexes);
static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
LVShared *lvshared, LVSharedIndStats *shared_indstats,
LVDeadTuples *dead_tuples, LVRelStats *vacrelstats);
-static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
- LVRelStats *vacrelstats, LVParallelState *lps,
- int nindexes);
+static void lazy_cleanup_all_indexes(Relation *Irel, LVRelStats *vacrelstats,
+ LVParallelState *lps, int nindexes);
static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex);
static int compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
bool *can_parallel_vacuum);
write_rate;
bool aggressive; /* should we scan all unfrozen pages? */
bool scanned_all_unfrozen; /* actually scanned all such pages? */
+ char **indnames = NULL;
TransactionId xidFullScanLimit;
MultiXactId mxactFullScanLimit;
BlockNumber new_rel_pages;
vacrelstats->useindex = (nindexes > 0 &&
params->index_cleanup == VACOPT_TERNARY_ENABLED);
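+ /*
+  * Space for index bulk-delete results.  palloc0 leaves each entry NULL;
+  * an entry is filled in only when the corresponding index is actually
+  * vacuumed or cleaned up.
+  */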
+ vacrelstats->indstats = (IndexBulkDeleteResult **)
+ palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
+ vacrelstats->nindexes = nindexes;
+
+ /*
+  * Save index names iff autovacuum logging requires them.  Copies are
+  * needed because the index relations will have been closed by the time
+  * the log message is built.
+  */
+ if (IsAutoVacuumWorkerProcess() &&
+ params->log_min_duration >= 0 &&
+ vacrelstats->nindexes > 0)
+ {
+ indnames = palloc(sizeof(char *) * vacrelstats->nindexes);
+ for (int i = 0; i < vacrelstats->nindexes; i++)
+ indnames[i] = pstrdup(RelationGetRelationName(Irel[i]));
+ }
+
/*
* Setup error traceback support for ereport(). The idea is to set up an
* error context callback to display additional information on any error
(long long) VacuumPageHit,
(long long) VacuumPageMiss,
(long long) VacuumPageDirty);
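+ /*
+  * One log line per index; for example (name and numbers illustrative):
+  *   index "foo_pkey": pages: 10 remain, 0 newly deleted, 0 currently
+  *   deleted, 0 reusable
+  */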
+ for (int i = 0; i < vacrelstats->nindexes; i++)
+ {
+ IndexBulkDeleteResult *stats = vacrelstats->indstats[i];
+
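+ /* An index AM may return no statistics (NULL); skip such entries */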
+ if (!stats)
+ continue;
+
+ appendStringInfo(&buf,
+ _("index \"%s\": pages: %u remain, %u newly deleted, %u currently deleted, %u reusable\n"),
+ indnames[i],
+ stats->num_pages,
+ stats->pages_newly_deleted,
+ stats->pages_deleted,
+ stats->pages_free);
+ }
appendStringInfo(&buf, _("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"),
read_rate, write_rate);
if (track_io_timing)
pfree(buf.data);
}
}
+
+ /* Clean up index statistics and index names */
+ for (int i = 0; i < vacrelstats->nindexes; i++)
+ {
+ if (vacrelstats->indstats[i])
+ pfree(vacrelstats->indstats[i]);
+
+ if (indnames && indnames[i])
+ pfree(indnames[i]);
+ }
}
/*
tups_vacuumed, /* tuples cleaned up by current vacuum */
nkeep, /* dead-but-not-removable tuples */
nunused; /* # existing unused line pointers */
- IndexBulkDeleteResult **indstats;
int i;
PGRUsage ru0;
Buffer vmbuffer = InvalidBuffer;
next_fsm_block_to_vacuum = (BlockNumber) 0;
num_tuples = live_tuples = tups_vacuumed = nkeep = nunused = 0;
- indstats = (IndexBulkDeleteResult **)
- palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
-
nblocks = RelationGetNumberOfBlocks(onerel);
vacrelstats->rel_pages = nblocks;
vacrelstats->scanned_pages = 0;
}
/* Work on all the indexes, then the heap */
- lazy_vacuum_all_indexes(onerel, Irel, indstats,
- vacrelstats, lps, nindexes);
+ lazy_vacuum_all_indexes(onerel, Irel, vacrelstats, lps, nindexes);
/* Remove tuples from heap */
lazy_vacuum_heap(onerel, vacrelstats);
if (dead_tuples->num_tuples > 0)
{
/* Work on all the indexes, and then the heap */
- lazy_vacuum_all_indexes(onerel, Irel, indstats, vacrelstats,
- lps, nindexes);
+ lazy_vacuum_all_indexes(onerel, Irel, vacrelstats, lps, nindexes);
/* Remove tuples from heap */
lazy_vacuum_heap(onerel, vacrelstats);
/* Do post-vacuum cleanup */
if (vacrelstats->useindex)
- lazy_cleanup_all_indexes(Irel, indstats, vacrelstats, lps, nindexes);
+ lazy_cleanup_all_indexes(Irel, vacrelstats, lps, nindexes);
/*
* End parallel mode before updating index statistics as we cannot write
* during parallel mode.
*/
if (ParallelVacuumIsActive(lps))
- end_parallel_vacuum(indstats, lps, nindexes);
+ end_parallel_vacuum(vacrelstats->indstats, lps, nindexes);
/* Update index statistics */
if (vacrelstats->useindex)
- update_index_statistics(Irel, indstats, nindexes);
+ update_index_statistics(Irel, vacrelstats->indstats, nindexes);
/* If no indexes, make log report that lazy_vacuum_heap would've made */
if (vacuumed_pages)
*/
static void
lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
- IndexBulkDeleteResult **stats,
LVRelStats *vacrelstats, LVParallelState *lps,
int nindexes)
{
lps->lvshared->reltuples = vacrelstats->old_live_tuples;
lps->lvshared->estimated_count = true;
- lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+ lazy_parallel_vacuum_indexes(Irel, vacrelstats, lps, nindexes);
}
else
{
int idx;
for (idx = 0; idx < nindexes; idx++)
- lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
+ lazy_vacuum_index(Irel[idx], &(vacrelstats->indstats[idx]),
+ vacrelstats->dead_tuples,
vacrelstats->old_live_tuples, vacrelstats);
}
* cleanup.
*/
static void
-lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
- LVRelStats *vacrelstats, LVParallelState *lps,
- int nindexes)
+lazy_parallel_vacuum_indexes(Relation *Irel, LVRelStats *vacrelstats,
+ LVParallelState *lps, int nindexes)
{
int nworkers;
}
/* Process the indexes that can be processed by only leader process */
- vacuum_indexes_leader(Irel, stats, vacrelstats, lps, nindexes);
+ vacuum_indexes_leader(Irel, vacrelstats, lps, nindexes);
/*
* Join as a parallel worker. The leader process alone processes all the
* indexes in the case where no workers are launched.
*/
- parallel_vacuum_index(Irel, stats, lps->lvshared,
- vacrelstats->dead_tuples, nindexes, vacrelstats);
+ parallel_vacuum_index(Irel, lps->lvshared, vacrelstats->dead_tuples,
+ nindexes, vacrelstats);
/*
* Next, accumulate buffer and WAL usage. (This must wait for the workers
* vacuum worker processes to process the indexes in parallel.
*/
static void
-parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
- LVShared *lvshared, LVDeadTuples *dead_tuples,
- int nindexes, LVRelStats *vacrelstats)
+parallel_vacuum_index(Relation *Irel, LVShared *lvshared,
+ LVDeadTuples *dead_tuples, int nindexes,
+ LVRelStats *vacrelstats)
{
/*
* Increment the active worker count if we are able to launch any worker.
continue;
/* Do vacuum or cleanup of the index */
- vacuum_one_index(Irel[idx], &(stats[idx]), lvshared, shared_indstats,
- dead_tuples, vacrelstats);
+ vacuum_one_index(Irel[idx], &(vacrelstats->indstats[idx]), lvshared,
+ shared_indstats, dead_tuples, vacrelstats);
}
/*
* because these indexes don't support parallel operation at that phase.
*/
static void
-vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
- LVRelStats *vacrelstats, LVParallelState *lps,
- int nindexes)
+vacuum_indexes_leader(Relation *Irel, LVRelStats *vacrelstats,
+ LVParallelState *lps, int nindexes)
{
int i;
/* Process the indexes skipped by parallel workers */
if (shared_indstats == NULL ||
skip_parallel_vacuum_index(Irel[i], lps->lvshared))
- vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
+ vacuum_one_index(Irel[i], &(vacrelstats->indstats[i]), lps->lvshared,
shared_indstats, vacrelstats->dead_tuples,
vacrelstats);
}
* parallel vacuum.
*/
static void
-lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
- LVRelStats *vacrelstats, LVParallelState *lps,
- int nindexes)
+lazy_cleanup_all_indexes(Relation *Irel, LVRelStats *vacrelstats,
+ LVParallelState *lps, int nindexes)
{
int idx;
lps->lvshared->estimated_count =
(vacrelstats->tupcount_pages < vacrelstats->rel_pages);
- lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+ lazy_parallel_vacuum_indexes(Irel, vacrelstats, lps, nindexes);
}
else
{
for (idx = 0; idx < nindexes; idx++)
- lazy_cleanup_index(Irel[idx], &stats[idx],
+ lazy_cleanup_index(Irel[idx], &(vacrelstats->indstats[idx]),
vacrelstats->new_rel_tuples,
vacrelstats->tupcount_pages < vacrelstats->rel_pages,
vacrelstats);
InvalidTransactionId,
InvalidMultiXactId,
false);
- pfree(stats[i]);
}
}
WalUsage *wal_usage;
int nindexes;
char *sharedquery;
- IndexBulkDeleteResult **stats;
LVRelStats vacrelstats;
ErrorContextCallback errcallback;
VacuumSharedCostBalance = &(lvshared->cost_balance);
VacuumActiveNWorkers = &(lvshared->active_nworkers);
- stats = (IndexBulkDeleteResult **)
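+ /* Worker-local array of per-index results, parallel to indrels */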
+ vacrelstats.indstats = (IndexBulkDeleteResult **)
palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
if (lvshared->maintenance_work_mem_worker > 0)
InstrStartParallelQuery();
/* Process indexes to perform vacuum/cleanup */
- parallel_vacuum_index(indrels, stats, lvshared, dead_tuples, nindexes,
+ parallel_vacuum_index(indrels, lvshared, dead_tuples, nindexes,
&vacrelstats);
/* Report buffer/WAL usage during parallel execution */
vac_close_indexes(nindexes, indrels, RowExclusiveLock);
table_close(onerel, ShareUpdateExclusiveLock);
- pfree(stats);
+ pfree(vacrelstats.indstats);
}
/*