Diffstat (limited to 'src/backend')
 src/backend/access/transam/xlog.c           |  10
 src/backend/commands/explain.c              |  21
 src/backend/jit/llvm/Makefile               |   2
 src/backend/optimizer/path/costsize.c       |  18
 src/backend/optimizer/plan/createplan.c     |  15
 src/backend/optimizer/util/pathnode.c       |  11
 src/backend/postmaster/checkpointer.c       | 155
 src/backend/replication/syncrep_scanner.l   |  11
 src/backend/storage/buffer/bufmgr.c         |  13
 src/backend/storage/ipc/procsignal.c        |   6
 src/backend/utils/activity/pgstat.c         |  52
 src/backend/utils/activity/pgstat_backend.c |  14
 src/backend/utils/activity/pgstat_io.c      |  10
 src/backend/utils/activity/pgstat_slru.c    |  10
 src/backend/utils/activity/pgstat_wal.c     |  20
 src/backend/utils/adt/tid.c                 |   2
 src/backend/utils/adt/xml.c                 |  74
 17 files changed, 255 insertions(+), 189 deletions(-)
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index eefffc4277a..b0891998b24 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -96,6 +96,7 @@
#include "utils/guc_hooks.h"
#include "utils/guc_tables.h"
#include "utils/injection_point.h"
+#include "utils/pgstat_internal.h"
#include "utils/ps_status.h"
#include "utils/relmapper.h"
#include "utils/snapmgr.h"
@@ -1091,6 +1092,9 @@ XLogInsertRecord(XLogRecData *rdata,
pgWalUsage.wal_bytes += rechdr->xl_tot_len;
pgWalUsage.wal_records++;
pgWalUsage.wal_fpi += num_fpi;
+
+ /* Required for the flush of pending stats WAL data */
+ pgstat_report_fixed = true;
}
return EndPos;
@@ -2108,6 +2112,12 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
LWLockRelease(WALWriteLock);
pgWalUsage.wal_buffers_full++;
TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_DONE();
+
+ /*
+ * Required for the flush of pending stats WAL data, per
+ * update of pgWalUsage.
+ */
+ pgstat_report_fixed = true;
}
}
}
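This hunk and the later pgstat changes replace the per-kind have_static_pending_cb polling with a single process-local flag: any code that updates fixed-numbered stats sets pgstat_report_fixed, and pgstat_report_stat() tests that one bit on its fast path. A minimal standalone sketch of the pattern (plain C with illustrative names, not the actual PostgreSQL code):

    #include <stdbool.h>

    static bool report_fixed = false;   /* stands in for pgstat_report_fixed */

    static void
    count_wal_record(void)
    {
        /* ... bump process-local WAL counters ... */
        report_fixed = true;            /* mark that a flush is now needed */
    }

    static int
    report_stat(void)
    {
        if (!report_fixed)
            return 0;                   /* fast path: no per-kind polling */

        /* ... flush process-local counters to the shared stats area ... */
        report_fixed = false;
        return 0;
    }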
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 7e2792ead71..8345bc0264b 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3582,6 +3582,7 @@ static void
show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
{
Plan *plan = ((PlanState *) mstate)->plan;
+ Memoize *mplan = (Memoize *) plan;
ListCell *lc;
List *context;
StringInfoData keystr;
@@ -3602,7 +3603,7 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
plan,
ancestors);
- foreach(lc, ((Memoize *) plan)->param_exprs)
+ foreach(lc, mplan->param_exprs)
{
Node *expr = (Node *) lfirst(lc);
@@ -3618,6 +3619,24 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
pfree(keystr.data);
+ if (es->costs)
+ {
+ if (es->format == EXPLAIN_FORMAT_TEXT)
+ {
+ ExplainIndentText(es);
+ appendStringInfo(es->str, "Estimates: capacity=%u distinct keys=%.0f lookups=%.0f hit percent=%.2f%%\n",
+ mplan->est_entries, mplan->est_unique_keys,
+ mplan->est_calls, mplan->est_hit_ratio * 100.0);
+ }
+ else
+ {
+ ExplainPropertyUInteger("Estimated Capacity", NULL, mplan->est_entries, es);
+ ExplainPropertyFloat("Estimated Distinct Lookup Keys", NULL, mplan->est_unique_keys, 0, es);
+ ExplainPropertyFloat("Estimated Lookups", NULL, mplan->est_calls, 0, es);
+ ExplainPropertyFloat("Estimated Hit Percent", NULL, mplan->est_hit_ratio * 100.0, 2, es);
+ }
+ }
+
if (!es->analyze)
return;
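With costs enabled, the text format gains one extra line per Memoize node. A hypothetical plan fragment (all values invented, chosen to be self-consistent with the formula in costsize.c below):

    ->  Memoize
          Cache Key: t1.x
          Estimates: capacity=1024 distinct keys=100 lookups=10000 hit percent=99.00%

Since all 100 distinct keys fit in the 1024-entry cache, only the first lookup of each key misses: (10000 - 100) / 10000 = 99%.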
diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile
index e8c12060b93..68677ba42e1 100644
--- a/src/backend/jit/llvm/Makefile
+++ b/src/backend/jit/llvm/Makefile
@@ -31,7 +31,7 @@ endif
# All files in this directory use LLVM.
CFLAGS += $(LLVM_CFLAGS)
CXXFLAGS += $(LLVM_CXXFLAGS)
-override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS)
+override CPPFLAGS += $(LLVM_CPPFLAGS)
SHLIB_LINK += $(LLVM_LIBS)
# Because this module includes C++ files, we need to use a C++
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 1f04a2c182c..344a3188317 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -2572,13 +2572,13 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
Cost input_startup_cost = mpath->subpath->startup_cost;
Cost input_total_cost = mpath->subpath->total_cost;
double tuples = mpath->subpath->rows;
- double calls = mpath->calls;
+ Cardinality est_calls = mpath->est_calls;
int width = mpath->subpath->pathtarget->width;
double hash_mem_bytes;
double est_entry_bytes;
- double est_cache_entries;
- double ndistinct;
+ Cardinality est_cache_entries;
+ Cardinality ndistinct;
double evict_ratio;
double hit_ratio;
Cost startup_cost;
@@ -2604,7 +2604,7 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
/* estimate on the distinct number of parameter values */
- ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
+ ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL,
&estinfo);
/*
@@ -2616,7 +2616,10 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
* certainly mean a MemoizePath will never survive add_path().
*/
if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
- ndistinct = calls;
+ ndistinct = est_calls;
+
+ /* Remember the ndistinct estimate for EXPLAIN */
+ mpath->est_unique_keys = ndistinct;
/*
* Since we've already estimated the maximum number of entries we can
@@ -2644,9 +2647,12 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
* must look at how many scans are estimated in total for this node and
* how many of those scans we expect to get a cache hit.
*/
- hit_ratio = ((calls - ndistinct) / calls) *
+ hit_ratio = ((est_calls - ndistinct) / est_calls) *
(est_cache_entries / Max(ndistinct, est_cache_entries));
+ /* Remember the hit ratio estimate for EXPLAIN */
+ mpath->est_hit_ratio = hit_ratio;
+
Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
/*
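The hit ratio now remembered in est_hit_ratio multiplies two factors: the fraction of calls that repeat an earlier key, and the fraction of lookups whose key can still be in the cache. With invented numbers est_calls = 10000, ndistinct = 200 and est_cache_entries = 100:

    hit_ratio = ((10000 - 200) / 10000) * (100 / Max(200, 100))
              = 0.98 * 0.5
              = 0.49

i.e. about half of the rescans are expected to be served from the cache.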
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 8a9f1d7a943..bfefc7dbea1 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -284,7 +284,10 @@ static Material *make_material(Plan *lefttree);
static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
Oid *collations, List *param_exprs,
bool singlerow, bool binary_mode,
- uint32 est_entries, Bitmapset *keyparamids);
+ uint32 est_entries, Bitmapset *keyparamids,
+ Cardinality est_calls,
+ Cardinality est_unique_keys,
+ double est_hit_ratio);
static WindowAgg *make_windowagg(List *tlist, WindowClause *wc,
int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
@@ -1753,7 +1756,8 @@ create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
plan = make_memoize(subplan, operators, collations, param_exprs,
best_path->singlerow, best_path->binary_mode,
- best_path->est_entries, keyparamids);
+ best_path->est_entries, keyparamids, best_path->est_calls,
+ best_path->est_unique_keys, best_path->est_hit_ratio);
copy_generic_path_info(&plan->plan, (Path *) best_path);
@@ -6749,7 +6753,9 @@ materialize_finished_plan(Plan *subplan)
static Memoize *
make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
List *param_exprs, bool singlerow, bool binary_mode,
- uint32 est_entries, Bitmapset *keyparamids)
+ uint32 est_entries, Bitmapset *keyparamids,
+ Cardinality est_calls, Cardinality est_unique_keys,
+ double est_hit_ratio)
{
Memoize *node = makeNode(Memoize);
Plan *plan = &node->plan;
@@ -6767,6 +6773,9 @@ make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
node->binary_mode = binary_mode;
node->est_entries = est_entries;
node->keyparamids = keyparamids;
+ node->est_calls = est_calls;
+ node->est_unique_keys = est_unique_keys;
+ node->est_hit_ratio = est_hit_ratio;
return node;
}
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 9cc602788ea..a4c5867cdcb 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1689,7 +1689,7 @@ create_material_path(RelOptInfo *rel, Path *subpath)
MemoizePath *
create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
List *param_exprs, List *hash_operators,
- bool singlerow, bool binary_mode, double calls)
+ bool singlerow, bool binary_mode, Cardinality est_calls)
{
MemoizePath *pathnode = makeNode(MemoizePath);
@@ -1710,7 +1710,6 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
pathnode->param_exprs = param_exprs;
pathnode->singlerow = singlerow;
pathnode->binary_mode = binary_mode;
- pathnode->calls = clamp_row_est(calls);
/*
* For now we set est_entries to 0. cost_memoize_rescan() does all the
@@ -1720,6 +1719,12 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
*/
pathnode->est_entries = 0;
+ pathnode->est_calls = clamp_row_est(est_calls);
+
+ /* These will also be set later in cost_memoize_rescan() */
+ pathnode->est_unique_keys = 0.0;
+ pathnode->est_hit_ratio = 0.0;
+
/* we should not generate this path type when enable_memoize=false */
Assert(enable_memoize);
pathnode->path.disabled_nodes = subpath->disabled_nodes;
@@ -4259,7 +4264,7 @@ reparameterize_path(PlannerInfo *root, Path *path,
mpath->hash_operators,
mpath->singlerow,
mpath->binary_mode,
- mpath->calls);
+ mpath->est_calls);
}
default:
break;
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 2809e298a44..8490148a47d 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -130,6 +130,13 @@ typedef struct
int num_requests; /* current # of requests */
int max_requests; /* allocated array size */
+
+ int head; /* Index of the first request in the ring
+ * buffer */
+ int tail; /* Index of the last request in the ring
+ * buffer */
+
+ /* The ring buffer of pending checkpointer requests */
CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
} CheckpointerShmemStruct;
@@ -138,6 +145,12 @@ static CheckpointerShmemStruct *CheckpointerShmem;
/* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
#define WRITES_PER_ABSORB 1000
+/* Maximum number of checkpointer requests to process in one batch */
+#define CKPT_REQ_BATCH_SIZE 10000
+
+/* Max number of requests the checkpointer request queue can hold */
+#define MAX_CHECKPOINT_REQUESTS 10000000
+
/*
* GUC parameters
*/
@@ -973,7 +986,8 @@ CheckpointerShmemInit(void)
*/
MemSet(CheckpointerShmem, 0, size);
SpinLockInit(&CheckpointerShmem->ckpt_lck);
- CheckpointerShmem->max_requests = NBuffers;
+ CheckpointerShmem->max_requests = Min(NBuffers, MAX_CHECKPOINT_REQUESTS);
+ CheckpointerShmem->head = CheckpointerShmem->tail = 0;
ConditionVariableInit(&CheckpointerShmem->start_cv);
ConditionVariableInit(&CheckpointerShmem->done_cv);
}
@@ -1201,6 +1215,7 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
{
CheckpointerRequest *request;
bool too_full;
+ int insert_pos;
if (!IsUnderPostmaster)
return false; /* probably shouldn't even get here */
@@ -1224,10 +1239,14 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
}
/* OK, insert request */
- request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
+ insert_pos = CheckpointerShmem->tail;
+ request = &CheckpointerShmem->requests[insert_pos];
request->ftag = *ftag;
request->type = type;
+ CheckpointerShmem->tail = (CheckpointerShmem->tail + 1) % CheckpointerShmem->max_requests;
+ CheckpointerShmem->num_requests++;
+
/* If queue is more than half full, nudge the checkpointer to empty it */
too_full = (CheckpointerShmem->num_requests >=
CheckpointerShmem->max_requests / 2);
@@ -1269,12 +1288,16 @@ CompactCheckpointerRequestQueue(void)
struct CheckpointerSlotMapping
{
CheckpointerRequest request;
- int slot;
+ int ring_idx;
};
- int n,
- preserve_count;
+ int n;
int num_skipped = 0;
+ int head;
+ int max_requests;
+ int num_requests;
+ int read_idx,
+ write_idx;
HASHCTL ctl;
HTAB *htab;
bool *skip_slot;
@@ -1286,8 +1309,13 @@ CompactCheckpointerRequestQueue(void)
if (CritSectionCount > 0)
return false;
+ max_requests = CheckpointerShmem->max_requests;
+ num_requests = CheckpointerShmem->num_requests;
+
/* Initialize skip_slot array */
- skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);
+ skip_slot = palloc0(sizeof(bool) * max_requests);
+
+ head = CheckpointerShmem->head;
/* Initialize temporary hash table */
ctl.keysize = sizeof(CheckpointerRequest);
@@ -1311,7 +1339,8 @@ CompactCheckpointerRequestQueue(void)
* away preceding entries that would end up being canceled anyhow), but
* it's not clear that the extra complexity would buy us anything.
*/
- for (n = 0; n < CheckpointerShmem->num_requests; n++)
+ read_idx = head;
+ for (n = 0; n < num_requests; n++)
{
CheckpointerRequest *request;
struct CheckpointerSlotMapping *slotmap;
@@ -1324,16 +1353,19 @@ CompactCheckpointerRequestQueue(void)
* CheckpointerShmemInit. Note also that RelFileLocator had better
* contain no pad bytes.
*/
- request = &CheckpointerShmem->requests[n];
+ request = &CheckpointerShmem->requests[read_idx];
slotmap = hash_search(htab, request, HASH_ENTER, &found);
if (found)
{
/* Duplicate, so mark the previous occurrence as skippable */
- skip_slot[slotmap->slot] = true;
+ skip_slot[slotmap->ring_idx] = true;
num_skipped++;
}
/* Remember slot containing latest occurrence of this request value */
- slotmap->slot = n;
+ slotmap->ring_idx = read_idx;
+
+ /* Move to the next request in the ring buffer */
+ read_idx = (read_idx + 1) % max_requests;
}
/* Done with the hash table. */
@@ -1347,17 +1379,34 @@ CompactCheckpointerRequestQueue(void)
}
/* We found some duplicates; remove them. */
- preserve_count = 0;
- for (n = 0; n < CheckpointerShmem->num_requests; n++)
+ read_idx = write_idx = head;
+ for (n = 0; n < num_requests; n++)
{
- if (skip_slot[n])
- continue;
- CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
+ /* If this slot is NOT skipped, keep it */
+ if (!skip_slot[read_idx])
+ {
+ /* If the read and write positions are different, copy the request */
+ if (write_idx != read_idx)
+ CheckpointerShmem->requests[write_idx] =
+ CheckpointerShmem->requests[read_idx];
+
+ /* Advance the write position */
+ write_idx = (write_idx + 1) % max_requests;
+ }
+
+ read_idx = (read_idx + 1) % max_requests;
}
+
+ /*
+ * Update ring buffer state: head remains the same, tail moves, count
+ * decreases
+ */
+ CheckpointerShmem->tail = write_idx;
+ CheckpointerShmem->num_requests -= num_skipped;
+
ereport(DEBUG1,
(errmsg_internal("compacted fsync request queue from %d entries to %d entries",
- CheckpointerShmem->num_requests, preserve_count)));
- CheckpointerShmem->num_requests = preserve_count;
+ num_requests, CheckpointerShmem->num_requests)));
/* Cleanup. */
pfree(skip_slot);
@@ -1378,40 +1427,64 @@ AbsorbSyncRequests(void)
{
CheckpointerRequest *requests = NULL;
CheckpointerRequest *request;
- int n;
+ int n,
+ i;
+ bool loop;
if (!AmCheckpointerProcess())
return;
- LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
-
- /*
- * We try to avoid holding the lock for a long time by copying the request
- * array, and processing the requests after releasing the lock.
- *
- * Once we have cleared the requests from shared memory, we have to PANIC
- * if we then fail to absorb them (eg, because our hashtable runs out of
- * memory). This is because the system cannot run safely if we are unable
- * to fsync what we have been told to fsync. Fortunately, the hashtable
- * is so small that the problem is quite unlikely to arise in practice.
- */
- n = CheckpointerShmem->num_requests;
- if (n > 0)
+ do
{
- requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
- memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
- }
+ LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
+
+ /*---
+ * We try to avoid holding the lock for a long time by:
+ * 1. Copying the request array and processing the requests after
+ * releasing the lock;
+	 * 2. Processing the queue in batches of CKPT_REQ_BATCH_SIZE rather
+	 * than all at once.
+ *
+ * Once we have cleared the requests from shared memory, we must
+ * PANIC if we then fail to absorb them (e.g., because our hashtable
+ * runs out of memory). This is because the system cannot run safely
+ * if we are unable to fsync what we have been told to fsync.
+ * Fortunately, the hashtable is so small that the problem is quite
+ * unlikely to arise in practice.
+ *
+	 * Note: The maximum possible size of the ring buffer is
+	 * MAX_CHECKPOINT_REQUESTS entries, which fit into the maximum palloc
+	 * allocation size of 1 GB. Our maximum batch size,
+ * CKPT_REQ_BATCH_SIZE, is even smaller.
+ */
+ n = Min(CheckpointerShmem->num_requests, CKPT_REQ_BATCH_SIZE);
+ if (n > 0)
+ {
+ if (!requests)
+ requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
- START_CRIT_SECTION();
+ for (i = 0; i < n; i++)
+ {
+ requests[i] = CheckpointerShmem->requests[CheckpointerShmem->head];
+ CheckpointerShmem->head = (CheckpointerShmem->head + 1) % CheckpointerShmem->max_requests;
+ }
- CheckpointerShmem->num_requests = 0;
+ CheckpointerShmem->num_requests -= n;
- LWLockRelease(CheckpointerCommLock);
+ }
+
+ START_CRIT_SECTION();
+
+ /* Are there any requests in the queue? If so, keep going. */
+ loop = CheckpointerShmem->num_requests != 0;
+
+ LWLockRelease(CheckpointerCommLock);
- for (request = requests; n > 0; request++, n--)
- RememberSyncRequest(&request->ftag, request->type);
+ for (request = requests; n > 0; request++, n--)
+ RememberSyncRequest(&request->ftag, request->type);
- END_CRIT_SECTION();
+ END_CRIT_SECTION();
+ } while (loop);
if (requests)
pfree(requests);
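The request array becomes a bounded ring buffer: ForwardSyncRequest() inserts at tail, the absorb path consumes from head, and both indexes wrap modulo max_requests. A standalone sketch of the indexing (illustrative C, not the PostgreSQL struct):

    #include <stdbool.h>

    #define QUEUE_SIZE 8

    typedef struct
    {
        int head;               /* index of the oldest request */
        int tail;               /* index where the next request is stored */
        int num;                /* current number of requests */
        int items[QUEUE_SIZE];
    } Ring;

    static bool
    ring_push(Ring *r, int item)
    {
        if (r->num == QUEUE_SIZE)
            return false;       /* full: caller compacts or waits */
        r->items[r->tail] = item;
        r->tail = (r->tail + 1) % QUEUE_SIZE;
        r->num++;
        return true;
    }

    static bool
    ring_pop(Ring *r, int *item)
    {
        if (r->num == 0)
            return false;       /* empty */
        *item = r->items[r->head];
        r->head = (r->head + 1) % QUEUE_SIZE;
        r->num--;
        return true;
    }

AbsorbSyncRequests() additionally drains the queue in batches of CKPT_REQ_BATCH_SIZE, releasing CheckpointerCommLock between batches, so a long queue no longer pins the lock for its entire length.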
diff --git a/src/backend/replication/syncrep_scanner.l b/src/backend/replication/syncrep_scanner.l
index 7dec1f869c7..02004d621e7 100644
--- a/src/backend/replication/syncrep_scanner.l
+++ b/src/backend/replication/syncrep_scanner.l
@@ -157,17 +157,16 @@ syncrep_yyerror(SyncRepConfigData **syncrep_parse_result_p, char **syncrep_parse
{
struct yyguts_t *yyg = (struct yyguts_t *) yyscanner; /* needed for yytext
* macro */
- char *syncrep_parse_error_msg = *syncrep_parse_error_msg_p;
/* report only the first error in a parse operation */
- if (syncrep_parse_error_msg)
+ if (*syncrep_parse_error_msg_p)
return;
if (yytext[0])
- syncrep_parse_error_msg = psprintf("%s at or near \"%s\"",
- message, yytext);
+ *syncrep_parse_error_msg_p = psprintf("%s at or near \"%s\"",
+ message, yytext);
else
- syncrep_parse_error_msg = psprintf("%s at end of input",
- message);
+ *syncrep_parse_error_msg_p = psprintf("%s at end of input",
+ message);
}
void
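The scanner bug fixed above is the classic out-parameter mistake: the function copied *syncrep_parse_error_msg_p into a local variable, assigned the new message to the local, and the caller never saw it. Reduced to a standalone C sketch (hypothetical names):

    #include <stdio.h>

    static void
    set_error_wrong(const char **errmsg_p, const char *msg)
    {
        const char *errmsg = *errmsg_p; /* local copy of the out-parameter */

        errmsg = msg;                   /* BUG: only the local copy changes */
        (void) errmsg;
    }

    static void
    set_error_right(const char **errmsg_p, const char *msg)
    {
        if (*errmsg_p)
            return;                     /* report only the first error */
        *errmsg_p = msg;                /* store through the pointer */
    }

    int
    main(void)
    {
        const char *err = NULL;

        set_error_wrong(&err, "syntax error at or near \"foo\"");
        printf("wrong: %s\n", err ? err : "(null)");    /* prints (null) */

        set_error_right(&err, "syntax error at or near \"foo\"");
        printf("right: %s\n", err ? err : "(null)");    /* prints the message */
        return 0;
    }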
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 6afdd28dba6..67431208e7f 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2743,12 +2743,10 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
* because mdread doesn't complain about reads beyond EOF (when
* zero_damaged_pages is ON) and so a previous attempt to read a block
* beyond EOF could have left a "valid" zero-filled buffer.
- * Unfortunately, we have also seen this case occurring because of
- * buggy Linux kernels that sometimes return an lseek(SEEK_END) result
- * that doesn't account for a recent write. In that situation, the
- * pre-existing buffer would contain valid data that we don't want to
- * overwrite. Since the legitimate cases should always have left a
- * zero-filled buffer, complain if not PageIsNew.
+ *
+		 * This has also been observed when a relation was overwritten by
+		 * an external process. Since the legitimate cases should always
+		 * have left a zero-filled buffer, complain if not PageIsNew.
*/
if (existing_id >= 0)
{
@@ -2778,8 +2776,7 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
ereport(ERROR,
(errmsg("unexpected data beyond EOF in block %u of relation %s",
existing_hdr->tag.blockNum,
- relpath(bmr.smgr->smgr_rlocator, fork).str),
- errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
+ relpath(bmr.smgr->smgr_rlocator, fork).str)));
/*
* We *must* do smgr[zero]extend before succeeding, else the page
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index a9bb540b55a..087821311cc 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -728,7 +728,11 @@ procsignal_sigusr1_handler(SIGNAL_ARGS)
void
SendCancelRequest(int backendPID, const uint8 *cancel_key, int cancel_key_len)
{
- Assert(backendPID != 0);
+ if (backendPID == 0)
+ {
+ ereport(LOG, (errmsg("invalid cancel request with PID 0")));
+ return;
+ }
/*
* See if we have a matching backend. Reading the pss_pid and
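The Assert() is downgraded to a runtime check because a cancel request carrying PID 0 can arrive off the wire, and assertions are compiled out of production builds. A standalone sketch of the pattern (illustrative, not the PostgreSQL code):

    #include <stdio.h>

    static void
    handle_cancel(int backend_pid)
    {
        /* Wrong for remote input: assert(backend_pid != 0) vanishes
         * under NDEBUG, leaving the bad value unchecked. */
        if (backend_pid == 0)
        {
            fprintf(stderr, "invalid cancel request with PID 0\n");
            return;                 /* log and ignore, do not crash */
        }
        /* ... look up the matching backend ... */
    }

    int
    main(void)
    {
        handle_cancel(0);           /* logged and ignored, no abort */
        return 0;
    }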
diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index 8b57845e870..6bc91ce0dad 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -212,6 +212,11 @@ int pgstat_fetch_consistency = PGSTAT_FETCH_CONSISTENCY_CACHE;
PgStat_LocalState pgStatLocal;
+/*
+ * Track pending reports for fixed-numbered stats, used by
+ * pgstat_report_stat().
+ */
+bool pgstat_report_fixed = false;
/* ----------
* Local data
@@ -370,7 +375,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.shared_data_off = offsetof(PgStatShared_Backend, stats),
.shared_data_len = sizeof(((PgStatShared_Backend *) 0)->stats),
- .have_static_pending_cb = pgstat_backend_have_pending_cb,
.flush_static_cb = pgstat_backend_flush_cb,
.reset_timestamp_cb = pgstat_backend_reset_timestamp_cb,
},
@@ -437,7 +441,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.shared_data_len = sizeof(((PgStatShared_IO *) 0)->stats),
.flush_static_cb = pgstat_io_flush_cb,
- .have_static_pending_cb = pgstat_io_have_pending_cb,
.init_shmem_cb = pgstat_io_init_shmem_cb,
.reset_all_cb = pgstat_io_reset_all_cb,
.snapshot_cb = pgstat_io_snapshot_cb,
@@ -455,7 +458,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.shared_data_len = sizeof(((PgStatShared_SLRU *) 0)->stats),
.flush_static_cb = pgstat_slru_flush_cb,
- .have_static_pending_cb = pgstat_slru_have_pending_cb,
.init_shmem_cb = pgstat_slru_init_shmem_cb,
.reset_all_cb = pgstat_slru_reset_all_cb,
.snapshot_cb = pgstat_slru_snapshot_cb,
@@ -474,7 +476,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.init_backend_cb = pgstat_wal_init_backend_cb,
.flush_static_cb = pgstat_wal_flush_cb,
- .have_static_pending_cb = pgstat_wal_have_pending_cb,
.init_shmem_cb = pgstat_wal_init_shmem_cb,
.reset_all_cb = pgstat_wal_reset_all_cb,
.snapshot_cb = pgstat_wal_snapshot_cb,
@@ -708,29 +709,10 @@ pgstat_report_stat(bool force)
}
/* Don't expend a clock check if nothing to do */
- if (dlist_is_empty(&pgStatPending))
+ if (dlist_is_empty(&pgStatPending) &&
+ !pgstat_report_fixed)
{
- bool do_flush = false;
-
- /* Check for pending stats */
- for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
- {
- const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
-
- if (!kind_info)
- continue;
- if (!kind_info->have_static_pending_cb)
- continue;
-
- if (kind_info->have_static_pending_cb())
- {
- do_flush = true;
- break;
- }
- }
-
- if (!do_flush)
- return 0;
+ return 0;
}
/*
@@ -784,16 +766,19 @@ pgstat_report_stat(bool force)
partial_flush |= pgstat_flush_pending_entries(nowait);
/* flush of other stats kinds */
- for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
+ if (pgstat_report_fixed)
{
- const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
+ for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
+ {
+ const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
- if (!kind_info)
- continue;
- if (!kind_info->flush_static_cb)
- continue;
+ if (!kind_info)
+ continue;
+ if (!kind_info->flush_static_cb)
+ continue;
- partial_flush |= kind_info->flush_static_cb(nowait);
+ partial_flush |= kind_info->flush_static_cb(nowait);
+ }
}
last_flush = now;
@@ -815,6 +800,7 @@ pgstat_report_stat(bool force)
}
pending_since = 0;
+ pgstat_report_fixed = false;
return 0;
}
diff --git a/src/backend/utils/activity/pgstat_backend.c b/src/backend/utils/activity/pgstat_backend.c
index 51256277e8d..8714a85e2d9 100644
--- a/src/backend/utils/activity/pgstat_backend.c
+++ b/src/backend/utils/activity/pgstat_backend.c
@@ -66,6 +66,7 @@ pgstat_count_backend_io_op_time(IOObject io_object, IOContext io_context,
io_time);
backend_has_iostats = true;
+ pgstat_report_fixed = true;
}
void
@@ -81,6 +82,7 @@ pgstat_count_backend_io_op(IOObject io_object, IOContext io_context,
PendingBackendStats.pending_io.bytes[io_object][io_context][io_op] += bytes;
backend_has_iostats = true;
+ pgstat_report_fixed = true;
}
/*
@@ -302,18 +304,6 @@ pgstat_flush_backend(bool nowait, bits32 flags)
}
/*
- * Check if there are any backend stats waiting for flush.
- */
-bool
-pgstat_backend_have_pending_cb(void)
-{
- if (!pgstat_tracks_backend_bktype(MyBackendType))
- return false;
-
- return (backend_has_iostats || pgstat_backend_wal_have_pending());
-}
-
-/*
* Callback to flush out locally pending backend statistics.
*
* If some stats could not be flushed due to lock contention, return true.
diff --git a/src/backend/utils/activity/pgstat_io.c b/src/backend/utils/activity/pgstat_io.c
index d8d26379a57..13ae57ed649 100644
--- a/src/backend/utils/activity/pgstat_io.c
+++ b/src/backend/utils/activity/pgstat_io.c
@@ -80,6 +80,7 @@ pgstat_count_io_op(IOObject io_object, IOContext io_context, IOOp io_op,
pgstat_count_backend_io_op(io_object, io_context, io_op, cnt, bytes);
have_iostats = true;
+ pgstat_report_fixed = true;
}
/*
@@ -168,15 +169,6 @@ pgstat_fetch_stat_io(void)
}
/*
- * Check if there any IO stats waiting for flush.
- */
-bool
-pgstat_io_have_pending_cb(void)
-{
- return have_iostats;
-}
-
-/*
* Simpler wrapper of pgstat_io_flush_cb()
*/
void
diff --git a/src/backend/utils/activity/pgstat_slru.c b/src/backend/utils/activity/pgstat_slru.c
index b9e940dde45..7bd8744accb 100644
--- a/src/backend/utils/activity/pgstat_slru.c
+++ b/src/backend/utils/activity/pgstat_slru.c
@@ -144,15 +144,6 @@ pgstat_get_slru_index(const char *name)
}
/*
- * Check if there are any SLRU stats entries waiting for flush.
- */
-bool
-pgstat_slru_have_pending_cb(void)
-{
- return have_slrustats;
-}
-
-/*
* Flush out locally pending SLRU stats entries
*
* If nowait is true, this function returns false on lock failure. Otherwise
@@ -247,6 +238,7 @@ get_slru_entry(int slru_idx)
Assert((slru_idx >= 0) && (slru_idx < SLRU_NUM_ELEMENTS));
have_slrustats = true;
+ pgstat_report_fixed = true;
return &pending_SLRUStats[slru_idx];
}
diff --git a/src/backend/utils/activity/pgstat_wal.c b/src/backend/utils/activity/pgstat_wal.c
index 16a1ecb4d90..0d04480d2f6 100644
--- a/src/backend/utils/activity/pgstat_wal.c
+++ b/src/backend/utils/activity/pgstat_wal.c
@@ -72,6 +72,15 @@ pgstat_fetch_stat_wal(void)
}
/*
+ * Determine whether any WAL usage has happened.
+ */
+static inline bool
+pgstat_wal_have_pending(void)
+{
+ return pgWalUsage.wal_records != prevWalUsage.wal_records;
+}
+
+/*
* Calculate how much WAL usage counters have increased by subtracting the
* previous counters from the current ones.
*
@@ -92,7 +101,7 @@ pgstat_wal_flush_cb(bool nowait)
* This function can be called even if nothing at all has happened. Avoid
* taking lock for nothing in that case.
*/
- if (!pgstat_wal_have_pending_cb())
+ if (!pgstat_wal_have_pending())
return false;
/*
@@ -136,15 +145,6 @@ pgstat_wal_init_backend_cb(void)
prevWalUsage = pgWalUsage;
}
-/*
- * To determine whether WAL usage happened.
- */
-bool
-pgstat_wal_have_pending_cb(void)
-{
- return pgWalUsage.wal_records != prevWalUsage.wal_records;
-}
-
void
pgstat_wal_init_shmem_cb(void *stats)
{
diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c
index 1b0df111717..39dab3e42df 100644
--- a/src/backend/utils/adt/tid.c
+++ b/src/backend/utils/adt/tid.c
@@ -84,7 +84,7 @@ tidin(PG_FUNCTION_ARGS)
/*
* Cope with possibility that unsigned long is wider than BlockNumber, in
* which case strtoul will not raise an error for some values that are out
- * of the range of BlockNumber. (See similar code in oidin().)
+ * of the range of BlockNumber. (See similar code in uint32in_subr().)
*/
#if SIZEOF_LONG > 4
if (cvt != (unsigned long) blockNumber &&
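The corrected comment points at the same overflow guard used by uint32in_subr(): on platforms where unsigned long is 64 bits, strtoul() happily returns values above 2^32 - 1, so the result must be compared against its truncated 32-bit copy. A standalone sketch (illustrative names, simpler than the real tidin() logic):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    static bool
    parse_block_number(const char *str, uint32_t *blkno)
    {
        char *end;
        unsigned long cvt = strtoul(str, &end, 10);

        if (end == str)
            return false;           /* no digits at all */

        *blkno = (uint32_t) cvt;

        /*
         * If unsigned long is wider than 32 bits, strtoul() can succeed
         * on values that overflow uint32_t; catch them by comparing the
         * round-tripped value.
         */
        if (cvt != (unsigned long) *blkno)
            return false;

        return true;
    }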
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index f7b731825fc..182e8f75db7 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -1769,7 +1769,7 @@ xml_doctype_in_content(const xmlChar *str)
* xmloption_arg, but a DOCTYPE node in the input can force DOCUMENT mode).
*
* If parsed_nodes isn't NULL and we parse in CONTENT mode, the list
- * of parsed nodes from the xmlParseInNodeContext call will be returned
+ * of parsed nodes from the xmlParseBalancedChunkMemory call will be returned
* to *parsed_nodes. (It is caller's responsibility to free that.)
*
* Errors normally result in ereport(ERROR), but if escontext is an
@@ -1795,6 +1795,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PgXmlErrorContext *xmlerrcxt;
volatile xmlParserCtxtPtr ctxt = NULL;
volatile xmlDocPtr doc = NULL;
+ volatile int save_keep_blanks = -1;
/*
* This step looks annoyingly redundant, but we must do it to have a
@@ -1822,7 +1823,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PG_TRY();
{
bool parse_as_document = false;
- int options;
int res_code;
size_t count = 0;
xmlChar *version = NULL;
@@ -1853,18 +1853,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
parse_as_document = true;
}
- /*
- * Select parse options.
- *
- * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
- * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined by
- * internal DTD are applied'. As for external DTDs, we try to support
- * them too (see SQL/XML:2008 GR 10.16.7.e), but that doesn't really
- * happen because xmlPgEntityLoader prevents it.
- */
- options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
- | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
-
/* initialize output parameters */
if (parsed_xmloptiontype != NULL)
*parsed_xmloptiontype = parse_as_document ? XMLOPTION_DOCUMENT :
@@ -1874,11 +1862,26 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
if (parse_as_document)
{
+ int options;
+
+ /* set up parser context used by xmlCtxtReadDoc */
ctxt = xmlNewParserCtxt();
if (ctxt == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
"could not allocate parser context");
+ /*
+ * Select parse options.
+ *
+ * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
+ * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined
+ * by internal DTD are applied'. As for external DTDs, we try to
+ * support them too (see SQL/XML:2008 GR 10.16.7.e), but that
+ * doesn't really happen because xmlPgEntityLoader prevents it.
+ */
+ options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
+ | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
+
doc = xmlCtxtReadDoc(ctxt, utf8string,
NULL, /* no URL */
"UTF-8",
@@ -1900,10 +1903,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
}
else
{
- xmlNodePtr root;
- xmlNodePtr oldroot PG_USED_FOR_ASSERTS_ONLY;
-
- /* set up document with empty root node to be the context node */
+ /* set up document that xmlParseBalancedChunkMemory will add to */
doc = xmlNewDoc(version);
if (doc == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
@@ -1916,43 +1916,22 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
"could not allocate XML document");
doc->standalone = standalone;
- root = xmlNewNode(NULL, (const xmlChar *) "content-root");
- if (root == NULL || xmlerrcxt->err_occurred)
- xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
- "could not allocate xml node");
-
- /*
- * This attaches root to doc, so we need not free it separately;
- * and there can't yet be any old root to free.
- */
- oldroot = xmlDocSetRootElement(doc, root);
- Assert(oldroot == NULL);
+ /* set parse options --- have to do this the ugly way */
+ save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0);
/* allow empty content */
if (*(utf8string + count))
{
- xmlNodePtr node_list = NULL;
- xmlParserErrors res;
-
- res = xmlParseInNodeContext(root,
- (char *) utf8string + count,
- strlen((char *) utf8string + count),
- options,
- &node_list);
-
- if (res != XML_ERR_OK || xmlerrcxt->err_occurred)
+ res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
+ utf8string + count,
+ parsed_nodes);
+ if (res_code != 0 || xmlerrcxt->err_occurred)
{
- xmlFreeNodeList(node_list);
xml_errsave(escontext, xmlerrcxt,
ERRCODE_INVALID_XML_CONTENT,
"invalid XML content");
goto fail;
}
-
- if (parsed_nodes != NULL)
- *parsed_nodes = node_list;
- else
- xmlFreeNodeList(node_list);
}
}
@@ -1961,6 +1940,8 @@ fail:
}
PG_CATCH();
{
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
if (doc != NULL)
xmlFreeDoc(doc);
if (ctxt != NULL)
@@ -1972,6 +1953,9 @@ fail:
}
PG_END_TRY();
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
+
if (ctxt != NULL)
xmlFreeParserCtxt(ctxt);
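xmlKeepBlanksDefault() is a process-global libxml2 setting, which is why the patch saves the previous value and restores it on every exit path, including the error path in PG_CATCH(). The shape of that save/restore, reduced to a sketch (assumes libxml2; error handling elided):

    #include <libxml/parser.h>

    static void
    parse_content(const char *utf8, int preserve_whitespace)
    {
        /* xmlKeepBlanksDefault() returns the previous global value */
        int save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0);

        xmlDocPtr doc = xmlNewDoc((const xmlChar *) "1.0");

        /* parse the chunk into doc; on any error path the global must
         * still be restored before returning to the caller */
        (void) xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
                                           (const xmlChar *) utf8, NULL);

        xmlKeepBlanksDefault(save_keep_blanks);     /* always restore */
        xmlFreeDoc(doc);
    }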