Diffstat (limited to 'src')
116 files changed, 2142 insertions, 2197 deletions
diff --git a/src/Makefile.global.in b/src/Makefile.global.in index 04952b533de..8b1b357beaa 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -254,7 +254,7 @@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ PG_SYSROOT = @PG_SYSROOT@ -override CPPFLAGS := $(ICU_CFLAGS) $(LIBNUMA_CFLAGS) $(LIBURING_CFLAGS) $(CPPFLAGS) +override CPPFLAGS += $(ICU_CFLAGS) $(LIBNUMA_CFLAGS) $(LIBURING_CFLAGS) ifdef PGXS override CPPFLAGS := -I$(includedir_server) -I$(includedir_internal) $(CPPFLAGS) diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 745a04ef26e..8f918e00af7 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -364,7 +364,7 @@ visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf) { *vmbuf = vm_readbuf(rel, mapBlock, false); if (!BufferIsValid(*vmbuf)) - return false; + return (uint8) 0; } map = PageGetContents(BufferGetPage(*vmbuf)); diff --git a/src/backend/access/index/amapi.c b/src/backend/access/index/amapi.c index f0f4f974bce..60684c53422 100644 --- a/src/backend/access/index/amapi.c +++ b/src/backend/access/index/amapi.c @@ -42,6 +42,19 @@ GetIndexAmRoutine(Oid amhandler) elog(ERROR, "index access method handler function %u did not return an IndexAmRoutine struct", amhandler); + /* Assert that all required callbacks are present. */ + Assert(routine->ambuild != NULL); + Assert(routine->ambuildempty != NULL); + Assert(routine->aminsert != NULL); + Assert(routine->ambulkdelete != NULL); + Assert(routine->amvacuumcleanup != NULL); + Assert(routine->amcostestimate != NULL); + Assert(routine->amoptions != NULL); + Assert(routine->amvalidate != NULL); + Assert(routine->ambeginscan != NULL); + Assert(routine->amrescan != NULL); + Assert(routine->amendscan != NULL); + return routine; } diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index eefffc4277a..5553c20fee8 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -96,6 +96,7 @@ #include "utils/guc_hooks.h" #include "utils/guc_tables.h" #include "utils/injection_point.h" +#include "utils/pgstat_internal.h" #include "utils/ps_status.h" #include "utils/relmapper.h" #include "utils/snapmgr.h" @@ -702,7 +703,7 @@ static void InitControlFile(uint64 sysidentifier, uint32 data_checksum_version); static void WriteControlFile(void); static void ReadControlFile(void); static void UpdateControlFile(void); -static char *str_time(pg_time_t tnow); +static char *str_time(pg_time_t tnow, char *buf, size_t bufsize); static int get_sync_bit(int method); @@ -1091,6 +1092,9 @@ XLogInsertRecord(XLogRecData *rdata, pgWalUsage.wal_bytes += rechdr->xl_tot_len; pgWalUsage.wal_records++; pgWalUsage.wal_fpi += num_fpi; + + /* Required for the flush of pending stats WAL data */ + pgstat_report_fixed = true; } return EndPos; @@ -2108,6 +2112,12 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic) LWLockRelease(WALWriteLock); pgWalUsage.wal_buffers_full++; TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_DONE(); + + /* + * Required for the flush of pending stats WAL data, per + * update of pgWalUsage. 
+ */ + pgstat_report_fixed = true; } } } @@ -5361,11 +5371,9 @@ BootStrapXLOG(uint32 data_checksum_version) } static char * -str_time(pg_time_t tnow) +str_time(pg_time_t tnow, char *buf, size_t bufsize) { - char *buf = palloc(128); - - pg_strftime(buf, 128, + pg_strftime(buf, bufsize, "%Y-%m-%d %H:%M:%S %Z", pg_localtime(&tnow, log_timezone)); @@ -5608,6 +5616,7 @@ StartupXLOG(void) XLogRecPtr missingContrecPtr; TransactionId oldestActiveXID; bool promoted = false; + char timebuf[128]; /* * We should have an aux process resource owner to use, and we should not @@ -5636,25 +5645,29 @@ StartupXLOG(void) */ ereport(IsPostmasterEnvironment ? LOG : NOTICE, (errmsg("database system was shut down at %s", - str_time(ControlFile->time)))); + str_time(ControlFile->time, + timebuf, sizeof(timebuf))))); break; case DB_SHUTDOWNED_IN_RECOVERY: ereport(LOG, (errmsg("database system was shut down in recovery at %s", - str_time(ControlFile->time)))); + str_time(ControlFile->time, + timebuf, sizeof(timebuf))))); break; case DB_SHUTDOWNING: ereport(LOG, (errmsg("database system shutdown was interrupted; last known up at %s", - str_time(ControlFile->time)))); + str_time(ControlFile->time, + timebuf, sizeof(timebuf))))); break; case DB_IN_CRASH_RECOVERY: ereport(LOG, (errmsg("database system was interrupted while in recovery at %s", - str_time(ControlFile->time)), + str_time(ControlFile->time, + timebuf, sizeof(timebuf))), errhint("This probably means that some data is corrupted and" " you will have to use the last backup for recovery."))); break; @@ -5662,7 +5675,8 @@ StartupXLOG(void) case DB_IN_ARCHIVE_RECOVERY: ereport(LOG, (errmsg("database system was interrupted while in recovery at log time %s", - str_time(ControlFile->checkPointCopy.time)), + str_time(ControlFile->checkPointCopy.time, + timebuf, sizeof(timebuf))), errhint("If this has occurred more than once some data might be corrupted" " and you might need to choose an earlier recovery target."))); break; @@ -5670,7 +5684,8 @@ StartupXLOG(void) case DB_IN_PRODUCTION: ereport(LOG, (errmsg("database system was interrupted; last known up at %s", - str_time(ControlFile->time)))); + str_time(ControlFile->time, + timebuf, sizeof(timebuf))))); break; default: @@ -6315,6 +6330,12 @@ StartupXLOG(void) */ CompleteCommitTsInitialization(); + /* Clean up EndOfWalRecoveryInfo data to appease Valgrind leak checking */ + if (endOfRecoveryInfo->lastPage) + pfree(endOfRecoveryInfo->lastPage); + pfree(endOfRecoveryInfo->recoveryStopReason); + pfree(endOfRecoveryInfo); + /* * All done with end-of-recovery actions. 
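(The str_time() change above swaps a palloc'd result for a caller-supplied buffer, so each call site, e.g. the timebuf[128] in StartupXLOG(), owns the storage and nothing is leaked per call. A minimal standalone sketch of the pattern, with the C library's strftime/localtime standing in for pg_strftime/pg_localtime:)

#include <stdio.h>
#include <time.h>

/* Caller owns the buffer; there is no per-call heap allocation to leak. */
static char *
str_time(time_t tnow, char *buf, size_t bufsize)
{
    strftime(buf, bufsize, "%Y-%m-%d %H:%M:%S %Z", localtime(&tnow));
    return buf;
}

int
main(void)
{
    char timebuf[128];
    time_t now = time(NULL);

    printf("shut down at %s\n", str_time(now, timebuf, sizeof(timebuf)));
    return 0;
}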
* diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c index e8f3ba00caa..f23ec8969c2 100644 --- a/src/backend/access/transam/xlogrecovery.c +++ b/src/backend/access/transam/xlogrecovery.c @@ -1626,6 +1626,7 @@ ShutdownWalRecovery(void) close(readFile); readFile = -1; } + pfree(xlogreader->private_data); XLogReaderFree(xlogreader); XLogPrefetcherFree(xlogprefetcher); diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c index 63c2992d19f..244acf52f36 100644 --- a/src/backend/catalog/pg_subscription.c +++ b/src/backend/catalog/pg_subscription.c @@ -320,7 +320,7 @@ AddSubscriptionRelState(Oid subid, Oid relid, char state, */ void UpdateSubscriptionRelState(Oid subid, Oid relid, char state, - XLogRecPtr sublsn) + XLogRecPtr sublsn, bool already_locked) { Relation rel; HeapTuple tup; @@ -328,9 +328,24 @@ UpdateSubscriptionRelState(Oid subid, Oid relid, char state, Datum values[Natts_pg_subscription_rel]; bool replaces[Natts_pg_subscription_rel]; - LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock); + if (already_locked) + { +#ifdef USE_ASSERT_CHECKING + LOCKTAG tag; - rel = table_open(SubscriptionRelRelationId, RowExclusiveLock); + Assert(CheckRelationOidLockedByMe(SubscriptionRelRelationId, + RowExclusiveLock, true)); + SET_LOCKTAG_OBJECT(tag, InvalidOid, SubscriptionRelationId, subid, 0); + Assert(LockHeldByMe(&tag, AccessShareLock, true)); +#endif + + rel = table_open(SubscriptionRelRelationId, NoLock); + } + else + { + LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock); + rel = table_open(SubscriptionRelRelationId, RowExclusiveLock); + } /* Try finding existing mapping. */ tup = SearchSysCacheCopy2(SUBSCRIPTIONRELMAP, diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 7e2792ead71..8345bc0264b 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -3582,6 +3582,7 @@ static void show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es) { Plan *plan = ((PlanState *) mstate)->plan; + Memoize *mplan = (Memoize *) plan; ListCell *lc; List *context; StringInfoData keystr; @@ -3602,7 +3603,7 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es) plan, ancestors); - foreach(lc, ((Memoize *) plan)->param_exprs) + foreach(lc, mplan->param_exprs) { Node *expr = (Node *) lfirst(lc); @@ -3618,6 +3619,24 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es) pfree(keystr.data); + if (es->costs) + { + if (es->format == EXPLAIN_FORMAT_TEXT) + { + ExplainIndentText(es); + appendStringInfo(es->str, "Estimates: capacity=%u distinct keys=%.0f lookups=%.0f hit percent=%.2f%%\n", + mplan->est_entries, mplan->est_unique_keys, + mplan->est_calls, mplan->est_hit_ratio * 100.0); + } + else + { + ExplainPropertyUInteger("Estimated Capacity", NULL, mplan->est_entries, es); + ExplainPropertyFloat("Estimated Distinct Lookup Keys", NULL, mplan->est_unique_keys, 0, es); + ExplainPropertyFloat("Estimated Lookups", NULL, mplan->est_calls, 0, es); + ExplainPropertyFloat("Estimated Hit Percent", NULL, mplan->est_hit_ratio * 100.0, 2, es); + } + } + if (!es->analyze) return; diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c index fcd5fcd8915..77f8461f42e 100644 --- a/src/backend/commands/foreigncmds.c +++ b/src/backend/commands/foreigncmds.c @@ -1588,7 +1588,7 @@ ImportForeignSchema(ImportForeignSchemaStmt *stmt) pstmt->utilityStmt = (Node *) cstmt; 
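(Given the format string added to show_memoize_info() above, a Memoize node under EXPLAIN with costs enabled would render in text format roughly as below; the numbers and the cache key are invented for illustration:)

Memoize
  Cache Key: t.id
  Estimates: capacity=1024 distinct keys=100 lookups=10000 hit percent=99.00%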
pstmt->stmt_location = rs->stmt_location; pstmt->stmt_len = rs->stmt_len; - pstmt->cached_plan_type = PLAN_CACHE_NONE; + pstmt->planOrigin = PLAN_STMT_INTERNAL; /* Execute statement */ ProcessUtility(pstmt, cmd, false, diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index c00f1a11384..0f03d9743d2 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -215,7 +215,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString, wrapper->utilityStmt = stmt; wrapper->stmt_location = stmt_location; wrapper->stmt_len = stmt_len; - wrapper->cached_plan_type = PLAN_CACHE_NONE; + wrapper->planOrigin = PLAN_STMT_INTERNAL; /* do this step */ ProcessUtility(wrapper, diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index fc76f22fb82..f098a5557cf 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -189,7 +189,7 @@ ExecSerializePlan(Plan *plan, EState *estate) pstmt->permInfos = estate->es_rteperminfos; pstmt->resultRelations = NIL; pstmt->appendRelations = NIL; - pstmt->cached_plan_type = PLAN_CACHE_NONE; + pstmt->planOrigin = PLAN_STMT_INTERNAL; /* * Transfer only parallel-safe subplans, leaving a NULL "hole" in the list diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile index e8c12060b93..68677ba42e1 100644 --- a/src/backend/jit/llvm/Makefile +++ b/src/backend/jit/llvm/Makefile @@ -31,7 +31,7 @@ endif # All files in this directory use LLVM. CFLAGS += $(LLVM_CFLAGS) CXXFLAGS += $(LLVM_CXXFLAGS) -override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS) +override CPPFLAGS += $(LLVM_CPPFLAGS) SHLIB_LINK += $(LLVM_LIBS) # Because this module includes C++ files, we need to use a C++ diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index e5171467de1..25f739a6a17 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -858,7 +858,6 @@ RemoveSocketFiles(void) (void) unlink(sock_path); } /* Since we're about to exit, no need to reclaim storage */ - sock_paths = NIL; } diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c index f1a08bc32ca..5f39949a367 100644 --- a/src/backend/libpq/pqmq.c +++ b/src/backend/libpq/pqmq.c @@ -23,7 +23,7 @@ #include "tcop/tcopprot.h" #include "utils/builtins.h" -static shm_mq_handle *pq_mq_handle; +static shm_mq_handle *pq_mq_handle = NULL; static bool pq_mq_busy = false; static pid_t pq_mq_parallel_leader_pid = 0; static ProcNumber pq_mq_parallel_leader_proc_number = INVALID_PROC_NUMBER; @@ -66,7 +66,11 @@ pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh) static void pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg) { - pq_mq_handle = NULL; + if (pq_mq_handle != NULL) + { + pfree(pq_mq_handle); + pq_mq_handle = NULL; + } whereToSendOutput = DestNone; } @@ -131,8 +135,11 @@ mq_putmessage(char msgtype, const char *s, size_t len) if (pq_mq_busy) { if (pq_mq_handle != NULL) + { shm_mq_detach(pq_mq_handle); - pq_mq_handle = NULL; + pfree(pq_mq_handle); + pq_mq_handle = NULL; + } return EOF; } @@ -152,8 +159,6 @@ mq_putmessage(char msgtype, const char *s, size_t len) iov[1].data = s; iov[1].len = len; - Assert(pq_mq_handle != NULL); - for (;;) { /* @@ -161,6 +166,7 @@ mq_putmessage(char msgtype, const char *s, size_t len) * that the shared memory value is updated before we send the parallel * message signal right after this. 
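(The pqmq.c changes above consistently pfree the handle and null the pointer in one place, so later tests of pq_mq_handle are reliable. A simplified sketch of the idiom; Handle and cleanup() are invented names, and free/calloc stand in for pfree/palloc:)

#include <stdio.h>
#include <stdlib.h>

typedef struct Handle
{
    int fd;
} Handle;

static Handle *handle = NULL;

static void
cleanup(void)
{
    if (handle != NULL)
    {
        free(handle);
        handle = NULL;          /* later callers see "already gone" */
    }
}

int
main(void)
{
    handle = calloc(1, sizeof(Handle));
    cleanup();
    cleanup();                  /* second call is a harmless no-op */
    printf("clean\n");
    return 0;
}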
*/ + Assert(pq_mq_handle != NULL); result = shm_mq_sendv(pq_mq_handle, iov, 2, true, true); if (pq_mq_parallel_leader_pid != 0) diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 1f04a2c182c..344a3188317 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -2572,13 +2572,13 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath, Cost input_startup_cost = mpath->subpath->startup_cost; Cost input_total_cost = mpath->subpath->total_cost; double tuples = mpath->subpath->rows; - double calls = mpath->calls; + Cardinality est_calls = mpath->est_calls; int width = mpath->subpath->pathtarget->width; double hash_mem_bytes; double est_entry_bytes; - double est_cache_entries; - double ndistinct; + Cardinality est_cache_entries; + Cardinality ndistinct; double evict_ratio; double hit_ratio; Cost startup_cost; @@ -2604,7 +2604,7 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath, est_cache_entries = floor(hash_mem_bytes / est_entry_bytes); /* estimate on the distinct number of parameter values */ - ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL, + ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL, &estinfo); /* @@ -2616,7 +2616,10 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath, * certainly mean a MemoizePath will never survive add_path(). */ if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0) - ndistinct = calls; + ndistinct = est_calls; + + /* Remember the ndistinct estimate for EXPLAIN */ + mpath->est_unique_keys = ndistinct; /* * Since we've already estimated the maximum number of entries we can @@ -2644,9 +2647,12 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath, * must look at how many scans are estimated in total for this node and * how many of those scans we expect to get a cache hit. 
*/ - hit_ratio = ((calls - ndistinct) / calls) * + hit_ratio = ((est_calls - ndistinct) / est_calls) * (est_cache_entries / Max(ndistinct, est_cache_entries)); + /* Remember the hit ratio estimate for EXPLAIN */ + mpath->est_hit_ratio = hit_ratio; + Assert(hit_ratio >= 0 && hit_ratio <= 1.0); /* diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 8a9f1d7a943..bfefc7dbea1 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -284,7 +284,10 @@ static Material *make_material(Plan *lefttree); static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations, List *param_exprs, bool singlerow, bool binary_mode, - uint32 est_entries, Bitmapset *keyparamids); + uint32 est_entries, Bitmapset *keyparamids, + Cardinality est_calls, + Cardinality est_unique_keys, + double est_hit_ratio); static WindowAgg *make_windowagg(List *tlist, WindowClause *wc, int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations, int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations, @@ -1753,7 +1756,8 @@ create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags) plan = make_memoize(subplan, operators, collations, param_exprs, best_path->singlerow, best_path->binary_mode, - best_path->est_entries, keyparamids); + best_path->est_entries, keyparamids, best_path->est_calls, + best_path->est_unique_keys, best_path->est_hit_ratio); copy_generic_path_info(&plan->plan, (Path *) best_path); @@ -6749,7 +6753,9 @@ materialize_finished_plan(Plan *subplan) static Memoize * make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations, List *param_exprs, bool singlerow, bool binary_mode, - uint32 est_entries, Bitmapset *keyparamids) + uint32 est_entries, Bitmapset *keyparamids, + Cardinality est_calls, Cardinality est_unique_keys, + double est_hit_ratio) { Memoize *node = makeNode(Memoize); Plan *plan = &node->plan; @@ -6767,6 +6773,9 @@ make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations, node->binary_mode = binary_mode; node->est_entries = est_entries; node->keyparamids = keyparamids; + node->est_calls = est_calls; + node->est_unique_keys = est_unique_keys; + node->est_hit_ratio = est_hit_ratio; return node; } diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index a77b2147e95..d59d6e4c6a0 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -558,6 +558,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions, result->commandType = parse->commandType; result->queryId = parse->queryId; + result->planOrigin = PLAN_STMT_STANDARD; result->hasReturning = (parse->returningList != NIL); result->hasModifyingCTE = parse->hasModifyingCTE; result->canSetTag = parse->canSetTag; @@ -582,7 +583,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions, result->utilityStmt = parse->utilityStmt; result->stmt_location = parse->stmt_location; result->stmt_len = parse->stmt_len; - result->cached_plan_type = PLAN_CACHE_NONE; result->jitFlags = PGJIT_NONE; if (jit_enabled && jit_above_cost >= 0 && diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index 9cc602788ea..a4c5867cdcb 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -1689,7 +1689,7 @@ create_material_path(RelOptInfo *rel, Path *subpath) MemoizePath * create_memoize_path(PlannerInfo *root, 
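(A worked instance of the hit-ratio estimate above, with invented inputs: 10000 expected lookups, 100 distinct key values, and room for 1024 cache entries give (10000 - 100)/10000 * 1024/max(100, 1024) = 0.99:)

#include <stdio.h>

int
main(void)
{
    double est_calls = 10000.0;         /* expected rescans of the node */
    double ndistinct = 100.0;           /* estimated distinct parameter values */
    double est_cache_entries = 1024.0;  /* entries that fit in hash_mem */
    double denom = ndistinct > est_cache_entries ? ndistinct : est_cache_entries;
    double hit_ratio = ((est_calls - ndistinct) / est_calls) *
        (est_cache_entries / denom);

    printf("hit_ratio = %.4f\n", hit_ratio);    /* prints 0.9900 */
    return 0;
}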
RelOptInfo *rel, Path *subpath, List *param_exprs, List *hash_operators, - bool singlerow, bool binary_mode, double calls) + bool singlerow, bool binary_mode, Cardinality est_calls) { MemoizePath *pathnode = makeNode(MemoizePath); @@ -1710,7 +1710,6 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, pathnode->param_exprs = param_exprs; pathnode->singlerow = singlerow; pathnode->binary_mode = binary_mode; - pathnode->calls = clamp_row_est(calls); /* * For now we set est_entries to 0. cost_memoize_rescan() does all the @@ -1720,6 +1719,12 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath, */ pathnode->est_entries = 0; + pathnode->est_calls = clamp_row_est(est_calls); + + /* These will also be set later in cost_memoize_rescan() */ + pathnode->est_unique_keys = 0.0; + pathnode->est_hit_ratio = 0.0; + /* we should not generate this path type when enable_memoize=false */ Assert(enable_memoize); pathnode->path.disabled_nodes = subpath->disabled_nodes; @@ -4259,7 +4264,7 @@ reparameterize_path(PlannerInfo *root, Path *path, mpath->hash_operators, mpath->singlerow, mpath->binary_mode, - mpath->calls); + mpath->est_calls); } default: break; diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index 73345bb3c70..db43034b9db 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -318,6 +318,11 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type <list> opt_qualified_name %type <boolean> opt_concurrently %type <dbehavior> opt_drop_behavior +%type <list> opt_utility_option_list +%type <list> utility_option_list +%type <defelt> utility_option_elem +%type <str> utility_option_name +%type <node> utility_option_arg %type <node> alter_column_default opclass_item opclass_drop alter_using %type <ival> add_drop opt_asc_desc opt_nulls_order @@ -338,10 +343,6 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); create_extension_opt_item alter_extension_opt_item %type <ival> opt_lock lock_type cast_context -%type <str> utility_option_name -%type <defelt> utility_option_elem -%type <list> utility_option_list -%type <node> utility_option_arg %type <defelt> drop_option %type <boolean> opt_or_replace opt_no opt_grant_grant_option @@ -556,7 +557,6 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type <list> generic_option_list alter_generic_option_list %type <ival> reindex_target_relation reindex_target_all -%type <list> opt_reindex_option_list %type <node> copy_generic_opt_arg copy_generic_opt_arg_list_item %type <defelt> copy_generic_opt_elem @@ -1141,6 +1141,41 @@ opt_drop_behavior: | /* EMPTY */ { $$ = DROP_RESTRICT; /* default */ } ; +opt_utility_option_list: + '(' utility_option_list ')' { $$ = $2; } + | /* EMPTY */ { $$ = NULL; } + ; + +utility_option_list: + utility_option_elem + { + $$ = list_make1($1); + } + | utility_option_list ',' utility_option_elem + { + $$ = lappend($1, $3); + } + ; + +utility_option_elem: + utility_option_name utility_option_arg + { + $$ = makeDefElem($1, $2, @1); + } + ; + +utility_option_name: + NonReservedWord { $$ = $1; } + | analyze_keyword { $$ = "analyze"; } + | FORMAT_LA { $$ = "format"; } + ; + +utility_option_arg: + opt_boolean_or_string { $$ = (Node *) makeString($1); } + | NumericOnly { $$ = (Node *) $1; } + | /* EMPTY */ { $$ = NULL; } + ; + /***************************************************************************** * * CALL statement @@ -2028,18 +2063,12 @@ constraints_set_mode: * 
Checkpoint statement */ CheckPointStmt: - CHECKPOINT + CHECKPOINT opt_utility_option_list { CheckPointStmt *n = makeNode(CheckPointStmt); $$ = (Node *) n; - } - | CHECKPOINT '(' utility_option_list ')' - { - CheckPointStmt *n = makeNode(CheckPointStmt); - - $$ = (Node *) n; - n->options = $3; + n->options = $2; } ; @@ -9354,7 +9383,7 @@ DropTransformStmt: DROP TRANSFORM opt_if_exists FOR Typename LANGUAGE name opt_d *****************************************************************************/ ReindexStmt: - REINDEX opt_reindex_option_list reindex_target_relation opt_concurrently qualified_name + REINDEX opt_utility_option_list reindex_target_relation opt_concurrently qualified_name { ReindexStmt *n = makeNode(ReindexStmt); @@ -9367,7 +9396,7 @@ ReindexStmt: makeDefElem("concurrently", NULL, @4)); $$ = (Node *) n; } - | REINDEX opt_reindex_option_list SCHEMA opt_concurrently name + | REINDEX opt_utility_option_list SCHEMA opt_concurrently name { ReindexStmt *n = makeNode(ReindexStmt); @@ -9380,7 +9409,7 @@ ReindexStmt: makeDefElem("concurrently", NULL, @4)); $$ = (Node *) n; } - | REINDEX opt_reindex_option_list reindex_target_all opt_concurrently opt_single_name + | REINDEX opt_utility_option_list reindex_target_all opt_concurrently opt_single_name { ReindexStmt *n = makeNode(ReindexStmt); @@ -9402,10 +9431,6 @@ reindex_target_all: SYSTEM_P { $$ = REINDEX_OBJECT_SYSTEM; } | DATABASE { $$ = REINDEX_OBJECT_DATABASE; } ; -opt_reindex_option_list: - '(' utility_option_list ')' { $$ = $2; } - | /* EMPTY */ { $$ = NULL; } - ; /***************************************************************************** * @@ -11903,13 +11928,13 @@ ClusterStmt: n->params = $3; $$ = (Node *) n; } - | CLUSTER '(' utility_option_list ')' + | CLUSTER opt_utility_option_list { ClusterStmt *n = makeNode(ClusterStmt); n->relation = NULL; n->indexname = NULL; - n->params = $3; + n->params = $2; $$ = (Node *) n; } /* unparenthesized VERBOSE kept for pre-14 compatibility */ @@ -11919,21 +11944,18 @@ ClusterStmt: n->relation = $3; n->indexname = $4; - n->params = NIL; if ($2) - n->params = lappend(n->params, makeDefElem("verbose", NULL, @2)); + n->params = list_make1(makeDefElem("verbose", NULL, @2)); $$ = (Node *) n; } /* unparenthesized VERBOSE kept for pre-17 compatibility */ - | CLUSTER opt_verbose + | CLUSTER VERBOSE { ClusterStmt *n = makeNode(ClusterStmt); n->relation = NULL; n->indexname = NULL; - n->params = NIL; - if ($2) - n->params = lappend(n->params, makeDefElem("verbose", NULL, @2)); + n->params = list_make1(makeDefElem("verbose", NULL, @2)); $$ = (Node *) n; } /* kept for pre-8.3 compatibility */ @@ -11943,9 +11965,8 @@ ClusterStmt: n->relation = $5; n->indexname = $3; - n->params = NIL; if ($2) - n->params = lappend(n->params, makeDefElem("verbose", NULL, @2)); + n->params = list_make1(makeDefElem("verbose", NULL, @2)); $$ = (Node *) n; } ; @@ -11996,64 +12017,31 @@ VacuumStmt: VACUUM opt_full opt_freeze opt_verbose opt_analyze opt_vacuum_relati } ; -AnalyzeStmt: analyze_keyword opt_verbose opt_vacuum_relation_list +AnalyzeStmt: analyze_keyword opt_utility_option_list opt_vacuum_relation_list { VacuumStmt *n = makeNode(VacuumStmt); - n->options = NIL; - if ($2) - n->options = lappend(n->options, - makeDefElem("verbose", NULL, @2)); + n->options = $2; n->rels = $3; n->is_vacuumcmd = false; $$ = (Node *) n; } - | analyze_keyword '(' utility_option_list ')' opt_vacuum_relation_list + | analyze_keyword VERBOSE opt_vacuum_relation_list { VacuumStmt *n = makeNode(VacuumStmt); - n->options = $3; - n->rels = $5; + 
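(With the option list hoisted into the shared opt_utility_option_list rule, one production now serves CHECKPOINT, REINDEX, CLUSTER, and ANALYZE. Illustrative statements that all flow through it; mytab is a placeholder table name:)

CHECKPOINT;
REINDEX (VERBOSE) TABLE mytab;
CLUSTER (VERBOSE);
ANALYZE (VERBOSE, SKIP_LOCKED) mytab;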
n->options = list_make1(makeDefElem("verbose", NULL, @2)); + n->rels = $3; n->is_vacuumcmd = false; $$ = (Node *) n; } ; -utility_option_list: - utility_option_elem - { - $$ = list_make1($1); - } - | utility_option_list ',' utility_option_elem - { - $$ = lappend($1, $3); - } - ; - analyze_keyword: ANALYZE | ANALYSE /* British */ ; -utility_option_elem: - utility_option_name utility_option_arg - { - $$ = makeDefElem($1, $2, @1); - } - ; - -utility_option_name: - NonReservedWord { $$ = $1; } - | analyze_keyword { $$ = "analyze"; } - | FORMAT_LA { $$ = "format"; } - ; - -utility_option_arg: - opt_boolean_or_string { $$ = (Node *) makeString($1); } - | NumericOnly { $$ = (Node *) $1; } - | /* EMPTY */ { $$ = NULL; } - ; - opt_analyze: analyze_keyword { $$ = true; } | /*EMPTY*/ { $$ = false; } diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c index 4bdc2941efb..822cf4ec451 100644 --- a/src/backend/partitioning/partbounds.c +++ b/src/backend/partitioning/partbounds.c @@ -1007,9 +1007,6 @@ partition_bounds_copy(PartitionBoundInfo src, int ndatums; int nindexes; int partnatts; - bool hash_part; - int natts; - Datum *boundDatums; dest = (PartitionBoundInfo) palloc(sizeof(PartitionBoundInfoData)); @@ -1023,7 +1020,7 @@ partition_bounds_copy(PartitionBoundInfo src, dest->datums = (Datum **) palloc(sizeof(Datum *) * ndatums); - if (src->kind != NULL) + if (src->kind != NULL && ndatums > 0) { PartitionRangeDatumKind *boundKinds; @@ -1058,36 +1055,40 @@ partition_bounds_copy(PartitionBoundInfo src, * For hash partitioning, datums array will have two elements - modulus * and remainder. */ - hash_part = (key->strategy == PARTITION_STRATEGY_HASH); - natts = hash_part ? 2 : partnatts; - boundDatums = palloc(ndatums * natts * sizeof(Datum)); - - for (i = 0; i < ndatums; i++) + if (ndatums > 0) { - int j; - - dest->datums[i] = &boundDatums[i * natts]; + bool hash_part = (key->strategy == PARTITION_STRATEGY_HASH); + int natts = hash_part ? 2 : partnatts; + Datum *boundDatums = palloc(ndatums * natts * sizeof(Datum)); - for (j = 0; j < natts; j++) + for (i = 0; i < ndatums; i++) { - bool byval; - int typlen; + int j; - if (hash_part) - { - typlen = sizeof(int32); /* Always int4 */ - byval = true; /* int4 is pass-by-value */ - } - else + dest->datums[i] = &boundDatums[i * natts]; + + for (j = 0; j < natts; j++) { - byval = key->parttypbyval[j]; - typlen = key->parttyplen[j]; - } + if (dest->kind == NULL || + dest->kind[i][j] == PARTITION_RANGE_DATUM_VALUE) + { + bool byval; + int typlen; - if (dest->kind == NULL || - dest->kind[i][j] == PARTITION_RANGE_DATUM_VALUE) - dest->datums[i][j] = datumCopy(src->datums[i][j], - byval, typlen); + if (hash_part) + { + typlen = sizeof(int32); /* Always int4 */ + byval = true; /* int4 is pass-by-value */ + } + else + { + byval = key->parttypbyval[j]; + typlen = key->parttyplen[j]; + } + dest->datums[i][j] = datumCopy(src->datums[i][j], + byval, typlen); + } + } } } diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 9474095f271..ff96b36d710 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -310,6 +310,16 @@ static AutoVacuumShmemStruct *AutoVacuumShmem; static dlist_head DatabaseList = DLIST_STATIC_INIT(DatabaseList); static MemoryContext DatabaseListCxt = NULL; +/* + * Dummy pointer to persuade Valgrind that we've not leaked the array of + * avl_dbase structs. Make it global to ensure the compiler doesn't + * optimize it away. 
+ */ +#ifdef USE_VALGRIND +extern avl_dbase *avl_dbase_array; +avl_dbase *avl_dbase_array; +#endif + /* Pointer to my own WorkerInfo, valid on each worker */ static WorkerInfo MyWorkerInfo = NULL; @@ -562,10 +572,10 @@ AutoVacLauncherMain(const void *startup_data, size_t startup_data_len) /* * Create the initial database list. The invariant we want this list to - * keep is that it's ordered by decreasing next_time. As soon as an entry - * is updated to a higher time, it will be moved to the front (which is - * correct because the only operation is to add autovacuum_naptime to the - * entry, and time always increases). + * keep is that it's ordered by decreasing next_worker. As soon as an + * entry is updated to a higher time, it will be moved to the front (which + * is correct because the only operation is to add autovacuum_naptime to + * the entry, and time always increases). */ rebuild_database_list(InvalidOid); @@ -1020,6 +1030,10 @@ rebuild_database_list(Oid newdb) /* put all the hash elements into an array */ dbary = palloc(nelems * sizeof(avl_dbase)); + /* keep Valgrind quiet */ +#ifdef USE_VALGRIND + avl_dbase_array = dbary; +#endif i = 0; hash_seq_init(&seq, dbhash); @@ -2565,8 +2579,18 @@ deleted: /* * We leak table_toast_map here (among other things), but since we're - * going away soon, it's not a problem. + * going away soon, it's not a problem normally. But when using Valgrind, + * release some stuff to reduce complaints about leaked storage. */ +#ifdef USE_VALGRIND + hash_destroy(table_toast_map); + FreeTupleDesc(pg_class_desc); + if (bstrategy) + pfree(bstrategy); +#endif + + /* Run the rest in xact context, mainly to avoid Valgrind leak warnings */ + MemoryContextSwitchTo(TopTransactionContext); /* * Update pg_database.datfrozenxid, and truncate pg_xact if possible. We diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c index 116ddf7b835..1ad65c237c3 100644 --- a/src/backend/postmaster/bgworker.c +++ b/src/backend/postmaster/bgworker.c @@ -613,6 +613,7 @@ ResetBackgroundWorkerCrashTimes(void) * resetting. */ rw->rw_crashed_at = 0; + rw->rw_pid = 0; /* * If there was anyone waiting for it, they're history. 
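(A standalone sketch of the Valgrind trick used for avl_dbase_array above and pmchild_array below: stashing the allocation in a global keeps it "reachable" rather than "definitely lost" in leak reports, and the extern declaration keeps the compiler from discarding the otherwise-unreferenced global. leak_anchor is an invented name:)

#include <stdlib.h>

#define USE_VALGRIND            /* assume a Valgrind-instrumented build */

#ifdef USE_VALGRIND
extern int *leak_anchor;
int        *leak_anchor;
#endif

int
main(void)
{
    int *arr = malloc(100 * sizeof(int));

    if (arr == NULL)
        return 1;
#ifdef USE_VALGRIND
    leak_anchor = arr;          /* allocation stays reachable at exit */
#endif
    /* ... arr is used for the rest of the process lifetime ... */
    return 0;
}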
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c index 2809e298a44..8490148a47d 100644 --- a/src/backend/postmaster/checkpointer.c +++ b/src/backend/postmaster/checkpointer.c @@ -130,6 +130,13 @@ typedef struct int num_requests; /* current # of requests */ int max_requests; /* allocated array size */ + + int head; /* Index of the first request in the ring + * buffer */ + int tail; /* Index of the last request in the ring + * buffer */ + + /* The ring buffer of pending checkpointer requests */ CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER]; } CheckpointerShmemStruct; @@ -138,6 +145,12 @@ static CheckpointerShmemStruct *CheckpointerShmem; /* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */ #define WRITES_PER_ABSORB 1000 +/* Maximum number of checkpointer requests to process in one batch */ +#define CKPT_REQ_BATCH_SIZE 10000 + +/* Max number of requests the checkpointer request queue can hold */ +#define MAX_CHECKPOINT_REQUESTS 10000000 + /* * GUC parameters */ @@ -973,7 +986,8 @@ CheckpointerShmemInit(void) */ MemSet(CheckpointerShmem, 0, size); SpinLockInit(&CheckpointerShmem->ckpt_lck); - CheckpointerShmem->max_requests = NBuffers; + CheckpointerShmem->max_requests = Min(NBuffers, MAX_CHECKPOINT_REQUESTS); + CheckpointerShmem->head = CheckpointerShmem->tail = 0; ConditionVariableInit(&CheckpointerShmem->start_cv); ConditionVariableInit(&CheckpointerShmem->done_cv); } @@ -1201,6 +1215,7 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type) { CheckpointerRequest *request; bool too_full; + int insert_pos; if (!IsUnderPostmaster) return false; /* probably shouldn't even get here */ @@ -1224,10 +1239,14 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type) } /* OK, insert request */ - request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++]; + insert_pos = CheckpointerShmem->tail; + request = &CheckpointerShmem->requests[insert_pos]; request->ftag = *ftag; request->type = type; + CheckpointerShmem->tail = (CheckpointerShmem->tail + 1) % CheckpointerShmem->max_requests; + CheckpointerShmem->num_requests++; + /* If queue is more than half full, nudge the checkpointer to empty it */ too_full = (CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests / 2); @@ -1269,12 +1288,16 @@ CompactCheckpointerRequestQueue(void) struct CheckpointerSlotMapping { CheckpointerRequest request; - int slot; + int ring_idx; }; - int n, - preserve_count; + int n; int num_skipped = 0; + int head; + int max_requests; + int num_requests; + int read_idx, + write_idx; HASHCTL ctl; HTAB *htab; bool *skip_slot; @@ -1286,8 +1309,13 @@ CompactCheckpointerRequestQueue(void) if (CritSectionCount > 0) return false; + max_requests = CheckpointerShmem->max_requests; + num_requests = CheckpointerShmem->num_requests; + /* Initialize skip_slot array */ - skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests); + skip_slot = palloc0(sizeof(bool) * max_requests); + + head = CheckpointerShmem->head; /* Initialize temporary hash table */ ctl.keysize = sizeof(CheckpointerRequest); @@ -1311,7 +1339,8 @@ CompactCheckpointerRequestQueue(void) * away preceding entries that would end up being canceled anyhow), but * it's not clear that the extra complexity would buy us anything. 
*/ - for (n = 0; n < CheckpointerShmem->num_requests; n++) + read_idx = head; + for (n = 0; n < num_requests; n++) { CheckpointerRequest *request; struct CheckpointerSlotMapping *slotmap; @@ -1324,16 +1353,19 @@ CompactCheckpointerRequestQueue(void) * CheckpointerShmemInit. Note also that RelFileLocator had better * contain no pad bytes. */ - request = &CheckpointerShmem->requests[n]; + request = &CheckpointerShmem->requests[read_idx]; slotmap = hash_search(htab, request, HASH_ENTER, &found); if (found) { /* Duplicate, so mark the previous occurrence as skippable */ - skip_slot[slotmap->slot] = true; + skip_slot[slotmap->ring_idx] = true; num_skipped++; } /* Remember slot containing latest occurrence of this request value */ - slotmap->slot = n; + slotmap->ring_idx = read_idx; + + /* Move to the next request in the ring buffer */ + read_idx = (read_idx + 1) % max_requests; } /* Done with the hash table. */ @@ -1347,17 +1379,34 @@ CompactCheckpointerRequestQueue(void) } /* We found some duplicates; remove them. */ - preserve_count = 0; - for (n = 0; n < CheckpointerShmem->num_requests; n++) + read_idx = write_idx = head; + for (n = 0; n < num_requests; n++) { - if (skip_slot[n]) - continue; - CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n]; + /* If this slot is NOT skipped, keep it */ + if (!skip_slot[read_idx]) + { + /* If the read and write positions are different, copy the request */ + if (write_idx != read_idx) + CheckpointerShmem->requests[write_idx] = + CheckpointerShmem->requests[read_idx]; + + /* Advance the write position */ + write_idx = (write_idx + 1) % max_requests; + } + + read_idx = (read_idx + 1) % max_requests; } + + /* + * Update ring buffer state: head remains the same, tail moves, count + * decreases + */ + CheckpointerShmem->tail = write_idx; + CheckpointerShmem->num_requests -= num_skipped; + ereport(DEBUG1, (errmsg_internal("compacted fsync request queue from %d entries to %d entries", - CheckpointerShmem->num_requests, preserve_count))); - CheckpointerShmem->num_requests = preserve_count; + num_requests, CheckpointerShmem->num_requests))); /* Cleanup. */ pfree(skip_slot); @@ -1378,40 +1427,64 @@ AbsorbSyncRequests(void) { CheckpointerRequest *requests = NULL; CheckpointerRequest *request; - int n; + int n, + i; + bool loop; if (!AmCheckpointerProcess()) return; - LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE); - - /* - * We try to avoid holding the lock for a long time by copying the request - * array, and processing the requests after releasing the lock. - * - * Once we have cleared the requests from shared memory, we have to PANIC - * if we then fail to absorb them (eg, because our hashtable runs out of - * memory). This is because the system cannot run safely if we are unable - * to fsync what we have been told to fsync. Fortunately, the hashtable - * is so small that the problem is quite unlikely to arise in practice. - */ - n = CheckpointerShmem->num_requests; - if (n > 0) + do { - requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest)); - memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest)); - } + LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE); + + /*--- + * We try to avoid holding the lock for a long time by: + * 1. Copying the request array and processing the requests after + * releasing the lock; + * 2. Processing not the whole queue, but only batches of + * CKPT_REQ_BATCH_SIZE at once. 
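(The head/tail bookkeeping above turns the request array into a classic ring buffer. A self-contained sketch of the index arithmetic; Ring, ring_push, and ring_pop are invented names, and the real queue additionally compacts duplicates under CheckpointerCommLock:)

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_SIZE 8            /* stands in for max_requests */

typedef struct
{
    int items[QUEUE_SIZE];
    int head;                   /* index of the oldest request */
    int tail;                   /* index one past the newest request */
    int num;                    /* current occupancy */
} Ring;

static bool
ring_push(Ring *q, int item)
{
    if (q->num == QUEUE_SIZE)
        return false;           /* full: caller must compact or absorb */
    q->items[q->tail] = item;
    q->tail = (q->tail + 1) % QUEUE_SIZE;
    q->num++;
    return true;
}

static bool
ring_pop(Ring *q, int *item)
{
    if (q->num == 0)
        return false;
    *item = q->items[q->head];
    q->head = (q->head + 1) % QUEUE_SIZE;
    q->num--;
    return true;
}

int
main(void)
{
    Ring q = {0};
    int v;

    for (int i = 0; i < 5; i++)
        ring_push(&q, i);
    while (ring_pop(&q, &v))
        printf("%d ", v);       /* prints: 0 1 2 3 4 */
    printf("\n");
    return 0;
}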
+ * + * Once we have cleared the requests from shared memory, we must + * PANIC if we then fail to absorb them (e.g., because our hashtable + * runs out of memory). This is because the system cannot run safely + * if we are unable to fsync what we have been told to fsync. + * Fortunately, the hashtable is so small that the problem is quite + * unlikely to arise in practice. + * + * Note: The maximum possible size of a ring buffer is + * MAX_CHECKPOINT_REQUESTS entries, which fit into a maximum palloc + * allocation size of 1Gb. Our maximum batch size, + * CKPT_REQ_BATCH_SIZE, is even smaller. + */ + n = Min(CheckpointerShmem->num_requests, CKPT_REQ_BATCH_SIZE); + if (n > 0) + { + if (!requests) + requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest)); - START_CRIT_SECTION(); + for (i = 0; i < n; i++) + { + requests[i] = CheckpointerShmem->requests[CheckpointerShmem->head]; + CheckpointerShmem->head = (CheckpointerShmem->head + 1) % CheckpointerShmem->max_requests; + } - CheckpointerShmem->num_requests = 0; + CheckpointerShmem->num_requests -= n; - LWLockRelease(CheckpointerCommLock); + } + + START_CRIT_SECTION(); + + /* Are there any requests in the queue? If so, keep going. */ + loop = CheckpointerShmem->num_requests != 0; + + LWLockRelease(CheckpointerCommLock); - for (request = requests; n > 0; request++, n--) - RememberSyncRequest(&request->ftag, request->type); + for (request = requests; n > 0; request++, n--) + RememberSyncRequest(&request->ftag, request->type); - END_CRIT_SECTION(); + END_CRIT_SECTION(); + } while (loop); if (requests) pfree(requests); diff --git a/src/backend/postmaster/pmchild.c b/src/backend/postmaster/pmchild.c index cde1d23a4ca..584bb58c8ab 100644 --- a/src/backend/postmaster/pmchild.c +++ b/src/backend/postmaster/pmchild.c @@ -60,6 +60,17 @@ NON_EXEC_STATIC int num_pmchild_slots = 0; dlist_head ActiveChildList; /* + * Dummy pointer to persuade Valgrind that we've not leaked the array of + * PMChild structs. Make it global to ensure the compiler doesn't + * optimize it away. + */ +#ifdef USE_VALGRIND +extern PMChild *pmchild_array; +PMChild *pmchild_array; +#endif + + +/* * MaxLivePostmasterChildren * * This reports the number of postmaster child processes that can be active. @@ -125,8 +136,13 @@ InitPostmasterChildSlots(void) for (int i = 0; i < BACKEND_NUM_TYPES; i++) num_pmchild_slots += pmchild_pools[i].size; - /* Initialize them */ + /* Allocate enough slots, and make sure Valgrind doesn't complain */ slots = palloc(num_pmchild_slots * sizeof(PMChild)); +#ifdef USE_VALGRIND + pmchild_array = slots; +#endif + + /* Initialize them */ slotno = 0; for (int btype = 0; btype < BACKEND_NUM_TYPES; btype++) { diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index cca9b946e53..e01d9f0cfe8 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -2630,6 +2630,13 @@ CleanupBackend(PMChild *bp, } bp = NULL; + /* + * In a crash case, exit immediately without resetting background worker + * state. However, if restart_after_crash is enabled, the background + * worker state (e.g., rw_pid) still needs be reset so the worker can + * restart after crash recovery. This reset is handled in + * ResetBackgroundWorkerCrashTimes(), not here. 
+ */ if (crashed) { HandleChildCrash(bp_pid, exitstatus, procname); diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c index 886d99951dd..239641bfbb6 100644 --- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c +++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c @@ -421,31 +421,22 @@ libpqrcv_identify_system(WalReceiverConn *conn, TimeLineID *primary_tli) "IDENTIFY_SYSTEM", WAIT_EVENT_LIBPQWALRECEIVER_RECEIVE); if (PQresultStatus(res) != PGRES_TUPLES_OK) - { - PQclear(res); ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("could not receive database system identifier and timeline ID from " "the primary server: %s", pchomp(PQerrorMessage(conn->streamConn))))); - } /* * IDENTIFY_SYSTEM returns 3 columns in 9.3 and earlier, and 4 columns in * 9.4 and onwards. */ if (PQnfields(res) < 3 || PQntuples(res) != 1) - { - int ntuples = PQntuples(res); - int nfields = PQnfields(res); - - PQclear(res); ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid response from primary server"), errdetail("Could not identify system: got %d rows and %d fields, expected %d rows and %d or more fields.", - ntuples, nfields, 1, 3))); - } + PQntuples(res), PQnfields(res), 1, 3))); primary_sysid = pstrdup(PQgetvalue(res, 0, 0)); *primary_tli = pg_strtoint32(PQgetvalue(res, 0, 1)); PQclear(res); @@ -607,13 +598,10 @@ libpqrcv_startstreaming(WalReceiverConn *conn, return false; } else if (PQresultStatus(res) != PGRES_COPY_BOTH) - { - PQclear(res); ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("could not start WAL streaming: %s", pchomp(PQerrorMessage(conn->streamConn))))); - } PQclear(res); return true; } @@ -721,26 +709,17 @@ libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn, cmd, WAIT_EVENT_LIBPQWALRECEIVER_RECEIVE); if (PQresultStatus(res) != PGRES_TUPLES_OK) - { - PQclear(res); ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("could not receive timeline history file from " "the primary server: %s", pchomp(PQerrorMessage(conn->streamConn))))); - } if (PQnfields(res) != 2 || PQntuples(res) != 1) - { - int ntuples = PQntuples(res); - int nfields = PQnfields(res); - - PQclear(res); ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid response from primary server"), errdetail("Expected 1 tuple with 2 fields, got %d tuples with %d fields.", - ntuples, nfields))); - } + PQntuples(res), PQnfields(res)))); *filename = pstrdup(PQgetvalue(res, 0, 0)); *len = PQgetlength(res, 0, 1); @@ -844,13 +823,10 @@ libpqrcv_receive(WalReceiverConn *conn, char **buffer, return -1; } else - { - PQclear(res); ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("could not receive data from WAL stream: %s", pchomp(PQerrorMessage(conn->streamConn))))); - } } if (rawlen < -1) ereport(ERROR, @@ -974,13 +950,10 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname, pfree(cmd.data); if (PQresultStatus(res) != PGRES_TUPLES_OK) - { - PQclear(res); ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("could not create replication slot \"%s\": %s", slotname, pchomp(PQerrorMessage(conn->streamConn))))); - } if (lsn) *lsn = DatumGetLSN(DirectFunctionCall1Coll(pg_lsn_in, InvalidOid, diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 742d9ba68e9..37377f7eb63 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -790,6 +790,8 @@ 
logicalrep_worker_detach(void) } LWLockRelease(LogicalRepWorkerLock); + + list_free(workers); } /* Block concurrent access. */ diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 5febd154b6b..34cf05668ae 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -2599,7 +2599,7 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, if (++changes_count >= CHANGES_THRESHOLD) { - rb->update_progress_txn(rb, txn, change->lsn); + rb->update_progress_txn(rb, txn, prev_lsn); changes_count = 0; } } diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 3fea0a0206e..d3356bc84ee 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -316,7 +316,8 @@ process_syncing_tables_for_sync(XLogRecPtr current_lsn) UpdateSubscriptionRelState(MyLogicalRepWorker->subid, MyLogicalRepWorker->relid, MyLogicalRepWorker->relstate, - MyLogicalRepWorker->relstate_lsn); + MyLogicalRepWorker->relstate_lsn, + false); /* * End streaming so that LogRepWorkerWalRcvConn can be used to drop @@ -425,6 +426,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) ListCell *lc; bool started_tx = false; bool should_exit = false; + Relation rel = NULL; Assert(!IsTransactionState()); @@ -492,7 +494,17 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) * worker to remove the origin tracking as if there is any * error while dropping we won't restart it to drop the * origin. So passing missing_ok = true. + * + * Lock the subscription and origin in the same order as we + * are doing during DDL commands to avoid deadlocks. See + * AlterSubscription_refresh. */ + LockSharedObject(SubscriptionRelationId, MyLogicalRepWorker->subid, + 0, AccessShareLock); + + if (!rel) + rel = table_open(SubscriptionRelRelationId, RowExclusiveLock); + ReplicationOriginNameForLogicalRep(MyLogicalRepWorker->subid, rstate->relid, originname, @@ -504,7 +516,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) */ UpdateSubscriptionRelState(MyLogicalRepWorker->subid, rstate->relid, rstate->state, - rstate->lsn); + rstate->lsn, true); } } else @@ -555,7 +567,14 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) * This is required to avoid any undetected deadlocks * due to any existing lock as deadlock detector won't * be able to detect the waits on the latch. + * + * Also close any tables prior to the commit. 
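(The comment above pins down the fix: both the apply worker and ALTER SUBSCRIPTION ... REFRESH must take the subscription lock before the pg_subscription_rel lock. A sketch of the ordering rule, with pthread mutexes standing in for the heavyweight lock manager and invented function names:)

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t subscription_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t catalog_lock = PTHREAD_MUTEX_INITIALIZER;

static void
apply_worker_path(void)
{
    pthread_mutex_lock(&subscription_lock);     /* first: the subscription */
    pthread_mutex_lock(&catalog_lock);          /* then: pg_subscription_rel */
    /* ... UpdateSubscriptionRelState(..., already_locked = true) ... */
    pthread_mutex_unlock(&catalog_lock);
    pthread_mutex_unlock(&subscription_lock);
}

static void
ddl_refresh_path(void)
{
    pthread_mutex_lock(&subscription_lock);     /* same order as above */
    pthread_mutex_lock(&catalog_lock);
    /* ... refresh work ... */
    pthread_mutex_unlock(&catalog_lock);
    pthread_mutex_unlock(&subscription_lock);
}

int
main(void)
{
    apply_worker_path();
    ddl_refresh_path();
    printf("both paths lock in one order, so neither can deadlock the other\n");
    return 0;
}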
*/ + if (rel) + { + table_close(rel, NoLock); + rel = NULL; + } CommitTransactionCommand(); pgstat_report_stat(false); } @@ -623,6 +642,11 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn) } } + /* Close table if opened */ + if (rel) + table_close(rel, NoLock); + + if (started_tx) { /* @@ -1414,7 +1438,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) UpdateSubscriptionRelState(MyLogicalRepWorker->subid, MyLogicalRepWorker->relid, MyLogicalRepWorker->relstate, - MyLogicalRepWorker->relstate_lsn); + MyLogicalRepWorker->relstate_lsn, + false); CommitTransactionCommand(); pgstat_report_stat(true); @@ -1547,7 +1572,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) UpdateSubscriptionRelState(MyLogicalRepWorker->subid, MyLogicalRepWorker->relid, SUBREL_STATE_FINISHEDCOPY, - MyLogicalRepWorker->relstate_lsn); + MyLogicalRepWorker->relstate_lsn, + false); CommitTransactionCommand(); diff --git a/src/backend/replication/syncrep_scanner.l b/src/backend/replication/syncrep_scanner.l index 7dec1f869c7..02004d621e7 100644 --- a/src/backend/replication/syncrep_scanner.l +++ b/src/backend/replication/syncrep_scanner.l @@ -157,17 +157,16 @@ syncrep_yyerror(SyncRepConfigData **syncrep_parse_result_p, char **syncrep_parse { struct yyguts_t *yyg = (struct yyguts_t *) yyscanner; /* needed for yytext * macro */ - char *syncrep_parse_error_msg = *syncrep_parse_error_msg_p; /* report only the first error in a parse operation */ - if (syncrep_parse_error_msg) + if (*syncrep_parse_error_msg_p) return; if (yytext[0]) - syncrep_parse_error_msg = psprintf("%s at or near \"%s\"", - message, yytext); + *syncrep_parse_error_msg_p = psprintf("%s at or near \"%s\"", + message, yytext); else - syncrep_parse_error_msg = psprintf("%s at end of input", - message); + *syncrep_parse_error_msg_p = psprintf("%s at end of input", + message); } void diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 6afdd28dba6..67431208e7f 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -2743,12 +2743,10 @@ ExtendBufferedRelShared(BufferManagerRelation bmr, * because mdread doesn't complain about reads beyond EOF (when * zero_damaged_pages is ON) and so a previous attempt to read a block * beyond EOF could have left a "valid" zero-filled buffer. - * Unfortunately, we have also seen this case occurring because of - * buggy Linux kernels that sometimes return an lseek(SEEK_END) result - * that doesn't account for a recent write. In that situation, the - * pre-existing buffer would contain valid data that we don't want to - * overwrite. Since the legitimate cases should always have left a - * zero-filled buffer, complain if not PageIsNew. + * + * This has also been observed when relation was overwritten by + * external process. Since the legitimate cases should always have + * left a zero-filled buffer, complain if not PageIsNew. 
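(The syncrep_yyerror() fix above is a textbook out-parameter bug: the old code copied *syncrep_parse_error_msg_p into a local and assigned to the local, so the caller never saw the message. A distilled sketch:)

#include <stdio.h>

static void
set_error_wrong(char **msg_p, char *message)
{
    char *msg = *msg_p;         /* local copy of the pointer */

    msg = message;              /* lost: only the local is updated */
    (void) msg;
}

static void
set_error_right(char **msg_p, char *message)
{
    *msg_p = message;           /* visible to the caller */
}

int
main(void)
{
    char *err = NULL;

    set_error_wrong(&err, "syntax error");
    printf("wrong: %s\n", err ? err : "(null)");
    set_error_right(&err, "syntax error");
    printf("right: %s\n", err);
    return 0;
}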
*/ if (existing_id >= 0) { @@ -2778,8 +2776,7 @@ ExtendBufferedRelShared(BufferManagerRelation bmr, ereport(ERROR, (errmsg("unexpected data beyond EOF in block %u of relation %s", existing_hdr->tag.blockNum, - relpath(bmr.smgr->smgr_rlocator, fork).str), - errhint("This has been seen to occur with buggy kernels; consider updating your system."))); + relpath(bmr.smgr->smgr_rlocator, fork).str))); /* * We *must* do smgr[zero]extend before succeeding, else the page diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 3da9c41ee1d..3c0d20f4659 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -932,10 +932,11 @@ GetLocalBufferStorage(void) num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ); /* Buffers should be I/O aligned. */ - cur_block = (char *) - TYPEALIGN(PG_IO_ALIGN_SIZE, - MemoryContextAlloc(LocalBufferContext, - num_bufs * BLCKSZ + PG_IO_ALIGN_SIZE)); + cur_block = MemoryContextAllocAligned(LocalBufferContext, + num_bufs * BLCKSZ, + PG_IO_ALIGN_SIZE, + 0); + next_buf_in_block = 0; num_bufs_in_block = num_bufs; } diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c index c6aefd2f688..beadeb5e46a 100644 --- a/src/backend/storage/ipc/latch.c +++ b/src/backend/storage/ipc/latch.c @@ -187,9 +187,11 @@ WaitLatch(Latch *latch, int wakeEvents, long timeout, if (!(wakeEvents & WL_LATCH_SET)) latch = NULL; ModifyWaitEvent(LatchWaitSet, LatchWaitSetLatchPos, WL_LATCH_SET, latch); - ModifyWaitEvent(LatchWaitSet, LatchWaitSetPostmasterDeathPos, - (wakeEvents & (WL_EXIT_ON_PM_DEATH | WL_POSTMASTER_DEATH)), - NULL); + + if (IsUnderPostmaster) + ModifyWaitEvent(LatchWaitSet, LatchWaitSetPostmasterDeathPos, + (wakeEvents & (WL_EXIT_ON_PM_DEATH | WL_POSTMASTER_DEATH)), + NULL); if (WaitEventSetWait(LatchWaitSet, (wakeEvents & WL_TIMEOUT) ? timeout : -1, diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c index a9bb540b55a..087821311cc 100644 --- a/src/backend/storage/ipc/procsignal.c +++ b/src/backend/storage/ipc/procsignal.c @@ -728,7 +728,11 @@ procsignal_sigusr1_handler(SIGNAL_ARGS) void SendCancelRequest(int backendPID, const uint8 *cancel_key, int cancel_key_len) { - Assert(backendPID != 0); + if (backendPID == 0) + { + ereport(LOG, (errmsg("invalid cancel request with PID 0"))); + return; + } /* * See if we have a matching backend. Reading the pss_pid and diff --git a/src/backend/tcop/backend_startup.c b/src/backend/tcop/backend_startup.c index ad0af5edc1f..14d5fc0b196 100644 --- a/src/backend/tcop/backend_startup.c +++ b/src/backend/tcop/backend_startup.c @@ -492,7 +492,7 @@ static int ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) { int32 len; - char *buf; + char *buf = NULL; ProtocolVersion proto; MemoryContext oldcontext; @@ -516,7 +516,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) * scanners, which may be less benign, but it's not really our job to * notice those.) 
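(The localbuf.c change above asks the allocator for aligned memory via MemoryContextAllocAligned instead of over-allocating and rounding up with TYPEALIGN. A standard-C sketch of the same idea using C11 aligned_alloc; IO_ALIGN and BLOCK_SIZE are placeholder values for PG_IO_ALIGN_SIZE and BLCKSZ:)

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define IO_ALIGN 4096
#define BLOCK_SIZE 8192

int
main(void)
{
    /* aligned_alloc requires size to be a multiple of the alignment */
    char *block = aligned_alloc(IO_ALIGN, 16 * BLOCK_SIZE);

    if (block == NULL)
        return 1;
    printf("aligned: %s\n",
           (uintptr_t) block % IO_ALIGN == 0 ? "yes" : "no");
    free(block);
    return 0;
}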
*/ - return STATUS_ERROR; + goto fail; } if (pq_getbytes(((char *) &len) + 1, 3) == EOF) @@ -526,7 +526,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete startup packet"))); - return STATUS_ERROR; + goto fail; } len = pg_ntoh32(len); @@ -538,7 +538,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("invalid length of startup packet"))); - return STATUS_ERROR; + goto fail; } /* @@ -554,7 +554,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), errmsg("incomplete startup packet"))); - return STATUS_ERROR; + goto fail; } pq_endmsgread(); @@ -568,7 +568,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) { ProcessCancelRequestPacket(port, buf, len); /* Not really an error, but we don't want to proceed further */ - return STATUS_ERROR; + goto fail; } if (proto == NEGOTIATE_SSL_CODE && !ssl_done) @@ -607,14 +607,16 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode_for_socket_access(), errmsg("failed to send SSL negotiation response: %m"))); - return STATUS_ERROR; /* close the connection */ + goto fail; /* close the connection */ } #ifdef USE_SSL if (SSLok == 'S' && secure_open_server(port) == -1) - return STATUS_ERROR; + goto fail; #endif + pfree(buf); + /* * At this point we should have no data already buffered. If we do, * it was received before we performed the SSL handshake, so it wasn't @@ -661,14 +663,16 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ereport(COMMERROR, (errcode_for_socket_access(), errmsg("failed to send GSSAPI negotiation response: %m"))); - return STATUS_ERROR; /* close the connection */ + goto fail; /* close the connection */ } #ifdef ENABLE_GSS if (GSSok == 'G' && secure_open_gssapi(port) == -1) - return STATUS_ERROR; + goto fail; #endif + pfree(buf); + /* * At this point we should have no data already buffered. 
If we do, * it was received before we performed the GSS handshake, so it wasn't @@ -863,7 +867,16 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) */ MemoryContextSwitchTo(oldcontext); + pfree(buf); + return STATUS_OK; + +fail: + /* be tidy, just to avoid Valgrind complaints */ + if (buf) + pfree(buf); + + return STATUS_ERROR; } /* diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index a297606cdd7..0cecd464902 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -988,7 +988,7 @@ pg_plan_queries(List *querytrees, const char *query_string, int cursorOptions, stmt->stmt_location = query->stmt_location; stmt->stmt_len = query->stmt_len; stmt->queryId = query->queryId; - stmt->cached_plan_type = PLAN_CACHE_NONE; + stmt->planOrigin = PLAN_STMT_INTERNAL; } else { diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c index babc34d0cbe..4f4191b0ea6 100644 --- a/src/backend/tcop/utility.c +++ b/src/backend/tcop/utility.c @@ -1234,7 +1234,7 @@ ProcessUtilitySlow(ParseState *pstate, wrapper->utilityStmt = stmt; wrapper->stmt_location = pstmt->stmt_location; wrapper->stmt_len = pstmt->stmt_len; - wrapper->cached_plan_type = PLAN_CACHE_NONE; + wrapper->planOrigin = PLAN_STMT_INTERNAL; ProcessUtility(wrapper, queryString, @@ -1965,7 +1965,7 @@ ProcessUtilityForAlterTable(Node *stmt, AlterTableUtilityContext *context) wrapper->utilityStmt = stmt; wrapper->stmt_location = context->pstmt->stmt_location; wrapper->stmt_len = context->pstmt->stmt_len; - wrapper->cached_plan_type = PLAN_CACHE_NONE; + wrapper->planOrigin = PLAN_STMT_INTERNAL; ProcessUtility(wrapper, context->queryString, diff --git a/src/backend/tsearch/dict_ispell.c b/src/backend/tsearch/dict_ispell.c index 63bd193a78a..debfbf956cc 100644 --- a/src/backend/tsearch/dict_ispell.c +++ b/src/backend/tsearch/dict_ispell.c @@ -47,24 +47,30 @@ dispell_init(PG_FUNCTION_ARGS) if (strcmp(defel->defname, "dictfile") == 0) { + char *filename; + if (dictloaded) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("multiple DictFile parameters"))); - NIImportDictionary(&(d->obj), - get_tsearch_config_filename(defGetString(defel), - "dict")); + filename = get_tsearch_config_filename(defGetString(defel), + "dict"); + NIImportDictionary(&(d->obj), filename); + pfree(filename); dictloaded = true; } else if (strcmp(defel->defname, "afffile") == 0) { + char *filename; + if (affloaded) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("multiple AffFile parameters"))); - NIImportAffixes(&(d->obj), - get_tsearch_config_filename(defGetString(defel), - "affix")); + filename = get_tsearch_config_filename(defGetString(defel), + "affix"); + NIImportAffixes(&(d->obj), filename); + pfree(filename); affloaded = true; } else if (strcmp(defel->defname, "stopwords") == 0) diff --git a/src/backend/tsearch/dict_synonym.c b/src/backend/tsearch/dict_synonym.c index 0da5a9d6868..c2773eb01ad 100644 --- a/src/backend/tsearch/dict_synonym.c +++ b/src/backend/tsearch/dict_synonym.c @@ -199,6 +199,7 @@ skipline: } tsearch_readline_end(&trst); + pfree(filename); d->len = cur; qsort(d->syn, d->len, sizeof(Syn), compareSyn); diff --git a/src/backend/tsearch/dict_thesaurus.c b/src/backend/tsearch/dict_thesaurus.c index 1bebe36a691..1e6bbde1ca7 100644 --- a/src/backend/tsearch/dict_thesaurus.c +++ b/src/backend/tsearch/dict_thesaurus.c @@ -167,17 +167,17 @@ addWrd(DictThesaurus *d, char *b, char *e, uint32 idsubst, uint16 nwrd, uint16 p static void thesaurusRead(const char *filename, 
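(The ProcessStartupPacket() rework above funnels every early return through one "fail" label that frees buf, which is why buf now starts out NULL. A distilled sketch of the single-exit cleanup pattern; process_packet is an invented stand-in:)

#include <stdlib.h>

static int
process_packet(size_t len)
{
    char *buf = NULL;

    if (len == 0 || len > 1024)
        goto fail;              /* invalid length; nothing allocated yet */

    buf = malloc(len);
    if (buf == NULL)
        goto fail;

    /* ... read and validate the packet into buf ... */

    free(buf);
    return 0;                   /* STATUS_OK */

fail:
    free(buf);                  /* free(NULL) is a harmless no-op */
    return -1;                  /* STATUS_ERROR */
}

int
main(void)
{
    return process_packet(64) == 0 ? 0 : 1;
}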
DictThesaurus *d) { + char *real_filename = get_tsearch_config_filename(filename, "ths"); tsearch_readline_state trst; uint32 idsubst = 0; bool useasis = false; char *line; - filename = get_tsearch_config_filename(filename, "ths"); - if (!tsearch_readline_begin(&trst, filename)) + if (!tsearch_readline_begin(&trst, real_filename)) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("could not open thesaurus file \"%s\": %m", - filename))); + real_filename))); while ((line = tsearch_readline(&trst)) != NULL) { @@ -297,6 +297,7 @@ thesaurusRead(const char *filename, DictThesaurus *d) d->nsubst = idsubst; tsearch_readline_end(&trst); + pfree(real_filename); } static TheLexeme * diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c index 8b57845e870..6bc91ce0dad 100644 --- a/src/backend/utils/activity/pgstat.c +++ b/src/backend/utils/activity/pgstat.c @@ -212,6 +212,11 @@ int pgstat_fetch_consistency = PGSTAT_FETCH_CONSISTENCY_CACHE; PgStat_LocalState pgStatLocal; +/* + * Track pending reports for fixed-numbered stats, used by + * pgstat_report_stat(). + */ +bool pgstat_report_fixed = false; /* ---------- * Local data @@ -370,7 +375,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE] .shared_data_off = offsetof(PgStatShared_Backend, stats), .shared_data_len = sizeof(((PgStatShared_Backend *) 0)->stats), - .have_static_pending_cb = pgstat_backend_have_pending_cb, .flush_static_cb = pgstat_backend_flush_cb, .reset_timestamp_cb = pgstat_backend_reset_timestamp_cb, }, @@ -437,7 +441,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE] .shared_data_len = sizeof(((PgStatShared_IO *) 0)->stats), .flush_static_cb = pgstat_io_flush_cb, - .have_static_pending_cb = pgstat_io_have_pending_cb, .init_shmem_cb = pgstat_io_init_shmem_cb, .reset_all_cb = pgstat_io_reset_all_cb, .snapshot_cb = pgstat_io_snapshot_cb, @@ -455,7 +458,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE] .shared_data_len = sizeof(((PgStatShared_SLRU *) 0)->stats), .flush_static_cb = pgstat_slru_flush_cb, - .have_static_pending_cb = pgstat_slru_have_pending_cb, .init_shmem_cb = pgstat_slru_init_shmem_cb, .reset_all_cb = pgstat_slru_reset_all_cb, .snapshot_cb = pgstat_slru_snapshot_cb, @@ -474,7 +476,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE] .init_backend_cb = pgstat_wal_init_backend_cb, .flush_static_cb = pgstat_wal_flush_cb, - .have_static_pending_cb = pgstat_wal_have_pending_cb, .init_shmem_cb = pgstat_wal_init_shmem_cb, .reset_all_cb = pgstat_wal_reset_all_cb, .snapshot_cb = pgstat_wal_snapshot_cb, @@ -708,29 +709,10 @@ pgstat_report_stat(bool force) } /* Don't expend a clock check if nothing to do */ - if (dlist_is_empty(&pgStatPending)) + if (dlist_is_empty(&pgStatPending) && + !pgstat_report_fixed) { - bool do_flush = false; - - /* Check for pending stats */ - for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++) - { - const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind); - - if (!kind_info) - continue; - if (!kind_info->have_static_pending_cb) - continue; - - if (kind_info->have_static_pending_cb()) - { - do_flush = true; - break; - } - } - - if (!do_flush) - return 0; + return 0; } /* @@ -784,16 +766,19 @@ pgstat_report_stat(bool force) partial_flush |= pgstat_flush_pending_entries(nowait); /* flush of other stats kinds */ - for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++) + if 
(pgstat_report_fixed) { - const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind); + for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++) + { + const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind); - if (!kind_info) - continue; - if (!kind_info->flush_static_cb) - continue; + if (!kind_info) + continue; + if (!kind_info->flush_static_cb) + continue; - partial_flush |= kind_info->flush_static_cb(nowait); + partial_flush |= kind_info->flush_static_cb(nowait); + } } last_flush = now; @@ -815,6 +800,7 @@ pgstat_report_stat(bool force) } pending_since = 0; + pgstat_report_fixed = false; return 0; } diff --git a/src/backend/utils/activity/pgstat_backend.c b/src/backend/utils/activity/pgstat_backend.c index 51256277e8d..8714a85e2d9 100644 --- a/src/backend/utils/activity/pgstat_backend.c +++ b/src/backend/utils/activity/pgstat_backend.c @@ -66,6 +66,7 @@ pgstat_count_backend_io_op_time(IOObject io_object, IOContext io_context, io_time); backend_has_iostats = true; + pgstat_report_fixed = true; } void @@ -81,6 +82,7 @@ pgstat_count_backend_io_op(IOObject io_object, IOContext io_context, PendingBackendStats.pending_io.bytes[io_object][io_context][io_op] += bytes; backend_has_iostats = true; + pgstat_report_fixed = true; } /* @@ -302,18 +304,6 @@ pgstat_flush_backend(bool nowait, bits32 flags) } /* - * Check if there are any backend stats waiting for flush. - */ -bool -pgstat_backend_have_pending_cb(void) -{ - if (!pgstat_tracks_backend_bktype(MyBackendType)) - return false; - - return (backend_has_iostats || pgstat_backend_wal_have_pending()); -} - -/* * Callback to flush out locally pending backend statistics. * * If some stats could not be flushed due to lock contention, return true. diff --git a/src/backend/utils/activity/pgstat_io.c b/src/backend/utils/activity/pgstat_io.c index d8d26379a57..13ae57ed649 100644 --- a/src/backend/utils/activity/pgstat_io.c +++ b/src/backend/utils/activity/pgstat_io.c @@ -80,6 +80,7 @@ pgstat_count_io_op(IOObject io_object, IOContext io_context, IOOp io_op, pgstat_count_backend_io_op(io_object, io_context, io_op, cnt, bytes); have_iostats = true; + pgstat_report_fixed = true; } /* @@ -168,15 +169,6 @@ pgstat_fetch_stat_io(void) } /* - * Check if there any IO stats waiting for flush. - */ -bool -pgstat_io_have_pending_cb(void) -{ - return have_iostats; -} - -/* * Simpler wrapper of pgstat_io_flush_cb() */ void diff --git a/src/backend/utils/activity/pgstat_slru.c b/src/backend/utils/activity/pgstat_slru.c index b9e940dde45..7bd8744accb 100644 --- a/src/backend/utils/activity/pgstat_slru.c +++ b/src/backend/utils/activity/pgstat_slru.c @@ -144,15 +144,6 @@ pgstat_get_slru_index(const char *name) } /* - * Check if there are any SLRU stats entries waiting for flush. - */ -bool -pgstat_slru_have_pending_cb(void) -{ - return have_slrustats; -} - -/* * Flush out locally pending SLRU stats entries * * If nowait is true, this function returns false on lock failure. Otherwise @@ -247,6 +238,7 @@ get_slru_entry(int slru_idx) Assert((slru_idx >= 0) && (slru_idx < SLRU_NUM_ELEMENTS)); have_slrustats = true; + pgstat_report_fixed = true; return &pending_SLRUStats[slru_idx]; } diff --git a/src/backend/utils/activity/pgstat_wal.c b/src/backend/utils/activity/pgstat_wal.c index 16a1ecb4d90..0d04480d2f6 100644 --- a/src/backend/utils/activity/pgstat_wal.c +++ b/src/backend/utils/activity/pgstat_wal.c @@ -72,6 +72,15 @@ pgstat_fetch_stat_wal(void) } /* + * To determine whether WAL usage happened. 
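[Annotation] The pgstat rework above replaces per-kind have_static_pending_cb polling with one process-local boolean: every producer of fixed-numbered stats sets pgstat_report_fixed when it accumulates anything, and pgstat_report_stat() checks just that flag before walking the flush callbacks. A toy model of the pattern, with hypothetical names standing in for the real stats kinds:

#include <stdbool.h>
#include <stdio.h>

/* One flag replaces polling every stats kind for pending data. */
static bool report_fixed = false;

/* Hypothetical producer side: set the flag whenever stats accumulate. */
static long pending_io_ops = 0;

void
count_io_op(void)
{
    pending_io_ops++;
    report_fixed = true;        /* cheap; no callback walk needed later */
}

/* Consumer side: O(1) fast path when nothing is pending anywhere. */
void
report_stat(void)
{
    if (!report_fixed)
        return;                 /* nothing to flush */

    printf("flushing %ld pending IO ops\n", pending_io_ops);
    pending_io_ops = 0;
    report_fixed = false;       /* reset once everything is flushed */
}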
+ */ +static inline bool +pgstat_wal_have_pending(void) +{ + return pgWalUsage.wal_records != prevWalUsage.wal_records; +} + +/* * Calculate how much WAL usage counters have increased by subtracting the * previous counters from the current ones. * @@ -92,7 +101,7 @@ pgstat_wal_flush_cb(bool nowait) * This function can be called even if nothing at all has happened. Avoid * taking lock for nothing in that case. */ - if (!pgstat_wal_have_pending_cb()) + if (!pgstat_wal_have_pending()) return false; /* @@ -136,15 +145,6 @@ pgstat_wal_init_backend_cb(void) prevWalUsage = pgWalUsage; } -/* - * To determine whether WAL usage happened. - */ -bool -pgstat_wal_have_pending_cb(void) -{ - return pgWalUsage.wal_records != prevWalUsage.wal_records; -} - void pgstat_wal_init_shmem_cb(void *stats) { diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c index 1b0df111717..39dab3e42df 100644 --- a/src/backend/utils/adt/tid.c +++ b/src/backend/utils/adt/tid.c @@ -84,7 +84,7 @@ tidin(PG_FUNCTION_ARGS) /* * Cope with possibility that unsigned long is wider than BlockNumber, in * which case strtoul will not raise an error for some values that are out - * of the range of BlockNumber. (See similar code in oidin().) + * of the range of BlockNumber. (See similar code in uint32in_subr().) */ #if SIZEOF_LONG > 4 if (cvt != (unsigned long) blockNumber && diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index f7b731825fc..182e8f75db7 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -1769,7 +1769,7 @@ xml_doctype_in_content(const xmlChar *str) * xmloption_arg, but a DOCTYPE node in the input can force DOCUMENT mode). * * If parsed_nodes isn't NULL and we parse in CONTENT mode, the list - * of parsed nodes from the xmlParseInNodeContext call will be returned + * of parsed nodes from the xmlParseBalancedChunkMemory call will be returned * to *parsed_nodes. (It is caller's responsibility to free that.) * * Errors normally result in ereport(ERROR), but if escontext is an @@ -1795,6 +1795,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, PgXmlErrorContext *xmlerrcxt; volatile xmlParserCtxtPtr ctxt = NULL; volatile xmlDocPtr doc = NULL; + volatile int save_keep_blanks = -1; /* * This step looks annoyingly redundant, but we must do it to have a @@ -1822,7 +1823,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg, PG_TRY(); { bool parse_as_document = false; - int options; int res_code; size_t count = 0; xmlChar *version = NULL; @@ -1853,18 +1853,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg, parse_as_document = true; } - /* - * Select parse options. - * - * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR) - * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined by - * internal DTD are applied'. As for external DTDs, we try to support - * them too (see SQL/XML:2008 GR 10.16.7.e), but that doesn't really - * happen because xmlPgEntityLoader prevents it. - */ - options = XML_PARSE_NOENT | XML_PARSE_DTDATTR - | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS); - /* initialize output parameters */ if (parsed_xmloptiontype != NULL) *parsed_xmloptiontype = parse_as_document ? 
XMLOPTION_DOCUMENT : @@ -1874,11 +1862,26 @@ xml_parse(text *data, XmlOptionType xmloption_arg, if (parse_as_document) { + int options; + + /* set up parser context used by xmlCtxtReadDoc */ ctxt = xmlNewParserCtxt(); if (ctxt == NULL || xmlerrcxt->err_occurred) xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, "could not allocate parser context"); + /* + * Select parse options. + * + * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR) + * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined + * by internal DTD are applied'. As for external DTDs, we try to + * support them too (see SQL/XML:2008 GR 10.16.7.e), but that + * doesn't really happen because xmlPgEntityLoader prevents it. + */ + options = XML_PARSE_NOENT | XML_PARSE_DTDATTR + | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS); + doc = xmlCtxtReadDoc(ctxt, utf8string, NULL, /* no URL */ "UTF-8", @@ -1900,10 +1903,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, } else { - xmlNodePtr root; - xmlNodePtr oldroot PG_USED_FOR_ASSERTS_ONLY; - - /* set up document with empty root node to be the context node */ + /* set up document that xmlParseBalancedChunkMemory will add to */ doc = xmlNewDoc(version); if (doc == NULL || xmlerrcxt->err_occurred) xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, @@ -1916,43 +1916,22 @@ xml_parse(text *data, XmlOptionType xmloption_arg, "could not allocate XML document"); doc->standalone = standalone; - root = xmlNewNode(NULL, (const xmlChar *) "content-root"); - if (root == NULL || xmlerrcxt->err_occurred) - xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY, - "could not allocate xml node"); - - /* - * This attaches root to doc, so we need not free it separately; - * and there can't yet be any old root to free. - */ - oldroot = xmlDocSetRootElement(doc, root); - Assert(oldroot == NULL); + /* set parse options --- have to do this the ugly way */ + save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0); /* allow empty content */ if (*(utf8string + count)) { - xmlNodePtr node_list = NULL; - xmlParserErrors res; - - res = xmlParseInNodeContext(root, - (char *) utf8string + count, - strlen((char *) utf8string + count), - options, - &node_list); - - if (res != XML_ERR_OK || xmlerrcxt->err_occurred) + res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0, + utf8string + count, + parsed_nodes); + if (res_code != 0 || xmlerrcxt->err_occurred) { - xmlFreeNodeList(node_list); xml_errsave(escontext, xmlerrcxt, ERRCODE_INVALID_XML_CONTENT, "invalid XML content"); goto fail; } - - if (parsed_nodes != NULL) - *parsed_nodes = node_list; - else - xmlFreeNodeList(node_list); } } @@ -1961,6 +1940,8 @@ fail: } PG_CATCH(); { + if (save_keep_blanks != -1) + xmlKeepBlanksDefault(save_keep_blanks); if (doc != NULL) xmlFreeDoc(doc); if (ctxt != NULL) @@ -1972,6 +1953,9 @@ fail: } PG_END_TRY(); + if (save_keep_blanks != -1) + xmlKeepBlanksDefault(save_keep_blanks); + if (ctxt != NULL) xmlFreeParserCtxt(ctxt); diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c index ce596bf5638..b9d5a5998be 100644 --- a/src/backend/utils/cache/evtcache.c +++ b/src/backend/utils/cache/evtcache.c @@ -78,7 +78,6 @@ BuildEventTriggerCache(void) { HASHCTL ctl; HTAB *cache; - MemoryContext oldcontext; Relation rel; Relation irel; SysScanDesc scan; @@ -110,9 +109,6 @@ BuildEventTriggerCache(void) (Datum) 0); } - /* Switch to correct memory context. 
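[Annotation] In CONTENT mode the patch now parses with xmlParseBalancedChunkMemory(), which honors the library-global whitespace setting rather than per-call options, so xmlKeepBlanksDefault() must be saved and restored on every exit path (including the PG_CATCH one). A trimmed sketch of that save/restore discipline, with the surrounding error machinery omitted:

#include <libxml/parser.h>

/*
 * xmlKeepBlanksDefault() mutates a libxml2 global and returns the old
 * value; restore it no matter how parsing ends.  Error recovery and the
 * empty-content special case are left out of this sketch.
 */
int
parse_content(xmlDocPtr doc, const xmlChar *chunk, xmlNodePtr *nodes,
              int preserve_whitespace)
{
    int     save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0);
    int     res = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0, chunk, nodes);

    xmlKeepBlanksDefault(save_keep_blanks); /* restore the global */
    return res;
}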
*/ - oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext); - /* Prevent the memory context from being nuked while we're rebuilding. */ EventTriggerCacheState = ETCS_REBUILD_STARTED; @@ -145,6 +141,7 @@ BuildEventTriggerCache(void) bool evttags_isnull; EventTriggerCacheEntry *entry; bool found; + MemoryContext oldcontext; /* Get next tuple. */ tup = systable_getnext_ordered(scan, ForwardScanDirection); @@ -171,6 +168,9 @@ BuildEventTriggerCache(void) else continue; + /* Switch to correct memory context. */ + oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext); + /* Allocate new cache item. */ item = palloc0(sizeof(EventTriggerCacheItem)); item->fnoid = form->evtfoid; @@ -188,6 +188,9 @@ BuildEventTriggerCache(void) entry->triggerlist = lappend(entry->triggerlist, item); else entry->triggerlist = list_make1(item); + + /* Restore previous memory context. */ + MemoryContextSwitchTo(oldcontext); } /* Done with pg_event_trigger scan. */ @@ -195,9 +198,6 @@ BuildEventTriggerCache(void) index_close(irel, AccessShareLock); relation_close(rel, AccessShareLock); - /* Restore previous memory context. */ - MemoryContextSwitchTo(oldcontext); - /* Install new cache. */ EventTriggerCache = cache; @@ -240,6 +240,8 @@ DecodeTextArrayToBitmapset(Datum array) } pfree(elems); + if ((Pointer) arr != DatumGetPointer(array)) + pfree(arr); return bms; } diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index f4d2b9458a5..6661d2c6b73 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -463,8 +463,7 @@ CompleteCachedPlan(CachedPlanSource *plansource, /* * Save the final parameter types (or other parameter specification data) - * into the source_context, as well as our other parameters. Also save - * the result tuple descriptor. + * into the source_context, as well as our other parameters. */ MemoryContextSwitchTo(source_context); @@ -480,9 +479,25 @@ CompleteCachedPlan(CachedPlanSource *plansource, plansource->parserSetupArg = parserSetupArg; plansource->cursor_options = cursor_options; plansource->fixed_result = fixed_result; - plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list); + /* + * Also save the result tuple descriptor. PlanCacheComputeResultDesc may + * leak some cruft; normally we just accept that to save a copy step, but + * in USE_VALGRIND mode be tidy by running it in the caller's context. + */ +#ifdef USE_VALGRIND + MemoryContextSwitchTo(oldcxt); + plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list); + if (plansource->resultDesc) + { + MemoryContextSwitchTo(source_context); + plansource->resultDesc = CreateTupleDescCopy(plansource->resultDesc); + MemoryContextSwitchTo(oldcxt); + } +#else + plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list); MemoryContextSwitchTo(oldcxt); +#endif plansource->is_complete = true; plansource->is_valid = true; @@ -1390,7 +1405,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams, { PlannedStmt *pstmt = (PlannedStmt *) lfirst(lc); - pstmt->cached_plan_type = customplan ? PLAN_CACHE_CUSTOM : PLAN_CACHE_GENERIC; + pstmt->planOrigin = customplan ? 
PLAN_STMT_CACHE_CUSTOM : PLAN_STMT_CACHE_GENERIC; } return plan; diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c index 18cccd778fd..e8ae53238d0 100644 --- a/src/backend/utils/cache/ts_cache.c +++ b/src/backend/utils/cache/ts_cache.c @@ -321,7 +321,9 @@ lookup_ts_dictionary_cache(Oid dictId) /* * Init method runs in dictionary's private memory context, and we - * make sure the options are stored there too + * make sure the options are stored there too. This typically + * results in a small amount of memory leakage, but it's not worth + * complicating the API for tmplinit functions to avoid it. */ oldcontext = MemoryContextSwitchTo(entry->dictCtx); diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c index f9aec38a11f..6a347698edf 100644 --- a/src/backend/utils/cache/typcache.c +++ b/src/backend/utils/cache/typcache.c @@ -1171,9 +1171,6 @@ load_domaintype_info(TypeCacheEntry *typentry) elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin", NameStr(typTup->typname), NameStr(c->conname)); - /* Convert conbin to C string in caller context */ - constring = TextDatumGetCString(val); - /* Create the DomainConstraintCache object and context if needed */ if (dcc == NULL) { @@ -1189,9 +1186,8 @@ load_domaintype_info(TypeCacheEntry *typentry) dcc->dccRefCount = 0; } - /* Create node trees in DomainConstraintCache's context */ - oldcxt = MemoryContextSwitchTo(dcc->dccContext); - + /* Convert conbin to a node tree, still in caller's context */ + constring = TextDatumGetCString(val); check_expr = (Expr *) stringToNode(constring); /* @@ -1206,10 +1202,13 @@ load_domaintype_info(TypeCacheEntry *typentry) */ check_expr = expression_planner(check_expr); + /* Create only the minimally needed stuff in dccContext */ + oldcxt = MemoryContextSwitchTo(dcc->dccContext); + r = makeNode(DomainConstraintState); r->constrainttype = DOM_CONSTRAINT_CHECK; r->name = pstrdup(NameStr(c->conname)); - r->check_expr = check_expr; + r->check_expr = copyObject(check_expr); r->check_exprstate = NULL; MemoryContextSwitchTo(oldcxt); diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index 1ad155d446e..81da03629f0 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -22,10 +22,11 @@ * lookup key's hash value as a partition number --- this will work because * of the way calc_bucket() maps hash values to bucket numbers. * - * For hash tables in shared memory, the memory allocator function should - * match malloc's semantics of returning NULL on failure. For hash tables - * in local memory, we typically use palloc() which will throw error on - * failure. The code in this file has to cope with both cases. + * The memory allocator function should match malloc's semantics of returning + * NULL on failure. (This is essential for hash tables in shared memory. + * For hash tables in local memory, we used to use palloc() which will throw + * error on failure; but we no longer do, so it's untested whether this + * module will still cope with that behavior.) 
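[Annotation] A recurring theme in the evtcache and typcache changes above is shrinking the window during which a long-lived context is current: do scratch work in the caller's context, and switch only to copy the data that must survive. A hedged sketch of that discipline, assuming a backend environment; the CacheItem type is hypothetical.

#include "postgres.h"
#include "utils/memutils.h"

typedef struct CacheItem
{
    int         value;
} CacheItem;

/*
 * Build scratch data in the caller's (short-lived) context, then enter
 * the cache context only for the copy that must survive.  An error
 * raised before the switch leaks nothing into the long-lived context.
 */
CacheItem *
build_cache_item(MemoryContext cachecxt, int value)
{
    CacheItem   scratch;
    MemoryContext oldcontext;
    CacheItem  *item;

    scratch.value = value * 2;  /* pretend this work is expensive or can error */

    oldcontext = MemoryContextSwitchTo(cachecxt);
    item = palloc0(sizeof(CacheItem));
    *item = scratch;
    MemoryContextSwitchTo(oldcontext);  /* restore promptly */

    return item;
}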
* * dynahash.c provides support for these types of lookup keys: * @@ -98,6 +99,7 @@ #include "access/xact.h" #include "common/hashfn.h" +#include "lib/ilist.h" #include "port/pg_bitutils.h" #include "storage/shmem.h" #include "storage/spin.h" @@ -195,6 +197,7 @@ struct HASHHDR long ssize; /* segment size --- must be power of 2 */ int sshift; /* segment shift = log2(ssize) */ int nelem_alloc; /* number of entries to allocate at once */ + bool isfixed; /* if true, don't enlarge */ #ifdef HASH_STATISTICS @@ -227,7 +230,6 @@ struct HTAB MemoryContext hcxt; /* memory context if default allocator used */ char *tabname; /* table name (for error messages) */ bool isshared; /* true if table is in shared memory */ - bool isfixed; /* if true, don't enlarge */ /* freezing a shared table isn't allowed, so we can keep state here */ bool frozen; /* true = no more inserts allowed */ @@ -236,6 +238,16 @@ struct HTAB Size keysize; /* hash key length in bytes */ long ssize; /* segment size --- must be power of 2 */ int sshift; /* segment shift = log2(ssize) */ + + /* + * In a USE_VALGRIND build, non-shared hashtables keep an slist chain of + * all the element blocks they have allocated. This pacifies Valgrind, + * which would otherwise often claim that the element blocks are "possibly + * lost" for lack of any non-interior pointers to their starts. + */ +#ifdef USE_VALGRIND + slist_head element_blocks; +#endif }; /* @@ -618,8 +630,10 @@ hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags) } } + /* Set isfixed if requested, but not till after we build initial entries */ if (flags & HASH_FIXED_SIZE) - hashp->isfixed = true; + hctl->isfixed = true; + return hashp; } @@ -644,6 +658,8 @@ hdefault(HTAB *hashp) hctl->ssize = DEF_SEGSIZE; hctl->sshift = DEF_SEGSIZE_SHIFT; + hctl->isfixed = false; /* can be enlarged */ + #ifdef HASH_STATISTICS hctl->accesses = hctl->collisions = 0; #endif @@ -1708,23 +1724,51 @@ element_alloc(HTAB *hashp, int nelem, int freelist_idx) { HASHHDR *hctl = hashp->hctl; Size elementSize; + Size requestSize; + char *allocedBlock; HASHELEMENT *firstElement; HASHELEMENT *tmpElement; HASHELEMENT *prevElement; int i; - if (hashp->isfixed) + if (hctl->isfixed) return false; /* Each element has a HASHELEMENT header plus user data. */ elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize); + requestSize = nelem * elementSize; + + /* Add space for slist_node list link if we need one. */ +#ifdef USE_VALGRIND + if (!hashp->isshared) + requestSize += MAXALIGN(sizeof(slist_node)); +#endif + + /* Allocate the memory. */ CurrentDynaHashCxt = hashp->hcxt; - firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize); + allocedBlock = hashp->alloc(requestSize); - if (!firstElement) + if (!allocedBlock) return false; + /* + * If USE_VALGRIND, each allocated block of elements of a non-shared + * hashtable is chained into a list, so that Valgrind won't think it's + * been leaked. 
+ */ +#ifdef USE_VALGRIND + if (hashp->isshared) + firstElement = (HASHELEMENT *) allocedBlock; + else + { + slist_push_head(&hashp->element_blocks, (slist_node *) allocedBlock); + firstElement = (HASHELEMENT *) (allocedBlock + MAXALIGN(sizeof(slist_node))); + } +#else + firstElement = (HASHELEMENT *) allocedBlock; +#endif + /* prepare to link all the new entries into the freelist */ prevElement = NULL; tmpElement = firstElement; diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index 43b4dbccc3d..65d8cbfaed5 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -1183,7 +1183,6 @@ UnlinkLockFiles(int status, Datum arg) /* Should we complain if the unlink fails? */ } /* Since we're about to exit, no need to reclaim storage */ - lock_files = NIL; /* * Lock file removal should always be the last externally visible action diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 667df448732..e404c345e6e 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -249,6 +249,7 @@ static void reapply_stacked_values(struct config_generic *variable, const char *curvalue, GucContext curscontext, GucSource cursource, Oid cursrole); +static void free_placeholder(struct config_string *pHolder); static bool validate_option_array_item(const char *name, const char *value, bool skipIfNoPermissions); static void write_auto_conf_file(int fd, const char *filename, ConfigVariable *head); @@ -4722,8 +4723,13 @@ AlterSystemSetConfigFile(AlterSystemStmt *altersysstmt) * the config file cannot cause postmaster start to fail, so we * don't have to be too tense about possibly installing a bad * value.) + * + * As an exception, we skip this check if this is a RESET command + * for an unknown custom GUC, else there'd be no way for users to + * remove such settings with reserved prefixes. */ - (void) assignable_custom_variable_name(name, false, ERROR); + if (value || !valid_custom_variable_name(name)) + (void) assignable_custom_variable_name(name, false, ERROR); } /* @@ -5018,16 +5024,8 @@ define_custom_variable(struct config_generic *variable) set_config_sourcefile(name, pHolder->gen.sourcefile, pHolder->gen.sourceline); - /* - * Free up as much as we conveniently can of the placeholder structure. - * (This neglects any stack items, so it's possible for some memory to be - * leaked. Since this can only happen once per session per variable, it - * doesn't seem worth spending much code on.) - */ - set_string_field(pHolder, pHolder->variable, NULL); - set_string_field(pHolder, &pHolder->reset_val, NULL); - - guc_free(pHolder); + /* Now we can free the no-longer-referenced placeholder variable */ + free_placeholder(pHolder); } /* @@ -5127,6 +5125,25 @@ reapply_stacked_values(struct config_generic *variable, } /* + * Free up a no-longer-referenced placeholder GUC variable. + * + * This neglects any stack items, so it's possible for some memory to be + * leaked. Since this can only happen once per session per variable, it + * doesn't seem worth spending much code on. + */ +static void +free_placeholder(struct config_string *pHolder) +{ + /* Placeholders are always STRING type, so free their values */ + Assert(pHolder->gen.vartype == PGC_STRING); + set_string_field(pHolder, pHolder->variable, NULL); + set_string_field(pHolder, &pHolder->reset_val, NULL); + + guc_free(unconstify(char *, pHolder->gen.name)); + guc_free(pHolder); +} + +/* * Functions for extensions to call to define their custom GUC variables. 
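[Annotation] The dynahash change above prepends an slist_node to each element block of a non-shared table (in USE_VALGRIND builds) so that a non-interior pointer to the block's start always stays reachable. A standalone model of the trick; PostgreSQL's slist is replaced here by a plain singly linked node, and alignment padding (cf. MAXALIGN) is omitted.

#include <stdlib.h>

/* Stand-in for slist_node: one forward pointer at the block's start. */
typedef struct BlockLink
{
    struct BlockLink *next;
} BlockLink;

typedef struct Table
{
    BlockLink  *element_blocks; /* chain of all blocks ever allocated */
} Table;

/*
 * Allocate a block with a hidden list link in front of the payload.
 * Chaining the links from the Table keeps a pointer to every block's
 * start alive, so a leak checker scanning from roots can find them.
 */
void *
alloc_element_block(Table *tab, size_t payload_size)
{
    BlockLink  *link = malloc(sizeof(BlockLink) + payload_size);

    if (link == NULL)
        return NULL;
    link->next = tab->element_blocks;
    tab->element_blocks = link;                 /* push onto the chain */
    return (char *) link + sizeof(BlockLink);   /* payload follows the link */
}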
*/ void @@ -5286,9 +5303,7 @@ MarkGUCPrefixReserved(const char *className) /* * Check for existing placeholders. We must actually remove invalid - * placeholders, else future parallel worker startups will fail. (We - * don't bother trying to free associated memory, since this shouldn't - * happen often.) + * placeholders, else future parallel worker startups will fail. */ hash_seq_init(&status, guc_hashtab); while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL) @@ -5312,6 +5327,8 @@ MarkGUCPrefixReserved(const char *className) NULL); /* Remove it from any lists it's in, too */ RemoveGUCFromLists(var); + /* And free it */ + free_placeholder((struct config_string *) var); } } @@ -6711,6 +6728,7 @@ validate_option_array_item(const char *name, const char *value, { struct config_generic *gconf; + bool reset_custom; /* * There are three cases to consider: @@ -6729,16 +6747,21 @@ validate_option_array_item(const char *name, const char *value, * it's assumed to be fully validated.) * * name is not known and can't be created as a placeholder. Throw error, - * unless skipIfNoPermissions is true, in which case return false. + * unless skipIfNoPermissions or reset_custom is true. If reset_custom is + * true, this is a RESET or RESET ALL operation for an unknown custom GUC + * with a reserved prefix, in which case we want to fall through to the + * placeholder case described in the preceding paragraph (else there'd be + * no way for users to remove them). Otherwise, return false. */ - gconf = find_option(name, true, skipIfNoPermissions, ERROR); - if (!gconf) + reset_custom = (!value && valid_custom_variable_name(name)); + gconf = find_option(name, true, skipIfNoPermissions || reset_custom, ERROR); + if (!gconf && !reset_custom) { /* not known, failed to make a placeholder */ return false; } - if (gconf->flags & GUC_CUSTOM_PLACEHOLDER) + if (!gconf || gconf->flags & GUC_CUSTOM_PLACEHOLDER) { /* * We cannot do any meaningful check on the value, so only permissions diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c index e08b26e8c14..4df25944deb 100644 --- a/src/backend/utils/misc/ps_status.c +++ b/src/backend/utils/misc/ps_status.c @@ -100,6 +100,17 @@ static void flush_ps_display(void); static int save_argc; static char **save_argv; +/* + * Valgrind seems not to consider the global "environ" variable as a valid + * root pointer; so when we allocate a new environment array, it claims that + * data is leaked. To fix that, keep our own statically-allocated copy of the + * pointer. (Oddly, this doesn't seem to be a problem for "argv".) + */ +#if defined(PS_USE_CLOBBER_ARGV) && defined(USE_VALGRIND) +extern char **ps_status_new_environ; +char **ps_status_new_environ; +#endif + /* * Call this early in startup to save the original argc/argv values. @@ -206,6 +217,11 @@ save_ps_display_args(int argc, char **argv) } new_environ[i] = NULL; environ = new_environ; + + /* See notes about Valgrind above. */ +#ifdef USE_VALGRIND + ps_status_new_environ = new_environ; +#endif } /* diff --git a/src/backend/utils/mmgr/alignedalloc.c b/src/backend/utils/mmgr/alignedalloc.c index 7eea695de62..b1be7426914 100644 --- a/src/backend/utils/mmgr/alignedalloc.c +++ b/src/backend/utils/mmgr/alignedalloc.c @@ -45,6 +45,15 @@ AlignedAllocFree(void *pointer) GetMemoryChunkContext(unaligned)->name, chunk); #endif + /* + * Create a dummy vchunk covering the start of the unaligned chunk, but + * not overlapping the aligned chunk. 
This will be freed while pfree'ing + * the unaligned chunk, keeping Valgrind happy. Then when we return to + * the outer pfree, that will clean up the vchunk for the aligned chunk. + */ + VALGRIND_MEMPOOL_ALLOC(GetMemoryChunkContext(unaligned), unaligned, + (char *) pointer - (char *) unaligned); + /* Recursively pfree the unaligned chunk */ pfree(unaligned); } @@ -123,6 +132,15 @@ AlignedAllocRealloc(void *pointer, Size size, int flags) VALGRIND_MAKE_MEM_DEFINED(pointer, old_size); memcpy(newptr, pointer, Min(size, old_size)); + /* + * Create a dummy vchunk covering the start of the old unaligned chunk, + * but not overlapping the aligned chunk. This will be freed while + * pfree'ing the old unaligned chunk, keeping Valgrind happy. Then when + * we return to repalloc, it will move the vchunk for the aligned chunk. + */ + VALGRIND_MEMPOOL_ALLOC(ctx, unaligned, + (char *) pointer - (char *) unaligned); + pfree(unaligned); return newptr; diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c index 666ecd8f78d..9ef109ca586 100644 --- a/src/backend/utils/mmgr/aset.c +++ b/src/backend/utils/mmgr/aset.c @@ -103,6 +103,8 @@ #define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData)) #define ALLOC_CHUNKHDRSZ sizeof(MemoryChunk) +#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(AllocSetContext)) + \ + ALLOC_BLOCKHDRSZ) typedef struct AllocBlockData *AllocBlock; /* forward reference */ @@ -458,6 +460,21 @@ AllocSetContextCreateInternal(MemoryContext parent, * we'd leak the header/initial block if we ereport in this stretch. */ + /* Create a vpool associated with the context */ + VALGRIND_CREATE_MEMPOOL(set, 0, false); + + /* + * Create a vchunk covering both the AllocSetContext struct and the keeper + * block's header. (Perhaps it would be more sensible for these to be two + * separate vchunks, but doing that seems to tickle bugs in some versions + * of Valgrind.) We must have these vchunks, and also a vchunk for each + * subsequently-added block header, so that Valgrind considers the + * pointers within them while checking for leaked memory. Note that + * Valgrind doesn't distinguish between these vchunks and those created by + * mcxt.c for the user-accessible-data chunks we allocate. + */ + VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ); + /* Fill in the initial block's block header */ block = KeeperBlock(set); block->aset = set; @@ -585,6 +602,14 @@ AllocSetReset(MemoryContext context) #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, block->freeptr - ((char *) block)); #endif + + /* + * We need to free the block header's vchunk explicitly, although + * the user-data vchunks within will go away in the TRIM below. + * Otherwise Valgrind complains about leaked allocations. + */ + VALGRIND_MEMPOOL_FREE(set, block); + free(block); } block = next; @@ -592,6 +617,14 @@ AllocSetReset(MemoryContext context) Assert(context->mem_allocated == keepersize); + /* + * Instruct Valgrind to throw away all the vchunks associated with this + * context, except for the one covering the AllocSetContext and + * keeper-block header. This gets rid of the vchunks for whatever user + * data is getting discarded by the context reset. 
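[Annotation] The aset.c comments above lay out the scheme: one Valgrind "vpool" per memory context, a "vchunk" covering the context struct plus keeper-block header, one more vchunk per block header, and a trim back to just that first vchunk at reset. The client requests involved are ordinary macros from valgrind/valgrind.h and are no-ops when not running under Valgrind. A compilable miniature of the lifecycle, assuming the Valgrind headers are installed; the Context layout is a hypothetical stand-in for AllocSetContext.

#include <stdlib.h>
#include <valgrind/valgrind.h>

/* Hypothetical stand-in for AllocSetContext plus its keeper header. */
typedef struct Context
{
    char        header[64];
    char       *block;          /* one separately malloc'd block */
} Context;

int
main(void)
{
    Context    *cxt = malloc(sizeof(Context));

    /* One vpool per context; a vchunk must cover the header itself. */
    VALGRIND_CREATE_MEMPOOL(cxt, 0, 0);
    VALGRIND_MEMPOOL_ALLOC(cxt, cxt, sizeof(cxt->header));

    /* Each block header needs a vchunk so its pointers get scanned. */
    cxt->block = malloc(8192);
    VALGRIND_MEMPOOL_ALLOC(cxt, cxt->block, 64);

    /* Reset: free block-header vchunks, then trim away user-data vchunks. */
    VALGRIND_MEMPOOL_FREE(cxt, cxt->block);
    free(cxt->block);
    VALGRIND_MEMPOOL_TRIM(cxt, cxt, sizeof(cxt->header));

    /* Delete: destroying the vpool discards any remaining vchunks. */
    VALGRIND_DESTROY_MEMPOOL(cxt);
    free(cxt);
    return 0;
}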
+ */ + VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ); + /* Reset block size allocation sequence, too */ set->nextBlockSize = set->initBlockSize; } @@ -648,6 +681,9 @@ AllocSetDelete(MemoryContext context) freelist->first_free = (AllocSetContext *) oldset->header.nextchild; freelist->num_free--; + /* Destroy the context's vpool --- see notes below */ + VALGRIND_DESTROY_MEMPOOL(oldset); + /* All that remains is to free the header/initial block */ free(oldset); } @@ -675,13 +711,24 @@ AllocSetDelete(MemoryContext context) #endif if (!IsKeeperBlock(set, block)) + { + /* As in AllocSetReset, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(set, block); free(block); + } block = next; } Assert(context->mem_allocated == keepersize); + /* + * Destroy the vpool. We don't seem to need to explicitly free the + * initial block's header vchunk, nor any user-data vchunks that Valgrind + * still knows about; they'll all go away automatically. + */ + VALGRIND_DESTROY_MEMPOOL(set); + /* Finally, free the context header, including the keeper block */ free(set); } @@ -716,6 +763,9 @@ AllocSetAllocLarge(MemoryContext context, Size size, int flags) if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ); + context->mem_allocated += blksize; block->aset = set; @@ -922,6 +972,9 @@ AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags, if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ); + context->mem_allocated += blksize; block->aset = set; @@ -1104,6 +1157,10 @@ AllocSetFree(void *pointer) #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, block->freeptr - ((char *) block)); #endif + + /* As in AllocSetReset, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(set, block); + free(block); } else @@ -1184,6 +1241,7 @@ AllocSetRealloc(void *pointer, Size size, int flags) * realloc() to make the containing block bigger, or smaller, with * minimum space wastage. */ + AllocBlock newblock; Size chksize; Size blksize; Size oldblksize; @@ -1223,14 +1281,21 @@ AllocSetRealloc(void *pointer, Size size, int flags) blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ; oldblksize = block->endptr - ((char *) block); - block = (AllocBlock) realloc(block, blksize); - if (block == NULL) + newblock = (AllocBlock) realloc(block, blksize); + if (newblock == NULL) { /* Disallow access to the chunk header. */ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ); return MemoryContextAllocationFailure(&set->header, size, flags); } + /* + * Move the block-header vchunk explicitly. (mcxt.c will take care of + * moving the vchunk for the user data.) + */ + VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ); + block = newblock; + /* updated separately, not to underflow when (oldblksize > blksize) */ set->header.mem_allocated -= oldblksize; set->header.mem_allocated += blksize; @@ -1294,7 +1359,7 @@ AllocSetRealloc(void *pointer, Size size, int flags) /* Ensure any padding bytes are marked NOACCESS. */ VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size); - /* Disallow access to the chunk header . */ + /* Disallow access to the chunk header. 
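[Annotation] AllocSetRealloc above now keeps the old block pointer intact until realloc() succeeds, then uses VALGRIND_MEMPOOL_CHANGE to move the block-header vchunk to the new address. The move in isolation; "pool" and "hdrsize" are placeholders for the context and its header size.

#include <stdlib.h>
#include <valgrind/valgrind.h>

/*
 * Grow a block while keeping Valgrind's view consistent: on success,
 * move the header vchunk from the old address to the new one.
 */
char *
resize_block(void *pool, char *block, size_t newsize, size_t hdrsize)
{
    char       *newblock = realloc(block, newsize);

    if (newblock == NULL)
        return NULL;            /* old block still valid; vchunk unmoved */

    VALGRIND_MEMPOOL_CHANGE(pool, block, newblock, hdrsize);
    return newblock;
}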
*/ VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ); return pointer; diff --git a/src/backend/utils/mmgr/bump.c b/src/backend/utils/mmgr/bump.c index f7a37d1b3e8..2805d55a2ec 100644 --- a/src/backend/utils/mmgr/bump.c +++ b/src/backend/utils/mmgr/bump.c @@ -45,7 +45,9 @@ #include "utils/memutils_memorychunk.h" #include "utils/memutils_internal.h" -#define Bump_BLOCKHDRSZ MAXALIGN(sizeof(BumpBlock)) +#define Bump_BLOCKHDRSZ MAXALIGN(sizeof(BumpBlock)) +#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(BumpContext)) + \ + Bump_BLOCKHDRSZ) /* No chunk header unless built with MEMORY_CONTEXT_CHECKING */ #ifdef MEMORY_CONTEXT_CHECKING @@ -189,6 +191,12 @@ BumpContextCreate(MemoryContext parent, const char *name, Size minContextSize, * Avoid writing code that can fail between here and MemoryContextCreate; * we'd leak the header and initial block if we ereport in this stretch. */ + + /* See comments about Valgrind interactions in aset.c */ + VALGRIND_CREATE_MEMPOOL(set, 0, false); + /* This vchunk covers the BumpContext and the keeper block header */ + VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ); + dlist_init(&set->blocks); /* Fill in the initial block's block header */ @@ -262,6 +270,14 @@ BumpReset(MemoryContext context) BumpBlockFree(set, block); } + /* + * Instruct Valgrind to throw away all the vchunks associated with this + * context, except for the one covering the BumpContext and keeper-block + * header. This gets rid of the vchunks for whatever user data is getting + * discarded by the context reset. + */ + VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ); + /* Reset block size allocation sequence, too */ set->nextBlockSize = set->initBlockSize; @@ -279,6 +295,10 @@ BumpDelete(MemoryContext context) { /* Reset to release all releasable BumpBlocks */ BumpReset(context); + + /* Destroy the vpool -- see notes in aset.c */ + VALGRIND_DESTROY_MEMPOOL(context); + /* And free the context header and keeper block */ free(context); } @@ -318,6 +338,9 @@ BumpAllocLarge(MemoryContext context, Size size, int flags) if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ); + context->mem_allocated += blksize; /* the block is completely full */ @@ -455,6 +478,9 @@ BumpAllocFromNewBlock(MemoryContext context, Size size, int flags, if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ); + context->mem_allocated += blksize; /* initialize the new block */ @@ -606,6 +632,9 @@ BumpBlockFree(BumpContext *set, BumpBlock *block) wipe_mem(block, ((char *) block->endptr - (char *) block)); #endif + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(set, block); + free(block); } diff --git a/src/backend/utils/mmgr/generation.c b/src/backend/utils/mmgr/generation.c index 18679ad4f1e..cfafc9bf082 100644 --- a/src/backend/utils/mmgr/generation.c +++ b/src/backend/utils/mmgr/generation.c @@ -45,6 +45,8 @@ #define Generation_BLOCKHDRSZ MAXALIGN(sizeof(GenerationBlock)) #define Generation_CHUNKHDRSZ sizeof(MemoryChunk) +#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(GenerationContext)) + \ + Generation_BLOCKHDRSZ) #define Generation_CHUNK_FRACTION 8 @@ -221,6 +223,12 @@ GenerationContextCreate(MemoryContext parent, * Avoid writing code that can fail between here and MemoryContextCreate; * we'd leak the header if we ereport in this 
stretch. */ + + /* See comments about Valgrind interactions in aset.c */ + VALGRIND_CREATE_MEMPOOL(set, 0, false); + /* This vchunk covers the GenerationContext and the keeper block header */ + VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ); + dlist_init(&set->blocks); /* Fill in the initial block's block header */ @@ -309,6 +317,14 @@ GenerationReset(MemoryContext context) GenerationBlockFree(set, block); } + /* + * Instruct Valgrind to throw away all the vchunks associated with this + * context, except for the one covering the GenerationContext and + * keeper-block header. This gets rid of the vchunks for whatever user + * data is getting discarded by the context reset. + */ + VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ); + /* set it so new allocations to make use of the keeper block */ set->block = KeeperBlock(set); @@ -329,6 +345,10 @@ GenerationDelete(MemoryContext context) { /* Reset to release all releasable GenerationBlocks */ GenerationReset(context); + + /* Destroy the vpool -- see notes in aset.c */ + VALGRIND_DESTROY_MEMPOOL(context); + /* And free the context header and keeper block */ free(context); } @@ -365,6 +385,9 @@ GenerationAllocLarge(MemoryContext context, Size size, int flags) if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ); + context->mem_allocated += blksize; /* block with a single (used) chunk */ @@ -487,6 +510,9 @@ GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags, if (block == NULL) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ); + context->mem_allocated += blksize; /* initialize the new block */ @@ -677,6 +703,9 @@ GenerationBlockFree(GenerationContext *set, GenerationBlock *block) wipe_mem(block, block->blksize); #endif + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(set, block); + free(block); } diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index 15fa4d0a55e..47fd774c7d2 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -8,6 +8,23 @@ * context-type-specific operations via the function pointers in a * context's MemoryContextMethods struct. * + * A note about Valgrind support: when USE_VALGRIND is defined, we provide + * support for memory leak tracking at the allocation-unit level. Valgrind + * does leak detection by tracking allocated "chunks", which can be grouped + * into "pools". The "chunk" terminology is overloaded, since we use that + * word for our allocation units, and it's sometimes important to distinguish + * those from the Valgrind objects that describe them. To reduce confusion, + * let's use the terms "vchunk" and "vpool" for the Valgrind objects. + * + * We use a separate vpool for each memory context. The context-type-specific + * code is responsible for creating and deleting the vpools, and also for + * creating vchunks to cover its management data structures such as block + * headers. (There must be a vchunk that includes every pointer we want + * Valgrind to consider for leak-tracking purposes.) This module creates + * and deletes the vchunks that cover the caller-visible allocated chunks. + * However, the context-type-specific code must handle cleaning up those + * vchunks too during memory context reset operations. 
+ * * * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -418,8 +435,6 @@ MemoryContextResetOnly(MemoryContext context) context->methods->reset(context); context->isReset = true; - VALGRIND_DESTROY_MEMPOOL(context); - VALGRIND_CREATE_MEMPOOL(context, 0, false); } } @@ -526,8 +541,6 @@ MemoryContextDeleteOnly(MemoryContext context) context->ident = NULL; context->methods->delete_context(context); - - VALGRIND_DESTROY_MEMPOOL(context); } /* @@ -560,9 +573,7 @@ MemoryContextDeleteChildren(MemoryContext context) * the specified context, since that means it will automatically be freed * when no longer needed. * - * There is no API for deregistering a callback once registered. If you - * want it to not do anything anymore, adjust the state pointed to by its - * "arg" to indicate that. + * Note that callers can assume this cannot fail. */ void MemoryContextRegisterResetCallback(MemoryContext context, @@ -578,6 +589,41 @@ MemoryContextRegisterResetCallback(MemoryContext context, } /* + * MemoryContextUnregisterResetCallback + * Undo the effects of MemoryContextRegisterResetCallback. + * + * This can be used if a callback's effects are no longer required + * at some point before the context has been reset/deleted. It is the + * caller's responsibility to pfree the callback struct (if needed). + * + * An assertion failure occurs if the callback was not registered. + * We could alternatively define that case as a no-op, but that seems too + * likely to mask programming errors such as passing the wrong context. + */ +void +MemoryContextUnregisterResetCallback(MemoryContext context, + MemoryContextCallback *cb) +{ + MemoryContextCallback *prev, + *cur; + + Assert(MemoryContextIsValid(context)); + + for (prev = NULL, cur = context->reset_cbs; cur != NULL; + prev = cur, cur = cur->next) + { + if (cur != cb) + continue; + if (prev) + prev->next = cur->next; + else + context->reset_cbs = cur->next; + return; + } + Assert(false); +} + +/* * MemoryContextCallResetCallbacks * Internal function to call all registered callbacks for context. */ @@ -1137,8 +1183,6 @@ MemoryContextCreate(MemoryContext node, node->nextchild = NULL; node->allowInCritSection = false; } - - VALGRIND_CREATE_MEMPOOL(node, 0, false); } /* @@ -1421,7 +1465,13 @@ MemoryContextAllocAligned(MemoryContext context, void *unaligned; void *aligned; - /* wouldn't make much sense to waste that much space */ + /* + * Restrict alignto to ensure that it can fit into the "value" field of + * the redirection MemoryChunk, and that the distance back to the start of + * the unaligned chunk will fit into the space available for that. This + * isn't a limitation in practice, since it wouldn't make much sense to + * waste that much space. + */ Assert(alignto < (128 * 1024 * 1024)); /* ensure alignto is a power of 2 */ @@ -1458,10 +1508,15 @@ MemoryContextAllocAligned(MemoryContext context, alloc_size += 1; #endif - /* perform the actual allocation */ - unaligned = MemoryContextAllocExtended(context, alloc_size, flags); + /* + * Perform the actual allocation, but do not pass down MCXT_ALLOC_ZERO. + * This ensures that wasted bytes beyond the aligned chunk do not become + * DEFINED. 
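[Annotation] MemoryContextUnregisterResetCallback() is new with this patch; callers that previously had to neuter a callback by adjusting the state behind its "arg" pointer can now remove it outright. A usage sketch, assuming a backend environment and that the declaration is exported alongside the register function; the MyState type and its refcount are hypothetical.

#include "postgres.h"
#include "utils/memutils.h"

typedef struct MyState
{
    int         refcount;
    MemoryContextCallback cb;   /* must live as long as it is registered */
} MyState;

static void
my_reset_cb(void *arg)
{
    ((MyState *) arg)->refcount = 0;
}

void
demo(MemoryContext cxt)
{
    MyState    *state = MemoryContextAlloc(cxt, sizeof(MyState));

    state->refcount = 1;
    state->cb.func = my_reset_cb;
    state->cb.arg = state;
    MemoryContextRegisterResetCallback(cxt, &state->cb);

    /* ... work that might finish before the context is reset ... */

    /* Cleanup no longer needed: deregister, then free the struct. */
    MemoryContextUnregisterResetCallback(cxt, &state->cb);
    pfree(state);
}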
+ */ + unaligned = MemoryContextAllocExtended(context, alloc_size, + flags & ~MCXT_ALLOC_ZERO); - /* set the aligned pointer */ + /* compute the aligned pointer */ aligned = (void *) TYPEALIGN(alignto, (char *) unaligned + sizeof(MemoryChunk)); @@ -1489,12 +1544,23 @@ MemoryContextAllocAligned(MemoryContext context, set_sentinel(aligned, size); #endif - /* Mark the bytes before the redirection header as noaccess */ - VALGRIND_MAKE_MEM_NOACCESS(unaligned, - (char *) alignedchunk - (char *) unaligned); + /* + * MemoryContextAllocExtended marked the whole unaligned chunk as a + * vchunk. Undo that, instead making just the aligned chunk be a vchunk. + * This prevents Valgrind from complaining that the vchunk is possibly + * leaked, since only pointers to the aligned chunk will exist. + * + * After these calls, the aligned chunk will be marked UNDEFINED, and all + * the rest of the unaligned chunk (the redirection chunk header, the + * padding bytes before it, and any wasted trailing bytes) will be marked + * NOACCESS, which is what we want. + */ + VALGRIND_MEMPOOL_FREE(context, unaligned); + VALGRIND_MEMPOOL_ALLOC(context, aligned, size); - /* Disallow access to the redirection chunk header. */ - VALGRIND_MAKE_MEM_NOACCESS(alignedchunk, sizeof(MemoryChunk)); + /* Now zero (and make DEFINED) just the aligned chunk, if requested */ + if ((flags & MCXT_ALLOC_ZERO) != 0) + MemSetAligned(aligned, 0, size); return aligned; } @@ -1528,16 +1594,12 @@ void pfree(void *pointer) { #ifdef USE_VALGRIND - MemoryContextMethodID method = GetMemoryChunkMethodID(pointer); MemoryContext context = GetMemoryChunkContext(pointer); #endif MCXT_METHOD(pointer, free_p) (pointer); -#ifdef USE_VALGRIND - if (method != MCTX_ALIGNED_REDIRECT_ID) - VALGRIND_MEMPOOL_FREE(context, pointer); -#endif + VALGRIND_MEMPOOL_FREE(context, pointer); } /* @@ -1547,9 +1609,6 @@ pfree(void *pointer) void * repalloc(void *pointer, Size size) { -#ifdef USE_VALGRIND - MemoryContextMethodID method = GetMemoryChunkMethodID(pointer); -#endif #if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND) MemoryContext context = GetMemoryChunkContext(pointer); #endif @@ -1572,10 +1631,7 @@ repalloc(void *pointer, Size size) */ ret = MCXT_METHOD(pointer, realloc) (pointer, size, 0); -#ifdef USE_VALGRIND - if (method != MCTX_ALIGNED_REDIRECT_ID) - VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size); -#endif + VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size); return ret; } diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c index d32c0d318fb..0e35abcf5a0 100644 --- a/src/backend/utils/mmgr/slab.c +++ b/src/backend/utils/mmgr/slab.c @@ -377,6 +377,11 @@ SlabContextCreate(MemoryContext parent, * we'd leak the header if we ereport in this stretch. 
*/ + /* See comments about Valgrind interactions in aset.c */ + VALGRIND_CREATE_MEMPOOL(slab, 0, false); + /* This vchunk covers the SlabContext only */ + VALGRIND_MEMPOOL_ALLOC(slab, slab, sizeof(SlabContext)); + /* Fill in SlabContext-specific header fields */ slab->chunkSize = (uint32) chunkSize; slab->fullChunkSize = (uint32) fullChunkSize; @@ -451,6 +456,10 @@ SlabReset(MemoryContext context) #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, slab->blockSize); #endif + + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(slab, block); + free(block); context->mem_allocated -= slab->blockSize; } @@ -467,11 +476,23 @@ SlabReset(MemoryContext context) #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, slab->blockSize); #endif + + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(slab, block); + free(block); context->mem_allocated -= slab->blockSize; } } + /* + * Instruct Valgrind to throw away all the vchunks associated with this + * context, except for the one covering the SlabContext. This gets rid of + * the vchunks for whatever user data is getting discarded by the context + * reset. + */ + VALGRIND_MEMPOOL_TRIM(slab, slab, sizeof(SlabContext)); + slab->curBlocklistIndex = 0; Assert(context->mem_allocated == 0); @@ -486,6 +507,10 @@ SlabDelete(MemoryContext context) { /* Reset to release all the SlabBlocks */ SlabReset(context); + + /* Destroy the vpool -- see notes in aset.c */ + VALGRIND_DESTROY_MEMPOOL(context); + /* And free the context header */ free(context); } @@ -567,6 +592,9 @@ SlabAllocFromNewBlock(MemoryContext context, Size size, int flags) if (unlikely(block == NULL)) return MemoryContextAllocationFailure(context, size, flags); + /* Make a vchunk covering the new block's header */ + VALGRIND_MEMPOOL_ALLOC(slab, block, Slab_BLOCKHDRSZ); + block->slab = slab; context->mem_allocated += slab->blockSize; @@ -795,6 +823,10 @@ SlabFree(void *pointer) #ifdef CLOBBER_FREED_MEMORY wipe_mem(block, slab->blockSize); #endif + + /* As in aset.c, free block-header vchunks explicitly */ + VALGRIND_MEMPOOL_FREE(slab, block); + free(block); slab->header.mem_allocated -= slab->blockSize; } diff --git a/src/bin/initdb/Makefile b/src/bin/initdb/Makefile index 997e0a013e9..c0470efda92 100644 --- a/src/bin/initdb/Makefile +++ b/src/bin/initdb/Makefile @@ -20,7 +20,7 @@ include $(top_builddir)/src/Makefile.global # from libpq, else we have risks of version skew if we run with a libpq # shared library from a different PG version. Define # USE_PRIVATE_ENCODING_FUNCS to ensure that that happens. -override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(ICU_CFLAGS) $(CPPFLAGS) +override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(CPPFLAGS) $(ICU_CFLAGS) # We need libpq only because fe_utils does. 
LDFLAGS_INTERNAL += -L$(top_builddir)/src/fe_utils -lpgfeutils $(libpq_pgport) $(ICU_LIBS) diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c index aa1589e3331..a1976fae607 100644 --- a/src/bin/pg_dump/common.c +++ b/src/bin/pg_dump/common.c @@ -17,6 +17,7 @@ #include <ctype.h> +#include "catalog/pg_am_d.h" #include "catalog/pg_class_d.h" #include "catalog/pg_collation_d.h" #include "catalog/pg_extension_d.h" @@ -945,6 +946,24 @@ findOprByOid(Oid oid) } /* + * findAccessMethodByOid + * finds the DumpableObject for the access method with the given oid + * returns NULL if not found + */ +AccessMethodInfo * +findAccessMethodByOid(Oid oid) +{ + CatalogId catId; + DumpableObject *dobj; + + catId.tableoid = AccessMethodRelationId; + catId.oid = oid; + dobj = findObjectByCatalogId(catId); + Assert(dobj == NULL || dobj->objType == DO_ACCESS_METHOD); + return (AccessMethodInfo *) dobj; +} + +/* * findCollationByOid * finds the DumpableObject for the collation with the given oid * returns NULL if not found diff --git a/src/bin/pg_dump/meson.build b/src/bin/pg_dump/meson.build index 4a4ebbd8ec9..a2233b0a1b4 100644 --- a/src/bin/pg_dump/meson.build +++ b/src/bin/pg_dump/meson.build @@ -102,7 +102,6 @@ tests += { 't/003_pg_dump_with_server.pl', 't/004_pg_dump_parallel.pl', 't/005_pg_dump_filterfile.pl', - 't/006_pg_dumpall.pl', 't/010_dump_connstr.pl', ], }, diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c index 5974d6706fd..086adcdc502 100644 --- a/src/bin/pg_dump/parallel.c +++ b/src/bin/pg_dump/parallel.c @@ -334,16 +334,6 @@ on_exit_close_archive(Archive *AHX) } /* - * When pg_restore restores multiple databases, then update already added entry - * into array for cleanup. - */ -void -replace_on_exit_close_archive(Archive *AHX) -{ - shutdown_info.AHX = AHX; -} - -/* * on_exit_nicely handler for shutting down database connections and * worker processes cleanly. */ diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h index af0007fb6d2..4ebef1e8644 100644 --- a/src/bin/pg_dump/pg_backup.h +++ b/src/bin/pg_dump/pg_backup.h @@ -308,7 +308,7 @@ extern void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ro extern void ProcessArchiveRestoreOptions(Archive *AHX); -extern void RestoreArchive(Archive *AHX, bool append_data); +extern void RestoreArchive(Archive *AHX); /* Open an existing archive */ extern Archive *OpenArchive(const char *FileSpec, const ArchiveFormat fmt); diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index 30e0da31aa3..dce88f040ac 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -87,7 +87,7 @@ static int RestoringToDB(ArchiveHandle *AH); static void dump_lo_buf(ArchiveHandle *AH); static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim); static void SetOutput(ArchiveHandle *AH, const char *filename, - const pg_compress_specification compression_spec, bool append_data); + const pg_compress_specification compression_spec); static CompressFileHandle *SaveOutput(ArchiveHandle *AH); static void RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput); @@ -339,14 +339,9 @@ ProcessArchiveRestoreOptions(Archive *AHX) StrictNamesCheck(ropt); } -/* - * RestoreArchive - * - * If append_data is set, then append data into file as we are restoring dump - * of multiple databases which was taken by pg_dumpall. 
- */ +/* Public */ void -RestoreArchive(Archive *AHX, bool append_data) +RestoreArchive(Archive *AHX) { ArchiveHandle *AH = (ArchiveHandle *) AHX; RestoreOptions *ropt = AH->public.ropt; @@ -463,7 +458,7 @@ RestoreArchive(Archive *AHX, bool append_data) */ sav = SaveOutput(AH); if (ropt->filename || ropt->compression_spec.algorithm != PG_COMPRESSION_NONE) - SetOutput(AH, ropt->filename, ropt->compression_spec, append_data); + SetOutput(AH, ropt->filename, ropt->compression_spec); ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n"); @@ -1302,7 +1297,7 @@ PrintTOCSummary(Archive *AHX) sav = SaveOutput(AH); if (ropt->filename) - SetOutput(AH, ropt->filename, out_compression_spec, false); + SetOutput(AH, ropt->filename, out_compression_spec); if (strftime(stamp_str, sizeof(stamp_str), PGDUMP_STRFTIME_FMT, localtime(&AH->createDate)) == 0) @@ -1681,8 +1676,7 @@ archprintf(Archive *AH, const char *fmt,...) static void SetOutput(ArchiveHandle *AH, const char *filename, - const pg_compress_specification compression_spec, - bool append_data) + const pg_compress_specification compression_spec) { CompressFileHandle *CFH; const char *mode; @@ -1702,7 +1696,7 @@ SetOutput(ArchiveHandle *AH, const char *filename, else fn = fileno(stdout); - if (append_data || AH->mode == archModeAppend) + if (AH->mode == archModeAppend) mode = PG_BINARY_A; else mode = PG_BINARY_W; diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h index 365073b3eae..325b53fc9bd 100644 --- a/src/bin/pg_dump/pg_backup_archiver.h +++ b/src/bin/pg_dump/pg_backup_archiver.h @@ -394,7 +394,6 @@ struct _tocEntry extern int parallel_restore(ArchiveHandle *AH, TocEntry *te); extern void on_exit_close_archive(Archive *AHX); -extern void replace_on_exit_close_archive(Archive *AHX); extern void warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...) 
pg_attribute_printf(2, 3); diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c index d94d0de2a5d..b5ba3b46dd9 100644 --- a/src/bin/pg_dump/pg_backup_tar.c +++ b/src/bin/pg_dump/pg_backup_tar.c @@ -826,7 +826,7 @@ _CloseArchive(ArchiveHandle *AH) savVerbose = AH->public.verbose; AH->public.verbose = 0; - RestoreArchive((Archive *) AH, false); + RestoreArchive((Archive *) AH); SetArchiveOptions((Archive *) AH, savDopt, savRopt); diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 6298edb26b5..f3a353a61a5 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -449,8 +449,6 @@ main(int argc, char **argv) bool data_only = false; bool schema_only = false; bool statistics_only = false; - bool with_data = false; - bool with_schema = false; bool with_statistics = false; bool no_data = false; bool no_schema = false; @@ -514,6 +512,7 @@ main(int argc, char **argv) {"section", required_argument, NULL, 5}, {"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1}, {"snapshot", required_argument, NULL, 6}, + {"statistics", no_argument, NULL, 22}, {"statistics-only", no_argument, NULL, 18}, {"strict-names", no_argument, &strict_names, 1}, {"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1}, @@ -528,9 +527,6 @@ main(int argc, char **argv) {"no-toast-compression", no_argument, &dopt.no_toast_compression, 1}, {"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1}, {"no-sync", no_argument, NULL, 7}, - {"with-data", no_argument, NULL, 22}, - {"with-schema", no_argument, NULL, 23}, - {"with-statistics", no_argument, NULL, 24}, {"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1}, {"rows-per-insert", required_argument, NULL, 10}, {"include-foreign-data", required_argument, NULL, 11}, @@ -798,14 +794,6 @@ main(int argc, char **argv) break; case 22: - with_data = true; - break; - - case 23: - with_schema = true; - break; - - case 24: with_statistics = true; break; @@ -852,13 +840,17 @@ main(int argc, char **argv) if (statistics_only && no_statistics) pg_fatal("options --statistics-only and --no-statistics cannot be used together"); - /* reject conflicting "with-" and "no-" options */ - if (with_data && no_data) - pg_fatal("options --with-data and --no-data cannot be used together"); - if (with_schema && no_schema) - pg_fatal("options --with-schema and --no-schema cannot be used together"); + /* reject conflicting "no-" options */ if (with_statistics && no_statistics) - pg_fatal("options --with-statistics and --no-statistics cannot be used together"); + pg_fatal("options --statistics and --no-statistics cannot be used together"); + + /* reject conflicting "-only" options */ + if (data_only && with_statistics) + pg_fatal("options %s and %s cannot be used together", + "-a/--data-only", "--statistics"); + if (schema_only && with_statistics) + pg_fatal("options %s and %s cannot be used together", + "-s/--schema-only", "--statistics"); if (schema_only && foreign_servers_include_patterns.head != NULL) pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together"); @@ -873,16 +865,14 @@ main(int argc, char **argv) pg_fatal("option --if-exists requires option -c/--clean"); /* - * Set derivative flags. An "-only" option may be overridden by an - * explicit "with-" option; e.g. "--schema-only --with-statistics" will - * include schema and statistics. Other ambiguous or nonsensical - * combinations, e.g. 
"--schema-only --no-schema", will have already - * caused an error in one of the checks above. + * Set derivative flags. Ambiguous or nonsensical combinations, e.g. + * "--schema-only --no-schema", will have already caused an error in one + * of the checks above. */ dopt.dumpData = ((dopt.dumpData && !schema_only && !statistics_only) || - (data_only || with_data)) && !no_data; + data_only) && !no_data; dopt.dumpSchema = ((dopt.dumpSchema && !data_only && !statistics_only) || - (schema_only || with_schema)) && !no_schema; + schema_only) && !no_schema; dopt.dumpStatistics = ((dopt.dumpStatistics && !schema_only && !data_only) || (statistics_only || with_statistics)) && !no_statistics; @@ -1265,7 +1255,7 @@ main(int argc, char **argv) * right now. */ if (plainText) - RestoreArchive(fout, false); + RestoreArchive(fout); CloseArchive(fout); @@ -1355,6 +1345,7 @@ help(const char *progname) printf(_(" --sequence-data include sequence data in dump\n")); printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n")); printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n")); + printf(_(" --statistics dump the statistics\n")); printf(_(" --statistics-only dump only the statistics, not schema or data\n")); printf(_(" --strict-names require table and/or schema include patterns to\n" " match at least one entity each\n")); @@ -1363,9 +1354,6 @@ help(const char *progname) printf(_(" --use-set-session-authorization\n" " use SET SESSION AUTHORIZATION commands instead of\n" " ALTER OWNER commands to set ownership\n")); - printf(_(" --with-data dump the data\n")); - printf(_(" --with-schema dump the schema\n")); - printf(_(" --with-statistics dump the statistics\n")); printf(_("\nConnection options:\n")); printf(_(" -d, --dbname=DBNAME database to dump\n")); @@ -2207,6 +2195,13 @@ selectDumpableProcLang(ProcLangInfo *plang, Archive *fout) static void selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout) { + /* see getAccessMethods() comment about v9.6. 
*/ + if (fout->remoteVersion < 90600) + { + method->dobj.dump = DUMP_COMPONENT_NONE; + return; + } + if (checkExtensionMembership(&method->dobj, fout)) return; /* extension membership overrides all else */ @@ -6262,6 +6257,8 @@ getOperators(Archive *fout) int i_oprnamespace; int i_oprowner; int i_oprkind; + int i_oprleft; + int i_oprright; int i_oprcode; /* @@ -6273,6 +6270,8 @@ getOperators(Archive *fout) "oprnamespace, " "oprowner, " "oprkind, " + "oprleft, " + "oprright, " "oprcode::oid AS oprcode " "FROM pg_operator"); @@ -6288,6 +6287,8 @@ getOperators(Archive *fout) i_oprnamespace = PQfnumber(res, "oprnamespace"); i_oprowner = PQfnumber(res, "oprowner"); i_oprkind = PQfnumber(res, "oprkind"); + i_oprleft = PQfnumber(res, "oprleft"); + i_oprright = PQfnumber(res, "oprright"); i_oprcode = PQfnumber(res, "oprcode"); for (i = 0; i < ntups; i++) @@ -6301,6 +6302,8 @@ getOperators(Archive *fout) findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace))); oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner)); oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0]; + oprinfo[i].oprleft = atooid(PQgetvalue(res, i, i_oprleft)); + oprinfo[i].oprright = atooid(PQgetvalue(res, i, i_oprright)); oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode)); /* Decide whether we want to dump it */ @@ -6329,6 +6332,7 @@ getCollations(Archive *fout) int i_collname; int i_collnamespace; int i_collowner; + int i_collencoding; query = createPQExpBuffer(); @@ -6339,7 +6343,8 @@ getCollations(Archive *fout) appendPQExpBufferStr(query, "SELECT tableoid, oid, collname, " "collnamespace, " - "collowner " + "collowner, " + "collencoding " "FROM pg_collation"); res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); @@ -6353,6 +6358,7 @@ getCollations(Archive *fout) i_collname = PQfnumber(res, "collname"); i_collnamespace = PQfnumber(res, "collnamespace"); i_collowner = PQfnumber(res, "collowner"); + i_collencoding = PQfnumber(res, "collencoding"); for (i = 0; i < ntups; i++) { @@ -6364,6 +6370,7 @@ getCollations(Archive *fout) collinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_collnamespace))); collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner)); + collinfo[i].collencoding = atoi(PQgetvalue(res, i, i_collencoding)); /* Decide whether we want to dump it */ selectDumpableObject(&(collinfo[i].dobj), fout); @@ -6454,16 +6461,28 @@ getAccessMethods(Archive *fout) int i_amhandler; int i_amtype; - /* Before 9.6, there are no user-defined access methods */ - if (fout->remoteVersion < 90600) - return; - query = createPQExpBuffer(); - /* Select all access methods from pg_am table */ - appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, amtype, " - "amhandler::pg_catalog.regproc AS amhandler " - "FROM pg_am"); + /* + * Select all access methods from pg_am table. v9.6 introduced CREATE + * ACCESS METHOD, so earlier versions usually have only built-in access + * methods. v9.6 also changed the access method API, replacing dozens of + * pg_am columns with amhandler. Even if a user created an access method + * by "INSERT INTO pg_am", we have no way to translate pre-v9.6 pg_am + * columns to a v9.6+ CREATE ACCESS METHOD. Hence, before v9.6, read + * pg_am just to facilitate findAccessMethodByOid() providing the + * OID-to-name mapping. 
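For illustration, the version fork built just below issues one of two query shapes (reconstructed from the appendPQExpBufferStr branches; terminating semicolons added for readability):

    -- v9.6 and later: fetch the real amtype/amhandler columns
    SELECT tableoid, oid, amname, amtype,
           amhandler::pg_catalog.regproc AS amhandler
    FROM pg_am;

    -- pre-9.6: placeholder values; rows are kept only so that
    -- findAccessMethodByOid() can map AM OIDs to names
    SELECT tableoid, oid, amname,
           'i'::pg_catalog."char" AS amtype,
           '-'::pg_catalog.regproc AS amhandler
    FROM pg_am;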
+ */ + appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, "); + if (fout->remoteVersion >= 90600) + appendPQExpBufferStr(query, + "amtype, " + "amhandler::pg_catalog.regproc AS amhandler "); + else + appendPQExpBufferStr(query, + "'i'::pg_catalog.\"char\" AS amtype, " + "'-'::pg_catalog.regproc AS amhandler "); + appendPQExpBufferStr(query, "FROM pg_am"); res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK); @@ -6512,6 +6531,7 @@ getOpclasses(Archive *fout) OpclassInfo *opcinfo; int i_tableoid; int i_oid; + int i_opcmethod; int i_opcname; int i_opcnamespace; int i_opcowner; @@ -6521,7 +6541,7 @@ getOpclasses(Archive *fout) * system-defined opclasses at dump-out time. */ - appendPQExpBufferStr(query, "SELECT tableoid, oid, opcname, " + appendPQExpBufferStr(query, "SELECT tableoid, oid, opcmethod, opcname, " "opcnamespace, " "opcowner " "FROM pg_opclass"); @@ -6534,6 +6554,7 @@ getOpclasses(Archive *fout) i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); + i_opcmethod = PQfnumber(res, "opcmethod"); i_opcname = PQfnumber(res, "opcname"); i_opcnamespace = PQfnumber(res, "opcnamespace"); i_opcowner = PQfnumber(res, "opcowner"); @@ -6547,6 +6568,7 @@ getOpclasses(Archive *fout) opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname)); opcinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace))); + opcinfo[i].opcmethod = atooid(PQgetvalue(res, i, i_opcmethod)); opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner)); /* Decide whether we want to dump it */ @@ -6572,6 +6594,7 @@ getOpfamilies(Archive *fout) OpfamilyInfo *opfinfo; int i_tableoid; int i_oid; + int i_opfmethod; int i_opfname; int i_opfnamespace; int i_opfowner; @@ -6583,7 +6606,7 @@ getOpfamilies(Archive *fout) * system-defined opfamilies at dump-out time. 
*/ - appendPQExpBufferStr(query, "SELECT tableoid, oid, opfname, " + appendPQExpBufferStr(query, "SELECT tableoid, oid, opfmethod, opfname, " "opfnamespace, " "opfowner " "FROM pg_opfamily"); @@ -6597,6 +6620,7 @@ getOpfamilies(Archive *fout) i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); i_opfname = PQfnumber(res, "opfname"); + i_opfmethod = PQfnumber(res, "opfmethod"); i_opfnamespace = PQfnumber(res, "opfnamespace"); i_opfowner = PQfnumber(res, "opfowner"); @@ -6609,6 +6633,7 @@ getOpfamilies(Archive *fout) opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname)); opfinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_opfnamespace))); + opfinfo[i].opfmethod = atooid(PQgetvalue(res, i, i_opfmethod)); opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner)); /* Decide whether we want to dump it */ diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 93a4475d51b..dde85ed156c 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -260,6 +260,8 @@ typedef struct _oprInfo DumpableObject dobj; const char *rolname; char oprkind; + Oid oprleft; + Oid oprright; Oid oprcode; } OprInfo; @@ -273,12 +275,14 @@ typedef struct _accessMethodInfo typedef struct _opclassInfo { DumpableObject dobj; + Oid opcmethod; const char *rolname; } OpclassInfo; typedef struct _opfamilyInfo { DumpableObject dobj; + Oid opfmethod; const char *rolname; } OpfamilyInfo; @@ -286,6 +290,7 @@ typedef struct _collInfo { DumpableObject dobj; const char *rolname; + int collencoding; } CollInfo; typedef struct _convInfo @@ -760,6 +765,7 @@ extern TableInfo *findTableByOid(Oid oid); extern TypeInfo *findTypeByOid(Oid oid); extern FuncInfo *findFuncByOid(Oid oid); extern OprInfo *findOprByOid(Oid oid); +extern AccessMethodInfo *findAccessMethodByOid(Oid oid); extern CollInfo *findCollationByOid(Oid oid); extern NamespaceInfo *findNamespaceByOid(Oid oid); extern ExtensionInfo *findExtensionByOid(Oid oid); diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c index f99a0797ea7..a02da3e9652 100644 --- a/src/bin/pg_dump/pg_dump_sort.c +++ b/src/bin/pg_dump/pg_dump_sort.c @@ -162,6 +162,8 @@ static DumpId postDataBoundId; static int DOTypeNameCompare(const void *p1, const void *p2); +static int pgTypeNameCompare(Oid typid1, Oid typid2); +static int accessMethodNameCompare(Oid am1, Oid am2); static bool TopoSort(DumpableObject **objs, int numObjs, DumpableObject **ordering, @@ -228,12 +230,39 @@ DOTypeNameCompare(const void *p1, const void *p2) else if (obj2->namespace) return 1; - /* Sort by name */ + /* + * Sort by name. With a few exceptions, names here are single catalog + * columns. To get a fuller picture, grep pg_dump.c for "dobj.name = ". + * Names here don't match "Name:" in plain format output, which is a + * _tocEntry.tag. For example, DumpableObject.name of a constraint is + * pg_constraint.conname, but _tocEntry.tag of a constraint is relname and + * conname joined with a space. + */ cmpval = strcmp(obj1->name, obj2->name); if (cmpval != 0) return cmpval; - /* To have a stable sort order, break ties for some object types */ + /* + * Sort by type. This helps types that share a type priority without + * sharing a unique name constraint, e.g. opclass and opfamily. + */ + cmpval = obj1->objType - obj2->objType; + if (cmpval != 0) + return cmpval; + + /* + * To have a stable sort order, break ties for some object types. Most + * catalogs have a natural key, e.g. pg_proc_proname_args_nsp_index. 
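As a concrete illustration of that translation for functions: the comparator effectively orders overloads by the names of their argument types, roughly the order this query shows (illustrative only; regprocedure text is close to, but not identical to, the (nspname, typname) ordering the comparator actually computes):

    SELECT p.oid::regprocedure
    FROM pg_proc p
    WHERE p.proname = 'length'
    ORDER BY p.oid::regprocedure::text;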
Where + * the above "namespace" and "name" comparisons don't cover all natural + * key columns, compare the rest here. + * + * The natural key usually refers to other catalogs by surrogate keys. + * Hence, this translates each of those references to the natural key of + * the referenced catalog. That may descend through multiple levels of + * catalog references. For example, to sort by pg_proc.proargtypes, + * descend to each pg_type and then further to its pg_namespace, for an + * overall sort by (nspname, typname). + */ if (obj1->objType == DO_FUNC || obj1->objType == DO_AGG) { FuncInfo *fobj1 = *(FuncInfo *const *) p1; @@ -246,22 +275,10 @@ DOTypeNameCompare(const void *p1, const void *p2) return cmpval; for (i = 0; i < fobj1->nargs; i++) { - TypeInfo *argtype1 = findTypeByOid(fobj1->argtypes[i]); - TypeInfo *argtype2 = findTypeByOid(fobj2->argtypes[i]); - - if (argtype1 && argtype2) - { - if (argtype1->dobj.namespace && argtype2->dobj.namespace) - { - cmpval = strcmp(argtype1->dobj.namespace->dobj.name, - argtype2->dobj.namespace->dobj.name); - if (cmpval != 0) - return cmpval; - } - cmpval = strcmp(argtype1->dobj.name, argtype2->dobj.name); - if (cmpval != 0) - return cmpval; - } + cmpval = pgTypeNameCompare(fobj1->argtypes[i], + fobj2->argtypes[i]); + if (cmpval != 0) + return cmpval; } } else if (obj1->objType == DO_OPERATOR) @@ -273,6 +290,57 @@ DOTypeNameCompare(const void *p1, const void *p2) cmpval = (oobj2->oprkind - oobj1->oprkind); if (cmpval != 0) return cmpval; + /* Within an oprkind, sort by argument type names */ + cmpval = pgTypeNameCompare(oobj1->oprleft, oobj2->oprleft); + if (cmpval != 0) + return cmpval; + cmpval = pgTypeNameCompare(oobj1->oprright, oobj2->oprright); + if (cmpval != 0) + return cmpval; + } + else if (obj1->objType == DO_OPCLASS) + { + OpclassInfo *opcobj1 = *(OpclassInfo *const *) p1; + OpclassInfo *opcobj2 = *(OpclassInfo *const *) p2; + + /* Sort by access method name, per pg_opclass_am_name_nsp_index */ + cmpval = accessMethodNameCompare(opcobj1->opcmethod, + opcobj2->opcmethod); + if (cmpval != 0) + return cmpval; + } + else if (obj1->objType == DO_OPFAMILY) + { + OpfamilyInfo *opfobj1 = *(OpfamilyInfo *const *) p1; + OpfamilyInfo *opfobj2 = *(OpfamilyInfo *const *) p2; + + /* Sort by access method name, per pg_opfamily_am_name_nsp_index */ + cmpval = accessMethodNameCompare(opfobj1->opfmethod, + opfobj2->opfmethod); + if (cmpval != 0) + return cmpval; + } + else if (obj1->objType == DO_COLLATION) + { + CollInfo *cobj1 = *(CollInfo *const *) p1; + CollInfo *cobj2 = *(CollInfo *const *) p2; + + /* + * Sort by encoding, per pg_collation_name_enc_nsp_index. Technically, + * this is not necessary, because wherever this changes dump order, + * restoring the dump fails anyway. CREATE COLLATION can't create a + * tie for this to break, because it imposes restrictions to make + * (nspname, collname) uniquely identify a collation within a given + * DatabaseEncoding. While pg_import_system_collations() can create a + * tie, pg_dump+restore fails after + * pg_import_system_collations('my_schema') does so. However, there's + * little to gain by ignoring one natural key column on the basis of + * those limitations elsewhere, so respect the full natural key like + * we do for other object types. 
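The opclass/opfamily tie-breakers above address a real ambiguity, not a hypothetical one; (nspname, opcname) alone does not identify an opclass even among built-ins:

    -- 'int4_ops' exists under more than one access method, so only
    -- (amname, opcname, nspname) is unique, per pg_opclass_am_name_nsp_index.
    SELECT am.amname, opc.opcname, opc.opcnamespace::regnamespace AS nspname
    FROM pg_opclass opc
    JOIN pg_am am ON am.oid = opc.opcmethod
    WHERE opc.opcname = 'int4_ops'
    ORDER BY am.amname;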
+ */ + cmpval = cobj1->collencoding - cobj2->collencoding; + if (cmpval != 0) + return cmpval; } else if (obj1->objType == DO_ATTRDEF) { @@ -317,11 +385,143 @@ DOTypeNameCompare(const void *p1, const void *p2) if (cmpval != 0) return cmpval; } + else if (obj1->objType == DO_CONSTRAINT) + { + ConstraintInfo *robj1 = *(ConstraintInfo *const *) p1; + ConstraintInfo *robj2 = *(ConstraintInfo *const *) p2; - /* Usually shouldn't get here, but if we do, sort by OID */ + /* + * Sort domain constraints before table constraints, for consistency + * with our decision to sort CREATE DOMAIN before CREATE TABLE. + */ + if (robj1->condomain) + { + if (robj2->condomain) + { + /* Sort by domain name (domain namespace was considered) */ + cmpval = strcmp(robj1->condomain->dobj.name, + robj2->condomain->dobj.name); + if (cmpval != 0) + return cmpval; + } + else + return PRIO_TYPE - PRIO_TABLE; + } + else if (robj2->condomain) + return PRIO_TABLE - PRIO_TYPE; + else + { + /* Sort by table name (table namespace was considered already) */ + cmpval = strcmp(robj1->contable->dobj.name, + robj2->contable->dobj.name); + if (cmpval != 0) + return cmpval; + } + } + else if (obj1->objType == DO_PUBLICATION_REL) + { + PublicationRelInfo *probj1 = *(PublicationRelInfo *const *) p1; + PublicationRelInfo *probj2 = *(PublicationRelInfo *const *) p2; + + /* Sort by publication name, since (namespace, name) match the rel */ + cmpval = strcmp(probj1->publication->dobj.name, + probj2->publication->dobj.name); + if (cmpval != 0) + return cmpval; + } + else if (obj1->objType == DO_PUBLICATION_TABLE_IN_SCHEMA) + { + PublicationSchemaInfo *psobj1 = *(PublicationSchemaInfo *const *) p1; + PublicationSchemaInfo *psobj2 = *(PublicationSchemaInfo *const *) p2; + + /* Sort by publication name, since ->name is just nspname */ + cmpval = strcmp(psobj1->publication->dobj.name, + psobj2->publication->dobj.name); + if (cmpval != 0) + return cmpval; + } + + /* + * Shouldn't get here except after catalog corruption, but if we do, sort + * by OID. This may make logically-identical databases differ in the + * order of objects in dump output. Users will get spurious schema diffs. + * Expect flaky failures of 002_pg_upgrade.pl test 'dump outputs from + * original and restored regression databases match' if the regression + * database contains objects allowing that test to reach here. That's a + * consequence of the test using "pg_restore -j", which doesn't fully + * constrain OID assignment order. + */ + Assert(false); return oidcmp(obj1->catId.oid, obj2->catId.oid); } +/* Compare two OID-identified pg_type values by nspname, then by typname. */ +static int +pgTypeNameCompare(Oid typid1, Oid typid2) +{ + TypeInfo *typobj1; + TypeInfo *typobj2; + int cmpval; + + if (typid1 == typid2) + return 0; + + typobj1 = findTypeByOid(typid1); + typobj2 = findTypeByOid(typid2); + + if (!typobj1 || !typobj2) + { + /* + * getTypes() didn't find some OID. Assume catalog corruption, e.g. + * an oprright value without the corresponding OID in a pg_type row. + * Report as "equal", so the caller uses the next available basis for + * comparison, e.g. the next function argument. + * + * Unary operators have InvalidOid in oprleft (if oprkind='r') or in + * oprright (if oprkind='l'). Caller already sorted by oprkind, + * calling us only for like-kind operators. Hence, "typid1 == typid2" + * took care of InvalidOid. (v14 removed postfix operator support. + * Hence, when dumping from v14+, only oprleft can be InvalidOid.) 
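A quick illustration of the InvalidOid case described here: prefix operators simply store 0 in oprleft, and since the caller compares only like-kind operators, both sides are 0 together and the "typid1 == typid2" test covers them:

    -- oprkind = 'l' rows have oprleft = 0 (InvalidOid)
    SELECT oprname, oprkind, oprleft, oprright::regtype
    FROM pg_operator
    WHERE oprkind = 'l'
    ORDER BY oprname
    LIMIT 3;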
+ */ + Assert(false); + return 0; + } + + if (!typobj1->dobj.namespace || !typobj2->dobj.namespace) + Assert(false); /* catalog corruption */ + else + { + cmpval = strcmp(typobj1->dobj.namespace->dobj.name, + typobj2->dobj.namespace->dobj.name); + if (cmpval != 0) + return cmpval; + } + return strcmp(typobj1->dobj.name, typobj2->dobj.name); +} + +/* Compare two OID-identified pg_am values by amname. */ +static int +accessMethodNameCompare(Oid am1, Oid am2) +{ + AccessMethodInfo *amobj1; + AccessMethodInfo *amobj2; + + if (am1 == am2) + return 0; + + amobj1 = findAccessMethodByOid(am1); + amobj2 = findAccessMethodByOid(am2); + + if (!amobj1 || !amobj2) + { + /* catalog corruption: handle like pgTypeNameCompare() does */ + Assert(false); + return 0; + } + + return strcmp(amobj1->dobj.name, amobj2->dobj.name); +} + /* * Sort the given objects into a safe dump order using dependency diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 100317b1aa9..27aa1b65698 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -65,10 +65,9 @@ static void dropTablespaces(PGconn *conn); static void dumpTablespaces(PGconn *conn); static void dropDBs(PGconn *conn); static void dumpUserConfig(PGconn *conn, const char *username); -static void dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat); +static void dumpDatabases(PGconn *conn); static void dumpTimestamp(const char *msg); -static int runPgDump(const char *dbname, const char *create_opts, - char *dbfile, ArchiveFormat archDumpFormat); +static int runPgDump(const char *dbname, const char *create_opts); static void buildShSecLabels(PGconn *conn, const char *catalog_name, Oid objectId, const char *objtype, const char *objname, @@ -77,7 +76,6 @@ static void executeCommand(PGconn *conn, const char *query); static void expand_dbname_patterns(PGconn *conn, SimpleStringList *patterns, SimpleStringList *names); static void read_dumpall_filters(const char *filename, SimpleStringList *pattern); -static ArchiveFormat parseDumpFormat(const char *format); static char pg_dump_bin[MAXPGPATH]; static PQExpBuffer pgdumpopts; @@ -107,8 +105,6 @@ static int no_subscriptions = 0; static int no_toast_compression = 0; static int no_unlogged_table_data = 0; static int no_role_passwords = 0; -static int with_data = 0; -static int with_schema = 0; static int with_statistics = 0; static int server_version; static int load_via_partition_root = 0; @@ -150,7 +146,6 @@ main(int argc, char *argv[]) {"password", no_argument, NULL, 'W'}, {"no-privileges", no_argument, NULL, 'x'}, {"no-acl", no_argument, NULL, 'x'}, - {"format", required_argument, NULL, 'F'}, /* * the following options don't have an equivalent short option letter @@ -183,11 +178,9 @@ main(int argc, char *argv[]) {"no-sync", no_argument, NULL, 4}, {"no-toast-compression", no_argument, &no_toast_compression, 1}, {"no-unlogged-table-data", no_argument, &no_unlogged_table_data, 1}, - {"with-data", no_argument, &with_data, 1}, - {"with-schema", no_argument, &with_schema, 1}, - {"with-statistics", no_argument, &with_statistics, 1}, {"on-conflict-do-nothing", no_argument, &on_conflict_do_nothing, 1}, {"rows-per-insert", required_argument, NULL, 7}, + {"statistics", no_argument, &with_statistics, 1}, {"statistics-only", no_argument, &statistics_only, 1}, {"filter", required_argument, NULL, 8}, {"sequence-data", no_argument, &sequence_data, 1}, @@ -201,8 +194,6 @@ main(int argc, char *argv[]) char *pgdb = NULL; char *use_role = NULL; const char *dumpencoding = NULL; - 
ArchiveFormat archDumpFormat = archNull; - const char *formatName = "p"; trivalue prompt_password = TRI_DEFAULT; bool data_only = false; bool globals_only = false; @@ -252,7 +243,7 @@ main(int argc, char *argv[]) pgdumpopts = createPQExpBuffer(); - while ((c = getopt_long(argc, argv, "acd:E:f:F:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1) + while ((c = getopt_long(argc, argv, "acd:E:f:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1) { switch (c) { @@ -280,9 +271,7 @@ main(int argc, char *argv[]) appendPQExpBufferStr(pgdumpopts, " -f "); appendShellString(pgdumpopts, filename); break; - case 'F': - formatName = pg_strdup(optarg); - break; + case 'g': globals_only = true; break; @@ -431,21 +420,6 @@ main(int argc, char *argv[]) exit_nicely(1); } - /* Get format for dump. */ - archDumpFormat = parseDumpFormat(formatName); - - /* - * If a non-plain format is specified, a file name is also required as the - * path to the main directory. - */ - if (archDumpFormat != archNull && - (!filename || strcmp(filename, "") == 0)) - { - pg_log_error("option -F/--format=d|c|t requires option -f/--file"); - pg_log_error_hint("Try \"%s --help\" for more information.", progname); - exit_nicely(1); - } - /* * If password values are not required in the dump, switch to using * pg_roles which is equally useful, just more likely to have unrestricted @@ -497,12 +471,8 @@ main(int argc, char *argv[]) appendPQExpBufferStr(pgdumpopts, " --no-toast-compression"); if (no_unlogged_table_data) appendPQExpBufferStr(pgdumpopts, " --no-unlogged-table-data"); - if (with_data) - appendPQExpBufferStr(pgdumpopts, " --with-data"); - if (with_schema) - appendPQExpBufferStr(pgdumpopts, " --with-schema"); if (with_statistics) - appendPQExpBufferStr(pgdumpopts, " --with-statistics"); + appendPQExpBufferStr(pgdumpopts, " --statistics"); if (on_conflict_do_nothing) appendPQExpBufferStr(pgdumpopts, " --on-conflict-do-nothing"); if (statistics_only) @@ -511,33 +481,6 @@ main(int argc, char *argv[]) appendPQExpBufferStr(pgdumpopts, " --sequence-data"); /* - * Open the output file if required, otherwise use stdout. If required, - * then create new directory and global.dat file. - */ - if (archDumpFormat != archNull) - { - char global_path[MAXPGPATH]; - - /* Create new directory or accept the empty existing directory. */ - create_or_open_dir(filename); - - snprintf(global_path, MAXPGPATH, "%s/global.dat", filename); - - OPF = fopen(global_path, PG_BINARY_W); - if (!OPF) - pg_fatal("could not open file \"%s\": %m", global_path); - } - else if (filename) - { - OPF = fopen(filename, PG_BINARY_W); - if (!OPF) - pg_fatal("could not open output file \"%s\": %m", - filename); - } - else - OPF = stdout; - - /* * If there was a database specified on the command line, use that, * otherwise try to connect to database "postgres", and failing that * "template1". @@ -577,6 +520,19 @@ main(int argc, char *argv[]) &database_exclude_names); /* + * Open the output file if required, otherwise use stdout + */ + if (filename) + { + OPF = fopen(filename, PG_BINARY_W); + if (!OPF) + pg_fatal("could not open output file \"%s\": %m", + filename); + } + else + OPF = stdout; + + /* * Set the client encoding if requested. 
*/ if (dumpencoding) @@ -675,7 +631,7 @@ main(int argc, char *argv[]) } if (!globals_only && !roles_only && !tablespaces_only) - dumpDatabases(conn, archDumpFormat); + dumpDatabases(conn); PQfinish(conn); @@ -688,7 +644,7 @@ main(int argc, char *argv[]) fclose(OPF); /* sync the resulting file, errors are not fatal */ - if (dosync && (archDumpFormat == archNull)) + if (dosync) (void) fsync_fname(filename, false); } @@ -699,14 +655,12 @@ main(int argc, char *argv[]) static void help(void) { - printf(_("%s exports a PostgreSQL database cluster as an SQL script or to other formats.\n\n"), progname); + printf(_("%s exports a PostgreSQL database cluster as an SQL script.\n\n"), progname); printf(_("Usage:\n")); printf(_(" %s [OPTION]...\n"), progname); printf(_("\nGeneral options:\n")); printf(_(" -f, --file=FILENAME output file name\n")); - printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n" - " plain text (default))\n")); printf(_(" -v, --verbose verbose mode\n")); printf(_(" -V, --version output version information, then exit\n")); printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n")); @@ -750,13 +704,11 @@ help(void) printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n")); printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n")); printf(_(" --sequence-data include sequence data in dump\n")); + printf(_(" --statistics dump the statistics\n")); printf(_(" --statistics-only dump only the statistics, not schema or data\n")); printf(_(" --use-set-session-authorization\n" " use SET SESSION AUTHORIZATION commands instead of\n" " ALTER OWNER commands to set ownership\n")); - printf(_(" --with-data dump the data\n")); - printf(_(" --with-schema dump the schema\n")); - printf(_(" --with-statistics dump the statistics\n")); printf(_("\nConnection options:\n")); printf(_(" -d, --dbname=CONNSTR connect using connection string\n")); @@ -1013,6 +965,9 @@ dumpRoles(PGconn *conn) * We do it this way because config settings for roles could mention the * names of other roles. */ + if (PQntuples(res) > 0) + fprintf(OPF, "\n--\n-- User Configurations\n--\n"); + for (i = 0; i < PQntuples(res); i++) dumpUserConfig(conn, PQgetvalue(res, i, i_rolname)); @@ -1526,7 +1481,6 @@ dumpUserConfig(PGconn *conn, const char *username) { PQExpBuffer buf = createPQExpBuffer(); PGresult *res; - static bool header_done = false; printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting " "WHERE setdatabase = 0 AND setrole = " @@ -1538,13 +1492,7 @@ dumpUserConfig(PGconn *conn, const char *username) res = executeQuery(conn, buf->data); if (PQntuples(res) > 0) - { - if (!header_done) - fprintf(OPF, "\n--\n-- User Configurations\n--\n"); - header_done = true; - fprintf(OPF, "\n--\n-- User Config \"%s\"\n--\n\n", username); - } for (int i = 0; i < PQntuples(res); i++) { @@ -1618,13 +1566,10 @@ expand_dbname_patterns(PGconn *conn, * Dump contents of databases. */ static void -dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) +dumpDatabases(PGconn *conn) { PGresult *res; int i; - char db_subdir[MAXPGPATH]; - char dbfilepath[MAXPGPATH]; - FILE *map_file = NULL; /* * Skip databases marked not datallowconn, since we'd be unable to connect @@ -1638,42 +1583,18 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) * doesn't have some failure mode with --clean. 
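The query just below leans on booleans sorting false before true to put template1 ahead of the alphabetical list; the same trick in isolation:

    -- (datname <> 'template1') is false only for template1, and false
    -- sorts first, so template1 leads the result.
    SELECT datname
    FROM pg_database
    ORDER BY (datname <> 'template1'), datname;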
*/ res = executeQuery(conn, - "SELECT datname, oid " + "SELECT datname " "FROM pg_database d " "WHERE datallowconn AND datconnlimit != -2 " "ORDER BY (datname <> 'template1'), datname"); - if (archDumpFormat == archNull && PQntuples(res) > 0) + if (PQntuples(res) > 0) fprintf(OPF, "--\n-- Databases\n--\n\n"); - /* - * If directory/tar/custom format is specified, create a subdirectory - * under the main directory and each database dump file or subdirectory - * will be created in that subdirectory by pg_dump. - */ - if (archDumpFormat != archNull) - { - char map_file_path[MAXPGPATH]; - - snprintf(db_subdir, MAXPGPATH, "%s/databases", filename); - - /* Create a subdirectory with 'databases' name under main directory. */ - if (mkdir(db_subdir, pg_dir_create_mode) != 0) - pg_fatal("could not create directory \"%s\": %m", db_subdir); - - snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename); - - /* Create a map file (to store dboid and dbname) */ - map_file = fopen(map_file_path, PG_BINARY_W); - if (!map_file) - pg_fatal("could not open file \"%s\": %m", map_file_path); - } - for (i = 0; i < PQntuples(res); i++) { char *dbname = PQgetvalue(res, i, 0); - char *oid = PQgetvalue(res, i, 1); - const char *create_opts = ""; + const char *create_opts; int ret; /* Skip template0, even if it's not marked !datallowconn. */ @@ -1687,27 +1608,9 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) continue; } - /* - * If this is not a plain format dump, then append dboid and dbname to - * the map.dat file. - */ - if (archDumpFormat != archNull) - { - if (archDumpFormat == archCustom) - snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".dmp", db_subdir, oid); - else if (archDumpFormat == archTar) - snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".tar", db_subdir, oid); - else - snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\"", db_subdir, oid); - - /* Put one line entry for dboid and dbname in map file. 
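For reference, the map.dat entries written here (and removed by this patch) were plain "oid dbname" lines, one per database; illustrative content with hypothetical OIDs and names:

    16384 mydb
    16385 sales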
*/ - fprintf(map_file, "%s %s\n", oid, dbname); - } - pg_log_info("dumping database \"%s\"", dbname); - if (archDumpFormat == archNull) - fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname); + fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname); /* * We assume that "template1" and "postgres" already exist in the @@ -1721,9 +1624,12 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) { if (output_clean) create_opts = "--clean --create"; - /* Since pg_dump won't emit a \connect command, we must */ - else if (archDumpFormat == archNull) + else + { + create_opts = ""; + /* Since pg_dump won't emit a \connect command, we must */ fprintf(OPF, "\\connect %s\n\n", dbname); + } } else create_opts = "--create"; @@ -1731,30 +1637,19 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) if (filename) fclose(OPF); - ret = runPgDump(dbname, create_opts, dbfilepath, archDumpFormat); + ret = runPgDump(dbname, create_opts); if (ret != 0) pg_fatal("pg_dump failed on database \"%s\", exiting", dbname); if (filename) { - char global_path[MAXPGPATH]; - - if (archDumpFormat != archNull) - snprintf(global_path, MAXPGPATH, "%s/global.dat", filename); - else - snprintf(global_path, MAXPGPATH, "%s", filename); - - OPF = fopen(global_path, PG_BINARY_A); + OPF = fopen(filename, PG_BINARY_A); if (!OPF) pg_fatal("could not re-open the output file \"%s\": %m", - global_path); + filename); } } - /* Close map file */ - if (archDumpFormat != archNull) - fclose(map_file); - PQclear(res); } @@ -1764,8 +1659,7 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat) * Run pg_dump on dbname, with specified options. */ static int -runPgDump(const char *dbname, const char *create_opts, char *dbfile, - ArchiveFormat archDumpFormat) +runPgDump(const char *dbname, const char *create_opts) { PQExpBufferData connstrbuf; PQExpBufferData cmd; @@ -1774,36 +1668,17 @@ runPgDump(const char *dbname, const char *create_opts, char *dbfile, initPQExpBuffer(&connstrbuf); initPQExpBuffer(&cmd); + printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin, + pgdumpopts->data, create_opts); + /* - * If this is not a plain format dump, then append file name and dump - * format to the pg_dump command to get archive dump. + * If we have a filename, use the undocumented plain-append pg_dump + * format. */ - if (archDumpFormat != archNull) - { - printfPQExpBuffer(&cmd, "\"%s\" -f %s %s", pg_dump_bin, - dbfile, create_opts); - - if (archDumpFormat == archDirectory) - appendPQExpBufferStr(&cmd, " --format=directory "); - else if (archDumpFormat == archCustom) - appendPQExpBufferStr(&cmd, " --format=custom "); - else if (archDumpFormat == archTar) - appendPQExpBufferStr(&cmd, " --format=tar "); - } + if (filename) + appendPQExpBufferStr(&cmd, " -Fa "); else - { - printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin, - pgdumpopts->data, create_opts); - - /* - * If we have a filename, use the undocumented plain-append pg_dump - * format. - */ - if (filename) - appendPQExpBufferStr(&cmd, " -Fa "); - else - appendPQExpBufferStr(&cmd, " -Fp "); - } + appendPQExpBufferStr(&cmd, " -Fp "); /* * Append the database name to the already-constructed stem of connection @@ -1948,36 +1823,3 @@ read_dumpall_filters(const char *filename, SimpleStringList *pattern) filter_free(&fstate); } - -/* - * parseDumpFormat - * - * This will validate dump formats. 
- */ -static ArchiveFormat -parseDumpFormat(const char *format) -{ - ArchiveFormat archDumpFormat; - - if (pg_strcasecmp(format, "c") == 0) - archDumpFormat = archCustom; - else if (pg_strcasecmp(format, "custom") == 0) - archDumpFormat = archCustom; - else if (pg_strcasecmp(format, "d") == 0) - archDumpFormat = archDirectory; - else if (pg_strcasecmp(format, "directory") == 0) - archDumpFormat = archDirectory; - else if (pg_strcasecmp(format, "p") == 0) - archDumpFormat = archNull; - else if (pg_strcasecmp(format, "plain") == 0) - archDumpFormat = archNull; - else if (pg_strcasecmp(format, "t") == 0) - archDumpFormat = archTar; - else if (pg_strcasecmp(format, "tar") == 0) - archDumpFormat = archTar; - else - pg_fatal("unrecognized output format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"", - format); - - return archDumpFormat; -} diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c index 6ef789cb06d..6c129278bc5 100644 --- a/src/bin/pg_dump/pg_restore.c +++ b/src/bin/pg_dump/pg_restore.c @@ -2,7 +2,7 @@ * * pg_restore.c * pg_restore is an utility extracting postgres database definitions - * from a backup archive created by pg_dump/pg_dumpall using the archiver + * from a backup archive created by pg_dump using the archiver * interface. * * pg_restore will read the backup archive and @@ -41,15 +41,11 @@ #include "postgres_fe.h" #include <ctype.h> -#include <sys/stat.h> #ifdef HAVE_TERMIOS_H #include <termios.h> #endif -#include "common/string.h" -#include "connectdb.h" #include "fe_utils/option_utils.h" -#include "fe_utils/string_utils.h" #include "filter.h" #include "getopt_long.h" #include "parallel.h" @@ -57,43 +53,18 @@ static void usage(const char *progname); static void read_restore_filters(const char *filename, RestoreOptions *opts); -static bool file_exists_in_directory(const char *dir, const char *filename); -static int restore_one_database(const char *inputFileSpec, RestoreOptions *opts, - int numWorkers, bool append_data, int num); -static int read_one_statement(StringInfo inBuf, FILE *pfile); -static int restore_all_databases(PGconn *conn, const char *dumpdirpath, - SimpleStringList db_exclude_patterns, RestoreOptions *opts, int numWorkers); -static int process_global_sql_commands(PGconn *conn, const char *dumpdirpath, - const char *outfile); -static void copy_or_print_global_file(const char *outfile, FILE *pfile); -static int get_dbnames_list_to_restore(PGconn *conn, - SimplePtrList *dbname_oid_list, - SimpleStringList db_exclude_patterns); -static int get_dbname_oid_list_from_mfile(const char *dumpdirpath, - SimplePtrList *dbname_oid_list); - -/* - * Stores a database OID and the corresponding name. 
- */ -typedef struct DbOidName -{ - Oid oid; - char str[FLEXIBLE_ARRAY_MEMBER]; /* null-terminated string here */ -} DbOidName; - int main(int argc, char **argv) { RestoreOptions *opts; int c; + int exit_code; int numWorkers = 1; + Archive *AH; char *inputFileSpec; bool data_only = false; bool schema_only = false; - int n_errors = 0; - bool globals_only = false; - SimpleStringList db_exclude_patterns = {NULL, NULL}; static int disable_triggers = 0; static int enable_row_security = 0; static int if_exists = 0; @@ -111,15 +82,12 @@ main(int argc, char **argv) static int no_subscriptions = 0; static int strict_names = 0; static int statistics_only = 0; - static int with_data = 0; - static int with_schema = 0; static int with_statistics = 0; struct option cmdopts[] = { {"clean", 0, NULL, 'c'}, {"create", 0, NULL, 'C'}, {"data-only", 0, NULL, 'a'}, - {"globals-only", 0, NULL, 'g'}, {"dbname", 1, NULL, 'd'}, {"exit-on-error", 0, NULL, 'e'}, {"exclude-schema", 1, NULL, 'N'}, @@ -169,12 +137,9 @@ main(int argc, char **argv) {"no-security-labels", no_argument, &no_security_labels, 1}, {"no-subscriptions", no_argument, &no_subscriptions, 1}, {"no-statistics", no_argument, &no_statistics, 1}, - {"with-data", no_argument, &with_data, 1}, - {"with-schema", no_argument, &with_schema, 1}, - {"with-statistics", no_argument, &with_statistics, 1}, + {"statistics", no_argument, &with_statistics, 1}, {"statistics-only", no_argument, &statistics_only, 1}, {"filter", required_argument, NULL, 4}, - {"exclude-database", required_argument, NULL, 6}, {NULL, 0, NULL, 0} }; @@ -203,7 +168,7 @@ main(int argc, char **argv) } } - while ((c = getopt_long(argc, argv, "acCd:ef:F:gh:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1", + while ((c = getopt_long(argc, argv, "acCd:ef:F:h:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1", cmdopts, NULL)) != -1) { switch (c) @@ -230,14 +195,11 @@ main(int argc, char **argv) if (strlen(optarg) != 0) opts->formatName = pg_strdup(optarg); break; - case 'g': - /* restore only global.dat file from directory */ - globals_only = true; - break; case 'h': if (strlen(optarg) != 0) opts->cparams.pghost = pg_strdup(optarg); break; + case 'j': /* number of restore jobs */ if (!option_parse_int(optarg, "-j/--jobs", 1, PG_MAX_JOBS, @@ -352,9 +314,6 @@ main(int argc, char **argv) exit(1); opts->exit_on_error = true; break; - case 6: /* database patterns to skip */ - simple_string_list_append(&db_exclude_patterns, optarg); - break; default: /* getopt_long already emitted a complaint */ @@ -382,13 +341,6 @@ main(int argc, char **argv) if (!opts->cparams.dbname && !opts->filename && !opts->tocSummary) pg_fatal("one of -d/--dbname and -f/--file must be specified"); - if (db_exclude_patterns.head != NULL && globals_only) - { - pg_log_error("option --exclude-database cannot be used together with -g/--globals-only"); - pg_log_error_hint("Try \"%s --help\" for more information.", progname); - exit_nicely(1); - } - /* Should get at most one of -d and -f, else user is confused */ if (opts->cparams.dbname) { @@ -417,13 +369,17 @@ main(int argc, char **argv) if (statistics_only && no_statistics) pg_fatal("options --statistics-only and --no-statistics cannot be used together"); - /* reject conflicting "with-" and "no-" options */ - if (with_data && no_data) - pg_fatal("options --with-data and --no-data cannot be used together"); - if (with_schema && no_schema) - pg_fatal("options --with-schema and --no-schema cannot be used together"); + /* reject conflicting "no-" options */ if (with_statistics && no_statistics) - pg_fatal("options 
--with-statistics and --no-statistics cannot be used together"); + pg_fatal("options --statistics and --no-statistics cannot be used together"); + + /* reject conflicting "only-" options */ + if (data_only && with_statistics) + pg_fatal("options %s and %s cannot be used together", + "-a/--data-only", "--statistics"); + if (schema_only && with_statistics) + pg_fatal("options %s and %s cannot be used together", + "-s/--schema-only", "--statistics"); if (data_only && opts->dropSchema) pg_fatal("options -c/--clean and -a/--data-only cannot be used together"); @@ -443,16 +399,14 @@ main(int argc, char **argv) pg_fatal("cannot specify both --single-transaction and multiple jobs"); /* - * Set derivative flags. An "-only" option may be overridden by an - * explicit "with-" option; e.g. "--schema-only --with-statistics" will - * include schema and statistics. Other ambiguous or nonsensical - * combinations, e.g. "--schema-only --no-schema", will have already - * caused an error in one of the checks above. + * Set derivative flags. Ambiguous or nonsensical combinations, e.g. + * "--schema-only --no-schema", will have already caused an error in one + * of the checks above. */ opts->dumpData = ((opts->dumpData && !schema_only && !statistics_only) || - (data_only || with_data)) && !no_data; + data_only) && !no_data; opts->dumpSchema = ((opts->dumpSchema && !data_only && !statistics_only) || - (schema_only || with_schema)) && !no_schema; + schema_only) && !no_schema; opts->dumpStatistics = ((opts->dumpStatistics && !schema_only && !data_only) || (statistics_only || with_statistics)) && !no_statistics; @@ -496,114 +450,6 @@ main(int argc, char **argv) opts->formatName); } - /* - * If toc.dat file is not present in the current path, then check for - * global.dat. If global.dat file is present, then restore all the - * databases from map.dat (if it exists), but skip restoring those - * matching --exclude-database patterns. - */ - if (inputFileSpec != NULL && !file_exists_in_directory(inputFileSpec, "toc.dat") && - file_exists_in_directory(inputFileSpec, "global.dat")) - { - PGconn *conn = NULL; /* Connection to restore global sql - * commands. */ - - /* - * Can only use --list or --use-list options with a single database - * dump. - */ - if (opts->tocSummary) - pg_fatal("option -l/--list cannot be used when restoring an archive created by pg_dumpall"); - else if (opts->tocFile) - pg_fatal("option -L/--use-list cannot be used when restoring an archive created by pg_dumpall"); - - /* - * To restore from a pg_dumpall archive, -C (create database) option - * must be specified unless we are only restoring globals. - */ - if (!globals_only && opts->createDB != 1) - { - pg_log_error("option -C/--create must be specified when restoring an archive created by pg_dumpall"); - pg_log_error_hint("Try \"%s --help\" for more information.", progname); - pg_log_error_hint("Individual databases can be restored using their specific archives."); - exit_nicely(1); - } - - /* - * Connect to the database to execute global sql commands from - * global.dat file. - */ - if (opts->cparams.dbname) - { - conn = ConnectDatabase(opts->cparams.dbname, NULL, opts->cparams.pghost, - opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT, - false, progname, NULL, NULL, NULL, NULL); - - - if (!conn) - pg_fatal("could not connect to database \"%s\"", opts->cparams.dbname); - } - - /* If globals-only, then return from here. */ - if (globals_only) - { - /* - * Open global.dat file and execute/append all the global sql - * commands. 
- */ - n_errors = process_global_sql_commands(conn, inputFileSpec, - opts->filename); - - if (conn) - PQfinish(conn); - - pg_log_info("database restoring skipped because option -g/--globals-only was specified"); - } - else - { - /* Now restore all the databases from map.dat */ - n_errors = restore_all_databases(conn, inputFileSpec, db_exclude_patterns, - opts, numWorkers); - } - - /* Free db pattern list. */ - simple_string_list_destroy(&db_exclude_patterns); - } - else /* process if global.dat file does not exist. */ - { - if (db_exclude_patterns.head != NULL) - pg_fatal("option --exclude-database can be used only when restoring an archive created by pg_dumpall"); - - if (globals_only) - pg_fatal("option -g/--globals-only can be used only when restoring an archive created by pg_dumpall"); - - n_errors = restore_one_database(inputFileSpec, opts, numWorkers, false, 0); - } - - /* Done, print a summary of ignored errors during restore. */ - if (n_errors) - { - pg_log_warning("errors ignored on restore: %d", n_errors); - return 1; - } - - return 0; -} - -/* - * restore_one_database - * - * This will restore one database using toc.dat file. - * - * returns the number of errors while doing restore. - */ -static int -restore_one_database(const char *inputFileSpec, RestoreOptions *opts, - int numWorkers, bool append_data, int num) -{ - Archive *AH; - int n_errors; - AH = OpenArchive(inputFileSpec, opts->format); SetArchiveOptions(AH, NULL, opts); @@ -611,15 +457,9 @@ restore_one_database(const char *inputFileSpec, RestoreOptions *opts, /* * We don't have a connection yet but that doesn't matter. The connection * is initialized to NULL and if we terminate through exit_nicely() while - * it's still NULL, the cleanup function will just be a no-op. If we are - * restoring multiple databases, then only update AX handle for cleanup as - * the previous entry was already in the array and we had closed previous - * connection, so we can use the same array slot. + * it's still NULL, the cleanup function will just be a no-op. */ - if (!append_data || num == 0) - on_exit_close_archive(AH); - else - replace_on_exit_close_archive(AH); + on_exit_close_archive(AH); /* Let the archiver know how noisy to be */ AH->verbose = opts->verbose; @@ -639,21 +479,25 @@ restore_one_database(const char *inputFileSpec, RestoreOptions *opts, else { ProcessArchiveRestoreOptions(AH); - RestoreArchive(AH, append_data); + RestoreArchive(AH); } - n_errors = AH->n_errors; + /* done, print a summary of ignored errors */ + if (AH->n_errors) + pg_log_warning("errors ignored on restore: %d", AH->n_errors); /* AH may be freed in CloseArchive? */ + exit_code = AH->n_errors ? 1 : 0; + CloseArchive(AH); - return n_errors; + return exit_code; } static void usage(const char *progname) { - printf(_("%s restores PostgreSQL databases from archives created by pg_dump or pg_dumpall.\n\n"), progname); + printf(_("%s restores a PostgreSQL database from an archive created by pg_dump.\n\n"), progname); printf(_("Usage:\n")); printf(_(" %s [OPTION]... 
[FILE]\n"), progname); @@ -671,7 +515,6 @@ usage(const char *progname) printf(_(" -c, --clean clean (drop) database objects before recreating\n")); printf(_(" -C, --create create the target database\n")); printf(_(" -e, --exit-on-error exit on error, default is to continue\n")); - printf(_(" -g, --globals-only restore only global objects, no databases\n")); printf(_(" -I, --index=NAME restore named index\n")); printf(_(" -j, --jobs=NUM use this many parallel jobs to restore\n")); printf(_(" -L, --use-list=FILENAME use table of contents from this file for\n" @@ -688,7 +531,6 @@ usage(const char *progname) printf(_(" -1, --single-transaction restore as a single transaction\n")); printf(_(" --disable-triggers disable triggers during data-only restore\n")); printf(_(" --enable-row-security enable row security\n")); - printf(_(" --exclude-database=PATTERN do not restore the specified database(s)\n")); printf(_(" --filter=FILENAME restore or skip objects based on expressions\n" " in FILENAME\n")); printf(_(" --if-exists use IF EXISTS when dropping objects\n")); @@ -705,6 +547,7 @@ usage(const char *progname) printf(_(" --no-table-access-method do not restore table access methods\n")); printf(_(" --no-tablespaces do not restore tablespace assignments\n")); printf(_(" --section=SECTION restore named section (pre-data, data, or post-data)\n")); + printf(_(" --statistics restore the statistics\n")); printf(_(" --statistics-only restore only the statistics, not schema or data\n")); printf(_(" --strict-names require table and/or schema include patterns to\n" " match at least one entity each\n")); @@ -712,9 +555,6 @@ usage(const char *progname) printf(_(" --use-set-session-authorization\n" " use SET SESSION AUTHORIZATION commands instead of\n" " ALTER OWNER commands to set ownership\n")); - printf(_(" --with-data restore the data\n")); - printf(_(" --with-schema restore the schema\n")); - printf(_(" --with-statistics restore the statistics\n")); printf(_("\nConnection options:\n")); printf(_(" -h, --host=HOSTNAME database server host or socket directory\n")); @@ -725,8 +565,8 @@ usage(const char *progname) printf(_(" --role=ROLENAME do SET ROLE before restore\n")); printf(_("\n" - "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be\n" - "combined and specified multiple times to select multiple objects.\n")); + "The options -I, -n, -N, -P, -t, -T, and --section can be combined and specified\n" + "multiple times to select multiple objects.\n")); printf(_("\nIf no input file name is supplied, then standard input is used.\n\n")); printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT); printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL); @@ -831,585 +671,3 @@ read_restore_filters(const char *filename, RestoreOptions *opts) filter_free(&fstate); } - -/* - * file_exists_in_directory - * - * Returns true if the file exists in the given directory. - */ -static bool -file_exists_in_directory(const char *dir, const char *filename) -{ - struct stat st; - char buf[MAXPGPATH]; - - if (snprintf(buf, MAXPGPATH, "%s/%s", dir, filename) >= MAXPGPATH) - pg_fatal("directory name too long: \"%s\"", dir); - - return (stat(buf, &st) == 0 && S_ISREG(st.st_mode)); -} - -/* - * read_one_statement - * - * This will start reading from passed file pointer using fgetc and read till - * semicolon(sql statement terminator for global.dat file) - * - * EOF is returned if end-of-file input is seen; time to shut down. 
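Illustrative global.dat content (role name hypothetical) that this reader would have returned as two separate statements:

    CREATE ROLE alice LOGIN;
    ALTER ROLE alice SET search_path TO 'app';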
- */ - -static int -read_one_statement(StringInfo inBuf, FILE *pfile) -{ - int c; /* character read from getc() */ - int m; - - StringInfoData q; - - initStringInfo(&q); - - resetStringInfo(inBuf); - - /* - * Read characters until EOF or the appropriate delimiter is seen. - */ - while ((c = fgetc(pfile)) != EOF) - { - if (c != '\'' && c != '"' && c != '\n' && c != ';') - { - appendStringInfoChar(inBuf, (char) c); - while ((c = fgetc(pfile)) != EOF) - { - if (c != '\'' && c != '"' && c != ';' && c != '\n') - appendStringInfoChar(inBuf, (char) c); - else - break; - } - } - - if (c == '\'' || c == '"') - { - appendStringInfoChar(&q, (char) c); - m = c; - - while ((c = fgetc(pfile)) != EOF) - { - appendStringInfoChar(&q, (char) c); - - if (c == m) - { - appendStringInfoString(inBuf, q.data); - resetStringInfo(&q); - break; - } - } - } - - if (c == ';') - { - appendStringInfoChar(inBuf, (char) ';'); - break; - } - - if (c == '\n') - appendStringInfoChar(inBuf, (char) '\n'); - } - - pg_free(q.data); - - /* No input before EOF signal means time to quit. */ - if (c == EOF && inBuf->len == 0) - return EOF; - - /* return something that's not EOF */ - return 'Q'; -} - -/* - * get_dbnames_list_to_restore - * - * This will mark for skipping any entries from dbname_oid_list that pattern match an - * entry in the db_exclude_patterns list. - * - * Returns the number of database to be restored. - * - */ -static int -get_dbnames_list_to_restore(PGconn *conn, - SimplePtrList *dbname_oid_list, - SimpleStringList db_exclude_patterns) -{ - int count_db = 0; - PQExpBuffer query; - PGresult *res; - - query = createPQExpBuffer(); - - if (!conn) - pg_log_info("considering PATTERN as NAME for --exclude-database option as no database connection while doing pg_restore"); - - /* - * Process one by one all dbnames and if specified to skip restoring, then - * remove dbname from list. 
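Schematically, the loop below probed each (database, pattern) pair with a trivial SELECT; the literal regex here is only an illustration, since the real condition comes from processSQLNamePattern():

    -- Returns a row iff the database name matches the pattern.
    SELECT 1 WHERE 'mydb' ~ '^(my.*)$';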
- */ - for (SimplePtrListCell *db_cell = dbname_oid_list->head; - db_cell; db_cell = db_cell->next) - { - DbOidName *dbidname = (DbOidName *) db_cell->ptr; - bool skip_db_restore = false; - PQExpBuffer db_lit = createPQExpBuffer(); - - appendStringLiteralConn(db_lit, dbidname->str, conn); - - for (SimpleStringListCell *pat_cell = db_exclude_patterns.head; pat_cell; pat_cell = pat_cell->next) - { - /* - * If there is an exact match then we don't need to try a pattern - * match - */ - if (pg_strcasecmp(dbidname->str, pat_cell->val) == 0) - skip_db_restore = true; - /* Otherwise, try a pattern match if there is a connection */ - else if (conn) - { - int dotcnt; - - appendPQExpBufferStr(query, "SELECT 1 "); - processSQLNamePattern(conn, query, pat_cell->val, false, - false, NULL, db_lit->data, - NULL, NULL, NULL, &dotcnt); - - if (dotcnt > 0) - { - pg_log_error("improper qualified name (too many dotted names): %s", - dbidname->str); - PQfinish(conn); - exit_nicely(1); - } - - res = executeQuery(conn, query->data); - - if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res)) - { - skip_db_restore = true; - pg_log_info("database name \"%s\" matches exclude pattern \"%s\"", dbidname->str, pat_cell->val); - } - - PQclear(res); - resetPQExpBuffer(query); - } - - if (skip_db_restore) - break; - } - - destroyPQExpBuffer(db_lit); - - /* - * Mark db to be skipped or increment the counter of dbs to be - * restored - */ - if (skip_db_restore) - { - pg_log_info("excluding database \"%s\"", dbidname->str); - dbidname->oid = InvalidOid; - } - else - { - count_db++; - } - } - - destroyPQExpBuffer(query); - - return count_db; -} - -/* - * get_dbname_oid_list_from_mfile - * - * Open map.dat file and read line by line and then prepare a list of database - * names and corresponding db_oid. - * - * Returns, total number of database names in map.dat file. - */ -static int -get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oid_list) -{ - StringInfoData linebuf; - FILE *pfile; - char map_file_path[MAXPGPATH]; - int count = 0; - - - /* - * If there is only global.dat file in dump, then return from here as - * there is no database to restore. - */ - if (!file_exists_in_directory(dumpdirpath, "map.dat")) - { - pg_log_info("database restoring is skipped because file \"%s\" does not exist in directory \"%s\"", "map.dat", dumpdirpath); - return 0; - } - - snprintf(map_file_path, MAXPGPATH, "%s/map.dat", dumpdirpath); - - /* Open map.dat file. */ - pfile = fopen(map_file_path, PG_BINARY_R); - - if (pfile == NULL) - pg_fatal("could not open file \"%s\": %m", map_file_path); - - initStringInfo(&linebuf); - - /* Append all the dbname/db_oid combinations to the list. */ - while (pg_get_line_buf(pfile, &linebuf)) - { - Oid db_oid = InvalidOid; - char *dbname; - DbOidName *dbidname; - int namelen; - char *p = linebuf.data; - - /* Extract dboid. */ - while (isdigit((unsigned char) *p)) - p++; - if (p > linebuf.data && *p == ' ') - { - sscanf(linebuf.data, "%u", &db_oid); - p++; - } - - /* dbname is the rest of the line */ - dbname = p; - namelen = strlen(dbname); - - /* Report error and exit if the file has any corrupted data. 
-/*
- * get_dbname_oid_list_from_mfile
- *
- * Open the map.dat file, read it line by line, and build a list of database
- * names and corresponding db_oids.
- *
- * Returns the total number of database names found in the map.dat file.
- */
-static int
-get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oid_list)
-{
-	StringInfoData linebuf;
-	FILE	   *pfile;
-	char		map_file_path[MAXPGPATH];
-	int			count = 0;
-
-	/*
-	 * If the dump contains only a global.dat file, return here as there is
-	 * no database to restore.
-	 */
-	if (!file_exists_in_directory(dumpdirpath, "map.dat"))
-	{
-		pg_log_info("database restoring is skipped because file \"%s\" does not exist in directory \"%s\"", "map.dat", dumpdirpath);
-		return 0;
-	}
-
-	snprintf(map_file_path, MAXPGPATH, "%s/map.dat", dumpdirpath);
-
-	/* Open map.dat file. */
-	pfile = fopen(map_file_path, PG_BINARY_R);
-
-	if (pfile == NULL)
-		pg_fatal("could not open file \"%s\": %m", map_file_path);
-
-	initStringInfo(&linebuf);
-
-	/* Append all the dbname/db_oid combinations to the list. */
-	while (pg_get_line_buf(pfile, &linebuf))
-	{
-		Oid			db_oid = InvalidOid;
-		char	   *dbname;
-		DbOidName  *dbidname;
-		int			namelen;
-		char	   *p = linebuf.data;
-
-		/* Extract dboid. */
-		while (isdigit((unsigned char) *p))
-			p++;
-		if (p > linebuf.data && *p == ' ')
-		{
-			sscanf(linebuf.data, "%u", &db_oid);
-			p++;
-		}
-
-		/* dbname is the rest of the line; it still ends with a newline */
-		dbname = p;
-		namelen = strlen(dbname);
-
-		/* Report an error and exit if the file has any corrupted data. */
-		if (!OidIsValid(db_oid) || namelen <= 1)
-			pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path,
-					 count + 1);
-
-		pg_log_info("found database \"%s\" (OID: %u) in file \"%s\"",
-					dbname, db_oid, map_file_path);
-
-		dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1);
-		dbidname->oid = db_oid;
-		/* passing namelen as size makes strlcpy drop the trailing newline */
-		strlcpy(dbidname->str, dbname, namelen);
-
-		simple_ptr_list_append(dbname_oid_list, dbidname);
-		count++;
-	}
-
-	/* Close map.dat file. */
-	fclose(pfile);
-
-	return count;
-}
-
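One subtlety in the parser above: pg_get_line_buf() keeps the trailing newline, so namelen counts it, and calling strlcpy() with size namelen copies only namelen - 1 characters before NUL-terminating, silently dropping the newline. A minimal demonstration (hypothetical; strlcpy() comes from src/port on platforms whose libc lacks it, declared here just for the sketch):

#include <stdio.h>
#include <string.h>

extern size_t strlcpy(char *dst, const char *src, size_t siz);

int
main(void)
{
	const char *dbname = "mydb\n";			/* rest of a map.dat line */
	char		copy[64];
	size_t		namelen = strlen(dbname);	/* 5, including the '\n' */

	strlcpy(copy, dbname, namelen);			/* copies "mydb", adds NUL */
	printf("[%s]\n", copy);					/* prints [mydb] */
	return 0;
}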
-/*
- * restore_all_databases
- *
- * Restore the databases whose dumps are present in the directory, based on
- * the map.dat file mapping.
- *
- * Databases specified with the exclude-database option are skipped.
- *
- * Returns the number of errors encountered during the restore.
- */
-static int
-restore_all_databases(PGconn *conn, const char *dumpdirpath,
-					  SimpleStringList db_exclude_patterns, RestoreOptions *opts,
-					  int numWorkers)
-{
-	SimplePtrList dbname_oid_list = {NULL, NULL};
-	int			num_db_restore = 0;
-	int			num_total_db;
-	int			n_errors_total;
-	int			count = 0;
-	char	   *connected_db = NULL;
-	bool		dumpData = opts->dumpData;
-	bool		dumpSchema = opts->dumpSchema;
-	bool		dumpStatistics = opts->dumpStatistics;
-
-	/* Save the db name so it can be reused for all the databases. */
-	if (opts->cparams.dbname)
-		connected_db = opts->cparams.dbname;
-
-	num_total_db = get_dbname_oid_list_from_mfile(dumpdirpath, &dbname_oid_list);
-
-	/* If map.dat has no entries, return after processing global.dat */
-	if (dbname_oid_list.head == NULL)
-		return process_global_sql_commands(conn, dumpdirpath, opts->filename);
-
-	pg_log_info(ngettext("found %d database name in \"%s\"",
-						 "found %d database names in \"%s\"",
-						 num_total_db),
-				num_total_db, "map.dat");
-
-	if (!conn)
-	{
-		pg_log_info("trying to connect to database \"%s\"", "postgres");
-
-		conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost,
-							   opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
-							   false, progname, NULL, NULL, NULL, NULL);
-
-		/* Try with template1. */
-		if (!conn)
-		{
-			pg_log_info("trying to connect to database \"%s\"", "template1");
-
-			conn = ConnectDatabase("template1", NULL, opts->cparams.pghost,
-								   opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
-								   false, progname, NULL, NULL, NULL, NULL);
-		}
-	}
-
-	/*
-	 * Filter the db list according to the exclude patterns.
-	 */
-	num_db_restore = get_dbnames_list_to_restore(conn, &dbname_oid_list,
-												 db_exclude_patterns);
-
-	/* Open global.dat file and execute/append all the global sql commands. */
-	n_errors_total = process_global_sql_commands(conn, dumpdirpath, opts->filename);
-
-	/* Close the db connection as we are done with globals and patterns. */
-	if (conn)
-		PQfinish(conn);
-
-	/* Exit if no db needs to be restored. */
-	if (dbname_oid_list.head == NULL || num_db_restore == 0)
-	{
-		pg_log_info(ngettext("no database needs restoring out of %d database",
-							 "no database needs restoring out of %d databases", num_total_db),
-					num_total_db);
-		return n_errors_total;
-	}
-
-	pg_log_info("need to restore %d databases out of %d databases", num_db_restore, num_total_db);
-
-	/*
-	 * We have a list of databases to restore after processing the
-	 * exclude-database switch(es). Now we can restore them one by one.
-	 */
-	for (SimplePtrListCell *db_cell = dbname_oid_list.head;
-		 db_cell; db_cell = db_cell->next)
-	{
-		DbOidName  *dbidname = (DbOidName *) db_cell->ptr;
-		char		subdirpath[MAXPGPATH];
-		char		subdirdbpath[MAXPGPATH];
-		char		dbfilename[MAXPGPATH];
-		int			n_errors;
-
-		/* Ignore dbs marked for skipping. */
-		if (dbidname->oid == InvalidOid)
-			continue;
-
-		/*
-		 * We need to reset override_dbname so that objects can be restored
-		 * into an already created database. (used with -d/--dbname option)
-		 */
-		if (opts->cparams.override_dbname)
-		{
-			pfree(opts->cparams.override_dbname);
-			opts->cparams.override_dbname = NULL;
-		}
-
-		snprintf(subdirdbpath, MAXPGPATH, "%s/databases", dumpdirpath);
-
-		/*
-		 * Look for the database dump file/dir. If there is an {oid}.tar or
-		 * {oid}.dmp file, use it. Otherwise try to use a directory called
-		 * {oid}.
-		 */
-		snprintf(dbfilename, MAXPGPATH, "%u.tar", dbidname->oid);
-		if (file_exists_in_directory(subdirdbpath, dbfilename))
-			snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.tar", dumpdirpath, dbidname->oid);
-		else
-		{
-			snprintf(dbfilename, MAXPGPATH, "%u.dmp", dbidname->oid);
-
-			if (file_exists_in_directory(subdirdbpath, dbfilename))
-				snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.dmp", dumpdirpath, dbidname->oid);
-			else
-				snprintf(subdirpath, MAXPGPATH, "%s/databases/%u", dumpdirpath, dbidname->oid);
-		}
-
-		pg_log_info("restoring database \"%s\"", dbidname->str);
-
-		/* If the database is already created, don't set the createDB flag. */
-		if (opts->cparams.dbname)
-		{
-			PGconn	   *test_conn;
-
-			test_conn = ConnectDatabase(dbidname->str, NULL, opts->cparams.pghost,
-										opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
-										false, progname, NULL, NULL, NULL, NULL);
-			if (test_conn)
-			{
-				PQfinish(test_conn);
-
-				/* Use the already created database for the connection. */
-				opts->createDB = 0;
-				opts->cparams.dbname = dbidname->str;
-			}
-			else
-			{
-				/* We'll have to create it. */
-				opts->createDB = 1;
-				opts->cparams.dbname = connected_db;
-			}
-		}
-
-		/*
-		 * Reset flags - might have been reset in pg_backup_archiver.c by the
-		 * previous restore.
-		 */
-		opts->dumpData = dumpData;
-		opts->dumpSchema = dumpSchema;
-		opts->dumpStatistics = dumpStatistics;
-
-		/* Restore the single database. */
-		n_errors = restore_one_database(subdirpath, opts, numWorkers, true, count);
-
-		/* Print a summary of ignored errors during the single database restore. */
-		if (n_errors)
-		{
-			n_errors_total += n_errors;
-			pg_log_warning("errors ignored on database \"%s\" restore: %d", dbidname->str, n_errors);
-		}
-
-		count++;
-	}
-
-	/* Log the number of processed databases. */
-	pg_log_info("number of restored databases is %d", num_db_restore);
-
-	/* Free the dbname and dboid list. */
-	simple_ptr_list_destroy(&dbname_oid_list);
-
-	return n_errors_total;
-}
-
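The loop above probes for each per-database archive in three layouts. A standalone sketch of that resolution order, using access(2) in place of pg_dump's file_exists_in_directory() helper (names and buffer handling are illustrative):

#include <stdio.h>
#include <unistd.h>

static void
resolve_db_archive(const char *dumpdir, unsigned int oid,
				   char *path, size_t pathsize)
{
	snprintf(path, pathsize, "%s/databases/%u.tar", dumpdir, oid);
	if (access(path, F_OK) == 0)
		return;					/* tar-format archive */

	snprintf(path, pathsize, "%s/databases/%u.dmp", dumpdir, oid);
	if (access(path, F_OK) == 0)
		return;					/* custom-format dump file */

	/* otherwise assume a directory-format dump */
	snprintf(path, pathsize, "%s/databases/%u", dumpdir, oid);
}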
-/*
- * process_global_sql_commands
- *
- * Open global.dat and execute or copy the sql commands one by one.
- *
- * If outfile is not NULL, copy all sql commands into outfile rather than
- * executing them.
- *
- * Returns the number of errors while processing global.dat.
- */
-static int
-process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *outfile)
-{
-	char		global_file_path[MAXPGPATH];
-	PGresult   *result;
-	StringInfoData sqlstatement,
-				user_create;
-	FILE	   *pfile;
-	int			n_errors = 0;
-
-	snprintf(global_file_path, MAXPGPATH, "%s/global.dat", dumpdirpath);
-
-	/* Open global.dat file. */
-	pfile = fopen(global_file_path, PG_BINARY_R);
-
-	if (pfile == NULL)
-		pg_fatal("could not open file \"%s\": %m", global_file_path);
-
-	/*
-	 * If outfile is given, then just copy all global.dat file data into
-	 * outfile.
-	 */
-	if (outfile)
-	{
-		copy_or_print_global_file(outfile, pfile);
-		return 0;
-	}
-
-	/* Init sqlstatement to append commands. */
-	initStringInfo(&sqlstatement);
-
-	/* Creation statement for our current role. */
-	initStringInfo(&user_create);
-	appendStringInfoString(&user_create, "CREATE ROLE ");
-	/* should use fmtId here, but we don't know the encoding */
-	appendStringInfoString(&user_create, PQuser(conn));
-	appendStringInfoChar(&user_create, ';');
-
-	/* Process the file until EOF and execute the sql statements. */
-	while (read_one_statement(&sqlstatement, pfile) != EOF)
-	{
-		/* don't try to create the role we are connected as */
-		if (strstr(sqlstatement.data, user_create.data))
-			continue;
-
-		pg_log_info("executing query: %s", sqlstatement.data);
-		result = PQexec(conn, sqlstatement.data);
-
-		switch (PQresultStatus(result))
-		{
-			case PGRES_COMMAND_OK:
-			case PGRES_TUPLES_OK:
-			case PGRES_EMPTY_QUERY:
-				break;
-			default:
-				n_errors++;
-				pg_log_error("could not execute query: %s", PQerrorMessage(conn));
-				pg_log_error_detail("Command was: %s", sqlstatement.data);
-		}
-		PQclear(result);
-	}
-
-	/* Print a summary of the errors ignored while processing global.dat. */
-	if (n_errors)
-		pg_log_warning(ngettext("ignored %d error in file \"%s\"",
-								"ignored %d errors in file \"%s\"", n_errors),
-					   n_errors, global_file_path);
-	fclose(pfile);
-
-	return n_errors;
-}
-
-/*
- * copy_or_print_global_file
- *
- * Copy global.dat into the output file. If "-" is used as outfile, then
- * print the commands to stdout.
- */
-static void
-copy_or_print_global_file(const char *outfile, FILE *pfile)
-{
-	char		out_file_path[MAXPGPATH];
-	FILE	   *OPF;
-	int			c;
-
-	/* "-" is used for stdout. */
-	if (strcmp(outfile, "-") == 0)
-		OPF = stdout;
-	else
-	{
-		snprintf(out_file_path, MAXPGPATH, "%s", outfile);
-		OPF = fopen(out_file_path, PG_BINARY_W);
-
-		if (OPF == NULL)
-		{
-			fclose(pfile);
-			pg_fatal("could not open file: \"%s\"", outfile);
-		}
-	}
-
-	/* Append global.dat into the output file, or print to stdout. */
-	while ((c = fgetc(pfile)) != EOF)
-		fputc(c, OPF);
-
-	fclose(pfile);
-
-	/* Close the output file. 
*/ - if (strcmp(outfile, "-") != 0) - fclose(OPF); -} diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl index c3c5fae11ea..37d893d5e6a 100644 --- a/src/bin/pg_dump/t/001_basic.pl +++ b/src/bin/pg_dump/t/001_basic.pl @@ -237,24 +237,6 @@ command_fails_like( 'pg_restore: options -C\/--create and -1\/--single-transaction cannot be used together' ); -command_fails_like( - [ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ], - qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/, - 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only' -); - -command_fails_like( - [ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ], - qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/, - 'When option --exclude-database is used in pg_restore with dump of pg_dump' -); - -command_fails_like( - [ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ], - qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/, - 'When option --globals-only is not used in pg_restore with dump of pg_dump' -); - # also fails for -r and -t, but it seems pointless to add more tests for those. command_fails_like( [ 'pg_dumpall', '--exclude-database=foo', '--globals-only' ], @@ -262,8 +244,4 @@ command_fails_like( 'pg_dumpall: option --exclude-database cannot be used together with -g/--globals-only' ); -command_fails_like( - [ 'pg_dumpall', '--format', 'x' ], - qr/\Qpg_dumpall: error: unrecognized output format "x";\E/, - 'pg_dumpall: unrecognized output format'); done_testing(); diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index 6c7ec80e271..a86b38466de 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -68,7 +68,7 @@ my %pgdump_runs = ( '--no-data', '--sequence-data', '--binary-upgrade', - '--with-statistics', + '--statistics', '--dbname' => 'postgres', # alternative way to specify database ], restore_cmd => [ @@ -76,7 +76,7 @@ my %pgdump_runs = ( '--format' => 'custom', '--verbose', '--file' => "$tempdir/binary_upgrade.sql", - '--with-statistics', + '--statistics', "$tempdir/binary_upgrade.dump", ], }, @@ -90,13 +90,13 @@ my %pgdump_runs = ( '--format' => 'custom', '--compress' => '1', '--file' => "$tempdir/compression_gzip_custom.dump", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/compression_gzip_custom.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_gzip_custom.dump", ], command_like => { @@ -119,7 +119,7 @@ my %pgdump_runs = ( '--format' => 'directory', '--compress' => 'gzip:1', '--file' => "$tempdir/compression_gzip_dir", - '--with-statistics', + '--statistics', 'postgres', ], # Give coverage for manually compressed blobs.toc files during @@ -137,7 +137,7 @@ my %pgdump_runs = ( 'pg_restore', '--jobs' => '2', '--file' => "$tempdir/compression_gzip_dir.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_gzip_dir", ], }, @@ -150,7 +150,7 @@ my %pgdump_runs = ( '--format' => 'plain', '--compress' => '1', '--file' => "$tempdir/compression_gzip_plain.sql.gz", - '--with-statistics', + '--statistics', 'postgres', ], # Decompress the generated file to run through the tests. 
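The plain-format compression runs above rely on an external decompression step before the output is compared. A minimal standalone equivalent using zlib directly (hypothetical; the file name is illustrative and not part of the test suite):

#include <stdio.h>
#include <zlib.h>

int
main(void)
{
	gzFile		in = gzopen("defaults.sql.gz", "rb");
	char		line[8192];

	if (in == NULL)
		return 1;
	while (gzgets(in, line, sizeof(line)) != NULL)
		fputs(line, stdout);	/* decompressed SQL text */
	return gzclose(in) == Z_OK ? 0 : 1;
}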
@@ -169,13 +169,13 @@ my %pgdump_runs = ( '--format' => 'custom', '--compress' => 'lz4', '--file' => "$tempdir/compression_lz4_custom.dump", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/compression_lz4_custom.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_lz4_custom.dump", ], command_like => { @@ -198,7 +198,7 @@ my %pgdump_runs = ( '--format' => 'directory', '--compress' => 'lz4:1', '--file' => "$tempdir/compression_lz4_dir", - '--with-statistics', + '--statistics', 'postgres', ], # Verify that data files were compressed @@ -210,7 +210,7 @@ my %pgdump_runs = ( 'pg_restore', '--jobs' => '2', '--file' => "$tempdir/compression_lz4_dir.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_lz4_dir", ], }, @@ -223,7 +223,7 @@ my %pgdump_runs = ( '--format' => 'plain', '--compress' => 'lz4', '--file' => "$tempdir/compression_lz4_plain.sql.lz4", - '--with-statistics', + '--statistics', 'postgres', ], # Decompress the generated file to run through the tests. @@ -245,13 +245,13 @@ my %pgdump_runs = ( '--format' => 'custom', '--compress' => 'zstd', '--file' => "$tempdir/compression_zstd_custom.dump", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/compression_zstd_custom.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_zstd_custom.dump", ], command_like => { @@ -273,7 +273,7 @@ my %pgdump_runs = ( '--format' => 'directory', '--compress' => 'zstd:1', '--file' => "$tempdir/compression_zstd_dir", - '--with-statistics', + '--statistics', 'postgres', ], # Give coverage for manually compressed blobs.toc files during @@ -294,7 +294,7 @@ my %pgdump_runs = ( 'pg_restore', '--jobs' => '2', '--file' => "$tempdir/compression_zstd_dir.sql", - '--with-statistics', + '--statistics', "$tempdir/compression_zstd_dir", ], }, @@ -308,7 +308,7 @@ my %pgdump_runs = ( '--format' => 'plain', '--compress' => 'zstd:long', '--file' => "$tempdir/compression_zstd_plain.sql.zst", - '--with-statistics', + '--statistics', 'postgres', ], # Decompress the generated file to run through the tests. 
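The --compress arguments used throughout these runs take the form "algorithm[:detail]" ("gzip:1", "lz4:1", "zstd:long"). A sketch of splitting such a specification (not the actual parser; pg_dump uses parse_compress_specification() in src/common/compression.c, which additionally validates the detail part):

#include <stdio.h>
#include <string.h>

static void
split_compress_spec(const char *spec, char *algo, size_t algosz,
					const char **detail)
{
	const char *colon = strchr(spec, ':');

	if (colon)
	{
		snprintf(algo, algosz, "%.*s", (int) (colon - spec), spec);
		*detail = colon + 1;	/* a level ("1") or an option ("long") */
	}
	else
	{
		snprintf(algo, algosz, "%s", spec);
		*detail = NULL;			/* algorithm default */
	}
}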
@@ -327,7 +327,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/clean.sql", '--clean', - '--with-statistics', + '--statistics', '--dbname' => 'postgres', # alternative way to specify database ], }, @@ -338,7 +338,7 @@ my %pgdump_runs = ( '--clean', '--if-exists', '--encoding' => 'UTF8', # no-op, just for testing - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -357,7 +357,7 @@ my %pgdump_runs = ( '--create', '--no-reconnect', # no-op, just for testing '--verbose', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -376,7 +376,7 @@ my %pgdump_runs = ( dump_cmd => [ 'pg_dump', '--no-sync', '--file' => "$tempdir/defaults.sql", - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -385,7 +385,7 @@ my %pgdump_runs = ( dump_cmd => [ 'pg_dump', '--no-sync', '--file' => "$tempdir/defaults_no_public.sql", - '--with-statistics', + '--statistics', 'regress_pg_dump_test', ], }, @@ -395,7 +395,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--clean', '--file' => "$tempdir/defaults_no_public_clean.sql", - '--with-statistics', + '--statistics', 'regress_pg_dump_test', ], }, @@ -404,7 +404,7 @@ my %pgdump_runs = ( dump_cmd => [ 'pg_dump', '--no-sync', '--file' => "$tempdir/defaults_public_owner.sql", - '--with-statistics', + '--statistics', 'regress_public_owner', ], }, @@ -419,14 +419,14 @@ my %pgdump_runs = ( 'pg_dump', '--format' => 'custom', '--file' => "$tempdir/defaults_custom_format.dump", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--format' => 'custom', '--file' => "$tempdir/defaults_custom_format.sql", - '--with-statistics', + '--statistics', "$tempdir/defaults_custom_format.dump", ], command_like => { @@ -451,14 +451,14 @@ my %pgdump_runs = ( 'pg_dump', '--format' => 'directory', '--file' => "$tempdir/defaults_dir_format", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--format' => 'directory', '--file' => "$tempdir/defaults_dir_format.sql", - '--with-statistics', + '--statistics', "$tempdir/defaults_dir_format", ], command_like => { @@ -484,13 +484,13 @@ my %pgdump_runs = ( '--format' => 'directory', '--jobs' => 2, '--file' => "$tempdir/defaults_parallel", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/defaults_parallel.sql", - '--with-statistics', + '--statistics', "$tempdir/defaults_parallel", ], }, @@ -502,14 +502,14 @@ my %pgdump_runs = ( 'pg_dump', '--format' => 'tar', '--file' => "$tempdir/defaults_tar_format.tar", - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--format' => 'tar', '--file' => "$tempdir/defaults_tar_format.sql", - '--with-statistics', + '--statistics', "$tempdir/defaults_tar_format.tar", ], }, @@ -518,7 +518,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/exclude_dump_test_schema.sql", '--exclude-schema' => 'dump_test', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -527,7 +527,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/exclude_test_table.sql", '--exclude-table' => 'dump_test.test_table', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -536,7 +536,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/exclude_measurement.sql", '--exclude-table-and-children' => 'dump_test.measurement', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -546,7 +546,7 @@ my %pgdump_runs = ( '--file' => "$tempdir/exclude_measurement_data.sql", 
'--exclude-table-data-and-children' => 'dump_test.measurement', '--no-unlogged-table-data', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -556,7 +556,7 @@ my %pgdump_runs = ( '--file' => "$tempdir/exclude_test_table_data.sql", '--exclude-table-data' => 'dump_test.test_table', '--no-unlogged-table-data', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -575,7 +575,7 @@ my %pgdump_runs = ( '--file' => "$tempdir/pg_dumpall_globals.sql", '--globals-only', '--no-sync', - '--with-statistics', + '--statistics', ], }, pg_dumpall_globals_clean => { @@ -585,14 +585,14 @@ my %pgdump_runs = ( '--globals-only', '--clean', '--no-sync', - '--with-statistics', + '--statistics', ], }, pg_dumpall_dbprivs => { dump_cmd => [ 'pg_dumpall', '--no-sync', '--file' => "$tempdir/pg_dumpall_dbprivs.sql", - '--with-statistics', + '--statistics', ], }, pg_dumpall_exclude => { @@ -602,7 +602,7 @@ my %pgdump_runs = ( '--file' => "$tempdir/pg_dumpall_exclude.sql", '--exclude-database' => '*dump_test*', '--no-sync', - '--with-statistics', + '--statistics', ], }, no_toast_compression => { @@ -610,7 +610,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/no_toast_compression.sql", '--no-toast-compression', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -619,7 +619,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/no_large_objects.sql", '--no-large-objects', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -628,7 +628,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/no_policies.sql", '--no-policies', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -637,7 +637,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/no_privs.sql", '--no-privileges', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -646,7 +646,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/no_owner.sql", '--no-owner', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -655,7 +655,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/no_table_access_method.sql", '--no-table-access-method', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -664,7 +664,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/only_dump_test_schema.sql", '--schema' => 'dump_test', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -675,7 +675,7 @@ my %pgdump_runs = ( '--table' => 'dump_test.test_table', '--lock-wait-timeout' => (1000 * $PostgreSQL::Test::Utils::timeout_default), - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -686,7 +686,7 @@ my %pgdump_runs = ( '--table-and-children' => 'dump_test.measurement', '--lock-wait-timeout' => (1000 * $PostgreSQL::Test::Utils::timeout_default), - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -696,7 +696,7 @@ my %pgdump_runs = ( '--file' => "$tempdir/role.sql", '--role' => 'regress_dump_test_role', '--schema' => 'dump_test_second_schema', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -709,13 +709,13 @@ my %pgdump_runs = ( '--file' => "$tempdir/role_parallel", '--role' => 'regress_dump_test_role', '--schema' => 'dump_test_second_schema', - '--with-statistics', + '--statistics', 'postgres', ], restore_cmd => [ 'pg_restore', '--file' => "$tempdir/role_parallel.sql", - '--with-statistics', + '--statistics', "$tempdir/role_parallel", ], }, @@ -744,7 +744,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/section_pre_data.sql", 
'--section' => 'pre-data', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -753,7 +753,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/section_data.sql", '--section' => 'data', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -762,7 +762,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', '--file' => "$tempdir/section_post_data.sql", '--section' => 'post-data', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -773,7 +773,7 @@ my %pgdump_runs = ( '--schema' => 'dump_test', '--large-objects', '--no-large-objects', - '--with-statistics', + '--statistics', 'postgres', ], }, @@ -789,7 +789,7 @@ my %pgdump_runs = ( 'pg_dump', '--no-sync', "--file=$tempdir/no_data_no_schema.sql", '--no-data', '--no-schema', 'postgres', - '--with-statistics', + '--statistics', ], }, statistics_only => { @@ -799,18 +799,11 @@ my %pgdump_runs = ( 'postgres', ], }, - schema_only_with_statistics => { - dump_cmd => [ - 'pg_dump', '--no-sync', - "--file=$tempdir/schema_only_with_statistics.sql", - '--schema-only', '--with-statistics', 'postgres', - ], - }, no_schema => { dump_cmd => [ 'pg_dump', '--no-sync', "--file=$tempdir/no_schema.sql", '--no-schema', - '--with-statistics', 'postgres', + '--statistics', 'postgres', ], },); @@ -5212,6 +5205,17 @@ command_fails_like( 'pg_dump', '--port' => $port, '--strict-names', + '--schema-only', + '--statistics', + ], + qr/\Qpg_dump: error: options -s\/--schema-only and --statistics cannot be used together\E/, + 'cannot use --schema-only and --statistics together'); + +command_fails_like( + [ + 'pg_dump', + '--port' => $port, + '--strict-names', '--table' => 'nonexistent*' ], qr/\Qpg_dump: error: no matching tables were found for pattern\E/, diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl deleted file mode 100644 index c274b777586..00000000000 --- a/src/bin/pg_dump/t/006_pg_dumpall.pl +++ /dev/null @@ -1,400 +0,0 @@ -# Copyright (c) 2021-2025, PostgreSQL Global Development Group - -use strict; -use warnings FATAL => 'all'; - -use PostgreSQL::Test::Cluster; -use PostgreSQL::Test::Utils; -use Test::More; - -my $tempdir = PostgreSQL::Test::Utils::tempdir; -my $run_db = 'postgres'; -my $sep = $windows_os ? "\\" : "/"; - -# Tablespace locations used by "restore_tablespace" test case. -my $tablespace1 = "${tempdir}${sep}tbl1"; -my $tablespace2 = "${tempdir}${sep}tbl2"; -mkdir($tablespace1) || die "mkdir $tablespace1 $!"; -mkdir($tablespace2) || die "mkdir $tablespace2 $!"; - -# Scape tablespace locations on Windows. -$tablespace1 = $windows_os ? ($tablespace1 =~ s/\\/\\\\/gr) : $tablespace1; -$tablespace2 = $windows_os ? ($tablespace2 =~ s/\\/\\\\/gr) : $tablespace2; - -# Where pg_dumpall will be executed. -my $node = PostgreSQL::Test::Cluster->new('node'); -$node->init; -$node->start; - - -############################################################### -# Definition of the pg_dumpall test cases to run. -# -# Each of these test cases are named and those names are used for fail -# reporting and also to save the dump and restore information needed for the -# test to assert. -# -# The "setup_sql" is a psql valid script that contains SQL commands to execute -# before of actually execute the tests. The setups are all executed before of -# any test execution. -# -# The "dump_cmd" and "restore_cmd" are the commands that will be executed. The -# "restore_cmd" must have the --file flag to save the restore output so that we -# can assert on it. 
-# -# The "like" and "unlike" is a regexp that is used to match the pg_restore -# output. It must have at least one of then filled per test cases but it also -# can have both. See "excluding_databases" test case for example. -my %pgdumpall_runs = ( - restore_roles => { - setup_sql => ' - CREATE ROLE dumpall WITH ENCRYPTED PASSWORD \'admin\' SUPERUSER; - CREATE ROLE dumpall2 WITH REPLICATION CONNECTION LIMIT 10;', - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/restore_roles", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/restore_roles.sql", - "$tempdir/restore_roles", - ], - like => qr/ - ^\s*\QCREATE ROLE dumpall;\E\s*\n - \s*\QALTER ROLE dumpall WITH SUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN NOREPLICATION NOBYPASSRLS PASSWORD 'SCRAM-SHA-256\E - [^']+';\s*\n - \s*\QCREATE ROLE dumpall2;\E - \s*\QALTER ROLE dumpall2 WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN REPLICATION NOBYPASSRLS CONNECTION LIMIT 10;\E - /xm - }, - - restore_tablespace => { - setup_sql => " - CREATE ROLE tap; - CREATE TABLESPACE tbl1 OWNER tap LOCATION '$tablespace1'; - CREATE TABLESPACE tbl2 OWNER tap LOCATION '$tablespace2' WITH (seq_page_cost=1.0);", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/restore_tablespace", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/restore_tablespace.sql", - "$tempdir/restore_tablespace", - ], - # Match "E" as optional since it is added on LOCATION when running on - # Windows. - like => qr/^ - \n\QCREATE TABLESPACE tbl1 OWNER tap LOCATION \E(?:E)?\Q'$tablespace1';\E - \n\QCREATE TABLESPACE tbl2 OWNER tap LOCATION \E(?:E)?\Q'$tablespace2';\E - \n\QALTER TABLESPACE tbl2 SET (seq_page_cost=1.0);\E - /xm, - }, - - restore_grants => { - setup_sql => " - CREATE DATABASE tapgrantsdb; - CREATE SCHEMA private; - CREATE SEQUENCE serial START 101; - CREATE FUNCTION fn() RETURNS void AS \$\$ - BEGIN - END; - \$\$ LANGUAGE plpgsql; - CREATE ROLE super; - CREATE ROLE grant1; - CREATE ROLE grant2; - CREATE ROLE grant3; - CREATE ROLE grant4; - CREATE ROLE grant5; - CREATE ROLE grant6; - CREATE ROLE grant7; - CREATE ROLE grant8; - - CREATE TABLE t (id int); - INSERT INTO t VALUES (1), (2), (3), (4); - - GRANT SELECT ON TABLE t TO grant1; - GRANT INSERT ON TABLE t TO grant2; - GRANT ALL PRIVILEGES ON TABLE t to grant3; - GRANT CONNECT, CREATE ON DATABASE tapgrantsdb TO grant4; - GRANT USAGE, CREATE ON SCHEMA private TO grant5; - GRANT USAGE, SELECT, UPDATE ON SEQUENCE serial TO grant6; - GRANT super TO grant7; - GRANT EXECUTE ON FUNCTION fn() TO grant8; - ", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/restore_grants", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/restore_grants.sql", - "$tempdir/restore_grants", - ], - like => qr/^ - \n\QGRANT super TO grant7 WITH INHERIT TRUE GRANTED BY\E - (.*\n)* - \n\QGRANT ALL ON SCHEMA private TO grant5;\E - (.*\n)* - \n\QGRANT ALL ON FUNCTION public.fn() TO grant8;\E - (.*\n)* - \n\QGRANT ALL ON SEQUENCE public.serial TO grant6;\E - (.*\n)* - \n\QGRANT SELECT ON TABLE public.t TO grant1;\E - \n\QGRANT INSERT ON TABLE public.t TO grant2;\E - \n\QGRANT ALL ON TABLE public.t TO grant3;\E - (.*\n)* - \n\QGRANT CREATE,CONNECT ON DATABASE tapgrantsdb TO grant4;\E - /xm, - }, - - excluding_databases => { - setup_sql => 'CREATE DATABASE db1; - \c db1 - CREATE TABLE t1 (id int); - 
INSERT INTO t1 VALUES (1), (2), (3), (4); - CREATE TABLE t2 (id int); - INSERT INTO t2 VALUES (1), (2), (3), (4); - - CREATE DATABASE db2; - \c db2 - CREATE TABLE t3 (id int); - INSERT INTO t3 VALUES (1), (2), (3), (4); - CREATE TABLE t4 (id int); - INSERT INTO t4 VALUES (1), (2), (3), (4); - - CREATE DATABASE dbex3; - \c dbex3 - CREATE TABLE t5 (id int); - INSERT INTO t5 VALUES (1), (2), (3), (4); - CREATE TABLE t6 (id int); - INSERT INTO t6 VALUES (1), (2), (3), (4); - - CREATE DATABASE dbex4; - \c dbex4 - CREATE TABLE t7 (id int); - INSERT INTO t7 VALUES (1), (2), (3), (4); - CREATE TABLE t8 (id int); - INSERT INTO t8 VALUES (1), (2), (3), (4); - - CREATE DATABASE db5; - \c db5 - CREATE TABLE t9 (id int); - INSERT INTO t9 VALUES (1), (2), (3), (4); - CREATE TABLE t10 (id int); - INSERT INTO t10 VALUES (1), (2), (3), (4); - ', - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/excluding_databases", - '--exclude-database' => 'dbex*', - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/excluding_databases.sql", - '--exclude-database' => 'db5', - "$tempdir/excluding_databases", - ], - like => qr/^ - \n\QCREATE DATABASE db1\E - (.*\n)* - \n\QCREATE TABLE public.t1 (\E - (.*\n)* - \n\QCREATE TABLE public.t2 (\E - (.*\n)* - \n\QCREATE DATABASE db2\E - (.*\n)* - \n\QCREATE TABLE public.t3 (\E - (.*\n)* - \n\QCREATE TABLE public.t4 (/xm, - unlike => qr/^ - \n\QCREATE DATABASE db3\E - (.*\n)* - \n\QCREATE TABLE public.t5 (\E - (.*\n)* - \n\QCREATE TABLE public.t6 (\E - (.*\n)* - \n\QCREATE DATABASE db4\E - (.*\n)* - \n\QCREATE TABLE public.t7 (\E - (.*\n)* - \n\QCREATE TABLE public.t8 (\E - \n\QCREATE DATABASE db5\E - (.*\n)* - \n\QCREATE TABLE public.t9 (\E - (.*\n)* - \n\QCREATE TABLE public.t10 (\E - /xm, - }, - - format_directory => { - setup_sql => "CREATE TABLE format_directory(a int, b boolean, c text); - INSERT INTO format_directory VALUES (1, true, 'name1'), (2, false, 'name2');", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--file' => "$tempdir/format_directory", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'directory', - '--file' => "$tempdir/format_directory.sql", - "$tempdir/format_directory", - ], - like => qr/^\n\QCOPY public.format_directory (a, b, c) FROM stdin;/xm - }, - - format_tar => { - setup_sql => "CREATE TABLE format_tar(a int, b boolean, c text); - INSERT INTO format_tar VALUES (1, false, 'name3'), (2, true, 'name4');", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'tar', - '--file' => "$tempdir/format_tar", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'tar', - '--file' => "$tempdir/format_tar.sql", - "$tempdir/format_tar", - ], - like => qr/^\n\QCOPY public.format_tar (a, b, c) FROM stdin;/xm - }, - - format_custom => { - setup_sql => "CREATE TABLE format_custom(a int, b boolean, c text); - INSERT INTO format_custom VALUES (1, false, 'name5'), (2, true, 'name6');", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'custom', - '--file' => "$tempdir/format_custom", - ], - restore_cmd => [ - 'pg_restore', '-C', - '--format' => 'custom', - '--file' => "$tempdir/format_custom.sql", - "$tempdir/format_custom", - ], - like => qr/^ \n\QCOPY public.format_custom (a, b, c) FROM stdin;/xm - }, - - dump_globals_only => { - setup_sql => "CREATE TABLE format_dir(a int, b boolean, c text); - INSERT INTO format_dir VALUES (1, false, 'name5'), (2, true, 'name6');", - dump_cmd => [ - 'pg_dumpall', - '--format' => 'directory', - '--globals-only', - '--file' 
=> "$tempdir/dump_globals_only", - ], - restore_cmd => [ - 'pg_restore', '-C', '--globals-only', - '--format' => 'directory', - '--file' => "$tempdir/dump_globals_only.sql", - "$tempdir/dump_globals_only", - ], - like => qr/ - ^\s*\QCREATE ROLE dumpall;\E\s*\n - /xm - },); - -# First execute the setup_sql -foreach my $run (sort keys %pgdumpall_runs) -{ - if ($pgdumpall_runs{$run}->{setup_sql}) - { - $node->safe_psql($run_db, $pgdumpall_runs{$run}->{setup_sql}); - } -} - -# Execute the tests -foreach my $run (sort keys %pgdumpall_runs) -{ - # Create a new target cluster to pg_restore each test case run so that we - # don't need to take care of the cleanup from the target cluster after each - # run. - my $target_node = PostgreSQL::Test::Cluster->new("target_$run"); - $target_node->init; - $target_node->start; - - # Dumpall from node cluster. - $node->command_ok(\@{ $pgdumpall_runs{$run}->{dump_cmd} }, - "$run: pg_dumpall runs"); - - # Restore the dump on "target_node" cluster. - my @restore_cmd = ( - @{ $pgdumpall_runs{$run}->{restore_cmd} }, - '--host', $target_node->host, '--port', $target_node->port); - - my ($stdout, $stderr) = run_command(\@restore_cmd); - - # pg_restore --file output file. - my $output_file = slurp_file("$tempdir/${run}.sql"); - - if ( !($pgdumpall_runs{$run}->{like}) - && !($pgdumpall_runs{$run}->{unlike})) - { - die "missing \"like\" or \"unlike\" in test \"$run\""; - } - - if ($pgdumpall_runs{$run}->{like}) - { - like($output_file, $pgdumpall_runs{$run}->{like}, "should dump $run"); - } - - if ($pgdumpall_runs{$run}->{unlike}) - { - unlike( - $output_file, - $pgdumpall_runs{$run}->{unlike}, - "should not dump $run"); - } -} - -# Some negative test case with dump of pg_dumpall and restore using pg_restore -# test case 1: when -C is not used in pg_restore with dump of pg_dumpall -$node->command_fails_like( - [ - 'pg_restore', - "$tempdir/format_custom", - '--format' => 'custom', - '--file' => "$tempdir/error_test.sql", - ], - qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/, - 'When -C is not used in pg_restore with dump of pg_dumpall'); - -# test case 2: When --list option is used with dump of pg_dumpall -$node->command_fails_like( - [ - 'pg_restore', - "$tempdir/format_custom", '-C', - '--format' => 'custom', - '--list', - '--file' => "$tempdir/error_test.sql", - ], - qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/, - 'When --list is used in pg_restore with dump of pg_dumpall'); - -# test case 3: When non-exist database is given with -d option -$node->command_fails_like( - [ - 'pg_restore', - "$tempdir/format_custom", '-C', - '--format' => 'custom', - '-d' => 'dbpq', - ], - qr/\Qpg_restore: error: could not connect to database "dbpq"\E/, - 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall' -); - -$node->stop('fast'); - -done_testing(); diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c index 5e6403f0773..310f53c5577 100644 --- a/src/bin/pg_upgrade/check.c +++ b/src/bin/pg_upgrade/check.c @@ -956,12 +956,12 @@ check_for_new_tablespace_dir(void) prep_status("Checking for new cluster tablespace directories"); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++) { struct stat statbuf; snprintf(new_tablespace_dir, MAXPGPATH, "%s%s", - os_info.old_tablespaces[tblnum], + new_cluster.tablespaces[tblnum], 
new_cluster.tablespace_suffix); if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT) @@ -1013,17 +1013,17 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) * directory. We can't create a proper old cluster delete script in that * case. */ - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++) { - char old_tablespace_dir[MAXPGPATH]; + char new_tablespace_dir[MAXPGPATH]; - strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH); - canonicalize_path(old_tablespace_dir); - if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir)) + strlcpy(new_tablespace_dir, new_cluster.tablespaces[tblnum], MAXPGPATH); + canonicalize_path(new_tablespace_dir); + if (path_is_prefix_of_path(old_cluster_pgdata, new_tablespace_dir)) { /* reproduce warning from CREATE TABLESPACE that is in the log */ pg_log(PG_WARNING, - "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", old_tablespace_dir); + "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", new_tablespace_dir); /* Unlink file in case it is left over from a previous run. */ unlink(*deletion_script_file_name); @@ -1051,9 +1051,9 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) /* delete old cluster's alternate tablespaces */ old_tblspc_suffix = pg_strdup(old_cluster.tablespace_suffix); fix_path_separator(old_tblspc_suffix); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++) fprintf(script, RMDIR_CMD " %c%s%s%c\n", PATH_QUOTE, - fix_path_separator(os_info.old_tablespaces[tblnum]), + fix_path_separator(old_cluster.tablespaces[tblnum]), old_tblspc_suffix, PATH_QUOTE); pfree(old_tblspc_suffix); diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c index 183f08ce1e8..55f6e7b4d9c 100644 --- a/src/bin/pg_upgrade/dump.c +++ b/src/bin/pg_upgrade/dump.c @@ -58,7 +58,7 @@ generate_old_dump(void) (user_opts.transfer_mode == TRANSFER_MODE_SWAP) ? "" : "--sequence-data", log_opts.verbose ? "--verbose" : "", - user_opts.do_statistics ? "--with-statistics" : "--no-statistics", + user_opts.do_statistics ? "--statistics" : "--no-statistics", log_opts.dumpdir, sql_file_name, escaped_connstr.data); diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c index a437067cdca..c39eb077c2f 100644 --- a/src/bin/pg_upgrade/info.c +++ b/src/bin/pg_upgrade/info.c @@ -443,10 +443,26 @@ get_db_infos(ClusterInfo *cluster) for (tupnum = 0; tupnum < ntups; tupnum++) { + char *spcloc = PQgetvalue(res, tupnum, i_spclocation); + bool inplace = spcloc[0] && !is_absolute_path(spcloc); + dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid)); dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname)); - snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s", - PQgetvalue(res, tupnum, i_spclocation)); + + /* + * The tablespace location might be "", meaning the cluster default + * location, i.e. pg_default or pg_global. For in-place tablespaces, + * pg_tablespace_location() returns a path relative to the data + * directory. 
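The in-place handling added here hinges on pg_tablespace_location() returning a path relative to the data directory (e.g. "pg_tblspc/16385") only for in-place tablespaces, and an absolute path otherwise. A standalone sketch of the resolution rule (simplified is_absolute_path(); PostgreSQL's real macro also handles Windows drive letters):

#include <stdio.h>

static void
resolve_tablespace_path(const char *pgdata, const char *spcloc,
						char *dst, size_t dstsize)
{
	if (spcloc[0] == '\0')
		snprintf(dst, dstsize, "%s", pgdata);	/* cluster default location */
	else if (spcloc[0] == '/')
		snprintf(dst, dstsize, "%s", spcloc);	/* regular tablespace */
	else
		snprintf(dst, dstsize, "%s/%s", pgdata, spcloc);	/* in-place */
}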
+ */ + if (inplace) + snprintf(dbinfos[tupnum].db_tablespace, + sizeof(dbinfos[tupnum].db_tablespace), + "%s/%s", cluster->pgdata, spcloc); + else + snprintf(dbinfos[tupnum].db_tablespace, + sizeof(dbinfos[tupnum].db_tablespace), + "%s", spcloc); } PQclear(res); @@ -616,11 +632,21 @@ process_rel_infos(DbInfo *dbinfo, PGresult *res, void *arg) /* Is the tablespace oid non-default? */ if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0) { + char *spcloc = PQgetvalue(res, relnum, i_spclocation); + bool inplace = spcloc[0] && !is_absolute_path(spcloc); + /* * The tablespace location might be "", meaning the cluster - * default location, i.e. pg_default or pg_global. + * default location, i.e. pg_default or pg_global. For in-place + * tablespaces, pg_tablespace_location() returns a path relative + * to the data directory. */ - tablespace = PQgetvalue(res, relnum, i_spclocation); + if (inplace) + tablespace = psprintf("%s/%s", + os_info.running_cluster->pgdata, + spcloc); + else + tablespace = spcloc; /* Can we reuse the previous string allocation? */ if (last_tablespace && strcmp(tablespace, last_tablespace) == 0) @@ -630,6 +656,10 @@ process_rel_infos(DbInfo *dbinfo, PGresult *res, void *arg) last_tablespace = curr->tablespace = pg_strdup(tablespace); curr->tblsp_alloc = true; } + + /* Free palloc'd string for in-place tablespaces. */ + if (inplace) + pfree(tablespace); } else /* A zero reltablespace oid indicates the database tablespace. */ diff --git a/src/bin/pg_upgrade/parallel.c b/src/bin/pg_upgrade/parallel.c index 056aa2edaee..6d7941844a7 100644 --- a/src/bin/pg_upgrade/parallel.c +++ b/src/bin/pg_upgrade/parallel.c @@ -40,6 +40,7 @@ typedef struct char *old_pgdata; char *new_pgdata; char *old_tablespace; + char *new_tablespace; } transfer_thread_arg; static exec_thread_arg **exec_thread_args; @@ -171,7 +172,7 @@ win32_exec_prog(exec_thread_arg *args) void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata, - char *old_tablespace) + char *old_tablespace, char *new_tablespace) { #ifndef WIN32 pid_t child; @@ -181,7 +182,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, #endif if (user_opts.jobs <= 1) - transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL); + transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL, NULL); else { /* parallel */ @@ -225,7 +226,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, if (child == 0) { transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, - old_tablespace); + old_tablespace, new_tablespace); /* if we take another exit path, it will be non-zero */ /* use _exit to skip atexit() functions */ _exit(0); @@ -246,6 +247,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, new_arg->new_pgdata = pg_strdup(new_pgdata); pg_free(new_arg->old_tablespace); new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL; + new_arg->new_tablespace = new_tablespace ? 
pg_strdup(new_tablespace) : NULL; child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs, new_arg, 0, NULL); @@ -263,7 +265,8 @@ DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args) { transfer_all_new_dbs(args->old_db_arr, args->new_db_arr, args->old_pgdata, - args->new_pgdata, args->old_tablespace); + args->new_pgdata, args->old_tablespace, + args->new_tablespace); /* terminates thread */ return 0; diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h index e9401430e69..0ef47be0dc1 100644 --- a/src/bin/pg_upgrade/pg_upgrade.h +++ b/src/bin/pg_upgrade/pg_upgrade.h @@ -300,6 +300,8 @@ typedef struct uint32 major_version; /* PG_VERSION of cluster */ char major_version_str[64]; /* string PG_VERSION of cluster */ uint32 bin_version; /* version returned from pg_ctl */ + char **tablespaces; /* tablespace directories */ + int num_tablespaces; const char *tablespace_suffix; /* directory specification */ int nsubs; /* number of subscriptions */ bool sub_retain_dead_tuples; /* whether a subscription enables @@ -356,8 +358,6 @@ typedef struct const char *progname; /* complete pathname for this program */ char *user; /* username for clusters */ bool user_specified; /* user specified on command-line */ - char **old_tablespaces; /* tablespaces */ - int num_old_tablespaces; LibraryInfo *libraries; /* loadable libraries */ int num_libraries; ClusterInfo *running_cluster; @@ -457,7 +457,7 @@ void transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata); void transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata, - char *old_tablespace); + char *old_tablespace, char *new_tablespace); /* tablespace.c */ @@ -505,7 +505,7 @@ void parallel_exec_prog(const char *log_file, const char *opt_log_file, const char *fmt,...) 
pg_attribute_printf(3, 4); void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata, - char *old_tablespace); + char *old_tablespace, char *new_tablespace); bool reap_child(bool wait_for_child); /* task.c */ diff --git a/src/bin/pg_upgrade/relfilenumber.c b/src/bin/pg_upgrade/relfilenumber.c index 8d8e816a01f..38c17ceabf2 100644 --- a/src/bin/pg_upgrade/relfilenumber.c +++ b/src/bin/pg_upgrade/relfilenumber.c @@ -17,7 +17,7 @@ #include "common/logging.h" #include "pg_upgrade.h" -static void transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace); +static void transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace, char *new_tablespace); static void transfer_relfile(FileNameMap *map, const char *type_suffix, bool vm_must_add_frozenbit); /* @@ -136,21 +136,22 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, */ if (user_opts.jobs <= 1) parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, - new_pgdata, NULL); + new_pgdata, NULL, NULL); else { int tblnum; /* transfer default tablespace */ parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, - new_pgdata, old_pgdata); + new_pgdata, old_pgdata, new_pgdata); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++) parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, - os_info.old_tablespaces[tblnum]); + old_cluster.tablespaces[tblnum], + new_cluster.tablespaces[tblnum]); /* reap all children */ while (reap_child(true) == true) ; @@ -169,7 +170,8 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, */ void transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata, char *old_tablespace) + char *old_pgdata, char *new_pgdata, + char *old_tablespace, char *new_tablespace) { int old_dbnum, new_dbnum; @@ -204,7 +206,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, new_pgdata); if (n_maps) { - transfer_single_new_db(mappings, n_maps, old_tablespace); + transfer_single_new_db(mappings, n_maps, old_tablespace, new_tablespace); } /* We allocate something even for n_maps == 0 */ pg_free(mappings); @@ -234,10 +236,10 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, * moved_db_dir: Destination for the pg_restore-generated database directory. */ static bool -prepare_for_swap(const char *old_tablespace, Oid db_oid, - char *old_catalog_dir, char *new_db_dir, char *moved_db_dir) +prepare_for_swap(const char *old_tablespace, const char *new_tablespace, + Oid db_oid, char *old_catalog_dir, char *new_db_dir, + char *moved_db_dir) { - const char *new_tablespace; const char *old_tblspc_suffix; const char *new_tblspc_suffix; char old_tblspc[MAXPGPATH]; @@ -247,24 +249,14 @@ prepare_for_swap(const char *old_tablespace, Oid db_oid, struct stat st; if (strcmp(old_tablespace, old_cluster.pgdata) == 0) - { - new_tablespace = new_cluster.pgdata; - new_tblspc_suffix = "/base"; old_tblspc_suffix = "/base"; - } else - { - /* - * XXX: The below line is a hack to deal with the fact that we - * presently don't have an easy way to find the corresponding new - * tablespace's path. This will need to be fixed if/when we add - * pg_upgrade support for in-place tablespaces. 
- */ - new_tablespace = old_tablespace; + old_tblspc_suffix = old_cluster.tablespace_suffix; + if (strcmp(new_tablespace, new_cluster.pgdata) == 0) + new_tblspc_suffix = "/base"; + else new_tblspc_suffix = new_cluster.tablespace_suffix; - old_tblspc_suffix = old_cluster.tablespace_suffix; - } /* Old and new cluster paths. */ snprintf(old_tblspc, sizeof(old_tblspc), "%s%s", old_tablespace, old_tblspc_suffix); @@ -450,7 +442,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir, * during pg_restore. */ static void -do_swap(FileNameMap *maps, int size, char *old_tablespace) +do_swap(FileNameMap *maps, int size, char *old_tablespace, char *new_tablespace) { char old_catalog_dir[MAXPGPATH]; char new_db_dir[MAXPGPATH]; @@ -470,21 +462,23 @@ do_swap(FileNameMap *maps, int size, char *old_tablespace) */ if (old_tablespace) { - if (prepare_for_swap(old_tablespace, maps[0].db_oid, + if (prepare_for_swap(old_tablespace, new_tablespace, maps[0].db_oid, old_catalog_dir, new_db_dir, moved_db_dir)) swap_catalog_files(maps, size, old_catalog_dir, new_db_dir, moved_db_dir); } else { - if (prepare_for_swap(old_cluster.pgdata, maps[0].db_oid, + if (prepare_for_swap(old_cluster.pgdata, new_cluster.pgdata, maps[0].db_oid, old_catalog_dir, new_db_dir, moved_db_dir)) swap_catalog_files(maps, size, old_catalog_dir, new_db_dir, moved_db_dir); - for (int tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (int tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++) { - if (prepare_for_swap(os_info.old_tablespaces[tblnum], maps[0].db_oid, + if (prepare_for_swap(old_cluster.tablespaces[tblnum], + new_cluster.tablespaces[tblnum], + maps[0].db_oid, old_catalog_dir, new_db_dir, moved_db_dir)) swap_catalog_files(maps, size, old_catalog_dir, new_db_dir, moved_db_dir); @@ -498,7 +492,8 @@ do_swap(FileNameMap *maps, int size, char *old_tablespace) * create links for mappings stored in "maps" array. */ static void -transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace) +transfer_single_new_db(FileNameMap *maps, int size, + char *old_tablespace, char *new_tablespace) { int mapnum; bool vm_must_add_frozenbit = false; @@ -520,7 +515,7 @@ transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace) */ Assert(!vm_must_add_frozenbit); - do_swap(maps, size, old_tablespace); + do_swap(maps, size, old_tablespace, new_tablespace); return; } diff --git a/src/bin/pg_upgrade/t/006_transfer_modes.pl b/src/bin/pg_upgrade/t/006_transfer_modes.pl index 58fe8a8c7dc..348f4021462 100644 --- a/src/bin/pg_upgrade/t/006_transfer_modes.pl +++ b/src/bin/pg_upgrade/t/006_transfer_modes.pl @@ -38,6 +38,13 @@ sub test_mode } $new->init(); + # allow_in_place_tablespaces is available as far back as v10. + if ($old->pg_version >= 10) + { + $new->append_conf('postgresql.conf', "allow_in_place_tablespaces = true"); + $old->append_conf('postgresql.conf', "allow_in_place_tablespaces = true"); + } + # Create a small variety of simple test objects on the old cluster. We'll # check that these reach the new version after upgrading. $old->start; @@ -49,8 +56,7 @@ sub test_mode $old->safe_psql('testdb1', "VACUUM FULL test2"); $old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432"); - # For cross-version tests, we can also check that pg_upgrade handles - # tablespaces. + # If an old installation is provided, we can test non-in-place tablespaces. 
if (defined($ENV{oldinstall})) { my $tblspc = PostgreSQL::Test::Utils::tempdir_short(); @@ -64,6 +70,19 @@ sub test_mode $old->safe_psql('testdb2', "CREATE TABLE test4 AS SELECT generate_series(400, 502)"); } + + # If the old cluster is >= v10, we can test in-place tablespaces. + if ($old->pg_version >= 10) + { + $old->safe_psql('postgres', + "CREATE TABLESPACE inplc_tblspc LOCATION ''"); + $old->safe_psql('postgres', + "CREATE DATABASE testdb3 TABLESPACE inplc_tblspc"); + $old->safe_psql('postgres', + "CREATE TABLE test5 TABLESPACE inplc_tblspc AS SELECT generate_series(503, 606)"); + $old->safe_psql('testdb3', + "CREATE TABLE test6 AS SELECT generate_series(607, 711)"); + } $old->stop; my $result = command_ok_or_fails_like( @@ -94,8 +113,7 @@ sub test_mode $result = $new->safe_psql('testdb1', "SELECT nextval('testseq')"); is($result, '5432', "sequence data after pg_upgrade $mode"); - # For cross-version tests, we should have some objects in a non-default - # tablespace. + # Tests for non-in-place tablespaces. if (defined($ENV{oldinstall})) { $result = @@ -105,6 +123,15 @@ sub test_mode $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4"); is($result, '103', "test4 data after pg_upgrade $mode"); } + + # Tests for in-place tablespaces. + if ($old->pg_version >= 10) + { + $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test5"); + is($result, '104', "test5 data after pg_upgrade $mode"); + $result = $new->safe_psql('testdb3', "SELECT COUNT(*) FROM test6"); + is($result, '105', "test6 data after pg_upgrade $mode"); + } $new->stop; } diff --git a/src/bin/pg_upgrade/tablespace.c b/src/bin/pg_upgrade/tablespace.c index 3520a75ba31..151d74e1734 100644 --- a/src/bin/pg_upgrade/tablespace.c +++ b/src/bin/pg_upgrade/tablespace.c @@ -23,10 +23,20 @@ init_tablespaces(void) set_tablespace_directory_suffix(&old_cluster); set_tablespace_directory_suffix(&new_cluster); - if (os_info.num_old_tablespaces > 0 && + if (old_cluster.num_tablespaces > 0 && strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0) - pg_fatal("Cannot upgrade to/from the same system catalog version when\n" - "using tablespaces."); + { + for (int i = 0; i < old_cluster.num_tablespaces; i++) + { + /* + * In-place tablespaces are okay for same-version upgrades because + * their paths will differ between clusters. 
+ */ + if (strcmp(old_cluster.tablespaces[i], new_cluster.tablespaces[i]) == 0) + pg_fatal("Cannot upgrade to/from the same system catalog version when\n" + "using tablespaces."); + } + } } @@ -53,19 +63,48 @@ get_tablespace_paths(void) res = executeQueryOrDie(conn, "%s", query); - if ((os_info.num_old_tablespaces = PQntuples(res)) != 0) - os_info.old_tablespaces = - (char **) pg_malloc(os_info.num_old_tablespaces * sizeof(char *)); + old_cluster.num_tablespaces = PQntuples(res); + new_cluster.num_tablespaces = PQntuples(res); + + if (PQntuples(res) != 0) + { + old_cluster.tablespaces = + (char **) pg_malloc(old_cluster.num_tablespaces * sizeof(char *)); + new_cluster.tablespaces = + (char **) pg_malloc(new_cluster.num_tablespaces * sizeof(char *)); + } else - os_info.old_tablespaces = NULL; + { + old_cluster.tablespaces = NULL; + new_cluster.tablespaces = NULL; + } i_spclocation = PQfnumber(res, "spclocation"); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) + for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++) { struct stat statBuf; + char *spcloc = PQgetvalue(res, tblnum, i_spclocation); - os_info.old_tablespaces[tblnum] = pg_strdup(PQgetvalue(res, tblnum, i_spclocation)); + /* + * For now, we do not expect non-in-place tablespaces to move during + * upgrade. If that changes, it will likely become necessary to run + * the above query on the new cluster, too. + * + * pg_tablespace_location() returns absolute paths for non-in-place + * tablespaces and relative paths for in-place ones, so we use + * is_absolute_path() to distinguish between them. + */ + if (is_absolute_path(PQgetvalue(res, tblnum, i_spclocation))) + { + old_cluster.tablespaces[tblnum] = pg_strdup(spcloc); + new_cluster.tablespaces[tblnum] = old_cluster.tablespaces[tblnum]; + } + else + { + old_cluster.tablespaces[tblnum] = psprintf("%s/%s", old_cluster.pgdata, spcloc); + new_cluster.tablespaces[tblnum] = psprintf("%s/%s", new_cluster.pgdata, spcloc); + } /* * Check that the tablespace path exists and is a directory. @@ -76,21 +115,21 @@ get_tablespace_paths(void) * that contains user tablespaces is moved as part of pg_upgrade * preparation and the symbolic links are not updated. */ - if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0) + if (stat(old_cluster.tablespaces[tblnum], &statBuf) != 0) { if (errno == ENOENT) report_status(PG_FATAL, "tablespace directory \"%s\" does not exist", - os_info.old_tablespaces[tblnum]); + old_cluster.tablespaces[tblnum]); else report_status(PG_FATAL, "could not stat tablespace directory \"%s\": %m", - os_info.old_tablespaces[tblnum]); + old_cluster.tablespaces[tblnum]); } if (!S_ISDIR(statBuf.st_mode)) report_status(PG_FATAL, "tablespace path \"%s\" is not a directory", - os_info.old_tablespaces[tblnum]); + old_cluster.tablespaces[tblnum]); } PQclear(res); diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index 497a936c141..125f3c7bbbe 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -3495,6 +3495,8 @@ doRetry(CState *st, pg_time_usec_t *now) static int discardUntilSync(CState *st) { + bool received_sync = false; + /* send a sync */ if (!PQpipelineSync(st->con)) { @@ -3509,10 +3511,21 @@ discardUntilSync(CState *st) PGresult *res = PQgetResult(st->con); if (PQresultStatus(res) == PGRES_PIPELINE_SYNC) + received_sync = true; + else if (received_sync) { - PQclear(res); - res = PQgetResult(st->con); + /* + * PGRES_PIPELINE_SYNC must be followed by another + * PGRES_PIPELINE_SYNC or NULL; otherwise, assert failure. 
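The pgbench loop being adjusted here drains a libpq pipeline after an error: it sends a sync point, then discards results until the matching PGRES_PIPELINE_SYNC arrives. A simplified single-sync sketch of that pattern (hypothetical helper; it assumes the connection is already in pipeline mode and omits pgbench's tracking of multiple queued syncs):

#include <libpq-fe.h>

static int
drain_until_sync(PGconn *conn)
{
	if (!PQpipelineSync(conn))
		return -1;				/* could not queue the sync message */

	for (;;)
	{
		PGresult   *res = PQgetResult(conn);

		if (res != NULL && PQresultStatus(res) == PGRES_PIPELINE_SYNC)
		{
			PQclear(res);
			break;				/* pipeline drained up to our sync */
		}
		PQclear(res);			/* PQclear(NULL) is a no-op */
		if (PQstatus(conn) == CONNECTION_BAD)
			return -1;			/* bail out if the connection died */
	}
	return 0;
}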
+ */ Assert(res == NULL); + + /* + * Reset ongoing sync count to 0 since all PGRES_PIPELINE_SYNC + * results have been discarded. + */ + st->num_syncs = 0; + PQclear(res); break; } PQclear(res); diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c index dbc586c5bc3..1f2ca946fc5 100644 --- a/src/bin/psql/tab-complete.in.c +++ b/src/bin/psql/tab-complete.in.c @@ -1010,7 +1010,7 @@ static const SchemaQuery Query_for_trigger_of_table = { #define Query_for_list_of_database_vars \ "SELECT conf FROM ("\ -" SELECT setdatabase, pg_catalog.split_part(unnest(setconfig),'=',1) conf"\ +" SELECT setdatabase, pg_catalog.split_part(pg_catalog.unnest(setconfig),'=',1) conf"\ " FROM pg_db_role_setting "\ " ) s, pg_database d "\ " WHERE s.setdatabase = d.oid "\ @@ -1086,9 +1086,12 @@ Keywords_for_list_of_owner_roles, "PUBLIC" " WHERE usename LIKE '%s'" #define Query_for_list_of_user_vars \ -" SELECT pg_catalog.split_part(pg_catalog.unnest(rolconfig),'=',1) "\ -" FROM pg_catalog.pg_roles "\ -" WHERE rolname LIKE '%s'" +"SELECT conf FROM ("\ +" SELECT rolname, pg_catalog.split_part(pg_catalog.unnest(rolconfig),'=',1) conf"\ +" FROM pg_catalog.pg_roles"\ +" ) s"\ +" WHERE s.conf like '%s' "\ +" AND s.rolname LIKE '%s'" #define Query_for_list_of_access_methods \ " SELECT amname "\ @@ -2517,7 +2520,10 @@ match_previous_words(int pattern_id, /* ALTER USER,ROLE <name> RESET */ else if (Matches("ALTER", "USER|ROLE", MatchAny, "RESET")) + { + set_completion_reference(prev2_wd); COMPLETE_WITH_QUERY_PLUS(Query_for_list_of_user_vars, "ALL"); + } /* ALTER USER,ROLE <name> WITH */ else if (Matches("ALTER", "USER|ROLE", MatchAny, "WITH")) @@ -5015,7 +5021,7 @@ match_previous_words(int pattern_id, /* Complete with a variable name */ else if (TailMatches("SET|RESET") && !TailMatches("UPDATE", MatchAny, "SET") && - !TailMatches("ALTER", "DATABASE", MatchAny, "RESET")) + !TailMatches("ALTER", "DATABASE|USER|ROLE", MatchAny, "RESET")) COMPLETE_WITH_QUERY_VERBATIM_PLUS(Query_for_list_of_set_vars, "CONSTRAINTS", "TRANSACTION", diff --git a/src/common/Makefile b/src/common/Makefile index 1e2b91c83c4..2c720caa509 100644 --- a/src/common/Makefile +++ b/src/common/Makefile @@ -163,7 +163,7 @@ libpgcommon_shlib.a: $(OBJS_SHLIB) # The JSON API normally exits on out-of-memory; disable that behavior for shared # library builds. This requires libpq's pqexpbuffer.h. jsonapi_shlib.o: override CPPFLAGS += -DJSONAPI_USE_PQEXPBUFFER -jsonapi_shlib.o: override CPPFLAGS += -I$(libpq_srcdir) +jsonapi_shlib.o: override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) # Because this uses its own compilation rule, it doesn't use the # dependency tracking logic from Makefile.global. 
To make sure that diff --git a/src/include/catalog/pg_subscription_rel.h b/src/include/catalog/pg_subscription_rel.h index c91797c869c..f458447a0e5 100644 --- a/src/include/catalog/pg_subscription_rel.h +++ b/src/include/catalog/pg_subscription_rel.h @@ -85,7 +85,7 @@ typedef struct SubscriptionRelState extern void AddSubscriptionRelState(Oid subid, Oid relid, char state, XLogRecPtr sublsn, bool retain_lock); extern void UpdateSubscriptionRelState(Oid subid, Oid relid, char state, - XLogRecPtr sublsn); + XLogRecPtr sublsn, bool already_locked); extern char GetSubscriptionRelState(Oid subid, Oid relid, XLogRecPtr *sublsn); extern void RemoveSubscriptionRel(Oid subid, Oid relid); diff --git a/src/include/libpq/libpq-be-fe-helpers.h b/src/include/libpq/libpq-be-fe-helpers.h index af13bd6bf3d..1c4a342090c 100644 --- a/src/include/libpq/libpq-be-fe-helpers.h +++ b/src/include/libpq/libpq-be-fe-helpers.h @@ -30,17 +30,7 @@ #ifndef LIBPQ_BE_FE_HELPERS_H #define LIBPQ_BE_FE_HELPERS_H -/* - * Despite the name, BUILDING_DLL is set only when building code directly part - * of the backend. Which also is where libpq isn't allowed to be - * used. Obviously this doesn't protect against libpq-fe.h getting included - * otherwise, but perhaps still protects against a few mistakes... - */ -#ifdef BUILDING_DLL -#error "libpq may not be used code directly built into the backend" -#endif - -#include "libpq-fe.h" +#include "libpq/libpq-be-fe.h" #include "miscadmin.h" #include "storage/fd.h" #include "storage/latch.h" @@ -289,41 +279,30 @@ libpqsrv_exec_params(PGconn *conn, static inline PGresult * libpqsrv_get_result_last(PGconn *conn, uint32 wait_event_info) { - PGresult *volatile lastResult = NULL; + PGresult *lastResult = NULL; - /* In what follows, do not leak any PGresults on an error. */ - PG_TRY(); + for (;;) { - for (;;) - { - /* Wait for, and collect, the next PGresult. */ - PGresult *result; + /* Wait for, and collect, the next PGresult. */ + PGresult *result; - result = libpqsrv_get_result(conn, wait_event_info); - if (result == NULL) - break; /* query is complete, or failure */ + result = libpqsrv_get_result(conn, wait_event_info); + if (result == NULL) + break; /* query is complete, or failure */ - /* - * Emulate PQexec()'s behavior of returning the last result when - * there are many. - */ - PQclear(lastResult); - lastResult = result; - - if (PQresultStatus(lastResult) == PGRES_COPY_IN || - PQresultStatus(lastResult) == PGRES_COPY_OUT || - PQresultStatus(lastResult) == PGRES_COPY_BOTH || - PQstatus(conn) == CONNECTION_BAD) - break; - } - } - PG_CATCH(); - { + /* + * Emulate PQexec()'s behavior of returning the last result when there + * are many. + */ PQclear(lastResult); - PG_RE_THROW(); - } - PG_END_TRY(); + lastResult = result; + if (PQresultStatus(lastResult) == PGRES_COPY_IN || + PQresultStatus(lastResult) == PGRES_COPY_OUT || + PQresultStatus(lastResult) == PGRES_COPY_BOTH || + PQstatus(conn) == CONNECTION_BAD) + break; + } return lastResult; } @@ -462,13 +441,21 @@ exit: ; * This function is intended to be set via PQsetNoticeReceiver() so that * NOTICE, WARNING, and similar messages from the connection are reported via * ereport(), instead of being printed to stderr. + * + * Because this will be called from libpq with a "real" (not wrapped) + * PGresult, we need to temporarily ignore libpq-be-fe.h's wrapper macros + * for PGresult and also PQresultErrorMessage, and put back the wrappers + * afterwards. That's not pretty, but there seems no better alternative. 
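+ *
+ * For reference, a sketch (not something this patch adds) of how such
+ * a receiver is installed on a backend-owned connection:
+ *
+ *     PQsetNoticeReceiver(conn, libpqsrv_notice_receiver, "some prefix");
+ *
+ * The third argument is what arrives below as "arg" and becomes the
+ * message prefix.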
*/ +#undef PGresult +#undef PQresultErrorMessage + static inline void libpqsrv_notice_receiver(void *arg, const PGresult *res) { - char *message; + const char *message; int len; - char *prefix = (char *) arg; + const char *prefix = (const char *) arg; /* * Trim the trailing newline from the message text returned from @@ -484,4 +471,7 @@ libpqsrv_notice_receiver(void *arg, const PGresult *res) errmsg_internal("%s: %.*s", prefix, len, message)); } +#define PGresult libpqsrv_PGresult +#define PQresultErrorMessage libpqsrv_PQresultErrorMessage + #endif /* LIBPQ_BE_FE_HELPERS_H */ diff --git a/src/include/libpq/libpq-be-fe.h b/src/include/libpq/libpq-be-fe.h new file mode 100644 index 00000000000..e3f796b0230 --- /dev/null +++ b/src/include/libpq/libpq-be-fe.h @@ -0,0 +1,259 @@ +/*------------------------------------------------------------------------- + * + * libpq-be-fe.h + * Wrapper functions for using libpq in extensions + * + * Code built directly into the backend is not allowed to link to libpq + * directly. Extension code is allowed to use libpq however. One of the + * main risks in doing so is leaking the malloc-allocated structures + * returned by libpq, causing a process-lifespan memory leak. + * + * This file provides wrapper objects to help in building memory-safe code. + * A PGresult object wrapped this way acts much as if it were palloc'd: + * it will go away when the specified context is reset or deleted. + * We might later extend the concept to other objects such as PGconns. + * + * See also the libpq-be-fe-helpers.h file, which provides additional + * facilities built on top of this one. + * + * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/libpq/libpq-be-fe.h + * + *------------------------------------------------------------------------- + */ +#ifndef LIBPQ_BE_FE_H +#define LIBPQ_BE_FE_H + +/* + * Despite the name, BUILDING_DLL is set only when building code directly part + * of the backend. Which also is where libpq isn't allowed to be + * used. Obviously this doesn't protect against libpq-fe.h getting included + * otherwise, but perhaps still protects against a few mistakes... + */ +#ifdef BUILDING_DLL +#error "libpq may not be used in code directly built into the backend" +#endif + +#include "libpq-fe.h" + +/* + * Memory-context-safe wrapper object for a PGresult. + */ +typedef struct libpqsrv_PGresult +{ + PGresult *res; /* the wrapped PGresult */ + MemoryContext ctx; /* the MemoryContext it's attached to */ + MemoryContextCallback cb; /* the callback that implements freeing */ +} libpqsrv_PGresult; + + +/* + * Wrap the given PGresult in a libpqsrv_PGresult object, so that it will + * go away automatically if the current memory context is reset or deleted. + * + * To avoid potential memory leaks, backend code must always apply this + * immediately to the output of any PGresult-yielding libpq function. + */ +static inline libpqsrv_PGresult * +libpqsrv_PQwrap(PGresult *res) +{ + libpqsrv_PGresult *bres; + MemoryContext ctx = CurrentMemoryContext; + + /* We pass through a NULL result as-is, since there's nothing to free */ + if (res == NULL) + return NULL; + /* Attempt to allocate the wrapper ... 
this had better not throw error */ + bres = (libpqsrv_PGresult *) + MemoryContextAllocExtended(ctx, + sizeof(libpqsrv_PGresult), + MCXT_ALLOC_NO_OOM); + /* If we failed to allocate a wrapper, free the PGresult before failing */ + if (bres == NULL) + { + PQclear(res); + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("out of memory"))); + } + /* Okay, set up the wrapper */ + bres->res = res; + bres->ctx = ctx; + bres->cb.func = (MemoryContextCallbackFunction) PQclear; + bres->cb.arg = res; + MemoryContextRegisterResetCallback(ctx, &bres->cb); + return bres; +} + +/* + * Free a wrapped PGresult, after detaching it from the memory context. + * Like PQclear(), allow the argument to be NULL. + */ +static inline void +libpqsrv_PQclear(libpqsrv_PGresult *bres) +{ + if (bres) + { + MemoryContextUnregisterResetCallback(bres->ctx, &bres->cb); + PQclear(bres->res); + pfree(bres); + } +} + +/* + * Move a wrapped PGresult to have a different parent context. + */ +static inline libpqsrv_PGresult * +libpqsrv_PGresultSetParent(libpqsrv_PGresult *bres, MemoryContext ctx) +{ + libpqsrv_PGresult *newres; + + /* We pass through a NULL result as-is */ + if (bres == NULL) + return NULL; + /* Make a new wrapper in the target context, raising error on OOM */ + newres = (libpqsrv_PGresult *) + MemoryContextAlloc(ctx, sizeof(libpqsrv_PGresult)); + /* Okay, set up the new wrapper */ + newres->res = bres->res; + newres->ctx = ctx; + newres->cb.func = (MemoryContextCallbackFunction) PQclear; + newres->cb.arg = bres->res; + MemoryContextRegisterResetCallback(ctx, &newres->cb); + /* Disarm and delete the old wrapper */ + MemoryContextUnregisterResetCallback(bres->ctx, &bres->cb); + pfree(bres); + return newres; +} + +/* + * Convenience wrapper for PQgetResult. + * + * We could supply wrappers for other PGresult-returning functions too, + * but at present there's no need. + */ +static inline libpqsrv_PGresult * +libpqsrv_PQgetResult(PGconn *conn) +{ + return libpqsrv_PQwrap(PQgetResult(conn)); +} + +/* + * Accessor functions for libpqsrv_PGresult. While it's not necessary to use + * these, they emulate the behavior of the underlying libpq functions when + * passed a NULL pointer. This is particularly important for PQresultStatus, + * which is often the first check on a result. 
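+ *
+ * For example (a sketch, not part of this patch), the usual first
+ * check stays safe even when the wrapper pointer is NULL:
+ *
+ *     libpqsrv_PGresult *res = libpqsrv_PQgetResult(conn);
+ *
+ *     if (libpqsrv_PQresultStatus(res) != PGRES_TUPLES_OK)
+ *         ereport(ERROR, ...);    (a NULL res yields PGRES_FATAL_ERROR)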
+ */ + +static inline ExecStatusType +libpqsrv_PQresultStatus(const libpqsrv_PGresult *res) +{ + if (!res) + return PGRES_FATAL_ERROR; + return PQresultStatus(res->res); +} + +static inline const char * +libpqsrv_PQresultErrorMessage(const libpqsrv_PGresult *res) +{ + if (!res) + return ""; + return PQresultErrorMessage(res->res); +} + +static inline char * +libpqsrv_PQresultErrorField(const libpqsrv_PGresult *res, int fieldcode) +{ + if (!res) + return NULL; + return PQresultErrorField(res->res, fieldcode); +} + +static inline char * +libpqsrv_PQcmdStatus(const libpqsrv_PGresult *res) +{ + if (!res) + return NULL; + return PQcmdStatus(res->res); +} + +static inline int +libpqsrv_PQntuples(const libpqsrv_PGresult *res) +{ + if (!res) + return 0; + return PQntuples(res->res); +} + +static inline int +libpqsrv_PQnfields(const libpqsrv_PGresult *res) +{ + if (!res) + return 0; + return PQnfields(res->res); +} + +static inline char * +libpqsrv_PQgetvalue(const libpqsrv_PGresult *res, int tup_num, int field_num) +{ + if (!res) + return NULL; + return PQgetvalue(res->res, tup_num, field_num); +} + +static inline int +libpqsrv_PQgetlength(const libpqsrv_PGresult *res, int tup_num, int field_num) +{ + if (!res) + return 0; + return PQgetlength(res->res, tup_num, field_num); +} + +static inline int +libpqsrv_PQgetisnull(const libpqsrv_PGresult *res, int tup_num, int field_num) +{ + if (!res) + return 1; /* pretend it is null */ + return PQgetisnull(res->res, tup_num, field_num); +} + +static inline char * +libpqsrv_PQfname(const libpqsrv_PGresult *res, int field_num) +{ + if (!res) + return NULL; + return PQfname(res->res, field_num); +} + +static inline const char * +libpqsrv_PQcmdTuples(const libpqsrv_PGresult *res) +{ + if (!res) + return ""; + return PQcmdTuples(res->res); +} + +/* + * Redefine these libpq entry point names concerned with PGresults so that + * they will operate on libpqsrv_PGresults instead. This avoids needing to + * convert a lot of pre-existing code, and reduces the notational differences + * between frontend and backend libpq-using code. + */ +#define PGresult libpqsrv_PGresult +#define PQclear libpqsrv_PQclear +#define PQgetResult libpqsrv_PQgetResult +#define PQresultStatus libpqsrv_PQresultStatus +#define PQresultErrorMessage libpqsrv_PQresultErrorMessage +#define PQresultErrorField libpqsrv_PQresultErrorField +#define PQcmdStatus libpqsrv_PQcmdStatus +#define PQntuples libpqsrv_PQntuples +#define PQnfields libpqsrv_PQnfields +#define PQgetvalue libpqsrv_PQgetvalue +#define PQgetlength libpqsrv_PQgetlength +#define PQgetisnull libpqsrv_PQgetisnull +#define PQfname libpqsrv_PQfname +#define PQcmdTuples libpqsrv_PQcmdTuples + +#endif /* LIBPQ_BE_FE_H */ diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h index e5dd15098f6..ad2726f026f 100644 --- a/src/include/nodes/pathnodes.h +++ b/src/include/nodes/pathnodes.h @@ -2133,10 +2133,12 @@ typedef struct MemoizePath * complete after caching the first record. 
 */
 bool binary_mode; /* true when cache key should be compared bit
 * by bit, false when using hash equality ops */
- Cardinality calls; /* expected number of rescans */
 uint32 est_entries; /* The maximum number of entries that the
 * planner expects will fit in the cache, or 0
 * if unknown */
+ Cardinality est_calls; /* expected number of rescans */
+ Cardinality est_unique_keys; /* estimated unique keys, for EXPLAIN */
+ double est_hit_ratio; /* estimated cache hit ratio, for EXPLAIN */
 } MemoizePath;

/*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 46e2e09ea35..29d7732d6a0 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -29,18 +29,19 @@
 */
 /* ----------------
- * CachedPlanType
+ * PlannedStmtOrigin
 *
- * CachedPlanType identifies whether a PlannedStmt is a cached plan, and if
- * so, whether it is generic or custom.
+ * PlannedStmtOrigin identifies where a PlannedStmt comes from.
 * ----------------
 */
-typedef enum CachedPlanType
+typedef enum PlannedStmtOrigin
 {
- PLAN_CACHE_NONE = 0, /* Not a cached plan */
- PLAN_CACHE_GENERIC, /* Generic cached plan */
- PLAN_CACHE_CUSTOM, /* Custom cached plan */
-} CachedPlanType;
+ PLAN_STMT_UNKNOWN = 0, /* plan origin is not yet known */
+ PLAN_STMT_INTERNAL, /* generated internally by a query */
+ PLAN_STMT_STANDARD, /* standard planned statement */
+ PLAN_STMT_CACHE_GENERIC, /* generic cached plan */
+ PLAN_STMT_CACHE_CUSTOM, /* custom cached plan */
+} PlannedStmtOrigin;
/* ----------------
 * PlannedStmt node
@@ -72,8 +73,8 @@ typedef struct PlannedStmt
 /* plan identifier (can be set by plugins) */
 int64 planId;
- /* type of cached plan */
- CachedPlanType cached_plan_type;
+ /* origin of plan */
+ PlannedStmtOrigin planOrigin;
 /* is it insert|update|delete|merge RETURNING? */
 bool hasReturning;
@@ -1073,6 +1074,16 @@ typedef struct Memoize
 /* paramids from param_exprs */
 Bitmapset *keyparamids;
+
+ /* Estimated number of rescans, for EXPLAIN */
+ Cardinality est_calls;
+
+ /* Estimated number of distinct lookup keys, for EXPLAIN */
+ Cardinality est_unique_keys;
+
+ /* Estimated cache hit ratio, for EXPLAIN */
+ double est_hit_ratio;
+
 } Memoize;
/* ----------------
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 60dcdb77e41..58936e963cb 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -90,7 +90,7 @@ extern MemoizePath *create_memoize_path(PlannerInfo *root,
 List *hash_operators,
 bool singlerow,
 bool binary_mode,
- double calls);
+ Cardinality est_calls);
 extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel,
 Path *subpath, SpecialJoinInfo *sjinfo);
 extern GatherPath *create_gather_path(PlannerInfo *root,
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index 277ec33c00b..00808e23f49 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -87,6 +87,14 @@ typedef struct catcache
 typedef struct catctup
 {
+ /*
+ * Each tuple in a cache is a member of a dlist that stores the elements
+ * of its hash bucket. We keep each dlist in LRU order to speed repeated
+ * lookups. Keep the dlist_node field first so that Valgrind understands
+ * the struct is reachable.
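+ *
+ * (Valgrind counts a block as properly reachable only when some
+ * pointer points to the block's start; a pointer into its interior
+ * makes the block merely "possibly lost". A catctup is reached through
+ * its dlist links, so putting cache_elem at offset zero turns those
+ * links into start-of-block pointers.)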
+ */ + dlist_node cache_elem; /* list member of per-bucket list */ + int ct_magic; /* for identifying CatCTup entries */ #define CT_MAGIC 0x57261502 @@ -99,13 +107,6 @@ typedef struct catctup Datum keys[CATCACHE_MAXKEYS]; /* - * Each tuple in a cache is a member of a dlist that stores the elements - * of its hash bucket. We keep each dlist in LRU order to speed repeated - * lookups. - */ - dlist_node cache_elem; /* list member of per-bucket list */ - - /* * A tuple marked "dead" must not be returned by subsequent searches. * However, it won't be physically deleted from the cache until its * refcount goes to zero. (If it's a member of a CatCList, the list's @@ -158,13 +159,17 @@ typedef struct catctup */ typedef struct catclist { + /* + * Keep the dlist_node field first so that Valgrind understands the struct + * is reachable. + */ + dlist_node cache_elem; /* list member of per-catcache list */ + int cl_magic; /* for identifying CatCList entries */ #define CL_MAGIC 0x52765103 uint32 hash_value; /* hash value for lookup keys */ - dlist_node cache_elem; /* list member of per-catcache list */ - /* * Lookup keys for the entry, with the first nkeys elements being valid. * All by-reference are separately allocated. diff --git a/src/include/utils/memdebug.h b/src/include/utils/memdebug.h index 7309271834b..80692dcef93 100644 --- a/src/include/utils/memdebug.h +++ b/src/include/utils/memdebug.h @@ -29,6 +29,7 @@ #define VALGRIND_MEMPOOL_ALLOC(context, addr, size) do {} while (0) #define VALGRIND_MEMPOOL_FREE(context, addr) do {} while (0) #define VALGRIND_MEMPOOL_CHANGE(context, optr, nptr, size) do {} while (0) +#define VALGRIND_MEMPOOL_TRIM(context, addr, size) do {} while (0) #endif diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h index e1b42267b22..039b9cba61a 100644 --- a/src/include/utils/palloc.h +++ b/src/include/utils/palloc.h @@ -133,6 +133,8 @@ MemoryContextSwitchTo(MemoryContext context) /* Registration of memory context reset/delete callbacks */ extern void MemoryContextRegisterResetCallback(MemoryContext context, MemoryContextCallback *cb); +extern void MemoryContextUnregisterResetCallback(MemoryContext context, + MemoryContextCallback *cb); /* * These are like standard strdup() except the copied string is diff --git a/src/include/utils/pgstat_internal.h b/src/include/utils/pgstat_internal.h index d5557e6e998..6cf00008f63 100644 --- a/src/include/utils/pgstat_internal.h +++ b/src/include/utils/pgstat_internal.h @@ -295,18 +295,11 @@ typedef struct PgStat_KindInfo * * Returns true if some of the stats could not be flushed, due to lock * contention for example. Optional. - */ - bool (*flush_static_cb) (bool nowait); - - /* - * For fixed-numbered or variable-numbered statistics: Check for pending - * stats in need of flush with flush_static_cb, when these do not use - * PgStat_EntryRef->pending. * - * Returns true if there are any stats pending for flush, triggering - * flush_static_cb. Optional. + * "pgstat_report_fixed" needs to be set to trigger the flush of pending + * stats. */ - bool (*have_static_pending_cb) (void); + bool (*flush_static_cb) (bool nowait); /* * For fixed-numbered statistics: Reset All. 
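 *
 * (Recapping the flush contract above with a sketch that is not part of
 * this patch: a kind that keeps pending data in backend-local variables
 * updates them where the activity occurs and then sets
 *
 *     pgstat_report_fixed = true;
 *
 * so that the next pgstat_report_stat() cycle calls the kind's
 * flush_static_cb to push the pending data into shared memory.)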
@@ -627,7 +620,6 @@ extern void pgstat_archiver_snapshot_cb(void); extern bool pgstat_flush_backend(bool nowait, bits32 flags); extern bool pgstat_backend_flush_cb(bool nowait); -extern bool pgstat_backend_have_pending_cb(void); extern void pgstat_backend_reset_timestamp_cb(PgStatShared_Common *header, TimestampTz ts); @@ -676,7 +668,6 @@ extern bool pgstat_function_flush_cb(PgStat_EntryRef *entry_ref, bool nowait); extern void pgstat_flush_io(bool nowait); -extern bool pgstat_io_have_pending_cb(void); extern bool pgstat_io_flush_cb(bool nowait); extern void pgstat_io_init_shmem_cb(void *stats); extern void pgstat_io_reset_all_cb(TimestampTz ts); @@ -738,7 +729,6 @@ extern PgStatShared_Common *pgstat_init_entry(PgStat_Kind kind, * Functions in pgstat_slru.c */ -extern bool pgstat_slru_have_pending_cb(void); extern bool pgstat_slru_flush_cb(bool nowait); extern void pgstat_slru_init_shmem_cb(void *stats); extern void pgstat_slru_reset_all_cb(TimestampTz ts); @@ -750,7 +740,6 @@ extern void pgstat_slru_snapshot_cb(void); */ extern void pgstat_wal_init_backend_cb(void); -extern bool pgstat_wal_have_pending_cb(void); extern bool pgstat_wal_flush_cb(bool nowait); extern void pgstat_wal_init_shmem_cb(void *stats); extern void pgstat_wal_reset_all_cb(TimestampTz ts); @@ -778,8 +767,23 @@ extern void pgstat_create_transactional(PgStat_Kind kind, Oid dboid, uint64 obji * Variables in pgstat.c */ -extern PGDLLIMPORT PgStat_LocalState pgStatLocal; +/* + * Track if *any* pending fixed-numbered statistics should be flushed to + * shared memory. + * + * This flag can be switched to true by fixed-numbered statistics to let + * pgstat_report_stat() know if it needs to go through one round of + * reports, calling flush_static_cb for each fixed-numbered statistics + * kind. When this flag is not set, pgstat_report_stat() is able to do + * a fast exit, knowing that there are no pending fixed-numbered statistics. + * + * Statistics callbacks should never reset this flag; pgstat_report_stat() + * is in charge of doing that. + */ +extern PGDLLIMPORT bool pgstat_report_fixed; +/* Backend-local stats state */ +extern PGDLLIMPORT PgStat_LocalState pgStatLocal; /* * Implementation of inline functions declared above. diff --git a/src/include/utils/pgstat_kind.h b/src/include/utils/pgstat_kind.h index f44169fd5a3..eb5f0b3ae6d 100644 --- a/src/include/utils/pgstat_kind.h +++ b/src/include/utils/pgstat_kind.h @@ -18,7 +18,7 @@ /* Range of IDs allowed, for built-in and custom kinds */ #define PGSTAT_KIND_MIN 1 /* Minimum ID allowed */ -#define PGSTAT_KIND_MAX 256 /* Maximum ID allowed */ +#define PGSTAT_KIND_MAX 32 /* Maximum ID allowed */ /* use 0 for INVALID, to catch zero-initialized data */ #define PGSTAT_KIND_INVALID 0 @@ -46,7 +46,7 @@ /* Custom stats kinds */ /* Range of IDs allowed for custom stats kinds */ -#define PGSTAT_KIND_CUSTOM_MIN 128 +#define PGSTAT_KIND_CUSTOM_MIN 24 #define PGSTAT_KIND_CUSTOM_MAX PGSTAT_KIND_MAX #define PGSTAT_KIND_CUSTOM_SIZE (PGSTAT_KIND_CUSTOM_MAX - PGSTAT_KIND_CUSTOM_MIN + 1) @@ -55,7 +55,7 @@ * development and have not reserved their own unique kind ID yet. 
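 *
 * (A sketch, not part of this patch: while still experimental, such a
 * module would register its kind from _PG_init() while being loaded via
 * shared_preload_libraries, along the lines of
 *
 *     pgstat_register_kind(PGSTAT_KIND_EXPERIMENTAL, &my_kind_info);
 *
 * where my_kind_info is the module's own PgStat_KindInfo.)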
See:
 * https://wiki.postgresql.org/wiki/CustomCumulativeStats
 */
-#define PGSTAT_KIND_EXPERIMENTAL 128
+#define PGSTAT_KIND_EXPERIMENTAL 24
 static inline bool
 pgstat_is_kind_builtin(PgStat_Kind kind)
diff --git a/src/interfaces/ecpg/ecpglib/prepare.c b/src/interfaces/ecpg/ecpglib/prepare.c
index dd6fd1fe7f4..06f0135813b 100644
--- a/src/interfaces/ecpg/ecpglib/prepare.c
+++ b/src/interfaces/ecpg/ecpglib/prepare.c
@@ -603,7 +603,10 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha
 prep = ecpg_find_prepared_statement(stmtID, con, NULL);
 /* This prepared name doesn't exist on this connection. */
 if (!prep && !prepare_common(lineno, con, stmtID, query))
+ {
+ ecpg_free(*name);
 return false;
+ }
 }
 else
@@ -619,11 +622,17 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha
 return false;
 if (!ECPGprepare(lineno, connection_name, 0, stmtID, query))
+ {
+ ecpg_free(*name);
 return false;
+ }
 entNo = AddStmtToCache(lineno, stmtID, connection_name, compat, query);
 if (entNo < 0)
+ {
+ ecpg_free(*name);
 return false;
+ }
 }
 /* increase usage counter */
diff --git a/src/interfaces/libpq-oauth/Makefile b/src/interfaces/libpq-oauth/Makefile
index 270fc0cf2d9..682f17413b3 100644
--- a/src/interfaces/libpq-oauth/Makefile
+++ b/src/interfaces/libpq-oauth/Makefile
@@ -24,7 +24,7 @@ NAME = pq-oauth-$(MAJORVERSION)
override shlib := lib$(NAME)$(DLSUFFIX)
override stlib := libpq-oauth.a
-override CPPFLAGS := -I$(libpq_srcdir) -I$(top_builddir)/src/port $(LIBCURL_CPPFLAGS) $(CPPFLAGS)
+override CPPFLAGS := -I$(libpq_srcdir) -I$(top_builddir)/src/port $(CPPFLAGS) $(LIBCURL_CPPFLAGS)
OBJS = \
 $(WIN32RES)
diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile
index 47d67811509..da6650066d4 100644
--- a/src/interfaces/libpq/Makefile
+++ b/src/interfaces/libpq/Makefile
@@ -24,7 +24,7 @@ NAME= pq
SO_MAJOR_VERSION= 5
SO_MINOR_VERSION= $(MAJORVERSION)
-override CPPFLAGS := -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port
+override CPPFLAGS := -I$(srcdir) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port $(CPPFLAGS)
ifneq ($(PORTNAME), win32)
override CFLAGS += $(PTHREAD_CFLAGS)
endif
diff --git a/src/interfaces/libpq/fe-cancel.c b/src/interfaces/libpq/fe-cancel.c
index 65517c5703b..c872a0267f0 100644
--- a/src/interfaces/libpq/fe-cancel.c
+++ b/src/interfaces/libpq/fe-cancel.c
@@ -379,7 +379,24 @@ PQgetCancel(PGconn *conn)
 /* Check that we have received a cancellation key */
 if (conn->be_cancel_key_len == 0)
- return NULL;
+ {
+ /*
+ * In case there is no cancel key, return an all-zero PGcancel object.
+ * Actually calling PQcancel on this will fail, but we allow creating
+ * the PGcancel object anyway. Arguably it would be better to return
+ * NULL to indicate that cancellation is not possible, but there'd be no
+ * way for the caller to distinguish "out of memory" from "server did
+ * not send a cancel key". Also, this is how PQgetCancel() has always
+ * behaved, and if we changed it, some clients would stop working
+ * altogether with servers that don't support cancellation. (The
+ * modern PQcancelCreate() function returns a failed connection object
+ * instead.)
+ *
+ * The returned dummy object has cancel_pkt_len == 0; we check for
+ * that in PQcancel() to identify it as a dummy.
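+ *
+ * From the caller's side the flow is unchanged (a sketch, not part of
+ * this patch):
+ *
+ *     PGcancel *c = PQgetCancel(conn);
+ *     char errbuf[256];
+ *
+ *     if (c == NULL || !PQcancel(c, errbuf, sizeof(errbuf)))
+ *         (report errbuf, or out of memory when c is NULL)
+ *     PQfreeCancel(c);
+ *
+ * With a dummy object the failure surfaces in PQcancel() with a clear
+ * message instead of at object-creation time.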
+ */ + return calloc(1, sizeof(PGcancel)); + } cancel_req_len = offsetof(CancelRequestPacket, cancelAuthCode) + conn->be_cancel_key_len; cancel = malloc(offsetof(PGcancel, cancel_req) + cancel_req_len); @@ -544,6 +561,15 @@ PQcancel(PGcancel *cancel, char *errbuf, int errbufsize) return false; } + if (cancel->cancel_pkt_len == 0) + { + /* This is a dummy PGcancel object, see PQgetCancel */ + strlcpy(errbuf, "PQcancel() -- no cancellation key received", errbufsize); + /* strlcpy probably doesn't change errno, but be paranoid */ + SOCK_ERRNO_SET(save_errno); + return false; + } + /* * We need to open a temporary connection to the postmaster. Do this with * only kernel calls. diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index ee961425a5b..f6976689a69 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -177,6 +177,7 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo, yyscan_t scanner; Datum prosrcdatum; char *proc_source; + char *proc_signature; HeapTuple typeTup; Form_pg_type typeStruct; PLpgSQL_variable *var; @@ -223,6 +224,9 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo, plpgsql_check_syntax = forValidator; plpgsql_curr_compile = function; + /* format_procedure leaks memory, so run it in temp context */ + proc_signature = format_procedure(fcinfo->flinfo->fn_oid); + /* * All the permanent output of compilation (e.g. parse tree) is kept in a * per-function memory context, so it can be reclaimed easily. @@ -237,7 +241,7 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo, ALLOCSET_DEFAULT_SIZES); plpgsql_compile_tmp_cxt = MemoryContextSwitchTo(func_cxt); - function->fn_signature = format_procedure(fcinfo->flinfo->fn_oid); + function->fn_signature = pstrdup(proc_signature); MemoryContextSetIdentifier(func_cxt, function->fn_signature); function->fn_oid = fcinfo->flinfo->fn_oid; function->fn_input_collation = fcinfo->fncollation; @@ -1673,6 +1677,11 @@ plpgsql_parse_wordrowtype(char *ident) { Oid classOid; Oid typOid; + TypeName *typName; + MemoryContext oldCxt; + + /* Avoid memory leaks in long-term function context */ + oldCxt = MemoryContextSwitchTo(plpgsql_compile_tmp_cxt); /* * Look up the relation. Note that because relation rowtypes have the @@ -1695,9 +1704,12 @@ plpgsql_parse_wordrowtype(char *ident) errmsg("relation \"%s\" does not have a composite type", ident))); + typName = makeTypeName(ident); + + MemoryContextSwitchTo(oldCxt); + /* Build and return the row type struct */ - return plpgsql_build_datatype(typOid, -1, InvalidOid, - makeTypeName(ident)); + return plpgsql_build_datatype(typOid, -1, InvalidOid, typName); } /* ---------- @@ -1711,6 +1723,7 @@ plpgsql_parse_cwordrowtype(List *idents) Oid classOid; Oid typOid; RangeVar *relvar; + TypeName *typName; MemoryContext oldCxt; /* @@ -1733,11 +1746,12 @@ plpgsql_parse_cwordrowtype(List *idents) errmsg("relation \"%s\" does not have a composite type", relvar->relname))); + typName = makeTypeNameFromNameList(idents); + MemoryContextSwitchTo(oldCxt); /* Build and return the row type struct */ - return plpgsql_build_datatype(typOid, -1, InvalidOid, - makeTypeNameFromNameList(idents)); + return plpgsql_build_datatype(typOid, -1, InvalidOid, typName); } /* @@ -1952,6 +1966,8 @@ plpgsql_build_recfield(PLpgSQL_rec *rec, const char *fldname) * origtypname is the parsed form of what the user wrote as the type name. * It can be NULL if the type could not be a composite type, or if it was * identified by OID to begin with (e.g., it's a function argument type). 
+ * origtypname is in short-lived storage and must be copied if we choose + * to incorporate it into the function's parse tree. */ PLpgSQL_type * plpgsql_build_datatype(Oid typeOid, int32 typmod, @@ -2070,7 +2086,7 @@ build_datatype(HeapTuple typeTup, int32 typmod, errmsg("type %s is not composite", format_type_be(typ->typoid)))); - typ->origtypname = origtypname; + typ->origtypname = copyObject(origtypname); typ->tcache = typentry; typ->tupdesc_id = typentry->tupDesc_identifier; } diff --git a/src/pl/plpgsql/src/pl_gram.y b/src/pl/plpgsql/src/pl_gram.y index 7b672ea5179..17568d82554 100644 --- a/src/pl/plpgsql/src/pl_gram.y +++ b/src/pl/plpgsql/src/pl_gram.y @@ -3853,6 +3853,7 @@ parse_datatype(const char *string, int location, yyscan_t yyscanner) int32 typmod; sql_error_callback_arg cbarg; ErrorContextCallback syntax_errcontext; + MemoryContext oldCxt; cbarg.location = location; cbarg.yyscanner = yyscanner; @@ -3862,9 +3863,14 @@ parse_datatype(const char *string, int location, yyscan_t yyscanner) syntax_errcontext.previous = error_context_stack; error_context_stack = &syntax_errcontext; - /* Let the main parser try to parse it under standard SQL rules */ + /* + * Let the main parser try to parse it under standard SQL rules. The + * parser leaks memory, so run it in temp context. + */ + oldCxt = MemoryContextSwitchTo(plpgsql_compile_tmp_cxt); typeName = typeStringToTypeName(string, NULL); typenameTypeIdAndMod(NULL, typeName, &type_id, &typmod); + MemoryContextSwitchTo(oldCxt); /* Restore former ereport callback */ error_context_stack = syntax_errcontext.previous; diff --git a/src/pl/plpython/Makefile b/src/pl/plpython/Makefile index f959083a0bd..25f295c3709 100644 --- a/src/pl/plpython/Makefile +++ b/src/pl/plpython/Makefile @@ -11,7 +11,7 @@ ifeq ($(PORTNAME), win32) override python_libspec = endif -override CPPFLAGS := -I. -I$(srcdir) $(python_includespec) $(CPPFLAGS) +override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) $(python_includespec) rpathdir = $(python_libdir) diff --git a/src/pl/tcl/Makefile b/src/pl/tcl/Makefile index ea52a2efc22..dd57f7d694c 100644 --- a/src/pl/tcl/Makefile +++ b/src/pl/tcl/Makefile @@ -11,7 +11,7 @@ top_builddir = ../../.. include $(top_builddir)/src/Makefile.global -override CPPFLAGS := -I. -I$(srcdir) $(TCL_INCLUDE_SPEC) $(CPPFLAGS) +override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) $(TCL_INCLUDE_SPEC) # On Windows, we don't link directly with the Tcl library; see below ifneq ($(PORTNAME), win32) diff --git a/src/test/modules/injection_points/injection_stats.c b/src/test/modules/injection_points/injection_stats.c index 14903c629e0..e3947b23ba5 100644 --- a/src/test/modules/injection_points/injection_stats.c +++ b/src/test/modules/injection_points/injection_stats.c @@ -59,7 +59,7 @@ static const PgStat_KindInfo injection_stats = { /* * Kind ID reserved for statistics of injection points. */ -#define PGSTAT_KIND_INJECTION 129 +#define PGSTAT_KIND_INJECTION 25 /* Track if stats are loaded */ static bool inj_stats_loaded = false; diff --git a/src/test/modules/injection_points/injection_stats_fixed.c b/src/test/modules/injection_points/injection_stats_fixed.c index 3d0c01bdd05..bc54c79d190 100644 --- a/src/test/modules/injection_points/injection_stats_fixed.c +++ b/src/test/modules/injection_points/injection_stats_fixed.c @@ -64,7 +64,7 @@ static const PgStat_KindInfo injection_stats_fixed = { /* * Kind ID reserved for statistics of injection points. 
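 *
 * (With PGSTAT_KIND_CUSTOM_MIN lowered from 128 to 24 in pgstat_kind.h,
 * the two injection-point test kinds move from 129 and 130 down to 25
 * and 26.)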
*/ -#define PGSTAT_KIND_INJECTION_FIXED 130 +#define PGSTAT_KIND_INJECTION_FIXED 26 /* Track if fixed-numbered stats are loaded */ static bool inj_fixed_loaded = false; diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm index 61f68e0cc2e..35413f14019 100644 --- a/src/test/perl/PostgreSQL/Test/Cluster.pm +++ b/src/test/perl/PostgreSQL/Test/Cluster.pm @@ -304,6 +304,7 @@ sub is_alive my $ret = PostgreSQL::Test::Utils::system_log( 'pg_isready', + '--timeout' => $PostgreSQL::Test::Utils::timeout_default, '--host' => $self->host, '--port' => $self->port); diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl index debfa635c36..4c5af018ee4 100644 --- a/src/test/recovery/t/013_crash_restart.pl +++ b/src/test/recovery/t/013_crash_restart.pl @@ -228,6 +228,13 @@ is( $node->safe_psql( 'before-orderly-restart', 'can still write after crash restart'); +# Confirm that the logical replication launcher, a background worker +# without the never-restart flag, has also restarted successfully. +is($node->poll_query_until('postgres', + "SELECT count(*) = 1 FROM pg_stat_activity WHERE backend_type = 'logical replication launcher'"), + '1', + 'logical replication launcher restarted after crash'); + # Just to be sure, check that an orderly restart now still works $node->restart(); diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out index f9bd252444f..dc541d61adf 100644 --- a/src/test/regress/expected/foreign_key.out +++ b/src/test/regress/expected/foreign_key.out @@ -1750,7 +1750,7 @@ Indexes: Referenced by: TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) --- Check the exsting FK trigger +-- Check the existing FK trigger SELECT conname, tgrelid::regclass as tgrel, regexp_replace(tgname, '[0-9]+', 'N') as tgname, tgtype FROM pg_trigger t JOIN pg_constraint c ON (t.tgconstraint = c.oid) WHERE tgrelid IN (SELECT relid FROM pg_partition_tree('fk_partitioned_fk'::regclass) diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out index 3a2eacd793f..1ec3fa34a2d 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/expected/publication.out @@ -1934,3 +1934,24 @@ RESET client_min_messages; RESET SESSION AUTHORIZATION; DROP ROLE regress_publication_user, regress_publication_user2; DROP ROLE regress_publication_user_dummy; +-- stage objects for pg_dump tests +CREATE SCHEMA pubme CREATE TABLE t0 (c int, d int) CREATE TABLE t1 (c int); +CREATE SCHEMA pubme2 CREATE TABLE t0 (c int, d int); +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION dump_pub_qual_1ct FOR + TABLE ONLY pubme.t0 (c, d) WHERE (c > 0); +CREATE PUBLICATION dump_pub_qual_2ct FOR + TABLE ONLY pubme.t0 (c) WHERE (c > 0), + TABLE ONLY pubme.t1 (c); +CREATE PUBLICATION dump_pub_nsp_1ct FOR + TABLES IN SCHEMA pubme; +CREATE PUBLICATION dump_pub_nsp_2ct FOR + TABLES IN SCHEMA pubme, + TABLES IN SCHEMA pubme2; +CREATE PUBLICATION dump_pub_all FOR + TABLE ONLY pubme.t0, + TABLE ONLY pubme.t1 WHERE (c < 0), + TABLES IN SCHEMA pubme, + TABLES IN SCHEMA pubme2 + WITH (publish_via_partition_root = true); +RESET client_min_messages; diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql index cfcecb4e911..39174ad1eb9 100644 --- a/src/test/regress/sql/foreign_key.sql +++ b/src/test/regress/sql/foreign_key.sql @@ -1296,7 +1296,7 @@ UPDATE fk_notpartitioned_pk SET 
b = 2504 WHERE a = 2500;
-- check psql behavior
\d fk_notpartitioned_pk
--- Check the exsting FK trigger
+-- Check the existing FK trigger
SELECT conname, tgrelid::regclass as tgrel, regexp_replace(tgname, '[0-9]+', 'N') as tgname, tgtype
FROM pg_trigger t JOIN pg_constraint c ON (t.tgconstraint = c.oid)
WHERE tgrelid IN (SELECT relid FROM pg_partition_tree('fk_partitioned_fk'::regclass)
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index c9e309190df..2585f083181 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -1229,3 +1229,25 @@ RESET client_min_messages;
RESET SESSION AUTHORIZATION;
DROP ROLE regress_publication_user, regress_publication_user2;
DROP ROLE regress_publication_user_dummy;
+
+-- stage objects for pg_dump tests
+CREATE SCHEMA pubme CREATE TABLE t0 (c int, d int) CREATE TABLE t1 (c int);
+CREATE SCHEMA pubme2 CREATE TABLE t0 (c int, d int);
+SET client_min_messages = 'ERROR';
+CREATE PUBLICATION dump_pub_qual_1ct FOR
+ TABLE ONLY pubme.t0 (c, d) WHERE (c > 0);
+CREATE PUBLICATION dump_pub_qual_2ct FOR
+ TABLE ONLY pubme.t0 (c) WHERE (c > 0),
+ TABLE ONLY pubme.t1 (c);
+CREATE PUBLICATION dump_pub_nsp_1ct FOR
+ TABLES IN SCHEMA pubme;
+CREATE PUBLICATION dump_pub_nsp_2ct FOR
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2;
+CREATE PUBLICATION dump_pub_all FOR
+ TABLE ONLY pubme.t0,
+ TABLE ONLY pubme.t1 WHERE (c < 0),
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2
+ WITH (publish_via_partition_root = true);
+RESET client_min_messages;
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 4353befab99..e6f2e93b2d6 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -391,7 +391,6 @@ CachedFunctionHashEntry
CachedFunctionHashKey
CachedPlan
CachedPlanSource
-CachedPlanType
CallContext
CallStmt
CancelRequestPacket
@@ -2276,6 +2275,7 @@ PlanInvalItem
PlanRowMark
PlanState
PlannedStmt
+PlannedStmtOrigin
PlannerGlobal
PlannerInfo
PlannerParamItem
@@ -3757,6 +3757,7 @@ leafSegmentInfo
leaf_item
libpq_gettext_func
libpq_source
+libpqsrv_PGresult
line_t
lineno_t
list_sort_comparator
diff --git a/src/tools/valgrind.supp b/src/tools/valgrind.supp
index 2ad5b81526d..3880007dfb3 100644
--- a/src/tools/valgrind.supp
+++ b/src/tools/valgrind.supp
@@ -194,3 +194,36 @@
 Memcheck:Addr8
 fun:pg_numa_touch_mem_if_required
}
+
+
+# Memory-leak suppressions
+# Note that a suppression rule will silence complaints about memory blocks
+# allocated in matching places, but it won't prevent "indirectly lost"
+# complaints about blocks that are only reachable via the suppressed blocks.
+
+# Suppress complaints about stuff leaked during function cache loading.
+# Both the PL/pgSQL and SQL-function parsing processes generate some cruft
+# within the function's cache context, which doesn't seem worth the trouble
+# to get rid of. Moreover, there are cases where CachedFunction structs
+# are intentionally leaked because we're unsure if any fn_extra pointers
+# remain.
+{
+ hide_function_cache_leaks
+ Memcheck:Leak
+ match-leak-kinds: definite,possible,indirect
+
+ ...
+ fun:cached_function_compile
+}
+
+# Suppress complaints about stuff leaked during TS dictionary loading.
+# Not very much is typically lost there, and preventing it would
+# require a risky API change for TS tmplinit functions.
+{
+ hide_ts_dictionary_leaks
+ Memcheck:Leak
+ match-leak-kinds: definite,possible,indirect
+
+ ...
+ fun:lookup_ts_dictionary_cache
+}
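+
+# (Usage sketch: these rules are picked up when the server is run under
+# Valgrind with --suppressions=src/tools/valgrind.supp; the leak
+# suppressions above only come into play once leak checking is enabled,
+# e.g. with --leak-check=full.)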