/*
* Current tuple has no posting list. If TID is visible, save info about it
- * for the next comparisons in the loop in bt_page_check(). Report
+ * for the next comparisons in the loop in bt_target_page_check(). Report
* duplicate if lVis_tid is already valid.
*/
else
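A self-contained toy of the bookkeeping described above may help; all names and types below are invented for the sketch and are not amcheck's actual code. The first visible tuple for a key records its TID; any later visible tuple for the same key is reported as a duplicate.

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned blk, off; } TupleId;   /* stand-in for an index TID */

static TupleId lVis_tid;            /* TID of last visible tuple for this key */
static bool    lVis_valid = false;  /* has lVis_tid been set yet? */

/* Called for each equal-keyed tuple in key order. */
static void
check_unique(TupleId tid, bool visible)
{
    if (!visible)
        return;                     /* dead tuples can't violate uniqueness */
    if (lVis_valid)
        printf("duplicate: (%u,%u) vs (%u,%u)\n",
               lVis_tid.blk, lVis_tid.off, tid.blk, tid.off);
    else
    {
        lVis_tid = tid;             /* save for the next comparisons */
        lVis_valid = true;
    }
}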
* Note that !readonly callers must reverify that target page has not
* been concurrently deleted.
*
- * Save rightfirstdataoffset for detailed error message.
+ * Save rightfirstoffset for detailed error message.
*/
static BTScanInsert
bt_right_page_check_scankey(BtreeCheckState *state, OffsetNumber *rightfirstoffset)
<term><literal>xid_wraparound</literal></term>
<listitem>
<para>
- Runs the test suite under <filename>src/test/module/xid_wrapround</filename>.
+ Runs the test suite under <filename>src/test/modules/xid_wraparound</filename>.
Not enabled by default because it is resource intensive.
</para>
</listitem>
backend Build backend and related modules
bin Build frontend binaries
contrib Build contrib modules
- pl Build procedual languages
+ pl Build procedural languages
Developer Targets:
reformat-dat-files Rewrite catalog data files into standard format
*
* After waiting for all workers to finish, merge the per-worker results into
* the complete index. The results from each worker are sorted by block number
- * (start of the page range). While combinig the per-worker results we merge
+ * (start of the page range). While combining the per-worker results we merge
 * summaries for the same page range, and also fill in empty summaries for
* ranges without any tuples.
*
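To illustrate the merge step described above, a self-contained toy model (names and types invented for the sketch, not taken from brin.c): per-worker results arrive sorted by the first block of each page range, summaries covering the same range are unioned, and ranges no worker summarized are filled in as empty.

#include <stdio.h>

typedef struct
{
    unsigned start_block;   /* first block of the page range */
    int      summary;       /* toy summary payload */
} WorkerResult;

static void
merge_worker_results(const WorkerResult *in, int n,
                     unsigned nranges, unsigned pages_per_range)
{
    int i = 0;

    for (unsigned r = 0; r < nranges; r++)
    {
        unsigned start = r * pages_per_range;
        int      merged = 0;
        int      seen = 0;

        /* union all per-worker summaries covering this range */
        while (i < n && in[i].start_block == start)
        {
            merged |= in[i].summary;
            seen = 1;
            i++;
        }

        if (seen)
            printf("range %u: merged summary %d\n", start, merged);
        else
            printf("range %u: filled in empty summary\n", start);
    }
}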
* name: name of SLRU. (This is user-visible, pick with care!)
* nslots: number of page slots to use.
* nlsns: number of LSN groups per page (set to zero if not relevant).
- * ctllock: LWLock to use to control access to the shared control structure.
* subdir: PGDATA-relative subdirectory that will contain the files.
* buffer_tranche_id: tranche ID to use for the SLRU's per-buffer LWLocks.
* bank_tranche_id: tranche ID to use for the bank LWLocks.
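For orientation, a call under this parameter list might look like the sketch below. Only the parameters documented above are meant to be illustrated; the tranche constants are invented names, and the trailing arguments are assumptions about the rest of the signature.

/* Sketch only: arguments follow the parameter list documented above. */
SimpleLruInit(MySlruCtl,
              "my_slru",                /* name: user-visible, pick with care */
              64,                       /* nslots: page slots to use */
              0,                        /* nlsns: no LSN groups needed */
              "pg_my_slru",             /* subdir: under PGDATA */
              LWTRANCHE_MY_SLRU_BUFFER, /* buffer_tranche_id (invented name) */
              LWTRANCHE_MY_SLRU_BANK,   /* bank_tranche_id (invented name) */
              SYNC_HANDLER_NONE,        /* assumed trailing argument */
              false);                   /* assumed trailing argument */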
/*
* Return the extra open flags used for opening a file, depending on the
- * value of the GUCs wal_sync_method, fsync and io_direct.
+ * value of the GUCs wal_sync_method, fsync and debug_io_direct.
*/
static int
get_sync_bit(int method)
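The mapping itself is simple enough to sketch. The enum spellings below are invented for the sketch (PG's real names differ), but the idea matches the comment: only the open_sync and open_datasync settings contribute extra open(2) flags, while the fsync-based methods need none.

#include <fcntl.h>

/* Assumed enum values for the sketch; PG's real names differ. */
enum { SYNCMETH_FSYNC, SYNCMETH_OPEN_SYNC, SYNCMETH_OPEN_DSYNC };

static int
sync_open_flags(int method)
{
    switch (method)
    {
        case SYNCMETH_OPEN_SYNC:
            return O_SYNC;          /* wal_sync_method = open_sync */
#ifdef O_DSYNC
        case SYNCMETH_OPEN_DSYNC:
            return O_DSYNC;         /* wal_sync_method = open_datasync */
#endif
        default:
            return 0;               /* fsync-based methods: no extra flags */
    }
}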
* Check against pre-existing constraints. If we are allowed
* to merge with an existing constraint, there's no more to do
* here. (We omit the duplicate constraint from the result,
- * which is what ATAddCheckConstraint wants.)
+ * which is what ATAddCheckNNConstraint wants.)
*/
if (MergeWithExistingConstraint(rel, ccname, expr,
allow_merge, is_local,
* ExecValuesScan scans a values list.
* ExecValuesNext retrieve next tuple in sequential order.
* ExecInitValuesScan creates and initializes a valuesscan node.
- * ExecEndValuesScan releases any storage allocated.
* ExecReScanValuesScan rescans the values list
*/
#include "postgres.h"
static void _jumbleNode(JumbleState *jstate, Node *node);
static void _jumbleA_Const(JumbleState *jstate, Node *node);
static void _jumbleList(JumbleState *jstate, Node *node);
-static void _jumbleRangeTblEntry(JumbleState *jstate, Node *node);
/*
* Given a possibly multi-statement source string, confine our attention to the
/*
* Logical slot sync/creation requires wal_level >= logical.
*
- * Sincle altering the wal_level requires a server restart, so error out
- * in this case regardless of elevel provided by caller.
+ * Since altering the wal_level requires a server restart, error out in
+ * this case regardless of the elevel provided by the caller.
*/
if (wal_level < WAL_LEVEL_LOGICAL)
ereport(ERROR,
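        /* A plausible continuation of the ereport above; the actual
         * errcode and message text are not shown in this excerpt, so
         * this completion is a sketch: */
        errcode(ERRCODE_INVALID_PARAMETER_VALUE),
        errmsg("replication slot synchronization requires \"wal_level\" >= \"logical\""));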
}
/*
- * get_json_nested_columns - Parse back nested JSON_TABLE columns
+ * get_json_table_nested_columns - Parse back nested JSON_TABLE columns
*/
static void
get_json_table_nested_columns(TableFunc *tf, JsonTablePlan *plan,
/*
* Returns version of Unicode used by Postgres in "major.minor" format (the
* same format as the Unicode version reported by ICU). The third component
- * ("update version") never involves additions to the character repertiore and
+ * ("update version") never involves additions to the character repertoire and
* is unimportant for most purposes.
*
* See: https://unicode.org/versions/
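A trivial sketch of the reported format (the values and macro names are placeholders, not PG's):

#include <stdio.h>

#define UNICODE_MAJOR 15   /* placeholder values for the sketch */
#define UNICODE_MINOR 1

/* Produce "major.minor", omitting the update version, e.g. "15.1". */
static void
print_unicode_version(void)
{
    printf("%d.%d\n", UNICODE_MAJOR, UNICODE_MINOR);
}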
* files requiring reconstruction. If such files occur outside these
* directories, we want to just copy them straight to the output
* directory. This is to protect against a user creating a file with a
- * strange name like INCREMENTAL.config and then compaining that
+ * strange name like INCREMENTAL.config and then complaining that
* incremental backups don't work properly. The test here is a bit tricky:
* incremental files occur in subdirectories of base, in pg_global itself,
* and in subdirectories of pg_tblspc only if in-place tablespaces are
}
/*
- * read_quoted_pattern - read quoted possibly multi line string
+ * read_quoted_string - read a quoted, possibly multi-line string
*
* Reads a quoted string that can span multiple lines and returns a pointer
* to the next character after the ending double quote; it will exit on errors.
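The contract is easy to mis-read, so here is a self-contained toy version (not the real implementation, which handles more details): return the position just past the closing quote, allow embedded newlines, and exit on an unterminated string.

#include <stdio.h>
#include <stdlib.h>

/* Toy: *p must point at the opening double quote. */
static const char *
skip_quoted(const char *p)
{
    if (*p != '"')
    {
        fprintf(stderr, "expected opening quote\n");
        exit(1);
    }
    for (p++; *p != '\0'; p++)
    {
        /* newlines are fine: the string may span multiple lines */
        if (*p == '"')
            return p + 1;   /* next char after the ending double quote */
    }
    fprintf(stderr, "unterminated quoted string\n");
    exit(1);
}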
" ON o.roname = 'pg_' || s.oid "
"INNER JOIN pg_catalog.pg_database d "
" ON d.oid = s.subdbid "
- "WHERE o.roname iS NULL;");
+ "WHERE o.roname IS NULL;");
ntup = PQntuples(res);
for (int i = 0; i < ntup; i++)
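{
    /* Sketch of a possible loop body; the query's SELECT list is not
     * shown in this excerpt, so the column index and its meaning here
     * are assumptions. */
    const char *val = PQgetvalue(res, i, 0);    /* libpq: row i, column 0 */

    fprintf(stderr, "found match: %s\n", val);
}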
* return it.
*
* This accepts the dbname which will be appended to the primary_conninfo.
- * The dbname will be ignored by walreciever process but slotsync worker uses
+ * The dbname will be ignored by the walreceiver process, but the slotsync worker uses
* it to connect to the primary server.
*/
PQExpBuffer
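A sketch of the appending described above, using libpq's PQExpBuffer API (the variable names are illustrative and conninfo keyword quoting is glossed over):

PQExpBuffer buf = createPQExpBuffer();

appendPQExpBufferStr(buf, primary_conninfo);    /* existing conninfo */
if (dbname != NULL)
    appendPQExpBuffer(buf, " dbname=%s", dbname);
/* use buf->data, then destroyPQExpBuffer(buf) when done */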
const char *queryString);
extern List *AddRelationNotNullConstraints(Relation rel,
List *constraints,
- List *additional_notnulls);
+ List *old_notnulls);
extern void RelationClearMissing(Relation rel);
extern void SetAttrMissing(Oid relid, char *attname, char *value);
* report_error_fn should not return.
*/
typedef int (*io_callback_fn) (void *callback_arg, void *data, int length);
-typedef void (*report_error_fn) (void *calblack_arg, char *msg,...) pg_attribute_printf(2, 3);
+typedef void (*report_error_fn) (void *callback_arg, char *msg,...) pg_attribute_printf(2, 3);
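For clarity, a minimal callback matching report_error_fn; pg_attribute_printf(2, 3) tells the compiler that argument 2 is a printf-style format string whose values begin at argument 3.

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Example callback; callback_arg is whatever the caller registered. */
static void
my_report_error(void *callback_arg, char *msg, ...)
{
    va_list ap;

    va_start(ap, msg);
    vfprintf(stderr, msg, ap);
    va_end(ap);
    fputc('\n', stderr);
    exit(1);    /* report_error_fn should not return */
}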
/*
*
* We provide a set of hooks here - which the provider must take care to set
* up correctly - to allow extensions to supply their own methods of scanning
- * a relation or joing relations. For example, a provider might provide GPU
+ * a relation or joining relations. For example, a provider might provide GPU
* acceleration, a cache-based scan, or some other kind of logic we haven't
* dreamed up yet.
*
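As a sketch of the wiring (set_rel_pathlist_hook is a real planner hook, but the callback and the module are hypothetical; assume an extension that already declares PG_MODULE_MAGIC):

static set_rel_pathlist_hook_type prev_set_rel_pathlist_hook = NULL;

static void
my_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                    Index rti, RangeTblEntry *rte)
{
    if (prev_set_rel_pathlist_hook)
        prev_set_rel_pathlist_hook(root, rel, rti, rte);
    /* ...consider adding a CustomPath for this relation here... */
}

void
_PG_init(void)
{
    prev_set_rel_pathlist_hook = set_rel_pathlist_hook;
    set_rel_pathlist_hook = my_set_rel_pathlist;
}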
*
* The actual value is obtained by evaluating formatted_expr. raw_expr is
* only there for displaying the original user-written expression and is not
- * evaluated by ExecInterpExpr() and eval_const_exprs_mutator().
+ * evaluated by ExecInterpExpr() and eval_const_expressions_mutator().
*/
typedef struct JsonValueExpr
{
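    /* Field sketch based on the comment above; the two expression fields
     * are named in the comment, but consult primnodes.h for the
     * authoritative (and possibly larger) definition: */
    NodeTag     type;
    Expr       *raw_expr;        /* original user expression, display only */
    Expr       *formatted_expr;  /* expression actually evaluated */
} JsonValueExpr;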
# The expected events and outcomes above assume that SSL support
# is enabled. When libpq is compiled without SSL support, all
# attempts to connect with sslmode=require or
- # sslnegotition=direct/requiredirectwould fail immediately without
+ # sslnegotiation=direct/requiredirect would fail immediately without
# even connecting to the server. Skip those, because we tested
# them earlier already.
my ($sslmodes, $sslnegotiations);
### Helper functions
-# Test the cube of parameters: user, gssencmode, sslmode, and sslnegotitation
+# Test the cube of parameters: user, gssencmode, sslmode, and sslnegotiation
sub test_matrix
{
local $Test::Builder::Level = $Test::Builder::Level + 1;
log_exact => '2',
err_like => [qr/You are welcome/]);
-# Try to login as allowed Alice. We don't check the Mallroy login, because
+# Try to login as allowed Alice. We don't check the Mallory login, because
# a FATAL error could cause a timing-dependent panic of IPC::Run.
psql_command(
$node, 'SELECT 1;', 0, 'try regress_alice',
# Test the JSON parser performance tester. Here we are just checking that
# the performance tester can run, both with the standard parser and the
# incremental parser. An actual performance test will run with thousands
-# of iterations onstead of just one.
+# of iterations instead of just one.
use strict;
use warnings;
my $log_offset = -s $node->logfile;
# worker_spi_launch() may be able to detect that the worker has been
-# stopped, so do not rely on psql_safe().
+# stopped, so do not rely on safe_psql().
$node->psql('postgres',
qq[SELECT worker_spi_launch(12, $noconndb_id, $myrole_id);]);
$node->wait_for_log(
/*
* Now fill in worker-specific data, and do the actual registrations.
*
- * bgw_extra can optionally include a dabatase OID, a role OID and a set
+ * bgw_extra can optionally include a database OID, a role OID and a set
 * of flags. This is left empty here to fall back to the related GUCs at
* startup (0 for the bgworker flags).
*/
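A compact registration sketch consistent with this comment (the library and function names echo the surrounding worker_spi test; the exact code is illustrative, not the test module's actual source):

BackgroundWorker worker;
BackgroundWorkerHandle *handle;

memset(&worker, 0, sizeof(worker));
worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
worker.bgw_restart_time = BGW_NEVER_RESTART;
snprintf(worker.bgw_library_name, BGW_MAXLEN, "worker_spi");
snprintf(worker.bgw_function_name, BGW_MAXLEN, "worker_spi_main");
snprintf(worker.bgw_name, BGW_MAXLEN, "worker_spi dynamic worker");
/* bgw_extra deliberately left zeroed: fall back to the GUCs at startup */
if (!RegisterDynamicBackgroundWorker(&worker, &handle))
    ereport(ERROR,
            errmsg("could not register background worker"));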