dnl PKG_CHECK_MODULES(), but does not set variables or print errors.
dnl
dnl Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG])
-dnl only at the first occurence in configure.ac, so if the first place
+dnl only at the first occurrence in configure.ac, so if the first place
dnl it's called might be skipped (such as if it is within an "if", you
dnl have to call PKG_CHECK_EXISTS manually
AC_DEFUN([PKG_CHECK_EXISTS],
(5 rows)
-- gist_page_items_bytea prints the raw key data as a bytea. The output of that is
--- platform-dependent (endianess), so omit the actual key data from the output.
+-- platform-dependent (endianness), so omit the actual key data from the output.
SELECT itemoffset, ctid, itemlen FROM gist_page_items_bytea(get_raw_page('test_gist_idx', 0));
itemoffset | ctid | itemlen
------------+-----------+---------
SELECT * FROM gist_page_items(get_raw_page('test_gist_idx', 1), 'test_gist_idx') LIMIT 5;
-- gist_page_items_bytea prints the raw key data as a bytea. The output of that is
--- platform-dependent (endianess), so omit the actual key data from the output.
+-- platform-dependent (endianness), so omit the actual key data from the output.
SELECT itemoffset, ctid, itemlen FROM gist_page_items_bytea(get_raw_page('test_gist_idx', 0));
DROP TABLE test_gist;
log file by reducing the value of this parameter. On a system with
low WAL activity, increasing it reduces the amount of requests necessary
to access WAL archives, something useful for example in cloud
- environments where the amount of times an infrastructure is accessed
+ environments where the number of times an infrastructure is accessed
is taken into account.
</para>
</listitem>
Including <literal>jsonlog</literal> in the
<varname>log_destination</varname> list provides a convenient way to
import log files into many different programs. This option emits log
- lines in (<acronym>JSON</acronym>) format.
+ lines in <acronym>JSON</acronym> format.
</para>
<para>
The <parameter>txn</parameter> parameter contains meta information about
the transaction the sequence change is part of. Note however that for
non-transactional increments, the transaction may be either NULL or not
- NULL, depending on if the transaction already has XID assigned.
- The <parameter>sequence_lsn</parameter> has WAL location of the sequence
- update. The <parameter>transactional</parameter> says if the sequence has
- to be replayed as part of the transaction or directly.
+ NULL, depending on whether the transaction already has an XID assigned.
+ The <parameter>sequence_lsn</parameter> holds the WAL location of the
+ sequence update. <parameter>transactional</parameter> indicates whether
+ the sequence change must be replayed as part of the transaction or directly.
The <parameter>last_value</parameter>, <parameter>log_cnt</parameter> and
<parameter>is_called</parameter> parameters describe the sequence change.
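To make the parameter description above concrete, here is a minimal sketch of what an output plugin's sequence-change callback might look like. The prototype and the demo_* name are assumptions for illustration only; just the parameter names (txn, sequence_lsn, transactional, last_value, log_cnt, is_called) come from the documentation text in the hunk above.

#include "postgres.h"

#include "access/xlogdefs.h"
#include "lib/stringinfo.h"
#include "replication/logical.h"
#include "replication/output_plugin.h"
#include "replication/reorderbuffer.h"
#include "utils/rel.h"

/*
 * Hypothetical sequence-change callback: the signature is assumed from the
 * parameters described above, not taken verbatim from the committed API.
 */
static void
demo_sequence_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
				 XLogRecPtr sequence_lsn, Relation rel, bool transactional,
				 int64 last_value, int64 log_cnt, bool is_called)
{
	/* txn may be NULL for a non-transactional increment, per the text above */
	(void) txn;

	OutputPluginPrepareWrite(ctx, true);
	appendStringInfo(ctx->out,
					 "sequence %s at %X/%X: last_value %lld, log_cnt %lld, "
					 "is_called %d, transactional %d",
					 RelationGetRelationName(rel),
					 LSN_FORMAT_ARGS(sequence_lsn),
					 (long long) last_value, (long long) log_cnt,
					 (int) is_called, (int) transactional);
	OutputPluginWrite(ctx, true);
}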
streamed in blocks demarcated by <function>stream_start_cb</function>
and <function>stream_stop_cb</function> callbacks. Once all the decoded
changes are transmitted, the transaction can be committed using the
- the <function>stream_commit_cb</function> callback
+ <function>stream_commit_cb</function> callback
(or possibly aborted using the <function>stream_abort_cb</function> callback).
If two-phase commits are supported, the transaction can be prepared using the
<function>stream_prepare_cb</function> callback,
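As a rough illustration of how the streaming callbacks named in the hunk above fit together, the sketch below wires them into an output plugin's _PG_output_plugin_init(). The demo_stream_* functions are hypothetical, do-nothing placeholders, and the non-streaming callbacks a real plugin must also register are omitted for brevity.

#include "postgres.h"

#include "replication/logical.h"
#include "replication/output_plugin.h"
#include "replication/reorderbuffer.h"

/* Hypothetical streaming callbacks; a real plugin would emit the changes. */
static void
demo_stream_start(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
}

static void
demo_stream_stop(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
}

static void
demo_stream_commit(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
				   XLogRecPtr commit_lsn)
{
}

static void
demo_stream_abort(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
				  XLogRecPtr abort_lsn)
{
}

static void
demo_stream_prepare(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
					XLogRecPtr prepare_lsn)
{
}

void
_PG_output_plugin_init(OutputPluginCallbacks *cb)
{
	/* non-streaming callbacks (begin_cb, change_cb, commit_cb, ...) omitted */
	cb->stream_start_cb = demo_stream_start;
	cb->stream_stop_cb = demo_stream_stop;
	cb->stream_commit_cb = demo_stream_commit;
	cb->stream_abort_cb = demo_stream_abort;
	cb->stream_prepare_cb = demo_stream_prepare;	/* if two-phase is supported */
}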
it's passed to and used as <varname>application_name</varname>
in a foreign server, note that it will be truncated to less than
<symbol>NAMEDATALEN</symbol> characters and anything other than
- than printable ASCII characters will be replaced with question
+ printable ASCII characters will be replaced with question
marks (<literal>?</literal>).
See <xref linkend="guc-application-name"/> for details.
</para>
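A hedged, illustrative sketch of the behavior described above (this is not postgres_fdw's actual code, and the function name is made up): truncate the candidate value to fewer than NAMEDATALEN characters and replace anything outside printable ASCII with '?'.

#include "postgres.h"

#include <string.h>

/*
 * Illustrative only: clamp an application_name candidate to fewer than
 * NAMEDATALEN characters and replace non-printable-ASCII bytes with '?'.
 */
static void
sanitize_application_name(char *name)
{
	if (strlen(name) >= NAMEDATALEN)
		name[NAMEDATALEN - 1] = '\0';

	for (char *p = name; *p; p++)
	{
		unsigned char c = (unsigned char) *p;

		if (c < 0x20 || c > 0x7E)	/* outside printable ASCII */
			*p = '?';
	}
}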
/*
* Tuple visibility is only computed once for each tuple, for correctness
* and efficiency reasons; see comment in heap_page_prune() for
- * details. This is of type int8[], intead of HTSV_Result[], so we can use
+ * details. This is of type int8[], instead of HTSV_Result[], so we can use
* -1 to indicate no visibility has been computed, e.g. for LP_DEAD items.
*
* Same indexing as ->marked.
* full-page image to be included in the WAL record.
*
* The returned values are cached copies from backend-private memory, and
- * possibly out-of-date or, indeed, uninitalized, in which case they will
+ * possibly out-of-date or, indeed, uninitialized, in which case they will
* be InvalidXLogRecPtr and false, respectively. XLogInsertRecord will
* re-check them against up-to-date values, while holding the WAL insert lock.
*/
indexRelationId = binary_upgrade_next_index_pg_class_oid;
binary_upgrade_next_index_pg_class_oid = InvalidOid;
- /* Overide the index relfilenode */
+ /* Override the index relfilenode */
if ((relkind == RELKIND_INDEX) &&
(!OidIsValid(binary_upgrade_next_index_pg_class_relfilenode)))
ereport(ERROR,
ListCell *lc;
List *context;
StringInfoData keystr;
- char *seperator = "";
+ char *separator = "";
bool useprefix;
int64 memPeakKb;
{
Node *expr = (Node *) lfirst(lc);
- appendStringInfoString(&keystr, seperator);
+ appendStringInfoString(&keystr, separator);
appendStringInfoString(&keystr, deparse_expression(expr, context,
useprefix, false));
- seperator = ", ";
+ separator = ", ";
}
if (es->format != EXPLAIN_FORMAT_TEXT)
* leaving the default in place would make debugging harder.
*
* MINGW's own C runtime doesn't have _set_abort_behavior(). When
- * targetting Microsoft's UCRT with mingw, it never links to the debug
+ * targeting Microsoft's UCRT with mingw, it never links to the debug
* version of the library and thus doesn't need the call to
* _set_abort_behavior() either.
*/
* This is called during a normal ("smart" or "fast") database shutdown.
* After this point, no new background workers will be started, so anything
* that might be waiting for them needs to be kicked off its wait. We do
- * that by cancelling the bgworker registration entirely, which is perhaps
+ * that by canceling the bgworker registration entirely, which is perhaps
* overkill, but since we're shutting down it does not matter whether the
* registration record sticks around.
*
* Forward begin_backup callback.
*
* Only use this implementation if you want the bbsink you're implementing to
- * share a buffer with the succesor bbsink.
+ * share a buffer with the successor bbsink.
*/
void
bbsink_forward_begin_backup(bbsink *sink)
/*
* Forward archive_contents callback.
*
- * Code that wants to use this should initalize its own bbs_buffer and
+ * Code that wants to use this should initialize its own bbs_buffer and
* bbs_buffer_length fields to the values from the successor sink. In cases
* where the buffer isn't shared, the data needs to be copied before forwarding
* the callback. We don't do try to do that here, because there's really no
}
/*
limit (sum(1/i^2),i=1,inf) = pi^2/6
- resj = sum(wi/i^2),i=1,noccurence,
+ resj = sum(wi/i^2),i=1,noccurrence,
wi - should be sorted desc,
don't sort for now, just choose maximum weight. This should be corrected
Oleg Bartunov
*
* As a debugging aid, we try to give some hint about what kind of message
* provoked the failure. Perhaps this is not detailed enough, but it's not
- * clear that it's worth expending any more code on what shoud be a
+ * clear that it's worth expending any more code on what should be a
* can't-happen case.
*/
static void
# For nearly all pg_basebackup invocations some options should be specified,
# to keep test times reasonable. Using @pg_basebackup_defs as the first
-# element of the array passed to to IPC::Run interpolate the array (as it is
+# element of the array passed to IPC::Run interpolates the array (as it is
# not a reference to an array)...
my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast');
SKIP:
{
my $tar = $ENV{TAR};
- # don't check for a working tar here, to accomodate various odd
+ # don't check for a working tar here, to accommodate various odd
# cases such as AIX. If tar doesn't work the init_from_backup below
# will fail.
skip "no tar program available", 1
SKIP: {
my $tar = $ENV{TAR};
- # don't check for a working tar here, to accomodate various odd
+ # don't check for a working tar here, to accommodate various odd
# cases such as AIX. If tar doesn't work the init_from_backup below
# will fail.
skip "no tar program available", 1
SKIP: {
my $tar = $ENV{TAR};
- # don't check for a working tar here, to accomodate various odd
+ # don't check for a working tar here, to accommodate various odd
# cases such as AIX. If tar doesn't work the init_from_backup below
# will fail.
skip "no tar program available", 1
* accounts is big enough to be worth using COPY and tracking runtime
*/
- /* use COPY with FREEZE on v14 and later without partioning */
+ /* use COPY with FREEZE on v14 and later without partitioning */
if (partitions == 0 && PQserverVersion(con) >= 140000)
copy_statement = "copy pgbench_accounts from stdin with (freeze on)";
else
* must return a pointer to its allocated state, which will be passed
* as-is as the first argument to the other callbacks.
*
- * Input paramters:
+ * Input parameters:
*
* port: The client Port.
*
chomp($result{stderr});
# use is_deeply so there's one test result for each test above, without
- # loosing the information whether stdout/stderr mismatched.
+ # losing the information whether stdout/stderr mismatched.
is_deeply(\%result, \%expect, $uri);
}
# local %ENV = $self->_get_env{[%extra_settings]);
#
# A copy of the environment is taken and node's host and port settings are
-# added as PGHOST and PGPORT, Then the extra settings (if any) are applied.
-# Any setting in %extra_settings with a value that is undefined is deleted
-# the remainder are# set. Then the PATH and (DY)LD_LIBRARY_PATH are adjusted
+# added as PGHOST and PGPORT, then the extra settings (if any) are applied.
+# Any setting in %extra_settings with a value that is undefined is deleted;
+# the remainder are set. Then the PATH and (DY)LD_LIBRARY_PATH are adjusted
# if the node's install path is set, and the copy environment is returned.
#
# The install path set in new() needs to be a directory containing
SELECT 1;
SELECT false;
END;
--- check display of function argments in sub-SELECT
+-- check display of function arguments in sub-SELECT
CREATE TABLE functest1 (i int);
CREATE FUNCTION functest_S_16(a int, b int) RETURNS void
LANGUAGE SQL
insert into parted_notnull_inh_test (b) values (null);
ERROR: null value in column "b" of relation "parted_notnull_inh_test1" violates not-null constraint
DETAIL: Failing row contains (1, null).
--- note that while b's default is overriden, a's default is preserved
+-- note that while b's default is overridden, a's default is preserved
\d parted_notnull_inh_test1
Table "public.parted_notnull_inh_test1"
Column | Type | Collation | Nullable | Default
('pg_namespace'::regclass, 0, 0), -- no schema
('pg_statistic_ext'::regclass, 0, 0), -- no statistics
('pg_ts_parser'::regclass, 0, 0), -- no TS parser
- ('pg_ts_dict'::regclass, 0, 0), -- no TS dictionnary
+ ('pg_ts_dict'::regclass, 0, 0), -- no TS dictionary
('pg_ts_template'::regclass, 0, 0), -- no TS template
('pg_ts_config'::regclass, 0, 0), -- no TS configuration
('pg_authid'::regclass, 0, 0), -- no role
SELECT false;
END;
--- check display of function argments in sub-SELECT
+-- check display of function arguments in sub-SELECT
CREATE TABLE functest1 (i int);
CREATE FUNCTION functest_S_16(a int, b int) RETURNS void
LANGUAGE SQL
create table parted_notnull_inh_test (a int default 1, b int not null default 0) partition by list (a);
create table parted_notnull_inh_test1 partition of parted_notnull_inh_test (a not null, b default 1) for values in (1);
insert into parted_notnull_inh_test (b) values (null);
--- note that while b's default is overriden, a's default is preserved
+-- note that while b's default is overridden, a's default is preserved
\d parted_notnull_inh_test1
drop table parted_notnull_inh_test;
('pg_namespace'::regclass, 0, 0), -- no schema
('pg_statistic_ext'::regclass, 0, 0), -- no statistics
('pg_ts_parser'::regclass, 0, 0), -- no TS parser
- ('pg_ts_dict'::regclass, 0, 0), -- no TS dictionnary
+ ('pg_ts_dict'::regclass, 0, 0), -- no TS dictionary
('pg_ts_template'::regclass, 0, 0), -- no TS template
('pg_ts_config'::regclass, 0, 0), -- no TS configuration
('pg_authid'::regclass, 0, 0), -- no role
# application_name to ensure that the walsender is (re)started.
#
# Not all of these are registered as tests as we need to poll for a change
-# but the test suite will fail none the less when something goes wrong.
+# but the test suite will fail nonetheless when something goes wrong.
my $oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
);