From c29c578908dc0271eeb13a4014e54bff07a29c05 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut
Date: Sun, 8 Oct 2017 21:44:17 -0400
Subject: [PATCH] Don't use SGML empty tags
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

For DocBook XML compatibility, don't use SGML empty tags (</>) anymore,
replace by the full tag name.

Add a warning option to catch future occurrences.

Alexander Lakhin, Jürgen Purtz
---
 doc/src/sgml/Makefile | 3 +-
 doc/src/sgml/acronyms.sgml | 18 +-
 doc/src/sgml/adminpack.sgml | 54 +-
 doc/src/sgml/advanced.sgml | 110 +-
 doc/src/sgml/amcheck.sgml | 66 +-
 doc/src/sgml/arch-dev.sgml | 64 +-
 doc/src/sgml/array.sgml | 110 +-
 doc/src/sgml/auth-delay.sgml | 6 +-
 doc/src/sgml/auto-explain.sgml | 36 +-
 doc/src/sgml/backup.sgml | 496 +--
 doc/src/sgml/bgworker.sgml | 80 +-
 doc/src/sgml/biblio.sgml | 2 +-
 doc/src/sgml/bki.sgml | 86 +-
 doc/src/sgml/bloom.sgml | 24 +-
 doc/src/sgml/brin.sgml | 78 +-
 doc/src/sgml/btree-gin.sgml | 18 +-
 doc/src/sgml/btree-gist.sgml | 32 +-
 doc/src/sgml/catalogs.sgml | 1012 +++----
 doc/src/sgml/charset.sgml | 270 +-
 doc/src/sgml/citext.sgml | 120 +-
 doc/src/sgml/client-auth.sgml | 328 +-
 doc/src/sgml/config.sgml | 2156 ++++++-------
 doc/src/sgml/contrib-spi.sgml | 80 +-
 doc/src/sgml/contrib.sgml | 26 +-
 doc/src/sgml/cube.sgml | 158 +-
 doc/src/sgml/custom-scan.sgml | 136 +-
 doc/src/sgml/datatype.sgml | 692 ++---
 doc/src/sgml/datetime.sgml | 72 +-
 doc/src/sgml/dblink.sgml | 270 +-
 doc/src/sgml/ddl.sgml | 390 +--
 doc/src/sgml/dfunc.sgml | 40 +-
 doc/src/sgml/dict-int.sgml | 18 +-
 doc/src/sgml/dict-xsyn.sgml | 40 +-
 doc/src/sgml/diskusage.sgml | 16 +-
 doc/src/sgml/dml.sgml | 32 +-
 doc/src/sgml/docguide.sgml | 2 +-
 doc/src/sgml/earthdistance.sgml | 32 +-
 doc/src/sgml/ecpg.sgml | 734 ++---
 doc/src/sgml/errcodes.sgml | 18 +-
 doc/src/sgml/event-trigger.sgml | 78 +-
 doc/src/sgml/extend.sgml | 362 +--
 doc/src/sgml/external-projects.sgml | 18 +-
 doc/src/sgml/fdwhandler.sgml | 880 +++---
 doc/src/sgml/file-fdw.sgml | 48 +-
 doc/src/sgml/func.sgml | 2528 ++++++++--------
 doc/src/sgml/fuzzystrmatch.sgml | 26 +-
 doc/src/sgml/generate-errcodes-table.pl | 4 +-
 doc/src/sgml/generic-wal.sgml | 42 +-
 doc/src/sgml/geqo.sgml | 12 +-
 doc/src/sgml/gin.sgml | 286 +-
 doc/src/sgml/gist.sgml | 402 +--
 doc/src/sgml/high-availability.sgml | 516 ++--
 doc/src/sgml/history.sgml | 4 +-
 doc/src/sgml/hstore.sgml | 152 +-
 doc/src/sgml/indexam.sgml | 446 +--
 doc/src/sgml/indices.sgml | 224 +-
 doc/src/sgml/info.sgml | 6 +-
 doc/src/sgml/information_schema.sgml | 364 +--
 doc/src/sgml/install-windows.sgml | 58 +-
 doc/src/sgml/installation.sgml | 460 +--
 doc/src/sgml/intagg.sgml | 28 +-
 doc/src/sgml/intarray.sgml | 68 +-
 doc/src/sgml/intro.sgml | 4 +-
 doc/src/sgml/isn.sgml | 28 +-
 doc/src/sgml/json.sgml | 174 +-
 doc/src/sgml/libpq.sgml | 1414 ++++-----
 doc/src/sgml/lo.sgml | 34 +-
 doc/src/sgml/lobj.sgml | 128 +-
 doc/src/sgml/logicaldecoding.sgml | 18 +-
 doc/src/sgml/ltree.sgml | 234 +-
 doc/src/sgml/maintenance.sgml | 338 +--
 doc/src/sgml/manage-ag.sgml | 196 +-
 doc/src/sgml/monitoring.sgml | 1440 ++++-----
 doc/src/sgml/mvcc.sgml | 130 +-
 doc/src/sgml/nls.sgml | 24 +-
 doc/src/sgml/notation.sgml | 8 +-
 doc/src/sgml/oid2name.sgml | 60 +-
 doc/src/sgml/pageinspect.sgml | 36 +-
 doc/src/sgml/parallel.sgml | 96 +-
 doc/src/sgml/perform.sgml | 344 +--
 doc/src/sgml/pgbuffercache.sgml | 16 +-
 doc/src/sgml/pgcrypto.sgml | 176 +-
 doc/src/sgml/pgfreespacemap.sgml | 8 +-
 doc/src/sgml/pgprewarm.sgml | 16 +-
 doc/src/sgml/pgrowlocks.sgml | 10 +-
 doc/src/sgml/pgstandby.sgml | 100 +-
 doc/src/sgml/pgstatstatements.sgml | 106 +-
 doc/src/sgml/pgstattuple.sgml | 30 +-
 doc/src/sgml/pgtrgm.sgml | 90 +-
 doc/src/sgml/pgvisibility.sgml | 12 +-
 doc/src/sgml/planstats.sgml | 52 +-
 doc/src/sgml/plhandler.sgml | 50 +-
 doc/src/sgml/plperl.sgml | 148 +-
 doc/src/sgml/plpgsql.sgml | 1204 ++++----
 doc/src/sgml/plpython.sgml | 104 +-
 doc/src/sgml/pltcl.sgml | 210 +-
 doc/src/sgml/postgres-fdw.sgml | 198 +-
 doc/src/sgml/postgres.sgml | 16 +-
 doc/src/sgml/problems.sgml | 20 +-
 doc/src/sgml/protocol.sgml | 474 +--
 doc/src/sgml/queries.sgml | 674 ++---
 doc/src/sgml/query.sgml | 36 +-
 doc/src/sgml/rangetypes.sgml | 66 +-
 doc/src/sgml/recovery-config.sgml | 132 +-
 doc/src/sgml/ref/abort.sgml | 2 +-
 doc/src/sgml/ref/alter_aggregate.sgml | 18 +-
 doc/src/sgml/ref/alter_collation.sgml | 2 +-
 doc/src/sgml/ref/alter_conversion.sgml | 2 +-
 doc/src/sgml/ref/alter_database.sgml | 4 +-
 .../sgml/ref/alter_default_privileges.sgml | 20 +-
 doc/src/sgml/ref/alter_domain.sgml | 20 +-
 doc/src/sgml/ref/alter_extension.sgml | 22 +-
 .../sgml/ref/alter_foreign_data_wrapper.sgml | 18 +-
 doc/src/sgml/ref/alter_foreign_table.sgml | 42 +-
 doc/src/sgml/ref/alter_function.sgml | 28 +-
 doc/src/sgml/ref/alter_group.sgml | 8 +-
 doc/src/sgml/ref/alter_index.sgml | 10 +-
 doc/src/sgml/ref/alter_materialized_view.sgml | 4 +-
 doc/src/sgml/ref/alter_opclass.sgml | 2 +-
 doc/src/sgml/ref/alter_operator.sgml | 2 +-
 doc/src/sgml/ref/alter_opfamily.sgml | 32 +-
 doc/src/sgml/ref/alter_publication.sgml | 8 +-
 doc/src/sgml/ref/alter_role.sgml | 22 +-
 doc/src/sgml/ref/alter_schema.sgml | 2 +-
 doc/src/sgml/ref/alter_sequence.sgml | 34 +-
 doc/src/sgml/ref/alter_server.sgml | 12 +-
 doc/src/sgml/ref/alter_statistics.sgml | 4 +-
 doc/src/sgml/ref/alter_subscription.sgml | 4 +-
 doc/src/sgml/ref/alter_system.sgml | 12 +-
 doc/src/sgml/ref/alter_table.sgml | 140 +-
 doc/src/sgml/ref/alter_tablespace.sgml | 4 +-
 doc/src/sgml/ref/alter_trigger.sgml | 4 +-
 doc/src/sgml/ref/alter_tsconfig.sgml | 20 +-
 doc/src/sgml/ref/alter_tsdictionary.sgml | 8 +-
 doc/src/sgml/ref/alter_tsparser.sgml | 2 +-
 doc/src/sgml/ref/alter_tstemplate.sgml | 2 +-
 doc/src/sgml/ref/alter_type.sgml | 6 +-
 doc/src/sgml/ref/alter_user_mapping.sgml | 14 +-
 doc/src/sgml/ref/alter_view.sgml | 10 +-
 doc/src/sgml/ref/analyze.sgml | 16 +-
 doc/src/sgml/ref/begin.sgml | 4 +-
 doc/src/sgml/ref/close.sgml | 4 +-
 doc/src/sgml/ref/cluster.sgml | 10 +-
 doc/src/sgml/ref/clusterdb.sgml | 62 +-
 doc/src/sgml/ref/comment.sgml | 26 +-
 doc/src/sgml/ref/commit.sgml | 2 +-
 doc/src/sgml/ref/commit_prepared.sgml | 2 +-
 doc/src/sgml/ref/copy.sgml | 226 +-
 doc/src/sgml/ref/create_access_method.sgml | 8 +-
 doc/src/sgml/ref/create_aggregate.sgml | 164 +-
 doc/src/sgml/ref/create_cast.sgml | 80 +-
 doc/src/sgml/ref/create_collation.sgml | 2 +-
 doc/src/sgml/ref/create_conversion.sgml | 6 +-
 doc/src/sgml/ref/create_database.sgml | 54 +-
 doc/src/sgml/ref/create_domain.sgml | 22 +-
 doc/src/sgml/ref/create_event_trigger.sgml | 6 +-
 doc/src/sgml/ref/create_extension.sgml | 38 +-
 .../sgml/ref/create_foreign_data_wrapper.sgml | 12 +-
 doc/src/sgml/ref/create_foreign_table.sgml | 44 +-
 doc/src/sgml/ref/create_function.sgml | 128 +-
 doc/src/sgml/ref/create_index.sgml | 120 +-
 doc/src/sgml/ref/create_language.sgml | 42 +-
 .../sgml/ref/create_materialized_view.sgml | 10 +-
 doc/src/sgml/ref/create_opclass.sgml | 38 +-
 doc/src/sgml/ref/create_operator.sgml | 24 +-
 doc/src/sgml/ref/create_opfamily.sgml | 4 +-
 doc/src/sgml/ref/create_policy.sgml | 20 +-
 doc/src/sgml/ref/create_publication.sgml | 14 +-
 doc/src/sgml/ref/create_role.sgml | 94 +-
 doc/src/sgml/ref/create_rule.sgml | 32 +-
 doc/src/sgml/ref/create_schema.sgml | 32 +-
 doc/src/sgml/ref/create_sequence.sgml | 28 +-
 doc/src/sgml/ref/create_server.sgml | 8 +-
 doc/src/sgml/ref/create_statistics.sgml | 10 +-
 doc/src/sgml/ref/create_subscription.sgml | 4 +-
 doc/src/sgml/ref/create_table.sgml | 294 +-
 doc/src/sgml/ref/create_table_as.sgml | 36 +-
 doc/src/sgml/ref/create_tablespace.sgml | 22 +-
 doc/src/sgml/ref/create_trigger.sgml | 172 +-
 doc/src/sgml/ref/create_tsconfig.sgml | 2 +-
 doc/src/sgml/ref/create_tstemplate.sgml | 2 +-
 doc/src/sgml/ref/create_type.sgml | 98 +-
 doc/src/sgml/ref/create_user.sgml | 4 +-
 doc/src/sgml/ref/create_user_mapping.sgml | 10 +-
 doc/src/sgml/ref/create_view.sgml | 140 +-
 doc/src/sgml/ref/createdb.sgml | 66 +-
 doc/src/sgml/ref/createuser.sgml | 114 +-
 doc/src/sgml/ref/declare.sgml | 56 +-
 doc/src/sgml/ref/delete.sgml | 62 +-
 doc/src/sgml/ref/discard.sgml | 10 +-
 doc/src/sgml/ref/do.sgml | 18 +-
 doc/src/sgml/ref/drop_access_method.sgml | 4 +-
 doc/src/sgml/ref/drop_aggregate.sgml | 10 +-
 doc/src/sgml/ref/drop_collation.sgml | 4 +-
 doc/src/sgml/ref/drop_conversion.sgml | 2 +-
 doc/src/sgml/ref/drop_database.sgml | 2 +-
 doc/src/sgml/ref/drop_domain.sgml | 6 +-
 doc/src/sgml/ref/drop_extension.sgml | 6 +-
 .../sgml/ref/drop_foreign_data_wrapper.sgml | 6 +-
 doc/src/sgml/ref/drop_foreign_table.sgml | 2 +-
 doc/src/sgml/ref/drop_function.sgml | 12 +-
 doc/src/sgml/ref/drop_index.sgml | 12 +-
 doc/src/sgml/ref/drop_language.sgml | 4 +-
 doc/src/sgml/ref/drop_opclass.sgml | 12 +-
 doc/src/sgml/ref/drop_opfamily.sgml | 4 +-
 doc/src/sgml/ref/drop_owned.sgml | 2 +-
 doc/src/sgml/ref/drop_publication.sgml | 2 +-
 doc/src/sgml/ref/drop_role.sgml | 4 +-
 doc/src/sgml/ref/drop_schema.sgml | 2 +-
 doc/src/sgml/ref/drop_sequence.sgml | 2 +-
 doc/src/sgml/ref/drop_server.sgml | 6 +-
 doc/src/sgml/ref/drop_subscription.sgml | 2 +-
 doc/src/sgml/ref/drop_table.sgml | 6 +-
 doc/src/sgml/ref/drop_tablespace.sgml | 6 +-
 doc/src/sgml/ref/drop_tsconfig.sgml | 4 +-
 doc/src/sgml/ref/drop_tsdictionary.sgml | 2 +-
 doc/src/sgml/ref/drop_tsparser.sgml | 2 +-
 doc/src/sgml/ref/drop_tstemplate.sgml | 2 +-
 doc/src/sgml/ref/drop_type.sgml | 4 +-
 doc/src/sgml/ref/drop_user_mapping.sgml | 14 +-
 doc/src/sgml/ref/drop_view.sgml | 2 +-
 doc/src/sgml/ref/dropdb.sgml | 48 +-
 doc/src/sgml/ref/dropuser.sgml | 46 +-
 doc/src/sgml/ref/ecpg-ref.sgml | 6 +-
 doc/src/sgml/ref/end.sgml | 2 +-
 doc/src/sgml/ref/execute.sgml | 4 +-
 doc/src/sgml/ref/explain.sgml | 18 +-
 doc/src/sgml/ref/fetch.sgml | 36 +-
 doc/src/sgml/ref/grant.sgml | 98 +-
 doc/src/sgml/ref/import_foreign_schema.sgml | 14 +-
 doc/src/sgml/ref/initdb.sgml | 42 +-
 doc/src/sgml/ref/insert.sgml | 86 +-
 doc/src/sgml/ref/listen.sgml | 6 +-
 doc/src/sgml/ref/load.sgml | 14 +-
 doc/src/sgml/ref/lock.sgml | 72 +-
 doc/src/sgml/ref/move.sgml | 2 +-
 doc/src/sgml/ref/notify.sgml | 12 +-
 doc/src/sgml/ref/pg_basebackup.sgml | 40 +-
 doc/src/sgml/ref/pg_config-ref.sgml | 100 +-
 doc/src/sgml/ref/pg_controldata.sgml | 8 +-
 doc/src/sgml/ref/pg_ctl-ref.sgml | 50 +-
 doc/src/sgml/ref/pg_dump.sgml | 280 +-
 doc/src/sgml/ref/pg_dumpall.sgml | 102 +-
 doc/src/sgml/ref/pg_isready.sgml | 32 +-
 doc/src/sgml/ref/pg_receivewal.sgml | 34 +-
 doc/src/sgml/ref/pg_recvlogical.sgml | 26 +-
 doc/src/sgml/ref/pg_resetwal.sgml | 56 +-
 doc/src/sgml/ref/pg_restore.sgml | 150 +-
 doc/src/sgml/ref/pg_rewind.sgml | 48 +-
 doc/src/sgml/ref/pg_waldump.sgml | 16 +-
 doc/src/sgml/ref/pgarchivecleanup.sgml | 50 +-
 doc/src/sgml/ref/pgbench.sgml | 568 ++--
 doc/src/sgml/ref/pgtestfsync.sgml | 16 +-
 doc/src/sgml/ref/pgtesttiming.sgml | 10 +-
 doc/src/sgml/ref/pgupgrade.sgml | 232 +-
 doc/src/sgml/ref/postgres-ref.sgml | 76 +-
 doc/src/sgml/ref/postmaster.sgml | 2 +-
 doc/src/sgml/ref/prepare.sgml | 18 +-
 doc/src/sgml/ref/prepare_transaction.sgml | 30 +-
 doc/src/sgml/ref/psql-ref.sgml | 662 ++--
 doc/src/sgml/ref/reassign_owned.sgml | 2 +-
 .../sgml/ref/refresh_materialized_view.sgml | 4 +-
 doc/src/sgml/ref/reindex.sgml | 34 +-
 doc/src/sgml/ref/reindexdb.sgml | 80 +-
 doc/src/sgml/ref/release_savepoint.sgml | 2 +-
 doc/src/sgml/ref/reset.sgml | 10 +-
 doc/src/sgml/ref/revoke.sgml | 48 +-
 doc/src/sgml/ref/rollback.sgml | 2 +-
 doc/src/sgml/ref/rollback_prepared.sgml | 2 +-
 doc/src/sgml/ref/rollback_to.sgml | 28 +-
 doc/src/sgml/ref/savepoint.sgml | 8 +-
 doc/src/sgml/ref/security_label.sgml | 22 +-
 doc/src/sgml/ref/select.sgml | 598 ++--
 doc/src/sgml/ref/set.sgml | 44 +-
 doc/src/sgml/ref/set_constraints.sgml | 14 +-
 doc/src/sgml/ref/set_role.sgml | 36 +-
 doc/src/sgml/ref/set_session_auth.sgml | 16 +-
 doc/src/sgml/ref/set_transaction.sgml | 12 +-
 doc/src/sgml/ref/show.sgml | 2 +-
 doc/src/sgml/ref/start_transaction.sgml | 6 +-
 doc/src/sgml/ref/truncate.sgml | 38 +-
 doc/src/sgml/ref/unlisten.sgml | 2 +-
 doc/src/sgml/ref/update.sgml | 88 +-
 doc/src/sgml/ref/vacuum.sgml | 24 +-
 doc/src/sgml/ref/vacuumdb.sgml | 52 +-
 doc/src/sgml/ref/values.sgml | 60 +-
 doc/src/sgml/regress.sgml | 122 +-
 doc/src/sgml/release-10.sgml | 736 ++---
 doc/src/sgml/release-7.4.sgml | 700 ++---
 doc/src/sgml/release-8.0.sgml | 1266 ++++----
 doc/src/sgml/release-8.1.sgml | 1344 ++++----
 doc/src/sgml/release-8.2.sgml | 1598 +++++-----
 doc/src/sgml/release-8.3.sgml | 1682 +++++-----
 doc/src/sgml/release-8.4.sgml | 2468 +++++++--------
 doc/src/sgml/release-9.0.sgml | 2572 ++++++++--------
 doc/src/sgml/release-9.1.sgml | 2678 ++++++++--------
 doc/src/sgml/release-9.2.sgml | 2694 ++++++++---------
 doc/src/sgml/release-9.3.sgml | 2518 +++++++--------
 doc/src/sgml/release-9.4.sgml | 2142 ++++++-------
 doc/src/sgml/release-9.5.sgml | 1800 +++++------
 doc/src/sgml/release-9.6.sgml | 1516 +++++-----
 doc/src/sgml/release-old.sgml | 314 +-
 doc/src/sgml/release.sgml | 2 +-
 doc/src/sgml/rowtypes.sgml | 130 +-
 doc/src/sgml/rules.sgml | 326 +-
 doc/src/sgml/runtime.sgml | 610 ++--
 doc/src/sgml/seg.sgml | 70 +-
 doc/src/sgml/sepgsql.sgml | 190 +-
 doc/src/sgml/sourcerepo.sgml | 24 +-
 doc/src/sgml/sources.sgml | 170 +-
 doc/src/sgml/spgist.sgml | 468 +--
 doc/src/sgml/spi.sgml | 226 +-
 doc/src/sgml/sslinfo.sgml | 14 +-
 doc/src/sgml/start.sgml | 22 +-
 doc/src/sgml/storage.sgml | 328 +-
 doc/src/sgml/syntax.sgml | 362 +--
 doc/src/sgml/tablefunc.sgml | 168 +-
 doc/src/sgml/tablesample-method.sgml | 128 +-
 doc/src/sgml/tcn.sgml | 8 +-
 doc/src/sgml/test-decoding.sgml | 4 +-
 doc/src/sgml/textsearch.sgml | 734 ++---
 doc/src/sgml/trigger.sgml | 220 +-
 doc/src/sgml/tsm-system-rows.sgml | 8 +-
 doc/src/sgml/tsm-system-time.sgml | 8 +-
 doc/src/sgml/typeconv.sgml | 122 +-
 doc/src/sgml/unaccent.sgml | 44 +-
 doc/src/sgml/user-manag.sgml | 138 +-
 doc/src/sgml/uuid-ossp.sgml | 26 +-
 doc/src/sgml/vacuumlo.sgml | 42 +-
 doc/src/sgml/wal.sgml | 138 +-
 doc/src/sgml/xaggr.sgml | 160 +-
 doc/src/sgml/xfunc.sgml | 614 ++--
 doc/src/sgml/xindex.sgml | 192 +-
 doc/src/sgml/xml2.sgml | 58 +-
 doc/src/sgml/xoper.sgml | 142 +-
 doc/src/sgml/xplang.sgml | 26 +-
 doc/src/sgml/xtypes.sgml | 68 +-
 337 files changed, 31636 insertions(+), 31635 deletions(-)

diff --git a/doc/src/sgml/Makefile b/doc/src/sgml/Makefile
index 164c00bb63..428eb569fc 100644
--- a/doc/src/sgml/Makefile
+++ b/doc/src/sgml/Makefile
@@ -66,10 +66,11 @@ ALLSGML := $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) $(GENERATED_SGML)
 # Enable some extra warnings
 # -wfully-tagged needed to throw a warning on missing tags
 # for older tool chains, 2007-08-31
-override SPFLAGS += -wall -wno-unused-param -wno-empty -wfully-tagged
+override SPFLAGS += -wall -wno-unused-param -wfully-tagged
 # Additional warnings for XML compatibility. The conditional is meant
 # to detect whether we are using OpenSP rather than the ancient
 # original SP.
+override SPFLAGS += -wempty
 ifneq (,$(filter o%,$(notdir $(OSX))))
 override SPFLAGS += -wdata-delim -winstance-ignore-ms -winstance-include-ms -winstance-param-entity
 endif
diff --git a/doc/src/sgml/acronyms.sgml b/doc/src/sgml/acronyms.sgml
index 29f85e0846..35514d4d9a 100644
--- a/doc/src/sgml/acronyms.sgml
+++ b/doc/src/sgml/acronyms.sgml
@@ -4,8 +4,8 @@
 Acronyms
- This is a list of acronyms commonly used in the PostgreSQL
- documentation and in discussions about PostgreSQL.
+ This is a list of acronyms commonly used in the PostgreSQL
+ documentation and in discussions about PostgreSQL.
@@ -153,7 +153,7 @@
 Data Definition Language, SQL commands such as CREATE
- TABLE, ALTER USER
+ TABLE, ALTER USER
@@ -164,8 +164,8 @@
 Data
- Manipulation Language, SQL commands such as INSERT,
- UPDATE, DELETE
+ Manipulation Language, SQL commands such as INSERT,
+ UPDATE, DELETE
@@ -281,7 +281,7 @@
 Grand Unified Configuration,
- the PostgreSQL subsystem that handles server configuration
+ the PostgreSQL subsystem that handles server configuration
@@ -384,7 +384,7 @@
 LSN
- Log Sequence Number, see pg_lsn
+ Log Sequence Number, see pg_lsn
 and WAL Internals.
@@ -486,7 +486,7 @@
 PGSQL
- PostgreSQL
+ PostgreSQL
@@ -495,7 +495,7 @@
 PGXS
- PostgreSQL Extension System
+ PostgreSQL Extension System
diff --git a/doc/src/sgml/adminpack.sgml b/doc/src/sgml/adminpack.sgml
index fddf90c4a5..b27a4a325d 100644
--- a/doc/src/sgml/adminpack.sgml
+++ b/doc/src/sgml/adminpack.sgml
@@ -8,8 +8,8 @@
- adminpack provides a number of support functions which
- pgAdmin and other administration and management tools can
+ adminpack provides a number of support functions which
+ pgAdmin and other administration and management tools can
 use to provide additional functionality, such as remote management of server log files. Use of all these functions is restricted to superusers.
@@ -25,7 +25,7 @@
- <filename>adminpack</> Functions
+ <filename>adminpack</filename> Functions
 Name Return Type Description
@@ -58,7 +58,7 @@
 pg_catalog.pg_logdir_ls() setof record
- List the log files in the log_directory directory
+ List the log files in the log_directory directory
@@ -69,9 +69,9 @@
 pg_file_write
- pg_file_write writes the specified data into
- the file named by filename. If append is
- false, the file must not already exist. If append is true,
+ pg_file_write writes the specified data into
+ the file named by filename. If append is
+ false, the file must not already exist. If append is true,
 the file can already exist, and will be appended to if so. Returns the number of bytes written.
@@ -80,15 +80,15 @@
 pg_file_rename
- pg_file_rename renames a file. If archivename
- is omitted or NULL, it simply renames oldname
- to newname (which must not already exist).
- If archivename is provided, it first
- renames newname to archivename (which must
- not already exist), and then renames oldname
- to newname. In event of failure of the second rename step,
- it will try to rename archivename back
- to newname before reporting the error.
+ pg_file_rename renames a file. If archivename
+ is omitted or NULL, it simply renames oldname
+ to newname (which must not already exist).
+ If archivename is provided, it first
+ renames newname to archivename (which must
+ not already exist), and then renames oldname
+ to newname. In event of failure of the second rename step,
+ it will try to rename archivename back
+ to newname before reporting the error.
 Returns true on success, false if the source file(s) are not present or not writable; other cases throw errors.
@@ -97,19 +97,19 @@
 pg_file_unlink
- pg_file_unlink removes the specified file.
+ pg_file_unlink removes the specified file.
 Returns true on success, false if the specified file is not present
- or the unlink() call fails; other cases throw errors.
+ or the unlink() call fails; other cases throw errors.
 pg_logdir_ls
- pg_logdir_ls returns the start timestamps and path
+ pg_logdir_ls returns the start timestamps and path
 names of all the log files in the directory. The parameter must have its
- default setting (postgresql-%Y-%m-%d_%H%M%S.log) to use this
+ default setting (postgresql-%Y-%m-%d_%H%M%S.log) to use this
 function.
@@ -119,12 +119,12 @@
 and should not be used in new applications; instead use those shown in and . These functions are
- provided in adminpack only for compatibility with old
- versions of pgAdmin.
+ provided in adminpack only for compatibility with old
+ versions of pgAdmin.
- Deprecated <filename>adminpack</> Functions
+ Deprecated <filename>adminpack</filename> Functions
 Name Return Type Description
@@ -136,22 +136,22 @@
 pg_catalog.pg_file_read(filename text, offset bigint, nbytes bigint) text
- Alternate name for pg_read_file()
+ Alternate name for pg_read_file()
 pg_catalog.pg_file_length(filename text) bigint
- Same as size column returned
- by pg_stat_file()
+ Same as size column returned
+ by pg_stat_file()
 pg_catalog.pg_logfile_rotate() integer
- Alternate name for pg_rotate_logfile(), but note that it
+ Alternate name for pg_rotate_logfile(), but note that it
 returns integer 0 or 1 rather than boolean
diff --git a/doc/src/sgml/advanced.sgml b/doc/src/sgml/advanced.sgml
index f47c01987b..bf87df4dcb 100644
--- a/doc/src/sgml/advanced.sgml
+++ b/doc/src/sgml/advanced.sgml
@@ -145,7 +145,7 @@ DETAIL: Key (city)=(Berkeley) is not present in table "cities".
- Transactions are a fundamental concept of all database
+ Transactions are a fundamental concept of all database
 systems. The essential point of a transaction is that it bundles multiple steps into a single, all-or-nothing operation. The intermediate states between the steps are not visible to other concurrent transactions,
@@ -182,8 +182,8 @@ UPDATE branches SET balance = balance + 100.00
 remain a happy customer if she was debited without Bob being credited. We need a guarantee that if something goes wrong partway through the operation, none of the steps executed so far will take effect. Grouping
- the updates into a transaction gives us this guarantee.
- A transaction is said to be atomic: from the point of
+ the updates into a transaction gives us this guarantee.
+ A transaction is said to be atomic: from the point of
 view of other transactions, it either happens completely or not at all.
@@ -216,9 +216,9 @@
- In PostgreSQL, a transaction is set up by surrounding
+ In PostgreSQL, a transaction is set up by surrounding
 the SQL commands of the transaction with
- BEGIN and COMMIT commands. So our banking
+ BEGIN and COMMIT commands. So our banking
 transaction would actually look like:
@@ -233,23 +233,23 @@ COMMIT;
 If, partway through the transaction, we decide we do not want to commit (perhaps we just noticed that Alice's balance went negative),
- we can issue the command ROLLBACK instead of
- COMMIT, and all our updates so far will be canceled.
+ we can issue the command ROLLBACK instead of
+ COMMIT, and all our updates so far will be canceled.
- PostgreSQL actually treats every SQL statement as being
- executed within a transaction. If you do not issue a BEGIN
+ PostgreSQL actually treats every SQL statement as being
+ executed within a transaction. If you do not issue a BEGIN
 command,
- then each individual statement has an implicit BEGIN and
- (if successful) COMMIT wrapped around it. A group of
- statements surrounded by BEGIN and COMMIT
- is sometimes called a transaction block.
+ then each individual statement has an implicit BEGIN and
+ (if successful) COMMIT wrapped around it. A group of
+ statements surrounded by BEGIN and COMMIT
+ is sometimes called a transaction block.
- Some client libraries issue BEGIN and COMMIT
+ Some client libraries issue BEGIN and COMMIT
 commands automatically, so that you might get the effect of transaction blocks without asking. Check the documentation for the interface you are using.
@@ -258,11 +258,11 @@ COMMIT;
 It's possible to control the statements in a transaction in a more
- granular fashion through the use of savepoints. Savepoints
+ granular fashion through the use of savepoints. Savepoints
 allow you to selectively discard parts of the transaction, while committing the rest. After defining a savepoint with
- SAVEPOINT, you can if needed roll back to the savepoint
- with ROLLBACK TO. All the transaction's database changes
+ SAVEPOINT, you can if needed roll back to the savepoint
+ with ROLLBACK TO. All the transaction's database changes
 between defining the savepoint and rolling back to it are discarded, but changes earlier than the savepoint are kept.
@@ -308,7 +308,7 @@ COMMIT;
 This example is, of course, oversimplified, but there's a lot of control possible in a transaction block through the use of savepoints.
- Moreover, ROLLBACK TO is the only way to regain control of a
+ Moreover, ROLLBACK TO is the only way to regain control of a
 transaction block that was put in aborted state by the system due to an error, short of rolling it back completely and starting again.
@@ -325,7 +325,7 @@
- A window function performs a calculation across a set of
+ A window function performs a calculation across a set of
 table rows that are somehow related to the current row. This is comparable to the type of calculation that can be done with an aggregate function. However, window functions do not cause rows to become grouped into a single
@@ -360,31 +360,31 @@ SELECT depname, empno, salary, avg(salary) OVER (PARTITION BY depname) FROM emps
 The first three output columns come directly from the table
- empsalary, and there is one output row for each row in the
+ empsalary, and there is one output row for each row in the
 table. The fourth column represents an average taken across all the table
- rows that have the same depname value as the current row.
- (This actually is the same function as the non-window avg
- aggregate, but the OVER clause causes it to be
+ rows that have the same depname value as the current row.
+ (This actually is the same function as the non-window avg
+ aggregate, but the OVER clause causes it to be
 treated as a window function and computed across the window frame.)
- A window function call always contains an OVER clause
+ A window function call always contains an OVER clause
 directly following the window function's name and argument(s). This is what syntactically distinguishes it from a normal function or non-window
- aggregate. The OVER clause determines exactly how the
+ aggregate. The OVER clause determines exactly how the
 rows of the query are split up for processing by the window function.
- The PARTITION BY clause within OVER
+ The PARTITION BY clause within OVER
 divides the rows into groups, or partitions, that share the same
- values of the PARTITION BY expression(s). For each row,
+ values of the PARTITION BY expression(s). For each row,
 the window function is computed across the rows that fall into the same partition as the current row.
 You can also control the order in which rows are processed by
- window functions using ORDER BY within OVER.
- (The window ORDER BY does not even have to match the
+ window functions using ORDER BY within OVER.
+ (The window ORDER BY does not even have to match the
 order in which the rows are output.)
 Here is an example:
@@ -409,39 +409,39 @@ FROM empsalary;
 (10 rows)
- As shown here, the rank function produces a numerical rank
- for each distinct ORDER BY value in the current row's
- partition, using the order defined by the ORDER BY clause.
- rank needs no explicit parameter, because its behavior
- is entirely determined by the OVER clause.
+ As shown here, the rank function produces a numerical rank
+ for each distinct ORDER BY value in the current row's
+ partition, using the order defined by the ORDER BY clause.
+ rank needs no explicit parameter, because its behavior
+ is entirely determined by the OVER clause.
 The rows considered by a window function are those of the virtual
- table produced by the query's FROM clause as filtered by its
- WHERE, GROUP BY, and HAVING clauses
+ table produced by the query's FROM clause as filtered by its
+ WHERE, GROUP BY, and HAVING clauses
 if any. For example, a row removed because it does not meet the
- WHERE condition is not seen by any window function.
+ WHERE condition is not seen by any window function.
 A query can contain multiple window functions that slice up the data
- in different ways using different OVER clauses, but
+ in different ways using different OVER clauses, but
 they all act on the same collection of rows defined by this virtual table.
- We already saw that ORDER BY can be omitted if the ordering
+ We already saw that ORDER BY can be omitted if the ordering
 of rows is not important. It is also possible to omit PARTITION
- BY, in which case there is a single partition containing all rows.
+ BY, in which case there is a single partition containing all rows.
 There is another important concept associated with window functions: for each row, there is a set of rows within its partition called its
- window frame. Some window functions act only
+ window frame. Some window functions act only
 on the rows of the window frame, rather than of the whole partition.
- By default, if ORDER BY is supplied then the frame consists of
+ By default, if ORDER BY is supplied then the frame consists of
 all rows from the start of the partition up through the current row, plus any following rows that are equal to the current row according to the
- ORDER BY clause. When ORDER BY is omitted the
+ ORDER BY clause. When ORDER BY is omitted the
 default frame consists of all rows in the partition.
@@ -450,7 +450,7 @@ FROM empsalary;
 for details.
- Here is an example using sum:
+ Here is an example using sum:
@@ -474,11 +474,11 @@ SELECT salary, sum(salary) OVER () FROM empsalary;
- Above, since there is no ORDER BY in the OVER
+ Above, since there is no ORDER BY in the OVER
 clause, the window frame is the same as the partition, which for lack of
- PARTITION BY is the whole table; in other words each sum is
+ PARTITION BY is the whole table; in other words each sum is
 taken over the whole table and so we get the same result for each output
- row. But if we add an ORDER BY clause, we get very different
+ row. But if we add an ORDER BY clause, we get very different
 results:
@@ -510,8 +510,8 @@ SELECT salary, sum(salary) OVER (ORDER BY salary) FROM empsalary;
 Window functions are permitted only in the SELECT list
- and the ORDER BY clause of the query. They are forbidden
- elsewhere, such as in GROUP BY, HAVING
+ and the ORDER BY clause of the query. They are forbidden
+ elsewhere, such as in GROUP BY, HAVING
 and WHERE clauses. This is because they logically execute after the processing of those clauses. Also, window functions execute after non-window aggregate functions. This means it is valid to
@@ -534,15 +534,15 @@ WHERE pos < 3;
 The above query only shows the rows from the inner query having
- rank less than 3.
+ rank less than 3.
 When a query involves multiple window functions, it is possible to write
- out each one with a separate OVER clause, but this is
+ out each one with a separate OVER clause, but this is
 duplicative and error-prone if the same windowing behavior is wanted for several functions. Instead, each windowing behavior can be named
- in a WINDOW clause and then referenced in OVER.
+ in a WINDOW clause and then referenced in OVER.
 For example:
@@ -623,13 +623,13 @@ CREATE TABLE capitals (
 In this case, a row of capitals
- inherits all columns (name,
- population, and altitude) from its
+ inherits all columns (name,
+ population, and altitude) from its
 parent, cities. The type of the column name is text, a native PostgreSQL type for variable length character strings. State capitals have
- an extra column, state, that shows their state. In
+ an extra column, state, that shows their state. In
 PostgreSQL, a table can inherit from zero or more other tables.
diff --git a/doc/src/sgml/amcheck.sgml b/doc/src/sgml/amcheck.sgml
index dd71dbd679..0dd68f0ba1 100644
--- a/doc/src/sgml/amcheck.sgml
+++ b/doc/src/sgml/amcheck.sgml
@@ -8,19 +8,19 @@
- The amcheck module provides functions that allow you to
+ The amcheck module provides functions that allow you to
 verify the logical consistency of the structure of indexes. If the structure appears to be valid, no error is raised.
- The functions verify various invariants in the
+ The functions verify various invariants in the
 structure of the representation of particular indexes. The correctness of the access method functions behind index scans and other important operations relies on these invariants always holding. For example, certain functions verify, among other things,
- that all B-Tree pages have items in logical order (e.g.,
- for B-Tree indexes on text, index tuples should be in
+ that all B-Tree pages have items in logical order (e.g.,
+ for B-Tree indexes on text, index tuples should be in
 collated lexical order). If that particular invariant somehow fails to hold, we can expect binary searches on the affected page to incorrectly guide index scans, resulting in wrong answers to SQL queries.
@@ -35,7 +35,7 @@
 functions.
- amcheck functions may be used only by superusers.
+ amcheck functions may be used only by superusers.
@@ -82,7 +82,7 @@ ORDER BY c.relpages DESC LIMIT 10;
 (10 rows)
 This example shows a session that performs verification of every
- catalog index in the database test. Details of just
+ catalog index in the database test. Details of just
 the 10 largest indexes verified are displayed. Since no error is raised, all indexes tested appear to be logically consistent. Naturally, this query could easily be changed to call
@@ -90,10 +90,10 @@ ORDER BY c.relpages DESC LIMIT 10;
 database where verification is supported.
- bt_index_check acquires an AccessShareLock
+ bt_index_check acquires an AccessShareLock
 on the target index and the heap relation it belongs to. This lock mode is the same lock mode acquired on relations by simple
- SELECT statements.
+ SELECT statements.
 bt_index_check does not verify invariants that span child/parent relationships, nor does it verify that the target index is consistent with its heap relation. When a
@@ -132,13 +132,13 @@ ORDER BY c.relpages DESC LIMIT 10;
 logical inconsistency or other problem.
- A ShareLock is required on the target index by
+ A ShareLock is required on the target index by
 bt_index_parent_check (a
- ShareLock is also acquired on the heap relation).
+ ShareLock is also acquired on the heap relation).
 These locks prevent concurrent data modification from
- INSERT, UPDATE, and DELETE
+ INSERT, UPDATE, and DELETE
 commands. The locks also prevent the underlying relation from
- being concurrently processed by VACUUM, as well as
+ being concurrently processed by VACUUM, as well as
 all other utility commands. Note that the function holds locks only while running, not for the entire transaction.
@@ -159,13 +159,13 @@ ORDER BY c.relpages DESC LIMIT 10;
- Using <filename>amcheck</> effectively
+ Using <filename>amcheck</filename> effectively
- amcheck can be effective at detecting various types of
+ amcheck can be effective at detecting various types of
 failure modes that data page
- checksums will always fail to catch. These include:
+ checksums will always fail to catch. These include:
@@ -176,13 +176,13 @@ ORDER BY c.relpages DESC LIMIT 10;
 This includes issues caused by the comparison rules of operating system collations changing. Comparisons of datums of a collatable
- type like text must be immutable (just as all
+ type like text must be immutable (just as all
 comparisons used for B-Tree index scans must be immutable), which implies that operating system collation rules must never change. Though rare, updates to operating system collation rules can cause these issues. More commonly, an inconsistency in the collation order between a master server and a standby server is
- implicated, possibly because the major operating
+ implicated, possibly because the major operating
 system version in use is inconsistent. Such inconsistencies will generally only arise on standby servers, and so can generally only be detected on standby servers.
@@ -190,25 +190,25 @@ ORDER BY c.relpages DESC LIMIT 10;
 If a problem like this arises, it may not affect each individual index that is ordered using an affected collation, simply because
- indexed values might happen to have the same
+ indexed values might happen to have the same
 absolute ordering regardless of the behavioral inconsistency. See and for
- further details about how PostgreSQL uses
+ further details about how PostgreSQL uses
 operating system locales and collations.
 Corruption caused by hypothetical undiscovered bugs in the
- underlying PostgreSQL access method code or sort
+ underlying PostgreSQL access method code or sort
 code. Automatic verification of the structural integrity of indexes plays a role in the general testing of new or proposed
- PostgreSQL features that could plausibly allow a
+ PostgreSQL features that could plausibly allow a
 logical inconsistency to be introduced. One obvious testing
- strategy is to call amcheck functions continuously
+ strategy is to call amcheck functions continuously
 when running the standard regression tests. See for details on running the tests.
@@ -219,12 +219,12 @@
 simply not be enabled.
- Note that amcheck examines a page as represented in some
+ Note that amcheck examines a page as represented in some
 shared memory buffer at the time of verification if there is only a shared buffer hit when accessing the block.
 Consequently,
- amcheck does not necessarily examine data read from the
+ amcheck does not necessarily examine data read from the
 file system at the time of verification. Note that when checksums are
- enabled, amcheck may raise an error due to a checksum
+ enabled, amcheck may raise an error due to a checksum
 failure when a corrupt block is read into a buffer.
@@ -234,7 +234,7 @@ ORDER BY c.relpages DESC LIMIT 10;
 and operating system.
- PostgreSQL does not protect against correctable
+ PostgreSQL does not protect against correctable
 memory errors and it is assumed you will operate using RAM that uses industry standard Error Correcting Codes (ECC) or better protection. However, ECC memory is typically only immune to
@@ -244,7 +244,7 @@ ORDER BY c.relpages DESC LIMIT 10;
- In general, amcheck can only prove the presence of
+ In general, amcheck can only prove the presence of
 corruption; it cannot prove its absence.
 Repairing corruption
- No error concerning corruption raised by amcheck should
- ever be a false positive. In practice, amcheck is more
+ No error concerning corruption raised by amcheck should
+ ever be a false positive. In practice, amcheck is more
 likely to find software bugs than problems with hardware.
- amcheck raises errors in the event of conditions that,
+ amcheck raises errors in the event of conditions that,
 by definition, should never happen, and so careful analysis of
- amcheck errors is often required.
+ amcheck errors is often required.
 There is no general method of repairing problems that
- amcheck detects. An explanation for the root cause of
+ amcheck detects. An explanation for the root cause of
 an invariant violation should be sought. may play a useful role in diagnosing
- corruption that amcheck detects. A REINDEX
+ corruption that amcheck detects. A REINDEX
 may not be effective in repairing corruption.
diff --git a/doc/src/sgml/arch-dev.sgml b/doc/src/sgml/arch-dev.sgml
index c835e87215..5423aadb9c 100644
--- a/doc/src/sgml/arch-dev.sgml
+++ b/doc/src/sgml/arch-dev.sgml
@@ -118,7 +118,7 @@
 PostgreSQL is implemented using a
- simple process per user client/server model. In this model
+ simple process per user client/server model. In this model
 there is one client process connected to exactly one server process. As we do not know ahead of time how many connections will be made, we have to
@@ -137,9 +137,9 @@
 The client process can be any program that understands the PostgreSQL protocol described in . Many clients are based on the
- C-language library libpq, but several independent
+ C-language library libpq, but several independent
 implementations of the protocol exist, such as the Java
- JDBC driver.
+ JDBC driver.
@@ -184,8 +184,8 @@
 text) for valid syntax. If the syntax is correct a parse tree is built up and handed back; otherwise an error is returned. The parser and lexer are
- implemented using the well-known Unix tools bison
- and flex.
+ implemented using the well-known Unix tools bison
+ and flex.
@@ -251,7 +251,7 @@
 back by the parser as input and does the semantic interpretation needed to understand which tables, functions, and operators are referenced by the query. The data structure that is built to represent this
- information is called the query tree.
+ information is called the query tree.
@@ -259,10 +259,10 @@
 system catalog lookups can only be done within a transaction, and we do not wish to start a transaction immediately upon receiving a query string. The raw parsing stage is sufficient to identify the transaction
- control commands (BEGIN, ROLLBACK, etc), and
+ control commands (BEGIN, ROLLBACK, etc), and
 these can then be correctly executed without any further analysis. Once we know that we are dealing with an actual query (such as
- SELECT or UPDATE), it is okay to
+ SELECT or UPDATE), it is okay to
 start a transaction if we're not already in one. Only then can the transformation process be invoked.
 The query tree created by the transformation process is structurally similar to the raw parse tree in most places, but it has many differences
- in detail. For example, a FuncCall node in the
+ in detail. For example, a FuncCall node in the
 parse tree represents something that looks syntactically like a function
- call. This might be transformed to either a FuncExpr
- or Aggref node depending on whether the referenced
+ call. This might be transformed to either a FuncExpr
+ or Aggref node depending on whether the referenced
 name turns out to be an ordinary function or an aggregate function. Also, information about the actual data types of columns and expression results is added to the query tree.
@@ -354,10 +354,10 @@
 The planner's search procedure actually works with data structures
- called paths, which are simply cut-down representations of
+ called paths, which are simply cut-down representations of
 plans containing only as much information as the planner needs to make its decisions. After the cheapest path is determined, a full-fledged
- plan tree is built to pass to the executor. This represents
+ plan tree is built to pass to the executor. This represents
 the desired execution plan in sufficient detail for the executor to run it. In the rest of this section we'll ignore the distinction between paths and plans.
@@ -378,12 +378,12 @@
 relation.attribute OPR constant. If relation.attribute happens to match the key of the B-tree index and OPR is one of the operators listed in
- the index's operator class, another plan is created using
+ the index's operator class, another plan is created using
 the B-tree index to scan the relation. If there are further indexes present and the restrictions in the query happen to match a key of an index, further plans will be considered. Index scan plans are also generated for indexes that have a sort ordering that can match the
- query's ORDER BY clause (if any), or a sort ordering that
+ query's ORDER BY clause (if any), or a sort ordering that
 might be useful for merge joining (see below).
@@ -462,9 +462,9 @@
 the base relations, plus nested-loop, merge, or hash join nodes as needed, plus any auxiliary steps needed, such as sort nodes or aggregate-function calculation nodes. Most of these plan node
- types have the additional ability to do selection
+ types have the additional ability to do selection
 (discarding rows that do not meet a specified Boolean condition)
- and projection (computation of a derived column set
+ and projection (computation of a derived column set
 based on given column values, that is, evaluation of scalar expressions where needed). One of the responsibilities of the planner is to attach selection conditions from the
@@ -496,7 +496,7 @@
 subplan) is, let's say, a Sort node and again recursion is needed to obtain an input row. The child node of the Sort might
- be a SeqScan node, representing actual reading of a table.
 Execution of this node causes the executor to fetch a row from the table and return it up to the calling node. The Sort node will repeatedly call its child to obtain all the rows to be sorted.
@@ -529,24 +529,24 @@
 The executor mechanism is used to evaluate all four basic SQL query types:
- SELECT, INSERT, UPDATE, and
- DELETE. For SELECT, the top-level executor
+ SELECT, INSERT, UPDATE, and
+ DELETE. For SELECT, the top-level executor
 code only needs to send each row returned by the query plan tree off
- to the client. For INSERT, each returned row is inserted
- into the target table specified for the INSERT. This is
- done in a special top-level plan node called ModifyTable.
+ to the client. For INSERT, each returned row is inserted
+ into the target table specified for the INSERT. This is
+ done in a special top-level plan node called ModifyTable.
 (A simple
- INSERT ... VALUES command creates a trivial plan tree
- consisting of a single Result node, which computes just one
- result row, and ModifyTable above it to perform the insertion.
- But INSERT ... SELECT can demand the full power
- of the executor mechanism.) For UPDATE, the planner arranges
+ INSERT ... VALUES command creates a trivial plan tree
+ consisting of a single Result node, which computes just one
+ result row, and ModifyTable above it to perform the insertion.
+ But INSERT ... SELECT can demand the full power
+ of the executor mechanism.) For UPDATE, the planner arranges
 that each computed row includes all the updated column values, plus
- the TID (tuple ID, or row ID) of the original target row;
- this data is fed into a ModifyTable node, which uses the
+ the TID (tuple ID, or row ID) of the original target row;
+ this data is fed into a ModifyTable node, which uses the
 information to create a new updated row and mark the old row deleted.
- For DELETE, the only column that is actually returned by the
- plan is the TID, and the ModifyTable node simply uses the TID
+ For DELETE, the only column that is actually returned by the
+ plan is the TID, and the ModifyTable node simply uses the TID
 to visit each target row and mark it deleted.
diff --git a/doc/src/sgml/array.sgml b/doc/src/sgml/array.sgml
index 88eb4be04d..9187f6e02e 100644
--- a/doc/src/sgml/array.sgml
+++ b/doc/src/sgml/array.sgml
@@ -32,7 +32,7 @@ CREATE TABLE sal_emp (
 );
 As shown, an array data type is named by appending square brackets
- ([]) to the data type name of the array elements. The
+ ([]) to the data type name of the array elements. The
 above command will create a table named sal_emp with a column of type text (name), a
@@ -69,7 +69,7 @@ CREATE TABLE tictactoe (
 An alternative syntax, which conforms to the SQL standard by using
- the keyword ARRAY, can be used for one-dimensional arrays.
+ the keyword ARRAY, can be used for one-dimensional arrays.
 pay_by_quarter could have been defined as:
@@ -79,7 +79,7 @@ CREATE TABLE tictactoe (
 pay_by_quarter integer ARRAY,
- As before, however, PostgreSQL does not enforce the
+ As before, however, PostgreSQL does not enforce the
 size restriction in any case.
@@ -107,8 +107,8 @@ CREATE TABLE tictactoe (
 for the type, as recorded in its pg_type entry. Among the standard data types provided in the PostgreSQL distribution, all use a comma
- (,), except for type box which uses a semicolon
- (;). Each val is
+ (,), except for type box which uses a semicolon
+ (;). Each val is
 either a constant of the array element type, or a subarray.
 An example of an array constant is:
- To set an element of an array constant to NULL, write NULL
+ To set an element of an array constant to NULL, write NULL
 for the element value. (Any upper- or lower-case variant of
- NULL will do.) If you want an actual string value
- NULL, you must put double quotes around it.
+ NULL will do.) If you want an actual string value
+ NULL, you must put double quotes around it.
@@ -176,7 +176,7 @@ ERROR: multidimensional arrays must have array expressions with matching dimens
- The ARRAY constructor syntax can also be used:
+ The ARRAY constructor syntax can also be used:
 INSERT INTO sal_emp
 VALUES ('Bill',
@@ -190,7 +190,7 @@ INSERT INTO sal_emp
 Notice that the array elements are ordinary SQL constants or expressions; for instance, string literals are single quoted, instead of
- double quoted as they would be in an array literal. The ARRAY
+ double quoted as they would be in an array literal. The ARRAY
 constructor syntax is discussed in more detail in .
@@ -222,8 +222,8 @@ SELECT name FROM sal_emp WHERE pay_by_quarter[1] <> pay_by_quarter[2];
 The array subscript numbers are written within square brackets. By default PostgreSQL uses a one-based numbering convention for arrays, that is,
- an array of n elements starts with array[1] and
- ends with array[n].
+ an array of n elements starts with array[1] and
+ ends with array[n].
@@ -259,8 +259,8 @@ SELECT schedule[1:2][1:1] FROM sal_emp WHERE name = 'Bill';
 If any dimension is written as a slice, i.e., contains a colon, then all dimensions are treated as slices. Any dimension that has only a single number (no colon) is treated as being from 1
- to the number specified. For example, [2] is treated as
- [1:2], as in this example:
+ to the number specified. For example, [2] is treated as
+ [1:2], as in this example:
 SELECT schedule[1:2][2] FROM sal_emp WHERE name = 'Bill';
@@ -272,7 +272,7 @@ SELECT schedule[1:2][2] FROM sal_emp WHERE name = 'Bill';
 To avoid confusion with the non-slice case, it's best to use slice syntax
- for all dimensions, e.g., [1:2][1:1], not [2][1:1].
+ for all dimensions, e.g., [1:2][1:1], not [2][1:1].
@@ -302,9 +302,9 @@ SELECT schedule[:][1:1] FROM sal_emp WHERE name = 'Bill';
 An array subscript expression will return null if either the array itself or any of the subscript expressions are null. Also, null is returned if a subscript is outside the array bounds (this case does not raise an error).
- For example, if schedule
- currently has the dimensions [1:3][1:2] then referencing
- schedule[3][3] yields NULL. Similarly, an array reference
+ For example, if schedule
+ currently has the dimensions [1:3][1:2] then referencing
+ schedule[3][3] yields NULL. Similarly, an array reference
 with the wrong number of subscripts yields a null rather than an error.
@@ -423,16 +423,16 @@ UPDATE sal_emp SET pay_by_quarter[1:2] = '{27000,27000}'
 A stored array value can be enlarged by assigning to elements not already present. Any positions between those previously present and the newly assigned elements will be filled with nulls. For example, if array
- myarray currently has 4 elements, it will have six
- elements after an update that assigns to myarray[6];
- myarray[5] will contain null.
+ myarray currently has 4 elements, it will have six
+ elements after an update that assigns to myarray[6];
+ myarray[5] will contain null.
 Currently, enlargement in this fashion is only allowed for one-dimensional arrays, not multidimensional arrays.
 Subscripted assignment allows creation of arrays that do not use one-based
- subscripts. For example one might assign to myarray[-2:7] to
+ subscripts. For example one might assign to myarray[-2:7] to
 create an array with subscript values from -2 to 7.
@@ -457,8 +457,8 @@ SELECT ARRAY[5,6] || ARRAY[[1,2],[3,4]];
 The concatenation operator allows a single element to be pushed onto the beginning or end of a one-dimensional array. It also accepts two
- N-dimensional arrays, or an N-dimensional
- and an N+1-dimensional array.
+ N-dimensional arrays, or an N-dimensional
+ and an N+1-dimensional array.
@@ -501,10 +501,10 @@ SELECT array_dims(ARRAY[[1,2],[3,4]] || ARRAY[[5,6],[7,8],[9,0]]);
- When an N-dimensional array is pushed onto the beginning
- or end of an N+1-dimensional array, the result is
- analogous to the element-array case above. Each N-dimensional
- sub-array is essentially an element of the N+1-dimensional
+ When an N-dimensional array is pushed onto the beginning
+ or end of an N+1-dimensional array, the result is
+ analogous to the element-array case above. Each N-dimensional
+ sub-array is essentially an element of the N+1-dimensional
 array's outer dimension. For example:
 SELECT array_dims(ARRAY[1,2] || ARRAY[[3,4],[5,6]]);
@@ -587,9 +587,9 @@ SELECT array_append(ARRAY[1, 2], NULL); -- this might have been meant
 The heuristic it uses to resolve the constant's type is to assume it's of the same type as the operator's other input — in this case, integer array. So the concatenation operator is presumed to
- represent array_cat, not array_append. When
+ represent array_cat, not array_append. When
 that's the wrong choice, it could be fixed by casting the constant to the
- array's element type; but explicit use of array_append might
+ array's element type; but explicit use of array_append might
 be a preferable solution.
@@ -633,7 +633,7 @@ SELECT * FROM sal_emp WHERE 10000 = ALL (pay_by_quarter);
- Alternatively, the generate_subscripts function can be used.
+ Alternatively, the generate_subscripts function can be used.
 For example:
@@ -648,7 +648,7 @@ SELECT * FROM
- You can also search an array using the && operator,
+ You can also search an array using the && operator,
 which checks whether the left operand overlaps with the right operand. For instance:
@@ -662,8 +662,8 @@ SELECT * FROM sal_emp WHERE pay_by_quarter && ARRAY[10000];
- You can also search for specific values in an array using the array_position
- and array_positions functions. The former returns the subscript of
+ You can also search for specific values in an array using the array_position
+ and array_positions functions. The former returns the subscript of
 the first occurrence of a value in an array; the latter returns an array with the subscripts of all occurrences of the value in the array. For example:
@@ -703,13 +703,13 @@ SELECT array_positions(ARRAY[1, 4, 3, 1, 3, 4, 2, 1], 1);
 The external text representation of an array value consists of items that are interpreted according to the I/O conversion rules for the array's element type, plus decoration that indicates the array structure.
- The decoration consists of curly braces ({ and })
 around the array value plus delimiter characters between adjacent items.
- The delimiter character is usually a comma (,) but can be - something else: it is determined by the typdelim setting + The delimiter character is usually a comma (,) but can be + something else: it is determined by the typdelim setting for the array's element type. Among the standard data types provided in the PostgreSQL distribution, all use a comma, - except for type box, which uses a semicolon (;). + except for type box, which uses a semicolon (;). In a multidimensional array, each dimension (row, plane, cube, etc.) gets its own level of curly braces, and delimiters must be written between adjacent curly-braced entities of the same level. @@ -719,7 +719,7 @@ SELECT array_positions(ARRAY[1, 4, 3, 1, 3, 4, 2, 1], 1); The array output routine will put double quotes around element values if they are empty strings, contain curly braces, delimiter characters, double quotes, backslashes, or white space, or match the word - NULL. Double quotes and backslashes + NULL. Double quotes and backslashes embedded in element values will be backslash-escaped. For numeric data types it is safe to assume that double quotes will never appear, but for textual data types one should be prepared to cope with either the presence @@ -731,10 +731,10 @@ SELECT array_positions(ARRAY[1, 4, 3, 1, 3, 4, 2, 1], 1); set to one. To represent arrays with other lower bounds, the array subscript ranges can be specified explicitly before writing the array contents. - This decoration consists of square brackets ([]) + This decoration consists of square brackets ([]) around each array dimension's lower and upper bounds, with - a colon (:) delimiter character in between. The - array dimension decoration is followed by an equal sign (=). + a colon (:) delimiter character in between. The + array dimension decoration is followed by an equal sign (=). For example: SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2 @@ -750,23 +750,23 @@ SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2 - If the value written for an element is NULL (in any case + If the value written for an element is NULL (in any case variant), the element is taken to be NULL. The presence of any quotes or backslashes disables this and allows the literal string value - NULL to be entered. Also, for backward compatibility with - pre-8.2 versions of PostgreSQL, the NULL to be entered. Also, for backward compatibility with + pre-8.2 versions of PostgreSQL, the configuration parameter can be turned - off to suppress recognition of NULL as a NULL. + off to suppress recognition of NULL as a NULL. As shown previously, when writing an array value you can use double - quotes around any individual array element. You must do so + quotes around any individual array element. You must do so if the element value would otherwise confuse the array-value parser. For example, elements containing curly braces, commas (or the data type's delimiter character), double quotes, backslashes, or leading or trailing whitespace must be double-quoted. Empty strings and strings matching the - word NULL must be quoted, too. To put a double quote or + word NULL must be quoted, too. To put a double quote or backslash in a quoted array element value, use escape string syntax and precede it with a backslash. Alternatively, you can avoid quotes and use backslash-escaping to protect all data characters that would otherwise @@ -785,17 +785,17 @@ SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2 Remember that what you write in an SQL command will first be interpreted as a string literal, and then as an array. 
This doubles the number of - backslashes you need. For example, to insert a text array + backslashes you need. For example, to insert a text array value containing a backslash and a double quote, you'd need to write: INSERT ... VALUES (E'{"\\\\","\\""}'); The escape string processor removes one level of backslashes, so that - what arrives at the array-value parser looks like {"\\","\""}. - In turn, the strings fed to the text data type's input routine - become \ and " respectively. (If we were working + what arrives at the array-value parser looks like {"\\","\""}. + In turn, the strings fed to the text data type's input routine + become \ and " respectively. (If we were working with a data type whose input routine also treated backslashes specially, - bytea for example, we might need as many as eight backslashes + bytea for example, we might need as many as eight backslashes in the command to get one backslash into the stored array element.) Dollar quoting (see ) can be used to avoid the need to double backslashes. @@ -804,10 +804,10 @@ INSERT ... VALUES (E'{"\\\\","\\""}'); - The ARRAY constructor syntax (see + The ARRAY constructor syntax (see ) is often easier to work with than the array-literal syntax when writing array values in SQL - commands. In ARRAY, individual element values are written the + commands. In ARRAY, individual element values are written the same way they would be written when not members of an array. diff --git a/doc/src/sgml/auth-delay.sgml b/doc/src/sgml/auth-delay.sgml index 9a6e3e9bb4..9221d2dfb6 100644 --- a/doc/src/sgml/auth-delay.sgml +++ b/doc/src/sgml/auth-delay.sgml @@ -18,7 +18,7 @@ In order to function, this module must be loaded via - in postgresql.conf. + in postgresql.conf. @@ -29,7 +29,7 @@ auth_delay.milliseconds (int) - auth_delay.milliseconds configuration parameter + auth_delay.milliseconds configuration parameter @@ -42,7 +42,7 @@ - These parameters must be set in postgresql.conf. + These parameters must be set in postgresql.conf. Typical usage might be: diff --git a/doc/src/sgml/auto-explain.sgml b/doc/src/sgml/auto-explain.sgml index 38e6f50c80..240098c82f 100644 --- a/doc/src/sgml/auto-explain.sgml +++ b/doc/src/sgml/auto-explain.sgml @@ -24,10 +24,10 @@ LOAD 'auto_explain'; (You must be superuser to do that.) More typical usage is to preload - it into some or all sessions by including auto_explain in + it into some or all sessions by including auto_explain in or in - postgresql.conf. Then you can track unexpectedly slow queries + postgresql.conf. Then you can track unexpectedly slow queries no matter when they happen. Of course there is a price in overhead for that. @@ -47,7 +47,7 @@ LOAD 'auto_explain'; auto_explain.log_min_duration (integer) - auto_explain.log_min_duration configuration parameter + auto_explain.log_min_duration configuration parameter @@ -66,13 +66,13 @@ LOAD 'auto_explain'; auto_explain.log_analyze (boolean) - auto_explain.log_analyze configuration parameter + auto_explain.log_analyze configuration parameter - auto_explain.log_analyze causes EXPLAIN ANALYZE - output, rather than just EXPLAIN output, to be printed + auto_explain.log_analyze causes EXPLAIN ANALYZE + output, rather than just EXPLAIN output, to be printed when an execution plan is logged. This parameter is off by default. Only superusers can change this setting. 
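As a short sketch of how this parameter combines with auto_explain.log_min_duration above in a single session (superuser assumed; the zero threshold is arbitrary and logs every statement):

LOAD 'auto_explain';
SET auto_explain.log_min_duration = 0;   -- log the plan of every completed statement
SET auto_explain.log_analyze = true;     -- include EXPLAIN ANALYZE instrumentation in the logged plans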
@@ -92,14 +92,14 @@ LOAD 'auto_explain'; auto_explain.log_buffers (boolean) - auto_explain.log_buffers configuration parameter + auto_explain.log_buffers configuration parameter auto_explain.log_buffers controls whether buffer usage statistics are printed when an execution plan is logged; it's - equivalent to the BUFFERS option of EXPLAIN. + equivalent to the BUFFERS option of EXPLAIN. This parameter has no effect unless auto_explain.log_analyze is enabled. This parameter is off by default. @@ -112,14 +112,14 @@ LOAD 'auto_explain'; auto_explain.log_timing (boolean) - auto_explain.log_timing configuration parameter + auto_explain.log_timing configuration parameter auto_explain.log_timing controls whether per-node timing information is printed when an execution plan is logged; it's - equivalent to the TIMING option of EXPLAIN. + equivalent to the TIMING option of EXPLAIN. The overhead of repeatedly reading the system clock can slow down queries significantly on some systems, so it may be useful to set this parameter to off when only actual row counts, and not exact times, are @@ -136,7 +136,7 @@ LOAD 'auto_explain'; auto_explain.log_triggers (boolean) - auto_explain.log_triggers configuration parameter + auto_explain.log_triggers configuration parameter @@ -155,14 +155,14 @@ LOAD 'auto_explain'; auto_explain.log_verbose (boolean) - auto_explain.log_verbose configuration parameter + auto_explain.log_verbose configuration parameter auto_explain.log_verbose controls whether verbose details are printed when an execution plan is logged; it's - equivalent to the VERBOSE option of EXPLAIN. + equivalent to the VERBOSE option of EXPLAIN. This parameter is off by default. Only superusers can change this setting. @@ -173,13 +173,13 @@ LOAD 'auto_explain'; auto_explain.log_format (enum) - auto_explain.log_format configuration parameter + auto_explain.log_format configuration parameter auto_explain.log_format selects the - EXPLAIN output format to be used. + EXPLAIN output format to be used. The allowed values are text, xml, json, and yaml. The default is text. Only superusers can change this setting. @@ -191,7 +191,7 @@ LOAD 'auto_explain'; auto_explain.log_nested_statements (boolean) - auto_explain.log_nested_statements configuration parameter + auto_explain.log_nested_statements configuration parameter @@ -208,7 +208,7 @@ LOAD 'auto_explain'; auto_explain.sample_rate (real) - auto_explain.sample_rate configuration parameter + auto_explain.sample_rate configuration parameter @@ -224,7 +224,7 @@ LOAD 'auto_explain'; In ordinary usage, these parameters are set - in postgresql.conf, although superusers can alter them + in postgresql.conf, although superusers can alter them on-the-fly within their own sessions. Typical usage might be: diff --git a/doc/src/sgml/backup.sgml b/doc/src/sgml/backup.sgml index bd55e8bb77..dd9c1bff5b 100644 --- a/doc/src/sgml/backup.sgml +++ b/doc/src/sgml/backup.sgml @@ -3,10 +3,10 @@ Backup and Restore - backup + backup - As with everything that contains valuable data, PostgreSQL + As with everything that contains valuable data, PostgreSQL databases should be backed up regularly. While the procedure is essentially simple, it is important to have a clear understanding of the underlying techniques and assumptions. 
@@ -14,9 +14,9 @@ There are three fundamentally different approaches to backing up - PostgreSQL data: + PostgreSQL data: - SQL dump + SQL dump File system level backup Continuous archiving @@ -25,30 +25,30 @@ - <acronym>SQL</> Dump + <acronym>SQL</acronym> Dump The idea behind this dump method is to generate a file with SQL commands that, when fed back to the server, will recreate the database in the same state as it was at the time of the dump. - PostgreSQL provides the utility program + PostgreSQL provides the utility program for this purpose. The basic usage of this command is: pg_dump dbname > outfile - As you see, pg_dump writes its result to the + As you see, pg_dump writes its result to the standard output. We will see below how this can be useful. - While the above command creates a text file, pg_dump + While the above command creates a text file, pg_dump can create files in other formats that allow for parallelism and more fine-grained control of object restoration. - pg_dump is a regular PostgreSQL + pg_dump is a regular PostgreSQL client application (albeit a particularly clever one). This means that you can perform this backup procedure from any remote host that has - access to the database. But remember that pg_dump + access to the database. But remember that pg_dump does not operate with special permissions. In particular, it must have read access to all tables that you want to back up, so in order to back up the entire database you almost always have to run it as a @@ -60,9 +60,9 @@ pg_dump dbname > - To specify which database server pg_dump should + To specify which database server pg_dump should contact, use the command line options ). psql + supports options similar to pg_dump for specifying the database server to connect to and the user name to use. See the reference page for more information. Non-text file dumps are restored using the dbname < - By default, the psql script will continue to + By default, the psql script will continue to execute after an SQL error is encountered. You might wish to run psql with - the ON_ERROR_STOP variable set to alter that + the ON_ERROR_STOP variable set to alter that behavior and have psql exit with an exit status of 3 if an SQL error occurs: @@ -147,8 +147,8 @@ psql --set ON_ERROR_STOP=on dbname < infile Alternatively, you can specify that the whole dump should be restored as a single transaction, so the restore is either fully completed or fully rolled back. This mode can be specified by - passing the - The ability of pg_dump and psql to + The ability of pg_dump and psql to write to or read from pipes makes it possible to dump a database directly from one server to another, for example: -pg_dump -h host1 dbname | psql -h host2 dbname +pg_dump -h host1 dbname | psql -h host2 dbname - The dumps produced by pg_dump are relative to - template0. This means that any languages, procedures, - etc. added via template1 will also be dumped by - pg_dump. As a result, when restoring, if you are - using a customized template1, you must create the - empty database from template0, as in the example + The dumps produced by pg_dump are relative to + template0. This means that any languages, procedures, + etc. added via template1 will also be dumped by + pg_dump. As a result, when restoring, if you are + using a customized template1, you must create the + empty database from template0, as in the example above. @@ -183,52 +183,52 @@ pg_dump -h host1 dbname | psql -h h see and for more information. 
For more advice on how to load large amounts of data - into PostgreSQL efficiently, refer to PostgreSQL efficiently, refer to . - Using <application>pg_dumpall</> + Using <application>pg_dumpall</application> - pg_dump dumps only a single database at a time, + pg_dump dumps only a single database at a time, and it does not dump information about roles or tablespaces (because those are cluster-wide rather than per-database). To support convenient dumping of the entire contents of a database cluster, the program is provided. - pg_dumpall backs up each database in a given + pg_dumpall backs up each database in a given cluster, and also preserves cluster-wide data such as role and tablespace definitions. The basic usage of this command is: -pg_dumpall > outfile +pg_dumpall > outfile - The resulting dump can be restored with psql: + The resulting dump can be restored with psql: psql -f infile postgres (Actually, you can specify any existing database name to start from, - but if you are loading into an empty cluster then postgres + but if you are loading into an empty cluster then postgres should usually be used.) It is always necessary to have - database superuser access when restoring a pg_dumpall + database superuser access when restoring a pg_dumpall dump, as that is required to restore the role and tablespace information. If you use tablespaces, make sure that the tablespace paths in the dump are appropriate for the new installation. - pg_dumpall works by emitting commands to re-create + pg_dumpall works by emitting commands to re-create roles, tablespaces, and empty databases, then invoking - pg_dump for each database. This means that while + pg_dump for each database. This means that while each database will be internally consistent, the snapshots of different databases are not synchronized. Cluster-wide data can be dumped alone using the - pg_dumpall option. This is necessary to fully back up the cluster if running the - pg_dump command on individual databases. + pg_dump command on individual databases. @@ -237,8 +237,8 @@ Some operating systems have maximum file size limits that cause - problems when creating large pg_dump output files. - Fortunately, pg_dump can write to the standard + problems when creating large pg_dump output files. + Fortunately, pg_dump can write to the standard output, so you can use standard Unix tools to work around this potential problem. There are several possible methods: @@ -268,7 +268,7 @@ cat filename.gz | gunzip | psql - Use <command>split</>. + Use <command>split</command>. The split command allows you to split the output into smaller files that are @@ -288,10 +288,10 @@ cat filename* | psql - Use <application>pg_dump</>'s custom dump format. + Use <application>pg_dump</application>'s custom dump format. If PostgreSQL was built on a system with the - zlib compression library installed, the custom dump + zlib compression library installed, the custom dump format will compress data as it writes it to the output file. This will produce dump file sizes similar to using gzip, but it has the added advantage that tables can be restored selectively.
The @@ -301,8 +301,8 @@ cat filename* | psql dbname > filename - A custom-format dump is not a script for psql, but - instead must be restored with pg_restore, for example: + A custom-format dump is not a script for psql, but + instead must be restored with pg_restore, for example: pg_restore -d dbname filename @@ -314,12 +314,12 @@ pg_restore -d dbname - For very large databases, you might need to combine split + For very large databases, you might need to combine split with one of the other two approaches. - Use <application>pg_dump</>'s parallel dump feature. + Use <application>pg_dump</application>'s parallel dump feature. To speed up the dump of a large database, you can use pg_dump's parallel mode. This will dump @@ -344,7 +344,7 @@ pg_dump -j num -F d -f An alternative backup strategy is to directly copy the files that - PostgreSQL uses to store the data in the database; + PostgreSQL uses to store the data in the database; explains where these files are located. You can use whatever method you prefer for doing file system backups; for example: @@ -356,13 +356,13 @@ tar -cf backup.tar /usr/local/pgsql/data There are two restrictions, however, which make this method - impractical, or at least inferior to the pg_dump + impractical, or at least inferior to the pg_dump method: - The database server must be shut down in order to + The database server must be shut down in order to get a usable backup. Half-way measures such as disallowing all connections will not work (in part because tar and similar tools do not take @@ -379,7 +379,7 @@ tar -cf backup.tar /usr/local/pgsql/data If you have dug into the details of the file system layout of the database, you might be tempted to try to back up or restore only certain individual tables or databases from their respective files or - directories. This will not work because the + directories. This will not work because the information contained in these files is not usable without the commit log files, pg_xact/*, which contain the commit status of @@ -399,7 +399,7 @@ tar -cf backup.tar /usr/local/pgsql/data consistent snapshot of the data directory, if the file system supports that functionality (and you are willing to trust that it is implemented correctly). The typical procedure is - to make a frozen snapshot of the volume containing the + to make a frozen snapshot of the volume containing the database, then copy the whole data directory (not just parts, see above) from the snapshot to a backup device, then release the frozen snapshot. This will work even while the database server is running. @@ -419,7 +419,7 @@ tar -cf backup.tar /usr/local/pgsql/data the volumes. For example, if your data files and WAL log are on different disks, or if tablespaces are on different file systems, it might not be possible to use snapshot backup because the snapshots - must be simultaneous. + must be simultaneous. Read your file system documentation very carefully before trusting the consistent-snapshot technique in such situations. @@ -435,13 +435,13 @@ tar -cf backup.tar /usr/local/pgsql/data - Another option is to use rsync to perform a file - system backup. This is done by first running rsync + Another option is to use rsync to perform a file + system backup. This is done by first running rsync while the database server is running, then shutting down the database - server long enough to do an rsync --checksum. - ( @@ -508,7 +508,7 @@ tar -cf backup.tar /usr/local/pgsql/data It is not necessary to replay the WAL entries all the way to the end. 
We could stop the replay at any point and have a consistent snapshot of the database as it was at that time. Thus, - this technique supports point-in-time recovery: it is + this technique supports point-in-time recovery: it is possible to restore the database to its state at any time since your base backup was taken. @@ -517,7 +517,7 @@ tar -cf backup.tar /usr/local/pgsql/data If we continuously feed the series of WAL files to another machine that has been loaded with the same base backup file, we - have a warm standby system: at any point we can bring up + have a warm standby system: at any point we can bring up the second machine and it will have a nearly-current copy of the database. @@ -530,7 +530,7 @@ tar -cf backup.tar /usr/local/pgsql/data pg_dump and pg_dumpall do not produce file-system-level backups and cannot be used as part of a continuous-archiving solution. - Such dumps are logical and do not contain enough + Such dumps are logical and do not contain enough information to be used by WAL replay. @@ -546,10 +546,10 @@ tar -cf backup.tar /usr/local/pgsql/data To recover successfully using continuous archiving (also called - online backup by many database vendors), you need a continuous + online backup by many database vendors), you need a continuous sequence of archived WAL files that extends back at least as far as the start time of your backup. So to get started, you should set up and test - your procedure for archiving WAL files before you take your + your procedure for archiving WAL files before you take your first base backup. Accordingly, we first discuss the mechanics of archiving WAL files. @@ -558,15 +558,15 @@ tar -cf backup.tar /usr/local/pgsql/data Setting Up WAL Archiving - In an abstract sense, a running PostgreSQL system + In an abstract sense, a running PostgreSQL system produces an indefinitely long sequence of WAL records. The system physically divides this sequence into WAL segment - files, which are normally 16MB apiece (although the segment size - can be altered during initdb). The segment + files, which are normally 16MB apiece (although the segment size + can be altered during initdb). The segment files are given numeric names that reflect their position in the abstract WAL sequence. When not using WAL archiving, the system normally creates just a few segment files and then - recycles them by renaming no-longer-needed segment files + recycles them by renaming no-longer-needed segment files to higher segment numbers. It's assumed that segment files whose contents precede the checkpoint-before-last are no longer of interest and can be recycled. @@ -577,33 +577,33 @@ tar -cf backup.tar /usr/local/pgsql/data file once it is filled, and save that data somewhere before the segment file is recycled for reuse. Depending on the application and the available hardware, there could be many different ways of saving - the data somewhere: we could copy the segment files to an NFS-mounted + the data somewhere: we could copy the segment files to an NFS-mounted directory on another machine, write them onto a tape drive (ensuring that you have a way of identifying the original name of each file), or batch them together and burn them onto CDs, or something else entirely. To provide the database administrator with flexibility, - PostgreSQL tries not to make any assumptions about how - the archiving will be done. Instead, PostgreSQL lets + PostgreSQL tries not to make any assumptions about how + the archiving will be done. 
Instead, PostgreSQL lets the administrator specify a shell command to be executed to copy a completed segment file to wherever it needs to go. The command could be - as simple as a cp, or it could invoke a complex shell + as simple as a cp, or it could invoke a complex shell script — it's all up to you. To enable WAL archiving, set the - configuration parameter to replica or higher, - to on, + configuration parameter to replica or higher, + to on, and specify the shell command to use in the configuration parameter. In practice these settings will always be placed in the postgresql.conf file. - In archive_command, - %p is replaced by the path name of the file to - archive, while %f is replaced by only the file name. + In archive_command, + %p is replaced by the path name of the file to + archive, while %f is replaced by only the file name. (The path name is relative to the current working directory, i.e., the cluster's data directory.) - Use %% if you need to embed an actual % + Use %% if you need to embed an actual % character in the command. The simplest useful command is something like: @@ -611,9 +611,9 @@ archive_command = 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/ser archive_command = 'copy "%p" "C:\\server\\archivedir\\%f"' # Windows which will copy archivable WAL segments to the directory - /mnt/server/archivedir. (This is an example, not a + /mnt/server/archivedir. (This is an example, not a recommendation, and might not work on all platforms.) After the - %p and %f parameters have been replaced, + %p and %f parameters have been replaced, the actual command executed might look like this: test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/00000001000000A900000065 /mnt/server/archivedir/00000001000000A900000065 @@ -623,7 +623,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 The archive command will be executed under the ownership of the same - user that the PostgreSQL server is running as. Since + user that the PostgreSQL server is running as. Since the series of WAL files being archived contains effectively everything in your database, you will want to be sure that the archived data is protected from prying eyes; for example, archive into a directory that @@ -633,9 +633,9 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 It is important that the archive command return zero exit status if and only if it succeeds. Upon getting a zero result, - PostgreSQL will assume that the file has been + PostgreSQL will assume that the file has been successfully archived, and will remove or recycle it. However, a nonzero - status tells PostgreSQL that the file was not archived; + status tells PostgreSQL that the file was not archived; it will try again periodically until it succeeds. @@ -650,14 +650,14 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 It is advisable to test your proposed archive command to ensure that it indeed does not overwrite an existing file, and that it returns - nonzero status in this case. + nonzero status in this case. The example command above for Unix ensures this by including a separate - test step. On some Unix platforms, cp has - switches such as that can be used to do the same thing less verbosely, but you should not rely on these without verifying that - the right exit status is returned. (In particular, GNU cp - will return status zero when @@ -668,10 +668,10 @@ test ! 
-f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 fills, nothing further can be archived until the tape is swapped. You should ensure that any error condition or request to a human operator is reported appropriately so that the situation can be - resolved reasonably quickly. The pg_wal/ directory will + resolved reasonably quickly. The pg_wal/ directory will continue to fill with WAL segment files until the situation is resolved. - (If the file system containing pg_wal/ fills up, - PostgreSQL will do a PANIC shutdown. No committed + (If the file system containing pg_wal/ fills up, + PostgreSQL will do a PANIC shutdown. No committed transactions will be lost, but the database will remain offline until you free some space.) @@ -682,7 +682,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 operation continues even if the archiving process falls a little behind. If archiving falls significantly behind, this will increase the amount of data that would be lost in the event of a disaster. It will also mean that - the pg_wal/ directory will contain large numbers of + the pg_wal/ directory will contain large numbers of not-yet-archived segment files, which could eventually exceed available disk space. You are advised to monitor the archiving process to ensure that it is working as you intend. @@ -692,16 +692,16 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 In writing your archive command, you should assume that the file names to be archived can be up to 64 characters long and can contain any combination of ASCII letters, digits, and dots. It is not necessary to - preserve the original relative path (%p) but it is necessary to - preserve the file name (%f). + preserve the original relative path (%p) but it is necessary to + preserve the file name (%f). Note that although WAL archiving will allow you to restore any - modifications made to the data in your PostgreSQL database, + modifications made to the data in your PostgreSQL database, it will not restore changes made to configuration files (that is, - postgresql.conf, pg_hba.conf and - pg_ident.conf), since those are edited manually rather + postgresql.conf, pg_hba.conf and + pg_ident.conf), since those are edited manually rather than through SQL operations. You might wish to keep the configuration files in a location that will be backed up by your regular file system backup procedures. See @@ -719,32 +719,32 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 to a new WAL segment file at least that often. Note that archived files that are archived early due to a forced switch are still the same length as completely full files. It is therefore unwise to set a very - short archive_timeout — it will bloat your archive - storage. archive_timeout settings of a minute or so are + short archive_timeout — it will bloat your archive + storage. archive_timeout settings of a minute or so are usually reasonable. Also, you can force a segment switch manually with - pg_switch_wal if you want to ensure that a + pg_switch_wal if you want to ensure that a just-finished transaction is archived as soon as possible. Other utility functions related to WAL management are listed in . - When wal_level is minimal some SQL commands + When wal_level is minimal some SQL commands are optimized to avoid WAL logging, as described in . 
If archiving or streaming replication were turned on during execution of one of these statements, WAL would not contain enough information for archive recovery. (Crash recovery is - unaffected.) For this reason, wal_level can only be changed at - server start. However, archive_command can be changed with a + unaffected.) For this reason, wal_level can only be changed at + server start. However, archive_command can be changed with a configuration file reload. If you wish to temporarily stop archiving, - one way to do it is to set archive_command to the empty - string (''). - This will cause WAL files to accumulate in pg_wal/ until a - working archive_command is re-established. + one way to do it is to set archive_command to the empty + string (''). + This will cause WAL files to accumulate in pg_wal/ until a + working archive_command is re-established. @@ -763,8 +763,8 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 It is not necessary to be concerned about the amount of time it takes to make a base backup. However, if you normally run the - server with full_page_writes disabled, you might notice a drop - in performance while the backup runs since full_page_writes is + server with full_page_writes disabled, you might notice a drop + in performance while the backup runs since full_page_writes is effectively forced on during backup mode. @@ -772,13 +772,13 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 To make use of the backup, you will need to keep all the WAL segment files generated during and after the file system backup. To aid you in doing this, the base backup process - creates a backup history file that is immediately + creates a backup history file that is immediately stored into the WAL archive area. This file is named after the first WAL segment file that you need for the file system backup. For example, if the starting WAL file is - 0000000100001234000055CD the backup history file will be + 0000000100001234000055CD the backup history file will be named something like - 0000000100001234000055CD.007C9330.backup. (The second + 0000000100001234000055CD.007C9330.backup. (The second part of the file name stands for an exact position within the WAL file, and can ordinarily be ignored.) Once you have safely archived the file system backup and the WAL segment files used during the @@ -847,14 +847,14 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 && cp pg_wal/0 SELECT pg_start_backup('label', false, false); - where label is any string you want to use to uniquely + where label is any string you want to use to uniquely identify this backup operation. The connection - calling pg_start_backup must be maintained until the end of + calling pg_start_backup must be maintained until the end of the backup, or the backup will be automatically aborted. - By default, pg_start_backup can take a long time to finish. + By default, pg_start_backup can take a long time to finish. This is because it performs a checkpoint, and the I/O required for the checkpoint will be spread out over a significant period of time, by default half your inter-checkpoint interval @@ -862,19 +862,19 @@ SELECT pg_start_backup('label', false, false); ). This is usually what you want, because it minimizes the impact on query processing. If you want to start the backup as soon as - possible, change the second parameter to true, which will + possible, change the second parameter to true, which will issue an immediate checkpoint using as much I/O as available. 
- The third parameter being false tells - pg_start_backup to initiate a non-exclusive base backup. + The third parameter being false tells + pg_start_backup to initiate a non-exclusive base backup. Perform the backup, using any convenient file-system-backup tool - such as tar or cpio (not + such as tar or cpio (not pg_dump or pg_dumpall). It is neither necessary nor desirable to stop normal operation of the database @@ -898,45 +898,45 @@ SELECT * FROM pg_stop_backup(false, true); ready to archive. - The pg_stop_backup will return one row with three + The pg_stop_backup will return one row with three values. The second of these fields should be written to a file named - backup_label in the root directory of the backup. The + backup_label in the root directory of the backup. The third field should be written to a file named - tablespace_map unless the field is empty. These files are + tablespace_map unless the field is empty. These files are vital to the backup working, and must be written without modification. Once the WAL segment files active during the backup are archived, you are - done. The file identified by pg_stop_backup's first return + done. The file identified by pg_stop_backup's first return value is the last segment that is required to form a complete set of - backup files. On a primary, if archive_mode is enabled and the - wait_for_archive parameter is true, - pg_stop_backup does not return until the last segment has + backup files. On a primary, if archive_mode is enabled and the + wait_for_archive parameter is true, + pg_stop_backup does not return until the last segment has been archived. - On a standby, archive_mode must be always in order - for pg_stop_backup to wait. + On a standby, archive_mode must be always in order + for pg_stop_backup to wait. Archiving of these files happens automatically since you have - already configured archive_command. In most cases this + already configured archive_command. In most cases this happens quickly, but you are advised to monitor your archive system to ensure there are no delays. If the archive process has fallen behind because of failures of the archive command, it will keep retrying until the archive succeeds and the backup is complete. If you wish to place a time limit on the execution of - pg_stop_backup, set an appropriate + pg_stop_backup, set an appropriate statement_timeout value, but make note that if - pg_stop_backup terminates because of this your backup + pg_stop_backup terminates because of this your backup may not be valid. If the backup process monitors and ensures that all WAL segment files required for the backup are successfully archived then the - wait_for_archive parameter (which defaults to true) can be set + wait_for_archive parameter (which defaults to true) can be set to false to have - pg_stop_backup return as soon as the stop backup record is - written to the WAL. By default, pg_stop_backup will wait + pg_stop_backup return as soon as the stop backup record is + written to the WAL. By default, pg_stop_backup will wait until all WAL has been archived, which can take some time. This option must be used with caution: if WAL archiving is not monitored correctly then the backup might not include all of the WAL files and will @@ -952,7 +952,7 @@ SELECT * FROM pg_stop_backup(false, true); The process for an exclusive backup is mostly the same as for a non-exclusive one, but it differs in a few key steps. This type of backup can only be taken on a primary and does not allow concurrent backups. 
- Prior to PostgreSQL 9.6, this + Prior to PostgreSQL 9.6, this was the only low-level method available, but it is now recommended that all users upgrade their scripts to use non-exclusive backups if possible. @@ -971,20 +971,20 @@ SELECT * FROM pg_stop_backup(false, true); SELECT pg_start_backup('label'); - where label is any string you want to use to uniquely + where label is any string you want to use to uniquely identify this backup operation. - pg_start_backup creates a backup label file, - called backup_label, in the cluster directory with + pg_start_backup creates a backup label file, + called backup_label, in the cluster directory with information about your backup, including the start time and label string. - The function also creates a tablespace map file, - called tablespace_map, in the cluster directory with - information about tablespace symbolic links in pg_tblspc/ if + The function also creates a tablespace map file, + called tablespace_map, in the cluster directory with + information about tablespace symbolic links in pg_tblspc/ if one or more such link is present. Both files are critical to the integrity of the backup, should you need to restore from it. - By default, pg_start_backup can take a long time to finish. + By default, pg_start_backup can take a long time to finish. This is because it performs a checkpoint, and the I/O required for the checkpoint will be spread out over a significant period of time, by default half your inter-checkpoint interval @@ -1002,7 +1002,7 @@ SELECT pg_start_backup('label', true); Perform the backup, using any convenient file-system-backup tool - such as tar or cpio (not + such as tar or cpio (not pg_dump or pg_dumpall). It is neither necessary nor desirable to stop normal operation of the database @@ -1012,7 +1012,7 @@ SELECT pg_start_backup('label', true); Note that if the server crashes during the backup it may not be - possible to restart until the backup_label file has been + possible to restart until the backup_label file has been manually deleted from the PGDATA directory. @@ -1033,22 +1033,22 @@ SELECT pg_stop_backup(); Once the WAL segment files active during the backup are archived, you are - done. The file identified by pg_stop_backup's result is + done. The file identified by pg_stop_backup's result is the last segment that is required to form a complete set of backup files. - If archive_mode is enabled, - pg_stop_backup does not return until the last segment has + If archive_mode is enabled, + pg_stop_backup does not return until the last segment has been archived. Archiving of these files happens automatically since you have - already configured archive_command. In most cases this + already configured archive_command. In most cases this happens quickly, but you are advised to monitor your archive system to ensure there are no delays. If the archive process has fallen behind because of failures of the archive command, it will keep retrying until the archive succeeds and the backup is complete. If you wish to place a time limit on the execution of - pg_stop_backup, set an appropriate + pg_stop_backup, set an appropriate statement_timeout value, but make note that if - pg_stop_backup terminates because of this your backup + pg_stop_backup terminates because of this your backup may not be valid. @@ -1063,21 +1063,21 @@ SELECT pg_stop_backup(); When taking a base backup of an active database, this situation is normal and not an error. However, you need to ensure that you can distinguish complaints of this sort from real errors. 
For example, some versions - of rsync return a separate exit code for - vanished source files, and you can write a driver script to + of rsync return a separate exit code for + vanished source files, and you can write a driver script to accept this exit code as a non-error case. Also, some versions of - GNU tar return an error code indistinguishable from - a fatal error if a file was truncated while tar was - copying it. Fortunately, GNU tar versions 1.16 and + GNU tar return an error code indistinguishable from + a fatal error if a file was truncated while tar was + copying it. Fortunately, GNU tar versions 1.16 and later exit with 1 if a file was changed during the backup, - and 2 for other errors. With GNU tar version 1.23 and + and 2 for other errors. With GNU tar version 1.23 and later, you can use the warning options --warning=no-file-changed --warning=no-file-removed to hide the related warning messages. Be certain that your backup includes all of the files under - the database cluster directory (e.g., /usr/local/pgsql/data). + the database cluster directory (e.g., /usr/local/pgsql/data). If you are using tablespaces that do not reside underneath this directory, be careful to include them as well (and be sure that your backup archives symbolic links as links, otherwise the restore will corrupt @@ -1086,21 +1086,21 @@ SELECT pg_stop_backup(); You should, however, omit from the backup the files within the - cluster's pg_wal/ subdirectory. This + cluster's pg_wal/ subdirectory. This slight adjustment is worthwhile because it reduces the risk of mistakes when restoring. This is easy to arrange if - pg_wal/ is a symbolic link pointing to someplace outside + pg_wal/ is a symbolic link pointing to someplace outside the cluster directory, which is a common setup anyway for performance - reasons. You might also want to exclude postmaster.pid - and postmaster.opts, which record information - about the running postmaster, not about the - postmaster which will eventually use this backup. - (These files can confuse pg_ctl.) + reasons. You might also want to exclude postmaster.pid + and postmaster.opts, which record information + about the running postmaster, not about the + postmaster which will eventually use this backup. + (These files can confuse pg_ctl.) It is often a good idea to also omit from the backup the files - within the cluster's pg_replslot/ directory, so that + within the cluster's pg_replslot/ directory, so that replication slots that exist on the master do not become part of the backup. Otherwise, the subsequent use of the backup to create a standby may result in indefinite retention of WAL files on the standby, and @@ -1114,10 +1114,10 @@ SELECT pg_stop_backup(); - The contents of the directories pg_dynshmem/, - pg_notify/, pg_serial/, - pg_snapshots/, pg_stat_tmp/, - and pg_subtrans/ (but not the directories themselves) can be + The contents of the directories pg_dynshmem/, + pg_notify/, pg_serial/, + pg_snapshots/, pg_stat_tmp/, + and pg_subtrans/ (but not the directories themselves) can be omitted from the backup as they will be initialized on postmaster startup. If is set and is under the data directory then the contents of that directory can also be omitted. 
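Purely as an illustration of the advice above (not a recommendation: the data directory path matches the earlier examples, and the exclude patterns assume GNU tar), a file-system backup that skips those files might look like:

tar -cf backup.tar \
    --exclude='pg_wal/*' \
    --exclude='pg_replslot/*' \
    --exclude=postmaster.pid \
    --exclude=postmaster.opts \
    /usr/local/pgsql/data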
@@ -1131,13 +1131,13 @@ SELECT pg_stop_backup(); The backup label - file includes the label string you gave to pg_start_backup, - as well as the time at which pg_start_backup was run, and + file includes the label string you gave to pg_start_backup, + as well as the time at which pg_start_backup was run, and the name of the starting WAL file. In case of confusion it is therefore possible to look inside a backup file and determine exactly which backup session the dump file came from. The tablespace map file includes the symbolic link names as they exist in the directory - pg_tblspc/ and the full path of each symbolic link. + pg_tblspc/ and the full path of each symbolic link. These files are not merely for your information; their presence and contents are critical to the proper operation of the system's recovery process. @@ -1146,7 +1146,7 @@ SELECT pg_stop_backup(); It is also possible to make a backup while the server is stopped. In this case, you obviously cannot use - pg_start_backup or pg_stop_backup, and + pg_start_backup or pg_stop_backup, and you will therefore be left to your own devices to keep track of which backup is which and how far back the associated WAL files go. It is generally better to follow the continuous archiving procedure above. @@ -1173,7 +1173,7 @@ SELECT pg_stop_backup(); location in case you need them later. Note that this precaution will require that you have enough free space on your system to hold two copies of your existing database. If you do not have enough space, - you should at least save the contents of the cluster's pg_wal + you should at least save the contents of the cluster's pg_wal subdirectory, as it might contain logs which were not archived before the system went down. @@ -1188,17 +1188,17 @@ SELECT pg_stop_backup(); Restore the database files from your file system backup. Be sure that they are restored with the right ownership (the database system user, not - root!) and with the right permissions. If you are using + root!) and with the right permissions. If you are using tablespaces, - you should verify that the symbolic links in pg_tblspc/ + you should verify that the symbolic links in pg_tblspc/ were correctly restored. - Remove any files present in pg_wal/; these came from the + Remove any files present in pg_wal/; these came from the file system backup and are therefore probably obsolete rather than current. - If you didn't archive pg_wal/ at all, then recreate + If you didn't archive pg_wal/ at all, then recreate it with proper permissions, being careful to ensure that you re-establish it as a symbolic link if you had it set up that way before. @@ -1207,16 +1207,16 @@ SELECT pg_stop_backup(); If you have unarchived WAL segment files that you saved in step 2, - copy them into pg_wal/. (It is best to copy them, + copy them into pg_wal/. (It is best to copy them, not move them, so you still have the unmodified files if a problem occurs and you have to start over.) - Create a recovery command file recovery.conf in the cluster + Create a recovery command file recovery.conf in the cluster data directory (see ). You might - also want to temporarily modify pg_hba.conf to prevent + also want to temporarily modify pg_hba.conf to prevent ordinary users from connecting until you are sure the recovery was successful. @@ -1227,7 +1227,7 @@ SELECT pg_stop_backup(); recovery be terminated because of an external error, the server can simply be restarted and it will continue recovery. 
Upon completion of the recovery process, the server will rename - recovery.conf to recovery.done (to prevent + recovery.conf to recovery.done (to prevent accidentally re-entering recovery mode later) and then commence normal database operations. @@ -1236,7 +1236,7 @@ SELECT pg_stop_backup(); Inspect the contents of the database to ensure you have recovered to the desired state. If not, return to step 1. If all is well, - allow your users to connect by restoring pg_hba.conf to normal. + allow your users to connect by restoring pg_hba.conf to normal. @@ -1245,32 +1245,32 @@ SELECT pg_stop_backup(); The key part of all this is to set up a recovery configuration file that describes how you want to recover and how far the recovery should - run. You can use recovery.conf.sample (normally - located in the installation's share/ directory) as a + run. You can use recovery.conf.sample (normally + located in the installation's share/ directory) as a prototype. The one thing that you absolutely must specify in - recovery.conf is the restore_command, - which tells PostgreSQL how to retrieve archived - WAL file segments. Like the archive_command, this is - a shell command string. It can contain %f, which is - replaced by the name of the desired log file, and %p, + recovery.conf is the restore_command, + which tells PostgreSQL how to retrieve archived + WAL file segments. Like the archive_command, this is + a shell command string. It can contain %f, which is + replaced by the name of the desired log file, and %p, which is replaced by the path name to copy the log file to. (The path name is relative to the current working directory, i.e., the cluster's data directory.) - Write %% if you need to embed an actual % + Write %% if you need to embed an actual % character in the command. The simplest useful command is something like: restore_command = 'cp /mnt/server/archivedir/%f %p' which will copy previously archived WAL segments from the directory - /mnt/server/archivedir. Of course, you can use something + /mnt/server/archivedir. Of course, you can use something much more complicated, perhaps even a shell script that requests the operator to mount an appropriate tape. It is important that the command return nonzero exit status on failure. - The command will be called requesting files that are not + The command will be called requesting files that are not present in the archive; it must return nonzero when so asked. This is not an error condition. An exception is that if the command was terminated by a signal (other than SIGTERM, which is used as @@ -1282,27 +1282,27 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' Not all of the requested files will be WAL segment files; you should also expect requests for files with a suffix of - .backup or .history. Also be aware that - the base name of the %p path will be different from - %f; do not expect them to be interchangeable. + .backup or .history. Also be aware that + the base name of the %p path will be different from + %f; do not expect them to be interchangeable. WAL segments that cannot be found in the archive will be sought in - pg_wal/; this allows use of recent un-archived segments. + pg_wal/; this allows use of recent un-archived segments. However, segments that are available from the archive will be used in - preference to files in pg_wal/. + preference to files in pg_wal/. 
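Putting this together, and anticipating the recovery-target discussion just below, a minimal recovery.conf sketch might read (the archive path matches the example above; the timestamp is hypothetical, and the second line can be omitted to replay all available WAL):

restore_command = 'cp /mnt/server/archivedir/%f %p'
recovery_target_time = '2017-10-08 17:14:00'  # optional stop point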
Normally, recovery will proceed through all available WAL segments, thereby restoring the database to the current point in time (or as close as possible given the available WAL segments). Therefore, a normal - recovery will end with a file not found message, the exact text + recovery will end with a file not found message, the exact text of the error message depending upon your choice of - restore_command. You may also see an error message + restore_command. You may also see an error message at the start of recovery for a file named something like - 00000001.history. This is also normal and does not + 00000001.history. This is also normal and does not indicate a problem in simple recovery situations; see for discussion. @@ -1310,8 +1310,8 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' If you want to recover to some previous point in time (say, right before the junior DBA dropped your main transaction table), just specify the - required stopping point in recovery.conf. You can specify - the stop point, known as the recovery target, either by + required stopping point in recovery.conf. You can specify + the stop point, known as the recovery target, either by date/time, named restore point or by completion of a specific transaction ID. As of this writing only the date/time and named restore point options are very usable, since there are no tools to help you identify with any @@ -1321,7 +1321,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' The stop point must be after the ending time of the base backup, i.e., - the end time of pg_stop_backup. You cannot use a base backup + the end time of pg_stop_backup. You cannot use a base backup to recover to a time when that backup was in progress. (To recover to such a time, you must go back to your previous base backup and roll forward from there.) @@ -1332,14 +1332,14 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' If recovery finds corrupted WAL data, recovery will halt at that point and the server will not start. In such a case the recovery process could be re-run from the beginning, specifying a - recovery target before the point of corruption so that recovery + recovery target before the point of corruption so that recovery can complete normally. If recovery fails for an external reason, such as a system crash or if the WAL archive has become inaccessible, then the recovery can simply be restarted and it will restart almost from where it failed. Recovery restart works much like checkpointing in normal operation: the server periodically forces all its state to disk, and then updates - the pg_control file to indicate that the already-processed + the pg_control file to indicate that the already-processed WAL data need not be scanned again. @@ -1359,7 +1359,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' suppose you dropped a critical table at 5:15PM on Tuesday evening, but didn't realize your mistake until Wednesday noon. Unfazed, you get out your backup, restore to the point-in-time 5:14PM - Tuesday evening, and are up and running. In this history of + Tuesday evening, and are up and running. In this history of the database universe, you never dropped the table. But suppose you later realize this wasn't such a great idea, and would like to return to sometime Wednesday morning in the original history. @@ -1372,8 +1372,8 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' - To deal with this problem, PostgreSQL has a notion - of timelines. 
Whenever an archive recovery completes, + To deal with this problem, PostgreSQL has a notion + of timelines. Whenever an archive recovery completes, a new timeline is created to identify the series of WAL records generated after that recovery. The timeline ID number is part of WAL segment file names so a new timeline does @@ -1384,13 +1384,13 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' and so have to do several point-in-time recoveries by trial and error until you find the best place to branch off from the old history. Without timelines this process would soon generate an unmanageable mess. With - timelines, you can recover to any prior state, including + timelines, you can recover to any prior state, including states in timeline branches that you abandoned earlier. - Every time a new timeline is created, PostgreSQL creates - a timeline history file that shows which timeline it branched + Every time a new timeline is created, PostgreSQL creates + a timeline history file that shows which timeline it branched off from and when. These history files are necessary to allow the system to pick the right WAL segment files when recovering from an archive that contains multiple timelines. Therefore, they are archived into the WAL @@ -1408,7 +1408,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' that was current when the base backup was taken. If you wish to recover into some child timeline (that is, you want to return to some state that was itself generated after a recovery attempt), you need to specify the - target timeline ID in recovery.conf. You cannot recover into + target timeline ID in recovery.conf. You cannot recover into timelines that branched off earlier than the base backup. @@ -1424,18 +1424,18 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' Standalone Hot Backups - It is possible to use PostgreSQL's backup facilities to + It is possible to use PostgreSQL's backup facilities to produce standalone hot backups. These are backups that cannot be used for point-in-time recovery, yet are typically much faster to back up and - restore than pg_dump dumps. (They are also much larger - than pg_dump dumps, so in some cases the speed advantage + restore than pg_dump dumps. (They are also much larger + than pg_dump dumps, so in some cases the speed advantage might be negated.) As with base backups, the easiest way to produce a standalone hot backup is to use the - tool. If you include the -X parameter when calling + tool. If you include the -X parameter when calling it, all the write-ahead log required to use the backup will be included in the backup automatically, and no special action is required to restore the backup. @@ -1445,16 +1445,16 @@ restore_command = 'cp /mnt/server/archivedir/%f %p' If more flexibility in copying the backup files is needed, a lower level process can be used for standalone hot backups as well. To prepare for low level standalone hot backups, make sure - wal_level is set to - replica or higher, archive_mode to - on, and set up an archive_command that performs - archiving only when a switch file exists. For example: + wal_level is set to + replica or higher, archive_mode to + on, and set up an archive_command that performs + archiving only when a switch file exists. For example: archive_command = 'test ! -f /var/lib/pgsql/backup_in_progress || (test !
-f /var/lib/pgsql/archive/%f && cp %p /var/lib/pgsql/archive/%f)' This command will perform archiving when - /var/lib/pgsql/backup_in_progress exists, and otherwise - silently return zero exit status (allowing PostgreSQL + /var/lib/pgsql/backup_in_progress exists, and otherwise + silently return zero exit status (allowing PostgreSQL to recycle the unwanted WAL file). @@ -1469,11 +1469,11 @@ psql -c "select pg_stop_backup();" rm /var/lib/pgsql/backup_in_progress tar -rf /var/lib/pgsql/backup.tar /var/lib/pgsql/archive/ - The switch file /var/lib/pgsql/backup_in_progress is + The switch file /var/lib/pgsql/backup_in_progress is created first, enabling archiving of completed WAL files to occur. After the backup the switch file is removed. Archived WAL files are then added to the backup so that both base backup and all required - WAL files are part of the same tar file. + WAL files are part of the same tar file. Please remember to add error handling to your backup scripts. @@ -1488,7 +1488,7 @@ tar -rf /var/lib/pgsql/backup.tar /var/lib/pgsql/archive/ archive_command = 'gzip < %p > /var/lib/pgsql/archive/%f' - You will then need to use gunzip during recovery: + You will then need to use gunzip during recovery: restore_command = 'gunzip < /mnt/server/archivedir/%f > %p' @@ -1501,7 +1501,7 @@ restore_command = 'gunzip < /mnt/server/archivedir/%f > %p' Many people choose to use scripts to define their archive_command, so that their - postgresql.conf entry looks very simple: + postgresql.conf entry looks very simple: archive_command = 'local_backup_script.sh "%p" "%f"' @@ -1509,7 +1509,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"' more than a single command in the archiving process. This allows all complexity to be managed within the script, which can be written in a popular scripting language such as - bash or perl. + bash or perl. @@ -1543,7 +1543,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"' When using an archive_command script, it's desirable to enable . - Any messages written to stderr from the script will then + Any messages written to stderr from the script will then appear in the database server log, allowing complex configurations to be diagnosed easily if they fail. @@ -1563,7 +1563,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"' If a command is executed while a base backup is being taken, and then - the template database that the CREATE DATABASE copied + the template database that the CREATE DATABASE copied is modified while the base backup is still in progress, it is possible that recovery will cause those modifications to be propagated into the created database as well. This is of course @@ -1602,7 +1602,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"' before you do so.) Turning off page snapshots does not prevent use of the logs for PITR operations. An area for future development is to compress archived WAL data by removing - unnecessary page copies even when full_page_writes is + unnecessary page copies even when full_page_writes is on. In the meantime, administrators might wish to reduce the number of page snapshots included in WAL by increasing the checkpoint interval parameters as much as feasible. diff --git a/doc/src/sgml/bgworker.sgml b/doc/src/sgml/bgworker.sgml index ea1b5c0c8e..0b092f6e49 100644 --- a/doc/src/sgml/bgworker.sgml +++ b/doc/src/sgml/bgworker.sgml @@ -11,17 +11,17 @@ PostgreSQL can be extended to run user-supplied code in separate processes. 
Such processes are started, stopped and monitored by postgres, which permits them to have a lifetime closely linked to the server's status. - These processes have the option to attach to PostgreSQL's + These processes have the option to attach to PostgreSQL's shared memory area and to connect to databases internally; they can also run multiple transactions serially, just like a regular client-connected server - process. Also, by linking to libpq they can connect to the + process. Also, by linking to libpq they can connect to the server and behave like a regular client application. There are considerable robustness and security risks in using background - worker processes because, being written in the C language, + worker processes because, being written in the C language, they have unrestricted access to data. Administrators wishing to enable modules that include background worker processes should exercise extreme caution. Only carefully audited modules should be permitted to run @@ -31,15 +31,15 @@ Background workers can be initialized at the time that - PostgreSQL is started by including the module name in - shared_preload_libraries. A module wishing to run a background + PostgreSQL is started by including the module name in + shared_preload_libraries. A module wishing to run a background worker can register it by calling RegisterBackgroundWorker(BackgroundWorker *worker) - from its _PG_init(). Background workers can also be started + from its _PG_init(). Background workers can also be started after the system is up and running by calling the function RegisterDynamicBackgroundWorker(BackgroundWorker *worker, BackgroundWorkerHandle **handle). Unlike - RegisterBackgroundWorker, which can only be called from within + RegisterBackgroundWorker, which can only be called from within the postmaster, RegisterDynamicBackgroundWorker must be called from a regular backend. @@ -65,7 +65,7 @@ typedef struct BackgroundWorker - bgw_name and bgw_type are + bgw_name and bgw_type are strings to be used in log messages, process listings and similar contexts. bgw_type should be the same for all background workers of the same type, so that it is possible to group such workers in a @@ -76,7 +76,7 @@ typedef struct BackgroundWorker - bgw_flags is a bitwise-or'd bit mask indicating the + bgw_flags is a bitwise-or'd bit mask indicating the capabilities that the module wants. Possible values are: @@ -114,14 +114,14 @@ typedef struct BackgroundWorker bgw_start_time is the server state during which - postgres should start the process; it can be one of - BgWorkerStart_PostmasterStart (start as soon as - postgres itself has finished its own initialization; processes + postgres should start the process; it can be one of + BgWorkerStart_PostmasterStart (start as soon as + postgres itself has finished its own initialization; processes requesting this are not eligible for database connections), - BgWorkerStart_ConsistentState (start as soon as a consistent state + BgWorkerStart_ConsistentState (start as soon as a consistent state has been reached in a hot standby, allowing processes to connect to databases and run read-only queries), and - BgWorkerStart_RecoveryFinished (start as soon as the system has + BgWorkerStart_RecoveryFinished (start as soon as the system has entered normal read-write state). Note the last two values are equivalent in a server that's not a hot standby.
Note that this setting only indicates when the processes are to be started; they do not stop when a different state @@ -152,9 +152,9 @@ typedef struct BackgroundWorker - bgw_main_arg is the Datum argument + bgw_main_arg is the Datum argument to the background worker main function. This main function should take a - single argument of type Datum and return void. + single argument of type Datum and return void. bgw_main_arg will be passed as the argument. In addition, the global variable MyBgworkerEntry points to a copy of the BackgroundWorker structure @@ -165,39 +165,39 @@ typedef struct BackgroundWorker On Windows (and anywhere else where EXEC_BACKEND is defined) or in dynamic background workers it is not safe to pass a - Datum by reference, only by value. If an argument is required, it + Datum by reference, only by value. If an argument is required, it is safest to pass an int32 or other small value and use that as an index - into an array allocated in shared memory. If a value like a cstring + into an array allocated in shared memory. If a value like a cstring or text is passed then the pointer won't be valid from the new background worker process. bgw_extra can contain extra data to be passed - to the background worker. Unlike bgw_main_arg, this data + to the background worker. Unlike bgw_main_arg, this data is not passed as an argument to the worker's main function, but it can be accessed via MyBgworkerEntry, as discussed above. bgw_notify_pid is the PID of a PostgreSQL - backend process to which the postmaster should send SIGUSR1 + backend process to which the postmaster should send SIGUSR1 when the process is started or exits. It should be 0 for workers registered at postmaster startup time, or when the backend registering the worker does not wish to wait for the worker to start up. Otherwise, it should be - initialized to MyProcPid. + initialized to MyProcPid. Once running, the process can connect to a database by calling BackgroundWorkerInitializeConnection(char *dbname, char *username) or BackgroundWorkerInitializeConnectionByOid(Oid dboid, Oid useroid). This allows the process to run transactions and queries using the - SPI interface. If dbname is NULL or - dboid is InvalidOid, the session is not connected + SPI interface. If dbname is NULL or + dboid is InvalidOid, the session is not connected to any particular database, but shared catalogs can be accessed. - If username is NULL or useroid is - InvalidOid, the process will run as the superuser created - during initdb. + If username is NULL or useroid is + InvalidOid, the process will run as the superuser created + during initdb. A background worker can only call one of these two functions, and only once. It is not possible to switch databases. @@ -207,24 +207,24 @@ typedef struct BackgroundWorker background worker's main function, and must be unblocked by it; this is to allow the process to customize its signal handlers, if necessary. Signals can be unblocked in the new process by calling - BackgroundWorkerUnblockSignals and blocked by calling - BackgroundWorkerBlockSignals. + BackgroundWorkerUnblockSignals and blocked by calling + BackgroundWorkerBlockSignals. If bgw_restart_time for a background worker is - configured as BGW_NEVER_RESTART, or if it exits with an exit - code of 0 or is terminated by TerminateBackgroundWorker, + configured as BGW_NEVER_RESTART, or if it exits with an exit + code of 0 or is terminated by TerminateBackgroundWorker, it will be automatically unregistered by the postmaster on exit. 
Otherwise, it will be restarted after the time period configured via - bgw_restart_time, or immediately if the postmaster + bgw_restart_time, or immediately if the postmaster reinitializes the cluster due to a backend failure. Backends which need to suspend execution only temporarily should use an interruptible sleep rather than exiting; this can be achieved by calling WaitLatch(). Make sure the - WL_POSTMASTER_DEATH flag is set when calling that function, and + WL_POSTMASTER_DEATH flag is set when calling that function, and verify the return code for a prompt exit in the emergency case that - postgres itself has terminated. + postgres itself has terminated. @@ -238,29 +238,29 @@ typedef struct BackgroundWorker opaque handle that can subsequently be passed to GetBackgroundWorkerPid(BackgroundWorkerHandle *, pid_t *) or TerminateBackgroundWorker(BackgroundWorkerHandle *). - GetBackgroundWorkerPid can be used to poll the status of the - worker: a return value of BGWH_NOT_YET_STARTED indicates that + GetBackgroundWorkerPid can be used to poll the status of the + worker: a return value of BGWH_NOT_YET_STARTED indicates that the worker has not yet been started by the postmaster; BGWH_STOPPED indicates that it has been started but is no longer running; and BGWH_STARTED indicates that it is currently running. In this last case, the PID will also be returned via the second argument. - TerminateBackgroundWorker causes the postmaster to send - SIGTERM to the worker if it is running, and to unregister it + TerminateBackgroundWorker causes the postmaster to send - SIGTERM to the worker if it is running, and to unregister it as soon as it is not. In some cases, a process which registers a background worker may wish to wait for the worker to start up. This can be accomplished by initializing - bgw_notify_pid to MyProcPid and + bgw_notify_pid to MyProcPid and then passing the BackgroundWorkerHandle * obtained at registration time to the WaitForBackgroundWorkerStartup(BackgroundWorkerHandle *handle, pid_t *) function. This function will block until the postmaster has attempted to start the background worker, or until the postmaster dies. If the background worker - is running, the return value will be BGWH_STARTED, and + is running, the return value will be BGWH_STARTED, and the PID will be written to the provided address. Otherwise, the return value will be BGWH_STOPPED or BGWH_POSTMASTER_DIED. @@ -279,7 +279,7 @@ typedef struct BackgroundWorker - The src/test/modules/worker_spi module + The src/test/modules/worker_spi module contains a working example, which demonstrates some useful techniques. diff --git a/doc/src/sgml/biblio.sgml b/doc/src/sgml/biblio.sgml index 5462bc38e4..d7547e6e92 100644 --- a/doc/src/sgml/biblio.sgml +++ b/doc/src/sgml/biblio.sgml @@ -171,7 +171,7 @@ ssimkovi@ag.or.at Discusses SQL history and syntax, and describes the addition of - INTERSECT and EXCEPT constructs into + INTERSECT and EXCEPT constructs into PostgreSQL. Prepared as a Master's Thesis with the support of O. Univ. Prof. Dr. Georg Gottlob and Univ. Ass. Mag. Katrin Seyr at Vienna University of Technology.
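Stepping back to the background-worker interface above: the src/test/modules/worker_spi module just mentioned exposes the dynamic-registration path through SQL, which makes a convenient smoke test. A minimal sketch, assuming the module has been built and installed (it is a test module, not part of a default installation) and assuming its worker reports itself in pg_stat_activity under the backend type worker_spi:

CREATE EXTENSION worker_spi;

-- worker_spi_launch() calls RegisterDynamicBackgroundWorker() under
-- the hood and returns the PID of the new worker.
SELECT worker_spi_launch(1);

-- The worker is then visible alongside regular backends:
SELECT pid, backend_type, state
FROM pg_stat_activity
WHERE backend_type = 'worker_spi';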
diff --git a/doc/src/sgml/bki.sgml b/doc/src/sgml/bki.sgml index af6d8d1d2a..33378b46ea 100644 --- a/doc/src/sgml/bki.sgml +++ b/doc/src/sgml/bki.sgml @@ -21,7 +21,7 @@ input file used by initdb is created as part of building and installing PostgreSQL by a program named genbki.pl, which reads some - specially formatted C header files in the src/include/catalog/ + specially formatted C header files in the src/include/catalog/ directory of the source tree. The created BKI file is called postgres.bki and is normally installed in the @@ -67,13 +67,13 @@ - create + create tablename tableoid - bootstrap - shared_relation - without_oids - rowtype_oid oid + bootstrap + shared_relation + without_oids + rowtype_oid oid (name1 = type1 FORCE NOT NULL | FORCE NULL , @@ -93,7 +93,7 @@ The following column types are supported directly by - bootstrap.c: bool, + bootstrap.c: bool, bytea, char (1 byte), name, int2, int4, regproc, regclass, @@ -104,31 +104,31 @@ _oid (array), _char (array), _aclitem (array). Although it is possible to create tables containing columns of other types, this cannot be done until - after pg_type has been created and filled with + after pg_type has been created and filled with appropriate entries. (That effectively means that only these column types can be used in bootstrapped tables, but non-bootstrap catalogs can contain any built-in type.) - When bootstrap is specified, + When bootstrap is specified, the table will only be created on disk; nothing is entered into pg_class, pg_attribute, etc, for it. Thus the table will not be accessible by ordinary SQL operations until - such entries are made the hard way (with insert + such entries are made the hard way (with insert commands). This option is used for creating pg_class etc themselves. - The table is created as shared if shared_relation is + The table is created as shared if shared_relation is specified. - It will have OIDs unless without_oids is specified. - The table's row type OID (pg_type OID) can optionally - be specified via the rowtype_oid clause; if not specified, - an OID is automatically generated for it. (The rowtype_oid - clause is useless if bootstrap is specified, but it can be + It will have OIDs unless without_oids is specified. + The table's row type OID (pg_type OID) can optionally + be specified via the rowtype_oid clause; if not specified, + an OID is automatically generated for it. (The rowtype_oid + clause is useless if bootstrap is specified, but it can be provided anyway for documentation.) @@ -136,7 +136,7 @@ - open tablename + open tablename @@ -150,7 +150,7 @@ - close tablename + close tablename @@ -163,7 +163,7 @@ - insert OID = oid_value ( value1 value2 ... ) + insert OID = oid_value ( value1 value2 ... ) @@ -188,14 +188,14 @@ - declare unique - index indexname + declare unique + index indexname indexoid - on tablename - using amname - ( opclass1 + on tablename + using amname + ( opclass1 name1 - , ... ) + , ... ) @@ -220,10 +220,10 @@ - declare toast + declare toast toasttableoid toastindexoid - on tablename + on tablename @@ -234,14 +234,14 @@ toasttableoid and its index is assigned OID toastindexoid. - As with declare index, filling of the index + As with declare index, filling of the index is postponed. - build indices + build indices @@ -257,17 +257,17 @@ Structure of the Bootstrap <acronym>BKI</acronym> File - The open command cannot be used until the tables it uses + The open command cannot be used until the tables it uses exist and have entries for the table that is to be opened. 
- (These minimum tables are pg_class, - pg_attribute, pg_proc, and - pg_type.) To allow those tables themselves to be filled, - create with the bootstrap option implicitly opens + (These minimum tables are pg_class, + pg_attribute, pg_proc, and + pg_type.) To allow those tables themselves to be filled, + create with the bootstrap option implicitly opens the created table for data insertion. - Also, the declare index and declare toast + Also, the declare index and declare toast commands cannot be used until the system catalogs they need have been created and filled in. @@ -278,17 +278,17 @@ - create bootstrap one of the critical tables + create bootstrap one of the critical tables - insert data describing at least the critical tables + insert data describing at least the critical tables - close + close @@ -298,22 +298,22 @@ - create (without bootstrap) a noncritical table + create (without bootstrap) a noncritical table - open + open - insert desired data + insert desired data - close + close @@ -328,7 +328,7 @@ - build indices + build indices diff --git a/doc/src/sgml/bloom.sgml b/doc/src/sgml/bloom.sgml index 396348c523..e13ebf80fd 100644 --- a/doc/src/sgml/bloom.sgml +++ b/doc/src/sgml/bloom.sgml @@ -8,7 +8,7 @@ - bloom provides an index access method based on + bloom provides an index access method based on Bloom filters. @@ -42,29 +42,29 @@ Parameters - A bloom index accepts the following parameters in its - WITH clause: + A bloom index accepts the following parameters in its + WITH clause: - length + length Length of each signature (index entry) in bits. The default - is 80 bits and maximum is 4096. + is 80 bits and maximum is 4096. - col1 — col32 + col1 — col32 Number of bits generated for each index column. Each parameter's name refers to the number of the index column that it controls. The default - is 2 bits and maximum is 4095. Parameters for + is 2 bits and maximum is 4095. Parameters for index columns not actually used are ignored. @@ -87,8 +87,8 @@ CREATE INDEX bloomidx ON tbloom USING bloom (i1,i2,i3) The index is created with a signature length of 80 bits, with attributes i1 and i2 mapped to 2 bits, and attribute i3 mapped to 4 bits. We could - have omitted the length, col1, - and col2 specifications since those have the default values. + have omitted the length, col1, + and col2 specifications since those have the default values. @@ -175,7 +175,7 @@ CREATE INDEX Note the relatively large number of false positives: 2439 rows were selected to be visited in the heap, but none actually matched the query. We could reduce that by specifying a larger signature length. - In this example, creating the index with length=200 + In this example, creating the index with length=200 reduced the number of false positives to 55; but it doubled the index size (to 306 MB) and ended up being slower for this query (125 ms overall). @@ -213,7 +213,7 @@ CREATE INDEX An operator class for bloom indexes requires only a hash function for the indexed data type and an equality operator for searching. This example - shows the operator class definition for the text data type: + shows the operator class definition for the text data type: @@ -230,7 +230,7 @@ DEFAULT FOR TYPE text USING bloom AS - Only operator classes for int4 and text are + Only operator classes for int4 and text are included with the module. 
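To restate the WITH-clause parameters in one place: the bloomidx example above, written with every setting explicit. The length, col1, and col2 values shown are the defaults, so this is equivalent to the shorter form used earlier:

-- 80-bit signatures; i1 and i2 contribute 2 bits each, i3 contributes 4.
CREATE INDEX bloomidx ON tbloom USING bloom (i1, i2, i3)
    WITH (length = 80, col1 = 2, col2 = 2, col3 = 4);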
diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml index 8dcc29925b..91c01700ed 100644 --- a/doc/src/sgml/brin.sgml +++ b/doc/src/sgml/brin.sgml @@ -16,7 +16,7 @@ BRIN is designed for handling very large tables in which certain columns have some natural correlation with their physical location within the table. - A block range is a group of pages that are physically + A block range is a group of pages that are physically adjacent in the table; for each block range, some summary info is stored by the index. For example, a table storing a store's sale orders might have @@ -29,7 +29,7 @@ BRIN indexes can satisfy queries via regular bitmap index scans, and will return all tuples in all pages within each range if - the summary info stored by the index is consistent with the + the summary info stored by the index is consistent with the query conditions. The query executor is in charge of rechecking these tuples and discarding those that do not match the query conditions — in other words, these @@ -51,9 +51,9 @@ The size of the block range is determined at index creation time by - the pages_per_range storage parameter. The number of index + the pages_per_range storage parameter. The number of index entries will be equal to the size of the relation in pages divided by - the selected value for pages_per_range. Therefore, the smaller + the selected value for pages_per_range. Therefore, the smaller the number, the larger the index becomes (because of the need to store more index entries), but at the same time the summary data stored can be more precise and more data blocks can be skipped during an index scan. @@ -99,9 +99,9 @@ - The minmax + The minmax operator classes store the minimum and the maximum values appearing - in the indexed column within the range. The inclusion + in the indexed column within the range. The inclusion operator classes store a value which includes the values in the indexed column within the range. @@ -162,21 +162,21 @@ - box_inclusion_ops + box_inclusion_ops box - << - &< - && - &> - >> - ~= - @> - <@ - &<| - <<| + << + &< + && + &> + >> + ~= + @> + <@ + &<| + <<| |>> - |&> + |&> @@ -249,11 +249,11 @@ network_inclusion_ops inet - && - >>= + && + >>= <<= = - >> + >> << @@ -346,18 +346,18 @@ - range_inclusion_ops + range_inclusion_ops any range type - << - &< - && - &> - >> - @> - <@ - -|- - = + << + &< + && + &> + >> + @> + <@ + -|- + = < <= = @@ -505,11 +505,11 @@ - BrinOpcInfo *opcInfo(Oid type_oid) + BrinOpcInfo *opcInfo(Oid type_oid) Returns internal information about the indexed columns' summary data. - The return value must point to a palloc'd BrinOpcInfo, + The return value must point to a palloc'd BrinOpcInfo, which has this definition: typedef struct BrinOpcInfo @@ -524,7 +524,7 @@ typedef struct BrinOpcInfo TypeCacheEntry *oi_typcache[FLEXIBLE_ARRAY_MEMBER]; } BrinOpcInfo; - BrinOpcInfo.oi_opaque can be used by the + BrinOpcInfo.oi_opaque can be used by the operator class routines to pass information between support procedures during an index scan. @@ -797,8 +797,8 @@ typedef struct BrinOpcInfo It should accept two arguments with the same data type as the operator class, and return the union of them. The inclusion operator class can store union values with different data types if it is defined with the - STORAGE parameter. The return value of the union - function should match the STORAGE data type. + STORAGE parameter. The return value of the union + function should match the STORAGE data type. 
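A brief sketch of the pages_per_range trade-off described above; the table and column names here are purely illustrative:

-- The default is 128 pages per range; a smaller setting makes the BRIN
-- index larger but lets scans skip blocks at a finer granularity.
CREATE TABLE sales (sale_date date, amount numeric);
CREATE INDEX sales_brin_idx ON sales
    USING brin (sale_date) WITH (pages_per_range = 32);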
@@ -823,11 +823,11 @@ typedef struct BrinOpcInfo on another operator strategy as shown in , or the same operator strategy as themselves. They require the dependency - operator to be defined with the STORAGE data type as the + operator to be defined with the STORAGE data type as the left-hand-side argument and the other supported data type to be the right-hand-side argument of the supported operator. See - float4_minmax_ops as an example of minmax, and - box_inclusion_ops as an example of inclusion. + float4_minmax_ops as an example of minmax, and + box_inclusion_ops as an example of inclusion. diff --git a/doc/src/sgml/btree-gin.sgml b/doc/src/sgml/btree-gin.sgml index 375e7ec4be..e491fa76e7 100644 --- a/doc/src/sgml/btree-gin.sgml +++ b/doc/src/sgml/btree-gin.sgml @@ -8,16 +8,16 @@ - btree_gin provides sample GIN operator classes that + btree_gin provides sample GIN operator classes that implement B-tree equivalent behavior for the data types - int2, int4, int8, float4, - float8, timestamp with time zone, - timestamp without time zone, time with time zone, - time without time zone, date, interval, - oid, money, "char", - varchar, text, bytea, bit, - varbit, macaddr, macaddr8, inet, - cidr, and all enum types. + int2, int4, int8, float4, + float8, timestamp with time zone, + timestamp without time zone, time with time zone, + time without time zone, date, interval, + oid, money, "char", + varchar, text, bytea, bit, + varbit, macaddr, macaddr8, inet, + cidr, and all enum types. diff --git a/doc/src/sgml/btree-gist.sgml b/doc/src/sgml/btree-gist.sgml index f3c639c2f3..dcb939f1fb 100644 --- a/doc/src/sgml/btree-gist.sgml +++ b/doc/src/sgml/btree-gist.sgml @@ -8,16 +8,16 @@ - btree_gist provides GiST index operator classes that + btree_gist provides GiST index operator classes that implement B-tree equivalent behavior for the data types - int2, int4, int8, float4, - float8, numeric, timestamp with time zone, - timestamp without time zone, time with time zone, - time without time zone, date, interval, - oid, money, char, - varchar, text, bytea, bit, - varbit, macaddr, macaddr8, inet, - cidr, uuid, and all enum types. + int2, int4, int8, float4, + float8, numeric, timestamp with time zone, + timestamp without time zone, time with time zone, + time without time zone, date, interval, + oid, money, char, + varchar, text, bytea, bit, + varbit, macaddr, macaddr8, inet, + cidr, uuid, and all enum types. @@ -33,7 +33,7 @@ - In addition to the typical B-tree search operators, btree_gist + In addition to the typical B-tree search operators, btree_gist also provides index support for <> (not equals). This may be useful in combination with an exclusion constraint, @@ -42,14 +42,14 @@ Also, for data types for which there is a natural distance metric, - btree_gist defines a distance operator <->, + btree_gist defines a distance operator <->, and provides GiST index support for nearest-neighbor searches using this operator. Distance operators are provided for - int2, int4, int8, float4, - float8, timestamp with time zone, - timestamp without time zone, - time without time zone, date, interval, - oid, and money. + int2, int4, int8, float4, + float8, timestamp with time zone, + timestamp without time zone, + time without time zone, date, interval, + oid, and money. diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml index cfec2465d2..ef60a58631 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -387,7 +387,7 @@
- <structname>pg_aggregate</> Columns + <structname>pg_aggregate</structname> Columns @@ -410,9 +410,9 @@ charAggregate kind: - n for normal aggregates, - o for ordered-set aggregates, or - h for hypothetical-set aggregates + n for normal aggregates, + o for ordered-set aggregates, or + h for hypothetical-set aggregates @@ -421,7 +421,7 @@ Number of direct (non-aggregated) arguments of an ordered-set or hypothetical-set aggregate, counting a variadic array as one argument. - If equal to pronargs, the aggregate must be variadic + If equal to pronargs, the aggregate must be variadic and the variadic array describes the aggregated arguments as well as the final direct arguments. Always zero for normal aggregates. @@ -592,7 +592,7 @@
- <structname>pg_am</> Columns + <structname>pg_am</structname> Columns @@ -644,7 +644,7 @@ - Before PostgreSQL 9.6, pg_am + Before PostgreSQL 9.6, pg_am contained many additional columns representing properties of index access methods. That data is now only directly visible at the C code level. However, pg_index_column_has_property() and related @@ -667,8 +667,8 @@ The catalog pg_amop stores information about operators associated with access method operator families. There is one row for each operator that is a member of an operator family. A family - member can be either a search operator or an - ordering operator. An operator + member can be either a search operator or an + ordering operator. An operator can appear in more than one family, but cannot appear in more than one search position nor more than one ordering position within a family. (It is allowed, though unlikely, for an operator to be used for both @@ -676,7 +676,7 @@
- <structname>pg_amop</> Columns + <structname>pg_amop</structname> Columns @@ -728,8 +728,8 @@ amoppurposechar - Operator purpose, either s for search or - o for ordering + Operator purpose, either s for search or + o for ordering @@ -759,26 +759,26 @@
- A search operator entry indicates that an index of this operator + A search operator entry indicates that an index of this operator family can be searched to find all rows satisfying - WHERE - indexed_column - operator - constant. + WHERE + indexed_column + operator + constant. Obviously, such an operator must return boolean, and its left-hand input type must match the index's column data type. - An ordering operator entry indicates that an index of this + An ordering operator entry indicates that an index of this operator family can be scanned to return rows in the order represented by - ORDER BY - indexed_column - operator - constant. + ORDER BY + indexed_column + operator + constant. Such an operator could return any sortable data type, though again its left-hand input type must match the index's column data type. - The exact semantics of the ORDER BY are specified by the + The exact semantics of the ORDER BY are specified by the amopsortfamily column, which must reference a B-tree operator family for the operator's result type. @@ -787,19 +787,19 @@ At present, it's assumed that the sort order for an ordering operator is the default for the referenced operator family, i.e., ASC NULLS - LAST. This might someday be relaxed by adding additional columns + LAST. This might someday be relaxed by adding additional columns to specify sort options explicitly. - An entry's amopmethod must match the - opfmethod of its containing operator family (including - amopmethod here is an intentional denormalization of the + An entry's amopmethod must match the + opfmethod of its containing operator family (including + amopmethod here is an intentional denormalization of the catalog structure for performance reasons). Also, - amoplefttype and amoprighttype must match - the oprleft and oprright fields of the - referenced pg_operator entry. + amoplefttype and amoprighttype must match + the oprleft and oprright fields of the + referenced pg_operator entry. @@ -880,14 +880,14 @@ The usual interpretation of the - amproclefttype and amprocrighttype fields + amproclefttype and amprocrighttype fields is that they identify the left and right input types of the operator(s) that a particular support procedure supports. For some access methods these match the input data type(s) of the support procedure itself, for - others not. There is a notion of default support procedures for - an index, which are those with amproclefttype and - amprocrighttype both equal to the index operator class's - opcintype. + others not. There is a notion of default support procedures for + an index, which are those with amproclefttype and + amprocrighttype both equal to the index operator class's + opcintype. @@ -909,7 +909,7 @@ - <structname>pg_attrdef</> Columns + <structname>pg_attrdef</structname> Columns @@ -964,7 +964,7 @@ The adsrc field is historical, and is best not used, because it does not track outside changes that might affect the representation of the default value. Reverse-compiling the - adbin field (with pg_get_expr for + adbin field (with pg_get_expr for example) is a better way to display the default value. @@ -993,7 +993,7 @@
- <structname>pg_attribute</> Columns + <structname>pg_attribute</structname> Columns @@ -1072,7 +1072,7 @@ Number of dimensions, if the column is an array type; otherwise 0. (Presently, the number of dimensions of an array is not enforced, - so any nonzero value effectively means it's an array.) + so any nonzero value effectively means it's an array.) @@ -1096,7 +1096,7 @@ supplied at table creation time (for example, the maximum length of a varchar column). It is passed to type-specific input functions and length coercion functions. - The value will generally be -1 for types that do not need atttypmod. + The value will generally be -1 for types that do not need atttypmod. @@ -1105,7 +1105,7 @@ bool - A copy of pg_type.typbyval of this column's type + A copy of pg_type.typbyval of this column's type @@ -1114,7 +1114,7 @@ char - Normally a copy of pg_type.typstorage of this + Normally a copy of pg_type.typstorage of this column's type. For TOAST-able data types, this can be altered after column creation to control storage policy. @@ -1125,7 +1125,7 @@ char - A copy of pg_type.typalign of this column's type + A copy of pg_type.typalign of this column's type @@ -1216,7 +1216,7 @@ text[] - Attribute-level options, as keyword=value strings + Attribute-level options, as keyword=value strings @@ -1225,7 +1225,7 @@ text[] - Attribute-level foreign data wrapper options, as keyword=value strings + Attribute-level foreign data wrapper options, as keyword=value strings @@ -1237,9 +1237,9 @@ In a dropped column's pg_attribute entry, atttypid is reset to zero, but attlen and the other fields copied from - pg_type are still valid. This arrangement is needed + pg_type are still valid. This arrangement is needed to cope with the situation where the dropped column's data type was - later dropped, and so there is no pg_type row anymore. + later dropped, and so there is no pg_type row anymore. attlen and the other fields can be used to interpret the contents of a row of the table. @@ -1256,9 +1256,9 @@ The catalog pg_authid contains information about database authorization identifiers (roles). A role subsumes the concepts - of users and groups. A user is essentially just a - role with the rolcanlogin flag set. Any role (with or - without rolcanlogin) can have other roles as members; see + of users and groups. A user is essentially just a + role with the rolcanlogin flag set. Any role (with or + without rolcanlogin) can have other roles as members; see pg_auth_members. @@ -1283,7 +1283,7 @@
- <structname>pg_authid</> Columns + <structname>pg_authid</structname> Columns @@ -1390,20 +1390,20 @@ For an MD5 encrypted password, rolpassword - column will begin with the string md5 followed by a + column will begin with the string md5 followed by a 32-character hexadecimal MD5 hash. The MD5 hash will be of the user's password concatenated to their user name. For example, if user - joe has password xyzzy, PostgreSQL - will store the md5 hash of xyzzyjoe. + joe has password xyzzy, PostgreSQL + will store the md5 hash of xyzzyjoe. If the password is encrypted with SCRAM-SHA-256, it has the format: -SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey> +SCRAM-SHA-256$<iteration count>:<salt>$<StoredKey>:<ServerKey> - where salt, StoredKey and - ServerKey are in Base64 encoded format. This format is + where salt, StoredKey and + ServerKey are in Base64 encoded format. This format is the same as that specified by RFC 5803. @@ -1435,7 +1435,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_auth_members</> Columns + <structname>pg_auth_members</structname> Columns @@ -1459,7 +1459,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< member oid pg_authid.oid - ID of a role that is a member of roleid + ID of a role that is a member of roleid @@ -1473,8 +1473,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< admin_option bool - True if member can grant membership in - roleid to others + True if member can grant membership in + roleid to others @@ -1501,14 +1501,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< cannot be deduced from some generic rule. For example, casting between a domain and its base type is not explicitly represented in pg_cast. Another important exception is that - automatic I/O conversion casts, those performed using a data - type's own I/O functions to convert to or from text or other + automatic I/O conversion casts, those performed using a data + type's own I/O functions to convert to or from text or other string types, are not explicitly represented in pg_cast.
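Given the castcontext and castmethod codes documented in the table below, a small query sketch that lists the casts applied implicitly in expressions:

-- castcontext 'i' = implicit in expressions; castmethod is 'f', 'i',
-- or 'b' per the column descriptions that follow.
SELECT castsource::regtype AS source,
       casttarget::regtype AS target,
       castmethod
FROM pg_cast
WHERE castcontext = 'i'
ORDER BY source, target;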
- <structname>pg_cast</> Columns + <structname>pg_cast</structname> Columns @@ -1558,11 +1558,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< Indicates what contexts the cast can be invoked in. - e means only as an explicit cast (using - CAST or :: syntax). - a means implicitly in assignment + e means only as an explicit cast (using + CAST or :: syntax). + a means implicitly in assignment to a target column, as well as explicitly. - i means implicitly in expressions, as well as the + i means implicitly in expressions, as well as the other cases. @@ -1572,9 +1572,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< Indicates how the cast is performed. - f means that the function specified in the castfunc field is used. - i means that the input/output functions are used. - b means that the types are binary-coercible, thus no conversion is required. + f means that the function specified in the castfunc field is used. + i means that the input/output functions are used. + b means that the types are binary-coercible, thus no conversion is required. @@ -1586,18 +1586,18 @@ SCRAM-SHA-256$<iteration count>:<salt>< always take the cast source type as their first argument type, and return the cast destination type as their result type. A cast function can have up to three arguments. The second argument, - if present, must be type integer; it receives the type + if present, must be type integer; it receives the type modifier associated with the destination type, or -1 if there is none. The third argument, - if present, must be type boolean; it receives true - if the cast is an explicit cast, false otherwise. + if present, must be type boolean; it receives true + if the cast is an explicit cast, false otherwise. It is legitimate to create a pg_cast entry in which the source and target types are the same, if the associated function takes more than one argument. Such entries represent - length coercion functions that coerce values of the type + length coercion functions that coerce values of the type to be legal for a particular type modifier value. @@ -1624,14 +1624,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< table. This includes indexes (but see also pg_index), sequences (but see also pg_sequence), views, materialized - views, composite types, and TOAST tables; see relkind. + views, composite types, and TOAST tables; see relkind. Below, when we mean all of these kinds of objects we speak of relations. Not all columns are meaningful for all relation types.
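As a quick orientation to the relkind codes listed in the table below, a sketch that tallies the relations in the current database by kind:

-- 'r' = ordinary table, 'i' = index, 'v' = view, and so on, per the
-- relkind description below.
SELECT relkind, count(*)
FROM pg_class
GROUP BY relkind
ORDER BY relkind;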
- <structname>pg_class</> Columns + <structname>pg_class</structname> Columns @@ -1673,7 +1673,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_type.oid The OID of the data type that corresponds to this table's row type, - if any (zero for indexes, which have no pg_type entry) + if any (zero for indexes, which have no pg_type entry) @@ -1706,7 +1706,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< oid Name of the on-disk file of this relation; zero means this - is a mapped relation whose disk file name is determined + is a mapped relation whose disk file name is determined by low-level state @@ -1795,8 +1795,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - p = permanent table, u = unlogged table, - t = temporary table + p = permanent table, u = unlogged table, + t = temporary table @@ -1805,15 +1805,15 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - r = ordinary table, - i = index, - S = sequence, - t = TOAST table, - v = view, - m = materialized view, - c = composite type, - f = foreign table, - p = partitioned table + r = ordinary table, + i = index, + S = sequence, + t = TOAST table, + v = view, + m = materialized view, + c = composite type, + f = foreign table, + p = partitioned table @@ -1834,7 +1834,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< int2 - Number of CHECK constraints on the table; see + Number of CHECK constraints on the table; see pg_constraint catalog @@ -1917,11 +1917,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - Columns used to form replica identity for rows: - d = default (primary key, if any), - n = nothing, - f = all columns - i = index with indisreplident set, or default + Columns used to form replica identity for rows: + d = default (primary key, if any), + n = nothing, + f = all columns + i = index with indisreplident set, or default @@ -1938,9 +1938,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< All transaction IDs before this one have been replaced with a permanent - (frozen) transaction ID in this table. This is used to track + (frozen) transaction ID in this table. This is used to track whether the table needs to be vacuumed in order to prevent transaction - ID wraparound or to allow pg_xact to be shrunk. Zero + ID wraparound or to allow pg_xact to be shrunk. Zero (InvalidTransactionId) if the relation is not a table. @@ -1953,7 +1953,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< All multixact IDs before this one have been replaced by a transaction ID in this table. This is used to track whether the table needs to be vacuumed in order to prevent multixact ID - wraparound or to allow pg_multixact to be shrunk. Zero + wraparound or to allow pg_multixact to be shrunk. Zero (InvalidMultiXactId) if the relation is not a table. @@ -1975,7 +1975,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Access-method-specific options, as keyword=value strings + Access-method-specific options, as keyword=value strings @@ -1993,13 +1993,13 @@ SCRAM-SHA-256$<iteration count>:<salt><
- Several of the Boolean flags in pg_class are maintained + Several of the Boolean flags in pg_class are maintained lazily: they are guaranteed to be true if that's the correct state, but may not be reset to false immediately when the condition is no longer - true. For example, relhasindex is set by + true. For example, relhasindex is set by CREATE INDEX, but it is never cleared by DROP INDEX. Instead, VACUUM clears - relhasindex if it finds the table has no indexes. This + relhasindex if it finds the table has no indexes. This arrangement avoids race conditions and improves concurrency. @@ -2019,7 +2019,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - <structname>pg_collation</> Columns + <structname>pg_collation</structname> Columns @@ -2082,14 +2082,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< collcollate name - LC_COLLATE for this collation object + LC_COLLATE for this collation object collctype name - LC_CTYPE for this collation object + LC_CTYPE for this collation object @@ -2107,27 +2107,27 @@ SCRAM-SHA-256$<iteration count>:<salt><
- Note that the unique key on this catalog is (collname, - collencoding, collnamespace) not just - (collname, collnamespace). + Note that the unique key on this catalog is (collname, + collencoding, collnamespace) not just + (collname, collnamespace). PostgreSQL generally ignores all - collations that do not have collencoding equal to + collations that do not have collencoding equal to either the current database's encoding or -1, and creation of new entries - with the same name as an entry with collencoding = -1 + with the same name as an entry with collencoding = -1 is forbidden. Therefore it is sufficient to use a qualified SQL name - (schema.name) to identify a collation, + (schema.name) to identify a collation, even though this is not unique according to the catalog definition. The reason for defining the catalog this way is that - initdb fills it in at cluster initialization time with + initdb fills it in at cluster initialization time with entries for all locales available on the system, so it must be able to hold entries for all encodings that might ever be used in the cluster. - In the template0 database, it could be useful to create + In the template0 database, it could be useful to create collations whose encoding does not match the database encoding, since they could match the encodings of databases later cloned from - template0. This would currently have to be done manually. + template0. This would currently have to be done manually. @@ -2143,13 +2143,13 @@ SCRAM-SHA-256$<iteration count>:<salt>< key, unique, foreign key, and exclusion constraints on tables. (Column constraints are not treated specially. Every column constraint is equivalent to some table constraint.) - Not-null constraints are represented in the pg_attribute + Not-null constraints are represented in the pg_attribute catalog, not here. User-defined constraint triggers (created with CREATE CONSTRAINT - TRIGGER) also give rise to an entry in this table. + TRIGGER) also give rise to an entry in this table. 
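Since the notes after the table below recommend pg_get_constraintdef() over the raw stored fields, a sketch that lists foreign-key constraints together with their reconstructed definitions:

-- contype 'f' = foreign key, per the code list in the table below.
SELECT conname,
       conrelid::regclass AS table_name,
       pg_get_constraintdef(oid) AS definition
FROM pg_constraint
WHERE contype = 'f'
ORDER BY conname;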
@@ -2157,7 +2157,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - <structname>pg_constraint</> Columns + <structname>pg_constraint</structname> Columns @@ -2198,12 +2198,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - c = check constraint, - f = foreign key constraint, - p = primary key constraint, - u = unique constraint, - t = constraint trigger, - x = exclusion constraint + c = check constraint, + f = foreign key constraint, + p = primary key constraint, + u = unique constraint, + t = constraint trigger, + x = exclusion constraint @@ -2263,11 +2263,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< char Foreign key update action code: - a = no action, - r = restrict, - c = cascade, - n = set null, - d = set default + a = no action, + r = restrict, + c = cascade, + n = set null, + d = set default @@ -2276,11 +2276,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< char Foreign key deletion action code: - a = no action, - r = restrict, - c = cascade, - n = set null, - d = set default + a = no action, + r = restrict, + c = cascade, + n = set null, + d = set default @@ -2289,9 +2289,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< char Foreign key match type: - f = full, - p = partial, - s = simple + f = full, + p = partial, + s = simple @@ -2329,7 +2329,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< conkey int2[] - pg_attribute.attnum + pg_attribute.attnum If a table constraint (including foreign keys, but not constraint triggers), list of the constrained columns @@ -2337,35 +2337,35 @@ SCRAM-SHA-256$<iteration count>:<salt>< confkey int2[] - pg_attribute.attnum + pg_attribute.attnum If a foreign key, list of the referenced columns conpfeqop oid[] - pg_operator.oid + pg_operator.oid If a foreign key, list of the equality operators for PK = FK comparisons conppeqop oid[] - pg_operator.oid + pg_operator.oid If a foreign key, list of the equality operators for PK = PK comparisons conffeqop oid[] - pg_operator.oid + pg_operator.oid If a foreign key, list of the equality operators for FK = FK comparisons conexclop oid[] - pg_operator.oid + pg_operator.oid If an exclusion constraint, list of the per-column exclusion operators @@ -2392,7 +2392,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For other cases, a zero appears in conkey and the associated index must be consulted to discover the expression that is constrained. (conkey thus has the - same contents as pg_index.indkey for the + same contents as pg_index.indkey for the index.) @@ -2400,7 +2400,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< consrc is not updated when referenced objects change; for example, it won't track renaming of columns. Rather than - relying on this field, it's best to use pg_get_constraintdef() + relying on this field, it's best to use pg_get_constraintdef() to extract the definition of a check constraint. @@ -2429,7 +2429,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_conversion</> Columns + <structname>pg_conversion</structname> Columns @@ -2529,7 +2529,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_database</> Columns + <structname>pg_database</structname> Columns @@ -2592,7 +2592,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If true, then this database can be cloned by - any user with CREATEDB privileges; + any user with CREATEDB privileges; if false, then only superusers or the owner of the database can clone it. @@ -2604,7 +2604,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If false then no one can connect to this database. This is - used to protect the template0 database from being altered. + used to protect the template0 database from being altered. @@ -2634,11 +2634,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< All transaction IDs before this one have been replaced with a permanent - (frozen) transaction ID in this database. This is used to + (frozen) transaction ID in this database. This is used to track whether the database needs to be vacuumed in order to prevent - transaction ID wraparound or to allow pg_xact to be shrunk. + transaction ID wraparound or to allow pg_xact to be shrunk. It is the minimum of the per-table - pg_class.relfrozenxid values. + pg_class.relfrozenxid values. @@ -2650,9 +2650,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< All multixact IDs before this one have been replaced with a transaction ID in this database. This is used to track whether the database needs to be vacuumed in order to prevent - multixact ID wraparound or to allow pg_multixact to be shrunk. + multixact ID wraparound or to allow pg_multixact to be shrunk. It is the minimum of the per-table - pg_class.relminmxid values. + pg_class.relminmxid values. @@ -2663,7 +2663,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The default tablespace for the database. Within this database, all tables for which - pg_class.reltablespace is zero + pg_class.reltablespace is zero will be stored in this tablespace; in particular, all the non-shared system catalogs will be there. @@ -2707,7 +2707,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_db_role_setting</> Columns + <structname>pg_db_role_setting</structname> Columns @@ -2754,12 +2754,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_default_acl stores initial + The catalog pg_default_acl stores initial privileges to be assigned to newly created objects.
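Rows in this catalog are created with ALTER DEFAULT PRIVILEGES; a short sketch (the role and schema names are illustrative):

-- Grant SELECT on tables created in schema public from now on, then
-- inspect the resulting pg_default_acl entry.
ALTER DEFAULT PRIVILEGES IN SCHEMA public
    GRANT SELECT ON TABLES TO PUBLIC;

SELECT defaclrole::regrole AS role,
       defaclnamespace::regnamespace AS schema,
       defaclobjtype,
       defaclacl
FROM pg_default_acl;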
- <structname>pg_default_acl</> Columns + <structname>pg_default_acl</structname> Columns @@ -2800,10 +2800,10 @@ SCRAM-SHA-256$<iteration count>:<salt>< Type of object this entry is for: - r = relation (table, view), - S = sequence, - f = function, - T = type + r = relation (table, view), + S = sequence, + f = function, + T = type @@ -2820,21 +2820,21 @@ SCRAM-SHA-256$<iteration count>:<salt><
- A pg_default_acl entry shows the initial privileges to + A pg_default_acl entry shows the initial privileges to be assigned to an object belonging to the indicated user. There are - currently two types of entry: global entries with - defaclnamespace = 0, and per-schema entries + currently two types of entry: global entries with + defaclnamespace = 0, and per-schema entries that reference a particular schema. If a global entry is present then - it overrides the normal hard-wired default privileges + it overrides the normal hard-wired default privileges for the object type. A per-schema entry, if present, represents privileges - to be added to the global or hard-wired default privileges. + to be added to the global or hard-wired default privileges. Note that when an ACL entry in another catalog is null, it is taken to represent the hard-wired default privileges for its object, - not whatever might be in pg_default_acl - at the moment. pg_default_acl is only consulted during + not whatever might be in pg_default_acl + at the moment. pg_default_acl is only consulted during object creation. @@ -2851,9 +2851,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_depend records the dependency relationships between database objects. This information allows - DROP commands to find which other objects must be dropped - by DROP CASCADE or prevent dropping in the DROP - RESTRICT case. + DROP commands to find which other objects must be dropped + by DROP CASCADE or prevent dropping in the DROP + RESTRICT case. @@ -2863,7 +2863,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - <structname>pg_depend</> Columns + <structname>pg_depend</structname> Columns @@ -2896,7 +2896,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a table column, this is the column number (the - objid and classid refer to the + objid and classid refer to the table itself). For all other object types, this column is zero. @@ -2922,7 +2922,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a table column, this is the column number (the - refobjid and refclassid refer + refobjid and refclassid refer to the table itself). For all other object types, this column is zero. @@ -2945,17 +2945,17 @@ SCRAM-SHA-256$<iteration count>:<salt>< In all cases, a pg_depend entry indicates that the referenced object cannot be dropped without also dropping the dependent object. However, there are several subflavors identified by - deptype: + deptype: - DEPENDENCY_NORMAL (n) + DEPENDENCY_NORMAL (n) A normal relationship between separately-created objects. The dependent object can be dropped without affecting the referenced object. The referenced object can only be dropped - by specifying CASCADE, in which case the dependent + by specifying CASCADE, in which case the dependent object is dropped, too. Example: a table column has a normal dependency on its data type. @@ -2963,12 +2963,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< - DEPENDENCY_AUTO (a) + DEPENDENCY_AUTO (a) The dependent object can be dropped separately from the referenced object, and should be automatically dropped - (regardless of RESTRICT or CASCADE + (regardless of RESTRICT or CASCADE mode) if the referenced object is dropped. Example: a named constraint on a table is made autodependent on the table, so that it will go away if the table is dropped. 
@@ -2977,41 +2977,41 @@ - DEPENDENCY_INTERNAL (i) + DEPENDENCY_INTERNAL (i) The dependent object was created as part of creation of the referenced object, and is really just a part of its internal - implementation. A DROP of the dependent object + implementation. A DROP of the dependent object will be disallowed outright (we'll tell the user to issue a - DROP against the referenced object, instead). A - DROP of the referenced object will be propagated + DROP against the referenced object, instead). A + DROP of the referenced object will be propagated through to drop the dependent object whether - CASCADE is specified or not. Example: a trigger + CASCADE is specified or not. Example: a trigger that's created to enforce a foreign-key constraint is made internally dependent on the constraint's - pg_constraint entry. + pg_constraint entry. - DEPENDENCY_EXTENSION (e) + DEPENDENCY_EXTENSION (e) - The dependent object is a member of the extension that is + The dependent object is a member of the extension that is the referenced object (see pg_extension). The dependent object can be dropped only via - DROP EXTENSION on the referenced object. Functionally + DROP EXTENSION on the referenced object. Functionally this dependency type acts the same as an internal dependency, but - it's kept separate for clarity and to simplify pg_dump. + it's kept separate for clarity and to simplify pg_dump. - DEPENDENCY_AUTO_EXTENSION (x) + DEPENDENCY_AUTO_EXTENSION (x) The dependent object is not a member of the extension that is the @@ -3024,7 +3024,7 @@ - DEPENDENCY_PIN (p) + DEPENDENCY_PIN (p) There is no dependent object; this type of entry is a signal @@ -3051,7 +3051,7 @@ - The catalog pg_description stores optional descriptions + The catalog pg_description stores optional descriptions (comments) for each database object. Descriptions can be manipulated with the COMMENT command and viewed with psql's \d commands. @@ -3066,7 +3066,7 @@
- <structname>pg_description</> Columns + <structname>pg_description</structname> Columns @@ -3099,7 +3099,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a comment on a table column, this is the column number (the - objoid and classoid refer to + objoid and classoid refer to the table itself). For all other object types, this column is zero. @@ -3133,7 +3133,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_enum</> Columns + <structname>pg_enum</structname> Columns @@ -3157,7 +3157,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< enumtypid oid pg_type.oid - The OID of the pg_type entry owning this enum value + The OID of the pg_type entry owning this enum value @@ -3191,7 +3191,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< When an enum type is created, its members are assigned sort-order - positions 1..n. But members added later might be given + positions 1..n. But members added later might be given negative or fractional values of enumsortorder. The only requirement on these values is that they be correctly ordered and unique within each enum type. @@ -3212,7 +3212,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
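A sketch of the sort-order behavior just described; the type and labels are illustrative. Members created with the type get integer positions, while a value added later can receive a fractional one:

CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy');  -- positions 1, 2, 3
ALTER TYPE mood ADD VALUE 'meh' BEFORE 'ok';      -- gets e.g. 1.5

SELECT enumlabel, enumsortorder
FROM pg_enum
WHERE enumtypid = 'mood'::regtype
ORDER BY enumsortorder;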
- <structname>pg_event_trigger</> Columns + <structname>pg_event_trigger</structname> Columns @@ -3260,10 +3260,10 @@ SCRAM-SHA-256$<iteration count>:<salt>< Controls in which modes the event trigger fires. - O = trigger fires in origin and local modes, - D = trigger is disabled, - R = trigger fires in replica mode, - A = trigger fires always. + O = trigger fires in origin and local modes, + D = trigger is disabled, + R = trigger fires in replica mode, + A = trigger fires always. @@ -3296,7 +3296,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_extension</> Columns + <structname>pg_extension</structname> Columns @@ -3355,16 +3355,16 @@ SCRAM-SHA-256$<iteration count>:<salt>< extconfig oid[] pg_class.oid - Array of regclass OIDs for the extension's configuration - table(s), or NULL if none + Array of regclass OIDs for the extension's configuration + table(s), or NULL if none extcondition text[] - Array of WHERE-clause filter conditions for the - extension's configuration table(s), or NULL if none + Array of WHERE-clause filter conditions for the + extension's configuration table(s), or NULL if none @@ -3372,7 +3372,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
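A sketch of reading this catalog, anticipating the note below that extnamespace does not schema-qualify the extension name:

-- List installed extensions with their version and the schema that
-- holds (most of) their objects.
SELECT extname,
       extversion,
       extnamespace::regnamespace AS objects_schema,
       extrelocatable
FROM pg_extension;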
- Note that unlike most catalogs with a namespace column, + Note that unlike most catalogs with a namespace column, extnamespace is not meant to imply that the extension belongs to that schema. Extension names are never schema-qualified. Rather, extnamespace @@ -3399,7 +3399,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - <structname>pg_foreign_data_wrapper</> Columns + <structname>pg_foreign_data_wrapper</structname> Columns @@ -3474,7 +3474,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Foreign-data wrapper specific options, as keyword=value strings + Foreign-data wrapper specific options, as keyword=value strings @@ -3498,7 +3498,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_foreign_server</> Columns + <structname>pg_foreign_server</structname> Columns @@ -3570,7 +3570,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Foreign server specific options, as keyword=value strings + Foreign server specific options, as keyword=value strings @@ -3596,7 +3596,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_foreign_table</> Columns + <structname>pg_foreign_table</structname> Columns @@ -3613,7 +3613,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< ftrelid oid pg_class.oid - OID of the pg_class entry for this foreign table + OID of the pg_class entry for this foreign table @@ -3628,7 +3628,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Foreign table options, as keyword=value strings + Foreign table options, as keyword=value strings @@ -3651,7 +3651,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_index</> Columns + <structname>pg_index</structname> Columns @@ -3668,14 +3668,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< indexrelid oid pg_class.oid - The OID of the pg_class entry for this index + The OID of the pg_class entry for this index indrelid oid pg_class.oid - The OID of the pg_class entry for the table this index is for + The OID of the pg_class entry for the table this index is for @@ -3698,7 +3698,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< bool If true, this index represents the primary key of the table - (indisunique should always be true when this is true) + (indisunique should always be true when this is true) @@ -3714,7 +3714,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If true, the uniqueness check is enforced immediately on insertion - (irrelevant if indisunique is not true) + (irrelevant if indisunique is not true) @@ -3731,7 +3731,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If true, the index is currently valid for queries. False means the index is possibly incomplete: it must still be modified by - INSERT/UPDATE operations, but it cannot safely + INSERT/UPDATE operations, but it cannot safely be used for queries. If it is unique, the uniqueness property is not guaranteed true either. @@ -3742,8 +3742,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< bool - If true, queries must not use the index until the xmin - of this pg_index row is below their TransactionXmin + If true, queries must not use the index until the xmin + of this pg_index row is below their TransactionXmin event horizon, because the table may contain broken HOT chains with incompatible rows that they can see @@ -3755,7 +3755,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< If true, the index is currently ready for inserts. False means the - index must be ignored by INSERT/UPDATE + index must be ignored by INSERT/UPDATE operations. @@ -3775,9 +3775,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< bool - If true this index has been chosen as replica identity + If true this index has been chosen as replica identity using ALTER TABLE ... REPLICA IDENTITY USING INDEX - ... + ... @@ -3836,7 +3836,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Expression trees (in nodeToString() representation) for index attributes that are not simple column references. This is a list with one element for each zero - entry in indkey. Null if all index attributes + entry in indkey. Null if all index attributes are simple references. @@ -3866,14 +3866,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_inherits records information about + The catalog pg_inherits records information about table inheritance hierarchies. There is one entry for each direct parent-child table relationship in the database. (Indirect inheritance can be determined by following chains of entries.)
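A sketch of following chains of entries as just described (the parent table name parent_tbl is hypothetical); the catalog's columns are listed below:

-- Collect all direct and indirect children of parent_tbl.
WITH RECURSIVE children AS (
    SELECT inhrelid FROM pg_inherits
    WHERE inhparent = 'parent_tbl'::regclass
  UNION ALL
    SELECT i.inhrelid FROM pg_inherits i
    JOIN children c ON i.inhparent = c.inhrelid
)
SELECT inhrelid::regclass AS descendant FROM children;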
- <structname>pg_inherits</> Columns + <structname>pg_inherits</structname> Columns @@ -3928,7 +3928,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_init_privs records information about + The catalog pg_init_privs records information about the initial privileges of objects in the system. There is one entry for each object in the database which has a non-default (non-NULL) initial set of privileges. @@ -3936,7 +3936,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Objects can have initial privileges either by having those privileges set - when the system is initialized (by initdb) or when the + when the system is initialized (by initdb) or when the object is created during a CREATE EXTENSION and the extension script sets initial privileges using the GRANT system. Note that the system will automatically handle recording of the @@ -3944,12 +3944,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< only use the GRANT and REVOKE statements in their script to have the privileges recorded. The privtype column indicates if the initial privilege was - set by initdb or during a + set by initdb or during a CREATE EXTENSION command. - Objects which have initial privileges set by initdb will + Objects which have initial privileges set by initdb will have entries where privtype is 'i', while objects which have initial privileges set by CREATE EXTENSION will have entries where @@ -3957,7 +3957,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_init_privs</> Columns + <structname>pg_init_privs</structname> Columns @@ -3990,7 +3990,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a table column, this is the column number (the - objoid and classoid refer to the + objoid and classoid refer to the table itself). For all other object types, this column is zero. @@ -4039,7 +4039,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
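A sketch that separates initdb-time from extension-time initial privileges, using the privtype codes described above:

SELECT classoid::regclass AS catalog, objoid, objsubid, privtype
FROM pg_init_privs
ORDER BY privtype, classoid, objoid;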
- <structname>pg_language</> Columns + <structname>pg_language</structname> Columns @@ -4116,7 +4116,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_proc.oid This references a function that is responsible for executing - inline anonymous code blocks + inline anonymous code blocks ( blocks). Zero if inline blocks are not supported. @@ -4162,24 +4162,24 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_largeobject holds the data making up large objects. A large object is identified by an OID assigned when it is created. Each large object is broken into - segments or pages small enough to be conveniently stored as rows + segments or pages small enough to be conveniently stored as rows in pg_largeobject. - The amount of data per page is defined to be LOBLKSIZE (which is currently - BLCKSZ/4, or typically 2 kB). + The amount of data per page is defined to be LOBLKSIZE (which is currently + BLCKSZ/4, or typically 2 kB). - Prior to PostgreSQL 9.0, there was no permission structure + Prior to PostgreSQL 9.0, there was no permission structure associated with large objects. As a result, pg_largeobject was publicly readable and could be used to obtain the OIDs (and contents) of all large objects in the system. This is no longer the case; use - pg_largeobject_metadata + pg_largeobject_metadata to obtain a list of large object OIDs.
- <structname>pg_largeobject</> Columns + <structname>pg_largeobject</structname> Columns @@ -4213,7 +4213,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Actual data stored in the large object. - This will never be more than LOBLKSIZE bytes and might be less. + This will never be more than LOBLKSIZE bytes and might be less. @@ -4223,9 +4223,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< Each row of pg_largeobject holds data for one page of a large object, beginning at - byte offset (pageno * LOBLKSIZE) within the object. The implementation + byte offset (pageno * LOBLKSIZE) within the object. The implementation allows sparse storage: pages might be missing, and might be shorter than - LOBLKSIZE bytes even if they are not the last page of the object. + LOBLKSIZE bytes even if they are not the last page of the object. Missing regions within a large object read as zeroes. @@ -4242,11 +4242,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_largeobject_metadata holds metadata associated with large objects. The actual large object data is stored in - pg_largeobject. + pg_largeobject.
- <structname>pg_largeobject_metadata</> Columns + <structname>pg_largeobject_metadata</structname> Columns @@ -4299,14 +4299,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_namespace stores namespaces. + The catalog pg_namespace stores namespaces. A namespace is the structure underlying SQL schemas: each namespace can have a separate collection of relations, types, etc. without name conflicts.
- <structname>pg_namespace</> Columns + <structname>pg_namespace</structname> Columns @@ -4381,7 +4381,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_opclass</> Columns + <structname>pg_opclass</structname> Columns @@ -4447,14 +4447,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< opcdefault bool - True if this operator class is the default for opcintype + True if this operator class is the default for opcintype opckeytype oid pg_type.oid - Type of data stored in index, or zero if same as opcintype + Type of data stored in index, or zero if same as opcintype @@ -4462,11 +4462,11 @@ SCRAM-SHA-256$<iteration count>:<salt><
- An operator class's opcmethod must match the - opfmethod of its containing operator family. + An operator class's opcmethod must match the + opfmethod of its containing operator family. Also, there must be no more than one pg_opclass - row having opcdefault true for any given combination of - opcmethod and opcintype. + row having opcdefault true for any given combination of + opcmethod and opcintype. @@ -4480,13 +4480,13 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_operator stores information about operators. + The catalog pg_operator stores information about operators. See and for more information. - <structname>pg_operator</> Columns + <structname>pg_operator</structname> Columns @@ -4534,8 +4534,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - b = infix (both), l = prefix - (left), r = postfix (right) + b = infix (both), l = prefix + (left), r = postfix (right) @@ -4632,7 +4632,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Each operator family is a collection of operators and associated support routines that implement the semantics specified for a particular index access method. Furthermore, the operators in a family are all - compatible, in a way that is specified by the access method. + compatible, in a way that is specified by the access method. The operator family concept allows cross-data-type operators to be used with indexes and to be reasoned about using knowledge of access method semantics. @@ -4643,7 +4643,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_opfamily</> Columns + <structname>pg_opfamily</structname> Columns @@ -4720,7 +4720,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_partitioned_table</> Columns + <structname>pg_partitioned_table</structname> Columns @@ -4738,7 +4738,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< partrelid oid pg_class.oid - The OID of the pg_class entry for this partitioned table + The OID of the pg_class entry for this partitioned table @@ -4746,8 +4746,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - Partitioning strategy; l = list partitioned table, - r = range partitioned table + Partitioning strategy; l = list partitioned table, + r = range partitioned table @@ -4763,7 +4763,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< oid pg_class.oid - The OID of the pg_class entry for the default partition + The OID of the pg_class entry for the default partition of this partitioned table, or zero if this partitioned table does not have a default partition. @@ -4813,7 +4813,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Expression trees (in nodeToString() representation) for partition key columns that are not simple column references. This is a list with one element for each zero - entry in partattrs. Null if all partition key columns + entry in partattrs. Null if all partition key columns are simple references. @@ -4833,9 +4833,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_pltemplate stores - template information for procedural languages. + template information for procedural languages. A template for a language allows the language to be created in a - particular database by a simple CREATE LANGUAGE command, + particular database by a simple CREATE LANGUAGE command, with no need to specify implementation details. @@ -4848,7 +4848,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_pltemplate</> Columns + <structname>pg_pltemplate</structname> Columns @@ -4921,7 +4921,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - It is likely that pg_pltemplate will be removed in some + It is likely that pg_pltemplate will be removed in some future release of PostgreSQL, in favor of keeping this knowledge about procedural languages in their respective extension installation scripts. @@ -4944,7 +4944,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< command that it applies to (possibly all commands), the roles that it applies to, the expression to be added as a security-barrier qualification to queries that include the table, and the expression - to be added as a WITH CHECK option for queries that attempt to + to be added as a WITH CHECK option for queries that attempt to add new records to the table. @@ -4982,11 +4982,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< char The command type to which the policy is applied: - r for SELECT, - a for INSERT, - w for UPDATE, - d for DELETE, - or * for all + r for SELECT, + a for INSERT, + w for UPDATE, + d for DELETE, + or * for all @@ -5023,8 +5023,8 @@ SCRAM-SHA-256$<iteration count>:<salt>< - Policies stored in pg_policy are applied only when - pg_class.relrowsecurity is set for + Policies stored in pg_policy are applied only when + pg_class.relrowsecurity is set for their table. @@ -5039,7 +5039,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - The catalog pg_proc stores information about functions (or procedures). + The catalog pg_proc stores information about functions (or procedures). See and for more information. @@ -5051,7 +5051,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_proc</> Columns + <structname>pg_proc</structname> Columns @@ -5106,7 +5106,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< float4 Estimated execution cost (in units of - ); if proretset, + ); if proretset, this is cost per row returned @@ -5114,7 +5114,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< prorows float4 - Estimated number of result rows (zero if not proretset) + Estimated number of result rows (zero if not proretset) @@ -5151,7 +5151,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< prosecdef bool - Function is a security definer (i.e., a setuid + Function is a security definer (i.e., a setuid function) @@ -5195,11 +5195,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< provolatile tells whether the function's result depends only on its input arguments, or is affected by outside factors. - It is i for immutable functions, + It is i for immutable functions, which always deliver the same result for the same inputs. - It is s for stable functions, + It is s for stable functions, whose results (for fixed inputs) do not change within a scan. - It is v for volatile functions, + It is v for volatile functions, whose results might change at any time. (Use v also for functions with side-effects, so that calls to them cannot get optimized away.) @@ -5251,7 +5251,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< An array with the data types of the function arguments. This includes only input arguments (including INOUT and - VARIADIC arguments), and thus represents + VARIADIC arguments), and thus represents the call signature of the function. @@ -5266,7 +5266,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< INOUT arguments); however, if all the arguments are IN arguments, this field will be null. Note that subscripting is 1-based, whereas for historical reasons - proargtypes is subscripted from 0. + proargtypes is subscripted from 0. @@ -5276,15 +5276,15 @@ SCRAM-SHA-256$<iteration count>:<salt>< An array with the modes of the function arguments, encoded as - i for IN arguments, - o for OUT arguments, - b for INOUT arguments, - v for VARIADIC arguments, - t for TABLE arguments. + i for IN arguments, + o for OUT arguments, + b for INOUT arguments, + v for VARIADIC arguments, + t for TABLE arguments. If all the arguments are IN arguments, this field will be null. Note that subscripts correspond to positions of - proallargtypes not proargtypes. + proallargtypes not proargtypes. @@ -5297,7 +5297,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Arguments without a name are set to empty strings in the array. If none of the arguments have a name, this field will be null. Note that subscripts correspond to positions of - proallargtypes not proargtypes. + proallargtypes not proargtypes. @@ -5308,9 +5308,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< Expression trees (in nodeToString() representation) for default values. This is a list with - pronargdefaults elements, corresponding to the last - N input arguments (i.e., the last - N proargtypes positions). + pronargdefaults elements, corresponding to the last + N input arguments (i.e., the last + N proargtypes positions). If none of the arguments have defaults, this field will be null. @@ -5525,7 +5525,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
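A sketch that inspects the volatility codes and planner cost estimates described above (the function name lower is just an example):

SELECT oid::regprocedure AS function, provolatile, procost, prorows
FROM pg_proc
WHERE proname = 'lower';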
- <structname>pg_range</> Columns + <structname>pg_range</structname> Columns @@ -5586,10 +5586,10 @@ SCRAM-SHA-256$<iteration count>:<salt><
- rngsubopc (plus rngcollation, if the + rngsubopc (plus rngcollation, if the element type is collatable) determines the sort ordering used by the range - type. rngcanonical is used when the element type is - discrete. rngsubdiff is optional but should be supplied to + type. rngcanonical is used when the element type is + discrete. rngsubdiff is optional but should be supplied to improve performance of GiST indexes on the range type. @@ -5655,7 +5655,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - <structname>pg_rewrite</> Columns + <structname>pg_rewrite</structname> Columns @@ -5694,9 +5694,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< char - Event type that the rule is for: 1 = SELECT, 2 = - UPDATE, 3 = INSERT, 4 = - DELETE + Event type that the rule is for: 1 = SELECT, 2 = + UPDATE, 3 = INSERT, 4 = + DELETE @@ -5707,10 +5707,10 @@ SCRAM-SHA-256$<iteration count>:<salt>< Controls in which modes the rule fires. - O = rule fires in origin and local modes, - D = rule is disabled, - R = rule fires in replica mode, - A = rule fires always. + O = rule fires in origin and local modes, + D = rule is disabled, + R = rule fires in replica mode, + A = rule fires always. @@ -5809,7 +5809,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a security label on a table column, this is the column number (the - objoid and classoid refer to + objoid and classoid refer to the table itself). For all other object types, this column is zero. @@ -5847,7 +5847,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_sequence</> Columns + <structname>pg_sequence</structname> Columns @@ -5864,7 +5864,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< seqrelid oid pg_class.oid - The OID of the pg_class entry for this sequence + The OID of the pg_class entry for this sequence @@ -5949,7 +5949,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_shdepend</> Columns + <structname>pg_shdepend</structname> Columns @@ -5990,7 +5990,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a table column, this is the column number (the - objid and classid refer to the + objid and classid refer to the table itself). For all other object types, this column is zero. @@ -6027,11 +6027,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< In all cases, a pg_shdepend entry indicates that the referenced object cannot be dropped without also dropping the dependent object. However, there are several subflavors identified by - deptype: + deptype: - SHARED_DEPENDENCY_OWNER (o) + SHARED_DEPENDENCY_OWNER (o) The referenced object (which must be a role) is the owner of the @@ -6041,20 +6041,20 @@ SCRAM-SHA-256$<iteration count>:<salt>< - SHARED_DEPENDENCY_ACL (a) + SHARED_DEPENDENCY_ACL (a) The referenced object (which must be a role) is mentioned in the ACL (access control list, i.e., privileges list) of the - dependent object. (A SHARED_DEPENDENCY_ACL entry is + dependent object. (A SHARED_DEPENDENCY_ACL entry is not made for the owner of the object, since the owner will have - a SHARED_DEPENDENCY_OWNER entry anyway.) + a SHARED_DEPENDENCY_OWNER entry anyway.) - SHARED_DEPENDENCY_POLICY (r) + SHARED_DEPENDENCY_POLICY (r) The referenced object (which must be a role) is mentioned as the @@ -6064,7 +6064,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< - SHARED_DEPENDENCY_PIN (p) + SHARED_DEPENDENCY_PIN (p) There is no dependent object; this type of entry is a signal @@ -6111,7 +6111,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
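A sketch that lists everything a given role is still tied to, classified by the deptype codes above (the role name some_role is hypothetical):

SELECT dbid, classid::regclass AS catalog, objid, deptype
FROM pg_shdepend
WHERE refobjid = 'some_role'::regrole;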
- <structname>pg_shdescription</> Columns + <structname>pg_shdescription</structname> Columns @@ -6235,16 +6235,16 @@ SCRAM-SHA-256$<iteration count>:<salt>< - Normally there is one entry, with stainherit = - false, for each table column that has been analyzed. + Normally there is one entry, with stainherit = + false, for each table column that has been analyzed. If the table has inheritance children, a second entry with - stainherit = true is also created. This row + stainherit = true is also created. This row represents the column's statistics over the inheritance tree, i.e., statistics for the data you'd see with - SELECT column FROM table*, - whereas the stainherit = false row represents + SELECT column FROM table*, + whereas the stainherit = false row represents the results of - SELECT column FROM ONLY table. + SELECT column FROM ONLY table. @@ -6254,7 +6254,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< references the index. No entry is made for an ordinary non-expression index column, however, since it would be redundant with the entry for the underlying table column. Currently, entries for index expressions - always have stainherit = false. + always have stainherit = false. @@ -6281,7 +6281,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_statistic</> Columns + <structname>pg_statistic</structname> Columns @@ -6339,56 +6339,56 @@ SCRAM-SHA-256$<iteration count>:<salt>< A value less than zero is the negative of a multiplier for the number of rows in the table; for example, a column in which about 80% of the values are nonnull and each nonnull value appears about twice on - average could be represented by stadistinct = -0.4. + average could be represented by stadistinct = -0.4. A zero value means the number of distinct values is unknown. - stakindN + stakindN int2 A code number indicating the kind of statistics stored in the - Nth slot of the + Nth slot of the pg_statistic row. - staopN + staopN oid pg_operator.oid An operator used to derive the statistics stored in the - Nth slot. For example, a + Nth slot. For example, a histogram slot would show the < operator that defines the sort order of the data. - stanumbersN + stanumbersN float4[] Numerical statistics of the appropriate kind for the - Nth slot, or null if the slot + Nth slot, or null if the slot kind does not involve numerical values - stavaluesN + stavaluesN anyarray Column data values of the appropriate kind for the - Nth slot, or null if the slot + Nth slot, or null if the slot kind does not store any data values. Each array's element values are actually of the specific column's data type, or a related type such as an array's element type, so there is no way to define - these columns' type more specifically than anyarray. + these columns' type more specifically than anyarray. @@ -6407,12 +6407,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< The catalog pg_statistic_ext holds extended planner statistics. - Each row in this catalog corresponds to a statistics object + Each row in this catalog corresponds to a statistics object created with .
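A sketch of creating such a statistics object (the table t1 and columns a, b are hypothetical); a subsequent ANALYZE fills the statistic fields listed below:

CREATE STATISTICS s1 (ndistinct, dependencies) ON a, b FROM t1;
ANALYZE t1;
SELECT stxname, stxkind FROM pg_statistic_ext WHERE stxname = 's1';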
- <structname>pg_statistic_ext</> Columns + <structname>pg_statistic_ext</structname> Columns @@ -6485,7 +6485,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_ndistinct - N-distinct counts, serialized as pg_ndistinct type + N-distinct counts, serialized as pg_ndistinct type @@ -6495,7 +6495,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Functional dependency statistics, serialized - as pg_dependencies type + as pg_dependencies type @@ -6507,7 +6507,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The stxkind field is filled at creation of the statistics object, indicating which statistic type(s) are desired. The fields after it are initially NULL and are filled only when the - corresponding statistic has been computed by ANALYZE. + corresponding statistic has been computed by ANALYZE. @@ -6677,10 +6677,10 @@ SCRAM-SHA-256$<iteration count>:<salt>< State code: - i = initialize, - d = data is being copied, - s = synchronized, - r = ready (normal replication) + i = initialize, + d = data is being copied, + s = synchronized, + r = ready (normal replication) @@ -6689,7 +6689,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_lsn - End LSN for s and r states. + End LSN for s and r states. @@ -6718,7 +6718,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_tablespace</> Columns + <structname>pg_tablespace</structname> Columns @@ -6769,7 +6769,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - Tablespace-level options, as keyword=value strings + Tablespace-level options, as keyword=value strings @@ -6792,7 +6792,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_transform</> Columns + <structname>pg_transform</structname> Columns @@ -6861,7 +6861,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_trigger</> Columns + <structname>pg_trigger</structname> Columns @@ -6916,10 +6916,10 @@ SCRAM-SHA-256$<iteration count>:<salt>< Controls in which modes the trigger fires. - O = trigger fires in origin and local modes, - D = trigger is disabled, - R = trigger fires in replica mode, - A = trigger fires always. + O = trigger fires in origin and local modes, + D = trigger is disabled, + R = trigger fires in replica mode, + A = trigger fires always. @@ -6928,7 +6928,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< bool True if trigger is internally generated (usually, to enforce - the constraint identified by tgconstraint) + the constraint identified by tgconstraint) @@ -6950,7 +6950,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< tgconstraint oid pg_constraint.oid - The pg_constraint entry associated with the trigger, if any + The pg_constraint entry associated with the trigger, if any @@ -6994,7 +6994,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_node_tree Expression tree (in nodeToString() - representation) for the trigger's WHEN condition, or null + representation) for the trigger's WHEN condition, or null if none @@ -7002,7 +7002,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< tgoldtable name - REFERENCING clause name for OLD TABLE, + REFERENCING clause name for OLD TABLE, or null if none @@ -7010,7 +7010,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< tgnewtable name - REFERENCING clause name for NEW TABLE, + REFERENCING clause name for NEW TABLE, or null if none @@ -7019,18 +7019,18 @@ SCRAM-SHA-256$<iteration count>:<salt>< Currently, column-specific triggering is supported only for - UPDATE events, and so tgattr is relevant + UPDATE events, and so tgattr is relevant only for that event type. tgtype might contain bits for other event types as well, but those are presumed - to be table-wide regardless of what is in tgattr. + to be table-wide regardless of what is in tgattr. - When tgconstraint is nonzero, - tgconstrrelid, tgconstrindid, - tgdeferrable, and tginitdeferred are - largely redundant with the referenced pg_constraint entry. + When tgconstraint is nonzero, + tgconstrrelid, tgconstrindid, + tgdeferrable, and tginitdeferred are + largely redundant with the referenced pg_constraint entry. However, it is possible for a non-deferrable trigger to be associated with a deferrable constraint: foreign key constraints can have some deferrable and some non-deferrable triggers. @@ -7070,7 +7070,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
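A sketch listing a table's non-internal triggers with their firing mode, per the tgenabled codes above (the table name mytable is hypothetical):

SELECT tgname, tgenabled, tgconstraint
FROM pg_trigger
WHERE tgrelid = 'mytable'::regclass
  AND NOT tgisinternal;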
- <structname>pg_ts_config</> Columns + <structname>pg_ts_config</structname> Columns @@ -7145,7 +7145,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_ts_config_map</> Columns + <structname>pg_ts_config_map</structname> Columns @@ -7162,7 +7162,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< mapcfg oid pg_ts_config.oid - The OID of the pg_ts_config entry owning this map entry + The OID of the pg_ts_config entry owning this map entry @@ -7177,7 +7177,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< integer Order in which to consult this entry (lower - mapseqnos first) + mapseqnos first) @@ -7206,7 +7206,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< needed; the dictionary itself provides values for the user-settable parameters supported by the template. This division of labor allows dictionaries to be created by unprivileged users. The parameters - are specified by a text string dictinitoption, + are specified by a text string dictinitoption, whose format and meaning vary depending on the template. @@ -7216,7 +7216,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_ts_dict</> Columns + <structname>pg_ts_dict</structname> Columns @@ -7299,7 +7299,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_ts_parser</> Columns + <structname>pg_ts_parser</structname> Columns @@ -7396,7 +7396,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_ts_template</> Columns + <structname>pg_ts_template</structname> Columns @@ -7470,7 +7470,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_type</> Columns + <structname>pg_type</structname> Columns @@ -7521,7 +7521,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< For a fixed-size type, typlen is the number of bytes in the internal representation of the type. But for a variable-length type, typlen is negative. - -1 indicates a varlena type (one that has a length word), + -1 indicates a varlena type (one that has a length word), -2 indicates a null-terminated C string. @@ -7566,7 +7566,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< typcategory is an arbitrary classification of data types that is used by the parser to determine which implicit - casts should be preferred. + casts should be preferred. See . @@ -7711,7 +7711,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< typalign is the alignment required when storing a value of this type. It applies to storage on disk as well as most representations of the value inside - PostgreSQL. + PostgreSQL. When multiple values are stored consecutively, such as in the representation of a complete row on disk, padding is inserted before a datum of this type so that it begins on the @@ -7723,16 +7723,16 @@ SCRAM-SHA-256$<iteration count>:<salt>< Possible values are: - c = char alignment, i.e., no alignment needed. + c = char alignment, i.e., no alignment needed. - s = short alignment (2 bytes on most machines). + s = short alignment (2 bytes on most machines). - i = int alignment (4 bytes on most machines). + i = int alignment (4 bytes on most machines). - d = double alignment (8 bytes on many machines, but by no means all). + d = double alignment (8 bytes on many machines, but by no means all). @@ -7757,24 +7757,24 @@ SCRAM-SHA-256$<iteration count>:<salt>< Possible values are - p: Value must always be stored plain. + p: Value must always be stored plain. - e: Value can be stored in a secondary + e: Value can be stored in a secondary relation (if relation has one, see pg_class.reltoastrelid). - m: Value can be stored compressed inline. + m: Value can be stored compressed inline. - x: Value can be stored compressed inline or stored in secondary storage. + x: Value can be stored compressed inline or stored in secondary storage. - Note that m columns can also be moved out to secondary - storage, but only as a last resort (e and x columns are + Note that m columns can also be moved out to secondary + storage, but only as a last resort (e and x columns are moved first). @@ -7805,9 +7805,9 @@ SCRAM-SHA-256$<iteration count>:<salt>< int4 - Domains use typtypmod to record the typmod + Domains use typtypmod to record the typmod to be applied to their base type (-1 if base type does not use a - typmod). -1 if this type is not a domain. + typmod). -1 if this type is not a domain. @@ -7817,7 +7817,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< typndims is the number of array dimensions - for a domain over an array (that is, typbasetype is + for a domain over an array (that is, typbasetype is an array type). Zero for types other than domains over array types. @@ -7842,7 +7842,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< pg_node_tree - If typdefaultbin is not null, it is the + If typdefaultbin is not null, it is the nodeToString() representation of a default expression for the type. This is only used for domains. @@ -7854,12 +7854,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< text - typdefault is null if the type has no associated - default value. If typdefaultbin is not null, - typdefault must contain a human-readable version of the - default expression represented by typdefaultbin. 
If - typdefaultbin is null and typdefault is - not, then typdefault is the external representation of + typdefault is null if the type has no associated + default value. If typdefaultbin is not null, + typdefault must contain a human-readable version of the + default expression represented by typdefaultbin. If + typdefaultbin is null and typdefault is + not, then typdefault is the external representation of the type's default value, which can be fed to the type's input converter to produce a constant. @@ -7882,13 +7882,13 @@ SCRAM-SHA-256$<iteration count>:<salt>< lists the system-defined values - of typcategory. Any future additions to this list will + of typcategory. Any future additions to this list will also be upper-case ASCII letters. All other ASCII characters are reserved for user-defined categories.
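A sketch that tallies the types in each category, using the codes in the table below:

SELECT typcategory, count(*) AS n_types
FROM pg_type
GROUP BY typcategory
ORDER BY typcategory;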
- <structfield>typcategory</> Codes + <structfield>typcategory</structfield> Codes @@ -7957,7 +7957,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< X - unknown type + unknown type @@ -7982,7 +7982,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_user_mapping</> Columns + <structname>pg_user_mapping</structname> Columns @@ -8023,7 +8023,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< text[] - User mapping specific options, as keyword=value strings + User mapping specific options, as keyword=value strings @@ -8241,7 +8241,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_available_extensions</> Columns + <structname>pg_available_extensions</structname> Columns @@ -8303,7 +8303,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
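A minimal sketch comparing the extensions available for installation against those installed in the current database:

SELECT name, default_version, installed_version
FROM pg_available_extensions
ORDER BY name;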
- <structname>pg_available_extension_versions</> Columns + <structname>pg_available_extension_versions</structname> Columns @@ -8385,11 +8385,11 @@ SCRAM-SHA-256$<iteration count>:<salt>< The view pg_config describes the compile-time configuration parameters of the currently installed - version of PostgreSQL. It is intended, for example, to + version of PostgreSQL. It is intended, for example, to be used by software packages that want to interface to - PostgreSQL to facilitate finding the required header + PostgreSQL to facilitate finding the required header files and libraries. It provides the same basic information as the - pg_config PostgreSQL client + pg_config PostgreSQL client application. @@ -8399,7 +8399,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_config</> Columns + <structname>pg_config</structname> Columns @@ -8470,15 +8470,15 @@ SCRAM-SHA-256$<iteration count>:<salt>< Cursors are used internally to implement some of the components - of PostgreSQL, such as procedural languages. - Therefore, the pg_cursors view might include cursors + of PostgreSQL, such as procedural languages. + Therefore, the pg_cursors view might include cursors that have not been explicitly created by the user.
- <structname>pg_cursors</> Columns + <structname>pg_cursors</structname> Columns @@ -8526,7 +8526,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< is_scrollable boolean - true if the cursor is scrollable (that is, it + true if the cursor is scrollable (that is, it allows rows to be retrieved in a nonsequential manner); false otherwise @@ -8557,16 +8557,16 @@ SCRAM-SHA-256$<iteration count>:<salt>< The view pg_file_settings provides a summary of the contents of the server's configuration file(s). A row appears in - this view for each name = value entry appearing in the files, + this view for each name = value entry appearing in the files, with annotations indicating whether the value could be applied successfully. Additional row(s) may appear for problems not linked to - a name = value entry, such as syntax errors in the files. + a name = value entry, such as syntax errors in the files. This view is helpful for checking whether planned changes in the configuration files will work, or for diagnosing a previous failure. - Note that this view reports on the current contents of the + Note that this view reports on the current contents of the files, not on what was last applied by the server. (The pg_settings view is usually sufficient to determine that.) @@ -8578,7 +8578,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
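A sketch that surfaces configuration-file entries the server could not apply, using the applied and error columns described below:

SELECT sourcefile, sourceline, name, setting, error
FROM pg_file_settings
WHERE NOT applied;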
- <structname>pg_file_settings</> Columns + <structname>pg_file_settings</structname> Columns @@ -8604,7 +8604,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< seqno integer - Order in which the entries are processed (1..n) + Order in which the entries are processed (1..n) name @@ -8634,14 +8634,14 @@ SCRAM-SHA-256$<iteration count>:<salt>< If the configuration file contains syntax errors or invalid parameter names, the server will not attempt to apply any settings from it, and - therefore all the applied fields will read as false. + therefore all the applied fields will read as false. In such a case there will be one or more rows with non-null error fields indicating the problem(s). Otherwise, individual settings will be applied if possible. If an individual setting cannot be applied (e.g., invalid value, or the setting cannot be changed after server start) it will have an appropriate message in the error field. Another way that - an entry might have applied = false is that it is + an entry might have applied = false is that it is overridden by a later entry for the same parameter name; this case is not considered an error so nothing appears in the error field. @@ -8666,12 +8666,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< compatibility: it emulates a catalog that existed in PostgreSQL before version 8.1. It shows the names and members of all roles that are marked as not - rolcanlogin, which is an approximation to the set + rolcanlogin, which is an approximation to the set of roles that are being used as groups.
- <structname>pg_group</> Columns + <structname>pg_group</structname> Columns @@ -8720,7 +8720,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< The view pg_hba_file_rules provides a summary of the contents of the client authentication configuration - file, pg_hba.conf. A row appears in this view for each + file, pg_hba.conf. A row appears in this view for each non-empty, non-comment line in the file, with annotations indicating whether the rule could be applied successfully. @@ -8728,7 +8728,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< This view can be helpful for checking whether planned changes in the authentication configuration file will work, or for diagnosing a previous - failure. Note that this view reports on the current contents + failure. Note that this view reports on the current contents of the file, not on what was last loaded by the server. @@ -8738,7 +8738,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
- <structname>pg_hba_file_rules</> Columns + <structname>pg_hba_file_rules</structname> Columns @@ -8753,7 +8753,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< line_number integer - Line number of this rule in pg_hba.conf + Line number of this rule in pg_hba.conf @@ -8809,7 +8809,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Usually, a row reflecting an incorrect entry will have values for only - the line_number and error fields. + the line_number and error fields. @@ -8831,7 +8831,7 @@ SCRAM-SHA-256$<iteration count>:<salt><
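A sketch that checks for pg_hba.conf lines the server could not parse:

SELECT line_number, error
FROM pg_hba_file_rules
WHERE error IS NOT NULL;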
- <structname>pg_indexes</> Columns + <structname>pg_indexes</structname> Columns @@ -8912,12 +8912,12 @@ SCRAM-SHA-256$<iteration count>:<salt>< in the same way as in pg_description or pg_depend). Also, the right to extend a relation is represented as a separate lockable object. - Also, advisory locks can be taken on numbers that have + Also, advisory locks can be taken on numbers that have user-defined meanings.
- <structname>pg_locks</> Columns + <structname>pg_locks</structname> Columns @@ -8935,15 +8935,15 @@ SCRAM-SHA-256$<iteration count>:<salt>< Type of the lockable object: - relation, - extend, - page, - tuple, - transactionid, - virtualxid, - object, - userlock, or - advisory + relation, + extend, + page, + tuple, + transactionid, + virtualxid, + object, + userlock, or + advisory @@ -9025,7 +9025,7 @@ SCRAM-SHA-256$<iteration count>:<salt>< Column number targeted by the lock (the - classid and objid refer to the + classid and objid refer to the table itself), or zero if the target is some other general database object, or null if the target is not a general database object @@ -9107,23 +9107,23 @@ SCRAM-SHA-256$<iteration count>:<salt>< Advisory locks can be acquired on keys consisting of either a single bigint value or two integer values. A bigint key is displayed with its - high-order half in the classid column, its low-order half - in the objid column, and objsubid equal + high-order half in the classid column, its low-order half + in the objid column, and objsubid equal to 1. The original bigint value can be reassembled with the expression (classid::bigint << 32) | objid::bigint. Integer keys are displayed with the first key in the - classid column, the second key in the objid - column, and objsubid equal to 2. The actual meaning of + classid column, the second key in the objid + column, and objsubid equal to 2. The actual meaning of the keys is up to the user. Advisory locks are local to each database, - so the database column is meaningful for an advisory lock. + so the database column is meaningful for an advisory lock. pg_locks provides a global view of all locks in the database cluster, not only those relevant to the current database. Although its relation column can be joined - against pg_class.oid to identify locked + against pg_class.oid to identify locked relations, this will only work correctly for relations in the current database (those for which the database column is either the current database's OID or zero). @@ -9141,7 +9141,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_stat_activity psa ON pl.pid = psa.pid; Also, if you are using prepared transactions, the - virtualtransaction column can be joined to the + virtualtransaction column can be joined to the transaction column of the pg_prepared_xacts view to get more information on prepared transactions that hold locks. @@ -9163,7 +9163,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx information about which processes are ahead of which others in lock wait queues, nor information about which processes are parallel workers running on behalf of which other client sessions. It is better to use - the pg_blocking_pids() function + the pg_blocking_pids() function (see ) to identify which process(es) a waiting process is blocked behind. @@ -9172,10 +9172,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The pg_locks view displays data from both the regular lock manager and the predicate lock manager, which are separate systems; in addition, the regular lock manager subdivides its - locks into regular and fast-path locks. + locks into regular and fast-path locks. This data is not guaranteed to be entirely consistent. 
When the view is queried, - data on fast-path locks (with fastpath = true) + data on fast-path locks (with fastpath = true) is gathered from each backend one at a time, without freezing the state of the entire lock manager, so it is possible for locks to be taken or released while information is gathered. Note, however, that these locks are @@ -9218,7 +9218,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
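A sketch showing how a two-key advisory lock appears in the columns described above:

SELECT pg_advisory_lock(1, 2);
SELECT locktype, classid, objid, objsubid, fastpath
FROM pg_locks
WHERE locktype = 'advisory';
SELECT pg_advisory_unlock(1, 2);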
- <structname>pg_matviews</> Columns + <structname>pg_matviews</structname> Columns @@ -9291,7 +9291,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_policies</> Columns + <structname>pg_policies</structname> Columns @@ -9381,7 +9381,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_prepared_statements</> Columns + <structname>pg_prepared_statements</structname> Columns @@ -9467,7 +9467,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_prepared_xacts</> Columns + <structname>pg_prepared_xacts</structname> Columns @@ -9706,7 +9706,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx slot_type text - The slot type - physical or logical + The slot type - physical or logical @@ -9787,7 +9787,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The address (LSN) up to which the logical slot's consumer has confirmed receiving data. Data older than this is - not available anymore. NULL for physical slots. + not available anymore. NULL for physical slots. @@ -9817,7 +9817,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
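A sketch reviewing replication slots and consumer progress; as noted above, confirmed_flush_lsn is null for physical slots:

SELECT slot_name, slot_type, active, confirmed_flush_lsn
FROM pg_replication_slots;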
- <structname>pg_roles</> Columns + <structname>pg_roles</structname> Columns @@ -9900,7 +9900,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx rolpassword text - Not the password (always reads as ********) + Not the password (always reads as ********) @@ -9953,7 +9953,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_rules</> Columns + <structname>pg_rules</structname> Columns @@ -9994,9 +9994,9 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- The pg_rules view excludes the ON SELECT rules + The pg_rules view excludes the ON SELECT rules of views and materialized views; those can be seen in - pg_views and pg_matviews. + pg_views and pg_matviews. @@ -10011,11 +10011,11 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The view pg_seclabels provides information about security labels. It is an easier-to-query version of the - pg_seclabel catalog. + pg_seclabel catalog. - <structname>pg_seclabels</> Columns + <structname>pg_seclabels</structname> Columns @@ -10045,7 +10045,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx For a security label on a table column, this is the column number (the - objoid and classoid refer to + objoid and classoid refer to the table itself). For all other object types, this column is zero. @@ -10105,7 +10105,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_sequences</> Columns + <structname>pg_sequences</structname> Columns @@ -10206,12 +10206,12 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx interface to the and commands. It also provides access to some facts about each parameter that are - not directly available from SHOW, such as minimum and + not directly available from SHOW, such as minimum and maximum values.
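A sketch retrieving facts that SHOW alone does not expose, drawn from the columns described below (work_mem is just an example):

SELECT name, setting, unit, min_val, max_val, context
FROM pg_settings
WHERE name = 'work_mem';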
- <structname>pg_settings</> Columns + <structname>pg_settings</structname> Columns @@ -10260,8 +10260,8 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx vartype text - Parameter type (bool, enum, - integer, real, or string) + Parameter type (bool, enum, + integer, real, or string) @@ -10306,7 +10306,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx values set from sources other than configuration files, or when examined by a user who is neither a superuser nor a member of pg_read_all_settings); helpful when using - include directives in configuration files + include directives in configuration files sourceline @@ -10384,7 +10384,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx Changes to these settings can be made in postgresql.conf without restarting the server. They can also be set for a particular session in the connection request - packet (for example, via libpq's PGOPTIONS + packet (for example, via libpq's PGOPTIONS environment variable), but only if the connecting user is a superuser. However, these settings never change in a session after it is started. If you change them in postgresql.conf, send a @@ -10402,7 +10402,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx Changes to these settings can be made in postgresql.conf without restarting the server. They can also be set for a particular session in the connection request - packet (for example, via libpq's PGOPTIONS + packet (for example, via libpq's PGOPTIONS environment variable); any user can make such a change for their session. However, these settings never change in a session after it is started. If you change them in postgresql.conf, send a @@ -10418,10 +10418,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx These settings can be set from postgresql.conf, - or within a session via the SET command; but only superusers - can change them via SET. Changes in + or within a session via the SET command; but only superusers + can change them via SET. Changes in postgresql.conf will affect existing sessions - only if no session-local value has been established with SET. + only if no session-local value has been established with SET. @@ -10431,10 +10431,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx These settings can be set from postgresql.conf, - or within a session via the SET command. Any user is + or within a session via the SET command. Any user is allowed to change their session-local value. Changes in postgresql.conf will affect existing sessions - only if no session-local value has been established with SET. + only if no session-local value has been established with SET. @@ -10473,7 +10473,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx compatibility: it emulates a catalog that existed in PostgreSQL before version 8.1. It shows properties of all roles that are marked as - rolcanlogin in + rolcanlogin in pg_authid.
- <structname>pg_shadow</> Columns + <structname>pg_shadow</structname> Columns @@ -10600,7 +10600,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_stats</> Columns + <structname>pg_stats</structname> Columns @@ -10663,7 +10663,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx If greater than zero, the estimated number of distinct values in the column. If less than zero, the negative of the number of distinct values divided by the number of rows. (The negated form is used when - ANALYZE believes that the number of distinct values is + ANALYZE believes that the number of distinct values is likely to increase as the table grows; the positive form is used when the column seems to have a fixed number of possible values.) For example, -1 indicates a unique column in which the number of distinct @@ -10699,10 +10699,10 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx A list of values that divide the column's values into groups of approximately equal population. The values in - most_common_vals, if present, are omitted from this + most_common_vals, if present, are omitted from this histogram calculation. (This column is null if the column data type - does not have a < operator or if the - most_common_vals list accounts for the entire + does not have a < operator or if the + most_common_vals list accounts for the entire population.) @@ -10717,7 +10717,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx When the value is near -1 or +1, an index scan on the column will be estimated to be cheaper than when it is near zero, due to reduction of random access to the disk. (This column is null if the column data - type does not have a < operator.) + type does not have a < operator.) @@ -10761,7 +10761,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The maximum number of entries in the array fields can be controlled on a - column-by-column basis using the ALTER TABLE SET STATISTICS + column-by-column basis using the ALTER TABLE SET STATISTICS command, or globally by setting the run-time parameter. @@ -10781,7 +10781,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
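A sketch reading one table's per-column statistics, including the signed n_distinct encoding explained above (schema and table names are hypothetical):

SELECT attname, null_frac, n_distinct, correlation
FROM pg_stats
WHERE schemaname = 'public' AND tablename = 'mytable';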
- <structname>pg_tables</> Columns + <structname>pg_tables</structname> Columns @@ -10862,7 +10862,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_timezone_abbrevs</> Columns + <structname>pg_timezone_abbrevs</structname> Columns @@ -10910,7 +10910,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx The view pg_timezone_names provides a list - of time zone names that are recognized by SET TIMEZONE, + of time zone names that are recognized by SET TIMEZONE, along with their associated abbreviations, UTC offsets, and daylight-savings status. (Technically, PostgreSQL does not use UTC because leap @@ -10919,11 +10919,11 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx Unlike the abbreviations shown in pg_timezone_abbrevs, many of these names imply a set of daylight-savings transition date rules. Therefore, the associated information changes across local DST boundaries. The displayed information is computed based on the current - value of CURRENT_TIMESTAMP. + value of CURRENT_TIMESTAMP.
- <structname>pg_timezone_names</> Columns + <structname>pg_timezone_names</structname> Columns @@ -10976,7 +10976,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
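A sketch looking up one zone's current abbreviation, offset, and DST status (the zone name is just an example):

SELECT name, abbrev, utc_offset, is_dst
FROM pg_timezone_names
WHERE name = 'Europe/Stockholm';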
- <structname>pg_user</> Columns + <structname>pg_user</structname> Columns @@ -11032,7 +11032,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx passwd text - Not the password (always reads as ********) + Not the password (always reads as ********) @@ -11069,7 +11069,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_user_mappings</> Columns + <structname>pg_user_mappings</structname> Columns @@ -11126,7 +11126,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx text[] - User mapping specific options, as keyword=value strings + User mapping specific options, as keyword=value strings @@ -11141,12 +11141,12 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx current user is the user being mapped, and owns the server or - holds USAGE privilege on it + holds USAGE privilege on it - current user is the server owner and mapping is for PUBLIC + current user is the server owner and mapping is for PUBLIC @@ -11173,7 +11173,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
- <structname>pg_views</> Columns + <structname>pg_views</structname> Columns diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml index 63f7de5b43..3874a3f1ea 100644 --- a/doc/src/sgml/charset.sgml +++ b/doc/src/sgml/charset.sgml @@ -35,12 +35,12 @@ Locale Support - locale + locale - Locale support refers to an application respecting + Locale support refers to an application respecting cultural preferences regarding alphabets, sorting, number - formatting, etc. PostgreSQL uses the standard ISO + formatting, etc. PostgreSQL uses the standard ISO C and POSIX locale facilities provided by the server operating system. For additional information refer to the documentation of your system. @@ -67,14 +67,14 @@ initdb --locale=sv_SE This example for Unix systems sets the locale to Swedish - (sv) as spoken - in Sweden (SE). Other possibilities might include - en_US (U.S. English) and fr_CA (French + (sv) as spoken + in Sweden (SE). Other possibilities might include + en_US (U.S. English) and fr_CA (French Canadian). If more than one character set can be used for a locale then the specifications can take the form - language_territory.codeset. For example, - fr_BE.UTF-8 represents the French language (fr) as - spoken in Belgium (BE), with a UTF-8 character set + language_territory.codeset. For example, + fr_BE.UTF-8 represents the French language (fr) as + spoken in Belgium (BE), with a UTF-8 character set encoding. @@ -82,9 +82,9 @@ initdb --locale=sv_SE What locales are available on your system under what names depends on what was provided by the operating system vendor and what was installed. On most Unix systems, the command - locale -a will provide a list of available locales. - Windows uses more verbose locale names, such as German_Germany - or Swedish_Sweden.1252, but the principles are the same. + locale -a will provide a list of available locales. + Windows uses more verbose locale names, such as German_Germany + or Swedish_Sweden.1252, but the principles are the same. @@ -97,28 +97,28 @@ initdb --locale=sv_SE - LC_COLLATE - String sort order + LC_COLLATE + String sort order - LC_CTYPE - Character classification (What is a letter? Its upper-case equivalent?) + LC_CTYPE + Character classification (What is a letter? Its upper-case equivalent?) - LC_MESSAGES - Language of messages + LC_MESSAGES + Language of messages - LC_MONETARY - Formatting of currency amounts + LC_MONETARY + Formatting of currency amounts - LC_NUMERIC - Formatting of numbers + LC_NUMERIC + Formatting of numbers - LC_TIME - Formatting of dates and times + LC_TIME + Formatting of dates and times @@ -133,8 +133,8 @@ initdb --locale=sv_SE If you want the system to behave as if it had no locale support, - use the special locale name C, or equivalently - POSIX. + use the special locale name C, or equivalently + POSIX. @@ -192,14 +192,14 @@ initdb --locale=sv_SE settings for the purpose of setting the language of messages. If in doubt, please refer to the documentation of your operating system, in particular the documentation about - gettext. + gettext. To enable messages to be translated to the user's preferred language, NLS must have been selected at build time - (configure --enable-nls). All other locale support is + (configure --enable-nls). All other locale support is built in automatically. 
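A sketch of inspecting and changing the message locale at run time; the locale name is an example, must be installed on the server, and changing it this way requires superuser rights:

SHOW lc_messages;
SET lc_messages = 'sv_SE.UTF-8';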
@@ -213,63 +213,63 @@ initdb --locale=sv_SE - Sort order in queries using ORDER BY or the standard + Sort order in queries using ORDER BY or the standard comparison operators on textual data - ORDER BY and locales + ORDER BY and locales - The upper, lower, and initcap + The upper, lower, and initcap functions - upper and locales - lower and locales + upper and locales + lower and locales - Pattern matching operators (LIKE, SIMILAR TO, + Pattern matching operators (LIKE, SIMILAR TO, and POSIX-style regular expressions); locales affect both case insensitive matching and the classification of characters by character-class regular expressions - LIKE and locales - regular expressions and locales + LIKE and locales + regular expressions and locales - The to_char family of functions - to_char and locales + The to_char family of functions + to_char and locales - The ability to use indexes with LIKE clauses + The ability to use indexes with LIKE clauses - The drawback of using locales other than C or - POSIX in PostgreSQL is its performance + The drawback of using locales other than C or + POSIX in PostgreSQL is its performance impact. It slows character handling and prevents ordinary indexes - from being used by LIKE. For this reason use locales + from being used by LIKE. For this reason use locales only if you actually need them. - As a workaround to allow PostgreSQL to use indexes - with LIKE clauses under a non-C locale, several custom + As a workaround to allow PostgreSQL to use indexes + with LIKE clauses under a non-C locale, several custom operator classes exist. These allow the creation of an index that performs a strict character-by-character comparison, ignoring locale comparison rules. Refer to for more information. Another approach is to create indexes using - the C collation, as discussed in + the C collation, as discussed in . @@ -286,20 +286,20 @@ initdb --locale=sv_SE - Check that PostgreSQL is actually using the locale - that you think it is. The LC_COLLATE and LC_CTYPE + Check that PostgreSQL is actually using the locale + that you think it is. The LC_COLLATE and LC_CTYPE settings are determined when a database is created, and cannot be changed except by creating a new database. Other locale - settings including LC_MESSAGES and LC_MONETARY + settings including LC_MESSAGES and LC_MONETARY are initially determined by the environment the server is started in, but can be changed on-the-fly. You can check the active locale - settings using the SHOW command. + settings using the SHOW command. - The directory src/test/locale in the source + The directory src/test/locale in the source distribution contains a test suite for - PostgreSQL's locale support. + PostgreSQL's locale support. @@ -313,7 +313,7 @@ initdb --locale=sv_SE Maintaining catalogs of message translations requires the on-going efforts of many volunteers that want to see - PostgreSQL speak their preferred language well. + PostgreSQL speak their preferred language well. If messages in your language are currently not available or not fully translated, your assistance would be appreciated. If you want to help, refer to or write to the developers' @@ -326,7 +326,7 @@ initdb --locale=sv_SE Collation Support - collation + collation The collation feature allows specifying the sort order and character @@ -370,9 +370,9 @@ initdb --locale=sv_SE function or operator call is derived from the arguments, as described below.
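Returning to the LIKE workaround a few paragraphs up, a sketch of the two approaches (table and index names are illustrative):

CREATE TABLE words (w text);
CREATE INDEX words_pattern_idx ON words (w text_pattern_ops);  -- operator class approach
CREATE INDEX words_c_idx ON words (w COLLATE "C");             -- C-collation approach
-- either index can serve left-anchored patterns such as w LIKE 'abc%'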
In addition to comparison operators, collations are taken into account by functions that convert between lower and upper case - letters, such as lower, upper, and - initcap; by pattern matching operators; and by - to_char and related functions. + letters, such as lower, upper, and + initcap; by pattern matching operators; and by + to_char and related functions. @@ -452,7 +452,7 @@ SELECT a < ('foo' COLLATE "fr_FR") FROM test1; SELECT a < b FROM test1; the parser cannot determine which collation to apply, since the - a and b columns have conflicting + a and b columns have conflicting implicit collations. Since the < operator does need to know which collation to use, this will result in an error. The error can be resolved by attaching an explicit collation @@ -468,7 +468,7 @@ SELECT a COLLATE "de_DE" < b FROM test1; SELECT a || b FROM test1; - does not result in an error, because the || operator + does not result in an error, because the || operator does not care about collations: its result is the same regardless of the collation. @@ -486,8 +486,8 @@ SELECT * FROM test1 ORDER BY a || 'foo'; SELECT * FROM test1 ORDER BY a || b; - results in an error, because even though the || operator - doesn't need to know a collation, the ORDER BY clause does. + results in an error, because even though the || operator + doesn't need to know a collation, the ORDER BY clause does. As before, the conflict can be resolved with an explicit collation specifier: @@ -508,7 +508,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; operating system C library. These are the locales that most tools provided by the operating system use. Another provider is icu, which uses the external - ICUICU library. ICU locales can only be + ICUICU library. ICU locales can only be used if support for ICU was configured when PostgreSQL was built. @@ -541,14 +541,14 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; Standard Collations - On all platforms, the collations named default, - C, and POSIX are available. Additional + On all platforms, the collations named default, + C, and POSIX are available. Additional collations may be available depending on operating system support. - The default collation selects the LC_COLLATE + The default collation selects the LC_COLLATE and LC_CTYPE values specified at database creation time. - The C and POSIX collations both specify - traditional C behavior, in which only the ASCII letters - A through Z + The C and POSIX collations both specify + traditional C behavior, in which only the ASCII letters + A through Z are treated as letters, and sorting is done strictly by character code byte values. @@ -565,7 +565,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; If the operating system provides support for using multiple locales - within a single program (newlocale and related functions), + within a single program (newlocale and related functions), or if support for ICU is configured, then when a database cluster is initialized, initdb populates the system catalog pg_collation with @@ -618,8 +618,8 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; within a given database even though it would not be unique globally. Use of the stripped collation names is recommended, since it will make one less thing you need to change if you decide to change to - another database encoding. Note however that the default, - C, and POSIX collations can be used regardless of + another database encoding. 
Note however that the default, + C, and POSIX collations can be used regardless of the database encoding. @@ -630,7 +630,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR"; SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1; - will draw an error even though the C and POSIX + will draw an error even though the C and POSIX collations have identical behaviors. Mixing stripped and non-stripped collation names is therefore not recommended. @@ -691,7 +691,7 @@ SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1; database encoding is one of these, ICU collation entries in pg_collation are ignored. Attempting to use one will draw an error along the lines of collation "de-x-icu" for - encoding "WIN874" does not exist. + encoding "WIN874" does not exist. @@ -889,30 +889,30 @@ CREATE COLLATION french FROM "fr-x-icu"; Character Set Support - character set + character set The character set support in PostgreSQL allows you to store text in a variety of character sets (also called encodings), including single-byte character sets such as the ISO 8859 series and - multiple-byte character sets such as EUC (Extended Unix + multiple-byte character sets such as EUC (Extended Unix Code), UTF-8, and Mule internal code. All supported character sets can be used transparently by clients, but a few are not supported for use within the server (that is, as a server-side encoding). The default character set is selected while initializing your PostgreSQL database - cluster using initdb. It can be overridden when you + cluster using initdb. It can be overridden when you create a database, so you can have multiple databases each with a different character set. An important restriction, however, is that each database's character set - must be compatible with the database's LC_CTYPE (character - classification) and LC_COLLATE (string sort order) locale - settings. For C or - POSIX locale, any character set is allowed, but for other + must be compatible with the database's LC_CTYPE (character + classification) and LC_COLLATE (string sort order) locale + settings. For C or + POSIX locale, any character set is allowed, but for other libc-provided locales there is only one character set that will work correctly. (On Windows, however, UTF-8 encoding can be used with any locale.) 
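Pulling the collation rules above together, a sketch (it assumes the de_DE and es_ES libc collations exist, as in the surrounding examples):

SELECT collname, collprovider   -- 'c' = libc, 'i' = ICU
FROM pg_collation
WHERE collname LIKE 'de%';

CREATE TABLE test2 (a text COLLATE "de_DE", b text COLLATE "es_ES");
INSERT INTO test2 VALUES ('Arm', 'Bein');
SELECT a < b FROM test2;                  -- error: conflicting implicit collations
SELECT a < b COLLATE "de_DE" FROM test2;  -- an explicit collation resolves it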
@@ -954,7 +954,7 @@ CREATE COLLATION french FROM "fr-x-icu"; No No 1-2 - WIN950, Windows950 + WIN950, Windows950 EUC_CN @@ -1017,11 +1017,11 @@ CREATE COLLATION french FROM "fr-x-icu"; No No 1-2 - WIN936, Windows936 + WIN936, Windows936 ISO_8859_5 - ISO 8859-5, ECMA 113 + ISO 8859-5, ECMA 113 Latin/Cyrillic Yes Yes @@ -1030,7 +1030,7 @@ CREATE COLLATION french FROM "fr-x-icu"; ISO_8859_6 - ISO 8859-6, ECMA 114 + ISO 8859-6, ECMA 114 Latin/Arabic Yes Yes @@ -1039,7 +1039,7 @@ CREATE COLLATION french FROM "fr-x-icu"; ISO_8859_7 - ISO 8859-7, ECMA 118 + ISO 8859-7, ECMA 118 Latin/Greek Yes Yes @@ -1048,7 +1048,7 @@ CREATE COLLATION french FROM "fr-x-icu"; ISO_8859_8 - ISO 8859-8, ECMA 121 + ISO 8859-8, ECMA 121 Latin/Hebrew Yes Yes @@ -1057,7 +1057,7 @@ CREATE COLLATION french FROM "fr-x-icu"; JOHAB - JOHAB + JOHAB Korean (Hangul) No No @@ -1071,7 +1071,7 @@ CREATE COLLATION french FROM "fr-x-icu"; Yes Yes 1 - KOI8 + KOI8 KOI8U @@ -1084,57 +1084,57 @@ CREATE COLLATION french FROM "fr-x-icu"; LATIN1 - ISO 8859-1, ECMA 94 + ISO 8859-1, ECMA 94 Western European Yes Yes 1 - ISO88591 + ISO88591 LATIN2 - ISO 8859-2, ECMA 94 + ISO 8859-2, ECMA 94 Central European Yes Yes 1 - ISO88592 + ISO88592 LATIN3 - ISO 8859-3, ECMA 94 + ISO 8859-3, ECMA 94 South European Yes Yes 1 - ISO88593 + ISO88593 LATIN4 - ISO 8859-4, ECMA 94 + ISO 8859-4, ECMA 94 North European Yes Yes 1 - ISO88594 + ISO88594 LATIN5 - ISO 8859-9, ECMA 128 + ISO 8859-9, ECMA 128 Turkish Yes Yes 1 - ISO88599 + ISO88599 LATIN6 - ISO 8859-10, ECMA 144 + ISO 8859-10, ECMA 144 Nordic Yes Yes 1 - ISO885910 + ISO885910 LATIN7 @@ -1143,7 +1143,7 @@ CREATE COLLATION french FROM "fr-x-icu"; Yes Yes 1 - ISO885913 + ISO885913 LATIN8 @@ -1152,7 +1152,7 @@ CREATE COLLATION french FROM "fr-x-icu"; Yes Yes 1 - ISO885914 + ISO885914 LATIN9 @@ -1161,16 +1161,16 @@ CREATE COLLATION french FROM "fr-x-icu"; Yes Yes 1 - ISO885915 + ISO885915 LATIN10 - ISO 8859-16, ASRO SR 14111 + ISO 8859-16, ASRO SR 14111 Romanian Yes No 1 - ISO885916 + ISO885916 MULE_INTERNAL @@ -1188,7 +1188,7 @@ CREATE COLLATION french FROM "fr-x-icu"; No No 1-2 - Mskanji, ShiftJIS, WIN932, Windows932 + Mskanji, ShiftJIS, WIN932, Windows932 SHIFT_JIS_2004 @@ -1202,7 +1202,7 @@ CREATE COLLATION french FROM "fr-x-icu"; SQL_ASCII unspecified (see text) - any + any Yes No 1 @@ -1215,16 +1215,16 @@ CREATE COLLATION french FROM "fr-x-icu"; No No 1-2 - WIN949, Windows949 + WIN949, Windows949 UTF8 Unicode, 8-bit - all + all Yes Yes 1-4 - Unicode + Unicode WIN866 @@ -1233,7 +1233,7 @@ CREATE COLLATION french FROM "fr-x-icu"; Yes Yes 1 - ALT + ALT WIN874 @@ -1260,7 +1260,7 @@ CREATE COLLATION french FROM "fr-x-icu"; Yes Yes 1 - WIN + WIN WIN1252 @@ -1323,30 +1323,30 @@ CREATE COLLATION french FROM "fr-x-icu"; Yes Yes 1 - ABC, TCVN, TCVN5712, VSCII + ABC, TCVN, TCVN5712, VSCII
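After choosing an encoding from the table above, the settings actually in effect can be inspected per database; a sketch:

SELECT datname,
       pg_encoding_to_char(encoding) AS encoding,
       datcollate,
       datctype
FROM pg_database;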
- Not all client APIs support all the listed character sets. For example, the - PostgreSQL - JDBC driver does not support MULE_INTERNAL, LATIN6, - LATIN8, and LATIN10. + Not all client APIs support all the listed character sets. For example, the + PostgreSQL + JDBC driver does not support MULE_INTERNAL, LATIN6, + LATIN8, and LATIN10. - The SQL_ASCII setting behaves considerably differently + The SQL_ASCII setting behaves considerably differently from the other settings. When the server character set is - SQL_ASCII, the server interprets byte values 0-127 + SQL_ASCII, the server interprets byte values 0-127 according to the ASCII standard, while byte values 128-255 are taken as uninterpreted characters. No encoding conversion will be done when - the setting is SQL_ASCII. Thus, this setting is not so + the setting is SQL_ASCII. Thus, this setting is not so much a declaration that a specific encoding is in use, as a declaration of ignorance about the encoding. In most cases, if you are working with any non-ASCII data, it is unwise to use the - SQL_ASCII setting because + SQL_ASCII setting because PostgreSQL will be unable to help you by converting or validating non-ASCII characters. @@ -1356,7 +1356,7 @@ CREATE COLLATION french FROM "fr-x-icu"; Setting the Character Set - initdb defines the default character set (encoding) + initdb defines the default character set (encoding) for a PostgreSQL cluster. For example, @@ -1367,8 +1367,8 @@ initdb -E EUC_JP EUC_JP (Extended Unix Code for Japanese). You can use instead of if you prefer longer option strings. - If no option is - given, initdb attempts to determine the appropriate + If no or option is + given, initdb attempts to determine the appropriate encoding to use based on the specified or default locale. @@ -1388,7 +1388,7 @@ createdb -E EUC_KR -T template0 --lc-collate=ko_KR.euckr --lc-ctype=ko_KR.euckr CREATE DATABASE korean WITH ENCODING 'EUC_KR' LC_COLLATE='ko_KR.euckr' LC_CTYPE='ko_KR.euckr' TEMPLATE=template0; - Notice that the above commands specify copying the template0 + Notice that the above commands specify copying the template0 database. When copying any other database, the encoding and locale settings cannot be changed from those of the source database, because that might result in corrupt data. For more information see @@ -1420,7 +1420,7 @@ $ psql -l On most modern operating systems, PostgreSQL - can determine which character set is implied by the LC_CTYPE + can determine which character set is implied by the LC_CTYPE setting, and it will enforce that only the matching database encoding is used. On older systems it is your responsibility to ensure that you use the encoding expected by the locale you have selected. A mistake in @@ -1430,9 +1430,9 @@ $ psql -l PostgreSQL will allow superusers to create - databases with SQL_ASCII encoding even when - LC_CTYPE is not C or POSIX. As noted - above, SQL_ASCII does not enforce that the data stored in + databases with SQL_ASCII encoding even when + LC_CTYPE is not C or POSIX. As noted + above, SQL_ASCII does not enforce that the data stored in the database has any particular encoding, and so this choice poses risks of locale-dependent misbehavior. Using this combination of settings is deprecated and may someday be forbidden altogether. @@ -1447,7 +1447,7 @@ $ psql -l PostgreSQL supports automatic character set conversion between server and client for certain character set combinations. The conversion information is stored in the - pg_conversion system catalog. 
PostgreSQL + pg_conversion system catalog. PostgreSQL comes with some predefined conversions, as shown in . You can create a new conversion using the SQL command CREATE CONVERSION. @@ -1763,7 +1763,7 @@ $ psql -l - libpq () has functions to control the client encoding. + libpq () has functions to control the client encoding. @@ -1774,14 +1774,14 @@ $ psql -l Setting the client encoding can be done with this SQL command: -SET CLIENT_ENCODING TO 'value'; +SET CLIENT_ENCODING TO 'value'; Also you can use the standard SQL syntax SET NAMES for this purpose: -SET NAMES 'value'; +SET NAMES 'value'; To query the current client encoding: @@ -1813,7 +1813,7 @@ RESET client_encoding; Using the configuration variable . If the - client_encoding variable is set, that client + client_encoding variable is set, that client encoding is automatically selected when a connection to the server is made. (This can subsequently be overridden using any of the other methods mentioned above.) @@ -1832,9 +1832,9 @@ RESET client_encoding; - If the client character set is defined as SQL_ASCII, + If the client character set is defined as SQL_ASCII, encoding conversion is disabled, regardless of the server's character - set. Just as for the server, use of SQL_ASCII is unwise + set. Just as for the server, use of SQL_ASCII is unwise unless you are working with all-ASCII data. diff --git a/doc/src/sgml/citext.sgml b/doc/src/sgml/citext.sgml index 9b4c68f7d4..82251de852 100644 --- a/doc/src/sgml/citext.sgml +++ b/doc/src/sgml/citext.sgml @@ -8,10 +8,10 @@ - The citext module provides a case-insensitive - character string type, citext. Essentially, it internally calls - lower when comparing values. Otherwise, it behaves almost - exactly like text. + The citext module provides a case-insensitive + character string type, citext. Essentially, it internally calls + lower when comparing values. Otherwise, it behaves almost + exactly like text. @@ -19,7 +19,7 @@ The standard approach to doing case-insensitive matches - in PostgreSQL has been to use the lower + in PostgreSQL has been to use the lower function when comparing values, for example @@ -35,19 +35,19 @@ SELECT * FROM tab WHERE lower(col) = LOWER(?); It makes your SQL statements verbose, and you always have to remember to - use lower on both the column and the query value. + use lower on both the column and the query value. It won't use an index, unless you create a functional index using - lower. + lower. - If you declare a column as UNIQUE or PRIMARY - KEY, the implicitly generated index is case-sensitive. So it's + If you declare a column as UNIQUE or PRIMARY + KEY, the implicitly generated index is case-sensitive. So it's useless for case-insensitive searches, and it won't enforce uniqueness case-insensitively. @@ -55,13 +55,13 @@ SELECT * FROM tab WHERE lower(col) = LOWER(?); - The citext data type allows you to eliminate calls - to lower in SQL queries, and allows a primary key to - be case-insensitive. citext is locale-aware, just - like text, which means that the matching of upper case and + The citext data type allows you to eliminate calls + to lower in SQL queries, and allows a primary key to + be case-insensitive. citext is locale-aware, just + like text, which means that the matching of upper case and lower case characters is dependent on the rules of - the database's LC_CTYPE setting. Again, this behavior is - identical to the use of lower in queries. But because it's + the database's LC_CTYPE setting. 
Again, this behavior is + identical to the use of lower in queries. But because it's done transparently by the data type, you don't have to remember to do anything special in your queries. @@ -89,9 +89,9 @@ INSERT INTO users VALUES ( 'Bjørn', md5(random()::text) ); SELECT * FROM users WHERE nick = 'Larry'; - The SELECT statement will return one tuple, even though - the nick column was set to larry and the query - was for Larry. + The SELECT statement will return one tuple, even though + the nick column was set to larry and the query + was for Larry. @@ -99,82 +99,82 @@ SELECT * FROM users WHERE nick = 'Larry'; String Comparison Behavior - citext performs comparisons by converting each string to lower - case (as though lower were called) and then comparing the + citext performs comparisons by converting each string to lower + case (as though lower were called) and then comparing the results normally. Thus, for example, two strings are considered equal - if lower would produce identical results for them. + if lower would produce identical results for them. In order to emulate a case-insensitive collation as closely as possible, - there are citext-specific versions of a number of string-processing + there are citext-specific versions of a number of string-processing operators and functions. So, for example, the regular expression - operators ~ and ~* exhibit the same behavior when - applied to citext: they both match case-insensitively. + operators ~ and ~* exhibit the same behavior when + applied to citext: they both match case-insensitively. The same is true - for !~ and !~*, as well as for the - LIKE operators ~~ and ~~*, and - !~~ and !~~*. If you'd like to match - case-sensitively, you can cast the operator's arguments to text. + for !~ and !~*, as well as for the + LIKE operators ~~ and ~~*, and + !~~ and !~~*. If you'd like to match + case-sensitively, you can cast the operator's arguments to text. Similarly, all of the following functions perform matching - case-insensitively if their arguments are citext: + case-insensitively if their arguments are citext: - regexp_match() + regexp_match() - regexp_matches() + regexp_matches() - regexp_replace() + regexp_replace() - regexp_split_to_array() + regexp_split_to_array() - regexp_split_to_table() + regexp_split_to_table() - replace() + replace() - split_part() + split_part() - strpos() + strpos() - translate() + translate() For the regexp functions, if you want to match case-sensitively, you can - specify the c flag to force a case-sensitive match. Otherwise, - you must cast to text before using one of these functions if + specify the c flag to force a case-sensitive match. Otherwise, + you must cast to text before using one of these functions if you want case-sensitive behavior. @@ -186,13 +186,13 @@ SELECT * FROM users WHERE nick = 'Larry'; - citext's case-folding behavior depends on - the LC_CTYPE setting of your database. How it compares + citext's case-folding behavior depends on + the LC_CTYPE setting of your database. How it compares values is therefore determined when the database is created. It is not truly case-insensitive in the terms defined by the Unicode standard. Effectively, what this means is that, as long as you're happy with your - collation, you should be happy with citext's comparisons. But + collation, you should be happy with citext's comparisons. 
But if you have data in different languages stored in your database, users of one language may find their query results are not as expected if the collation is for another language. @@ -201,38 +201,38 @@ SELECT * FROM users WHERE nick = 'Larry'; - As of PostgreSQL 9.1, you can attach a - COLLATE specification to citext columns or data - values. Currently, citext operators will honor a non-default - COLLATE specification while comparing case-folded strings, + As of PostgreSQL 9.1, you can attach a + COLLATE specification to citext columns or data + values. Currently, citext operators will honor a non-default + COLLATE specification while comparing case-folded strings, but the initial folding to lower case is always done according to the - database's LC_CTYPE setting (that is, as though - COLLATE "default" were given). This may be changed in a - future release so that both steps follow the input COLLATE + database's LC_CTYPE setting (that is, as though + COLLATE "default" were given). This may be changed in a + future release so that both steps follow the input COLLATE specification. - citext is not as efficient as text because the + citext is not as efficient as text because the operator functions and the B-tree comparison functions must make copies of the data and convert it to lower case for comparisons. It is, - however, slightly more efficient than using lower to get + however, slightly more efficient than using lower to get case-insensitive matching. - citext doesn't help much if you need data to compare + citext doesn't help much if you need data to compare case-sensitively in some contexts and case-insensitively in other - contexts. The standard answer is to use the text type and - manually use the lower function when you need to compare + contexts. The standard answer is to use the text type and + manually use the lower function when you need to compare case-insensitively; this works all right if case-insensitive comparison is needed only infrequently. If you need case-insensitive behavior most of the time and case-sensitive infrequently, consider storing the data - as citext and explicitly casting the column to text + as citext and explicitly casting the column to text when you want case-sensitive comparison. In either situation, you will need two indexes if you want both types of searches to be fast. @@ -240,9 +240,9 @@ SELECT * FROM users WHERE nick = 'Larry'; - The schema containing the citext operators must be - in the current search_path (typically public); - if it is not, the normal case-sensitive text operators + The schema containing the citext operators must be + in the current search_path (typically public); + if it is not, the normal case-sensitive text operators will be invoked instead. @@ -257,7 +257,7 @@ SELECT * FROM users WHERE nick = 'Larry'; - Inspired by the original citext module by Donald Fraser. + Inspired by the original citext module by Donald Fraser. diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml index 78c594bbba..722f3da813 100644 --- a/doc/src/sgml/client-auth.sgml +++ b/doc/src/sgml/client-auth.sgml @@ -21,9 +21,9 @@ As explained in , PostgreSQL actually does privilege - management in terms of roles. In this chapter, we - consistently use database user to mean role with the - LOGIN privilege. + management in terms of roles. In this chapter, we + consistently use database user to mean role with the + LOGIN privilege. 
@@ -66,7 +66,7 @@ which traditionally is named pg_hba.conf and is stored in the database cluster's data directory. - (HBA stands for host-based authentication.) A default + (HBA stands for host-based authentication.) A default pg_hba.conf file is installed when the data directory is initialized by initdb. It is possible to place the authentication configuration file elsewhere, @@ -82,7 +82,7 @@ up of a number of fields which are separated by spaces and/or tabs. Fields can contain white space if the field value is double-quoted. Quoting one of the keywords in a database, user, or address field (e.g., - all or replication) makes the word lose its special + all or replication) makes the word lose its special meaning, and just match a database, user, or host with that name. @@ -92,8 +92,8 @@ and the authentication method to be used for connections matching these parameters. The first record with a matching connection type, client address, requested database, and user name is used to perform - authentication. There is no fall-through or - backup: if one record is chosen and the authentication + authentication. There is no fall-through or + backup: if one record is chosen and the authentication fails, subsequent records are not considered. If no record matches, access is denied. @@ -138,7 +138,7 @@ hostnossl database user the server is started with an appropriate value for the configuration parameter, since the default behavior is to listen for TCP/IP connections - only on the local loopback address localhost. + only on the local loopback address localhost. @@ -169,7 +169,7 @@ hostnossl database user hostnossl - This record type has the opposite behavior of hostssl; + This record type has the opposite behavior of hostssl; it only matches connection attempts made over TCP/IP that do not use SSL. @@ -182,24 +182,24 @@ hostnossl database user Specifies which database name(s) this record matches. The value all specifies that it matches all databases. - The value sameuser specifies that the record + The value sameuser specifies that the record matches if the requested database has the same name as the - requested user. The value samerole specifies that + requested user. The value samerole specifies that the requested user must be a member of the role with the same - name as the requested database. (samegroup is an - obsolete but still accepted spelling of samerole.) + name as the requested database. (samegroup is an + obsolete but still accepted spelling of samerole.) Superusers are not considered to be members of a role for the - purposes of samerole unless they are explicitly + purposes of samerole unless they are explicitly members of the role, directly or indirectly, and not just by virtue of being a superuser. - The value replication specifies that the record + The value replication specifies that the record matches if a physical replication connection is requested (note that replication connections do not specify any particular database). Otherwise, this is the name of a specific PostgreSQL database. Multiple database names can be supplied by separating them with commas. A separate file containing database names can be specified by - preceding the file name with @. + preceding the file name with @. @@ -211,18 +211,18 @@ hostnossl database user Specifies which database user name(s) this record matches. The value all specifies that it matches all users. Otherwise, this is either the name of a specific - database user, or a group name preceded by +. + database user, or a group name preceded by +. 
(Recall that there is no real distinction between users and groups - in PostgreSQL; a + mark really means + in PostgreSQL; a + mark really means match any of the roles that are directly or indirectly members - of this role, while a name without a + mark matches + of this role, while a name without a + mark matches only that specific role.) For this purpose, a superuser is only considered to be a member of a role if they are explicitly a member of the role, directly or indirectly, and not just by virtue of being a superuser. Multiple user names can be supplied by separating them with commas. A separate file containing user names can be specified by preceding the - file name with @. + file name with @. @@ -239,7 +239,7 @@ hostnossl database user An IP address range is specified using standard numeric notation for the range's starting address, then a slash (/) - and a CIDR mask length. The mask + and a CIDR mask length. The mask length indicates the number of high-order bits of the client IP address that must match. Bits to the right of this should be zero in the given IP address. @@ -317,7 +317,7 @@ hostnossl database user This field only applies to host, - hostssl, and hostnossl records. + hostssl, and hostnossl records. @@ -360,17 +360,17 @@ hostnossl database user These two fields can be used as an alternative to the - IP-address/mask-length + IP-address/mask-length notation. Instead of specifying the mask length, the actual mask is specified in a - separate column. For example, 255.0.0.0 represents an IPv4 - CIDR mask length of 8, and 255.255.255.255 represents a + separate column. For example, 255.0.0.0 represents an IPv4 + CIDR mask length of 8, and 255.255.255.255 represents a CIDR mask length of 32. These fields only apply to host, - hostssl, and hostnossl records. + hostssl, and hostnossl records. @@ -385,7 +385,7 @@ hostnossl database user - trust + trust Allow the connection unconditionally. This method @@ -399,12 +399,12 @@ hostnossl database user - reject + reject Reject the connection unconditionally. This is useful for - filtering out certain hosts from a group, for example a - reject line could block a specific host from connecting, + filtering out certain hosts from a group, for example a + reject line could block a specific host from connecting, while a later line allows the remaining hosts in a specific network to connect. @@ -412,7 +412,7 @@ hostnossl database user - scram-sha-256 + scram-sha-256 Perform SCRAM-SHA-256 authentication to verify the user's @@ -422,7 +422,7 @@ hostnossl database user - md5 + md5 Perform SCRAM-SHA-256 or MD5 authentication to verify the @@ -433,7 +433,7 @@ hostnossl database user - password + password Require the client to supply an unencrypted password for @@ -446,7 +446,7 @@ hostnossl database user - gss + gss Use GSSAPI to authenticate the user. This is only @@ -457,7 +457,7 @@ hostnossl database user - sspi + sspi Use SSPI to authenticate the user. This is only @@ -468,7 +468,7 @@ hostnossl database user - ident + ident Obtain the operating system user name of the client @@ -483,7 +483,7 @@ hostnossl database user - peer + peer Obtain the client's operating system user name from the operating @@ -495,17 +495,17 @@ hostnossl database user - ldap + ldap - Authenticate using an LDAP server. See LDAP server. See for details. - radius + radius Authenticate using a RADIUS server. See database
user - cert + cert Authenticate using SSL client certificates. See @@ -525,7 +525,7 @@ hostnossl database user - pam + pam Authenticate using the Pluggable Authentication Modules @@ -536,7 +536,7 @@ hostnossl database user - bsd + bsd Authenticate using the BSD Authentication service provided by the @@ -554,17 +554,17 @@ hostnossl database user auth-options - After the auth-method field, there can be field(s) of - the form name=value that + After the auth-method field, there can be field(s) of + the form name=value that specify options for the authentication method. Details about which options are available for which authentication methods appear below. In addition to the method-specific options listed below, there is one - method-independent authentication option clientcert, which - can be specified in any hostssl record. When set - to 1, this option requires the client to present a valid + method-independent authentication option clientcert, which + can be specified in any hostssl record. When set + to 1, this option requires the client to present a valid (trusted) SSL certificate, in addition to the other requirements of the authentication method. @@ -574,11 +574,11 @@ hostnossl database user - Files included by @ constructs are read as lists of names, + Files included by @ constructs are read as lists of names, which can be separated by either whitespace or commas. Comments are introduced by #, just as in - pg_hba.conf, and nested @ constructs are - allowed. Unless the file name following @ is an absolute + pg_hba.conf, and nested @ constructs are + allowed. Unless the file name following @ is an absolute path, it is taken to be relative to the directory containing the referencing file. @@ -589,10 +589,10 @@ hostnossl database user significant. Typically, earlier records will have tight connection match parameters and weaker authentication methods, while later records will have looser match parameters and stronger authentication - methods. For example, one might wish to use trust + methods. For example, one might wish to use trust authentication for local TCP/IP connections but require a password for remote TCP/IP connections. In this case a record specifying - trust authentication for connections from 127.0.0.1 would + trust authentication for connections from 127.0.0.1 would appear before a record specifying password authentication for a wider range of allowed client IP addresses. @@ -603,7 +603,7 @@ hostnossl database user SIGHUPSIGHUP signal. If you edit the file on an active system, you will need to signal the postmaster - (using pg_ctl reload or kill -HUP) to make it + (using pg_ctl reload or kill -HUP) to make it re-read the file. @@ -618,7 +618,7 @@ hostnossl database user The system view pg_hba_file_rules - can be helpful for pre-testing changes to the pg_hba.conf + can be helpful for pre-testing changes to the pg_hba.conf file, or for diagnosing problems if loading of the file did not have the desired effects. Rows in the view with non-null error fields indicate problems in the @@ -629,9 +629,9 @@ hostnossl database user To connect to a particular database, a user must not only pass the pg_hba.conf checks, but must have the - CONNECT privilege for the database. If you wish to + CONNECT privilege for the database. If you wish to restrict which users can connect to which databases, it's usually - easier to control this by granting/revoking CONNECT privilege + easier to control this by granting/revoking CONNECT privilege than to put the rules in pg_hba.conf entries. 
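A sketch tying together the ordering advice and the pre-test view just mentioned (the addresses, the +support group, and the method choices are illustrative):

# pg_hba.conf: tight matches and weak methods first, loose matches and strong methods later
local   all        all                         peer
host    sameuser   all        127.0.0.1/32     trust
host    all        +support   192.168.12.0/24  scram-sha-256
host    all        all        0.0.0.0/0        reject

and, before signaling the postmaster, a check for problems:

SELECT line_number, type, database, user_name, auth_method, error
FROM pg_hba_file_rules
WHERE error IS NOT NULL;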
@@ -760,21 +760,21 @@ local db1,db2,@demodbs all md5 User name maps are defined in the ident map file, which by default is named - pg_ident.confpg_ident.conf + pg_ident.confpg_ident.conf and is stored in the cluster's data directory. (It is possible to place the map file elsewhere, however; see the configuration parameter.) The ident map file contains lines of the general form: -map-name system-username database-username +map-name system-username database-username Comments and whitespace are handled in the same way as in - pg_hba.conf. The - map-name is an arbitrary name that will be used to + pg_hba.conf. The + map-name is an arbitrary name that will be used to refer to this mapping in pg_hba.conf. The other two fields specify an operating system user name and a matching - database user name. The same map-name can be + database user name. The same map-name can be used repeatedly to specify multiple user-mappings within a single map. @@ -788,13 +788,13 @@ local db1,db2,@demodbs all md5 user has requested to connect as. - If the system-username field starts with a slash (/), + If the system-username field starts with a slash (/), the remainder of the field is treated as a regular expression. (See for details of - PostgreSQL's regular expression syntax.) The regular + PostgreSQL's regular expression syntax.) The regular expression can include a single capture, or parenthesized subexpression, - which can then be referenced in the database-username - field as \1 (backslash-one). This allows the mapping of + which can then be referenced in the database-username + field as \1 (backslash-one). This allows the mapping of multiple user names in a single line, which is particularly useful for simple syntax substitutions. For example, these entries @@ -802,14 +802,14 @@ mymap /^(.*)@mydomain\.com$ \1 mymap /^(.*)@otherdomain\.com$ guest will remove the domain part for users with system user names that end with - @mydomain.com, and allow any user whose system name ends with - @otherdomain.com to log in as guest. + @mydomain.com, and allow any user whose system name ends with + @otherdomain.com to log in as guest. Keep in mind that by default, a regular expression can match just part of - a string. It's usually wise to use ^ and $, as + a string. It's usually wise to use ^ and $, as shown in the above example, to force the match to be to the entire system user name. @@ -821,28 +821,28 @@ mymap /^(.*)@otherdomain\.com$ guest SIGHUPSIGHUP signal. If you edit the file on an active system, you will need to signal the postmaster - (using pg_ctl reload or kill -HUP) to make it + (using pg_ctl reload or kill -HUP) to make it re-read the file. A pg_ident.conf file that could be used in - conjunction with the pg_hba.conf file in pg_hba.conf file in is shown in . In this example, anyone logged in to a machine on the 192.168 network that does not have the - operating system user name bryanh, ann, or - robert would not be granted access. Unix user - robert would only be allowed access when he tries to - connect as PostgreSQL user bob, not - as robert or anyone else. ann would - only be allowed to connect as ann. User - bryanh would be allowed to connect as either - bryanh or as guest1. + operating system user name bryanh, ann, or + robert would not be granted access. Unix user + robert would only be allowed access when he tries to + connect as PostgreSQL user bob, not + as robert or anyone else. ann would + only be allowed to connect as ann. 
User + bryanh would be allowed to connect as either + bryanh or as guest1. - An Example <filename>pg_ident.conf</> File + An Example <filename>pg_ident.conf</filename> File # MAPNAME SYSTEM-USERNAME PG-USERNAME @@ -866,21 +866,21 @@ omicron bryanh guest1 Trust Authentication - When trust authentication is specified, + When trust authentication is specified, PostgreSQL assumes that anyone who can connect to the server is authorized to access the database with whatever database user name they specify (even superuser names). - Of course, restrictions made in the database and - user columns still apply. + Of course, restrictions made in the database and + user columns still apply. This method should only be used when there is adequate operating-system-level protection on connections to the server. - trust authentication is appropriate and very + trust authentication is appropriate and very convenient for local connections on a single-user workstation. It - is usually not appropriate by itself on a multiuser - machine. However, you might be able to use trust even + is usually not appropriate by itself on a multiuser + machine. However, you might be able to use trust even on a multiuser machine, if you restrict access to the server's Unix-domain socket file using file-system permissions. To do this, set the unix_socket_permissions (and possibly @@ -895,17 +895,17 @@ omicron bryanh guest1 Setting file-system permissions only helps for Unix-socket connections. Local TCP/IP connections are not restricted by file-system permissions. Therefore, if you want to use file-system permissions for local security, - remove the host ... 127.0.0.1 ... line from - pg_hba.conf, or change it to a - non-trust authentication method. + remove the host ... 127.0.0.1 ... line from + pg_hba.conf, or change it to a + non-trust authentication method. - trust authentication is only suitable for TCP/IP connections + trust authentication is only suitable for TCP/IP connections if you trust every user on every machine that is allowed to connect - to the server by the pg_hba.conf lines that specify - trust. It is seldom reasonable to use trust - for any TCP/IP connections other than those from localhost (127.0.0.1). + to the server by the pg_hba.conf lines that specify + trust. It is seldom reasonable to use trust + for any TCP/IP connections other than those from localhost (127.0.0.1). @@ -914,10 +914,10 @@ omicron bryanh guest1 Password Authentication - MD5 + MD5 - SCRAM + SCRAM password @@ -936,7 +936,7 @@ omicron bryanh guest1 scram-sha-256 - The method scram-sha-256 performs SCRAM-SHA-256 + The method scram-sha-256 performs SCRAM-SHA-256 authentication, as described in RFC 7677. It is a challenge-response scheme that prevents password sniffing on @@ -955,7 +955,7 @@ omicron bryanh guest1 md5 - The method md5 uses a custom less secure challenge-response + The method md5 uses a custom less secure challenge-response mechanism. It prevents password sniffing and avoids storing passwords on the server in plain text but provides no protection if an attacker manages to steal the password hash from the server. Also, the MD5 hash @@ -982,10 +982,10 @@ omicron bryanh guest1 password - The method password sends the password in clear-text and is - therefore vulnerable to password sniffing attacks. It should + The method password sends the password in clear-text and is + therefore vulnerable to password sniffing attacks. It should always be avoided if possible. 
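A sketch of preferring SCRAM over MD5 among the methods described above (the role name and password are placeholders; pg_authid is only readable by superusers):

SET password_encryption = 'scram-sha-256';
CREATE ROLE app_user LOGIN PASSWORD 'correct-horse-battery-staple';
SELECT rolname, left(rolpassword, 13) AS stored_format  -- 'SCRAM-SHA-256' when SCRAM is in use
FROM pg_authid
WHERE rolname = 'app_user';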
If the connection is protected by SSL - encryption then password can be used safely, though. + encryption then password can be used safely, though. (Though SSL certificate authentication might be a better choice if one is depending on using SSL). @@ -996,7 +996,7 @@ omicron bryanh guest1 PostgreSQL database passwords are separate from operating system user passwords. The password for - each database user is stored in the pg_authid system + each database user is stored in the pg_authid system catalog. Passwords can be managed with the SQL commands and , @@ -1060,7 +1060,7 @@ omicron bryanh guest1 - GSSAPI support has to be enabled when PostgreSQL is built; + GSSAPI support has to be enabled when PostgreSQL is built; see for more information. @@ -1068,13 +1068,13 @@ omicron bryanh guest1 When GSSAPI uses Kerberos, it uses a standard principal in the format - servicename/hostname@realm. + servicename/hostname@realm. The PostgreSQL server will accept any principal that is included in the keytab used by the server, but care needs to be taken to specify the correct principal details when - making the connection from the client using the krbsrvname connection parameter. (See + making the connection from the client using the krbsrvname connection parameter. (See also .) The installation default can be changed from the default postgres at build time using - ./configure --with-krb-srvnam=whatever. + ./configure --with-krb-srvnam=whatever. In most environments, this parameter never needs to be changed. Some Kerberos implementations might require a different service name, @@ -1082,31 +1082,31 @@ omicron bryanh guest1 to be in upper case (POSTGRES). - hostname is the fully qualified host name of the + hostname is the fully qualified host name of the server machine. The service principal's realm is the preferred realm of the server machine. - Client principals can be mapped to different PostgreSQL - database user names with pg_ident.conf. For example, - pgusername@realm could be mapped to just pgusername. - Alternatively, you can use the full username@realm principal as - the role name in PostgreSQL without any mapping. + Client principals can be mapped to different PostgreSQL + database user names with pg_ident.conf. For example, + pgusername@realm could be mapped to just pgusername. + Alternatively, you can use the full username@realm principal as + the role name in PostgreSQL without any mapping. - PostgreSQL also supports a parameter to strip the realm from + PostgreSQL also supports a parameter to strip the realm from the principal. This method is supported for backwards compatibility and is strongly discouraged as it is then impossible to distinguish different users with the same user name but coming from different realms. To enable this, - set include_realm to 0. For simple single-realm + set include_realm to 0. For simple single-realm installations, doing that combined with setting the - krb_realm parameter (which checks that the principal's realm + krb_realm parameter (which checks that the principal's realm matches exactly what is in the krb_realm parameter) is still secure; but this is a less capable approach compared to specifying an explicit mapping in - pg_ident.conf. + pg_ident.conf. @@ -1116,8 +1116,8 @@ omicron bryanh guest1 of the key file is specified by the configuration parameter. The default is - /usr/local/pgsql/etc/krb5.keytab (or whatever - directory was specified as sysconfdir at build time). 
+ /usr/local/pgsql/etc/krb5.keytab (or whatever + directory was specified as sysconfdir at build time). For security reasons, it is recommended to use a separate keytab just for the PostgreSQL server rather than opening up permissions on the system keytab file. @@ -1127,17 +1127,17 @@ omicron bryanh guest1 Kerberos documentation for details. The following example is for MIT-compatible Kerberos 5 implementations: -kadmin% ank -randkey postgres/server.my.domain.org -kadmin% ktadd -k krb5.keytab postgres/server.my.domain.org +kadmin% ank -randkey postgres/server.my.domain.org +kadmin% ktadd -k krb5.keytab postgres/server.my.domain.org When connecting to the database make sure you have a ticket for a principal matching the requested database user name. For example, for - database user name fred, principal - fred@EXAMPLE.COM would be able to connect. To also allow - principal fred/users.example.com@EXAMPLE.COM, use a user name + database user name fred, principal + fred@EXAMPLE.COM would be able to connect. To also allow + principal fred/users.example.com@EXAMPLE.COM, use a user name map, as described in . @@ -1155,8 +1155,8 @@ omicron bryanh guest1 in multi-realm environments unless krb_realm is also used. It is recommended to leave include_realm set to the default (1) and to - provide an explicit mapping in pg_ident.conf to convert - principal names to PostgreSQL user names. + provide an explicit mapping in pg_ident.conf to convert + principal names to PostgreSQL user names. @@ -1236,8 +1236,8 @@ omicron bryanh guest1 in multi-realm environments unless krb_realm is also used. It is recommended to leave include_realm set to the default (1) and to - provide an explicit mapping in pg_ident.conf to convert - principal names to PostgreSQL user names. + provide an explicit mapping in pg_ident.conf to convert + principal names to PostgreSQL user names. @@ -1270,9 +1270,9 @@ omicron bryanh guest1 By default, these two names are identical for new user accounts. - Note that libpq uses the SAM-compatible name if no + Note that libpq uses the SAM-compatible name if no explicit user name is specified. If you use - libpq or a driver based on it, you should + libpq or a driver based on it, you should leave this option disabled or explicitly specify user name in the connection string. @@ -1357,8 +1357,8 @@ omicron bryanh guest1 is to answer questions like What user initiated the connection that goes out of your port X and connects to my port Y?. - Since PostgreSQL knows both X and - Y when a physical connection is established, it + Since PostgreSQL knows both X and + Y when a physical connection is established, it can interrogate the ident server on the host of the connecting client and can theoretically determine the operating system user for any given connection. @@ -1386,9 +1386,9 @@ omicron bryanh guest1 Some ident servers have a nonstandard option that causes the returned user name to be encrypted, using a key that only the originating - machine's administrator knows. This option must not be - used when using the ident server with PostgreSQL, - since PostgreSQL does not have any way to decrypt the + machine's administrator knows. This option must not be + used when using the ident server with PostgreSQL, + since PostgreSQL does not have any way to decrypt the returned string to determine the actual user name. 
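Putting the GSSAPI options described above into a single pg_hba.conf line (the realm and map name are hypothetical):

host    all    all    192.168.0.0/16    gss    include_realm=1 krb_realm=EXAMPLE.COM map=krbmap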
@@ -1424,11 +1424,11 @@ omicron bryanh guest1 Peer authentication is only available on operating systems providing - the getpeereid() function, the SO_PEERCRED + the getpeereid() function, the SO_PEERCRED socket parameter, or similar mechanisms. Currently that includes - Linux, - most flavors of BSD including - macOS, + Linux, + most flavors of BSD including + macOS, and Solaris. @@ -1454,23 +1454,23 @@ omicron bryanh guest1 LDAP authentication can operate in two modes. In the first mode, which we will call the simple bind mode, the server will bind to the distinguished name constructed as - prefix username suffix. - Typically, the prefix parameter is used to specify - cn=, or DOMAIN\ in an Active - Directory environment. suffix is used to specify the + prefix username suffix. + Typically, the prefix parameter is used to specify + cn=, or DOMAIN\ in an Active + Directory environment. suffix is used to specify the remaining part of the DN in a non-Active Directory environment. In the second mode, which we will call the search+bind mode, the server first binds to the LDAP directory with - a fixed user name and password, specified with ldapbinddn - and ldapbindpasswd, and performs a search for the user trying + a fixed user name and password, specified with ldapbinddn + and ldapbindpasswd, and performs a search for the user trying to log in to the database. If no user and password is configured, an anonymous bind will be attempted to the directory. The search will be - performed over the subtree at ldapbasedn, and will try to + performed over the subtree at ldapbasedn, and will try to do an exact match of the attribute specified in - ldapsearchattribute. + ldapsearchattribute. Once the user has been found in this search, the server disconnects and re-binds to the directory as this user, using the password specified by the client, to verify that the @@ -1572,7 +1572,7 @@ omicron bryanh guest1 Attribute to match against the user name in the search when doing search+bind authentication. If no attribute is specified, the - uid attribute will be used. + uid attribute will be used. @@ -1719,11 +1719,11 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse When using RADIUS authentication, an Access Request message will be sent to the configured RADIUS server. This request will be of type Authenticate Only, and include parameters for - user name, password (encrypted) and - NAS Identifier. The request will be encrypted using + user name, password (encrypted) and + NAS Identifier. The request will be encrypted using a secret shared with the server. The RADIUS server will respond to - this server with either Access Accept or - Access Reject. There is no support for RADIUS accounting. + this server with either Access Accept or + Access Reject. There is no support for RADIUS accounting. @@ -1762,8 +1762,8 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse The encryption vector used will only be cryptographically - strong if PostgreSQL is built with support for - OpenSSL. In other cases, the transmission to the + strong if PostgreSQL is built with support for + OpenSSL. In other cases, the transmission to the RADIUS server should only be considered obfuscated, not secured, and external security measures should be applied if necessary. @@ -1777,7 +1777,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse The port number on the RADIUS servers to connect to. 
If no port - is specified, the default port 1812 will be used. + is specified, the default port 1812 will be used. @@ -1786,12 +1786,12 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse radiusidentifiers - The string used as NAS Identifier in the RADIUS + The string used as NAS Identifier in the RADIUS requests. This parameter can be used as a second parameter identifying for example which database user the user is attempting to authenticate as, which can be used for policy matching on the RADIUS server. If no identifier is specified, the default - postgresql will be used. + postgresql will be used. @@ -1836,11 +1836,11 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse - In a pg_hba.conf record specifying certificate - authentication, the authentication option clientcert is - assumed to be 1, and it cannot be turned off since a client - certificate is necessary for this method. What the cert - method adds to the basic clientcert certificate validity test + In a pg_hba.conf record specifying certificate + authentication, the authentication option clientcert is + assumed to be 1, and it cannot be turned off since a client + certificate is necessary for this method. What the cert + method adds to the basic clientcert certificate validity test is a check that the cn attribute matches the database user name. @@ -1863,7 +1863,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse exist in the database before PAM can be used for authentication. For more information about PAM, please read the - Linux-PAM Page. + Linux-PAM Page. @@ -1896,7 +1896,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse - If PAM is set up to read /etc/shadow, authentication + If PAM is set up to read /etc/shadow, authentication will fail because the PostgreSQL server is started by a non-root user. However, this is not an issue when PAM is configured to use LDAP or other authentication methods. @@ -1922,11 +1922,11 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse - BSD Authentication in PostgreSQL uses + BSD Authentication in PostgreSQL uses the auth-postgresql login type and authenticates with the postgresql login class if that's defined in login.conf. By default that login class does not - exist, and PostgreSQL will use the default login class. + exist, and PostgreSQL will use the default login class. diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index b012a26991..aeda826d87 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -70,9 +70,9 @@ (typically eight kilobytes), milliseconds, seconds, or minutes. An unadorned numeric value for one of these settings will use the setting's default unit, which can be learned from - pg_settings.unit. + pg_settings.unit. For convenience, settings can be given with a unit specified explicitly, - for example '120 ms' for a time value, and they will be + for example '120 ms' for a time value, and they will be converted to whatever the parameter's actual unit is. Note that the value must be written as a string (with quotes) to use this feature. The unit name is case-sensitive, and there can be whitespace between @@ -105,7 +105,7 @@ Enumerated-type parameters are written in the same way as string parameters, but are restricted to have one of a limited set of values. The values allowable for such a parameter can be found from - pg_settings.enumvals. + pg_settings.enumvals. 
Enum parameter values are case-insensitive. @@ -117,7 +117,7 @@ The most fundamental way to set these parameters is to edit the file - postgresql.confpostgresql.conf, + postgresql.confpostgresql.conf, which is normally kept in the data directory. A default copy is installed when the database cluster directory is initialized. An example of what this file might look like is: @@ -150,8 +150,8 @@ shared_buffers = 128MB SIGHUP The configuration file is reread whenever the main server process - receives a SIGHUP signal; this signal is most easily - sent by running pg_ctl reload from the command line or by + receives a SIGHUP signal; this signal is most easily + sent by running pg_ctl reload from the command line or by calling the SQL function pg_reload_conf(). The main server process also propagates this signal to all currently running server processes, so that existing sessions also adopt the new values @@ -161,26 +161,26 @@ shared_buffers = 128MB can only be set at server start; any changes to their entries in the configuration file will be ignored until the server is restarted. Invalid parameter settings in the configuration file are likewise - ignored (but logged) during SIGHUP processing. + ignored (but logged) during SIGHUP processing. - In addition to postgresql.conf, + In addition to postgresql.conf, a PostgreSQL data directory contains a file - postgresql.auto.confpostgresql.auto.conf, - which has the same format as postgresql.conf but should + postgresql.auto.confpostgresql.auto.conf, + which has the same format as postgresql.conf but should never be edited manually. This file holds settings provided through the command. This file is automatically - read whenever postgresql.conf is, and its settings take - effect in the same way. Settings in postgresql.auto.conf - override those in postgresql.conf. + read whenever postgresql.conf is, and its settings take + effect in the same way. Settings in postgresql.auto.conf + override those in postgresql.conf. The system view pg_file_settings can be helpful for pre-testing changes to the configuration file, or for - diagnosing problems if a SIGHUP signal did not have the + diagnosing problems if a SIGHUP signal did not have the desired effects. @@ -193,7 +193,7 @@ shared_buffers = 128MB commands to establish configuration defaults. The already-mentioned command provides a SQL-accessible means of changing global defaults; it is - functionally equivalent to editing postgresql.conf. + functionally equivalent to editing postgresql.conf. In addition, there are two commands that allow setting of defaults on a per-database or per-role basis: @@ -215,7 +215,7 @@ shared_buffers = 128MB - Values set with ALTER DATABASE and ALTER ROLE + Values set with ALTER DATABASE and ALTER ROLE are applied only when starting a fresh database session. They override values obtained from the configuration files or server command line, and constitute defaults for the rest of the session. @@ -224,7 +224,7 @@ shared_buffers = 128MB - Once a client is connected to the database, PostgreSQL + Once a client is connected to the database, PostgreSQL provides two additional SQL commands (and equivalent functions) to interact with session-local configuration settings: @@ -251,14 +251,14 @@ shared_buffers = 128MB In addition, the system view pg_settings can be + linkend="view-pg-settings">pg_settings can be used to view and change session-local values: - Querying this view is similar to using SHOW ALL but + Querying this view is similar to using SHOW ALL but provides more detail. 
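To make the distinction between the two directives concrete, here is a minimal sketch of a postgresql.conf tail using both (the file names are hypothetical):

include 'shared.conf'              # an unreadable or missing file is an error
include_if_exists 'local.conf'     # a missing file is only logged, then skipped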
It is also more flexible, since it's possible to specify filter conditions or join against other relations. @@ -267,8 +267,8 @@ shared_buffers = 128MB Using on this view, specifically - updating the setting column, is the equivalent - of issuing SET commands. For example, the equivalent of + updating the setting column, is the equivalent + of issuing SET commands. For example, the equivalent of SET configuration_parameter TO DEFAULT; @@ -289,7 +289,7 @@ UPDATE pg_settings SET setting = reset_val WHERE name = 'configuration_parameter In addition to setting global defaults or attaching overrides at the database or role level, you can pass settings to PostgreSQL via shell facilities. - Both the server and libpq client library + Both the server and libpq client library accept parameter values via the shell. @@ -298,26 +298,26 @@ UPDATE pg_settings SET setting = reset_val WHERE name = 'configuration_parameter During server startup, parameter settings can be passed to the postgres command via the - command-line parameter. For example, postgres -c log_connections=yes -c log_destination='syslog' Settings provided in this way override those set via - postgresql.conf or ALTER SYSTEM, + postgresql.conf or ALTER SYSTEM, so they cannot be changed globally without restarting the server. - When starting a client session via libpq, + When starting a client session via libpq, parameter settings can be specified using the PGOPTIONS environment variable. Settings established in this way constitute defaults for the life of the session, but do not affect other sessions. For historical reasons, the format of PGOPTIONS is similar to that used when launching the postgres - command; specifically, the flag must be specified. For example, env PGOPTIONS="-c geqo=off -c statement_timeout=5min" psql @@ -338,20 +338,20 @@ env PGOPTIONS="-c geqo=off -c statement_timeout=5min" psql Managing Configuration File Contents - PostgreSQL provides several features for breaking - down complex postgresql.conf files into sub-files. + PostgreSQL provides several features for breaking + down complex postgresql.conf files into sub-files. These features are especially useful when managing multiple servers with related, but not identical, configurations. - include + include in configuration file In addition to individual parameter settings, - the postgresql.conf file can contain include - directives, which specify another file to read and process as if + the postgresql.conf file can contain include + directives, which specify another file to read and process as if it were inserted into the configuration file at this point. This feature allows a configuration file to be divided into physically separate parts. Include directives simply look like: @@ -365,23 +365,23 @@ include 'filename' - include_if_exists + include_if_exists in configuration file - There is also an include_if_exists directive, which acts - the same as the include directive, except + There is also an include_if_exists directive, which acts + the same as the include directive, except when the referenced file does not exist or cannot be read. A regular - include will consider this an error condition, but - include_if_exists merely logs a message and continues + include will consider this an error condition, but + include_if_exists merely logs a message and continues processing the referencing configuration file. 
- include_dir + include_dir in configuration file - The postgresql.conf file can also contain + The postgresql.conf file can also contain include_dir directives, which specify an entire directory of configuration files to include. These look like @@ -401,36 +401,36 @@ include_dir 'directory' Include files or directories can be used to logically separate portions of the database configuration, rather than having a single large - postgresql.conf file. Consider a company that has two + postgresql.conf file. Consider a company that has two database servers, each with a different amount of memory. There are likely elements of the configuration both will share, for things such as logging. But memory-related parameters on the server will vary between the two. And there might be server specific customizations, too. One way to manage this situation is to break the custom configuration changes for your site into three files. You could add - this to the end of your postgresql.conf file to include + this to the end of your postgresql.conf file to include them: include 'shared.conf' include 'memory.conf' include 'server.conf' - All systems would have the same shared.conf. Each + All systems would have the same shared.conf. Each server with a particular amount of memory could share the - same memory.conf; you might have one for all servers + same memory.conf; you might have one for all servers with 8GB of RAM, another for those having 16GB. And - finally server.conf could have truly server-specific + finally server.conf could have truly server-specific configuration information in it. Another possibility is to create a configuration file directory and - put this information into files there. For example, a conf.d - directory could be referenced at the end of postgresql.conf: + put this information into files there. For example, a conf.d + directory could be referenced at the end of postgresql.conf: include_dir 'conf.d' - Then you could name the files in the conf.d directory + Then you could name the files in the conf.d directory like this: 00shared.conf @@ -441,8 +441,8 @@ include_dir 'conf.d' files will be loaded. This is important because only the last setting encountered for a particular parameter while the server is reading configuration files will be used. In this example, - something set in conf.d/02server.conf would override a - value set in conf.d/01memory.conf. + something set in conf.d/02server.conf would override a + value set in conf.d/01memory.conf. @@ -483,7 +483,7 @@ include_dir 'conf.d' data_directory (string) - data_directory configuration parameter + data_directory configuration parameter @@ -497,13 +497,13 @@ include_dir 'conf.d' config_file (string) - config_file configuration parameter + config_file configuration parameter Specifies the main server configuration file - (customarily called postgresql.conf). + (customarily called postgresql.conf). This parameter can only be set on the postgres command line. @@ -512,13 +512,13 @@ include_dir 'conf.d' hba_file (string) - hba_file configuration parameter + hba_file configuration parameter Specifies the configuration file for host-based authentication - (customarily called pg_hba.conf). + (customarily called pg_hba.conf). This parameter can only be set at server start. @@ -527,13 +527,13 @@ include_dir 'conf.d' ident_file (string) - ident_file configuration parameter + ident_file configuration parameter Specifies the configuration file for user name mapping - (customarily called pg_ident.conf). + (customarily called pg_ident.conf). 
This parameter can only be set at server start. See also . @@ -543,7 +543,7 @@ include_dir 'conf.d' external_pid_file (string) - external_pid_file configuration parameter + external_pid_file configuration parameter @@ -569,10 +569,10 @@ include_dir 'conf.d' data directory, the postgres command-line option or PGDATA environment variable must point to the directory containing the configuration files, - and the data_directory parameter must be set in + and the data_directory parameter must be set in postgresql.conf (or on the command line) to show where the data directory is actually located. Notice that - data_directory overrides and + data_directory overrides and PGDATA for the location of the data directory, but not for the location of the configuration files. @@ -580,12 +580,12 @@ include_dir 'conf.d' If you wish, you can specify the configuration file names and locations - individually using the parameters config_file, - hba_file and/or ident_file. - config_file can only be specified on the + individually using the parameters config_file, + hba_file and/or ident_file. + config_file can only be specified on the postgres command line, but the others can be set within the main configuration file. If all three parameters plus - data_directory are explicitly set, then it is not necessary + data_directory are explicitly set, then it is not necessary to specify or PGDATA. @@ -607,7 +607,7 @@ include_dir 'conf.d' listen_addresses (string) - listen_addresses configuration parameter + listen_addresses configuration parameter @@ -615,15 +615,15 @@ include_dir 'conf.d' Specifies the TCP/IP address(es) on which the server is to listen for connections from client applications. The value takes the form of a comma-separated list of host names - and/or numeric IP addresses. The special entry * + and/or numeric IP addresses. The special entry * corresponds to all available IP interfaces. The entry - 0.0.0.0 allows listening for all IPv4 addresses and - :: allows listening for all IPv6 addresses. + 0.0.0.0 allows listening for all IPv4 addresses and + :: allows listening for all IPv6 addresses. If the list is empty, the server does not listen on any IP interface at all, in which case only Unix-domain sockets can be used to connect to it. - The default value is localhost, - which allows only local TCP/IP loopback connections to be + The default value is localhost, + which allows only local TCP/IP loopback connections to be made. While client authentication () allows fine-grained control over who can access the server, listen_addresses @@ -638,7 +638,7 @@ include_dir 'conf.d' port (integer) - port configuration parameter + port configuration parameter @@ -653,7 +653,7 @@ include_dir 'conf.d' max_connections (integer) - max_connections configuration parameter + max_connections configuration parameter @@ -661,7 +661,7 @@ include_dir 'conf.d' Determines the maximum number of concurrent connections to the database server. The default is typically 100 connections, but might be less if your kernel settings will not support it (as - determined during initdb). This parameter can + determined during initdb). This parameter can only be set at server start. @@ -678,17 +678,17 @@ include_dir 'conf.d' superuser_reserved_connections (integer) - superuser_reserved_connections configuration parameter + superuser_reserved_connections configuration parameter Determines the number of connection slots that - are reserved for connections by PostgreSQL + are reserved for connections by PostgreSQL superusers. 
At most connections can ever be active simultaneously. Whenever the number of active concurrent connections is at least - max_connections minus + max_connections minus superuser_reserved_connections, new connections will be accepted only for superusers, and no new replication connections will be accepted. @@ -705,7 +705,7 @@ include_dir 'conf.d' unix_socket_directories (string) - unix_socket_directories configuration parameter + unix_socket_directories configuration parameter @@ -726,10 +726,10 @@ include_dir 'conf.d' In addition to the socket file itself, which is named - .s.PGSQL.nnnn where - nnnn is the server's port number, an ordinary file - named .s.PGSQL.nnnn.lock will be - created in each of the unix_socket_directories directories. + .s.PGSQL.nnnn where + nnnn is the server's port number, an ordinary file + named .s.PGSQL.nnnn.lock will be + created in each of the unix_socket_directories directories. Neither file should ever be removed manually. @@ -743,7 +743,7 @@ include_dir 'conf.d' unix_socket_group (string) - unix_socket_group configuration parameter + unix_socket_group configuration parameter @@ -768,7 +768,7 @@ include_dir 'conf.d' unix_socket_permissions (integer) - unix_socket_permissions configuration parameter + unix_socket_permissions configuration parameter @@ -804,7 +804,7 @@ include_dir 'conf.d' This parameter is irrelevant on systems, notably Solaris as of Solaris 10, that ignore socket permissions entirely. There, one can achieve a - similar effect by pointing unix_socket_directories to a + similar effect by pointing unix_socket_directories to a directory having search permission limited to the desired audience. This parameter is also irrelevant on Windows, which does not have Unix-domain sockets. @@ -815,7 +815,7 @@ include_dir 'conf.d' bonjour (boolean) - bonjour configuration parameter + bonjour configuration parameter @@ -830,14 +830,14 @@ include_dir 'conf.d' bonjour_name (string) - bonjour_name configuration parameter + bonjour_name configuration parameter Specifies the Bonjour service name. The computer name is used if this parameter is set to the - empty string '' (which is the default). This parameter is + empty string '' (which is the default). This parameter is ignored if the server was not compiled with Bonjour support. This parameter can only be set at server start. @@ -848,7 +848,7 @@ include_dir 'conf.d' tcp_keepalives_idle (integer) - tcp_keepalives_idle configuration parameter + tcp_keepalives_idle configuration parameter @@ -857,7 +857,7 @@ include_dir 'conf.d' should send a keepalive message to the client. A value of 0 uses the system default. This parameter is supported only on systems that support - TCP_KEEPIDLE or an equivalent socket option, and on + TCP_KEEPIDLE or an equivalent socket option, and on Windows; on other systems, it must be zero. In sessions connected via a Unix-domain socket, this parameter is ignored and always reads as zero. @@ -874,7 +874,7 @@ include_dir 'conf.d' tcp_keepalives_interval (integer) - tcp_keepalives_interval configuration parameter + tcp_keepalives_interval configuration parameter @@ -883,7 +883,7 @@ include_dir 'conf.d' that is not acknowledged by the client should be retransmitted. A value of 0 uses the system default. This parameter is supported only on systems that support - TCP_KEEPINTVL or an equivalent socket option, and on + TCP_KEEPINTVL or an equivalent socket option, and on Windows; on other systems, it must be zero. 
In sessions connected via a Unix-domain socket, this parameter is ignored and always reads as zero. @@ -900,7 +900,7 @@ include_dir 'conf.d' tcp_keepalives_count (integer) - tcp_keepalives_count configuration parameter + tcp_keepalives_count configuration parameter @@ -909,7 +909,7 @@ include_dir 'conf.d' the server's connection to the client is considered dead. A value of 0 uses the system default. This parameter is supported only on systems that support - TCP_KEEPCNT or an equivalent socket option; + TCP_KEEPCNT or an equivalent socket option; on other systems, it must be zero. In sessions connected via a Unix-domain socket, this parameter is ignored and always reads as zero. @@ -930,10 +930,10 @@ include_dir 'conf.d' authentication_timeout (integer) - timeoutclient authentication - client authenticationtimeout during + timeoutclient authentication + client authenticationtimeout during - authentication_timeout configuration parameter + authentication_timeout configuration parameter @@ -943,8 +943,8 @@ include_dir 'conf.d' would-be client has not completed the authentication protocol in this much time, the server closes the connection. This prevents hung clients from occupying a connection indefinitely. - The default is one minute (1m). - This parameter can only be set in the postgresql.conf + The default is one minute (1m). + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -953,16 +953,16 @@ include_dir 'conf.d' ssl (boolean) - ssl configuration parameter + ssl configuration parameter - Enables SSL connections. Please read + Enables SSL connections. Please read before using this. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - The default is off. + The default is off. @@ -970,7 +970,7 @@ include_dir 'conf.d' ssl_ca_file (string) - ssl_ca_file configuration parameter + ssl_ca_file configuration parameter @@ -978,7 +978,7 @@ include_dir 'conf.d' Specifies the name of the file containing the SSL server certificate authority (CA). Relative paths are relative to the data directory. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is empty, meaning no CA file is loaded, and client certificate verification is not performed. @@ -989,14 +989,14 @@ include_dir 'conf.d' ssl_cert_file (string) - ssl_cert_file configuration parameter + ssl_cert_file configuration parameter Specifies the name of the file containing the SSL server certificate. Relative paths are relative to the data directory. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is server.crt. @@ -1006,7 +1006,7 @@ include_dir 'conf.d' ssl_crl_file (string) - ssl_crl_file configuration parameter + ssl_crl_file configuration parameter @@ -1014,7 +1014,7 @@ include_dir 'conf.d' Specifies the name of the file containing the SSL server certificate revocation list (CRL). Relative paths are relative to the data directory. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is empty, meaning no CRL file is loaded. 
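Taken together, the SSL file parameters above might be combined as in the following illustrative fragment; root.crt and root.crl are assumed file names, and relative paths are resolved against the data directory:

ssl = on
ssl_ca_file = 'root.crt'       # enables client certificate verification
ssl_cert_file = 'server.crt'   # the default
ssl_key_file = 'server.key'    # the default
ssl_crl_file = 'root.crl'      # optional revocation list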
@@ -1024,14 +1024,14 @@ include_dir 'conf.d' ssl_key_file (string) - ssl_key_file configuration parameter + ssl_key_file configuration parameter Specifies the name of the file containing the SSL server private key. Relative paths are relative to the data directory. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is server.key. @@ -1041,19 +1041,19 @@ include_dir 'conf.d' ssl_ciphers (string) - ssl_ciphers configuration parameter + ssl_ciphers configuration parameter - Specifies a list of SSL cipher suites that are allowed to be + Specifies a list of SSL cipher suites that are allowed to be used on secure connections. See - the ciphers manual page - in the OpenSSL package for the syntax of this setting + the ciphers manual page + in the OpenSSL package for the syntax of this setting and a list of supported values. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - The default value is HIGH:MEDIUM:+3DES:!aNULL. The + The default value is HIGH:MEDIUM:+3DES:!aNULL. The default is usually a reasonable choice unless you have specific security requirements. @@ -1065,7 +1065,7 @@ include_dir 'conf.d' HIGH - Cipher suites that use ciphers from HIGH group (e.g., + Cipher suites that use ciphers from HIGH group (e.g., AES, Camellia, 3DES) @@ -1075,7 +1075,7 @@ include_dir 'conf.d' MEDIUM - Cipher suites that use ciphers from MEDIUM group + Cipher suites that use ciphers from MEDIUM group (e.g., RC4, SEED) @@ -1085,11 +1085,11 @@ include_dir 'conf.d' +3DES - The OpenSSL default order for HIGH is problematic + The OpenSSL default order for HIGH is problematic because it orders 3DES higher than AES128. This is wrong because 3DES offers less security than AES128, and it is also much - slower. +3DES reorders it after all other - HIGH and MEDIUM ciphers. + slower. +3DES reorders it after all other + HIGH and MEDIUM ciphers. @@ -1111,7 +1111,7 @@ include_dir 'conf.d' Available cipher suite details will vary across OpenSSL versions. Use the command openssl ciphers -v 'HIGH:MEDIUM:+3DES:!aNULL' to - see actual details for the currently installed OpenSSL + see actual details for the currently installed OpenSSL version. Note that this list is filtered at run time based on the server key type. @@ -1121,16 +1121,16 @@ include_dir 'conf.d' ssl_prefer_server_ciphers (boolean) - ssl_prefer_server_ciphers configuration parameter + ssl_prefer_server_ciphers configuration parameter Specifies whether to use the server's SSL cipher preferences, rather than the client's. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - The default is true. + The default is true. @@ -1146,28 +1146,28 @@ include_dir 'conf.d' ssl_ecdh_curve (string) - ssl_ecdh_curve configuration parameter + ssl_ecdh_curve configuration parameter - Specifies the name of the curve to use in ECDH key + Specifies the name of the curve to use in ECDH key exchange. It needs to be supported by all clients that connect. It does not need to be the same curve used by the server's Elliptic Curve key. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - The default is prime256v1. + The default is prime256v1. 
OpenSSL names for the most common curves are: - prime256v1 (NIST P-256), - secp384r1 (NIST P-384), - secp521r1 (NIST P-521). + prime256v1 (NIST P-256), + secp384r1 (NIST P-384), + secp521r1 (NIST P-521). The full list of available curves can be shown with the command openssl ecparam -list_curves. Not all of them - are usable in TLS though. + are usable in TLS though. @@ -1175,17 +1175,17 @@ include_dir 'conf.d' password_encryption (enum) - password_encryption configuration parameter + password_encryption configuration parameter When a password is specified in or , this parameter determines the algorithm - to use to encrypt the password. The default value is md5, - which stores the password as an MD5 hash (on is also - accepted, as alias for md5). Setting this parameter to - scram-sha-256 will encrypt the password with SCRAM-SHA-256. + to use to encrypt the password. The default value is md5, + which stores the password as an MD5 hash (on is also + accepted, as alias for md5). Setting this parameter to + scram-sha-256 will encrypt the password with SCRAM-SHA-256. Note that older clients might lack support for the SCRAM authentication @@ -1198,7 +1198,7 @@ include_dir 'conf.d' ssl_dh_params_file (string) - ssl_dh_params_file configuration parameter + ssl_dh_params_file configuration parameter @@ -1213,7 +1213,7 @@ include_dir 'conf.d' - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -1222,7 +1222,7 @@ include_dir 'conf.d' krb_server_keyfile (string) - krb_server_keyfile configuration parameter + krb_server_keyfile configuration parameter @@ -1230,7 +1230,7 @@ include_dir 'conf.d' Sets the location of the Kerberos server key file. See for details. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -1245,8 +1245,8 @@ include_dir 'conf.d' Sets whether GSSAPI user names should be treated case-insensitively. - The default is off (case sensitive). This parameter can only be - set in the postgresql.conf file or on the server command line. + The default is off (case sensitive). This parameter can only be + set in the postgresql.conf file or on the server command line. @@ -1254,43 +1254,43 @@ include_dir 'conf.d' db_user_namespace (boolean) - db_user_namespace configuration parameter + db_user_namespace configuration parameter This parameter enables per-database user names. It is off by default. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - If this is on, you should create users as username@dbname. - When username is passed by a connecting client, - @ and the database name are appended to the user + If this is on, you should create users as username@dbname. + When username is passed by a connecting client, + @ and the database name are appended to the user name and that database-specific user name is looked up by the server. Note that when you create users with names containing - @ within the SQL environment, you will need to + @ within the SQL environment, you will need to quote the user name. With this parameter enabled, you can still create ordinary global - users. Simply append @ when specifying the user - name in the client, e.g. joe@. The @ + users. Simply append @ when specifying the user + name in the client, e.g. joe@. The @ will be stripped off before the user name is looked up by the server. 
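As a hedged illustration of the quoting rule just described (mydb and the password are placeholders):

CREATE USER "joe@mydb" PASSWORD 'secret';  -- per-database user; note the quotes
-- A client then connects with user name joe@mydb, or with joe@ to
-- reach a global user named joe.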
- db_user_namespace causes the client's and + db_user_namespace causes the client's and server's user name representation to differ. Authentication checks are always done with the server's user name so authentication methods must be configured for the server's user name, not the client's. Because - md5 uses the user name as salt on both the - client and server, md5 cannot be used with - db_user_namespace. + md5 uses the user name as salt on both the + client and server, md5 cannot be used with + db_user_namespace. @@ -1317,15 +1317,15 @@ include_dir 'conf.d' shared_buffers (integer) - shared_buffers configuration parameter + shared_buffers configuration parameter Sets the amount of memory the database server uses for shared memory buffers. The default is typically 128 megabytes - (128MB), but might be less if your kernel settings will - not support it (as determined during initdb). + (128MB), but might be less if your kernel settings will + not support it (as determined during initdb). This setting must be at least 128 kilobytes. (Non-default values of BLCKSZ change the minimum.) However, settings significantly higher than the minimum are usually needed @@ -1358,7 +1358,7 @@ include_dir 'conf.d' huge_pages (enum) - huge_pages configuration parameter + huge_pages configuration parameter @@ -1392,7 +1392,7 @@ include_dir 'conf.d' temp_buffers (integer) - temp_buffers configuration parameter + temp_buffers configuration parameter @@ -1400,7 +1400,7 @@ include_dir 'conf.d' Sets the maximum number of temporary buffers used by each database session. These are session-local buffers used only for access to temporary tables. The default is eight megabytes - (8MB). The setting can be changed within individual + (8MB). The setting can be changed within individual sessions, but only before the first use of temporary tables within the session; subsequent attempts to change the value will have no effect on that session. @@ -1408,10 +1408,10 @@ include_dir 'conf.d' A session will allocate temporary buffers as needed up to the limit - given by temp_buffers. The cost of setting a large + given by temp_buffers. The cost of setting a large value in sessions that do not actually need many temporary buffers is only a buffer descriptor, or about 64 bytes, per - increment in temp_buffers. However if a buffer is + increment in temp_buffers. However if a buffer is actually used an additional 8192 bytes will be consumed for it (or in general, BLCKSZ bytes). @@ -1421,13 +1421,13 @@ include_dir 'conf.d' max_prepared_transactions (integer) - max_prepared_transactions configuration parameter + max_prepared_transactions configuration parameter Sets the maximum number of transactions that can be in the - prepared state simultaneously (see prepared state simultaneously (see ). Setting this parameter to zero (which is the default) disables the prepared-transaction feature. @@ -1454,14 +1454,14 @@ include_dir 'conf.d' work_mem (integer) - work_mem configuration parameter + work_mem configuration parameter Specifies the amount of memory to be used by internal sort operations and hash tables before writing to temporary disk files. The value - defaults to four megabytes (4MB). + defaults to four megabytes (4MB). Note that for a complex query, several sort or hash operations might be running in parallel; each operation will be allowed to use as much memory as this value specifies before it starts to write data into temporary @@ -1469,10 +1469,10 @@ include_dir 'conf.d' concurrently. 
Therefore, the total memory used could be many times the value of work_mem; it is necessary to keep this fact in mind when choosing the value. Sort operations are - used for ORDER BY, DISTINCT, and + used for ORDER BY, DISTINCT, and merge joins. Hash tables are used in hash joins, hash-based aggregation, and - hash-based processing of IN subqueries. + hash-based processing of IN subqueries. @@ -1480,15 +1480,15 @@ include_dir 'conf.d' maintenance_work_mem (integer) - maintenance_work_mem configuration parameter + maintenance_work_mem configuration parameter Specifies the maximum amount of memory to be used by maintenance operations, such as VACUUM, CREATE - INDEX, and ALTER TABLE ADD FOREIGN KEY. It defaults - to 64 megabytes (64MB). Since only one of these + INDEX, and ALTER TABLE ADD FOREIGN KEY. It defaults + to 64 megabytes (64MB). Since only one of these operations can be executed at a time by a database session, and an installation normally doesn't have many of them running concurrently, it's safe to set this value significantly larger @@ -1508,7 +1508,7 @@ include_dir 'conf.d' autovacuum_work_mem (integer) - autovacuum_work_mem configuration parameter + autovacuum_work_mem configuration parameter @@ -1525,26 +1525,26 @@ include_dir 'conf.d' max_stack_depth (integer) - max_stack_depth configuration parameter + max_stack_depth configuration parameter Specifies the maximum safe depth of the server's execution stack. The ideal setting for this parameter is the actual stack size limit - enforced by the kernel (as set by ulimit -s or local + enforced by the kernel (as set by ulimit -s or local equivalent), less a safety margin of a megabyte or so. The safety margin is needed because the stack depth is not checked in every routine in the server, but only in key potentially-recursive routines such as expression evaluation. The default setting is two - megabytes (2MB), which is conservatively small and + megabytes (2MB), which is conservatively small and unlikely to risk crashes. However, it might be too small to allow execution of complex functions. Only superusers can change this setting. - Setting max_stack_depth higher than + Setting max_stack_depth higher than the actual kernel limit will mean that a runaway recursive function can crash an individual backend process. On platforms where PostgreSQL can determine the kernel limit, @@ -1558,25 +1558,25 @@ include_dir 'conf.d' dynamic_shared_memory_type (enum) - dynamic_shared_memory_type configuration parameter + dynamic_shared_memory_type configuration parameter Specifies the dynamic shared memory implementation that the server - should use. Possible values are posix (for POSIX shared - memory allocated using shm_open), sysv - (for System V shared memory allocated via shmget), - windows (for Windows shared memory), mmap + should use. Possible values are posix (for POSIX shared + memory allocated using shm_open), sysv + (for System V shared memory allocated via shmget), + windows (for Windows shared memory), mmap (to simulate shared memory using memory-mapped files stored in the - data directory), and none (to disable this feature). + data directory), and none (to disable this feature). Not all values are supported on all platforms; the first supported option is the default for that platform. 
The use of the - mmap option, which is not the default on any platform, + mmap option, which is not the default on any platform, is generally discouraged because the operating system may write modified pages back to disk repeatedly, increasing system I/O load; however, it may be useful for debugging, when the - pg_dynshmem directory is stored on a RAM disk, or when + pg_dynshmem directory is stored on a RAM disk, or when other shared memory facilities are not available. @@ -1592,7 +1592,7 @@ include_dir 'conf.d' temp_file_limit (integer) - temp_file_limit configuration parameter + temp_file_limit configuration parameter @@ -1601,13 +1601,13 @@ include_dir 'conf.d' for temporary files, such as sort and hash temporary files, or the storage file for a held cursor. A transaction attempting to exceed this limit will be canceled. - The value is specified in kilobytes, and -1 (the + The value is specified in kilobytes, and -1 (the default) means no limit. Only superusers can change this setting. This setting constrains the total space used at any instant by all - temporary files used by a given PostgreSQL process. + temporary files used by a given PostgreSQL process. It should be noted that disk space used for explicit temporary tables, as opposed to temporary files used behind-the-scenes in query execution, does not count against this limit. @@ -1625,7 +1625,7 @@ include_dir 'conf.d' max_files_per_process (integer) - max_files_per_process configuration parameter + max_files_per_process configuration parameter @@ -1637,7 +1637,7 @@ include_dir 'conf.d' allow individual processes to open many more files than the system can actually support if many processes all try to open that many files. If you find yourself seeing Too many open - files failures, try reducing this setting. + files failures, try reducing this setting. This parameter can only be set at server start. @@ -1684,7 +1684,7 @@ include_dir 'conf.d' vacuum_cost_delay (integer) - vacuum_cost_delay configuration parameter + vacuum_cost_delay configuration parameter @@ -1702,7 +1702,7 @@ include_dir 'conf.d' When using cost-based vacuuming, appropriate values for - vacuum_cost_delay are usually quite small, perhaps + vacuum_cost_delay are usually quite small, perhaps 10 or 20 milliseconds. Adjusting vacuum's resource consumption is best done by changing the other vacuum cost parameters. @@ -1712,7 +1712,7 @@ include_dir 'conf.d' vacuum_cost_page_hit (integer) - vacuum_cost_page_hit configuration parameter + vacuum_cost_page_hit configuration parameter @@ -1728,7 +1728,7 @@ include_dir 'conf.d' vacuum_cost_page_miss (integer) - vacuum_cost_page_miss configuration parameter + vacuum_cost_page_miss configuration parameter @@ -1744,7 +1744,7 @@ include_dir 'conf.d' vacuum_cost_page_dirty (integer) - vacuum_cost_page_dirty configuration parameter + vacuum_cost_page_dirty configuration parameter @@ -1760,7 +1760,7 @@ include_dir 'conf.d' vacuum_cost_limit (integer) - vacuum_cost_limit configuration parameter + vacuum_cost_limit configuration parameter @@ -1792,8 +1792,8 @@ include_dir 'conf.d' There is a separate server - process called the background writer, whose function - is to issue writes of dirty (new or modified) shared + process called the background writer, whose function + is to issue writes of dirty (new or modified) shared buffers. It writes shared buffers so server processes handling user queries seldom or never need to wait for a write to occur. 
However, the background writer does cause a net overall @@ -1808,7 +1808,7 @@ include_dir 'conf.d' bgwriter_delay (integer) - bgwriter_delay configuration parameter + bgwriter_delay configuration parameter @@ -1816,16 +1816,16 @@ include_dir 'conf.d' Specifies the delay between activity rounds for the background writer. In each round the writer issues writes for some number of dirty buffers (controllable by the - following parameters). It then sleeps for bgwriter_delay + following parameters). It then sleeps for bgwriter_delay milliseconds, and repeats. When there are no dirty buffers in the buffer pool, though, it goes into a longer sleep regardless of - bgwriter_delay. The default value is 200 - milliseconds (200ms). Note that on many systems, the + bgwriter_delay. The default value is 200 + milliseconds (200ms). Note that on many systems, the effective resolution of sleep delays is 10 milliseconds; setting - bgwriter_delay to a value that is not a multiple of 10 + bgwriter_delay to a value that is not a multiple of 10 might have the same results as setting it to the next higher multiple of 10. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -1833,7 +1833,7 @@ include_dir 'conf.d' bgwriter_lru_maxpages (integer) - bgwriter_lru_maxpages configuration parameter + bgwriter_lru_maxpages configuration parameter @@ -1843,7 +1843,7 @@ include_dir 'conf.d' background writing. (Note that checkpoints, which are managed by a separate, dedicated auxiliary process, are unaffected.) The default value is 100 buffers. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -1852,7 +1852,7 @@ include_dir 'conf.d' bgwriter_lru_multiplier (floating point) - bgwriter_lru_multiplier configuration parameter + bgwriter_lru_multiplier configuration parameter @@ -1860,18 +1860,18 @@ include_dir 'conf.d' The number of dirty buffers written in each round is based on the number of new buffers that have been needed by server processes during recent rounds. The average recent need is multiplied by - bgwriter_lru_multiplier to arrive at an estimate of the + bgwriter_lru_multiplier to arrive at an estimate of the number of buffers that will be needed during the next round. Dirty buffers are written until there are that many clean, reusable buffers - available. (However, no more than bgwriter_lru_maxpages + available. (However, no more than bgwriter_lru_maxpages buffers will be written per round.) - Thus, a setting of 1.0 represents a just in time policy + Thus, a setting of 1.0 represents a just in time policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -1880,7 +1880,7 @@ include_dir 'conf.d' bgwriter_flush_after (integer) - bgwriter_flush_after configuration parameter + bgwriter_flush_after configuration parameter @@ -1897,10 +1897,10 @@ include_dir 'conf.d' cache, where performance might degrade. This setting may have no effect on some platforms. The valid range is between 0, which disables forced writeback, and - 2MB. The default is 512kB on Linux, - 0 elsewhere. (If BLCKSZ is not 8kB, + 2MB. 
The default is 512kB on Linux, + 0 elsewhere. (If BLCKSZ is not 8kB, the default and maximum values scale proportionally to it.) - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -1923,15 +1923,15 @@ include_dir 'conf.d' effective_io_concurrency (integer) - effective_io_concurrency configuration parameter + effective_io_concurrency configuration parameter Sets the number of concurrent disk I/O operations that - PostgreSQL expects can be executed + PostgreSQL expects can be executed simultaneously. Raising this value will increase the number of I/O - operations that any individual PostgreSQL session + operations that any individual PostgreSQL session attempts to initiate in parallel. The allowed range is 1 to 1000, or zero to disable issuance of asynchronous I/O requests. Currently, this setting only affects bitmap heap scans. @@ -1951,7 +1951,7 @@ include_dir 'conf.d' - Asynchronous I/O depends on an effective posix_fadvise + Asynchronous I/O depends on an effective posix_fadvise function, which some operating systems lack. If the function is not present then setting this parameter to anything but zero will result in an error. On some operating systems (e.g., Solaris), the function @@ -1970,7 +1970,7 @@ include_dir 'conf.d' max_worker_processes (integer) - max_worker_processes configuration parameter + max_worker_processes configuration parameter @@ -1997,7 +1997,7 @@ include_dir 'conf.d' max_parallel_workers_per_gather (integer) - max_parallel_workers_per_gather configuration parameter + max_parallel_workers_per_gather configuration parameter @@ -2021,7 +2021,7 @@ include_dir 'conf.d' account when choosing a value for this setting, as well as when configuring other settings that control resource utilization, such as . Resource limits such as - work_mem are applied individually to each worker, + work_mem are applied individually to each worker, which means the total utilization may be much higher across all processes than it would normally be for any single process. For example, a parallel query using 4 workers may use up to 5 times @@ -2039,7 +2039,7 @@ include_dir 'conf.d' max_parallel_workers (integer) - max_parallel_workers configuration parameter + max_parallel_workers configuration parameter @@ -2059,7 +2059,7 @@ include_dir 'conf.d' backend_flush_after (integer) - backend_flush_after configuration parameter + backend_flush_after configuration parameter @@ -2076,7 +2076,7 @@ include_dir 'conf.d' than the OS's page cache, where performance might degrade. This setting may have no effect on some platforms. The valid range is between 0, which disables forced writeback, - and 2MB. The default is 0, i.e., no + and 2MB. The default is 0, i.e., no forced writeback. (If BLCKSZ is not 8kB, the maximum value scales proportionally to it.) @@ -2086,13 +2086,13 @@ include_dir 'conf.d' old_snapshot_threshold (integer) - old_snapshot_threshold configuration parameter + old_snapshot_threshold configuration parameter Sets the minimum time that a snapshot can be used without risk of a - snapshot too old error occurring when using the snapshot. + snapshot too old error occurring when using the snapshot. This parameter can only be set at server start. @@ -2107,12 +2107,12 @@ include_dir 'conf.d' - A value of -1 disables this feature, and is the default. + A value of -1 disables this feature, and is the default. Useful values for production work probably range from a small number of hours to a few days. 
The setting will be coerced to a granularity - of minutes, and small numbers (such as 0 or - 1min) are only allowed because they may sometimes be - useful for testing. While a setting as high as 60d is + of minutes, and small numbers (such as 0 or + 1min) are only allowed because they may sometimes be + useful for testing. While a setting as high as 60d is allowed, please note that in many workloads extreme bloat or transaction ID wraparound may occur in much shorter time frames. @@ -2120,10 +2120,10 @@ include_dir 'conf.d' When this feature is enabled, freed space at the end of a relation cannot be released to the operating system, since that could remove - information needed to detect the snapshot too old + information needed to detect the snapshot too old condition. All space allocated to a relation remains associated with that relation for reuse only within that relation unless explicitly - freed (for example, with VACUUM FULL). + freed (for example, with VACUUM FULL). @@ -2135,7 +2135,7 @@ include_dir 'conf.d' Some tables cannot safely be vacuumed early, and so will not be affected by this setting, such as system catalogs. For such tables this setting will neither reduce bloat nor create a possibility - of a snapshot too old error on scanning. + of a snapshot too old error on scanning. @@ -2158,45 +2158,45 @@ include_dir 'conf.d' wal_level (enum) - wal_level configuration parameter + wal_level configuration parameter - wal_level determines how much information is written to - the WAL. The default value is replica, which writes enough + wal_level determines how much information is written to + the WAL. The default value is replica, which writes enough data to support WAL archiving and replication, including running - read-only queries on a standby server. minimal removes all + read-only queries on a standby server. minimal removes all logging except the information required to recover from a crash or immediate shutdown. Finally, - logical adds information necessary to support logical + logical adds information necessary to support logical decoding. Each level includes the information logged at all lower levels. This parameter can only be set at server start. - In minimal level, WAL-logging of some bulk + In minimal level, WAL-logging of some bulk operations can be safely skipped, which can make those operations much faster (see ). Operations in which this optimization can be applied include: - CREATE TABLE AS - CREATE INDEX - CLUSTER - COPY into tables that were created or truncated in the same + CREATE TABLE AS + CREATE INDEX + CLUSTER + COPY into tables that were created or truncated in the same transaction But minimal WAL does not contain enough information to reconstruct the - data from a base backup and the WAL logs, so replica or + data from a base backup and the WAL logs, so replica or higher must be used to enable WAL archiving () and streaming replication. - In logical level, the same information is logged as - with replica, plus information needed to allow + In logical level, the same information is logged as + with replica, plus information needed to allow extracting logical change sets from the WAL. Using a level of - logical will increase the WAL volume, particularly if many + logical will increase the WAL volume, particularly if many tables are configured for REPLICA IDENTITY FULL and - many UPDATE and DELETE statements are + many UPDATE and DELETE statements are executed. 
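As a concrete sketch of the trade-off just described, a server that feeds WAL archiving or a streaming standby needs at least:

wal_level = replica      # minimal cannot support archiving or replication
#wal_level = logical     # adds logical-decoding information, at the cost
                         # of extra WAL volume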
@@ -2210,14 +2210,14 @@ include_dir 'conf.d' fsync (boolean) - fsync configuration parameter + fsync configuration parameter - If this parameter is on, the PostgreSQL server + If this parameter is on, the PostgreSQL server will try to make sure that updates are physically written to - disk, by issuing fsync() system calls or various + disk, by issuing fsync() system calls or various equivalent methods (see ). This ensures that the database cluster can recover to a consistent state after an operating system or hardware crash. @@ -2249,7 +2249,7 @@ include_dir 'conf.d' off to on, it is necessary to force all modified buffers in the kernel to durable storage. This can be done while the cluster is shutdown or while fsync is on by running initdb - --sync-only, running sync, unmounting the + --sync-only, running sync, unmounting the file system, or rebooting the server. @@ -2261,7 +2261,7 @@ include_dir 'conf.d' - fsync can only be set in the postgresql.conf + fsync can only be set in the postgresql.conf file or on the server command line. If you turn this parameter off, also consider turning off . @@ -2272,26 +2272,26 @@ include_dir 'conf.d' synchronous_commit (enum) - synchronous_commit configuration parameter + synchronous_commit configuration parameter Specifies whether transaction commit will wait for WAL records - to be written to disk before the command returns a success - indication to the client. Valid values are on, - remote_apply, remote_write, local, - and off. The default, and safe, setting - is on. When off, there can be a delay between + to be written to disk before the command returns a success + indication to the client. Valid values are on, + remote_apply, remote_write, local, + and off. The default, and safe, setting + is on. When off, there can be a delay between when success is reported to the client and when the transaction is really guaranteed to be safe against a server crash. (The maximum delay is three times .) Unlike - , setting this parameter to off + , setting this parameter to off does not create any risk of database inconsistency: an operating system or database crash might result in some recent allegedly-committed transactions being lost, but the database state will be just the same as if those transactions had - been aborted cleanly. So, turning synchronous_commit off + been aborted cleanly. So, turning synchronous_commit off can be a useful alternative when performance is more important than exact certainty about the durability of a transaction. For more discussion see . @@ -2300,32 +2300,32 @@ include_dir 'conf.d' If is non-empty, this parameter also controls whether or not transaction commits will wait for their WAL records to be replicated to the standby server(s). - When set to on, commits will wait until replies + When set to on, commits will wait until replies from the current synchronous standby(s) indicate they have received the commit record of the transaction and flushed it to disk. This ensures the transaction will not be lost unless both the primary and all synchronous standbys suffer corruption of their database storage. - When set to remote_apply, commits will wait until replies + When set to remote_apply, commits will wait until replies from the current synchronous standby(s) indicate they have received the commit record of the transaction and applied it, so that it has become visible to queries on the standby(s). 
- When set to remote_write, commits will wait until replies + When set to remote_write, commits will wait until replies from the current synchronous standby(s) indicate they have received the commit record of the transaction and written it out to their operating system. This setting is sufficient to ensure data preservation even if a standby instance of - PostgreSQL were to crash, but not if the standby + PostgreSQL were to crash, but not if the standby suffers an operating-system-level crash, since the data has not necessarily reached stable storage on the standby. - Finally, the setting local causes commits to wait for + Finally, the setting local causes commits to wait for local flush to disk, but not for replication. This is not usually desirable when synchronous replication is in use, but is provided for completeness. - If synchronous_standby_names is empty, the settings - on, remote_apply, remote_write - and local all provide the same synchronization level: + If synchronous_standby_names is empty, the settings + on, remote_apply, remote_write + and local all provide the same synchronization level: transaction commits only wait for local flush to disk. @@ -2335,7 +2335,7 @@ include_dir 'conf.d' transactions commit synchronously and others asynchronously. For example, to make a single multistatement transaction commit asynchronously when the default is the opposite, issue SET - LOCAL synchronous_commit TO OFF within the transaction. + LOCAL synchronous_commit TO OFF within the transaction. @@ -2343,7 +2343,7 @@ include_dir 'conf.d' wal_sync_method (enum) - wal_sync_method configuration parameter + wal_sync_method configuration parameter @@ -2356,41 +2356,41 @@ include_dir 'conf.d' - open_datasync (write WAL files with open() option O_DSYNC) + open_datasync (write WAL files with open() option O_DSYNC) - fdatasync (call fdatasync() at each commit) + fdatasync (call fdatasync() at each commit) - fsync (call fsync() at each commit) + fsync (call fsync() at each commit) - fsync_writethrough (call fsync() at each commit, forcing write-through of any disk write cache) + fsync_writethrough (call fsync() at each commit, forcing write-through of any disk write cache) - open_sync (write WAL files with open() option O_SYNC) + open_sync (write WAL files with open() option O_SYNC) - The open_* options also use O_DIRECT if available. + The open_* options also use O_DIRECT if available. Not all of these choices are available on all platforms. The default is the first method in the above list that is supported - by the platform, except that fdatasync is the default on + by the platform, except that fdatasync is the default on Linux. The default is not necessarily ideal; it might be necessary to change this setting or other aspects of your system configuration in order to create a crash-safe configuration or achieve optimal performance. These aspects are discussed in . - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2399,12 +2399,12 @@ include_dir 'conf.d' full_page_writes (boolean) - full_page_writes configuration parameter + full_page_writes configuration parameter - When this parameter is on, the PostgreSQL server + When this parameter is on, the PostgreSQL server writes the entire content of each disk page to WAL during the first modification of that page after a checkpoint. 
This is needed because @@ -2436,9 +2436,9 @@ include_dir 'conf.d' - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - The default is on. + The default is on. @@ -2446,12 +2446,12 @@ include_dir 'conf.d' wal_log_hints (boolean) - wal_log_hints configuration parameter + wal_log_hints configuration parameter - When this parameter is on, the PostgreSQL + When this parameter is on, the PostgreSQL server writes the entire content of each disk page to WAL during the first modification of that page after a checkpoint, even for non-critical modifications of so-called hint bits. @@ -2465,7 +2465,7 @@ include_dir 'conf.d' - This parameter can only be set at server start. The default value is off. + This parameter can only be set at server start. The default value is off. @@ -2473,16 +2473,16 @@ include_dir 'conf.d' wal_compression (boolean) - wal_compression configuration parameter + wal_compression configuration parameter - When this parameter is on, the PostgreSQL + When this parameter is on, the PostgreSQL server compresses a full page image written to WAL when is on or during a base backup. A compressed page image will be decompressed during WAL replay. - The default value is off. + The default value is off. Only superusers can change this setting. @@ -2498,7 +2498,7 @@ include_dir 'conf.d' wal_buffers (integer) - wal_buffers configuration parameter + wal_buffers configuration parameter @@ -2530,24 +2530,24 @@ include_dir 'conf.d' wal_writer_delay (integer) - wal_writer_delay configuration parameter + wal_writer_delay configuration parameter Specifies how often the WAL writer flushes WAL. After flushing WAL it - sleeps for wal_writer_delay milliseconds, unless woken up + sleeps for wal_writer_delay milliseconds, unless woken up by an asynchronously committing transaction. If the last flush - happened less than wal_writer_delay milliseconds ago and - less than wal_writer_flush_after bytes of WAL have been + happened less than wal_writer_delay milliseconds ago and + less than wal_writer_flush_after bytes of WAL have been produced since, then WAL is only written to the operating system, not flushed to disk. - The default value is 200 milliseconds (200ms). Note that + The default value is 200 milliseconds (200ms). Note that on many systems, the effective resolution of sleep delays is 10 - milliseconds; setting wal_writer_delay to a value that is + milliseconds; setting wal_writer_delay to a value that is not a multiple of 10 might have the same results as setting it to the next higher multiple of 10. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -2555,19 +2555,19 @@ include_dir 'conf.d' wal_writer_flush_after (integer) - wal_writer_flush_after configuration parameter + wal_writer_flush_after configuration parameter Specifies how often the WAL writer flushes WAL. If the last flush - happened less than wal_writer_delay milliseconds ago and - less than wal_writer_flush_after bytes of WAL have been + happened less than wal_writer_delay milliseconds ago and + less than wal_writer_flush_after bytes of WAL have been produced since, then WAL is only written to the operating system, not - flushed to disk. If wal_writer_flush_after is set - to 0 then WAL data is flushed immediately. The default is + flushed to disk. If wal_writer_flush_after is set + to 0 then WAL data is flushed immediately. The default is 1MB. 
This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -2575,7 +2575,7 @@ include_dir 'conf.d' commit_delay (integer) - commit_delay configuration parameter + commit_delay configuration parameter @@ -2592,15 +2592,15 @@ include_dir 'conf.d' commit_siblings other transactions are active when a flush is about to be initiated. Also, no delays are performed if fsync is disabled. - The default commit_delay is zero (no delay). + The default commit_delay is zero (no delay). Only superusers can change this setting. - In PostgreSQL releases prior to 9.3, + In PostgreSQL releases prior to 9.3, commit_delay behaved differently and was much less effective: it affected only commits, rather than all WAL flushes, and waited for the entire configured delay even if the WAL flush - was completed sooner. Beginning in PostgreSQL 9.3, + was completed sooner. Beginning in PostgreSQL 9.3, the first process that becomes ready to flush waits for the configured interval, while subsequent processes wait only until the leader completes the flush operation. @@ -2611,13 +2611,13 @@ include_dir 'conf.d' commit_siblings (integer) - commit_siblings configuration parameter + commit_siblings configuration parameter Minimum number of concurrent open transactions to require - before performing the commit_delay delay. A larger + before performing the commit_delay delay. A larger value makes it more probable that at least one other transaction will become ready to commit during the delay interval. The default is five transactions. @@ -2634,17 +2634,17 @@ include_dir 'conf.d' checkpoint_timeout (integer) - checkpoint_timeout configuration parameter + checkpoint_timeout configuration parameter Maximum time between automatic WAL checkpoints, in seconds. The valid range is between 30 seconds and one day. - The default is five minutes (5min). + The default is five minutes (5min). Increasing this parameter can increase the amount of time needed for crash recovery. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2653,14 +2653,14 @@ include_dir 'conf.d' checkpoint_completion_target (floating point) - checkpoint_completion_target configuration parameter + checkpoint_completion_target configuration parameter Specifies the target of checkpoint completion, as a fraction of total time between checkpoints. The default is 0.5. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2669,7 +2669,7 @@ include_dir 'conf.d' checkpoint_flush_after (integer) - checkpoint_flush_after configuration parameter + checkpoint_flush_after configuration parameter @@ -2686,10 +2686,10 @@ include_dir 'conf.d' than the OS's page cache, where performance might degrade. This setting may have no effect on some platforms. The valid range is between 0, which disables forced writeback, - and 2MB. The default is 256kB on - Linux, 0 elsewhere. (If BLCKSZ is not + and 2MB. The default is 256kB on + Linux, 0 elsewhere. (If BLCKSZ is not 8kB, the default and maximum values scale proportionally to it.) - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. 
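To make the checkpoint knobs above concrete, here is an illustrative postgresql.conf sketch; the interval is a hypothetical example, while the other two values are the documented defaults:

checkpoint_timeout = 15min              # hypothetical; valid range is 30 seconds to one day
checkpoint_completion_target = 0.5      # aim to finish writes halfway through the interval
checkpoint_flush_after = 256kB          # Linux default; 0 disables forced writeback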
@@ -2698,7 +2698,7 @@ include_dir 'conf.d' checkpoint_warning (integer) - checkpoint_warning configuration parameter + checkpoint_warning configuration parameter @@ -2706,11 +2706,11 @@ include_dir 'conf.d' Write a message to the server log if checkpoints caused by the filling of checkpoint segment files happen closer together than this many seconds (which suggests that - max_wal_size ought to be raised). The default is - 30 seconds (30s). Zero disables the warning. + max_wal_size ought to be raised). The default is + 30 seconds (30s). Zero disables the warning. No warnings will be generated if checkpoint_timeout is less than checkpoint_warning. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2719,19 +2719,19 @@ include_dir 'conf.d' max_wal_size (integer) - max_wal_size configuration parameter + max_wal_size configuration parameter Maximum size to let the WAL grow to between automatic WAL checkpoints. This is a soft limit; WAL size can exceed - max_wal_size under special circumstances, like - under heavy load, a failing archive_command, or a high - wal_keep_segments setting. The default is 1 GB. + max_wal_size under special circumstances, like + under heavy load, a failing archive_command, or a high + wal_keep_segments setting. The default is 1 GB. Increasing this parameter can increase the amount of time needed for crash recovery. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2740,7 +2740,7 @@ include_dir 'conf.d' min_wal_size (integer) - min_wal_size configuration parameter + min_wal_size configuration parameter @@ -2750,7 +2750,7 @@ include_dir 'conf.d' This can be used to ensure that enough WAL space is reserved to handle spikes in WAL usage, for example when running large batch jobs. The default is 80 MB. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -2765,29 +2765,29 @@ include_dir 'conf.d' archive_mode (enum) - archive_mode configuration parameter + archive_mode configuration parameter - When archive_mode is enabled, completed WAL segments + When archive_mode is enabled, completed WAL segments are sent to archive storage by setting - . In addition to off, - to disable, there are two modes: on, and - always. During normal operation, there is no - difference between the two modes, but when set to always + . In addition to off, + to disable, there are two modes: on, and + always. During normal operation, there is no + difference between the two modes, but when set to always the WAL archiver is enabled also during archive recovery or standby - mode. In always mode, all files restored from the archive + mode. In always mode, all files restored from the archive or streamed with streaming replication will be archived (again). See for details. - archive_mode and archive_command are - separate variables so that archive_command can be + archive_mode and archive_command are + separate variables so that archive_command can be changed without leaving archiving mode. This parameter can only be set at server start. - archive_mode cannot be enabled when - wal_level is set to minimal. + archive_mode cannot be enabled when + wal_level is set to minimal. 
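Read together, max_wal_size and min_wal_size bound normal WAL growth from both sides. A minimal sketch with example values only (the documented defaults are 1 GB and 80 MB):

max_wal_size = 2GB     # soft ceiling on WAL between automatic checkpoints
min_wal_size = 512MB   # keep at least this much WAL recycled for usage spikes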
@@ -2795,32 +2795,32 @@ include_dir 'conf.d' archive_command (string) - archive_command configuration parameter + archive_command configuration parameter The local shell command to execute to archive a completed WAL file - segment. Any %p in the string is + segment. Any %p in the string is replaced by the path name of the file to archive, and any - %f is replaced by only the file name. + %f is replaced by only the file name. (The path name is relative to the working directory of the server, i.e., the cluster's data directory.) - Use %% to embed an actual % character in the + Use %% to embed an actual % character in the command. It is important for the command to return a zero exit status only if it succeeds. For more information see . - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. It is ignored unless - archive_mode was enabled at server start. - If archive_command is an empty string (the default) while - archive_mode is enabled, WAL archiving is temporarily + archive_mode was enabled at server start. + If archive_command is an empty string (the default) while + archive_mode is enabled, WAL archiving is temporarily disabled, but the server continues to accumulate WAL segment files in the expectation that a command will soon be provided. Setting - archive_command to a command that does nothing but - return true, e.g. /bin/true (REM on + archive_command to a command that does nothing but + return true, e.g. /bin/true (REM on Windows), effectively disables archiving, but also breaks the chain of WAL files needed for archive recovery, so it should only be used in unusual circumstances. @@ -2831,7 +2831,7 @@ include_dir 'conf.d' archive_timeout (integer) - archive_timeout configuration parameter + archive_timeout configuration parameter @@ -2841,7 +2841,7 @@ include_dir 'conf.d' traffic (or has slack periods where it does so), there could be a long delay between the completion of a transaction and its safe recording in archive storage. To limit how old unarchived - data can be, you can set archive_timeout to force the + data can be, you can set archive_timeout to force the server to switch to a new WAL segment file periodically. When this parameter is greater than zero, the server will switch to a new segment file whenever this many seconds have elapsed since the last @@ -2850,13 +2850,13 @@ include_dir 'conf.d' no database activity). Note that archived files that are closed early due to a forced switch are still the same length as completely full files. Therefore, it is unwise to use a very short - archive_timeout — it will bloat your archive - storage. archive_timeout settings of a minute or so are + archive_timeout — it will bloat your archive + storage. archive_timeout settings of a minute or so are usually reasonable. You should consider using streaming replication, instead of archiving, if you want data to be copied off the master server more quickly than that. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -2871,7 +2871,7 @@ include_dir 'conf.d' These settings control the behavior of the built-in - streaming replication feature (see + streaming replication feature (see ). Servers will be either a Master or a Standby server. Masters can send data, while Standby(s) are always receivers of replicated data. 
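As a minimal sketch of the archiving rules just described, the following excerpt uses a hypothetical destination directory; the test ! -f guard makes the command return a non-zero status instead of overwriting an existing file, honoring the requirement that a zero exit status be returned only on success:

archive_mode = on
# /mnt/server/archivedir is hypothetical; %p is the file's path, %f its name:
archive_command = 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'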
When cascading replication @@ -2898,7 +2898,7 @@ include_dir 'conf.d' max_wal_senders (integer) - max_wal_senders configuration parameter + max_wal_senders configuration parameter @@ -2914,8 +2914,8 @@ include_dir 'conf.d' a timeout is reached, so this parameter should be set slightly higher than the maximum number of expected clients so disconnected clients can immediately reconnect. This parameter can only - be set at server start. wal_level must be set to - replica or higher to allow connections from standby + be set at server start. wal_level must be set to + replica or higher to allow connections from standby servers. @@ -2924,7 +2924,7 @@ include_dir 'conf.d' max_replication_slots (integer) - max_replication_slots configuration parameter + max_replication_slots configuration parameter @@ -2944,17 +2944,17 @@ include_dir 'conf.d' wal_keep_segments (integer) - wal_keep_segments configuration parameter + wal_keep_segments configuration parameter Specifies the minimum number of past log file segments kept in the - pg_wal + pg_wal directory, in case a standby server needs to fetch them for streaming replication. Each segment is normally 16 megabytes. If a standby server connected to the sending server falls behind by more than - wal_keep_segments segments, the sending server might remove + wal_keep_segments segments, the sending server might remove a WAL segment still needed by the standby, in which case the replication connection will be terminated. Downstream connections will also eventually fail as a result. (However, the standby @@ -2964,15 +2964,15 @@ include_dir 'conf.d' This sets only the minimum number of segments retained in - pg_wal; the system might need to retain more segments + pg_wal; the system might need to retain more segments for WAL archival or to recover from a checkpoint. If - wal_keep_segments is zero (the default), the system + wal_keep_segments is zero (the default), the system doesn't keep any extra segments for standby purposes, so the number of old WAL segments available to standby servers is a function of the location of the previous checkpoint and status of WAL archiving. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. @@ -2980,7 +2980,7 @@ include_dir 'conf.d' wal_sender_timeout (integer) - wal_sender_timeout configuration parameter + wal_sender_timeout configuration parameter @@ -2990,7 +2990,7 @@ include_dir 'conf.d' the sending server to detect a standby crash or network outage. A value of zero disables the timeout mechanism. This parameter can only be set in - the postgresql.conf file or on the server command line. + the postgresql.conf file or on the server command line. The default value is 60 seconds. @@ -2999,13 +2999,13 @@ include_dir 'conf.d' track_commit_timestamp (boolean) - track_commit_timestamp configuration parameter + track_commit_timestamp configuration parameter Record commit time of transactions. This parameter - can only be set in postgresql.conf file or on the server + can only be set in postgresql.conf file or on the server command line. The default value is off. @@ -3034,13 +3034,13 @@ include_dir 'conf.d' synchronous_standby_names (string) - synchronous_standby_names configuration parameter + synchronous_standby_names configuration parameter Specifies a list of standby servers that can support - synchronous replication, as described in + synchronous replication, as described in . 
There will be one or more active synchronous standbys; transactions waiting for commit will be allowed to proceed after @@ -3050,15 +3050,15 @@ include_dir 'conf.d' that are both currently connected and streaming data in real-time (as shown by a state of streaming in the - pg_stat_replication view). + pg_stat_replication view). Specifying more than one synchronous standby can allow for very high availability and protection against data loss. The name of a standby server for this purpose is the - application_name setting of the standby, as set in the + application_name setting of the standby, as set in the standby's connection information. In case of a physical replication - standby, this should be set in the primary_conninfo + standby, this should be set in the primary_conninfo setting in recovery.conf; the default is walreceiver. For logical replication, this can be set in the connection information of the subscription, and it @@ -3078,54 +3078,54 @@ ANY num_sync ( standby_name is the name of a standby server. - FIRST and ANY specify the method to choose + FIRST and ANY specify the method to choose synchronous standbys from the listed servers. - The keyword FIRST, coupled with + The keyword FIRST, coupled with num_sync, specifies a priority-based synchronous replication and makes transaction commits wait until their WAL records are replicated to num_sync synchronous standbys chosen based on their priorities. For example, a setting of - FIRST 3 (s1, s2, s3, s4) will cause each commit to wait for + FIRST 3 (s1, s2, s3, s4) will cause each commit to wait for replies from three higher-priority standbys chosen from standby servers - s1, s2, s3 and s4. + s1, s2, s3 and s4. The standbys whose names appear earlier in the list are given higher priority and will be considered as synchronous. Other standby servers appearing later in this list represent potential synchronous standbys. If any of the current synchronous standbys disconnects for whatever reason, it will be replaced immediately with the next-highest-priority - standby. The keyword FIRST is optional. + standby. The keyword FIRST is optional. - The keyword ANY, coupled with + The keyword ANY, coupled with num_sync, specifies a quorum-based synchronous replication and makes transaction commits - wait until their WAL records are replicated to at least + wait until their WAL records are replicated to at least num_sync listed standbys. - For example, a setting of ANY 3 (s1, s2, s3, s4) will cause + For example, a setting of ANY 3 (s1, s2, s3, s4) will cause each commit to proceed as soon as at least any three standbys of - s1, s2, s3 and s4 + s1, s2, s3 and s4 reply. - FIRST and ANY are case-insensitive. If these + FIRST and ANY are case-insensitive. If these keywords are used as the name of a standby server, its standby_name must be double-quoted. - The third syntax was used before PostgreSQL + The third syntax was used before PostgreSQL version 9.6 and is still supported. It's the same as the first syntax - with FIRST and + with FIRST and num_sync equal to 1. - For example, FIRST 1 (s1, s2) and s1, s2 have - the same meaning: either s1 or s2 is chosen + For example, FIRST 1 (s1, s2) and s1, s2 have + the same meaning: either s1 or s2 is chosen as a synchronous standby. - The special entry * matches any standby name. + The special entry * matches any standby name. There is no mechanism to enforce uniqueness of standby names. 
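Restated as actual settings, the two quoted forms look like this; the names s1 through s4 are the same hypothetical standby application_name values used above, and the two lines are alternatives, not a combined configuration:

# Priority-based: wait for the three highest-priority connected standbys:
synchronous_standby_names = 'FIRST 3 (s1, s2, s3, s4)'
# Quorum-based: wait until any three of the four listed standbys reply:
synchronous_standby_names = 'ANY 3 (s1, s2, s3, s4)'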
In case @@ -3136,7 +3136,7 @@ ANY num_sync ( standby_name should have the form of a valid SQL identifier, unless it - is *. You can use double-quoting if necessary. But note + is *. You can use double-quoting if necessary. But note that standby_names are compared to standby application names case-insensitively, whether double-quoted or not. @@ -3149,10 +3149,10 @@ ANY num_sync ( parameter to - local or off. + local or off. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -3161,13 +3161,13 @@ ANY num_sync ( vacuum_defer_cleanup_age (integer) - vacuum_defer_cleanup_age configuration parameter + vacuum_defer_cleanup_age configuration parameter - Specifies the number of transactions by which VACUUM and - HOT updates will defer cleanup of dead row versions. The + Specifies the number of transactions by which VACUUM and + HOT updates will defer cleanup of dead row versions. The default is zero transactions, meaning that dead row versions can be removed as soon as possible, that is, as soon as they are no longer visible to any open transaction. You may wish to set this to a @@ -3178,16 +3178,16 @@ ANY num_sync ( num_sync ( hot_standby (boolean) - hot_standby configuration parameter + hot_standby configuration parameter @@ -3226,7 +3226,7 @@ ANY num_sync ( max_standby_archive_delay (integer) - max_standby_archive_delay configuration parameter + max_standby_archive_delay configuration parameter @@ -3235,16 +3235,16 @@ ANY num_sync ( . - max_standby_archive_delay applies when WAL data is + max_standby_archive_delay applies when WAL data is being read from WAL archive (and is therefore not current). The default is 30 seconds. Units are milliseconds if not specified. A value of -1 allows the standby to wait forever for conflicting queries to complete. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - Note that max_standby_archive_delay is not the same as the + Note that max_standby_archive_delay is not the same as the maximum length of time a query can run before cancellation; rather it is the maximum total time allowed to apply any one WAL segment's data. Thus, if one query has resulted in significant delay earlier in the @@ -3257,7 +3257,7 @@ ANY num_sync ( max_standby_streaming_delay (integer) - max_standby_streaming_delay configuration parameter + max_standby_streaming_delay configuration parameter @@ -3266,16 +3266,16 @@ ANY num_sync ( . - max_standby_streaming_delay applies when WAL data is + max_standby_streaming_delay applies when WAL data is being received via streaming replication. The default is 30 seconds. Units are milliseconds if not specified. A value of -1 allows the standby to wait forever for conflicting queries to complete. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - Note that max_standby_streaming_delay is not the same as + Note that max_standby_streaming_delay is not the same as the maximum length of time a query can run before cancellation; rather it is the maximum total time allowed to apply WAL data once it has been received from the primary server. 
Thus, if one query has @@ -3289,7 +3289,7 @@ ANY num_sync ( wal_receiver_status_interval (integer) - wal_receiver_status_interval configuration parameter + wal_receiver_status_interval configuration parameter @@ -3298,7 +3298,7 @@ ANY num_sync ( - pg_stat_replication view. The standby will report + pg_stat_replication view. The standby will report the last write-ahead log location it has written, the last position it has flushed to disk, and the last position it has applied. This parameter's @@ -3307,7 +3307,7 @@ ANY num_sync ( num_sync ( hot_standby_feedback (boolean) - hot_standby_feedback configuration parameter + hot_standby_feedback configuration parameter @@ -3327,9 +3327,9 @@ ANY num_sync ( ( num_sync ( wal_receiver_timeout (integer) - wal_receiver_timeout configuration parameter + wal_receiver_timeout configuration parameter @@ -3363,7 +3363,7 @@ ANY num_sync ( num_sync ( wal_retrieve_retry_interval (integer) - wal_retrieve_retry_interval configuration parameter + wal_retrieve_retry_interval configuration parameter Specify how long the standby server should wait when WAL data is not available from any sources (streaming replication, - local pg_wal or WAL archive) before retrying to + local pg_wal or WAL archive) before retrying to retrieve WAL data. This parameter can only be set in the - postgresql.conf file or on the server command line. + postgresql.conf file or on the server command line. The default value is 5 seconds. Units are milliseconds if not specified. @@ -3420,7 +3420,7 @@ ANY num_sync ( max_logical_replication_workers (int) - max_logical_replication_workers configuration parameter + max_logical_replication_workers configuration parameter @@ -3441,7 +3441,7 @@ ANY num_sync ( max_sync_workers_per_subscription (integer) - max_sync_workers_per_subscription configuration parameter + max_sync_workers_per_subscription configuration parameter @@ -3478,7 +3478,7 @@ ANY num_sync ( num_sync ( num_sync ( enable_gathermerge (boolean) - enable_gathermerge configuration parameter + enable_gathermerge configuration parameter Enables or disables the query planner's use of gather - merge plan types. The default is on. + merge plan types. The default is on. @@ -3527,13 +3527,13 @@ ANY num_sync ( enable_hashagg (boolean) - enable_hashagg configuration parameter + enable_hashagg configuration parameter Enables or disables the query planner's use of hashed - aggregation plan types. The default is on. + aggregation plan types. The default is on. @@ -3541,13 +3541,13 @@ ANY num_sync ( enable_hashjoin (boolean) - enable_hashjoin configuration parameter + enable_hashjoin configuration parameter Enables or disables the query planner's use of hash-join plan - types. The default is on. + types. The default is on. @@ -3558,13 +3558,13 @@ ANY num_sync ( num_sync ( enable_indexonlyscan (boolean) - enable_indexonlyscan configuration parameter + enable_indexonlyscan configuration parameter Enables or disables the query planner's use of index-only-scan plan types (see ). - The default is on. + The default is on. @@ -3587,7 +3587,7 @@ ANY num_sync ( enable_material (boolean) - enable_material configuration parameter + enable_material configuration parameter @@ -3596,7 +3596,7 @@ ANY num_sync ( num_sync ( enable_mergejoin (boolean) - enable_mergejoin configuration parameter + enable_mergejoin configuration parameter Enables or disables the query planner's use of merge-join plan - types. The default is on. + types. The default is on. 
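Because these enable_* flags exist mainly for diagnosing planner behavior, a typical use is a session-local experiment rather than a permanent setting. A minimal sketch, assuming a hypothetical table t with an id column:

SET enable_mergejoin = off;                     -- discourage merge joins in this session
EXPLAIN SELECT * FROM t a JOIN t b USING (id);  -- compare plans with and without
RESET enable_mergejoin;                         -- restore the default (on)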
@@ -3618,7 +3618,7 @@ ANY num_sync ( enable_nestloop (boolean) - enable_nestloop configuration parameter + enable_nestloop configuration parameter @@ -3627,7 +3627,7 @@ ANY num_sync ( num_sync ( enable_partition_wise_join (boolean) - enable_partition_wise_join configuration parameter + enable_partition_wise_join configuration parameter @@ -3647,7 +3647,7 @@ ANY num_sync ( num_sync ( num_sync ( num_sync ( enable_sort (boolean) - enable_sort configuration parameter + enable_sort configuration parameter @@ -3684,7 +3684,7 @@ ANY num_sync ( num_sync ( enable_tidscan (boolean) - enable_tidscan configuration parameter + enable_tidscan configuration parameter - Enables or disables the query planner's use of TID - scan plan types. The default is on. + Enables or disables the query planner's use of TID + scan plan types. The default is on. @@ -3709,12 +3709,12 @@ ANY num_sync ( num_sync ( seq_page_cost (floating point) - seq_page_cost configuration parameter + seq_page_cost configuration parameter @@ -3752,7 +3752,7 @@ ANY num_sync ( random_page_cost (floating point) - random_page_cost configuration parameter + random_page_cost configuration parameter @@ -3765,7 +3765,7 @@ ANY num_sync ( num_sync ( num_sync ( cpu_tuple_cost (floating point) - cpu_tuple_cost configuration parameter + cpu_tuple_cost configuration parameter @@ -3826,7 +3826,7 @@ ANY num_sync ( cpu_index_tuple_cost (floating point) - cpu_index_tuple_cost configuration parameter + cpu_index_tuple_cost configuration parameter @@ -3841,7 +3841,7 @@ ANY num_sync ( cpu_operator_cost (floating point) - cpu_operator_cost configuration parameter + cpu_operator_cost configuration parameter @@ -3856,7 +3856,7 @@ ANY num_sync ( parallel_setup_cost (floating point) - parallel_setup_cost configuration parameter + parallel_setup_cost configuration parameter @@ -3871,7 +3871,7 @@ ANY num_sync ( parallel_tuple_cost (floating point) - parallel_tuple_cost configuration parameter + parallel_tuple_cost configuration parameter @@ -3886,7 +3886,7 @@ ANY num_sync ( min_parallel_table_scan_size (integer) - min_parallel_table_scan_size configuration parameter + min_parallel_table_scan_size configuration parameter @@ -3896,7 +3896,7 @@ ANY num_sync ( num_sync ( min_parallel_index_scan_size (integer) - min_parallel_index_scan_size configuration parameter + min_parallel_index_scan_size configuration parameter @@ -3913,7 +3913,7 @@ ANY num_sync ( num_sync ( effective_cache_size (integer) - effective_cache_size configuration parameter + effective_cache_size configuration parameter @@ -3942,7 +3942,7 @@ ANY num_sync ( num_sync ( num_sync ( geqo_threshold (integer) - geqo_threshold configuration parameter + geqo_threshold configuration parameter Use genetic query optimization to plan queries with at least - this many FROM items involved. (Note that a - FULL OUTER JOIN construct counts as only one FROM + this many FROM items involved. (Note that a + FULL OUTER JOIN construct counts as only one FROM item.) The default is 12. 
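For example, a hypothetical setting that keeps the exhaustive planner in use for somewhat larger join problems than the default of 12 allows:

# Illustrative value; queries with fewer FROM items than this still use
# the regular exhaustive-search planner:
geqo_threshold = 15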
For simpler queries it is usually best to use the regular, exhaustive-search planner, but for queries with many tables the exhaustive search takes too long, often @@ -4011,7 +4011,7 @@ ANY num_sync ( geqo_effort (integer) - geqo_effort configuration parameter + geqo_effort configuration parameter @@ -4037,7 +4037,7 @@ ANY num_sync ( geqo_pool_size (integer) - geqo_pool_size configuration parameter + geqo_pool_size configuration parameter @@ -4055,7 +4055,7 @@ ANY num_sync ( geqo_generations (integer) - geqo_generations configuration parameter + geqo_generations configuration parameter @@ -4073,7 +4073,7 @@ ANY num_sync ( geqo_selection_bias (floating point) - geqo_selection_bias configuration parameter + geqo_selection_bias configuration parameter @@ -4088,7 +4088,7 @@ ANY num_sync ( geqo_seed (floating point) - geqo_seed configuration parameter + geqo_seed configuration parameter @@ -4112,17 +4112,17 @@ ANY num_sync ( default_statistics_target (integer) - default_statistics_target configuration parameter + default_statistics_target configuration parameter Sets the default statistics target for table columns without a column-specific target set via ALTER TABLE - SET STATISTICS. Larger values increase the time needed to - do ANALYZE, but might improve the quality of the + SET STATISTICS. Larger values increase the time needed to + do ANALYZE, but might improve the quality of the planner's estimates. The default is 100. For more information - on the use of statistics by the PostgreSQL + on the use of statistics by the PostgreSQL query planner, refer to . @@ -4134,26 +4134,26 @@ ANY num_sync ( cursor_tuple_fraction (floating point) - cursor_tuple_fraction configuration parameter + cursor_tuple_fraction configuration parameter Sets the planner's estimate of the fraction of a cursor's rows that will be retrieved. The default is 0.1. Smaller values of this - setting bias the planner towards using fast start plans + setting bias the planner towards using fast start plans for cursors, which will retrieve the first few rows quickly while perhaps taking a long time to fetch all rows. Larger values put more emphasis on the total estimated time. At the maximum @@ -4209,7 +4209,7 @@ SELECT * FROM parent WHERE key = 2400; from_collapse_limit (integer) - from_collapse_limit configuration parameter + from_collapse_limit configuration parameter @@ -4232,14 +4232,14 @@ SELECT * FROM parent WHERE key = 2400; join_collapse_limit (integer) - join_collapse_limit configuration parameter + join_collapse_limit configuration parameter - The planner will rewrite explicit JOIN - constructs (except FULL JOINs) into lists of - FROM items whenever a list of no more than this many items + The planner will rewrite explicit JOIN + constructs (except FULL JOINs) into lists of + FROM items whenever a list of no more than this many items would result. Smaller values reduce planning time but might yield inferior query plans. @@ -4248,7 +4248,7 @@ SELECT * FROM parent WHERE key = 2400; By default, this variable is set the same as from_collapse_limit, which is appropriate for most uses. Setting it to 1 prevents any reordering of - explicit JOINs. Thus, the explicit join order + explicit JOINs. Thus, the explicit join order specified in the query will be the actual order in which the relations are joined. 
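A short sketch of pinning the written join order, using hypothetical tables a, b, and c that each have an id column:

SET join_collapse_limit = 1;   -- preserve the explicit JOIN order
-- The planner now joins a to b first, then that result to c, as written:
SELECT * FROM a JOIN b USING (id) JOIN c USING (id);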
Because the query planner does not always choose the optimal join order, advanced users can elect to @@ -4268,24 +4268,24 @@ SELECT * FROM parent WHERE key = 2400; force_parallel_mode (enum) - force_parallel_mode configuration parameter + force_parallel_mode configuration parameter Allows the use of parallel queries for testing purposes even in cases where no performance benefit is expected. - The allowed values of force_parallel_mode are - off (use parallel mode only when it is expected to improve - performance), on (force parallel query for all queries - for which it is thought to be safe), and regress (like - on, but with additional behavior changes as explained + The allowed values of force_parallel_mode are + off (use parallel mode only when it is expected to improve + performance), on (force parallel query for all queries + for which it is thought to be safe), and regress (like + on, but with additional behavior changes as explained below). - More specifically, setting this value to on will add - a Gather node to the top of any query plan for which this + More specifically, setting this value to on will add + a Gather node to the top of any query plan for which this appears to be safe, so that the query runs inside of a parallel worker. Even when a parallel worker is not available or cannot be used, operations such as starting a subtransaction that would be prohibited @@ -4297,15 +4297,15 @@ SELECT * FROM parent WHERE key = 2400; - Setting this value to regress has all of the same effects - as setting it to on plus some additional effects that are + Setting this value to regress has all of the same effects + as setting it to on plus some additional effects that are intended to facilitate automated regression testing. Normally, messages from a parallel worker include a context line indicating that, - but a setting of regress suppresses this line so that the + but a setting of regress suppresses this line so that the output is the same as in non-parallel execution. Also, - the Gather nodes added to plans by this setting are hidden - in EXPLAIN output so that the output matches what - would be obtained if this setting were turned off. + the Gather nodes added to plans by this setting are hidden + in EXPLAIN output so that the output matches what + would be obtained if this setting were turned off. @@ -4338,7 +4338,7 @@ SELECT * FROM parent WHERE key = 2400; log_destination (string) - log_destination configuration parameter + log_destination configuration parameter @@ -4351,13 +4351,13 @@ SELECT * FROM parent WHERE key = 2400; parameter to a list of desired log destinations separated by commas. The default is to log to stderr only. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. - If csvlog is included in log_destination, + If csvlog is included in log_destination, log entries are output in comma separated - value (CSV) format, which is convenient for + value (CSV) format, which is convenient for loading logs into programs. See for details. must be enabled to generate @@ -4366,7 +4366,7 @@ SELECT * FROM parent WHERE key = 2400; When either stderr or csvlog are included, the file - current_logfiles is created to record the location + current_logfiles is created to record the location of the log file(s) currently in use by the logging collector and the associated logging destination. This provides a convenient way to find the logs currently in use by the instance. 
Here is an example of @@ -4378,10 +4378,10 @@ csvlog log/postgresql.csv current_logfiles is recreated when a new log file is created as an effect of rotation, and - when log_destination is reloaded. It is removed when + when log_destination is reloaded. It is removed when neither stderr nor csvlog are included - in log_destination, and when the logging collector is + in log_destination, and when the logging collector is disabled. @@ -4390,9 +4390,9 @@ csvlog log/postgresql.csv On most Unix systems, you will need to alter the configuration of your system's syslog daemon in order to make use of the syslog option for - log_destination. PostgreSQL + log_destination. PostgreSQL can log to syslog facilities - LOCAL0 through LOCAL7 (see LOCAL0 through LOCAL7 (see ), but the default syslog configuration on most platforms will discard all such messages. You will need to add something like: @@ -4404,7 +4404,7 @@ local0.* /var/log/postgresql On Windows, when you use the eventlog - option for log_destination, you should + option for log_destination, you should register an event source and its library with the operating system so that the Windows Event Viewer can display event log messages cleanly. @@ -4417,27 +4417,27 @@ local0.* /var/log/postgresql logging_collector (boolean) - logging_collector configuration parameter + logging_collector configuration parameter - This parameter enables the logging collector, which + This parameter enables the logging collector, which is a background process that captures log messages - sent to stderr and redirects them into log files. + sent to stderr and redirects them into log files. This approach is often more useful than - logging to syslog, since some types of messages - might not appear in syslog output. (One common + logging to syslog, since some types of messages + might not appear in syslog output. (One common example is dynamic-linker failure messages; another is error messages - produced by scripts such as archive_command.) + produced by scripts such as archive_command.) This parameter can only be set at server start. - It is possible to log to stderr without using the + It is possible to log to stderr without using the logging collector; the log messages will just go to wherever the - server's stderr is directed. However, that method is + server's stderr is directed. However, that method is only suitable for low log volumes, since it provides no convenient way to rotate log files. Also, on some platforms not using the logging collector can result in lost or garbled log output, because @@ -4451,7 +4451,7 @@ local0.* /var/log/postgresql The logging collector is designed to never lose messages. This means that in case of extremely high load, server processes could be blocked while trying to send additional log messages when the - collector has fallen behind. In contrast, syslog + collector has fallen behind. In contrast, syslog prefers to drop messages if it cannot write them, which means it may fail to log some messages in such cases but it will not block the rest of the system. @@ -4464,16 +4464,16 @@ local0.* /var/log/postgresql log_directory (string) - log_directory configuration parameter + log_directory configuration parameter - When logging_collector is enabled, + When logging_collector is enabled, this parameter determines the directory in which log files will be created. It can be specified as an absolute path, or relative to the cluster data directory. 
- This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is log. @@ -4483,7 +4483,7 @@ local0.* /var/log/postgresql log_filename (string) - log_filename configuration parameter + log_filename configuration parameter @@ -4514,14 +4514,14 @@ local0.* /var/log/postgresql longer the case. - If CSV-format output is enabled in log_destination, - .csv will be appended to the timestamped + If CSV-format output is enabled in log_destination, + .csv will be appended to the timestamped log file name to create the file name for CSV-format output. - (If log_filename ends in .log, the suffix is + (If log_filename ends in .log, the suffix is replaced instead.) - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4530,7 +4530,7 @@ local0.* /var/log/postgresql log_file_mode (integer) - log_file_mode configuration parameter + log_file_mode configuration parameter @@ -4545,9 +4545,9 @@ local0.* /var/log/postgresql must start with a 0 (zero).) - The default permissions are 0600, meaning only the + The default permissions are 0600, meaning only the server owner can read or write the log files. The other commonly - useful setting is 0640, allowing members of the owner's + useful setting is 0640, allowing members of the owner's group to read the files. Note however that to make use of such a setting, you'll need to alter to store the files somewhere outside the cluster data directory. In @@ -4555,7 +4555,7 @@ local0.* /var/log/postgresql they might contain sensitive data. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4564,7 +4564,7 @@ local0.* /var/log/postgresql log_rotation_age (integer) - log_rotation_age configuration parameter + log_rotation_age configuration parameter @@ -4574,7 +4574,7 @@ local0.* /var/log/postgresql After this many minutes have elapsed, a new log file will be created. Set to zero to disable time-based creation of new log files. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4583,7 +4583,7 @@ local0.* /var/log/postgresql log_rotation_size (integer) - log_rotation_size configuration parameter + log_rotation_size configuration parameter @@ -4593,7 +4593,7 @@ local0.* /var/log/postgresql After this many kilobytes have been emitted into a log file, a new log file will be created. Set to zero to disable size-based creation of new log files. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4602,7 +4602,7 @@ local0.* /var/log/postgresql log_truncate_on_rotation (boolean) - log_truncate_on_rotation configuration parameter + log_truncate_on_rotation configuration parameter @@ -4617,7 +4617,7 @@ local0.* /var/log/postgresql a log_filename like postgresql-%H.log would result in generating twenty-four hourly log files and then cyclically overwriting them. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4635,7 +4635,7 @@ local0.* /var/log/postgresql log_truncate_on_rotation to on, log_rotation_age to 60, and log_rotation_size to 1000000. 
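Spelled out as a postgresql.conf sketch, such a scheme might look like the following; the server_log.%H%M file-name pattern is one hypothetical choice, and including %M matters for the reason given next:

log_filename = 'server_log.%H%M'   # hypothetical hour-and-minute pattern
log_truncate_on_rotation = on      # overwrite, rather than append to, a reused file
log_rotation_age = 60              # minutes
log_rotation_size = 1000000        # kilobytes (roughly 1 GB)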
- Including %M in log_filename allows + Including %M in log_filename allows any size-driven rotations that might occur to select a file name different from the hour's initial file name. @@ -4645,21 +4645,21 @@ local0.* /var/log/postgresql syslog_facility (enum) - syslog_facility configuration parameter + syslog_facility configuration parameter - When logging to syslog is enabled, this parameter + When logging to syslog is enabled, this parameter determines the syslog facility to be used. You can choose - from LOCAL0, LOCAL1, - LOCAL2, LOCAL3, LOCAL4, - LOCAL5, LOCAL6, LOCAL7; - the default is LOCAL0. See also the + from LOCAL0, LOCAL1, + LOCAL2, LOCAL3, LOCAL4, + LOCAL5, LOCAL6, LOCAL7; + the default is LOCAL0. See also the documentation of your system's syslog daemon. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4668,17 +4668,17 @@ local0.* /var/log/postgresql syslog_ident (string) - syslog_ident configuration parameter + syslog_ident configuration parameter - When logging to syslog is enabled, this parameter + When logging to syslog is enabled, this parameter determines the program name used to identify PostgreSQL messages in syslog logs. The default is postgres. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4687,7 +4687,7 @@ local0.* /var/log/postgresql syslog_sequence_numbers (boolean) - syslog_sequence_numbers configuration parameter + syslog_sequence_numbers configuration parameter @@ -4706,7 +4706,7 @@ local0.* /var/log/postgresql - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4715,12 +4715,12 @@ local0.* /var/log/postgresql syslog_split_messages (boolean) - syslog_split_messages configuration parameter + syslog_split_messages configuration parameter - When logging to syslog is enabled, this parameter + When logging to syslog is enabled, this parameter determines how messages are delivered to syslog. When on (the default), messages are split by lines, and long lines are split so that they will fit into 1024 bytes, which is a typical size limit for @@ -4739,7 +4739,7 @@ local0.* /var/log/postgresql - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4748,16 +4748,16 @@ local0.* /var/log/postgresql event_source (string) - event_source configuration parameter + event_source configuration parameter - When logging to event log is enabled, this parameter + When logging to event log is enabled, this parameter determines the program name used to identify PostgreSQL messages in the log. The default is PostgreSQL. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -4773,21 +4773,21 @@ local0.* /var/log/postgresql client_min_messages (enum) - client_min_messages configuration parameter + client_min_messages configuration parameter Controls which message levels are sent to the client. - Valid values are DEBUG5, - DEBUG4, DEBUG3, DEBUG2, - DEBUG1, LOG, NOTICE, - WARNING, ERROR, FATAL, - and PANIC. Each level + Valid values are DEBUG5, + DEBUG4, DEBUG3, DEBUG2, + DEBUG1, LOG, NOTICE, + WARNING, ERROR, FATAL, + and PANIC. Each level includes all the levels that follow it. 
The later the level, the fewer messages are sent. The default is - NOTICE. Note that LOG has a different - rank here than in log_min_messages. + NOTICE. Note that LOG has a different + rank here than in log_min_messages. @@ -4795,21 +4795,21 @@ local0.* /var/log/postgresql log_min_messages (enum) - log_min_messages configuration parameter + log_min_messages configuration parameter Controls which message levels are written to the server log. - Valid values are DEBUG5, DEBUG4, - DEBUG3, DEBUG2, DEBUG1, - INFO, NOTICE, WARNING, - ERROR, LOG, FATAL, and - PANIC. Each level includes all the levels that + Valid values are DEBUG5, DEBUG4, + DEBUG3, DEBUG2, DEBUG1, + INFO, NOTICE, WARNING, + ERROR, LOG, FATAL, and + PANIC. Each level includes all the levels that follow it. The later the level, the fewer messages are sent - to the log. The default is WARNING. Note that - LOG has a different rank here than in - client_min_messages. + to the log. The default is WARNING. Note that + LOG has a different rank here than in + client_min_messages. Only superusers can change this setting. @@ -4818,7 +4818,7 @@ local0.* /var/log/postgresql log_min_error_statement (enum) - log_min_error_statement configuration parameter + log_min_error_statement configuration parameter @@ -4846,7 +4846,7 @@ local0.* /var/log/postgresql log_min_duration_statement (integer) - log_min_duration_statement configuration parameter + log_min_duration_statement configuration parameter @@ -4872,9 +4872,9 @@ local0.* /var/log/postgresql When using this option together with , the text of statements that are logged because of - log_statement will not be repeated in the + log_statement will not be repeated in the duration log message. - If you are not using syslog, it is recommended + If you are not using syslog, it is recommended that you log the PID or session ID using so that you can link the statement message to the later @@ -4888,7 +4888,7 @@ local0.* /var/log/postgresql explains the message - severity levels used by PostgreSQL. If logging output + severity levels used by PostgreSQL. If logging output is sent to syslog or Windows' eventlog, the severity levels are translated as shown in the table. @@ -4901,73 +4901,73 @@ local0.* /var/log/postgresql Severity Usage - syslog - eventlog + syslog + eventlog - DEBUG1..DEBUG5 + DEBUG1..DEBUG5 Provides successively-more-detailed information for use by developers. - DEBUG - INFORMATION + DEBUG + INFORMATION - INFO + INFO Provides information implicitly requested by the user, - e.g., output from VACUUM VERBOSE. - INFO - INFORMATION + e.g., output from VACUUM VERBOSE. + INFO + INFORMATION - NOTICE + NOTICE Provides information that might be helpful to users, e.g., notice of truncation of long identifiers. - NOTICE - INFORMATION + NOTICE + INFORMATION - WARNING - Provides warnings of likely problems, e.g., COMMIT + WARNING + Provides warnings of likely problems, e.g., COMMIT outside a transaction block. - NOTICE - WARNING + NOTICE + WARNING - ERROR + ERROR Reports an error that caused the current command to abort. - WARNING - ERROR + WARNING + ERROR - LOG + LOG Reports information of interest to administrators, e.g., checkpoint activity. - INFO - INFORMATION + INFO + INFORMATION - FATAL + FATAL Reports an error that caused the current session to abort. - ERR - ERROR + ERR + ERROR - PANIC + PANIC Reports an error that caused all database sessions to abort. 
- CRIT - ERROR + CRIT + ERROR @@ -4982,15 +4982,15 @@ local0.* /var/log/postgresql application_name (string) - application_name configuration parameter + application_name configuration parameter The application_name can be any string of less than - NAMEDATALEN characters (64 characters in a standard build). + NAMEDATALEN characters (64 characters in a standard build). It is typically set by an application upon connection to the server. - The name will be displayed in the pg_stat_activity view + The name will be displayed in the pg_stat_activity view and included in CSV log entries. It can also be included in regular log entries via the parameter. Only printable ASCII characters may be used in the @@ -5003,17 +5003,17 @@ local0.* /var/log/postgresql debug_print_parse (boolean) - debug_print_parse configuration parameter + debug_print_parse configuration parameter debug_print_rewritten (boolean) - debug_print_rewritten configuration parameter + debug_print_rewritten configuration parameter debug_print_plan (boolean) - debug_print_plan configuration parameter + debug_print_plan configuration parameter @@ -5021,7 +5021,7 @@ local0.* /var/log/postgresql These parameters enable various debugging output to be emitted. When set, they print the resulting parse tree, the query rewriter output, or the execution plan for each executed query. - These messages are emitted at LOG message level, so by + These messages are emitted at LOG message level, so by default they will appear in the server log but will not be sent to the client. You can change that by adjusting and/or @@ -5034,7 +5034,7 @@ local0.* /var/log/postgresql debug_pretty_print (boolean) - debug_pretty_print configuration parameter + debug_pretty_print configuration parameter @@ -5043,7 +5043,7 @@ local0.* /var/log/postgresql produced by debug_print_parse, debug_print_rewritten, or debug_print_plan. This results in more readable - but much longer output than the compact format used when + but much longer output than the compact format used when it is off. It is on by default. @@ -5052,7 +5052,7 @@ local0.* /var/log/postgresql log_checkpoints (boolean) - log_checkpoints configuration parameter + log_checkpoints configuration parameter @@ -5060,7 +5060,7 @@ local0.* /var/log/postgresql Causes checkpoints and restartpoints to be logged in the server log. Some statistics are included in the log messages, including the number of buffers written and the time spent writing them. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is off. @@ -5069,7 +5069,7 @@ local0.* /var/log/postgresql log_connections (boolean) - log_connections configuration parameter + log_connections configuration parameter @@ -5078,14 +5078,14 @@ local0.* /var/log/postgresql as well as successful completion of client authentication. Only superusers can change this parameter at session start, and it cannot be changed at all within a session. - The default is off. + The default is off. - Some client programs, like psql, attempt + Some client programs, like psql, attempt to connect twice while determining if a password is required, so - duplicate connection received messages do not + duplicate connection received messages do not necessarily indicate a problem. 
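An illustrative excerpt that enables the two audit-style settings described above (both default to off):

log_checkpoints = on   # log checkpoint and restartpoint statistics
log_connections = on   # log each connection attempt and completed authentication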
@@ -5095,7 +5095,7 @@ local0.* /var/log/postgresql log_disconnections (boolean) - log_disconnections configuration parameter + log_disconnections configuration parameter @@ -5105,7 +5105,7 @@ local0.* /var/log/postgresql plus the duration of the session. Only superusers can change this parameter at session start, and it cannot be changed at all within a session. - The default is off. + The default is off. @@ -5114,13 +5114,13 @@ local0.* /var/log/postgresql log_duration (boolean) - log_duration configuration parameter + log_duration configuration parameter Causes the duration of every completed statement to be logged. - The default is off. + The default is off. Only superusers can change this setting. @@ -5133,10 +5133,10 @@ local0.* /var/log/postgresql The difference between setting this option and setting to zero is that - exceeding log_min_duration_statement forces the text of + exceeding log_min_duration_statement forces the text of the query to be logged, but this option doesn't. Thus, if - log_duration is on and - log_min_duration_statement has a positive value, all + log_duration is on and + log_min_duration_statement has a positive value, all durations are logged but the query text is included only for statements exceeding the threshold. This behavior can be useful for gathering statistics in high-load installations. @@ -5148,18 +5148,18 @@ local0.* /var/log/postgresql log_error_verbosity (enum) - log_error_verbosity configuration parameter + log_error_verbosity configuration parameter Controls the amount of detail written in the server log for each - message that is logged. Valid values are TERSE, - DEFAULT, and VERBOSE, each adding more - fields to displayed messages. TERSE excludes - the logging of DETAIL, HINT, - QUERY, and CONTEXT error information. - VERBOSE output includes the SQLSTATE error + message that is logged. Valid values are TERSE, + DEFAULT, and VERBOSE, each adding more + fields to displayed messages. TERSE excludes + the logging of DETAIL, HINT, + QUERY, and CONTEXT error information. + VERBOSE output includes the SQLSTATE error code (see also ) and the source code file name, function name, and line number that generated the error. Only superusers can change this setting. @@ -5170,7 +5170,7 @@ local0.* /var/log/postgresql log_hostname (boolean) - log_hostname configuration parameter + log_hostname configuration parameter @@ -5179,7 +5179,7 @@ local0.* /var/log/postgresql connecting host. Turning this parameter on causes logging of the host name as well. Note that depending on your host name resolution setup this might impose a non-negligible performance penalty. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -5188,14 +5188,14 @@ local0.* /var/log/postgresql log_line_prefix (string) - log_line_prefix configuration parameter + log_line_prefix configuration parameter - This is a printf-style string that is output at the + This is a printf-style string that is output at the beginning of each log line. - % characters begin escape sequences + % characters begin escape sequences that are replaced with status information as outlined below. Unrecognized escapes are ignored. Other characters are copied straight to the log line. Some escapes are @@ -5207,9 +5207,9 @@ local0.* /var/log/postgresql right with spaces to give it a minimum width, whereas a positive value will pad on the left. Padding can be useful to aid human readability in log files. 
- This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. The default is - '%m [%p] ' which logs a time stamp and the process ID. + '%m [%p] ' which logs a time stamp and the process ID. @@ -5310,19 +5310,19 @@ local0.* /var/log/postgresql %% - Literal % + Literal % no - The %c escape prints a quasi-unique session identifier, + The %c escape prints a quasi-unique session identifier, consisting of two 4-byte hexadecimal numbers (without leading zeros) separated by a dot. The numbers are the process start time and the - process ID, so %c can also be used as a space saving way + process ID, so %c can also be used as a space saving way of printing those items. For example, to generate the session - identifier from pg_stat_activity, use this query: + identifier from pg_stat_activity, use this query: SELECT to_hex(trunc(EXTRACT(EPOCH FROM backend_start))::integer) || '.' || to_hex(pid) @@ -5333,7 +5333,7 @@ FROM pg_stat_activity; - If you set a nonempty value for log_line_prefix, + If you set a nonempty value for log_line_prefix, you should usually make its last character be a space, to provide visual separation from the rest of the log line. A punctuation character can be used too. @@ -5342,15 +5342,15 @@ FROM pg_stat_activity; - Syslog produces its own + Syslog produces its own time stamp and process ID information, so you probably do not want to - include those escapes if you are logging to syslog. + include those escapes if you are logging to syslog. - The %q escape is useful when including information that is + The %q escape is useful when including information that is only available in session (backend) context like user or database name. For example: @@ -5364,7 +5364,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_lock_waits (boolean) - log_lock_waits configuration parameter + log_lock_waits configuration parameter @@ -5372,7 +5372,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' Controls whether a log message is produced when a session waits longer than to acquire a lock. This is useful in determining if lock waits are causing - poor performance. The default is off. + poor performance. The default is off. Only superusers can change this setting. @@ -5381,22 +5381,22 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_statement (enum) - log_statement configuration parameter + log_statement configuration parameter Controls which SQL statements are logged. Valid values are - none (off), ddl, mod, and - all (all statements). ddl logs all data definition - statements, such as CREATE, ALTER, and - DROP statements. mod logs all - ddl statements, plus data-modifying statements - such as INSERT, - UPDATE, DELETE, TRUNCATE, - and COPY FROM. - PREPARE, EXECUTE, and - EXPLAIN ANALYZE statements are also logged if their + none (off), ddl, mod, and + all (all statements). ddl logs all data definition + statements, such as CREATE, ALTER, and + DROP statements. mod logs all + ddl statements, plus data-modifying statements + such as INSERT, + UPDATE, DELETE, TRUNCATE, + and COPY FROM. + PREPARE, EXECUTE, and + EXPLAIN ANALYZE statements are also logged if their contained command is of an appropriate type. For clients using extended query protocol, logging occurs when an Execute message is received, and values of the Bind parameters are included @@ -5404,20 +5404,20 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' - The default is none. Only superusers can change this + The default is none. Only superusers can change this setting. 
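For instance, an illustrative setting that captures schema changes and data modifications while leaving plain queries unlogged:

# 'mod' logs all ddl statements plus INSERT, UPDATE, DELETE,
# TRUNCATE, and COPY FROM:
log_statement = 'mod'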
Statements that contain simple syntax errors are not logged - even by the log_statement = all setting, + even by the log_statement = all setting, because the log message is emitted only after basic parsing has been done to determine the statement type. In the case of extended query protocol, this setting likewise does not log statements that fail before the Execute phase (i.e., during parse analysis or - planning). Set log_min_error_statement to - ERROR (or lower) to log such statements. + planning). Set log_min_error_statement to + ERROR (or lower) to log such statements. @@ -5426,14 +5426,14 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_replication_commands (boolean) - log_replication_commands configuration parameter + log_replication_commands configuration parameter Causes each replication command to be logged in the server log. See for more information about - replication commands. The default value is off. + replication commands. The default value is off. Only superusers can change this setting. @@ -5442,7 +5442,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_temp_files (integer) - log_temp_files configuration parameter + log_temp_files configuration parameter @@ -5463,7 +5463,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' log_timezone (string) - log_timezone configuration parameter + log_timezone configuration parameter @@ -5471,11 +5471,11 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' Sets the time zone used for timestamps written in the server log. Unlike , this value is cluster-wide, so that all sessions will report timestamps consistently. - The built-in default is GMT, but that is typically - overridden in postgresql.conf; initdb + The built-in default is GMT, but that is typically + overridden in postgresql.conf; initdb will install a setting there corresponding to its system environment. See for more information. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -5487,10 +5487,10 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' Using CSV-Format Log Output - Including csvlog in the log_destination list + Including csvlog in the log_destination list provides a convenient way to import log files into a database table. This option emits log lines in comma-separated-values - (CSV) format, + (CSV) format, with these columns: time stamp with milliseconds, user name, @@ -5512,10 +5512,10 @@ log_line_prefix = '%m [%p] %q%u@%d/%a ' character count of the error position therein, error context, user query that led to the error (if any and enabled by - log_min_error_statement), + log_min_error_statement), character count of the error position therein, location of the error in the PostgreSQL source code - (if log_error_verbosity is set to verbose), + (if log_error_verbosity is set to verbose), and application name. Here is a sample table definition for storing CSV-format log output: @@ -5551,7 +5551,7 @@ CREATE TABLE postgres_log - To import a log file into this table, use the COPY FROM + To import a log file into this table, use the COPY FROM command: @@ -5567,7 +5567,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Set log_filename and - log_rotation_age to provide a consistent, + log_rotation_age to provide a consistent, predictable naming scheme for your log files. This lets you predict what the file name will be and know when an individual log file is complete and therefore ready to be imported.
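A sketch of that import workflow, assuming a postgres_log table created from the sample definition and a hypothetical log file path; ALTER SYSTEM requires superuser, and switching log_destination to csvlog also requires logging_collector = on, which takes effect only after a server restart:

ALTER SYSTEM SET log_destination = 'csvlog';
ALTER SYSTEM SET log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log';
ALTER SYSTEM SET log_truncate_on_rotation = on;
SELECT pg_reload_conf();
-- once a file is complete and closed, import it (hypothetical path):
COPY postgres_log FROM '/var/log/pg/postgresql-2017-10-08_000000.csv' WITH csv;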
@@ -5584,7 +5584,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; - Set log_truncate_on_rotation to on so + Set log_truncate_on_rotation to on so that old log data isn't mixed with the new in the same file. @@ -5593,14 +5593,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; The table definition above includes a primary key specification. This is useful to protect against accidentally importing the same - information twice. The COPY command commits all of the + information twice. The COPY command commits all of the data it imports at one time, so any error will cause the entire import to fail. If you import a partial log file and later import the file again when it is complete, the primary key violation will cause the import to fail. Wait until the log is complete and closed before importing. This procedure will also protect against accidentally importing a partial line that hasn't been completely - written, which would also cause COPY to fail. + written, which would also cause COPY to fail. @@ -5613,7 +5613,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; These settings control how process titles of server processes are modified. Process titles are typically viewed using programs like - ps or, on Windows, Process Explorer. + ps or, on Windows, Process Explorer. See for details. @@ -5621,18 +5621,18 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; cluster_name (string) - cluster_name configuration parameter + cluster_name configuration parameter Sets the cluster name that appears in the process title for all server processes in this cluster. The name can be any string of less - than NAMEDATALEN characters (64 characters in a standard + than NAMEDATALEN characters (64 characters in a standard build). Only printable ASCII characters may be used in the cluster_name value. Other characters will be replaced with question marks (?). No name is shown - if this parameter is set to the empty string '' (which is + if this parameter is set to the empty string '' (which is the default). This parameter can only be set at server start. @@ -5641,15 +5641,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; update_process_title (boolean) - update_process_title configuration parameter + update_process_title configuration parameter Enables updating of the process title every time a new SQL command is received by the server. - This setting defaults to on on most platforms, but it - defaults to off on Windows due to that platform's larger + This setting defaults to on on most platforms, but it + defaults to off on Windows due to that platform's larger overhead for updating the process title. Only superusers can change this setting. @@ -5678,7 +5678,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_activities (boolean) - track_activities configuration parameter + track_activities configuration parameter @@ -5698,14 +5698,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_activity_query_size (integer) - track_activity_query_size configuration parameter + track_activity_query_size configuration parameter Specifies the number of bytes reserved to track the currently executing command for each active session, for the - pg_stat_activity.query field. + pg_stat_activity.query field. The default value is 1024. This parameter can only be set at server start. 
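To see the truncation that track_activity_query_size implies, a command longer than that many bytes appears cut off in pg_stat_activity; a quick check, with no assumptions beyond a running server:

SHOW track_activity_query_size;
SELECT pid, state, left(query, 60) AS query_prefix
FROM pg_stat_activity
WHERE state <> 'idle';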
@@ -5715,7 +5715,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_counts (boolean) - track_counts configuration parameter + track_counts configuration parameter @@ -5731,7 +5731,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_io_timing (boolean) - track_io_timing configuration parameter + track_io_timing configuration parameter @@ -5743,7 +5743,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; measure the overhead of timing on your system. I/O timing information is displayed in , in the output of - when the BUFFERS option is + when the BUFFERS option is used, and by . Only superusers can change this setting. @@ -5753,7 +5753,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; track_functions (enum) - track_functions configuration parameter + track_functions configuration parameter @@ -5767,7 +5767,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; - SQL-language functions that are simple enough to be inlined + SQL-language functions that are simple enough to be inlined into the calling query will not be tracked, regardless of this setting. @@ -5778,7 +5778,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; stats_temp_directory (string) - stats_temp_directory configuration parameter + stats_temp_directory configuration parameter @@ -5788,7 +5788,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; is pg_stat_tmp. Pointing this at a RAM-based file system will decrease physical I/O requirements and can lead to improved performance. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -5804,29 +5804,29 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; log_statement_stats (boolean) - log_statement_stats configuration parameter + log_statement_stats configuration parameter log_parser_stats (boolean) - log_parser_stats configuration parameter + log_parser_stats configuration parameter log_planner_stats (boolean) - log_planner_stats configuration parameter + log_planner_stats configuration parameter log_executor_stats (boolean) - log_executor_stats configuration parameter + log_executor_stats configuration parameter For each query, output performance statistics of the respective module to the server log. This is a crude profiling - instrument, similar to the Unix getrusage() operating + instrument, similar to the Unix getrusage() operating system facility. log_statement_stats reports total statement statistics, while the others report per-module statistics. log_statement_stats cannot be enabled together with @@ -5850,7 +5850,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; - These settings control the behavior of the autovacuum + These settings control the behavior of the autovacuum feature. Refer to for more information. Note that many of these settings can be overridden on a per-table basis; see autovacuum (boolean) - autovacuum configuration parameter + autovacuum configuration parameter @@ -5871,7 +5871,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum launcher daemon. This is on by default; however, must also be enabled for autovacuum to work. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; however, autovacuuming can be disabled for individual tables by changing table storage parameters. 
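One plausible way to enable I/O timing at runtime and read the accumulated figures (superuser required; the advice above about first measuring timer overhead still applies):

ALTER SYSTEM SET track_io_timing = on;
SELECT pg_reload_conf();
SELECT datname, blk_read_time, blk_write_time  -- cumulative milliseconds
FROM pg_stat_database;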
@@ -5887,7 +5887,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; log_autovacuum_min_duration (integer) - log_autovacuum_min_duration configuration parameter + log_autovacuum_min_duration configuration parameter @@ -5902,7 +5902,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; logged if an autovacuum action is skipped due to the existence of a conflicting lock. Enabling this parameter can be helpful in tracking autovacuum activity. This parameter can only be set in - the postgresql.conf file or on the server command line; + the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -5912,7 +5912,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_max_workers (integer) - autovacuum_max_workers configuration parameter + autovacuum_max_workers configuration parameter @@ -5927,17 +5927,17 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_naptime (integer) - autovacuum_naptime configuration parameter + autovacuum_naptime configuration parameter Specifies the minimum delay between autovacuum runs on any given database. In each round the daemon examines the - database and issues VACUUM and ANALYZE commands + database and issues VACUUM and ANALYZE commands as needed for tables in that database. The delay is measured - in seconds, and the default is one minute (1min). - This parameter can only be set in the postgresql.conf + in seconds, and the default is one minute (1min). + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -5946,15 +5946,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_vacuum_threshold (integer) - autovacuum_vacuum_threshold configuration parameter + autovacuum_vacuum_threshold configuration parameter Specifies the minimum number of updated or deleted tuples needed - to trigger a VACUUM in any one table. + to trigger a VACUUM in any one table. The default is 50 tuples. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -5965,15 +5965,15 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_analyze_threshold (integer) - autovacuum_analyze_threshold configuration parameter + autovacuum_analyze_threshold configuration parameter Specifies the minimum number of inserted, updated or deleted tuples - needed to trigger an ANALYZE in any one table. + needed to trigger an ANALYZE in any one table. The default is 50 tuples. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -5984,16 +5984,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_vacuum_scale_factor (floating point) - autovacuum_vacuum_scale_factor configuration parameter + autovacuum_vacuum_scale_factor configuration parameter Specifies a fraction of the table size to add to autovacuum_vacuum_threshold - when deciding whether to trigger a VACUUM. + when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size). 
- This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -6004,16 +6004,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_analyze_scale_factor (floating point) - autovacuum_analyze_scale_factor configuration parameter + autovacuum_analyze_scale_factor configuration parameter Specifies a fraction of the table size to add to autovacuum_analyze_threshold - when deciding whether to trigger an ANALYZE. + when deciding whether to trigger an ANALYZE. The default is 0.1 (10% of table size). - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -6024,14 +6024,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_freeze_max_age (integer) - autovacuum_freeze_max_age configuration parameter + autovacuum_freeze_max_age configuration parameter Specifies the maximum age (in transactions) that a table's - pg_class.relfrozenxid field can - attain before a VACUUM operation is forced + pg_class.relfrozenxid field can + attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. @@ -6039,7 +6039,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Vacuum also allows removal of old files from the - pg_xact subdirectory, which is why the default + pg_xact subdirectory, which is why the default is a relatively low 200 million transactions. This parameter can only be set at server start, but the setting can be reduced for individual tables by @@ -6058,8 +6058,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Specifies the maximum age (in multixacts) that a table's - pg_class.relminmxid field can - attain before a VACUUM operation is forced to + pg_class.relminmxid field can + attain before a VACUUM operation is forced to prevent multixact ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. @@ -6067,7 +6067,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Vacuuming multixacts also allows removal of old files from the - pg_multixact/members and pg_multixact/offsets + pg_multixact/members and pg_multixact/offsets subdirectories, which is why the default is a relatively low 400 million multixacts. This parameter can only be set at server start, but the setting can @@ -6080,16 +6080,16 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_vacuum_cost_delay (integer) - autovacuum_vacuum_cost_delay configuration parameter + autovacuum_vacuum_cost_delay configuration parameter Specifies the cost delay value that will be used in automatic - VACUUM operations. If -1 is specified, the regular + VACUUM operations. If -1 is specified, the regular value will be used. The default value is 20 milliseconds. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. 
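Since several of these settings can be overridden per table, a brief sketch of that mechanism (busy_queue is a hypothetical table name):

ALTER TABLE busy_queue SET (
    autovacuum_vacuum_threshold = 200,
    autovacuum_vacuum_scale_factor = 0.05,
    autovacuum_vacuum_cost_delay = 10
);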
@@ -6100,19 +6100,19 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; autovacuum_vacuum_cost_limit (integer) - autovacuum_vacuum_cost_limit configuration parameter + autovacuum_vacuum_cost_limit configuration parameter Specifies the cost limit value that will be used in automatic - VACUUM operations. If -1 is specified (which is the + VACUUM operations. If -1 is specified (which is the default), the regular value will be used. Note that the value is distributed proportionally among the running autovacuum workers, if there is more than one, so that the sum of the limits for each worker does not exceed the value of this variable. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line; but the setting can be overridden for individual tables by changing table storage parameters. @@ -6133,9 +6133,9 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; search_path (string) - search_path configuration parameter + search_path configuration parameter - pathfor schemas + pathfor schemas @@ -6151,32 +6151,32 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; The value for search_path must be a comma-separated list of schema names. Any name that is not an existing schema, or is - a schema for which the user does not have USAGE + a schema for which the user does not have USAGE permission, is silently ignored. If one of the list items is the special name $user, then the schema having the name returned by - SESSION_USER is substituted, if there is such a schema - and the user has USAGE permission for it. + SESSION_USER is substituted, if there is such a schema + and the user has USAGE permission for it. (If not, $user is ignored.) - The system catalog schema, pg_catalog, is always + The system catalog schema, pg_catalog, is always searched, whether it is mentioned in the path or not. If it is mentioned in the path then it will be searched in the specified - order. If pg_catalog is not in the path then it will - be searched before searching any of the path items. + order. If pg_catalog is not in the path then it will + be searched before searching any of the path items. Likewise, the current session's temporary-table schema, - pg_temp_nnn, is always searched if it + pg_temp_nnn, is always searched if it exists. It can be explicitly listed in the path by using the - alias pg_temppg_temp. If it is not listed in the path then - it is searched first (even before pg_catalog). However, + alias pg_temppg_temp. If it is not listed in the path then + it is searched first (even before pg_catalog). However, the temporary schema is only searched for relation (table, view, sequence, etc) and data type names. It is never searched for function or operator names. @@ -6193,7 +6193,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; The default value for this parameter is "$user", public. This setting supports shared use of a database (where no users - have private schemas, and all share use of public), + have private schemas, and all share use of public), private per-user schemas, and combinations of these. Other effects can be obtained by altering the default search path setting, either globally or per-user. @@ -6202,11 +6202,11 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; The current effective value of the search path can be examined via the SQL function - current_schemas + current_schemas (see ). 
This is not quite the same as examining the value of search_path, since - current_schemas shows how the items + current_schemas shows how the items appearing in search_path were resolved. @@ -6219,20 +6219,20 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; row_security (boolean) - row_security configuration parameter + row_security configuration parameter This variable controls whether to raise an error in lieu of applying a - row security policy. When set to on, policies apply - normally. When set to off, queries fail which would - otherwise apply at least one policy. The default is on. - Change to off where limited row visibility could cause - incorrect results; for example, pg_dump makes that + row security policy. When set to on, policies apply + normally. When set to off, queries fail which would + otherwise apply at least one policy. The default is on. + Change to off where limited row visibility could cause + incorrect results; for example, pg_dump makes that change by default. This variable has no effect on roles which bypass every row security policy, to wit, superusers and roles with - the BYPASSRLS attribute. + the BYPASSRLS attribute. @@ -6245,14 +6245,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; default_tablespace (string) - default_tablespace configuration parameter + default_tablespace configuration parameter - tablespacedefault + tablespacedefault This variable specifies the default tablespace in which to create - objects (tables and indexes) when a CREATE command does + objects (tables and indexes) when a CREATE command does not explicitly specify a tablespace. @@ -6260,9 +6260,9 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; The value is either the name of a tablespace, or an empty string to specify using the default tablespace of the current database. If the value does not match the name of any existing tablespace, - PostgreSQL will automatically use the default + PostgreSQL will automatically use the default tablespace of the current database. If a nondefault tablespace - is specified, the user must have CREATE privilege + is specified, the user must have CREATE privilege for it, or creation attempts will fail. @@ -6287,38 +6287,38 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; temp_tablespaces (string) - temp_tablespaces configuration parameter + temp_tablespaces configuration parameter - tablespacetemporary + tablespacetemporary This variable specifies tablespaces in which to create temporary objects (temp tables and indexes on temp tables) when a - CREATE command does not explicitly specify a tablespace. + CREATE command does not explicitly specify a tablespace. Temporary files for purposes such as sorting large data sets are also created in these tablespaces. The value is a list of names of tablespaces. When there is more than - one name in the list, PostgreSQL chooses a random + one name in the list, PostgreSQL chooses a random member of the list each time a temporary object is to be created; except that within a transaction, successively created temporary objects are placed in successive tablespaces from the list. If the selected element of the list is an empty string, - PostgreSQL will automatically use the default + PostgreSQL will automatically use the default tablespace of the current database instead. 
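A short illustration of path resolution (myschema is a hypothetical schema; current_schemas(true) also shows the implicitly searched schemas):

SET search_path = myschema, "$user", public;
SHOW search_path;
SELECT current_schemas(true);  -- e.g. {pg_catalog,myschema,public} if $user has no schema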
- When temp_tablespaces is set interactively, specifying a + When temp_tablespaces is set interactively, specifying a nonexistent tablespace is an error, as is specifying a tablespace for - which the user does not have CREATE privilege. However, + which the user does not have CREATE privilege. However, when using a previously set value, nonexistent tablespaces are ignored, as are tablespaces for which the user lacks - CREATE privilege. In particular, this rule applies when - using a value set in postgresql.conf. + CREATE privilege. In particular, this rule applies when + using a value set in postgresql.conf. @@ -6336,18 +6336,18 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; check_function_bodies (boolean) - check_function_bodies configuration parameter + check_function_bodies configuration parameter - This parameter is normally on. When set to off, it + This parameter is normally on. When set to off, it disables validation of the function body string during . Disabling validation avoids side effects of the validation process and avoids false positives due to problems such as forward references. Set this parameter - to off before loading functions on behalf of other - users; pg_dump does so automatically. + to off before loading functions on behalf of other + users; pg_dump does so automatically. @@ -6359,7 +6359,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; setting default - default_transaction_isolation configuration parameter + default_transaction_isolation configuration parameter @@ -6386,14 +6386,14 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; setting default - default_transaction_read_only configuration parameter + default_transaction_read_only configuration parameter A read-only SQL transaction cannot alter non-temporary tables. This parameter controls the default read-only status of each new - transaction. The default is off (read/write). + transaction. The default is off (read/write). @@ -6409,12 +6409,12 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; setting default - default_transaction_deferrable configuration parameter + default_transaction_deferrable configuration parameter - When running at the serializable isolation level, + When running at the serializable isolation level, a deferrable read-only SQL transaction may be delayed before it is allowed to proceed. However, once it begins executing it does not incur any of the overhead required to ensure @@ -6427,7 +6427,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; This parameter controls the default deferrable status of each new transaction. It currently has no effect on read-write transactions or those operating at isolation levels lower - than serializable. The default is off. + than serializable. The default is off. @@ -6440,7 +6440,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; session_replication_role (enum) - session_replication_role configuration parameter + session_replication_role configuration parameter @@ -6448,8 +6448,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; Controls firing of replication-related triggers and rules for the current session. Setting this variable requires superuser privilege and results in discarding any previously cached - query plans. Possible values are origin (the default), - replica and local. + query plans. Possible values are origin (the default), + replica and local. See for more information. 
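To see the transaction defaults in action, a minimal sketch; every transaction subsequently begun in the session starts with these characteristics:

SET default_transaction_isolation = 'repeatable read';
SET default_transaction_read_only = on;
BEGIN;  -- starts repeatable read, read-only
SELECT current_setting('transaction_isolation'),
       current_setting('transaction_read_only');
COMMIT;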
@@ -6459,21 +6459,21 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; statement_timeout (integer) - statement_timeout configuration parameter + statement_timeout configuration parameter Abort any statement that takes more than the specified number of milliseconds, starting from the time the command arrives at the server - from the client. If log_min_error_statement is set to - ERROR or lower, the statement that timed out will also be + from the client. If log_min_error_statement is set to + ERROR or lower, the statement that timed out will also be logged. A value of zero (the default) turns this off. - Setting statement_timeout in - postgresql.conf is not recommended because it would + Setting statement_timeout in + postgresql.conf is not recommended because it would affect all sessions. @@ -6482,7 +6482,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; lock_timeout (integer) - lock_timeout configuration parameter + lock_timeout configuration parameter @@ -6491,24 +6491,24 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; milliseconds while attempting to acquire a lock on a table, index, row, or other database object. The time limit applies separately to each lock acquisition attempt. The limit applies both to explicit - locking requests (such as LOCK TABLE, or SELECT - FOR UPDATE without NOWAIT) and to implicitly-acquired - locks. If log_min_error_statement is set to - ERROR or lower, the statement that timed out will be + locking requests (such as LOCK TABLE, or SELECT + FOR UPDATE without NOWAIT) and to implicitly-acquired + locks. If log_min_error_statement is set to + ERROR or lower, the statement that timed out will be logged. A value of zero (the default) turns this off. - Unlike statement_timeout, this timeout can only occur - while waiting for locks. Note that if statement_timeout - is nonzero, it is rather pointless to set lock_timeout to + Unlike statement_timeout, this timeout can only occur + while waiting for locks. Note that if statement_timeout + is nonzero, it is rather pointless to set lock_timeout to the same or larger value, since the statement timeout would always trigger first. - Setting lock_timeout in - postgresql.conf is not recommended because it would + Setting lock_timeout in + postgresql.conf is not recommended because it would affect all sessions. @@ -6517,7 +6517,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; idle_in_transaction_session_timeout (integer) - idle_in_transaction_session_timeout configuration parameter + idle_in_transaction_session_timeout configuration parameter @@ -6537,21 +6537,21 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; vacuum_freeze_table_age (integer) - vacuum_freeze_table_age configuration parameter + vacuum_freeze_table_age configuration parameter - VACUUM performs an aggressive scan if the table's - pg_class.relfrozenxid field has reached + VACUUM performs an aggressive scan if the table's + pg_class.relfrozenxid field has reached the age specified by this setting. An aggressive scan differs from - a regular VACUUM in that it visits every page that might + a regular VACUUM in that it visits every page that might contain unfrozen XIDs or MXIDs, not just those that might contain dead tuples. The default is 150 million transactions. 
Although users can - set this value anywhere from zero to two billion, VACUUM + will silently limit the effective value to 95% of , so that a - periodic manual VACUUM has a chance to run before an + anti-wraparound autovacuum is launched for the table. For more information see . @@ -6562,17 +6562,17 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; vacuum_freeze_min_age (integer) - vacuum_freeze_min_age configuration parameter + vacuum_freeze_min_age configuration parameter - Specifies the cutoff age (in transactions) that VACUUM + Specifies the cutoff age (in transactions) that VACUUM should use to decide whether to freeze row versions while scanning a table. The default is 50 million transactions. Although users can set this value anywhere from zero to one billion, - VACUUM will silently limit the effective value to half + VACUUM will silently limit the effective value to half the value of , so that there is not an unreasonably short time between forced autovacuums. For more information see vacuum_multixact_freeze_table_age (integer) - vacuum_multixact_freeze_table_age configuration parameter + vacuum_multixact_freeze_table_age configuration parameter - VACUUM performs an aggressive scan if the table's - pg_class.relminmxid field has reached + VACUUM performs an aggressive scan if the table's + pg_class.relminmxid field has reached the age specified by this setting. An aggressive scan differs from - a regular VACUUM in that it visits every page that might + a regular VACUUM in that it visits every page that might contain unfrozen XIDs or MXIDs, not just those that might contain dead tuples. The default is 150 million multixacts. Although users can set this value anywhere from zero to two billion, - VACUUM will silently limit the effective value to 95% of + VACUUM will silently limit the effective value to 95% of , so that a - periodic manual VACUUM has a chance to run before an + periodic manual VACUUM has a chance to run before an anti-wraparound autovacuum is launched for the table. For more information see . @@ -6608,17 +6608,17 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; vacuum_multixact_freeze_min_age (integer) - vacuum_multixact_freeze_min_age configuration parameter + vacuum_multixact_freeze_min_age configuration parameter - Specifies the cutoff age (in multixacts) that VACUUM + Specifies the cutoff age (in multixacts) that VACUUM should use to decide whether to replace multixact IDs with a newer transaction ID or multixact ID while scanning a table. The default is 5 million multixacts. Although users can set this value anywhere from zero to one billion, - VACUUM will silently limit the effective value to half + VACUUM will silently limit the effective value to half the value of , so that there is not an unreasonably short time between forced autovacuums.
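To relate these age limits to actual tables, one common diagnostic built only from the catalog columns named above:

SELECT relname, age(relfrozenxid) AS xid_age, mxid_age(relminmxid) AS mxid_age
FROM pg_class
WHERE relkind = 'r'
ORDER BY xid_age DESC
LIMIT 5;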
@@ -6630,7 +6630,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; bytea_output (enum) - bytea_output configuration parameter + bytea_output configuration parameter @@ -6648,7 +6648,7 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; xmlbinary (enum) - xmlbinary configuration parameter + xmlbinary configuration parameter @@ -6676,10 +6676,10 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; xmloption (enum) - xmloption configuration parameter + xmloption configuration parameter - SET XML OPTION + SET XML OPTION XML option @@ -6709,16 +6709,16 @@ SET XML OPTION { DOCUMENT | CONTENT }; gin_pending_list_limit (integer) - gin_pending_list_limit configuration parameter + gin_pending_list_limit configuration parameter Sets the maximum size of the GIN pending list which is used - when fastupdate is enabled. If the list grows + when fastupdate is enabled. If the list grows larger than this maximum size, it is cleaned up by moving the entries in it to the main GIN data structure in bulk. - The default is four megabytes (4MB). This setting + The default is four megabytes (4MB). This setting can be overridden for individual GIN indexes by changing index storage parameters. See and @@ -6737,7 +6737,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; DateStyle (string) - DateStyle configuration parameter + DateStyle configuration parameter @@ -6745,16 +6745,16 @@ SET XML OPTION { DOCUMENT | CONTENT }; Sets the display format for date and time values, as well as the rules for interpreting ambiguous date input values. For historical reasons, this variable contains two independent - components: the output format specification (ISO, - Postgres, SQL, or German) + components: the output format specification (ISO, + Postgres, SQL, or German) and the input/output specification for year/month/day ordering - (DMY, MDY, or YMD). These - can be set separately or together. The keywords Euro - and European are synonyms for DMY; the - keywords US, NonEuro, and - NonEuropean are synonyms for MDY. See + (DMY, MDY, or YMD). These + can be set separately or together. The keywords Euro + and European are synonyms for DMY; the + keywords US, NonEuro, and + NonEuropean are synonyms for MDY. See for more information. The - built-in default is ISO, MDY, but + built-in default is ISO, MDY, but initdb will initialize the configuration file with a setting that corresponds to the behavior of the chosen lc_time locale. @@ -6765,28 +6765,28 @@ SET XML OPTION { DOCUMENT | CONTENT }; IntervalStyle (enum) - IntervalStyle configuration parameter + IntervalStyle configuration parameter Sets the display format for interval values. - The value sql_standard will produce + The value sql_standard will produce output matching SQL standard interval literals. - The value postgres (which is the default) will produce - output matching PostgreSQL releases prior to 8.4 + The value postgres (which is the default) will produce + output matching PostgreSQL releases prior to 8.4 when the - parameter was set to ISO. - The value postgres_verbose will produce output - matching PostgreSQL releases prior to 8.4 - when the DateStyle - parameter was set to non-ISO output. - The value iso_8601 will produce output matching the time - interval format with designators defined in section + parameter was set to ISO. + The value postgres_verbose will produce output + matching PostgreSQL releases prior to 8.4 + when the DateStyle + parameter was set to non-ISO output. 
+ The value iso_8601 will produce output matching the time + interval format with designators defined in section 4.4.3.2 of ISO 8601. - The IntervalStyle parameter also affects the + The IntervalStyle parameter also affects the interpretation of ambiguous interval input. See for more information. @@ -6796,15 +6796,15 @@ SET XML OPTION { DOCUMENT | CONTENT }; TimeZone (string) - TimeZone configuration parameter + TimeZone configuration parameter - time zone + time zone Sets the time zone for displaying and interpreting time stamps. - The built-in default is GMT, but that is typically - overridden in postgresql.conf; initdb + The built-in default is GMT, but that is typically + overridden in postgresql.conf; initdb will install a setting there corresponding to its system environment. See for more information. @@ -6814,14 +6814,14 @@ SET XML OPTION { DOCUMENT | CONTENT }; timezone_abbreviations (string) - timezone_abbreviations configuration parameter + timezone_abbreviations configuration parameter - time zone names + time zone names Sets the collection of time zone abbreviations that will be accepted - by the server for datetime input. The default is 'Default', + by the server for datetime input. The default is 'Default', which is a collection that works in most of the world; there are also 'Australia' and 'India', and other collections can be defined for a particular installation. @@ -6840,15 +6840,15 @@ SET XML OPTION { DOCUMENT | CONTENT }; display - extra_float_digits configuration parameter + extra_float_digits configuration parameter This parameter adjusts the number of digits displayed for - floating-point values, including float4, float8, + floating-point values, including float4, float8, and geometric data types. The parameter value is added to the - standard number of digits (FLT_DIG or DBL_DIG + standard number of digits (FLT_DIG or DBL_DIG as appropriate). The value can be set as high as 3, to include partially-significant digits; this is especially useful for dumping float data that needs to be restored exactly. Or it can be set @@ -6861,9 +6861,9 @@ SET XML OPTION { DOCUMENT | CONTENT }; client_encoding (string) - client_encoding configuration parameter + client_encoding configuration parameter - character set + character set @@ -6878,7 +6878,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; lc_messages (string) - lc_messages configuration parameter + lc_messages configuration parameter @@ -6910,7 +6910,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; lc_monetary (string) - lc_monetary configuration parameter + lc_monetary configuration parameter @@ -6929,7 +6929,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; lc_numeric (string) - lc_numeric configuration parameter + lc_numeric configuration parameter @@ -6948,7 +6948,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; lc_time (string) - lc_time configuration parameter + lc_time configuration parameter @@ -6967,7 +6967,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; default_text_search_config (string) - default_text_search_config configuration parameter + default_text_search_config configuration parameter @@ -6976,7 +6976,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; of the text search functions that do not have an explicit argument specifying the configuration. See for further information. 
- The built-in default is pg_catalog.simple, but + The built-in default is pg_catalog.simple, but initdb will initialize the configuration file with a setting that corresponds to the chosen lc_ctype locale, if a configuration @@ -6997,8 +6997,8 @@ SET XML OPTION { DOCUMENT | CONTENT }; server, in order to load additional functionality or achieve performance benefits. For example, a setting of '$libdir/mylib' would cause - mylib.so (or on some platforms, - mylib.sl) to be preloaded from the installation's standard + mylib.so (or on some platforms, + mylib.sl) to be preloaded from the installation's standard library directory. The differences between the settings are when they take effect and what privileges are required to change them. @@ -7007,14 +7007,14 @@ SET XML OPTION { DOCUMENT | CONTENT }; PostgreSQL procedural language libraries can be preloaded in this way, typically by using the syntax '$libdir/plXXX' where - XXX is pgsql, perl, - tcl, or python. + XXX is pgsql, perl, + tcl, or python. Only shared libraries specifically intended to be used with PostgreSQL can be loaded this way. Every PostgreSQL-supported library has - a magic block that is checked to guarantee compatibility. For + a magic block that is checked to guarantee compatibility. For this reason, non-PostgreSQL libraries cannot be loaded in this way. You might be able to use operating-system facilities such as LD_PRELOAD for that. @@ -7029,10 +7029,10 @@ SET XML OPTION { DOCUMENT | CONTENT }; local_preload_libraries (string) - local_preload_libraries configuration parameter + local_preload_libraries configuration parameter - $libdir/plugins + $libdir/plugins @@ -7051,10 +7051,10 @@ SET XML OPTION { DOCUMENT | CONTENT }; This option can be set by any user. Because of that, the libraries that can be loaded are restricted to those appearing in the - plugins subdirectory of the installation's + plugins subdirectory of the installation's standard library directory. (It is the database administrator's - responsibility to ensure that only safe libraries - are installed there.) Entries in local_preload_libraries + responsibility to ensure that only safe libraries + are installed there.) Entries in local_preload_libraries can specify this directory explicitly, for example $libdir/plugins/mylib, or just specify the library name — mylib would have @@ -7064,11 +7064,11 @@ SET XML OPTION { DOCUMENT | CONTENT }; The intent of this feature is to allow unprivileged users to load debugging or performance-measurement libraries into specific sessions - without requiring an explicit LOAD command. To that end, + without requiring an explicit LOAD command. To that end, it would be typical to set this parameter using the PGOPTIONS environment variable on the client or by using - ALTER ROLE SET. + ALTER ROLE SET. @@ -7083,7 +7083,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; session_preload_libraries (string) - session_preload_libraries configuration parameter + session_preload_libraries configuration parameter @@ -7104,10 +7104,10 @@ SET XML OPTION { DOCUMENT | CONTENT }; The intent of this feature is to allow debugging or performance-measurement libraries to be loaded into specific sessions without an explicit - LOAD command being given. For + LOAD command being given. For example, could be enabled for all sessions under a given user name by setting this parameter - with ALTER ROLE SET. Also, this parameter can be changed + with ALTER ROLE SET. 
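For instance (the role name is hypothetical; auto_explain is a contrib module shipped with the server, and the mylib entry echoes the hypothetical library used in the text):

ALTER ROLE analyst SET session_preload_libraries = 'auto_explain';
-- local_preload_libraries can likewise come from the client side:
--   PGOPTIONS='-c local_preload_libraries=$libdir/plugins/mylib' psql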
Also, this parameter can be changed without restarting the server (but changes only take effect when a new session is started), so it is easier to add new modules this way, even if they should apply to all sessions. @@ -7125,7 +7125,7 @@ SET XML OPTION { DOCUMENT | CONTENT }; shared_preload_libraries (string) - shared_preload_libraries configuration parameter + shared_preload_libraries configuration parameter @@ -7182,9 +7182,9 @@ SET XML OPTION { DOCUMENT | CONTENT }; dynamic_library_path (string) - dynamic_library_path configuration parameter + dynamic_library_path configuration parameter - dynamic loading + dynamic loading @@ -7236,7 +7236,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' gin_fuzzy_search_limit (integer) - gin_fuzzy_search_limit configuration parameter + gin_fuzzy_search_limit configuration parameter @@ -7267,7 +7267,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' deadlock - deadlock_timeout configuration parameter + deadlock_timeout configuration parameter @@ -7280,7 +7280,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' just wait on the lock for a while before checking for a deadlock. Increasing this value reduces the amount of time wasted in needless deadlock checks, but slows down reporting of - real deadlock errors. The default is one second (1s), + real deadlock errors. The default is one second (1s), which is probably about the smallest value you would want in practice. On a heavily loaded server you might want to raise it. Ideally the setting should exceed your typical transaction time, @@ -7302,7 +7302,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_locks_per_transaction (integer) - max_locks_per_transaction configuration parameter + max_locks_per_transaction configuration parameter @@ -7315,7 +7315,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' any one time. This parameter controls the average number of object locks allocated for each transaction; individual transactions can lock more objects as long as the locks of all transactions - fit in the lock table. This is not the number of + fit in the lock table. This is not the number of rows that can be locked; that value is unlimited. The default, 64, has historically proven sufficient, but you might need to raise this value if you have queries that touch many different @@ -7334,7 +7334,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_pred_locks_per_transaction (integer) - max_pred_locks_per_transaction configuration parameter + max_pred_locks_per_transaction configuration parameter @@ -7347,7 +7347,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' any one time. This parameter controls the average number of object locks allocated for each transaction; individual transactions can lock more objects as long as the locks of all transactions - fit in the lock table. This is not the number of + fit in the lock table. This is not the number of rows that can be locked; that value is unlimited. 
The default, 64, has generally been sufficient in testing, but you might need to raise this value if you have clients that touch many different @@ -7360,7 +7360,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_pred_locks_per_relation (integer) - max_pred_locks_per_relation configuration parameter + max_pred_locks_per_relation configuration parameter @@ -7371,8 +7371,8 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' limit, while negative values mean divided by the absolute value of this setting. The default is -2, which keeps - the behavior from previous versions of PostgreSQL. - This parameter can only be set in the postgresql.conf + the behavior from previous versions of PostgreSQL. + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -7381,7 +7381,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_pred_locks_per_page (integer) - max_pred_locks_per_page configuration parameter + max_pred_locks_per_page configuration parameter @@ -7389,7 +7389,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' This controls how many rows on a single page can be predicate-locked before the lock is promoted to covering the whole page. The default is 2. This parameter can only be set in - the postgresql.conf file or on the server command line. + the postgresql.conf file or on the server command line. @@ -7408,62 +7408,62 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' array_nulls (boolean) - array_nulls configuration parameter + array_nulls configuration parameter This controls whether the array input parser recognizes - unquoted NULL as specifying a null array element. - By default, this is on, allowing array values containing - null values to be entered. However, PostgreSQL versions + unquoted NULL as specifying a null array element. + By default, this is on, allowing array values containing + null values to be entered. However, PostgreSQL versions before 8.2 did not support null values in arrays, and therefore would - treat NULL as specifying a normal array element with - the string value NULL. For backward compatibility with + treat NULL as specifying a normal array element with + the string value NULL. For backward compatibility with applications that require the old behavior, this variable can be - turned off. + turned off. Note that it is possible to create array values containing null values - even when this variable is off. + even when this variable is off. backslash_quote (enum) - stringsbackslash quotes + stringsbackslash quotes - backslash_quote configuration parameter + backslash_quote configuration parameter This controls whether a quote mark can be represented by - \' in a string literal. The preferred, SQL-standard way - to represent a quote mark is by doubling it ('') but - PostgreSQL has historically also accepted - \'. However, use of \' creates security risks + \' in a string literal. The preferred, SQL-standard way + to represent a quote mark is by doubling it ('') but + PostgreSQL has historically also accepted + \'. However, use of \' creates security risks because in some client character set encodings, there are multibyte characters in which the last byte is numerically equivalent to ASCII - \. If client-side code does escaping incorrectly then a + \. If client-side code does escaping incorrectly then a SQL-injection attack is possible. 
This risk can be prevented by making the server reject queries in which a quote mark appears to be escaped by a backslash. - The allowed values of backslash_quote are - on (allow \' always), - off (reject always), and - safe_encoding (allow only if client encoding does not - allow ASCII \ within a multibyte character). - safe_encoding is the default setting. + The allowed values of backslash_quote are + on (allow \' always), + off (reject always), and + safe_encoding (allow only if client encoding does not + allow ASCII \ within a multibyte character). + safe_encoding is the default setting. - Note that in a standard-conforming string literal, \ just - means \ anyway. This parameter only affects the handling of + Note that in a standard-conforming string literal, \ just + means \ anyway. This parameter only affects the handling of non-standard-conforming literals, including - escape string syntax (E'...'). + escape string syntax (E'...'). @@ -7471,7 +7471,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' default_with_oids (boolean) - default_with_oids configuration parameter + default_with_oids configuration parameter @@ -7481,9 +7481,9 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' newly-created tables, if neither WITH OIDS nor WITHOUT OIDS is specified. It also determines whether OIDs will be included in tables created by - SELECT INTO. The parameter is off - by default; in PostgreSQL 8.0 and earlier, it - was on by default. + SELECT INTO. The parameter is off + by default; in PostgreSQL 8.0 and earlier, it + was on by default. @@ -7499,21 +7499,21 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' escape_string_warning (boolean) - stringsescape warning + stringsescape warning - escape_string_warning configuration parameter + escape_string_warning configuration parameter - When on, a warning is issued if a backslash (\) - appears in an ordinary string literal ('...' + When on, a warning is issued if a backslash (\) + appears in an ordinary string literal ('...' syntax) and standard_conforming_strings is off. - The default is on. + The default is on. Applications that wish to use backslash as escape should be - modified to use escape string syntax (E'...'), + modified to use escape string syntax (E'...'), because the default behavior of ordinary strings is now to treat backslash as an ordinary character, per SQL standard. This variable can be enabled to help locate code that needs to be changed. @@ -7524,22 +7524,22 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' lo_compat_privileges (boolean) - lo_compat_privileges configuration parameter + lo_compat_privileges configuration parameter - In PostgreSQL releases prior to 9.0, large objects + In PostgreSQL releases prior to 9.0, large objects did not have access privileges and were, therefore, always readable - and writable by all users. Setting this variable to on + and writable by all users. Setting this variable to on disables the new privilege checks, for compatibility with prior - releases. The default is off. + releases. The default is off. Only superusers can change this setting. Setting this variable does not disable all security checks related to large objects — only those for which the default behavior has - changed in PostgreSQL 9.0. + changed in PostgreSQL 9.0. For example, lo_import() and lo_export() need superuser privileges regardless of this setting. 
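The interaction of these string settings is easiest to see directly; this assumes the modern defaults noted in the surrounding text:

SHOW standard_conforming_strings;  -- 'on' by default since 9.1
SELECT 'a\b';     -- ordinary literal: backslash is just a character here
SELECT E'a\nb';   -- escape syntax: \n becomes a newline
-- with standard_conforming_strings off, the first SELECT would draw a warning
-- while escape_string_warning is on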
@@ -7550,18 +7550,18 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' operator_precedence_warning (boolean) - operator_precedence_warning configuration parameter + operator_precedence_warning configuration parameter When on, the parser will emit a warning for any construct that might - have changed meanings since PostgreSQL 9.4 as a result + have changed meanings since PostgreSQL 9.4 as a result of changes in operator precedence. This is useful for auditing applications to see if precedence changes have broken anything; but it is not meant to be kept turned on in production, since it will warn about some perfectly valid, standard-compliant SQL code. - The default is off. + The default is off. @@ -7573,15 +7573,15 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' quote_all_identifiers (boolean) - quote_all_identifiers configuration parameter + quote_all_identifiers configuration parameter When the database generates SQL, force all identifiers to be quoted, even if they are not (currently) keywords. This will affect the - output of EXPLAIN as well as the results of functions - like pg_get_viewdef. See also the + output of EXPLAIN as well as the results of functions + like pg_get_viewdef. See also the option of and . @@ -7590,22 +7590,22 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' standard_conforming_strings (boolean) - stringsstandard conforming + stringsstandard conforming - standard_conforming_strings configuration parameter + standard_conforming_strings configuration parameter This controls whether ordinary string literals - ('...') treat backslashes literally, as specified in + ('...') treat backslashes literally, as specified in the SQL standard. Beginning in PostgreSQL 9.1, the default is - on (prior releases defaulted to off). + on (prior releases defaulted to off). Applications can check this parameter to determine how string literals will be processed. The presence of this parameter can also be taken as an indication - that the escape string syntax (E'...') is supported. + that the escape string syntax (E'...') is supported. Escape string syntax () should be used if an application desires backslashes to be treated as escape characters. @@ -7616,7 +7616,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' synchronize_seqscans (boolean) - synchronize_seqscans configuration parameter + synchronize_seqscans configuration parameter @@ -7625,13 +7625,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' other, so that concurrent scans read the same block at about the same time and hence share the I/O workload. When this is enabled, a scan might start in the middle of the table and then wrap - around the end to cover all rows, so as to synchronize with the + around the end to cover all rows, so as to synchronize with the activity of scans already in progress. This can result in unpredictable changes in the row ordering returned by queries that - have no ORDER BY clause. Setting this parameter to - off ensures the pre-8.3 behavior in which a sequential + have no ORDER BY clause. Setting this parameter to + off ensures the pre-8.3 behavior in which a sequential scan always starts from the beginning of the table. The default - is on. + is on. 
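One way to observe quote_all_identifiers, using the deparsing function named above:

SET quote_all_identifiers = on;
SELECT pg_get_viewdef('pg_stat_activity'::regclass, true);
-- the view text now comes back with every identifier double-quoted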
@@ -7645,31 +7645,31 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' transform_null_equals (boolean) - IS NULL + IS NULL - transform_null_equals configuration parameter + transform_null_equals configuration parameter - When on, expressions of the form expr = + When on, expressions of the form expr = NULL (or NULL = - expr) are treated as - expr IS NULL, that is, they - return true if expr evaluates to the null value, + expr) are treated as + expr IS NULL, that is, they + return true if expr evaluates to the null value, and false otherwise. The correct SQL-spec-compliant behavior of - expr = NULL is to always + expr = NULL is to always return null (unknown). Therefore this parameter defaults to - off. + off. However, filtered forms in Microsoft Access generate queries that appear to use - expr = NULL to test for + expr = NULL to test for null values, so if you use that interface to access the database you might want to turn this option on. Since expressions of the - form expr = NULL always + form expr = NULL always return the null value (using the SQL standard interpretation), they are not very useful and do not appear often in normal applications so this option does little harm in practice. But new users are @@ -7678,7 +7678,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' - Note that this option only affects the exact form = NULL, + Note that this option only affects the exact form = NULL, not other comparison operators or other expressions that are computationally equivalent to some expression involving the equals operator (such as IN). @@ -7703,7 +7703,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' exit_on_error (boolean) - exit_on_error configuration parameter + exit_on_error configuration parameter @@ -7718,16 +7718,16 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' restart_after_crash (boolean) - restart_after_crash configuration parameter + restart_after_crash configuration parameter - When set to true, which is the default, PostgreSQL + When set to true, which is the default, PostgreSQL will automatically reinitialize after a backend crash. Leaving this value set to true is normally the best way to maximize the availability of the database. However, in some circumstances, such as when - PostgreSQL is being invoked by clusterware, it may be + PostgreSQL is being invoked by clusterware, it may be useful to disable the restart so that the clusterware can gain control and take any actions it deems appropriate. @@ -7742,10 +7742,10 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' Preset Options - The following parameters are read-only, and are determined + The following parameters are read-only, and are determined when PostgreSQL is compiled or when it is installed. As such, they have been excluded from the sample - postgresql.conf file. These options report + postgresql.conf file. These options report various aspects of PostgreSQL behavior that might be of interest to certain applications, particularly administrative front-ends. @@ -7756,13 +7756,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' block_size (integer) - block_size configuration parameter + block_size configuration parameter Reports the size of a disk block. It is determined by the value - of BLCKSZ when building the server. The default + of BLCKSZ when building the server. The default value is 8192 bytes. 
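Preset parameters such as block_size can be read like any other setting, but any attempt to change them fails; for example:

    SHOW block_size;         -- 8192 on a default build
    SET block_size = 16384;  -- fails: this parameter cannot be changed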
The meaning of some configuration variables (such as ) is influenced by block_size. See data_checksums (boolean) - data_checksums configuration parameter + data_checksums configuration parameter @@ -7788,7 +7788,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' debug_assertions (boolean) - debug_assertions configuration parameter + debug_assertions configuration parameter @@ -7808,13 +7808,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' integer_datetimes (boolean) - integer_datetimes configuration parameter + integer_datetimes configuration parameter - Reports whether PostgreSQL was built with support for - 64-bit-integer dates and times. As of PostgreSQL 10, + Reports whether PostgreSQL was built with support for + 64-bit-integer dates and times. As of PostgreSQL 10, this is always on. @@ -7823,7 +7823,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' lc_collate (string) - lc_collate configuration parameter + lc_collate configuration parameter @@ -7838,7 +7838,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' lc_ctype (string) - lc_ctype configuration parameter + lc_ctype configuration parameter @@ -7855,13 +7855,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_function_args (integer) - max_function_args configuration parameter + max_function_args configuration parameter Reports the maximum number of function arguments. It is determined by - the value of FUNC_MAX_ARGS when building the server. The + the value of FUNC_MAX_ARGS when building the server. The default value is 100 arguments. @@ -7870,14 +7870,14 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_identifier_length (integer) - max_identifier_length configuration parameter + max_identifier_length configuration parameter Reports the maximum identifier length. It is determined as one - less than the value of NAMEDATALEN when building - the server. The default value of NAMEDATALEN is + less than the value of NAMEDATALEN when building + the server. The default value of NAMEDATALEN is 64; therefore the default max_identifier_length is 63 bytes, which can be less than 63 characters when using multibyte encodings. @@ -7888,13 +7888,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' max_index_keys (integer) - max_index_keys configuration parameter + max_index_keys configuration parameter Reports the maximum number of index keys. It is determined by - the value of INDEX_MAX_KEYS when building the server. The + the value of INDEX_MAX_KEYS when building the server. The default value is 32 keys. @@ -7903,16 +7903,16 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' segment_size (integer) - segment_size configuration parameter + segment_size configuration parameter Reports the number of blocks (pages) that can be stored within a file - segment. It is determined by the value of RELSEG_SIZE + segment. It is determined by the value of RELSEG_SIZE when building the server. The maximum size of a segment file in bytes - is equal to segment_size multiplied by - block_size; by default this is 1GB. + is equal to segment_size multiplied by + block_size; by default this is 1GB. 
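One practical consequence of max_identifier_length, described above, is that longer names are silently cut down (with a notice). A small illustration, assuming the default NAMEDATALEN:

    SELECT current_setting('max_identifier_length');  -- 63
    -- The 72-character name below is truncated to 63 bytes, with a NOTICE:
    CREATE TABLE an_extremely_long_table_name_that_overflows_the_sixty_three_byte_limit_x (id int);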
@@ -7920,9 +7920,9 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' server_encoding (string) - server_encoding configuration parameter + server_encoding configuration parameter - character set + character set @@ -7937,13 +7937,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' server_version (string) - server_version configuration parameter + server_version configuration parameter Reports the version number of the server. It is determined by the - value of PG_VERSION when building the server. + value of PG_VERSION when building the server. @@ -7951,13 +7951,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' server_version_num (integer) - server_version_num configuration parameter + server_version_num configuration parameter Reports the version number of the server as an integer. It is determined - by the value of PG_VERSION_NUM when building the server. + by the value of PG_VERSION_NUM when building the server. @@ -7965,13 +7965,13 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' wal_block_size (integer) - wal_block_size configuration parameter + wal_block_size configuration parameter Reports the size of a WAL disk block. It is determined by the value - of XLOG_BLCKSZ when building the server. The default value + of XLOG_BLCKSZ when building the server. The default value is 8192 bytes. @@ -7980,14 +7980,14 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' wal_segment_size (integer) - wal_segment_size configuration parameter + wal_segment_size configuration parameter Reports the number of blocks (pages) in a WAL segment file. The total size of a WAL segment file in bytes is equal to - wal_segment_size multiplied by wal_block_size; + wal_segment_size multiplied by wal_block_size; by default this is 16MB. See for more information. @@ -8010,12 +8010,12 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' Custom options have two-part names: an extension name, then a dot, then the parameter name proper, much like qualified names in SQL. An example - is plpgsql.variable_conflict. + is plpgsql.variable_conflict. Because custom options may need to be set in processes that have not - loaded the relevant extension module, PostgreSQL + loaded the relevant extension module, PostgreSQL will accept a setting for any two-part parameter name. Such variables are treated as placeholders and have no function until the module that defines them is loaded. When an extension module is loaded, it will add @@ -8034,7 +8034,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' to assist with recovery of severely damaged databases. There should be no reason to use them on a production database. As such, they have been excluded from the sample - postgresql.conf file. Note that many of these + postgresql.conf file. Note that many of these parameters require special source compilation flags to work at all. @@ -8073,7 +8073,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' post_auth_delay (integer) - post_auth_delay configuration parameter + post_auth_delay configuration parameter @@ -8090,7 +8090,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' pre_auth_delay (integer) - pre_auth_delay configuration parameter + pre_auth_delay configuration parameter @@ -8100,7 +8100,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' authentication procedure. 
This is intended to give developers an opportunity to attach to the server process with a debugger to trace down misbehavior in authentication. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -8109,7 +8109,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' trace_notify (boolean) - trace_notify configuration parameter + trace_notify configuration parameter @@ -8127,7 +8127,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' trace_recovery_messages (enum) - trace_recovery_messages configuration parameter + trace_recovery_messages configuration parameter @@ -8136,15 +8136,15 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' would not be logged. This parameter allows the user to override the normal setting of , but only for specific messages. This is intended for use in debugging Hot Standby. - Valid values are DEBUG5, DEBUG4, - DEBUG3, DEBUG2, DEBUG1, and - LOG. The default, LOG, does not affect + Valid values are DEBUG5, DEBUG4, + DEBUG3, DEBUG2, DEBUG1, and + LOG. The default, LOG, does not affect logging decisions at all. The other values cause recovery-related debug messages of that priority or higher to be logged as though they - had LOG priority; for common settings of - log_min_messages this results in unconditionally sending + had LOG priority; for common settings of + log_min_messages this results in unconditionally sending them to the server log. - This parameter can only be set in the postgresql.conf + This parameter can only be set in the postgresql.conf file or on the server command line. @@ -8153,7 +8153,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' trace_sort (boolean) - trace_sort configuration parameter + trace_sort configuration parameter @@ -8169,7 +8169,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' trace_locks (boolean) - trace_locks configuration parameter + trace_locks configuration parameter @@ -8210,7 +8210,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) trace_lwlocks (boolean) - trace_lwlocks configuration parameter + trace_lwlocks configuration parameter @@ -8230,7 +8230,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) trace_userlocks (boolean) - trace_userlocks configuration parameter + trace_userlocks configuration parameter @@ -8249,7 +8249,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) trace_lock_oidmin (integer) - trace_lock_oidmin configuration parameter + trace_lock_oidmin configuration parameter @@ -8268,7 +8268,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) trace_lock_table (integer) - trace_lock_table configuration parameter + trace_lock_table configuration parameter @@ -8286,7 +8286,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) debug_deadlocks (boolean) - debug_deadlocks configuration parameter + debug_deadlocks configuration parameter @@ -8305,7 +8305,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) log_btree_build_stats (boolean) - log_btree_build_stats configuration parameter + log_btree_build_stats configuration parameter @@ -8324,7 +8324,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) wal_consistency_checking (string) - wal_consistency_checking configuration parameter + wal_consistency_checking configuration parameter @@ -8344,10 +8344,10 
@@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) the feature. It can be set to all to check all records, or to a comma-separated list of resource managers to check only records originating from those resource managers. Currently, - the supported resource managers are heap, - heap2, btree, hash, - gin, gist, sequence, - spgist, brin, and generic. Only + the supported resource managers are heap, + heap2, btree, hash, + gin, gist, sequence, + spgist, brin, and generic. Only superusers can change this setting. @@ -8356,7 +8356,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) wal_debug (boolean) - wal_debug configuration parameter + wal_debug configuration parameter @@ -8372,7 +8372,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) ignore_checksum_failure (boolean) - ignore_checksum_failure configuration parameter + ignore_checksum_failure configuration parameter @@ -8381,15 +8381,15 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) Detection of a checksum failure during a read normally causes - PostgreSQL to report an error, aborting the current - transaction. Setting ignore_checksum_failure to on causes + PostgreSQL to report an error, aborting the current + transaction. Setting ignore_checksum_failure to on causes the system to ignore the failure (but still report a warning), and continue processing. This behavior may cause crashes, propagate - or hide corruption, or other serious problems. However, it may allow + or hide corruption, or other serious problems. However, it may allow you to get past the error and retrieve undamaged tuples that might still be present in the table if the block header is still sane. If the header is corrupt an error will be reported even if this option is enabled. The - default setting is off, and it can only be changed by a superuser. + default setting is off, and it can only be changed by a superuser. @@ -8397,16 +8397,16 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) zero_damaged_pages (boolean) - zero_damaged_pages configuration parameter + zero_damaged_pages configuration parameter Detection of a damaged page header normally causes - PostgreSQL to report an error, aborting the current - transaction. Setting zero_damaged_pages to on causes + PostgreSQL to report an error, aborting the current + transaction. Setting zero_damaged_pages to on causes the system to instead report a warning, zero out the damaged - page in memory, and continue processing. This behavior will destroy data, + page in memory, and continue processing. This behavior will destroy data, namely all the rows on the damaged page. However, it does allow you to get past the error and retrieve rows from any undamaged pages that might be present in the table. It is useful for recovering data if @@ -8415,7 +8415,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) data from the damaged pages of a table. Zeroed-out pages are not forced to disk so it is recommended to recreate the table or the index before turning this parameter off again. The - default setting is off, and it can only be changed + default setting is off, and it can only be changed by a superuser. 
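A typical last-resort salvage session using the parameters above might look like the following sketch, where broken_tab and salvaged are hypothetical names and the commands are run by a superuser on a disposable copy of the cluster:

    SET zero_damaged_pages = on;                        -- superuser only; destroys data on damaged pages
    CREATE TABLE salvaged AS SELECT * FROM broken_tab;  -- damaged pages are zeroed as they are read
    SET zero_damaged_pages = off;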
@@ -8447,15 +8447,15 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) - shared_buffers = x + shared_buffers = x - log_min_messages = DEBUGx + log_min_messages = DEBUGx - datestyle = euro + datestyle = euro @@ -8464,69 +8464,69 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) , - enable_bitmapscan = off, - enable_hashjoin = off, - enable_indexscan = off, - enable_mergejoin = off, - enable_nestloop = off, - enable_indexonlyscan = off, - enable_seqscan = off, - enable_tidscan = off + enable_bitmapscan = off, + enable_hashjoin = off, + enable_indexscan = off, + enable_mergejoin = off, + enable_nestloop = off, + enable_indexonlyscan = off, + enable_seqscan = off, + enable_tidscan = off - fsync = off + fsync = off - listen_addresses = x + listen_addresses = x - listen_addresses = '*' + listen_addresses = '*' - unix_socket_directories = x + unix_socket_directories = x - ssl = on + ssl = on - max_connections = x + max_connections = x - allow_system_table_mods = on + allow_system_table_mods = on - port = x + port = x - ignore_system_indexes = on + ignore_system_indexes = on - log_statement_stats = on + log_statement_stats = on - work_mem = x + work_mem = x , , - log_parser_stats = on, - log_planner_stats = on, - log_executor_stats = on + log_parser_stats = on, + log_planner_stats = on, + log_executor_stats = on - post_auth_delay = x + post_auth_delay = x diff --git a/doc/src/sgml/contrib-spi.sgml b/doc/src/sgml/contrib-spi.sgml index 3287c18d27..32c7105cf6 100644 --- a/doc/src/sgml/contrib-spi.sgml +++ b/doc/src/sgml/contrib-spi.sgml @@ -9,7 +9,7 @@ - The spi module provides several workable examples + The spi module provides several workable examples of using SPI and triggers. While these functions are of some value in their own right, they are even more useful as examples to modify for your own purposes. The functions are general enough to be used @@ -26,15 +26,15 @@ refint — Functions for Implementing Referential Integrity - check_primary_key() and - check_foreign_key() are used to check foreign key constraints. + check_primary_key() and + check_foreign_key() are used to check foreign key constraints. (This functionality is long since superseded by the built-in foreign key mechanism, of course, but the module is still useful as an example.) - check_primary_key() checks the referencing table. - To use, create a BEFORE INSERT OR UPDATE trigger using this + check_primary_key() checks the referencing table. + To use, create a BEFORE INSERT OR UPDATE trigger using this function on a table referencing another table. Specify as the trigger arguments: the referencing table's column name(s) which form the foreign key, the referenced table name, and the column names in the referenced table @@ -43,14 +43,14 @@ - check_foreign_key() checks the referenced table. - To use, create a BEFORE DELETE OR UPDATE trigger using this + check_foreign_key() checks the referenced table. + To use, create a BEFORE DELETE OR UPDATE trigger using this function on a table referenced by other table(s). 
Specify as the trigger arguments: the number of referencing tables for which the function has to perform checking, the action if a referencing key is found - (cascade — to delete the referencing row, - restrict — to abort transaction if referencing keys - exist, setnull — to set referencing key fields to null), + (cascade — to delete the referencing row, + restrict — to abort transaction if referencing keys + exist, setnull — to set referencing key fields to null), the triggered table's column names which form the primary/unique key, then the referencing table name and column names (repeated for as many referencing tables as were specified by first argument). Note that the @@ -59,7 +59,7 @@ - There are examples in refint.example. + There are examples in refint.example. @@ -67,10 +67,10 @@ timetravel — Functions for Implementing Time Travel - Long ago, PostgreSQL had a built-in time travel feature + Long ago, PostgreSQL had a built-in time travel feature that kept the insert and delete times for each tuple. This can be emulated using these functions. To use these functions, - you must add to a table two columns of abstime type to store + you must add to a table two columns of abstime type to store the date when a tuple was inserted (start_date) and changed/deleted (stop_date): @@ -89,7 +89,7 @@ CREATE TABLE mytab ( When a new row is inserted, start_date should normally be set to - current time, and stop_date to infinity. The trigger + current time, and stop_date to infinity. The trigger will automatically substitute these values if the inserted data contains nulls in these columns. Generally, inserting explicit non-null data in these columns should only be done when re-loading @@ -97,7 +97,7 @@ CREATE TABLE mytab ( - Tuples with stop_date equal to infinity are valid + Tuples with stop_date equal to infinity are valid now, and can be modified. Tuples with a finite stop_date cannot be modified anymore — the trigger will prevent it. (If you need to do that, you can turn off time travel as shown below.) @@ -107,7 +107,7 @@ CREATE TABLE mytab ( For a modifiable row, on update only the stop_date in the tuple being updated will be changed (to current time) and a new tuple with the modified data will be inserted. Start_date in this new tuple will be set to current - time and stop_date to infinity. + time and stop_date to infinity. @@ -117,29 +117,29 @@ CREATE TABLE mytab ( To query for tuples valid now, include - stop_date = 'infinity' in the query's WHERE condition. + stop_date = 'infinity' in the query's WHERE condition. (You might wish to incorporate that in a view.) Similarly, you can query for tuples valid at any past time with suitable conditions on start_date and stop_date. - timetravel() is the general trigger function that supports - this behavior. Create a BEFORE INSERT OR UPDATE OR DELETE + timetravel() is the general trigger function that supports + this behavior. Create a BEFORE INSERT OR UPDATE OR DELETE trigger using this function on each time-traveled table. Specify two trigger arguments: the actual names of the start_date and stop_date columns. Optionally, you can specify one to three more arguments, which must refer - to columns of type text. The trigger will store the name of + to columns of type text. The trigger will store the name of the current user into the first of these columns during INSERT, the second column during UPDATE, and the third during DELETE. 
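For instance, to attach time travel to the mytab table shown above, using just the two required trigger arguments:

    CREATE TRIGGER mytab_timetravel
        BEFORE INSERT OR UPDATE OR DELETE ON mytab
        FOR EACH ROW
        EXECUTE PROCEDURE timetravel('start_date', 'stop_date');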
- set_timetravel() allows you to turn time-travel on or off for + set_timetravel() allows you to turn time-travel on or off for a table. - set_timetravel('mytab', 1) will turn TT ON for table mytab. - set_timetravel('mytab', 0) will turn TT OFF for table mytab. + set_timetravel('mytab', 1) will turn TT ON for table mytab. + set_timetravel('mytab', 0) will turn TT OFF for table mytab. In both cases the old status is reported. While TT is off, you can modify the start_date and stop_date columns freely. Note that the on/off status is local to the current database session — fresh sessions will @@ -147,12 +147,12 @@ CREATE TABLE mytab ( - get_timetravel() returns the TT state for a table without + get_timetravel() returns the TT state for a table without changing it. - There is an example in timetravel.example. + There is an example in timetravel.example. @@ -160,17 +160,17 @@ CREATE TABLE mytab ( autoinc — Functions for Autoincrementing Fields - autoinc() is a trigger that stores the next value of + autoinc() is a trigger that stores the next value of a sequence into an integer field. This has some overlap with the - built-in serial column feature, but it is not the same: - autoinc() will override attempts to substitute a + built-in serial column feature, but it is not the same: + autoinc() will override attempts to substitute a different field value during inserts, and optionally it can be used to increment the field during updates, too. - To use, create a BEFORE INSERT (or optionally BEFORE - INSERT OR UPDATE) trigger using this function. Specify two + To use, create a BEFORE INSERT (or optionally BEFORE + INSERT OR UPDATE) trigger using this function. Specify two trigger arguments: the name of the integer column to be modified, and the name of the sequence object that will supply values. (Actually, you can specify any number of pairs of such names, if @@ -178,7 +178,7 @@ CREATE TABLE mytab ( - There is an example in autoinc.example. + There is an example in autoinc.example. @@ -187,19 +187,19 @@ CREATE TABLE mytab ( insert_username — Functions for Tracking Who Changed a Table - insert_username() is a trigger that stores the current + insert_username() is a trigger that stores the current user's name into a text field. This can be useful for tracking who last modified a particular row within a table. - To use, create a BEFORE INSERT and/or UPDATE + To use, create a BEFORE INSERT and/or UPDATE trigger using this function. Specify a single trigger argument: the name of the text column to be modified. - There is an example in insert_username.example. + There is an example in insert_username.example. @@ -208,21 +208,21 @@ CREATE TABLE mytab ( moddatetime — Functions for Tracking Last Modification Time - moddatetime() is a trigger that stores the current - time into a timestamp field. This can be useful for tracking + moddatetime() is a trigger that stores the current + time into a timestamp field. This can be useful for tracking the last modification time of a particular row within a table. - To use, create a BEFORE UPDATE + To use, create a BEFORE UPDATE trigger using this function. Specify a single trigger argument: the name of the column to be modified. - The column must be of type timestamp or timestamp with - time zone. + The column must be of type timestamp or timestamp with + time zone. - There is an example in moddatetime.example. + There is an example in moddatetime.example. 
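A minimal sketch of such a trigger, assuming a hypothetical table orders with a timestamp column last_changed:

    CREATE TRIGGER orders_moddatetime
        BEFORE UPDATE ON orders
        FOR EACH ROW
        EXECUTE PROCEDURE moddatetime('last_changed');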
diff --git a/doc/src/sgml/contrib.sgml b/doc/src/sgml/contrib.sgml index f32b8a81a2..7dd203e9cd 100644 --- a/doc/src/sgml/contrib.sgml +++ b/doc/src/sgml/contrib.sgml @@ -6,7 +6,7 @@ This appendix and the next one contain information regarding the modules that can be found in the contrib directory of the - PostgreSQL distribution. + PostgreSQL distribution. These include porting tools, analysis utilities, and plug-in features that are not part of the core PostgreSQL system, mainly because they address a limited audience or are too experimental @@ -41,54 +41,54 @@ make installcheck - once you have a PostgreSQL server running. + once you have a PostgreSQL server running. - If you are using a pre-packaged version of PostgreSQL, + If you are using a pre-packaged version of PostgreSQL, these modules are typically made available as a separate subpackage, - such as postgresql-contrib. + such as postgresql-contrib. Many modules supply new user-defined functions, operators, or types. To make use of one of these modules, after you have installed the code you need to register the new SQL objects in the database system. - In PostgreSQL 9.1 and later, this is done by executing + In PostgreSQL 9.1 and later, this is done by executing a command. In a fresh database, you can simply do -CREATE EXTENSION module_name; +CREATE EXTENSION module_name; This command must be run by a database superuser. This registers the new SQL objects in the current database only, so you need to run this command in each database that you want the module's facilities to be available in. Alternatively, run it in - database template1 so that the extension will be copied into + database template1 so that the extension will be copied into subsequently-created databases by default. Many modules allow you to install their objects in a schema of your choice. To do that, add SCHEMA - schema_name to the CREATE EXTENSION + schema_name to the CREATE EXTENSION command. By default, the objects will be placed in your current creation - target schema, typically public. + target schema, typically public. If your database was brought forward by dump and reload from a pre-9.1 - version of PostgreSQL, and you had been using the pre-9.1 + version of PostgreSQL, and you had been using the pre-9.1 version of the module in it, you should instead do -CREATE EXTENSION module_name FROM unpackaged; +CREATE EXTENSION module_name FROM unpackaged; This will update the pre-9.1 objects of the module into a proper - extension object. Future updates to the module will be + extension object. Future updates to the module will be managed by . For more information about extension updates, see . @@ -163,7 +163,7 @@ pages. This appendix and the previous one contain information regarding the modules that can be found in the contrib directory of the - PostgreSQL distribution. See for + PostgreSQL distribution. See for more information about the contrib section in general and server extensions and plug-ins found in contrib specifically. diff --git a/doc/src/sgml/cube.sgml b/doc/src/sgml/cube.sgml index 1ffc40f1a5..46d8e4eb8f 100644 --- a/doc/src/sgml/cube.sgml +++ b/doc/src/sgml/cube.sgml @@ -8,7 +8,7 @@ - This module implements a data type cube for + This module implements a data type cube for representing multidimensional cubes. @@ -17,8 +17,8 @@ shows the valid external - representations for the cube - type. x, y, etc. denote + representations for the cube + type. x, y, etc. denote floating-point numbers. 
@@ -34,43 +34,43 @@ - x + x A one-dimensional point (or, zero-length one-dimensional interval) - (x) + (x) Same as above - x1,x2,...,xn + x1,x2,...,xn A point in n-dimensional space, represented internally as a zero-volume cube - (x1,x2,...,xn) + (x1,x2,...,xn) Same as above - (x),(y) - A one-dimensional interval starting at x and ending at y or vice versa; the + (x),(y) + A one-dimensional interval starting at x and ending at y or vice versa; the order does not matter - [(x),(y)] + [(x),(y)] Same as above - (x1,...,xn),(y1,...,yn) + (x1,...,xn),(y1,...,yn) An n-dimensional cube represented by a pair of its diagonally opposite corners - [(x1,...,xn),(y1,...,yn)] + [(x1,...,xn),(y1,...,yn)] Same as above @@ -79,17 +79,17 @@ It does not matter which order the opposite corners of a cube are - entered in. The cube functions + entered in. The cube functions automatically swap values if needed to create a uniform - lower left — upper right internal representation. - When the corners coincide, cube stores only one corner - along with an is point flag to avoid wasting space. + lower left — upper right internal representation. + When the corners coincide, cube stores only one corner + along with an is point flag to avoid wasting space. White space is ignored on input, so - [(x),(y)] is the same as - [ ( x ), ( y ) ]. + [(x),(y)] is the same as + [ ( x ), ( y ) ]. @@ -107,7 +107,7 @@ shows the operators provided for - type cube. + type cube. @@ -123,91 +123,91 @@ - a = b - boolean + a = b + boolean The cubes a and b are identical. - a && b - boolean + a && b + boolean The cubes a and b overlap. - a @> b - boolean + a @> b + boolean The cube a contains the cube b. - a <@ b - boolean + a <@ b + boolean The cube a is contained in the cube b. - a < b - boolean + a < b + boolean The cube a is less than the cube b. - a <= b - boolean + a <= b + boolean The cube a is less than or equal to the cube b. - a > b - boolean + a > b + boolean The cube a is greater than the cube b. - a >= b - boolean + a >= b + boolean The cube a is greater than or equal to the cube b. - a <> b - boolean + a <> b + boolean The cube a is not equal to the cube b. - a -> n - float8 - Get n-th coordinate of cube (counting from 1). + a -> n + float8 + Get n-th coordinate of cube (counting from 1). - a ~> n - float8 + a ~> n + float8 - Get n-th coordinate in normalized cube + Get n-th coordinate in normalized cube representation, in which the coordinates have been rearranged into - the form lower left — upper right; that is, the + the form lower left — upper right; that is, the smaller endpoint along each dimension appears first. - a <-> b - float8 + a <-> b + float8 Euclidean distance between a and b. - a <#> b - float8 + a <#> b + float8 Taxicab (L-1 metric) distance between a and b. - a <=> b - float8 + a <=> b + float8 Chebyshev (L-inf metric) distance between a and b. @@ -216,35 +216,35 @@
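A few of these input forms and operators in action (a minimal sketch; the literals are cast from text):

    SELECT '(1,2),(3,4)'::cube;                  -- a 2-D box
    SELECT cube(array[0.5, 0.5, 0.5]);           -- a 3-D point
    SELECT '(0),(2)'::cube && '(1),(3)'::cube;   -- overlap: true
    SELECT '(0),(2)'::cube @> '(1)'::cube;       -- containment: true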
- (Before PostgreSQL 8.2, the containment operators @> and <@ were - respectively called @ and ~. These names are still available, but are + (Before PostgreSQL 8.2, the containment operators @> and <@ were + respectively called @ and ~. These names are still available, but are deprecated and will eventually be retired. Notice that the old names are reversed from the convention formerly followed by the core geometric data types!) - The scalar ordering operators (<, >=, etc) + The scalar ordering operators (<, >=, etc) do not make a lot of sense for any practical purpose but sorting. These operators first compare the first coordinates, and if those are equal, compare the second coordinates, etc. They exist mainly to support the - b-tree index operator class for cube, which can be useful for - example if you would like a UNIQUE constraint on a cube column. + b-tree index operator class for cube, which can be useful for + example if you would like a UNIQUE constraint on a cube column. - The cube module also provides a GiST index operator class for - cube values. - A cube GiST index can be used to search for values using the - =, &&, @>, and - <@ operators in WHERE clauses. + The cube module also provides a GiST index operator class for + cube values. + A cube GiST index can be used to search for values using the + =, &&, @>, and + <@ operators in WHERE clauses. - In addition, a cube GiST index can be used to find nearest + In addition, a cube GiST index can be used to find nearest neighbors using the metric operators - <->, <#>, and - <=> in ORDER BY clauses. + <->, <#>, and + <=> in ORDER BY clauses. For example, the nearest neighbor of the 3-D point (0.5, 0.5, 0.5) could be found efficiently with: @@ -253,7 +253,7 @@ SELECT c FROM test ORDER BY c <-> cube(array[0.5,0.5,0.5]) LIMIT 1; - The ~> operator can also be used in this way to + The ~> operator can also be used in this way to efficiently retrieve the first few values sorted by a selected coordinate. For example, to get the first few cubes ordered by the first coordinate (lower left corner) ascending one could use the following query: @@ -365,7 +365,7 @@ SELECT c FROM test ORDER BY c ~> 3 DESC LIMIT 5; cube_ll_coord(cube, integer) float8 - Returns the n-th coordinate value for the lower + Returns the n-th coordinate value for the lower left corner of the cube. @@ -376,7 +376,7 @@ SELECT c FROM test ORDER BY c ~> 3 DESC LIMIT 5; cube_ur_coord(cube, integer) float8 - Returns the n-th coordinate value for the + Returns the n-th coordinate value for the upper right corner of the cube. @@ -412,9 +412,9 @@ SELECT c FROM test ORDER BY c ~> 3 DESC LIMIT 5; desired. - cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[2]) == '(3),(7)' + cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[2]) == '(3),(7)' cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]) == - '(5,3,1,1),(8,7,6,6)' + '(5,3,1,1),(8,7,6,6)' @@ -440,24 +440,24 @@ SELECT c FROM test ORDER BY c ~> 3 DESC LIMIT 5; cube_enlarge(c cube, r double, n integer) cube Increases the size of the cube by the specified - radius r in at least n dimensions. + radius r in at least n dimensions. If the radius is negative the cube is shrunk instead. - All defined dimensions are changed by the radius r. - Lower-left coordinates are decreased by r and - upper-right coordinates are increased by r. If a + All defined dimensions are changed by the radius r. + Lower-left coordinates are decreased by r and + upper-right coordinates are increased by r. 
If a lower-left coordinate is increased to more than the corresponding
-       upper-right coordinate (this can only happen when r
+       upper-right coordinate (this can only happen when r
        < 0) then both coordinates are set to their average.
-       If n is greater than the number of defined dimensions
-       and the cube is being enlarged (r > 0), then extra
-       dimensions are added to make n altogether;
+       If n is greater than the number of defined dimensions
+       and the cube is being enlarged (r > 0), then extra
+       dimensions are added to make n altogether;
        0 is used as the initial value for the extra coordinates.
        This function is useful for creating bounding boxes around a point for
        searching for nearby points.
        cube_enlarge('(1,2),(3,4)', 0.5, 3) ==
-       '(0.5,1.5,-0.5),(3.5,4.5,0.5)'
+       '(0.5,1.5,-0.5),(3.5,4.5,0.5)'
@@ -523,13 +523,13 @@ t
   Notes
-   For examples of usage, see the regression test sql/cube.sql.
+   For examples of usage, see the regression test sql/cube.sql.
   To make it harder for people to break things, there is
   a limit of 100 on the number of dimensions of cubes. This is set
-   in cubedata.h if you need something bigger.
+   in cubedata.h if you need something bigger.
diff --git a/doc/src/sgml/custom-scan.sgml b/doc/src/sgml/custom-scan.sgml
index 9d1ca7bfe1..a46641674f 100644
--- a/doc/src/sgml/custom-scan.sgml
+++ b/doc/src/sgml/custom-scan.sgml
@@ -9,9 +9,9 @@
-  PostgreSQL supports a set of experimental facilities which
+  PostgreSQL supports a set of experimental facilities which
   are intended to allow extension modules to add new scan types to the system.
-  Unlike a foreign data wrapper, which is only
+  Unlike a foreign data wrapper, which is only
   responsible for knowing how to scan its own foreign tables, a custom scan
   provider can provide an alternative method of scanning any relation in the
   system. Typically, the motivation for writing a custom scan provider will
@@ -51,9 +51,9 @@ extern PGDLLIMPORT set_rel_pathlist_hook_type set_rel_pathlist_hook;
   Although this hook function can be used to examine, modify, or remove
   paths generated by the core system, a custom scan provider will typically
-  confine itself to generating CustomPath objects and adding
-  them to rel using add_path. The custom scan
-  provider is responsible for initializing the CustomPath
+  confine itself to generating CustomPath objects and adding
+  them to rel using add_path. The custom scan
+  provider is responsible for initializing the CustomPath
   object, which is declared like this:
typedef struct CustomPath
@@ -68,22 +68,22 @@ typedef struct CustomPath
-   path must be initialized as for any other path, including
+   path must be initialized as for any other path, including
   the row-count estimate, start and total cost, and sort ordering provided
-   by this path. flags is a bit mask, which should include
-   CUSTOMPATH_SUPPORT_BACKWARD_SCAN if the custom path can support
-   a backward scan and CUSTOMPATH_SUPPORT_MARK_RESTORE if it
+   by this path. flags is a bit mask, which should include
+   CUSTOMPATH_SUPPORT_BACKWARD_SCAN if the custom path can support
+   a backward scan and CUSTOMPATH_SUPPORT_MARK_RESTORE if it
   can support mark and restore. Both capabilities are optional.
-   An optional custom_paths is a list of Path
+   An optional custom_paths is a list of Path
   nodes used by this custom-path node; these will be transformed into
-   Plan nodes by planner.
-   custom_private can be used to store the custom path's
+   Plan nodes by planner.
+   custom_private can be used to store the custom path's
   private data.
Private data should be stored in a form that can be handled - by nodeToString, so that debugging routines that attempt to - print the custom path will work as designed. methods must + by nodeToString, so that debugging routines that attempt to + print the custom path will work as designed. methods must point to a (usually statically allocated) object implementing the required custom path methods, of which there is currently only one. The - LibraryName and SymbolName fields must also + LibraryName and SymbolName fields must also be initialized so that the dynamic loader can resolve them to locate the method table. @@ -93,7 +93,7 @@ typedef struct CustomPath relations, such a path must produce the same output as would normally be produced by the join it replaces. To do this, the join provider should set the following hook, and then within the hook function, - create CustomPath path(s) for the join relation. + create CustomPath path(s) for the join relation. typedef void (*set_join_pathlist_hook_type) (PlannerInfo *root, RelOptInfo *joinrel, @@ -122,7 +122,7 @@ Plan *(*PlanCustomPath) (PlannerInfo *root, List *custom_plans); Convert a custom path to a finished plan. The return value will generally - be a CustomScan object, which the callback must allocate and + be a CustomScan object, which the callback must allocate and initialize. See for more details. @@ -150,45 +150,45 @@ typedef struct CustomScan - scan must be initialized as for any other scan, including + scan must be initialized as for any other scan, including estimated costs, target lists, qualifications, and so on. - flags is a bit mask with the same meaning as in - CustomPath. - custom_plans can be used to store child - Plan nodes. - custom_exprs should be used to + flags is a bit mask with the same meaning as in + CustomPath. + custom_plans can be used to store child + Plan nodes. + custom_exprs should be used to store expression trees that will need to be fixed up by - setrefs.c and subselect.c, while - custom_private should be used to store other private data + setrefs.c and subselect.c, while + custom_private should be used to store other private data that is only used by the custom scan provider itself. - custom_scan_tlist can be NIL when scanning a base + custom_scan_tlist can be NIL when scanning a base relation, indicating that the custom scan returns scan tuples that match the base relation's row type. Otherwise it is a target list describing - the actual scan tuples. custom_scan_tlist must be + the actual scan tuples. custom_scan_tlist must be provided for joins, and could be provided for scans if the custom scan provider can compute some non-Var expressions. - custom_relids is set by the core code to the set of + custom_relids is set by the core code to the set of relations (range table indexes) that this scan node handles; except when this scan is replacing a join, it will have only one member. - methods must point to a (usually statically allocated) + methods must point to a (usually statically allocated) object implementing the required custom scan methods, which are further detailed below. - When a CustomScan scans a single relation, - scan.scanrelid must be the range table index of the table - to be scanned. When it replaces a join, scan.scanrelid + When a CustomScan scans a single relation, + scan.scanrelid must be the range table index of the table + to be scanned. When it replaces a join, scan.scanrelid should be zero. 
- Plan trees must be able to be duplicated using copyObject, - so all the data stored within the custom fields must consist of + Plan trees must be able to be duplicated using copyObject, + so all the data stored within the custom fields must consist of nodes that that function can handle. Furthermore, custom scan providers cannot substitute a larger structure that embeds - a CustomScan for the structure itself, as would be possible - for a CustomPath or CustomScanState. + a CustomScan for the structure itself, as would be possible + for a CustomPath or CustomScanState. @@ -197,14 +197,14 @@ typedef struct CustomScan Node *(*CreateCustomScanState) (CustomScan *cscan); - Allocate a CustomScanState for this - CustomScan. The actual allocation will often be larger than - required for an ordinary CustomScanState, because many + Allocate a CustomScanState for this + CustomScan. The actual allocation will often be larger than + required for an ordinary CustomScanState, because many providers will wish to embed that as the first field of a larger structure. - The value returned must have the node tag and methods + The value returned must have the node tag and methods set appropriately, but other fields should be left as zeroes at this - stage; after ExecInitCustomScan performs basic initialization, - the BeginCustomScan callback will be invoked to give the + stage; after ExecInitCustomScan performs basic initialization, + the BeginCustomScan callback will be invoked to give the custom scan provider a chance to do whatever else is needed.
@@ -214,8 +214,8 @@ Node *(*CreateCustomScanState) (CustomScan *cscan); Executing Custom Scans - When a CustomScan is executed, its execution state is - represented by a CustomScanState, which is declared as + When a CustomScan is executed, its execution state is + represented by a CustomScanState, which is declared as follows: typedef struct CustomScanState @@ -228,15 +228,15 @@ typedef struct CustomScanState - ss is initialized as for any other scan state, + ss is initialized as for any other scan state, except that if the scan is for a join rather than a base relation, - ss.ss_currentRelation is left NULL. - flags is a bit mask with the same meaning as in - CustomPath and CustomScan. - methods must point to a (usually statically allocated) + ss.ss_currentRelation is left NULL. + flags is a bit mask with the same meaning as in + CustomPath and CustomScan. + methods must point to a (usually statically allocated) object implementing the required custom scan state methods, which are - further detailed below. Typically, a CustomScanState, which - need not support copyObject, will actually be a larger + further detailed below. Typically, a CustomScanState, which + need not support copyObject, will actually be a larger structure embedding the above as its first member. @@ -249,8 +249,8 @@ void (*BeginCustomScan) (CustomScanState *node, EState *estate, int eflags); - Complete initialization of the supplied CustomScanState. - Standard fields have been initialized by ExecInitCustomScan, + Complete initialization of the supplied CustomScanState. + Standard fields have been initialized by ExecInitCustomScan, but any private fields should be initialized here.
@@ -259,16 +259,16 @@ void (*BeginCustomScan) (CustomScanState *node, TupleTableSlot *(*ExecCustomScan) (CustomScanState *node); Fetch the next scan tuple. If any tuples remain, it should fill - ps_ResultTupleSlot with the next tuple in the current scan + ps_ResultTupleSlot with the next tuple in the current scan direction, and then return the tuple slot. If not, - NULL or an empty slot should be returned. + NULL or an empty slot should be returned.
void (*EndCustomScan) (CustomScanState *node); - Clean up any private data associated with the CustomScanState. + Clean up any private data associated with the CustomScanState. This method is required, but it does not need to do anything if there is no associated data or it will be cleaned up automatically. @@ -286,9 +286,9 @@ void (*ReScanCustomScan) (CustomScanState *node); void (*MarkPosCustomScan) (CustomScanState *node); Save the current scan position so that it can subsequently be restored - by the RestrPosCustomScan callback. This callback is + by the RestrPosCustomScan callback. This callback is optional, and need only be supplied if the - CUSTOMPATH_SUPPORT_MARK_RESTORE flag is set. + CUSTOMPATH_SUPPORT_MARK_RESTORE flag is set.
@@ -296,9 +296,9 @@ void (*MarkPosCustomScan) (CustomScanState *node); void (*RestrPosCustomScan) (CustomScanState *node); Restore the previous scan position as saved by the - MarkPosCustomScan callback. This callback is optional, + MarkPosCustomScan callback. This callback is optional, and need only be supplied if the - CUSTOMPATH_SUPPORT_MARK_RESTORE flag is set. + CUSTOMPATH_SUPPORT_MARK_RESTORE flag is set. @@ -320,8 +320,8 @@ void (*InitializeDSMCustomScan) (CustomScanState *node, void *coordinate); Initialize the dynamic shared memory that will be required for parallel - operation. coordinate points to a shared memory area of - size equal to the return value of EstimateDSMCustomScan. + operation. coordinate points to a shared memory area of + size equal to the return value of EstimateDSMCustomScan. This callback is optional, and need only be supplied if this custom scan provider supports parallel execution. @@ -337,9 +337,9 @@ void (*ReInitializeDSMCustomScan) (CustomScanState *node, This callback is optional, and need only be supplied if this custom scan provider supports parallel execution. Recommended practice is that this callback reset only shared state, - while the ReScanCustomScan callback resets only local + while the ReScanCustomScan callback resets only local state. Currently, this callback will be called - before ReScanCustomScan, but it's best not to rely on + before ReScanCustomScan, but it's best not to rely on that ordering.
@@ -350,7 +350,7 @@ void (*InitializeWorkerCustomScan) (CustomScanState *node, void *coordinate); Initialize a parallel worker's local state based on the shared state - set up by the leader during InitializeDSMCustomScan. + set up by the leader during InitializeDSMCustomScan. This callback is optional, and need only be supplied if this custom scan provider supports parallel execution.
@@ -361,7 +361,7 @@ void (*ShutdownCustomScan) (CustomScanState *node); Release resources when it is anticipated the node will not be executed to completion. This is not called in all cases; sometimes, - EndCustomScan may be called without this function having + EndCustomScan may be called without this function having been called first. Since the DSM segment used by parallel query is destroyed just after this callback is invoked, custom scan providers that wish to take some action before the DSM segment goes away should implement @@ -374,9 +374,9 @@ void (*ExplainCustomScan) (CustomScanState *node, List *ancestors, ExplainState *es); - Output additional information for EXPLAIN of a custom-scan + Output additional information for EXPLAIN of a custom-scan plan node. This callback is optional. Common data stored in the - ScanState, such as the target list and scan relation, will + ScanState, such as the target list and scan relation, will be shown even without this callback, but the callback allows the display of additional, private state.
diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 512756df4a..6a15f9030c 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -79,7 +79,7 @@ bytea - binary data (byte array) + binary data (byte array) @@ -354,45 +354,45 @@ - smallint + smallint 2 bytes small-range integer -32768 to +32767 - integer + integer 4 bytes typical choice for integer -2147483648 to +2147483647 - bigint + bigint 8 bytes large-range integer -9223372036854775808 to +9223372036854775807 - decimal + decimal variable user-specified precision, exact up to 131072 digits before the decimal point; up to 16383 digits after the decimal point - numeric + numeric variable user-specified precision, exact up to 131072 digits before the decimal point; up to 16383 digits after the decimal point - real + real 4 bytes variable-precision, inexact 6 decimal digits precision - double precision + double precision 8 bytes variable-precision, inexact 15 decimal digits precision @@ -406,7 +406,7 @@ - serial + serial 4 bytes autoincrementing integer 1 to 2147483647 @@ -574,9 +574,9 @@ NUMERIC Numeric values are physically stored without any extra leading or trailing zeroes. Thus, the declared precision and scale of a column - are maximums, not fixed allocations. (In this sense the numeric - type is more akin to varchar(n) - than to char(n).) The actual storage + are maximums, not fixed allocations. (In this sense the numeric + type is more akin to varchar(n) + than to char(n).) The actual storage requirement is two bytes for each group of four decimal digits, plus three to eight bytes overhead. @@ -593,22 +593,22 @@ NUMERIC In addition to ordinary numeric values, the numeric - type allows the special value NaN, meaning - not-a-number. Any operation on NaN - yields another NaN. When writing this value + type allows the special value NaN, meaning + not-a-number. Any operation on NaN + yields another NaN. When writing this value as a constant in an SQL command, you must put quotes around it, - for example UPDATE table SET x = 'NaN'. On input, - the string NaN is recognized in a case-insensitive manner. + for example UPDATE table SET x = 'NaN'. On input, + the string NaN is recognized in a case-insensitive manner. - In most implementations of the not-a-number concept, - NaN is not considered equal to any other numeric - value (including NaN). In order to allow - numeric values to be sorted and used in tree-based - indexes, PostgreSQL treats NaN - values as equal, and greater than all non-NaN + In most implementations of the not-a-number concept, + NaN is not considered equal to any other numeric + value (including NaN). In order to allow + numeric values to be sorted and used in tree-based + indexes, PostgreSQL treats NaN + values as equal, and greater than all non-NaN values. @@ -756,18 +756,18 @@ FROM generate_series(-3.5, 3.5, 1) as x; floating-point arithmetic does not follow IEEE 754, these values will probably not work as expected.) When writing these values as constants in an SQL command, you must put quotes around them, - for example UPDATE table SET x = '-Infinity'. On input, + for example UPDATE table SET x = '-Infinity'. On input, these strings are recognized in a case-insensitive manner.
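For example (the quotes are required, the capitalization is not):

    SELECT 'Infinity'::float8;    -- Infinity
    SELECT '-INFINITY'::float8;   -- -Infinity
    SELECT 'nan'::float8;         -- NaN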
- IEEE754 specifies that NaN should not compare equal - to any other floating-point value (including NaN). + IEEE754 specifies that NaN should not compare equal + to any other floating-point value (including NaN). In order to allow floating-point values to be sorted and used - in tree-based indexes, PostgreSQL treats - NaN values as equal, and greater than all - non-NaN values. + in tree-based indexes, PostgreSQL treats + NaN values as equal, and greater than all + non-NaN values. @@ -776,7 +776,7 @@ FROM generate_series(-3.5, 3.5, 1) as x; notations float and float(p) for specifying inexact numeric types. Here, p specifies - the minimum acceptable precision in binary digits. + the minimum acceptable precision in binary digits. PostgreSQL accepts float(1) to float(24) as selecting the real type, while @@ -870,12 +870,12 @@ ALTER SEQUENCE tablename_ Thus, we have created an integer column and arranged for its default - values to be assigned from a sequence generator. A NOT NULL + values to be assigned from a sequence generator. A NOT NULL constraint is applied to ensure that a null value cannot be inserted. (In most cases you would also want to attach a - UNIQUE or PRIMARY KEY constraint to prevent + UNIQUE or PRIMARY KEY constraint to prevent duplicate values from being inserted by accident, but this is - not automatic.) Lastly, the sequence is marked as owned by + not automatic.) Lastly, the sequence is marked as owned by the column, so that it will be dropped if the column or table is dropped.
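In practice the generated value is usually fetched at insert time; a small sketch with a hypothetical table:

    CREATE TABLE tickets (
        id   serial PRIMARY KEY,
        note text
    );
    INSERT INTO tickets (note) VALUES ('first') RETURNING id;   -- returns 1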
@@ -908,7 +908,7 @@ ALTER SEQUENCE tablename_bigserial and serial8 work the same way, except that they create a bigint column. bigserial should be used if you anticipate - the use of more than 231 identifiers over the + the use of more than 231 identifiers over the lifetime of the table. The type names smallserial and serial2 also work the same way, except that they create a smallint column. @@ -962,9 +962,9 @@ ALTER SEQUENCE tablename_ Since the output of this data type is locale-sensitive, it might not - work to load money data into a database that has a different - setting of lc_monetary. To avoid problems, before - restoring a dump into a new database make sure lc_monetary has + work to load money data into a database that has a different + setting of lc_monetary. To avoid problems, before + restoring a dump into a new database make sure lc_monetary has the same or equivalent value as in the database that was dumped.
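The locale dependence is easy to demonstrate in a session; a sketch, assuming the en_US.UTF-8 locale is installed on the server:

    SET lc_monetary = 'en_US.UTF-8';  -- assumes this locale exists on your system
    SELECT '12.34'::money;            -- formatted per lc_monetary, e.g. $12.34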
@@ -994,7 +994,7 @@ SELECT '52093.89'::money::numeric::float8; Division of a money value by an integer value is performed with truncation of the fractional part towards zero. To get a rounded result, divide by a floating-point value, or cast the money - value to numeric before dividing and back to money + value to numeric before dividing and back to money afterwards. (The latter is preferable to avoid risking precision loss.) When a money value is divided by another money value, the result is double precision (i.e., a pure number, @@ -1047,11 +1047,11 @@ SELECT '52093.89'::money::numeric::float8; - character varying(n), varchar(n) + character varying(n), varchar(n) variable-length with limit - character(n), char(n) + character(n), char(n) fixed-length, blank padded @@ -1070,10 +1070,10 @@ SELECT '52093.89'::money::numeric::float8; SQL defines two primary character types: - character varying(n) and - character(n), where n + character varying(n) and + character(n), where n is a positive integer. Both of these types can store strings up to - n characters (not bytes) in length. An attempt to store a + n characters (not bytes) in length. An attempt to store a longer string into a column of these types will result in an error, unless the excess characters are all spaces, in which case the string will be truncated to the maximum length. (This somewhat @@ -1087,22 +1087,22 @@ SELECT '52093.89'::money::numeric::float8; If one explicitly casts a value to character - varying(n) or - character(n), then an over-length - value will be truncated to n characters without + varying(n) or + character(n), then an over-length + value will be truncated to n characters without raising an error. (This too is required by the SQL standard.)
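For example, an explicit cast truncates silently where a direct assignment of an over-length value would raise an error (a minimal sketch):

<programlisting>
SELECT 'too long'::varchar(5);
 varchar
---------
 too l
</programlisting>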
- The notations varchar(n) and - char(n) are aliases for character - varying(n) and - character(n), respectively. + The notations varchar(n) and + char(n) are aliases for character + varying(n) and + character(n), respectively. character without length specifier is equivalent to character(1). If character varying is used without length specifier, the type accepts strings of any size. The - latter is a PostgreSQL extension. + latter is a PostgreSQL extension. @@ -1115,19 +1115,19 @@ SELECT '52093.89'::money::numeric::float8; Values of type character are physically padded - with spaces to the specified width n, and are + with spaces to the specified width n, and are stored and displayed that way. However, trailing spaces are treated as semantically insignificant and disregarded when comparing two values of type character. In collations where whitespace is significant, this behavior can produce unexpected results; for example SELECT 'a '::CHAR(2) collate "C" < - E'a\n'::CHAR(2) returns true, even though C + E'a\n'::CHAR(2)
returns true, even though C locale would consider a space to be greater than a newline. Trailing spaces are removed when converting a character value to one of the other string types. Note that trailing spaces - are semantically significant in + are semantically significant in character varying and text values, and - when using pattern matching, that is LIKE and + when using pattern matching, that is LIKE and regular expressions.
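The differing significance of trailing spaces can be seen directly (a sketch; output assumes default comparison behavior):

<programlisting>
SELECT 'a '::char(2) = 'a'::char(2) AS char_eq,
       'a '::varchar(2) = 'a'::varchar(2) AS varchar_eq;
 char_eq | varchar_eq
---------+------------
 t       | f
</programlisting>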
@@ -1140,7 +1140,7 @@ SELECT '52093.89'::money::numeric::float8; stored in background tables so that they do not interfere with rapid access to shorter column values. In any case, the longest possible character string that can be stored is about 1 GB. (The - maximum value that will be allowed for n in the data + maximum value that will be allowed for n in the data type declaration is less than that. It wouldn't be useful to change this because with multibyte character encodings the number of characters and bytes can be quite different. If you desire to @@ -1155,10 +1155,10 @@ SELECT '52093.89'::money::numeric::float8; apart from increased storage space when using the blank-padded type, and a few extra CPU cycles to check the length when storing into a length-constrained column. While - character(n) has performance + character(n) has performance advantages in some other database systems, there is no such advantage in PostgreSQL; in fact - character(n) is usually the slowest of + character(n) is usually the slowest of the three because of its additional storage costs. In most situations text or character varying should be used instead. @@ -1220,7 +1220,7 @@ SELECT b, char_length(b) FROM test2; in the internal system catalogs and is not intended for use by the general user. Its length is currently defined as 64 bytes (63 usable characters plus terminator) but should be referenced using the constant - NAMEDATALEN in C source code. + NAMEDATALEN in C source code. The length is set at compile time (and is therefore adjustable for special uses); the default maximum length might change in a future release. The type "char" @@ -1304,7 +1304,7 @@ SELECT b, char_length(b) FROM test2; Second, operations on binary strings process the actual bytes, whereas the processing of character strings depends on locale settings. In short, binary strings are appropriate for storing data that the - programmer thinks of as raw bytes, whereas character + programmer thinks of as raw bytes, whereas character strings are appropriate for storing text.
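The character-versus-byte distinction mentioned above can be checked directly (a sketch assuming a UTF-8 server encoding):

<programlisting>
SELECT char_length('résumé') AS characters,
       octet_length(convert_to('résumé', 'UTF8')) AS bytes;
 characters | bytes
------------+-------
          6 |     8
</programlisting>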
@@ -1328,10 +1328,10 @@ SELECT b, char_length(b) FROM test2;
- <type>bytea</> Hex Format + <type>bytea</type> Hex Format - The hex format encodes binary data as 2 hexadecimal digits + The hex format encodes binary data as 2 hexadecimal digits per byte, most significant nibble first. The entire string is preceded by the sequence \x (to distinguish it from the escape format). In some contexts, the initial backslash may @@ -1355,7 +1355,7 @@ SELECT E'\\xDEADBEEF'; - <type>bytea</> Escape Format + <type>bytea</type> Escape Format The escape format is the traditional @@ -1390,7 +1390,7 @@ SELECT E'\\xDEADBEEF'; - <type>bytea</> Literal Escaped Octets + <type>bytea</type> Literal Escaped Octets @@ -1430,7 +1430,7 @@ SELECT E'\\xDEADBEEF'; 0 to 31 and 127 to 255 non-printable octets - E'\\xxx' (octal value) + E'\\xxx' (octal value) SELECT E'\\001'::bytea; \001 @@ -1481,7 +1481,7 @@ SELECT E'\\xDEADBEEF';
- <type>bytea</> Output Escaped Octets + <type>bytea</type> Output Escaped Octets @@ -1506,7 +1506,7 @@ SELECT E'\\xDEADBEEF'; 0 to 31 and 127 to 255 non-printable octets - \xxx (octal value) + \xxx (octal value) SELECT E'\\001'::bytea; \001 @@ -1524,7 +1524,7 @@ SELECT E'\\xDEADBEEF';
- Depending on the front end to PostgreSQL you use, + Depending on the front end to PostgreSQL you use, you might have additional work to do in terms of escaping and unescaping bytea strings. For example, you might also have to escape line feeds and carriage returns if your interface @@ -1685,7 +1685,7 @@ MINUTE TO SECOND Note that if both fields and p are specified, the - fields must include SECOND, + fields must include SECOND, since the precision applies only to the seconds. @@ -1717,9 +1717,9 @@ MINUTE TO SECOND For some formats, ordering of day, month, and year in date input is ambiguous and there is support for specifying the expected ordering of these fields. Set the parameter - to MDY to select month-day-year interpretation, - DMY to select day-month-year interpretation, or - YMD to select year-month-day interpretation. + to MDY to select month-day-year interpretation, + DMY to select day-month-year interpretation, or + YMD to select year-month-day interpretation.
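A short session illustrating the field-ordering settings (ISO output style assumed):

<programlisting>
SET datestyle = 'ISO, DMY';
SELECT '1/8/1999'::date;
    date
------------
 1999-08-01

SET datestyle = 'ISO, MDY';
SELECT '1/8/1999'::date;
    date
------------
 1999-01-08
</programlisting>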
@@ -1784,19 +1784,19 @@ MINUTE TO SECOND 1/8/1999 - January 8 in MDY mode; - August 1 in DMY mode + January 8 in MDY mode; + August 1 in DMY mode 1/18/1999 - January 18 in MDY mode; + January 18 in MDY mode; rejected in other modes 01/02/03 - January 2, 2003 in MDY mode; - February 1, 2003 in DMY mode; - February 3, 2001 in YMD mode + January 2, 2003 in MDY mode; + February 1, 2003 in DMY mode; + February 3, 2001 in YMD mode @@ -1813,15 +1813,15 @@ MINUTE TO SECOND 99-Jan-08 - January 8 in YMD mode, else error + January 8 in YMD mode, else error 08-Jan-99 - January 8, except error in YMD mode + January 8, except error in YMD mode Jan-08-99 - January 8, except error in YMD mode + January 8, except error in YMD mode 19990108 @@ -2070,20 +2070,20 @@ January 8 04:05:06 1999 PST For timestamp with time zone, the internally stored value is always in UTC (Universal Coordinated Time, traditionally known as Greenwich Mean Time, - GMT). An input value that has an explicit + GMT). An input value that has an explicit time zone specified is converted to UTC using the appropriate offset for that time zone. If no time zone is stated in the input string, then it is assumed to be in the time zone indicated by the system's parameter, and is converted to UTC using the - offset for the timezone zone. + offset for the timezone zone. When a timestamp with time zone value is output, it is always converted from UTC to the - current timezone zone, and displayed as local time in that + current timezone zone, and displayed as local time in that zone. To see the time in another time zone, either change - timezone or use the AT TIME ZONE construct + timezone or use the AT TIME ZONE construct (see ). @@ -2091,8 +2091,8 @@ January 8 04:05:06 1999 PST Conversions between timestamp without time zone and timestamp with time zone normally assume that the timestamp without time zone value should be taken or given - as timezone local time. A different time zone can - be specified for the conversion using AT TIME ZONE. + as timezone local time. A different time zone can + be specified for the conversion using AT TIME ZONE.
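A sketch of the UTC storage-and-display rule (the session time zone America/Los_Angeles is chosen arbitrarily):

<programlisting>
SET timezone = 'America/Los_Angeles';

SELECT '2001-02-16 20:38:40-05'::timestamptz;   -- displayed in the session zone
      timestamptz
------------------------
 2001-02-16 17:38:40-08

SELECT '2001-02-16 20:38:40-05'::timestamptz AT TIME ZONE 'UTC';
      timezone
---------------------
 2001-02-17 01:38:40
</programlisting>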
@@ -2117,7 +2117,7 @@ January 8 04:05:06 1999 PST are specially represented inside the system and will be displayed unchanged; but the others are simply notational shorthands that will be converted to ordinary date/time values when read. - (In particular, now and related strings are converted + (In particular, now and related strings are converted to a specific time value as soon as they are read.) All of these values need to be enclosed in single quotes when used as constants in SQL commands. @@ -2187,7 +2187,7 @@ January 8 04:05:06 1999 PST LOCALTIMESTAMP. The latter four accept an optional subsecond precision specification. (See .) Note that these are - SQL functions and are not recognized in data input strings. + SQL functions and are not recognized in data input strings.
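For illustration, two of the special values in a psql session (the epoch display assumes the session TimeZone is UTC):

<programlisting>
SELECT 'epoch'::timestamptz, 'infinity'::timestamp;
      timestamptz       | timestamp
------------------------+-----------
 1970-01-01 00:00:00+00 | infinity
</programlisting>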
@@ -2211,8 +2211,8 @@ January 8 04:05:06 1999 PST The output format of the date/time types can be set to one of the four styles ISO 8601, - SQL (Ingres), traditional POSTGRES - (Unix date format), or + SQL (Ingres), traditional POSTGRES + (Unix date format), or German. The default is the ISO format. (The SQL standard requires the use of the ISO 8601 @@ -2222,7 +2222,7 @@ January 8 04:05:06 1999 PST output style. The output of the date and time types is generally only the date or time part in accordance with the given examples. However, the - POSTGRES style outputs date-only values in + POSTGRES style outputs date-only values in ISO format. @@ -2263,9 +2263,9 @@ January 8 04:05:06 1999 PST - ISO 8601 specifies the use of uppercase letter T to separate - the date and time. PostgreSQL accepts that format on - input, but on output it uses a space rather than T, as shown + ISO 8601 specifies the use of uppercase letter T to separate + the date and time. PostgreSQL accepts that format on + input, but on output it uses a space rather than T, as shown above. This is for readability and for consistency with RFC 3339 as well as some other database systems. @@ -2292,17 +2292,17 @@ January 8 04:05:06 1999 PST - SQL, DMY + SQL, DMY day/month/year 17/12/1997 15:37:16.00 CET - SQL, MDY + SQL, MDY month/day/year 12/17/1997 07:37:16.00 PST - Postgres, DMY + Postgres, DMY day/month/year Wed 17 Dec 07:37:16 1997 PST @@ -2368,7 +2368,7 @@ January 8 04:05:06 1999 PST The default time zone is specified as a constant numeric offset - from UTC. It is therefore impossible to adapt to + from UTC. It is therefore impossible to adapt to daylight-saving time when doing date/time arithmetic across DST boundaries. @@ -2380,7 +2380,7 @@ January 8 04:05:06 1999 PST To address these difficulties, we recommend using date/time types that contain both date and time when using time zones. We - do not recommend using the type time with + do not recommend using the type time with time zone (though it is supported by PostgreSQL for legacy applications and for compliance with the SQL standard). @@ -2401,7 +2401,7 @@ January 8 04:05:06 1999 PST - A full time zone name, for example America/New_York. + A full time zone name, for example America/New_York. The recognized time zone names are listed in the pg_timezone_names view (see ). @@ -2412,16 +2412,16 @@ January 8 04:05:06 1999 PST - A time zone abbreviation, for example PST. Such a + A time zone abbreviation, for example PST. Such a specification merely defines a particular offset from UTC, in contrast to full time zone names which can imply a set of daylight savings transition-date rules as well. The recognized abbreviations - are listed in the pg_timezone_abbrevs view (see pg_timezone_abbrevs view (see ). You cannot set the configuration parameters or to a time zone abbreviation, but you can use abbreviations in - date/time input values and with the AT TIME ZONE + date/time input values and with the AT TIME ZONE operator. 
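For example, an abbreviation is accepted in an input value even though it cannot be used as a TimeZone setting (display assumes a UTC session zone):

<programlisting>
SELECT timestamptz '2014-06-04 12:00 EDT';
      timestamptz
------------------------
 2014-06-04 16:00:00+00
</programlisting>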
@@ -2429,25 +2429,25 @@ January 8 04:05:06 1999 PST In addition to the timezone names and abbreviations, PostgreSQL will accept POSIX-style time zone - specifications of the form STDoffset or - STDoffsetDST, where - STD is a zone abbreviation, offset is a - numeric offset in hours west from UTC, and DST is an + specifications of the form STDoffset or + STDoffsetDST, where + STD is a zone abbreviation, offset is a + numeric offset in hours west from UTC, and DST is an optional daylight-savings zone abbreviation, assumed to stand for one - hour ahead of the given offset. For example, if EST5EDT + hour ahead of the given offset. For example, if EST5EDT were not already a recognized zone name, it would be accepted and would be functionally equivalent to United States East Coast time. In this syntax, a zone abbreviation can be a string of letters, or an - arbitrary string surrounded by angle brackets (<>). + arbitrary string surrounded by angle brackets (<>). When a daylight-savings zone abbreviation is present, it is assumed to be used according to the same daylight-savings transition rules used in the - IANA time zone database's posixrules entry. + IANA time zone database's posixrules entry. In a standard PostgreSQL installation, - posixrules is the same as US/Eastern, so + posixrules is the same as US/Eastern, so that POSIX-style time zone specifications follow USA daylight-savings rules. If needed, you can adjust this behavior by replacing the - posixrules file. + posixrules file.
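A brief sketch using a POSIX-style specification that also happens to be a recognized zone name (the June date shows the daylight-savings offset):

<programlisting>
SET TIME ZONE 'EST5EDT';
SELECT '2014-06-04 12:00'::timestamptz;
      timestamptz
------------------------
 2014-06-04 12:00:00-04
</programlisting>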
@@ -2456,10 +2456,10 @@ January 8 04:05:06 1999 PST and full names: abbreviations represent a specific offset from UTC, whereas many of the full names imply a local daylight-savings time rule, and so have two possible UTC offsets. As an example, - 2014-06-04 12:00 America/New_York represents noon local + 2014-06-04 12:00 America/New_York represents noon local time in New York, which for this particular date was Eastern Daylight - Time (UTC-4). So 2014-06-04 12:00 EDT specifies that - same time instant. But 2014-06-04 12:00 EST specifies + Time (UTC-4). So 2014-06-04 12:00 EDT specifies that + same time instant. But 2014-06-04 12:00 EST specifies noon Eastern Standard Time (UTC-5), regardless of whether daylight savings was nominally in effect on that date. @@ -2467,10 +2467,10 @@ January 8 04:05:06 1999 PST To complicate matters, some jurisdictions have used the same timezone abbreviation to mean different UTC offsets at different times; for - example, in Moscow MSK has meant UTC+3 in some years and - UTC+4 in others. PostgreSQL interprets such + example, in Moscow MSK has meant UTC+3 in some years and + UTC+4 in others. PostgreSQL interprets such abbreviations according to whatever they meant (or had most recently - meant) on the specified date; but, as with the EST example + meant) on the specified date; but, as with the EST example above, this is not necessarily the same as local civil time on that date. @@ -2478,18 +2478,18 @@ January 8 04:05:06 1999 PST One should be wary that the POSIX-style time zone feature can lead to silently accepting bogus input, since there is no check on the reasonableness of the zone abbreviations. For example, SET - TIMEZONE TO FOOBAR0 will work, leaving the system effectively using + TIMEZONE TO FOOBAR0 will work, leaving the system effectively using a rather peculiar abbreviation for UTC. Another issue to keep in mind is that in POSIX time zone names, - positive offsets are used for locations west of Greenwich. + positive offsets are used for locations west of Greenwich. Everywhere else, PostgreSQL follows the - ISO-8601 convention that positive timezone offsets are east + ISO-8601 convention that positive timezone offsets are east of Greenwich. In all cases, timezone names and abbreviations are recognized - case-insensitively. (This is a change from PostgreSQL + case-insensitively. (This is a change from PostgreSQL versions prior to 8.2, which were case-sensitive in some contexts but not others.) @@ -2497,14 +2497,14 @@ January 8 04:05:06 1999 PST Neither timezone names nor abbreviations are hard-wired into the server; they are obtained from configuration files stored under - .../share/timezone/ and .../share/timezonesets/ + .../share/timezone/ and .../share/timezonesets/ of the installation directory (see ). The configuration parameter can - be set in the file postgresql.conf, or in any of the + be set in the file postgresql.conf, or in any of the other standard ways described in . There are also some special ways to set it: @@ -2513,7 +2513,7 @@ January 8 04:05:06 1999 PST The SQL command SET TIME ZONE sets the time zone for the session. This is an alternative spelling - of SET TIMEZONE TO with a more SQL-spec-compatible syntax. + of SET TIMEZONE TO with a more SQL-spec-compatible syntax. @@ -2541,52 +2541,52 @@ January 8 04:05:06 1999 PST verbose syntax: -@ quantity unit quantity unit... direction +@ quantity unit quantity unit... 
direction - where quantity is a number (possibly signed); - unit is microsecond, + where quantity is a number (possibly signed); + unit is microsecond, millisecond, second, minute, hour, day, week, month, year, decade, century, millennium, or abbreviations or plurals of these units; - direction can be ago or - empty. The at sign (@) is optional noise. The amounts + direction can be ago or + empty. The at sign (@) is optional noise. The amounts of the different units are implicitly added with appropriate sign accounting. ago negates all the fields. This syntax is also used for interval output, if is set to - postgres_verbose. + postgres_verbose. Quantities of days, hours, minutes, and seconds can be specified without - explicit unit markings. For example, '1 12:59:10' is read - the same as '1 day 12 hours 59 min 10 sec'. Also, + explicit unit markings. For example, '1 12:59:10' is read + the same as '1 day 12 hours 59 min 10 sec'. Also, a combination of years and months can be specified with a dash; - for example '200-10' is read the same as '200 years - 10 months'. (These shorter forms are in fact the only ones allowed + for example '200-10' is read the same as '200 years + 10 months'. (These shorter forms are in fact the only ones allowed by the SQL standard, and are used for output when - IntervalStyle is set to sql_standard.) + IntervalStyle is set to sql_standard.) Interval values can also be written as ISO 8601 time intervals, using - either the format with designators of the standard's section - 4.4.3.2 or the alternative format of section 4.4.3.3. The + either the format with designators of the standard's section + 4.4.3.2 or the alternative format of section 4.4.3.3. The format with designators looks like this: -P quantity unit quantity unit ... T quantity unit ... +P quantity unit quantity unit ... T quantity unit ... - The string must start with a P, and may include a - T that introduces the time-of-day units. The + The string must start with a P, and may include a + T that introduces the time-of-day units. The available unit abbreviations are given in . Units may be omitted, and may be specified in any order, but units smaller than - a day must appear after T. In particular, the meaning of - M depends on whether it is before or after - T. + a day must appear after T. In particular, the meaning of + M depends on whether it is before or after + T. @@ -2634,51 +2634,51 @@ P quantity unit quantity In the alternative format: -P years-months-days T hours:minutes:seconds +P years-months-days T hours:minutes:seconds the string must begin with P, and a - T separates the date and time parts of the interval. + T separates the date and time parts of the interval. The values are given as numbers similar to ISO 8601 dates. - When writing an interval constant with a fields + When writing an interval constant with a fields specification, or when assigning a string to an interval column that was - defined with a fields specification, the interpretation of - unmarked quantities depends on the fields. For - example INTERVAL '1' YEAR is read as 1 year, whereas - INTERVAL '1' means 1 second. Also, field values - to the right of the least significant field allowed by the - fields specification are silently discarded. For - example, writing INTERVAL '1 day 2:03:04' HOUR TO MINUTE + defined with a fields specification, the interpretation of + unmarked quantities depends on the fields. For + example INTERVAL '1' YEAR is read as 1 year, whereas + INTERVAL '1' means 1 second. 
Also, field values + to the right of the least significant field allowed by the + fields specification are silently discarded. For + example, writing INTERVAL '1 day 2:03:04' HOUR TO MINUTE results in dropping the seconds field, but not the day field. - According to the SQL standard all fields of an interval + According to the SQL standard all fields of an interval value must have the same sign, so a leading negative sign applies to all fields; for example the negative sign in the interval literal - '-1 2:03:04' applies to both the days and hour/minute/second - parts. PostgreSQL allows the fields to have different + '-1 2:03:04' applies to both the days and hour/minute/second + parts. PostgreSQL allows the fields to have different signs, and traditionally treats each field in the textual representation as independently signed, so that the hour/minute/second part is - considered positive in this example. If IntervalStyle is + considered positive in this example. If IntervalStyle is set to sql_standard then a leading sign is considered to apply to all fields (but only if no additional signs appear). - Otherwise the traditional PostgreSQL interpretation is + Otherwise the traditional PostgreSQL interpretation is used. To avoid ambiguity, it's recommended to attach an explicit sign to each field if any field is negative. - Internally interval values are stored as months, days, + Internally interval values are stored as months, days, and seconds. This is done because the number of days in a month varies, and a day can have 23 or 25 hours if a daylight savings time adjustment is involved. The months and days fields are integers while the seconds field can store fractions. Because intervals are - usually created from constant strings or timestamp subtraction, + usually created from constant strings or timestamp subtraction, this storage method works well in most cases. Functions - justify_days and justify_hours are + justify_days and justify_hours are available for adjusting days and hours that overflow their normal ranges. @@ -2686,18 +2686,18 @@ P years-months-days < In the verbose input format, and in some fields of the more compact input formats, field values can have fractional parts; for example - '1.5 week' or '01:02:03.45'. Such input is + '1.5 week' or '01:02:03.45'. Such input is converted to the appropriate number of months, days, and seconds for storage. When this would result in a fractional number of months or days, the fraction is added to the lower-order fields using the conversion factors 1 month = 30 days and 1 day = 24 hours. - For example, '1.5 month' becomes 1 month and 15 days. + For example, '1.5 month' becomes 1 month and 15 days. Only seconds will ever be shown as fractional on output. shows some examples - of valid interval input. + of valid interval input.
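Before those tabulated examples, a quick psql check of the fields behavior just described (hypothetical session, default IntervalStyle):

<programlisting>
SELECT INTERVAL '1' YEAR, INTERVAL '1', INTERVAL '1 day 2:03:04' HOUR TO MINUTE;
 interval | interval |    interval
----------+----------+----------------
 1 year   | 00:00:01 | 1 day 02:03:00
</programlisting>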
@@ -2724,11 +2724,11 @@ P years-months-days < P1Y2M3DT4H5M6S - ISO 8601 format with designators: same meaning as above + ISO 8601 format with designators: same meaning as above P0001-02-03T04:05:06 - ISO 8601 alternative format: same meaning as above + ISO 8601 alternative format: same meaning as above @@ -2747,16 +2747,16 @@ P years-months-days < The output format of the interval type can be set to one of the - four styles sql_standard, postgres, - postgres_verbose, or iso_8601, + four styles sql_standard, postgres, + postgres_verbose, or iso_8601, using the command SET intervalstyle. - The default is the postgres format. + The default is the postgres format. shows examples of each output style. - The sql_standard style produces output that conforms to + The sql_standard style produces output that conforms to the SQL standard's specification for interval literal strings, if the interval value meets the standard's restrictions (either year-month only or day-time only, with no mixing of positive @@ -2766,20 +2766,20 @@ P years-months-days < - The output of the postgres style matches the output of - PostgreSQL releases prior to 8.4 when the - parameter was set to ISO. + The output of the postgres style matches the output of + PostgreSQL releases prior to 8.4 when the + parameter was set to ISO. - The output of the postgres_verbose style matches the output of - PostgreSQL releases prior to 8.4 when the - DateStyle parameter was set to non-ISO output. + The output of the postgres_verbose style matches the output of + PostgreSQL releases prior to 8.4 when the + DateStyle parameter was set to non-ISO output. - The output of the iso_8601 style matches the format - with designators described in section 4.4.3.2 of the + The output of the iso_8601 style matches the format + with designators described in section 4.4.3.2 of the ISO 8601 standard. @@ -2796,25 +2796,25 @@ P years-months-days < - sql_standard + sql_standard 1-2 3 4:05:06 -1-2 +3 -4:05:06 - postgres + postgres 1 year 2 mons 3 days 04:05:06 -1 year -2 mons +3 days -04:05:06 - postgres_verbose + postgres_verbose @ 1 year 2 mons @ 3 days 4 hours 5 mins 6 secs @ 1 year 2 mons -3 days 4 hours 5 mins 6 secs ago - iso_8601 + iso_8601 P1Y2M P3DT4H5M6S P-1Y-2M3DT-4H-5M-6S @@ -3178,7 +3178,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays x , y - where x and y are the respective + where x and y are the respective coordinates, as floating-point numbers. @@ -3196,8 +3196,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays Lines are represented by the linear - equation Ax + By + C = 0, - where A and B are not both zero. Values + equation Ax + By + C = 0, + where A and B are not both zero. Values of type line are input and output in the following form: { A, B, C } @@ -3324,8 +3324,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays where the points are the end points of the line segments - comprising the path. Square brackets ([]) indicate - an open path, while parentheses (()) indicate a + comprising the path. Square brackets ([]) indicate + an open path, while parentheses (()) indicate a closed path. When the outermost parentheses are omitted, as in the third through fifth syntaxes, a closed path is assumed. @@ -3388,7 +3388,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays where - (x,y) + (x,y) is the center point and r is the radius of the circle. 
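A small sketch of geometric input and output syntax (values chosen arbitrarily):

<programlisting>
SELECT point '(1,2)', circle '((0,0),2)';
 point | circle
-------+-----------
 (1,2) | <(0,0),2>
</programlisting>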
@@ -3409,7 +3409,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - PostgreSQL offers data types to store IPv4, IPv6, and MAC + PostgreSQL offers data types to store IPv4, IPv6, and MAC addresses, as shown in . It is better to use these types instead of plain text types to store network addresses, because @@ -3503,7 +3503,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - <type>cidr</> + <type>cidr</type> cidr @@ -3514,11 +3514,11 @@ SELECT person.name, holidays.num_weeks FROM person, holidays Input and output formats follow Classless Internet Domain Routing conventions. The format for specifying networks is address/y where address is the network represented as an + class="parameter">address/y where address is the network represented as an IPv4 or IPv6 address, and y is the number of bits in the netmask. If - y is omitted, it is calculated + class="parameter">y is the number of bits in the netmask. If + y is omitted, it is calculated using assumptions from the older classful network numbering system, except it will be at least large enough to include all of the octets written in the input. It is an error to specify a network address @@ -3530,7 +3530,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
- <type>cidr</> Type Input Examples + <type>cidr</type> Type Input Examples @@ -3639,8 +3639,8 @@ SELECT person.name, holidays.num_weeks FROM person, holidays If you do not like the output format for inet or - cidr values, try the functions host, - text, and abbrev. + cidr values, try the functions host, + text, and abbrev. @@ -3658,24 +3658,24 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - The macaddr type stores MAC addresses, known for example + The macaddr type stores MAC addresses, known for example from Ethernet card hardware addresses (although MAC addresses are used for other purposes as well). Input is accepted in the following formats: - '08:00:2b:01:02:03' - '08-00-2b-01-02-03' - '08002b:010203' - '08002b-010203' - '0800.2b01.0203' - '0800-2b01-0203' - '08002b010203' + '08:00:2b:01:02:03' + '08-00-2b-01-02-03' + '08002b:010203' + '08002b-010203' + '0800.2b01.0203' + '0800-2b01-0203' + '08002b010203' These examples would all specify the same address. Upper and lower case is accepted for the digits - a through f. Output is always in the + a through f. Output is always in the first of the forms shown. @@ -3708,7 +3708,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - The macaddr8 type stores MAC addresses in EUI-64 + The macaddr8 type stores MAC addresses in EUI-64 format, known for example from Ethernet card hardware addresses (although MAC addresses are used for other purposes as well). This type can accept both 6 and 8 byte length MAC addresses @@ -3718,31 +3718,31 @@ SELECT person.name, holidays.num_weeks FROM person, holidays Note that IPv6 uses a modified EUI-64 format where the 7th bit should be set to one after the conversion from EUI-48. The - function macaddr8_set7bit is provided to make this + function macaddr8_set7bit is provided to make this change. Generally speaking, any input which is comprised of pairs of hex digits (on byte boundaries), optionally separated consistently by - one of ':', '-' or '.', is + one of ':', '-' or '.', is accepted. The number of hex digits must be either 16 (8 bytes) or 12 (6 bytes). Leading and trailing whitespace is ignored. The following are examples of input formats that are accepted: - '08:00:2b:01:02:03:04:05' - '08-00-2b-01-02-03-04-05' - '08002b:0102030405' - '08002b-0102030405' - '0800.2b01.0203.0405' - '0800-2b01-0203-0405' - '08002b01:02030405' - '08002b0102030405' + '08:00:2b:01:02:03:04:05' + '08-00-2b-01-02-03-04-05' + '08002b:0102030405' + '08002b-0102030405' + '0800.2b01.0203.0405' + '0800-2b01-0203-0405' + '08002b01:02030405' + '08002b0102030405' These examples would all specify the same address. Upper and lower case is accepted for the digits - a through f. Output is always in the + a through f. Output is always in the first of the forms shown. The last six input formats that are mentioned above are not part @@ -3750,7 +3750,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays To convert a traditional 48 bit MAC address in EUI-48 format to modified EUI-64 format to be included as the host portion of an - IPv6 address, use macaddr8_set7bit as shown: + IPv6 address, use macaddr8_set7bit as shown: SELECT macaddr8_set7bit('08:00:2b:01:02:03'); @@ -3798,12 +3798,12 @@ SELECT macaddr8_set7bit('08:00:2b:01:02:03'); If one explicitly casts a bit-string value to - bit(n), it will be truncated or - zero-padded on the right to be exactly n bits, + bit(n), it will be truncated or + zero-padded on the right to be exactly n bits, without raising an error. 
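For example (a minimal sketch):

<programlisting>
SELECT B'101'::bit(5);   -- zero-padded on the right
  bit
-------
 10100
</programlisting>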
Similarly, if one explicitly casts a bit-string value to - bit varying(n), it will be truncated - on the right if it is more than n bits. + bit varying(n), it will be truncated + on the right if it is more than n bits. @@ -3860,8 +3860,8 @@ SELECT * FROM test; PostgreSQL provides two data types that are designed to support full text search, which is the activity of - searching through a collection of natural-language documents - to locate those that best match a query. + searching through a collection of natural-language documents + to locate those that best match a query. The tsvector type represents a document in a form optimized for text search; the tsquery type similarly represents a text query. @@ -3879,8 +3879,8 @@ SELECT * FROM test; A tsvector value is a sorted list of distinct - lexemes, which are words that have been - normalized to merge different variants of the same word + lexemes, which are words that have been + normalized to merge different variants of the same word (see for details). Sorting and duplicate-elimination are done automatically during input, as shown in this example: @@ -3913,7 +3913,7 @@ SELECT $$the lexeme 'Joe''s' contains a quote$$::tsvector; 'Joe''s' 'a' 'contains' 'lexeme' 'quote' 'the' - Optionally, integer positions + Optionally, integer positions can be attached to lexemes: @@ -3932,7 +3932,7 @@ SELECT 'a:1 fat:2 cat:3 sat:4 on:5 a:6 mat:7 and:8 ate:9 a:10 fat:11 rat:12'::ts Lexemes that have positions can further be labeled with a - weight, which can be A, + weight, which can be A, B, C, or D. D is the default and hence is not shown on output: @@ -3965,7 +3965,7 @@ SELECT 'The Fat Rats'::tsvector; For most English-text-searching applications the above words would be considered non-normalized, but tsvector doesn't care. Raw document text should usually be passed through - to_tsvector to normalize the words appropriately + to_tsvector to normalize the words appropriately for searching: @@ -3991,17 +3991,17 @@ SELECT to_tsvector('english', 'The Fat Rats'); A tsquery value stores lexemes that are to be searched for, and can combine them using the Boolean operators & (AND), | (OR), and - ! (NOT), as well as the phrase search operator - <-> (FOLLOWED BY). There is also a variant - <N> of the FOLLOWED BY - operator, where N is an integer constant that + ! (NOT), as well as the phrase search operator + <-> (FOLLOWED BY). There is also a variant + <N> of the FOLLOWED BY + operator, where N is an integer constant that specifies the distance between the two lexemes being searched - for. <-> is equivalent to <1>. + for. <-> is equivalent to <1>. Parentheses can be used to enforce grouping of these operators. - In the absence of parentheses, ! (NOT) binds most tightly, + In the absence of parentheses, ! (NOT) binds most tightly, <-> (FOLLOWED BY) next most tightly, then & (AND), with | (OR) binding the least tightly. @@ -4031,7 +4031,7 @@ SELECT 'fat & rat & ! cat'::tsquery; Optionally, lexemes in a tsquery can be labeled with one or more weight letters, which restricts them to match only - tsvector lexemes with one of those weights: + tsvector lexemes with one of those weights: SELECT 'fat:ab & cat'::tsquery; @@ -4042,7 +4042,7 @@ SELECT 'fat:ab & cat'::tsquery; - Also, lexemes in a tsquery can be labeled with * + Also, lexemes in a tsquery can be labeled with * to specify prefix matching: SELECT 'super:*'::tsquery; @@ -4050,15 +4050,15 @@ SELECT 'super:*'::tsquery; ----------- 'super':* - This query will match any word in a tsvector that begins - with super. 
+ This query will match any word in a tsvector that begins + with super. Quoting rules for lexemes are the same as described previously for - lexemes in tsvector; and, as with tsvector, + lexemes in tsvector; and, as with tsvector, any required normalization of words must be done before converting - to the tsquery type. The to_tsquery + to the tsquery type. The to_tsquery function is convenient for performing such normalization: @@ -4068,7 +4068,7 @@ SELECT to_tsquery('Fat:ab & Cats'); 'fat':AB & 'cat' - Note that to_tsquery will process prefixes in the same way + Note that to_tsquery will process prefixes in the same way as other words, which means this comparison returns true: @@ -4077,14 +4077,14 @@ SELECT to_tsvector( 'postgraduate' ) @@ to_tsquery( 'postgres:*' ); ---------- t - because postgres gets stemmed to postgr: + because postgres gets stemmed to postgr: SELECT to_tsvector( 'postgraduate' ), to_tsquery( 'postgres:*' ); to_tsvector | to_tsquery ---------------+------------ 'postgradu':1 | 'postgr':* - which will match the stemmed form of postgraduate. + which will match the stemmed form of postgraduate. @@ -4150,7 +4150,7 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11 - <acronym>XML</> Type + <acronym>XML</acronym> Type XML @@ -4163,7 +4163,7 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11 functions to perform type-safe operations on it; see . Use of this data type requires the installation to have been built with configure - --with-libxml. + --with-libxml. @@ -4311,7 +4311,7 @@ SET xmloption TO { DOCUMENT | CONTENT }; Some XML-related functions may not work at all on non-ASCII data when the server encoding is not UTF-8. This is known to be an - issue for xmltable() and xpath() in particular. + issue for xmltable() and xpath() in particular. @@ -4421,17 +4421,17 @@ SET xmloption TO { DOCUMENT | CONTENT }; system tables. OIDs are not added to user-created tables, unless WITH OIDS is specified when the table is created, or the - configuration variable is enabled. Type oid represents + configuration variable is enabled. Type oid represents an object identifier. There are also several alias types for - oid: regproc, regprocedure, - regoper, regoperator, regclass, - regtype, regrole, regnamespace, - regconfig, and regdictionary. + oid: regproc, regprocedure, + regoper, regoperator, regclass, + regtype, regrole, regnamespace, + regconfig, and regdictionary. shows an overview. - The oid type is currently implemented as an unsigned + The oid type is currently implemented as an unsigned four-byte integer. Therefore, it is not large enough to provide database-wide uniqueness in large databases, or even in large individual tables. So, using a user-created table's OID column as @@ -4440,7 +4440,7 @@ SET xmloption TO { DOCUMENT | CONTENT }; - The oid type itself has few operations beyond comparison. + The oid type itself has few operations beyond comparison. It can be cast to integer, however, and then manipulated using the standard integer operators. (Beware of possible signed-versus-unsigned confusion if you do this.) @@ -4450,10 +4450,10 @@ SET xmloption TO { DOCUMENT | CONTENT }; The OID alias types have no operations of their own except for specialized input and output routines. These routines are able to accept and display symbolic names for system objects, rather than - the raw numeric value that type oid would use. The alias + the raw numeric value that type oid would use. The alias types allow simplified lookup of OID values for objects. 
For example, - to examine the pg_attribute rows related to a table - mytable, one could write: + to examine the pg_attribute rows related to a table + mytable, one could write: SELECT * FROM pg_attribute WHERE attrelid = 'mytable'::regclass; @@ -4465,11 +4465,11 @@ SELECT * FROM pg_attribute While that doesn't look all that bad by itself, it's still oversimplified. A far more complicated sub-select would be needed to select the right OID if there are multiple tables named - mytable in different schemas. - The regclass input converter handles the table lookup according - to the schema path setting, and so it does the right thing + mytable in different schemas. + The regclass input converter handles the table lookup according + to the schema path setting, and so it does the right thing automatically. Similarly, casting a table's OID to - regclass is handy for symbolic display of a numeric OID. + regclass is handy for symbolic display of a numeric OID.
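Two further small examples of the alias types (output values are from a stock installation; int4's OID is 23):

<programlisting>
SELECT 'integer'::regtype::oid AS type_oid,
       'sum(int4)'::regprocedure AS func;
 type_oid |     func
----------+--------------
       23 | sum(integer)
</programlisting>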
@@ -4487,80 +4487,80 @@ SELECT * FROM pg_attribute - oid + oid any numeric object identifier - 564182 + 564182 - regproc - pg_proc + regproc + pg_proc function name - sum + sum - regprocedure - pg_proc + regprocedure + pg_proc function with argument types - sum(int4) + sum(int4) - regoper - pg_operator + regoper + pg_operator operator name - + + + - regoperator - pg_operator + regoperator + pg_operator operator with argument types - *(integer,integer) or -(NONE,integer) + *(integer,integer) or -(NONE,integer) - regclass - pg_class + regclass + pg_class relation name - pg_type + pg_type - regtype - pg_type + regtype + pg_type data type name - integer + integer - regrole - pg_authid + regrole + pg_authid role name - smithee + smithee - regnamespace - pg_namespace + regnamespace + pg_namespace namespace name - pg_catalog + pg_catalog - regconfig - pg_ts_config + regconfig + pg_ts_config text search configuration - english + english - regdictionary - pg_ts_dict + regdictionary + pg_ts_dict text search dictionary - simple + simple @@ -4571,11 +4571,11 @@ SELECT * FROM pg_attribute schema-qualified names, and will display schema-qualified names on output if the object would not be found in the current search path without being qualified. - The regproc and regoper alias types will only + The regproc and regoper alias types will only accept input names that are unique (not overloaded), so they are - of limited use; for most uses regprocedure or - regoperator are more appropriate. For regoperator, - unary operators are identified by writing NONE for the unused + of limited use; for most uses regprocedure or + regoperator are more appropriate. For regoperator, + unary operators are identified by writing NONE for the unused operand. @@ -4585,12 +4585,12 @@ SELECT * FROM pg_attribute constant of one of these types appears in a stored expression (such as a column default expression or view), it creates a dependency on the referenced object. For example, if a column has a default - expression nextval('my_seq'::regclass), + expression nextval('my_seq'::regclass), PostgreSQL understands that the default expression depends on the sequence - my_seq; the system will not let the sequence be dropped + my_seq; the system will not let the sequence be dropped without first removing the default expression. - regrole is the only exception for the property. Constants of this + regrole is the only exception for the property. Constants of this type are not allowed in such expressions. @@ -4603,21 +4603,21 @@ SELECT * FROM pg_attribute - Another identifier type used by the system is xid, or transaction - (abbreviated xact) identifier. This is the data type of the system columns - xmin and xmax. Transaction identifiers are 32-bit quantities. + Another identifier type used by the system is xid, or transaction + (abbreviated xact) identifier. This is the data type of the system columns + xmin and xmax. Transaction identifiers are 32-bit quantities. - A third identifier type used by the system is cid, or + A third identifier type used by the system is cid, or command identifier. This is the data type of the system columns - cmin and cmax. Command identifiers are also 32-bit quantities. + cmin and cmax. Command identifiers are also 32-bit quantities. - A final identifier type used by the system is tid, or tuple + A final identifier type used by the system is tid, or tuple identifier (row identifier). This is the data type of the system column - ctid. A tuple ID is a pair + ctid. 
A tuple ID is a pair (block number, tuple index within block) that identifies the physical location of the row within its table. @@ -4646,7 +4646,7 @@ SELECT * FROM pg_attribute Internally, an LSN is a 64-bit integer, representing a byte position in the write-ahead log stream. It is printed as two hexadecimal numbers of up to 8 digits each, separated by a slash; for example, - 16/B374D848. The pg_lsn type supports the + 16/B374D848. The pg_lsn type supports the standard comparison operators, like = and >. Two LSNs can be subtracted using the - operator; the result is the number of bytes separating @@ -4736,7 +4736,7 @@ SELECT * FROM pg_attribute The PostgreSQL type system contains a number of special-purpose entries that are collectively called - pseudo-types. A pseudo-type cannot be used as a + pseudo-types. A pseudo-type cannot be used as a column data type, but it can be used to declare a function's argument or result type. Each of the available pseudo-types is useful in situations where a function's behavior does not @@ -4758,106 +4758,106 @@ SELECT * FROM pg_attribute - any + any Indicates that a function accepts any input data type. - anyelement + anyelement Indicates that a function accepts any data type (see ). - anyarray + anyarray Indicates that a function accepts any array data type (see ). - anynonarray + anynonarray Indicates that a function accepts any non-array data type (see ). - anyenum + anyenum Indicates that a function accepts any enum data type (see and ). - anyrange + anyrange Indicates that a function accepts any range data type (see and ). - cstring + cstring Indicates that a function accepts or returns a null-terminated C string. - internal + internal Indicates that a function accepts or returns a server-internal data type. - language_handler - A procedural language call handler is declared to return language_handler. + language_handler + A procedural language call handler is declared to return language_handler. - fdw_handler - A foreign-data wrapper handler is declared to return fdw_handler. + fdw_handler + A foreign-data wrapper handler is declared to return fdw_handler. - index_am_handler - An index access method handler is declared to return index_am_handler. + index_am_handler + An index access method handler is declared to return index_am_handler. - tsm_handler - A tablesample method handler is declared to return tsm_handler. + tsm_handler + A tablesample method handler is declared to return tsm_handler. - record + record Identifies a function taking or returning an unspecified row type. - trigger - A trigger function is declared to return trigger. + trigger + A trigger function is declared to return trigger. - event_trigger - An event trigger function is declared to return event_trigger. + event_trigger + An event trigger function is declared to return event_trigger. - pg_ddl_command + pg_ddl_command Identifies a representation of DDL commands that is available to event triggers. - void + void Indicates that a function returns no value. - unknown + unknown Identifies a not-yet-resolved type, e.g. of an undecorated string literal. - opaque + opaque An obsolete type name that formerly served many of the above purposes. @@ -4876,24 +4876,24 @@ SELECT * FROM pg_attribute Functions coded in procedural languages can use pseudo-types only as allowed by their implementation languages. 
At present most procedural languages forbid use of a pseudo-type as an argument type, and allow - only void and record as a result type (plus - trigger or event_trigger when the function is used + only void and record as a result type (plus + trigger or event_trigger when the function is used as a trigger or event trigger). Some also - support polymorphic functions using the types anyelement, - anyarray, anynonarray, anyenum, and - anyrange. + support polymorphic functions using the types anyelement, + anyarray, anynonarray, anyenum, and + anyrange. - The internal pseudo-type is used to declare functions + The internal pseudo-type is used to declare functions that are meant only to be called internally by the database system, and not by direct invocation in an SQL - query. If a function has at least one internal-type + query. If a function has at least one internal-type argument then it cannot be called from SQL. To preserve the type safety of this restriction it is important to follow this coding rule: do not create any function that is - declared to return internal unless it has at least one - internal argument. + declared to return internal unless it has at least one + internal argument. diff --git a/doc/src/sgml/datetime.sgml b/doc/src/sgml/datetime.sgml index ef9139f9e3..a533bbf8d2 100644 --- a/doc/src/sgml/datetime.sgml +++ b/doc/src/sgml/datetime.sgml @@ -37,18 +37,18 @@ - If the numeric token contains a colon (:), this is + If the numeric token contains a colon (:), this is a time string. Include all subsequent digits and colons. - If the numeric token contains a dash (-), slash - (/), or two or more dots (.), this is + If the numeric token contains a dash (-), slash + (/), or two or more dots (.), this is a date string which might have a text month. If a date token has already been seen, it is instead interpreted as a time zone - name (e.g., America/New_York). + name (e.g., America/New_York). @@ -63,8 +63,8 @@ - If the token starts with a plus (+) or minus - (-), then it is either a numeric time zone or a special + If the token starts with a plus (+) or minus + (-), then it is either a numeric time zone or a special field. @@ -114,7 +114,7 @@ and if no other date fields have been previously read, then interpret as a concatenated date (e.g., 19990118 or 990118). - The interpretation is YYYYMMDD or YYMMDD. + The interpretation is YYYYMMDD or YYMMDD. @@ -128,7 +128,7 @@ If four or six digits and a year has already been read, then - interpret as a time (HHMM or HHMMSS). + interpret as a time (HHMM or HHMMSS). @@ -143,7 +143,7 @@ Otherwise the date field ordering is assumed to follow the - DateStyle setting: mm-dd-yy, dd-mm-yy, or yy-mm-dd. + DateStyle setting: mm-dd-yy, dd-mm-yy, or yy-mm-dd. Throw an error if a month or day field is found to be out of range. @@ -167,7 +167,7 @@ Gregorian years AD 1-99 can be entered by using 4 digits with leading - zeros (e.g., 0099 is AD 99). + zeros (e.g., 0099 is AD 99). @@ -317,7 +317,7 @@ Ignored - JULIAN, JD, J + JULIAN, JD, J Next field is Julian Date @@ -354,23 +354,23 @@ can be altered by any database user, the possible values for it are under the control of the database administrator — they are in fact names of configuration files stored in - .../share/timezonesets/ of the installation directory. + .../share/timezonesets/ of the installation directory. By adding or altering files in that directory, the administrator can set local policy for timezone abbreviations. 
- timezone_abbreviations can be set to any file name - found in .../share/timezonesets/, if the file's name + timezone_abbreviations can be set to any file name + found in .../share/timezonesets/, if the file's name is entirely alphabetic. (The prohibition against non-alphabetic - characters in timezone_abbreviations prevents reading + characters in timezone_abbreviations prevents reading files outside the intended directory, as well as reading editor backup files and other extraneous files.) A timezone abbreviation file can contain blank lines and comments - beginning with #. Non-comment lines must have one of + beginning with #. Non-comment lines must have one of these formats: @@ -388,12 +388,12 @@ the equivalent offset in seconds from UTC, positive being east from Greenwich and negative being west. For example, -18000 would be five hours west of Greenwich, or North American east coast standard time. - D indicates that the zone name represents local + D indicates that the zone name represents local daylight-savings time rather than standard time. - Alternatively, a time_zone_name can be given, referencing + Alternatively, a time_zone_name can be given, referencing a zone name defined in the IANA timezone database. The zone's definition is consulted to see whether the abbreviation is or has been in use in that zone, and if so, the appropriate meaning is used — that is, @@ -417,34 +417,34 @@ - The @INCLUDE syntax allows inclusion of another file in the - .../share/timezonesets/ directory. Inclusion can be nested, + The @INCLUDE syntax allows inclusion of another file in the + .../share/timezonesets/ directory. Inclusion can be nested, to a limited depth. - The @OVERRIDE syntax indicates that subsequent entries in the + The @OVERRIDE syntax indicates that subsequent entries in the file can override previous entries (typically, entries obtained from included files). Without this, conflicting definitions of the same timezone abbreviation are considered an error. - In an unmodified installation, the file Default contains + In an unmodified installation, the file Default contains all the non-conflicting time zone abbreviations for most of the world. - Additional files Australia and India are + Additional files Australia and India are provided for those regions: these files first include the - Default file and then add or modify abbreviations as needed. + Default file and then add or modify abbreviations as needed. For reference purposes, a standard installation also contains files - Africa.txt, America.txt, etc, containing + Africa.txt, America.txt, etc, containing information about every time zone abbreviation known to be in use according to the IANA timezone database. The zone name definitions found in these files can be copied and pasted into a custom configuration file as needed. Note that these files cannot be directly - referenced as timezone_abbreviations settings, because of + referenced as timezone_abbreviations settings, because of the dot embedded in their names. @@ -460,16 +460,16 @@ Time zone abbreviations defined in the configuration file override non-timezone meanings built into PostgreSQL. - For example, the Australia configuration file defines - SAT (for South Australian Standard Time). When this - file is active, SAT will not be recognized as an abbreviation + For example, the Australia configuration file defines + SAT (for South Australian Standard Time). When this + file is active, SAT will not be recognized as an abbreviation for Saturday. 
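A hedged sketch of that override (no output shown; the parsed offset depends on the abbreviation's definition in the Australia file):

<programlisting>
SET timezone_abbreviations = 'Australia';
SELECT '2000-01-08 12:00 SAT'::timestamptz;  -- SAT parsed as a zone abbreviation, not Saturday
</programlisting>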
- If you modify files in .../share/timezonesets/, + If you modify files in .../share/timezonesets/, it is up to you to make backups — a normal database dump will not include this directory. @@ -492,10 +492,10 @@ datetime literal, the datetime values are constrained by the natural rules for dates and times according to the Gregorian calendar. - PostgreSQL follows the SQL + PostgreSQL follows the SQL standard's lead by counting dates exclusively in the Gregorian calendar, even for years before that calendar was in use. - This rule is known as the proleptic Gregorian calendar. + This rule is known as the proleptic Gregorian calendar. @@ -569,7 +569,7 @@ $ cal 9 1752 dominions, not other places. Since it would be difficult and confusing to try to track the actual calendars that were in use in various places at various times, - PostgreSQL does not try, but rather follows the Gregorian + PostgreSQL does not try, but rather follows the Gregorian calendar rules for all dates, even though this method is not historically accurate. @@ -597,7 +597,7 @@ $ cal 9 1752 and probably takes its name from Scaliger's father, the Italian scholar Julius Caesar Scaliger (1484-1558). In the Julian Date system, each day has a sequential number, starting - from JD 0 (which is sometimes called the Julian Date). + from JD 0 (which is sometimes called the Julian Date). JD 0 corresponds to 1 January 4713 BC in the Julian calendar, or 24 November 4714 BC in the Gregorian calendar. Julian Date counting is most often used by astronomers for labeling their nightly observations, @@ -607,10 +607,10 @@ $ cal 9 1752 - Although PostgreSQL supports Julian Date notation for + Although PostgreSQL supports Julian Date notation for input and output of dates (and also uses Julian dates for some internal datetime calculations), it does not observe the nicety of having dates - run from noon to noon. PostgreSQL treats a Julian Date + run from noon to noon. PostgreSQL treats a Julian Date as running from midnight to midnight. diff --git a/doc/src/sgml/dblink.sgml b/doc/src/sgml/dblink.sgml index f19c6b19f5..1f17d3ad2d 100644 --- a/doc/src/sgml/dblink.sgml +++ b/doc/src/sgml/dblink.sgml @@ -8,8 +8,8 @@ - dblink is a module that supports connections to - other PostgreSQL databases from within a database + dblink is a module that supports connections to + other PostgreSQL databases from within a database session. @@ -44,9 +44,9 @@ dblink_connect(text connname, text connstr) returns text Description - dblink_connect() establishes a connection to a remote - PostgreSQL database. The server and database to - be contacted are identified through a standard libpq + dblink_connect() establishes a connection to a remote + PostgreSQL database. The server and database to + be contacted are identified through a standard libpq connection string. Optionally, a name can be assigned to the connection. Multiple named connections can be open at once, but only one unnamed connection is permitted at a time. The connection @@ -81,9 +81,9 @@ dblink_connect(text connname, text connstr) returns text connstr - libpq-style connection info string, for example + libpq-style connection info string, for example hostaddr=127.0.0.1 port=5432 dbname=mydb user=postgres - password=mypasswd. + password=mypasswd. For details see . Alternatively, the name of a foreign server. 
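For illustration (the connection name myconn and the connection string are invented):

<programlisting>
SELECT dblink_connect('myconn', 'dbname=postgres host=127.0.0.1');
 dblink_connect
----------------
 OK
</programlisting>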
@@ -96,7 +96,7 @@ dblink_connect(text connname, text connstr) returns text Return Value - Returns status, which is always OK (since any error + Returns status, which is always OK (since any error causes the function to throw an error instead of returning). @@ -105,15 +105,15 @@ dblink_connect(text connname, text connstr) returns text Notes - Only superusers may use dblink_connect to create + Only superusers may use dblink_connect to create non-password-authenticated connections. If non-superusers need this - capability, use dblink_connect_u instead. + capability, use dblink_connect_u instead. It is unwise to choose connection names that contain equal signs, as this opens a risk of confusion with connection info strings - in other dblink functions. + in other dblink functions. @@ -208,8 +208,8 @@ dblink_connect_u(text connname, text connstr) returns text Description - dblink_connect_u() is identical to - dblink_connect(), except that it will allow non-superusers + dblink_connect_u() is identical to + dblink_connect(), except that it will allow non-superusers to connect using any authentication method. @@ -217,24 +217,24 @@ dblink_connect_u(text connname, text connstr) returns text If the remote server selects an authentication method that does not involve a password, then impersonation and subsequent escalation of privileges can occur, because the session will appear to have - originated from the user as which the local PostgreSQL + originated from the user as which the local PostgreSQL server runs. Also, even if the remote server does demand a password, it is possible for the password to be supplied from the server - environment, such as a ~/.pgpass file belonging to the + environment, such as a ~/.pgpass file belonging to the server's user. This opens not only a risk of impersonation, but the possibility of exposing a password to an untrustworthy remote server. - Therefore, dblink_connect_u() is initially - installed with all privileges revoked from PUBLIC, + Therefore, dblink_connect_u() is initially + installed with all privileges revoked from PUBLIC, making it un-callable except by superusers. In some situations - it may be appropriate to grant EXECUTE permission for - dblink_connect_u() to specific users who are considered + it may be appropriate to grant EXECUTE permission for + dblink_connect_u() to specific users who are considered trustworthy, but this should be done with care. It is also recommended - that any ~/.pgpass file belonging to the server's user - not contain any records specifying a wildcard host name. + that any ~/.pgpass file belonging to the server's user + not contain any records specifying a wildcard host name. - For further details see dblink_connect(). + For further details see dblink_connect(). @@ -265,8 +265,8 @@ dblink_disconnect(text connname) returns text Description - dblink_disconnect() closes a connection previously opened - by dblink_connect(). The form with no arguments closes + dblink_disconnect() closes a connection previously opened + by dblink_connect(). The form with no arguments closes an unnamed connection. @@ -290,7 +290,7 @@ dblink_disconnect(text connname) returns text Return Value - Returns status, which is always OK (since any error + Returns status, which is always OK (since any error causes the function to throw an error instead of returning). 
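Tying the preceding entries together, a short sketch (the role name trusted_app and the connection name myconn are hypothetical):

-- Allow one specific trusted role to use dblink_connect_u,
-- covering both the unnamed- and named-connection forms:
GRANT EXECUTE ON FUNCTION dblink_connect_u(text) TO trusted_app;
GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO trusted_app;

-- Named connections are closed explicitly when no longer needed:
SELECT dblink_disconnect('myconn');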
@@ -341,15 +341,15 @@ dblink(text sql [, bool fail_on_error]) returns setof record Description - dblink executes a query (usually a SELECT, + dblink executes a query (usually a SELECT, but it can be any SQL statement that returns rows) in a remote database. - When two text arguments are given, the first one is first + When two text arguments are given, the first one is first looked up as a persistent connection's name; if found, the command is executed on that connection. If not found, the first argument - is treated as a connection info string as for dblink_connect, + is treated as a connection info string as for dblink_connect, and the indicated connection is made just for the duration of this command. @@ -373,7 +373,7 @@ dblink(text sql [, bool fail_on_error]) returns setof record A connection info string, as previously described for - dblink_connect. + dblink_connect. @@ -383,7 +383,7 @@ dblink(text sql [, bool fail_on_error]) returns setof record The SQL query that you wish to execute in the remote database, - for example select * from foo. + for example select * from foo. @@ -407,11 +407,11 @@ dblink(text sql [, bool fail_on_error]) returns setof record The function returns the row(s) produced by the query. Since - dblink can be used with any query, it is declared - to return record, rather than specifying any particular + dblink can be used with any query, it is declared + to return record, rather than specifying any particular set of columns. This means that you must specify the expected set of columns in the calling query — otherwise - PostgreSQL would not know what to expect. + PostgreSQL would not know what to expect. Here is an example: @@ -421,20 +421,20 @@ SELECT * WHERE proname LIKE 'bytea%'; - The alias part of the FROM clause must + The alias part of the FROM clause must specify the column names and types that the function will return. (Specifying column names in an alias is actually standard SQL - syntax, but specifying column types is a PostgreSQL + syntax, but specifying column types is a PostgreSQL extension.) This allows the system to understand what - * should expand to, and what proname - in the WHERE clause refers to, in advance of trying + * should expand to, and what proname + in the WHERE clause refers to, in advance of trying to execute the function. At run time, an error will be thrown if the actual query result from the remote database does not - have the same number of columns shown in the FROM clause. - The column names need not match, however, and dblink + have the same number of columns shown in the FROM clause. + The column names need not match, however, and dblink does not insist on exact type matches either. It will succeed so long as the returned data strings are valid input for the - column type declared in the FROM clause. + column type declared in the FROM clause. @@ -442,7 +442,7 @@ SELECT * Notes - A convenient way to use dblink with predetermined + A convenient way to use dblink with predetermined queries is to create a view. This allows the column type information to be buried in the view, instead of having to spell it out in every query. For example, @@ -559,15 +559,15 @@ dblink_exec(text sql [, bool fail_on_error]) returns text Description - dblink_exec executes a command (that is, any SQL statement + dblink_exec executes a command (that is, any SQL statement that doesn't return rows) in a remote database. 
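A minimal sketch (the connection name and command are illustrative):

-- Run a non-row-returning command on an established named connection.
SELECT dblink_exec('myconn', 'INSERT INTO foo VALUES (0, ''a'')');
-- Returns the command's status string, e.g. INSERT 0 1, or ERROR.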
- When two text arguments are given, the first one is first + When two text arguments are given, the first one is first looked up as a persistent connection's name; if found, the command is executed on that connection. If not found, the first argument - is treated as a connection info string as for dblink_connect, + is treated as a connection info string as for dblink_connect, and the indicated connection is made just for the duration of this command. @@ -591,7 +591,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text A connection info string, as previously described for - dblink_connect. + dblink_connect. @@ -602,7 +602,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text The SQL command that you wish to execute in the remote database, for example - insert into foo values(0,'a','{"a0","b0","c0"}'). + insert into foo values(0,'a','{"a0","b0","c0"}'). @@ -614,7 +614,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text If true (the default when omitted) then an error thrown on the remote side of the connection causes an error to also be thrown locally. If false, the remote error is locally reported as a NOTICE, - and the function's return value is set to ERROR. + and the function's return value is set to ERROR. @@ -625,7 +625,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text Return Value - Returns status, either the command's status string or ERROR. + Returns status, either the command's status string or ERROR. @@ -695,9 +695,9 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret Description - dblink_open() opens a cursor in a remote database. + dblink_open() opens a cursor in a remote database. The cursor can subsequently be manipulated with - dblink_fetch() and dblink_close(). + dblink_fetch() and dblink_close(). @@ -728,8 +728,8 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret sql - The SELECT statement that you wish to execute in the remote - database, for example select * from pg_class. + The SELECT statement that you wish to execute in the remote + database, for example select * from pg_class. @@ -741,7 +741,7 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret If true (the default when omitted) then an error thrown on the remote side of the connection causes an error to also be thrown locally. If false, the remote error is locally reported as a NOTICE, - and the function's return value is set to ERROR. + and the function's return value is set to ERROR. @@ -752,7 +752,7 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret Return Value - Returns status, either OK or ERROR. + Returns status, either OK or ERROR. @@ -761,16 +761,16 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret Since a cursor can only persist within a transaction, - dblink_open starts an explicit transaction block - (BEGIN) on the remote side, if the remote side was + dblink_open starts an explicit transaction block + (BEGIN) on the remote side, if the remote side was not already within a transaction. This transaction will be - closed again when the matching dblink_close is + closed again when the matching dblink_close is executed. Note that if - you use dblink_exec to change data between - dblink_open and dblink_close, - and then an error occurs or you use dblink_disconnect before - dblink_close, your change will be - lost because the transaction will be aborted. 
+ you use dblink_exec to change data between + dblink_open and dblink_close, + and then an error occurs or you use dblink_disconnect before + dblink_close, your change will be + lost because the transaction will be aborted. @@ -819,8 +819,8 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) Description - dblink_fetch fetches rows from a cursor previously - established by dblink_open. + dblink_fetch fetches rows from a cursor previously + established by dblink_open. @@ -851,7 +851,7 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) howmany - The maximum number of rows to retrieve. The next howmany + The maximum number of rows to retrieve. The next howmany rows are fetched, starting at the current cursor position, moving forward. Once the cursor has reached its end, no more rows are produced. @@ -878,7 +878,7 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) The function returns the row(s) fetched from the cursor. To use this function, you will need to specify the expected set of columns, - as previously discussed for dblink. + as previously discussed for dblink. @@ -887,11 +887,11 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) On a mismatch between the number of return columns specified in the - FROM clause, and the actual number of columns returned by the + FROM clause, and the actual number of columns returned by the remote cursor, an error will be thrown. In this event, the remote cursor is still advanced by as many rows as it would have been if the error had not occurred. The same is true for any other error occurring in the local - query after the remote FETCH has been done. + query after the remote FETCH has been done. @@ -972,8 +972,8 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text Description - dblink_close closes a cursor previously opened with - dblink_open. + dblink_close closes a cursor previously opened with + dblink_open. @@ -1007,7 +1007,7 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text If true (the default when omitted) then an error thrown on the remote side of the connection causes an error to also be thrown locally. If false, the remote error is locally reported as a NOTICE, - and the function's return value is set to ERROR. + and the function's return value is set to ERROR. @@ -1018,7 +1018,7 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text Return Value - Returns status, either OK or ERROR. + Returns status, either OK or ERROR. @@ -1026,9 +1026,9 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text Notes - If dblink_open started an explicit transaction block, + If dblink_open started an explicit transaction block, and this is the last remaining open cursor in this connection, - dblink_close will issue the matching COMMIT. + dblink_close will issue the matching COMMIT. @@ -1082,8 +1082,8 @@ dblink_get_connections() returns text[] Description - dblink_get_connections returns an array of the names - of all open named dblink connections. + dblink_get_connections returns an array of the names + of all open named dblink connections. @@ -1127,7 +1127,7 @@ dblink_error_message(text connname) returns text Description - dblink_error_message fetches the most recent remote + dblink_error_message fetches the most recent remote error message for a given connection. 
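Pulling the cursor functions together, a sketch of the full cycle, ending with an error-message check (connection and cursor names are illustrative):

SELECT dblink_open('myconn', 'mycursor',
                   'SELECT relname, relnamespace FROM pg_class');
SELECT * FROM dblink_fetch('myconn', 'mycursor', 100)
    AS t(relname name, relnamespace oid);   -- alias must match the cursor's columns
SELECT dblink_close('myconn', 'mycursor');  -- issues the matching COMMIT if needed
SELECT dblink_error_message('myconn');      -- most recent remote error, if any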
@@ -1190,7 +1190,7 @@ dblink_send_query(text connname, text sql) returns int Description - dblink_send_query sends a query to be executed + dblink_send_query sends a query to be executed asynchronously, that is, without immediately waiting for the result. There must not be an async query already in progress on the connection. @@ -1198,10 +1198,10 @@ dblink_send_query(text connname, text sql) returns int After successfully dispatching an async query, completion status - can be checked with dblink_is_busy, and the results - are ultimately collected with dblink_get_result. + can be checked with dblink_is_busy, and the results + are ultimately collected with dblink_get_result. It is also possible to attempt to cancel an active async query - using dblink_cancel_query. + using dblink_cancel_query. @@ -1223,7 +1223,7 @@ dblink_send_query(text connname, text sql) returns int The SQL statement that you wish to execute in the remote database, - for example select * from pg_class. + for example select * from pg_class. @@ -1272,7 +1272,7 @@ dblink_is_busy(text connname) returns int Description - dblink_is_busy tests whether an async query is in progress. + dblink_is_busy tests whether an async query is in progress. @@ -1297,7 +1297,7 @@ dblink_is_busy(text connname) returns int Returns 1 if connection is busy, 0 if it is not busy. If this function returns 0, it is guaranteed that - dblink_get_result will not block. + dblink_get_result will not block. @@ -1336,10 +1336,10 @@ dblink_get_notify(text connname) returns setof (notify_name text, be_pid int, ex Description - dblink_get_notify retrieves notifications on either + dblink_get_notify retrieves notifications on either the unnamed connection, or on a named connection if specified. - To receive notifications via dblink, LISTEN must - first be issued, using dblink_exec. + To receive notifications via dblink, LISTEN must + first be issued, using dblink_exec. For details see and . @@ -1417,9 +1417,9 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record Description - dblink_get_result collects the results of an - asynchronous query previously sent with dblink_send_query. - If the query is not already completed, dblink_get_result + dblink_get_result collects the results of an + asynchronous query previously sent with dblink_send_query. + If the query is not already completed, dblink_get_result will wait until it is. @@ -1458,14 +1458,14 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record For an async query (that is, a SQL statement returning rows), the function returns the row(s) produced by the query. To use this function, you will need to specify the expected set of columns, - as previously discussed for dblink. + as previously discussed for dblink. For an async command (that is, a SQL statement not returning rows), the function returns a single row with a single text column containing the command's status string. It is still necessary to specify that - the result will have a single text column in the calling FROM + the result will have a single text column in the calling FROM clause. @@ -1474,22 +1474,22 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record Notes - This function must be called if - dblink_send_query returned 1. + This function must be called if + dblink_send_query returned 1. It must be called once for each query sent, and one additional time to obtain an empty set result, before the connection can be used again. 
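A sketch of that calling sequence (the connection name and query are illustrative):

SELECT dblink_send_query('myconn', 'SELECT relname FROM pg_class');  -- 1 = dispatched
SELECT dblink_is_busy('myconn');            -- 0 guarantees get_result won't block
SELECT * FROM dblink_get_result('myconn') AS t(relname name);
SELECT * FROM dblink_get_result('myconn') AS t(relname name);
-- the additional call returns an empty set, freeing the connection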
- When using dblink_send_query and - dblink_get_result, dblink fetches the entire + When using dblink_send_query and + dblink_get_result, dblink fetches the entire remote query result before returning any of it to the local query processor. If the query returns a large number of rows, this can result in transient memory bloat in the local session. It may be better to open - such a query as a cursor with dblink_open and then fetch a + such a query as a cursor with dblink_open and then fetch a manageable number of rows at a time. Alternatively, use plain - dblink(), which avoids memory bloat by spooling large result + dblink(), which avoids memory bloat by spooling large result sets to disk. @@ -1581,13 +1581,13 @@ dblink_cancel_query(text connname) returns text Description - dblink_cancel_query attempts to cancel any query that + dblink_cancel_query attempts to cancel any query that is in progress on the named connection. Note that this is not certain to succeed (since, for example, the remote query might already have finished). A cancel request simply improves the odds that the query will fail soon. You must still complete the normal query protocol, for example by calling - dblink_get_result. + dblink_get_result. @@ -1610,7 +1610,7 @@ dblink_cancel_query(text connname) returns text Return Value - Returns OK if the cancel request has been sent, or + Returns OK if the cancel request has been sent, or the text of an error message on failure. @@ -1651,7 +1651,7 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results Description - dblink_get_pkey provides information about the primary + dblink_get_pkey provides information about the primary key of a relation in the local database. This is sometimes useful in generating queries to be sent to remote databases. @@ -1665,10 +1665,10 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results relname - Name of a local relation, for example foo or - myschema.mytab. Include double quotes if the + Name of a local relation, for example foo or + myschema.mytab. Include double quotes if the name is mixed-case or contains special characters, for - example "FooBar"; without quotes, the string + example "FooBar"; without quotes, the string will be folded to lower case. @@ -1687,7 +1687,7 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results CREATE TYPE dblink_pkey_results AS (position int, colname text); - The position column simply runs from 1 to N; + The position column simply runs from 1 to N; it is the number of the field within the primary key, not the number within the table's columns. @@ -1748,10 +1748,10 @@ dblink_build_sql_insert(text relname, Description - dblink_build_sql_insert can be useful in doing selective + dblink_build_sql_insert can be useful in doing selective replication of a local table to a remote database. It selects a row from the local table based on primary key, and then builds a SQL - INSERT command that will duplicate that row, but with + INSERT command that will duplicate that row, but with the primary key values replaced by the values in the last argument. (To make an exact copy of the row, just specify the same values for the last two arguments.) @@ -1766,10 +1766,10 @@ dblink_build_sql_insert(text relname, relname - Name of a local relation, for example foo or - myschema.mytab. Include double quotes if the + Name of a local relation, for example foo or + myschema.mytab. 
Include double quotes if the name is mixed-case or contains special characters, for - example "FooBar"; without quotes, the string + example "FooBar"; without quotes, the string will be folded to lower case. @@ -1780,7 +1780,7 @@ dblink_build_sql_insert(text relname, Attribute numbers (1-based) of the primary key fields, - for example 1 2. + for example 1 2. @@ -1811,7 +1811,7 @@ dblink_build_sql_insert(text relname, Values of the primary key fields to be placed in the resulting - INSERT command. Each field is represented in text form. + INSERT command. Each field is represented in text form. @@ -1828,10 +1828,10 @@ dblink_build_sql_insert(text relname, Notes - As of PostgreSQL 9.0, the attribute numbers in + As of PostgreSQL 9.0, the attribute numbers in primary_key_attnums are interpreted as logical column numbers, corresponding to the column's position in - SELECT * FROM relname. Previous versions interpreted the + SELECT * FROM relname. Previous versions interpreted the numbers as physical column positions. There is a difference if any column(s) to the left of the indicated column have been dropped during the lifetime of the table. @@ -1881,9 +1881,9 @@ dblink_build_sql_delete(text relname, Description - dblink_build_sql_delete can be useful in doing selective + dblink_build_sql_delete can be useful in doing selective replication of a local table to a remote database. It builds a SQL - DELETE command that will delete the row with the given + DELETE command that will delete the row with the given primary key values. @@ -1896,10 +1896,10 @@ dblink_build_sql_delete(text relname, relname - Name of a local relation, for example foo or - myschema.mytab. Include double quotes if the + Name of a local relation, for example foo or + myschema.mytab. Include double quotes if the name is mixed-case or contains special characters, for - example "FooBar"; without quotes, the string + example "FooBar"; without quotes, the string will be folded to lower case. @@ -1910,7 +1910,7 @@ dblink_build_sql_delete(text relname, Attribute numbers (1-based) of the primary key fields, - for example 1 2. + for example 1 2. @@ -1929,7 +1929,7 @@ dblink_build_sql_delete(text relname, Values of the primary key fields to be used in the resulting - DELETE command. Each field is represented in text form. + DELETE command. Each field is represented in text form. @@ -1946,10 +1946,10 @@ dblink_build_sql_delete(text relname, Notes - As of PostgreSQL 9.0, the attribute numbers in + As of PostgreSQL 9.0, the attribute numbers in primary_key_attnums are interpreted as logical column numbers, corresponding to the column's position in - SELECT * FROM relname. Previous versions interpreted the + SELECT * FROM relname. Previous versions interpreted the numbers as physical column positions. There is a difference if any column(s) to the left of the indicated column have been dropped during the lifetime of the table. @@ -2000,15 +2000,15 @@ dblink_build_sql_update(text relname, Description - dblink_build_sql_update can be useful in doing selective + dblink_build_sql_update can be useful in doing selective replication of a local table to a remote database. It selects a row from the local table based on primary key, and then builds a SQL - UPDATE command that will duplicate that row, but with + UPDATE command that will duplicate that row, but with the primary key values replaced by the values in the last argument. (To make an exact copy of the row, just specify the same values for - the last two arguments.) 
The UPDATE command always assigns + the last two arguments.) The UPDATE command always assigns all fields of the row — the main difference between this and - dblink_build_sql_insert is that it's assumed that + dblink_build_sql_insert is that it's assumed that the target row already exists in the remote table. @@ -2021,10 +2021,10 @@ dblink_build_sql_update(text relname, relname - Name of a local relation, for example foo or - myschema.mytab. Include double quotes if the + Name of a local relation, for example foo or + myschema.mytab. Include double quotes if the name is mixed-case or contains special characters, for - example "FooBar"; without quotes, the string + example "FooBar"; without quotes, the string will be folded to lower case. @@ -2035,7 +2035,7 @@ dblink_build_sql_update(text relname, Attribute numbers (1-based) of the primary key fields, - for example 1 2. + for example 1 2. @@ -2066,7 +2066,7 @@ dblink_build_sql_update(text relname, Values of the primary key fields to be placed in the resulting - UPDATE command. Each field is represented in text form. + UPDATE command. Each field is represented in text form. @@ -2083,10 +2083,10 @@ dblink_build_sql_update(text relname, Notes - As of PostgreSQL 9.0, the attribute numbers in + As of PostgreSQL 9.0, the attribute numbers in primary_key_attnums are interpreted as logical column numbers, corresponding to the column's position in - SELECT * FROM relname. Previous versions interpreted the + SELECT * FROM relname. Previous versions interpreted the numbers as physical column positions. There is a difference if any column(s) to the left of the indicated column have been dropped during the lifetime of the table. diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml index b05a9c2150..817db92af2 100644 --- a/doc/src/sgml/ddl.sgml +++ b/doc/src/sgml/ddl.sgml @@ -149,7 +149,7 @@ DROP TABLE products; Nevertheless, it is common in SQL script files to unconditionally try to drop each table before creating it, ignoring any error messages, so that the script works whether or not the table exists. - (If you like, you can use the DROP TABLE IF EXISTS variant + (If you like, you can use the DROP TABLE IF EXISTS variant to avoid the error messages, but this is not standard SQL.) @@ -207,9 +207,9 @@ CREATE TABLE products ( The default value can be an expression, which will be evaluated whenever the default value is inserted (not when the table is created). A common example - is for a timestamp column to have a default of CURRENT_TIMESTAMP, + is for a timestamp column to have a default of CURRENT_TIMESTAMP, so that it gets set to the time of row insertion. Another common - example is generating a serial number for each row. + example is generating a serial number for each row. In PostgreSQL this is typically done by something like: @@ -218,8 +218,8 @@ CREATE TABLE products ( ... ); - where the nextval() function supplies successive values - from a sequence object (see nextval() function supplies successive values + from a sequence object (see ). This arrangement is sufficiently common that there's a special shorthand for it: @@ -228,7 +228,7 @@ CREATE TABLE products ( ... ); - The SERIAL shorthand is discussed further in SERIAL shorthand is discussed further in . 
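For example, a self-contained sketch of the two equivalent spellings (table and sequence names are illustrative):

CREATE SEQUENCE products_product_no_seq;
CREATE TABLE products (
    product_no integer DEFAULT nextval('products_product_no_seq'),
    name text
);

-- The SERIAL shorthand expands to roughly the same arrangement:
CREATE TABLE products2 (
    product_no SERIAL,
    name text
);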
@@ -385,7 +385,7 @@ CREATE TABLE products ( CHECK (price > 0), discounted_price numeric, CHECK (discounted_price > 0), - CONSTRAINT valid_discount CHECK (price > discounted_price) + CONSTRAINT valid_discount CHECK (price > discounted_price) ); @@ -623,7 +623,7 @@ CREATE TABLE example ( Adding a primary key will automatically create a unique B-tree index on the column or group of columns listed in the primary key, and will - force the column(s) to be marked NOT NULL. + force the column(s) to be marked NOT NULL. @@ -828,7 +828,7 @@ CREATE TABLE order_items ( (The essential difference between these two choices is that NO ACTION allows the check to be deferred until later in the transaction, whereas RESTRICT does not.) - CASCADE specifies that when a referenced row is deleted, + CASCADE specifies that when a referenced row is deleted, row(s) referencing it should be automatically deleted as well. There are two other options: SET NULL and SET DEFAULT. @@ -845,19 +845,19 @@ CREATE TABLE order_items ( Analogous to ON DELETE there is also ON UPDATE which is invoked when a referenced column is changed (updated). The possible actions are the same. - In this case, CASCADE means that the updated values of the + In this case, CASCADE means that the updated values of the referenced column(s) should be copied into the referencing row(s). Normally, a referencing row need not satisfy the foreign key constraint - if any of its referencing columns are null. If MATCH FULL + if any of its referencing columns are null. If MATCH FULL is added to the foreign key declaration, a referencing row escapes satisfying the constraint only if all its referencing columns are null (so a mix of null and non-null values is guaranteed to fail a - MATCH FULL constraint). If you don't want referencing rows + MATCH FULL constraint). If you don't want referencing rows to be able to avoid satisfying the foreign key constraint, declare the - referencing column(s) as NOT NULL. + referencing column(s) as NOT NULL. @@ -909,7 +909,7 @@ CREATE TABLE circles ( See also CREATE - TABLE ... CONSTRAINT ... EXCLUDE for details. + TABLE ... CONSTRAINT ... EXCLUDE for details. @@ -923,7 +923,7 @@ CREATE TABLE circles ( System Columns - Every table has several system columns that are + Every table has several system columns that are implicitly defined by the system. Therefore, these names cannot be used as names of user-defined columns. (Note that these restrictions are separate from whether the name is a key word or @@ -939,7 +939,7 @@ CREATE TABLE circles ( - oid + oid @@ -957,7 +957,7 @@ CREATE TABLE circles ( - tableoid + tableoid tableoid @@ -976,7 +976,7 @@ CREATE TABLE circles ( - xmin + xmin xmin @@ -992,7 +992,7 @@ CREATE TABLE circles ( - cmin + cmin cmin @@ -1006,7 +1006,7 @@ CREATE TABLE circles ( - xmax + xmax xmax @@ -1023,7 +1023,7 @@ CREATE TABLE circles ( - cmax + cmax cmax @@ -1036,7 +1036,7 @@ CREATE TABLE circles ( - ctid + ctid ctid @@ -1047,7 +1047,7 @@ CREATE TABLE circles ( although the ctid can be used to locate the row version very quickly, a row's ctid will change if it is - updated or moved by VACUUM FULL. Therefore + updated or moved by VACUUM FULL. Therefore ctid is useless as a long-term row identifier. The OID, or even better a user-defined serial number, should be used to identify logical rows. @@ -1074,7 +1074,7 @@ CREATE TABLE circles ( a unique constraint (or unique index) exists, the system takes care not to generate an OID matching an already-existing row. 
(Of course, this is only possible if the table contains fewer - than 232 (4 billion) rows, and in practice the + than 232 (4 billion) rows, and in practice the table size had better be much less than that, or performance might suffer.) @@ -1082,7 +1082,7 @@ CREATE TABLE circles ( OIDs should never be assumed to be unique across tables; use - the combination of tableoid and row OID if you + the combination of tableoid and row OID if you need a database-wide identifier. @@ -1090,7 +1090,7 @@ CREATE TABLE circles ( Of course, the tables in question must be created WITH OIDS. As of PostgreSQL 8.1, - WITHOUT OIDS is the default. + WITHOUT OIDS is the default. @@ -1107,7 +1107,7 @@ CREATE TABLE circles ( Command identifiers are also 32-bit quantities. This creates a hard limit - of 232 (4 billion) SQL commands + of 232 (4 billion) SQL commands within a single transaction. In practice this limit is not a problem — note that the limit is on the number of SQL commands, not the number of rows processed. @@ -1186,7 +1186,7 @@ CREATE TABLE circles ( ALTER TABLE products ADD COLUMN description text; The new column is initially filled with whatever default - value is given (null if you don't specify a DEFAULT clause). + value is given (null if you don't specify a DEFAULT clause). @@ -1196,9 +1196,9 @@ ALTER TABLE products ADD COLUMN description text; ALTER TABLE products ADD COLUMN description text CHECK (description <> ''); In fact all the options that can be applied to a column description - in CREATE TABLE can be used here. Keep in mind however + in CREATE TABLE can be used here. Keep in mind however that the default value must satisfy the given constraints, or the - ADD will fail. Alternatively, you can add + ADD will fail. Alternatively, you can add constraints later (see below) after you've filled in the new column correctly. @@ -1210,7 +1210,7 @@ ALTER TABLE products ADD COLUMN description text CHECK (description <> '') specified, PostgreSQL is able to avoid the physical update. So if you intend to fill the column with mostly nondefault values, it's best to add the column with no default, - insert the correct values using UPDATE, and then add any + insert the correct values using UPDATE, and then add any desired default as described below. @@ -1234,7 +1234,7 @@ ALTER TABLE products DROP COLUMN description; foreign key constraint of another table, PostgreSQL will not silently drop that constraint. You can authorize dropping everything that depends on - the column by adding CASCADE: + the column by adding CASCADE: ALTER TABLE products DROP COLUMN description CASCADE; @@ -1290,13 +1290,13 @@ ALTER TABLE products ALTER COLUMN product_no SET NOT NULL; ALTER TABLE products DROP CONSTRAINT some_name; - (If you are dealing with a generated constraint name like $2, + (If you are dealing with a generated constraint name like $2, don't forget that you'll need to double-quote it to make it a valid identifier.) - As with dropping a column, you need to add CASCADE if you + As with dropping a column, you need to add CASCADE if you want to drop a constraint that something else depends on. An example is that a foreign key constraint depends on a unique or primary key constraint on the referenced column(s). @@ -1326,7 +1326,7 @@ ALTER TABLE products ALTER COLUMN product_no DROP NOT NULL; ALTER TABLE products ALTER COLUMN price SET DEFAULT 7.77; Note that this doesn't affect any existing rows in the table, it - just changes the default for future INSERT commands. 
+ just changes the default for future INSERT commands. @@ -1356,12 +1356,12 @@ ALTER TABLE products ALTER COLUMN price TYPE numeric(10,2); This will succeed only if each existing entry in the column can be converted to the new type by an implicit cast. If a more complex - conversion is needed, you can add a USING clause that + conversion is needed, you can add a USING clause that specifies how to compute the new values from the old. - PostgreSQL will attempt to convert the column's + PostgreSQL will attempt to convert the column's default value (if any) to the new type, as well as any constraints that involve the column. But these conversions might fail, or might produce surprising results. It's often best to drop any constraints @@ -1437,11 +1437,11 @@ ALTER TABLE products RENAME TO items; - There are different kinds of privileges: SELECT, - INSERT, UPDATE, DELETE, - TRUNCATE, REFERENCES, TRIGGER, - CREATE, CONNECT, TEMPORARY, - EXECUTE, and USAGE. + There are different kinds of privileges: SELECT, + INSERT, UPDATE, DELETE, + TRUNCATE, REFERENCES, TRIGGER, + CREATE, CONNECT, TEMPORARY, + EXECUTE, and USAGE. The privileges applicable to a particular object vary depending on the object's type (table, function, etc). For complete information on the different types of privileges @@ -1480,7 +1480,7 @@ GRANT UPDATE ON accounts TO joe; The special role name PUBLIC can be used to grant a privilege to every role on the system. Also, - group roles can be set up to help manage privileges when + group roles can be set up to help manage privileges when there are many users of a database — for details see . @@ -1492,7 +1492,7 @@ GRANT UPDATE ON accounts TO joe; REVOKE ALL ON accounts FROM PUBLIC; The special privileges of the object owner (i.e., the right to do - DROP, GRANT, REVOKE, etc.) + DROP, GRANT, REVOKE, etc.) are always implicit in being the owner, and cannot be granted or revoked. But the object owner can choose to revoke their own ordinary privileges, for example to make a @@ -1502,7 +1502,7 @@ REVOKE ALL ON accounts FROM PUBLIC; Ordinarily, only the object's owner (or a superuser) can grant or revoke privileges on an object. However, it is possible to grant a - privilege with grant option, which gives the recipient + privilege with grant option, which gives the recipient the right to grant it in turn to others. If the grant option is subsequently revoked then all who received the privilege from that recipient (directly or through a chain of grants) will lose the @@ -1525,10 +1525,10 @@ REVOKE ALL ON accounts FROM PUBLIC; In addition to the SQL-standard privilege system available through , - tables can have row security policies that restrict, + tables can have row security policies that restrict, on a per-user basis, which rows can be returned by normal queries or inserted, updated, or deleted by data modification commands. - This feature is also known as Row-Level Security. + This feature is also known as Row-Level Security. By default, tables do not have any policies, so that if a user has access privileges to a table according to the SQL privilege system, all rows within it are equally available for querying or updating. @@ -1537,20 +1537,20 @@ REVOKE ALL ON accounts FROM PUBLIC; When row security is enabled on a table (with ALTER TABLE ... ENABLE ROW LEVEL - SECURITY), all normal access to the table for selecting rows or + SECURITY), all normal access to the table for selecting rows or modifying rows must be allowed by a row security policy. 
(However, the table's owner is typically not subject to row security policies.) If no policy exists for the table, a default-deny policy is used, meaning that no rows are visible or can be modified. Operations that apply to the - whole table, such as TRUNCATE and REFERENCES, + whole table, such as TRUNCATE and REFERENCES, are not subject to row security. Row security policies can be specific to commands, or to roles, or to both. A policy can be specified to apply to ALL - commands, or to SELECT, INSERT, UPDATE, - or DELETE. Multiple roles can be assigned to a given + commands, or to SELECT, INSERT, UPDATE, + or DELETE. Multiple roles can be assigned to a given policy, and normal role membership and inheritance rules apply. @@ -1562,7 +1562,7 @@ REVOKE ALL ON accounts FROM PUBLIC; rule are leakproof functions, which are guaranteed to not leak information; the optimizer may choose to apply such functions ahead of the row-security check.) Rows for which the expression does - not return true will not be processed. Separate expressions + not return true will not be processed. Separate expressions may be specified to provide independent control over the rows which are visible and the rows which are allowed to be modified. Policy expressions are run as part of the query and with the privileges of the @@ -1571,11 +1571,11 @@ REVOKE ALL ON accounts FROM PUBLIC; - Superusers and roles with the BYPASSRLS attribute always + Superusers and roles with the BYPASSRLS attribute always bypass the row security system when accessing a table. Table owners normally bypass row security as well, though a table owner can choose to be subject to row security with ALTER - TABLE ... FORCE ROW LEVEL SECURITY. + TABLE ... FORCE ROW LEVEL SECURITY. @@ -1609,8 +1609,8 @@ REVOKE ALL ON accounts FROM PUBLIC; As a simple example, here is how to create a policy on - the account relation to allow only members of - the managers role to access rows, and only rows of their + the account relation to allow only members of + the managers role to access rows, and only rows of their accounts: @@ -1627,7 +1627,7 @@ CREATE POLICY account_managers ON accounts TO managers If no role is specified, or the special user name PUBLIC is used, then the policy applies to all users on the system. To allow all users to access their own row in - a users table, a simple policy can be used: + a users table, a simple policy can be used: @@ -1637,9 +1637,9 @@ CREATE POLICY user_policy ON users To use a different policy for rows that are being added to the table - compared to those rows that are visible, the WITH CHECK + compared to those rows that are visible, the WITH CHECK clause can be used. This policy would allow all users to view all rows - in the users table, but only modify their own: + in the users table, but only modify their own: @@ -1649,7 +1649,7 @@ CREATE POLICY user_policy ON users - Row security can also be disabled with the ALTER TABLE + Row security can also be disabled with the ALTER TABLE command. Disabling row security does not remove any policies that are defined on the table; they are simply ignored. Then all rows in the table are visible and modifiable, subject to the standard SQL privileges @@ -1658,7 +1658,7 @@ CREATE POLICY user_policy ON users Below is a larger example of how this feature can be used in production - environments. The table passwd emulates a Unix password + environments. 
The table passwd emulates a Unix password file: @@ -1820,7 +1820,7 @@ UPDATE 0 Referential integrity checks, such as unique or primary key constraints and foreign key references, always bypass row security to ensure that data integrity is maintained. Care must be taken when developing - schemas and row level policies to avoid covert channel leaks of + schemas and row level policies to avoid covert channel leaks of information through such referential integrity checks. @@ -1830,7 +1830,7 @@ UPDATE 0 disastrous if row security silently caused some rows to be omitted from the backup. In such a situation, you can set the configuration parameter - to off. This does not in itself bypass row security; + to off. This does not in itself bypass row security; what it does is throw an error if any query's results would get filtered by a policy. The reason for the error can then be investigated and fixed. @@ -1842,7 +1842,7 @@ UPDATE 0 best-performing case; when possible, it's best to design row security applications to work this way. If it is necessary to consult other rows or other tables to make a policy decision, that can be accomplished using - sub-SELECTs, or functions that contain SELECTs, + sub-SELECTs, or functions that contain SELECTs, in the policy expressions. Be aware however that such accesses can create race conditions that could allow information leakage if care is not taken. As an example, consider the following table design: @@ -1896,8 +1896,8 @@ GRANT ALL ON information TO public; - Now suppose that alice wishes to change the slightly - secret information, but decides that mallory should not + Now suppose that alice wishes to change the slightly + secret information, but decides that mallory should not be trusted with the new content of that row, so she does: @@ -1909,36 +1909,36 @@ COMMIT; - That looks safe; there is no window wherein mallory should be - able to see the secret from mallory string. However, there is - a race condition here. If mallory is concurrently doing, + That looks safe; there is no window wherein mallory should be + able to see the secret from mallory string. However, there is + a race condition here. If mallory is concurrently doing, say, SELECT * FROM information WHERE group_id = 2 FOR UPDATE; - and her transaction is in READ COMMITTED mode, it is possible - for her to see secret from mallory. That happens if her - transaction reaches the information row just - after alice's does. It blocks waiting - for alice's transaction to commit, then fetches the updated - row contents thanks to the FOR UPDATE clause. However, it - does not fetch an updated row for the - implicit SELECT from users, because that - sub-SELECT did not have FOR UPDATE; instead - the users row is read with the snapshot taken at the start + and her transaction is in READ COMMITTED mode, it is possible + for her to see secret from mallory. That happens if her + transaction reaches the information row just + after alice's does. It blocks waiting + for alice's transaction to commit, then fetches the updated + row contents thanks to the FOR UPDATE clause. However, it + does not fetch an updated row for the + implicit SELECT from users, because that + sub-SELECT did not have FOR UPDATE; instead + the users row is read with the snapshot taken at the start of the query. Therefore, the policy expression tests the old value - of mallory's privilege level and allows her to see the + of mallory's privilege level and allows her to see the updated row. There are several ways around this problem. 
One simple answer is to use - SELECT ... FOR SHARE in sub-SELECTs in row - security policies. However, that requires granting UPDATE - privilege on the referenced table (here users) to the + SELECT ... FOR SHARE in sub-SELECTs in row + security policies. However, that requires granting UPDATE + privilege on the referenced table (here users) to the affected users, which might be undesirable. (But another row security policy could be applied to prevent them from actually exercising that - privilege; or the sub-SELECT could be embedded into a security + privilege; or the sub-SELECT could be embedded into a security definer function.) Also, heavy concurrent use of row share locks on the referenced table could pose a performance problem, especially if updates of it are frequent. Another solution, practical if updates of the @@ -1977,19 +1977,19 @@ SELECT * FROM information WHERE group_id = 2 FOR UPDATE; Users of a cluster do not necessarily have the privilege to access every database in the cluster. Sharing of user names means that there - cannot be different users named, say, joe in two databases + cannot be different users named, say, joe in two databases in the same cluster; but the system can be configured to allow - joe access to only some of the databases. + joe access to only some of the databases. - A database contains one or more named schemas, which + A database contains one or more named schemas, which in turn contain tables. Schemas also contain other kinds of named objects, including data types, functions, and operators. The same object name can be used in different schemas without conflict; for - example, both schema1 and myschema can - contain tables named mytable. Unlike databases, + example, both schema1 and myschema can + contain tables named mytable. Unlike databases, schemas are not rigidly separated: a user can access objects in any of the schemas in the database they are connected to, if they have privileges to do so. @@ -2053,10 +2053,10 @@ CREATE SCHEMA myschema; To create or access objects in a schema, write a - qualified name consisting of the schema name and + qualified name consisting of the schema name and table name separated by a dot: -schema.table +schema.table This works anywhere a table name is expected, including the table modification commands and the data access commands discussed in @@ -2068,10 +2068,10 @@ CREATE SCHEMA myschema; Actually, the even more general syntax -database.schema.table +database.schema.table can be used too, but at present this is just for pro - forma compliance with the SQL standard. If you write a database name, + forma compliance with the SQL standard. If you write a database name, it must be the same as the database you are connected to. @@ -2116,7 +2116,7 @@ CREATE SCHEMA schema_name AUTHORIZATION - Schema names beginning with pg_ are reserved for + Schema names beginning with pg_ are reserved for system purposes and cannot be created by users. @@ -2163,9 +2163,9 @@ CREATE TABLE public.products ( ... ); Qualified names are tedious to write, and it's often best not to wire a particular schema name into applications anyway. Therefore - tables are often referred to by unqualified names, + tables are often referred to by unqualified names, which consist of just the table name. The system determines which table - is meant by following a search path, which is a list + is meant by following a search path, which is a list of schemas to look in. The first matching table in the search path is taken to be the one wanted. 
If there is no match in the search path, an error is reported, even if matching table names exist @@ -2180,7 +2180,7 @@ CREATE TABLE public.products ( ... ); The first schema named in the search path is called the current schema. Aside from being the first schema searched, it is also the schema in - which new tables will be created if the CREATE TABLE + which new tables will be created if the CREATE TABLE command does not specify a schema name. @@ -2253,7 +2253,7 @@ SET search_path TO myschema; need to write a qualified operator name in an expression, there is a special provision: you must write -OPERATOR(schema.operator) +OPERATOR(schema.operator) This is needed to avoid syntactic ambiguity. An example is: @@ -2310,28 +2310,28 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC; - In addition to public and user-created schemas, each - database contains a pg_catalog schema, which contains + In addition to public and user-created schemas, each + database contains a pg_catalog schema, which contains the system tables and all the built-in data types, functions, and - operators. pg_catalog is always effectively part of + operators. pg_catalog is always effectively part of the search path. If it is not named explicitly in the path then - it is implicitly searched before searching the path's + it is implicitly searched before searching the path's schemas. This ensures that built-in names will always be findable. However, you can explicitly place - pg_catalog at the end of your search path if you + pg_catalog at the end of your search path if you prefer to have user-defined names override built-in names. - Since system table names begin with pg_, it is best to + Since system table names begin with pg_, it is best to avoid such names to ensure that you won't suffer a conflict if some future version defines a system table named the same as your table. (With the default search path, an unqualified reference to your table name would then be resolved as the system table instead.) System tables will continue to follow the convention of having - names beginning with pg_, so that they will not + names beginning with pg_, so that they will not conflict with unqualified user-table names so long as users avoid - the pg_ prefix. + the pg_ prefix. @@ -2397,15 +2397,15 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC; implements only the basic schema support specified in the standard. Therefore, many users consider qualified names to really consist of - user_name.table_name. + user_name.table_name. This is how PostgreSQL will effectively behave if you create a per-user schema for every user. - Also, there is no concept of a public schema in the + Also, there is no concept of a public schema in the SQL standard. For maximum conformance to the standard, you should - not use (perhaps even remove) the public schema. + not use (perhaps even remove) the public schema. @@ -2461,9 +2461,9 @@ CREATE TABLE capitals ( ) INHERITS (cities); - In this case, the capitals table inherits - all the columns of its parent table, cities. State - capitals also have an extra column, state, that shows + In this case, the capitals table inherits + all the columns of its parent table, cities. State + capitals also have an extra column, state, that shows their state. 
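A sketch of such a per-user arrangement (the user name alice is illustrative, and the role is assumed to exist already):

CREATE SCHEMA alice AUTHORIZATION alice;      -- schema named after its owner
SET search_path TO alice;                     -- unqualified names resolve here
CREATE TABLE mytable (id integer);            -- created as alice.mytable
REVOKE CREATE ON SCHEMA public FROM PUBLIC;   -- optionally lock down public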
@@ -2521,7 +2521,7 @@ SELECT name, altitude - You can also write the table name with a trailing * + You can also write the table name with a trailing * to explicitly specify that descendant tables are included: @@ -2530,7 +2530,7 @@ SELECT name, altitude WHERE altitude > 500; - Writing * is not necessary, since this behavior is always + Writing * is not necessary, since this behavior is always the default. However, this syntax is still supported for compatibility with older releases where the default could be changed. @@ -2559,7 +2559,7 @@ WHERE c.altitude > 500; (If you try to reproduce this example, you will probably get different numeric OIDs.) By doing a join with - pg_class you can see the actual table names: + pg_class you can see the actual table names: SELECT p.relname, c.name, c.altitude @@ -2579,7 +2579,7 @@ WHERE c.altitude > 500 AND c.tableoid = p.oid; - Another way to get the same effect is to use the regclass + Another way to get the same effect is to use the regclass alias type, which will print the table OID symbolically: @@ -2603,15 +2603,15 @@ VALUES ('Albany', NULL, NULL, 'NY'); INSERT always inserts into exactly the table specified. In some cases it is possible to redirect the insertion using a rule (see ). However that does not - help for the above case because the cities table - does not contain the column state, and so the + help for the above case because the cities table + does not contain the column state, and so the command will be rejected before the rule can be applied. All check constraints and not-null constraints on a parent table are automatically inherited by its children, unless explicitly specified - otherwise with NO INHERIT clauses. Other types of constraints + otherwise with NO INHERIT clauses. Other types of constraints (unique, primary key, and foreign key constraints) are not inherited. @@ -2620,7 +2620,7 @@ VALUES ('Albany', NULL, NULL, 'NY'); the union of the columns defined by the parent tables. Any columns declared in the child table's definition are added to these. If the same column name appears in multiple parent tables, or in both a parent - table and the child's definition, then these columns are merged + table and the child's definition, then these columns are merged so that there is only one such column in the child table. To be merged, columns must have the same data types, else an error is raised. Inheritable check constraints and not-null constraints are merged in a @@ -2632,7 +2632,7 @@ VALUES ('Albany', NULL, NULL, 'NY'); Table inheritance is typically established when the child table is - created, using the INHERITS clause of the + created, using the INHERITS clause of the statement. Alternatively, a table which is already defined in a compatible way can @@ -2642,7 +2642,7 @@ VALUES ('Albany', NULL, NULL, 'NY'); the same names and types as the columns of the parent. It must also include check constraints with the same names and check expressions as those of the parent. Similarly an inheritance link can be removed from a child using the - NO INHERIT variant of ALTER TABLE. + NO INHERIT variant of ALTER TABLE. Dynamically adding and removing inheritance links like this can be useful when the inheritance relationship is being used for table partitioning (see ). @@ -2680,10 +2680,10 @@ VALUES ('Albany', NULL, NULL, 'NY'); Inherited queries perform access permission checks on the parent table - only. Thus, for example, granting UPDATE permission on - the cities table implies permission to update rows in + only. 
Thus, for example, granting UPDATE permission on + the cities table implies permission to update rows in the capitals table as well, when they are - accessed through cities. This preserves the appearance + accessed through cities. This preserves the appearance that the data is (also) in the parent table. But the capitals table could not be updated directly without an additional grant. In a similar way, the parent table's row @@ -2732,33 +2732,33 @@ VALUES ('Albany', NULL, NULL, 'NY'); - If we declared cities.name to be - UNIQUE or a PRIMARY KEY, this would not stop the - capitals table from having rows with names duplicating - rows in cities. And those duplicate rows would by - default show up in queries from cities. In fact, by - default capitals would have no unique constraint at all, + If we declared cities.name to be + UNIQUE or a PRIMARY KEY, this would not stop the + capitals table from having rows with names duplicating + rows in cities. And those duplicate rows would by + default show up in queries from cities. In fact, by + default capitals would have no unique constraint at all, and so could contain multiple rows with the same name. - You could add a unique constraint to capitals, but this - would not prevent duplication compared to cities. + You could add a unique constraint to capitals, but this + would not prevent duplication compared to cities. Similarly, if we were to specify that - cities.name REFERENCES some + cities.name REFERENCES some other table, this constraint would not automatically propagate to - capitals. In this case you could work around it by - manually adding the same REFERENCES constraint to - capitals. + capitals. In this case you could work around it by + manually adding the same REFERENCES constraint to + capitals. Specifying that another table's column REFERENCES - cities(name) would allow the other table to contain city names, but + cities(name) would allow the other table to contain city names, but not capital names. There is no good workaround for this case. @@ -2825,10 +2825,10 @@ VALUES ('Albany', NULL, NULL, 'NY'); Bulk loads and deletes can be accomplished by adding or removing partitions, if that requirement is planned into the partitioning design. - Doing ALTER TABLE DETACH PARTITION or dropping an individual - partition using DROP TABLE is far faster than a bulk + Doing ALTER TABLE DETACH PARTITION or dropping an individual + partition using DROP TABLE is far faster than a bulk operation. These commands also entirely avoid the - VACUUM overhead caused by a bulk DELETE. + VACUUM overhead caused by a bulk DELETE. @@ -2921,7 +2921,7 @@ VALUES ('Albany', NULL, NULL, 'NY'); containing data as a partition of a partitioned table, or remove a partition from a partitioned table turning it into a standalone table; see to learn more about the - ATTACH PARTITION and DETACH PARTITION + ATTACH PARTITION and DETACH PARTITION sub-commands. @@ -2968,9 +2968,9 @@ VALUES ('Albany', NULL, NULL, 'NY'); Partitions cannot have columns that are not present in the parent. It is neither possible to specify columns when creating partitions with - CREATE TABLE nor is it possible to add columns to - partitions after-the-fact using ALTER TABLE. Tables may be - added as a partition with ALTER TABLE ... ATTACH PARTITION + CREATE TABLE nor is it possible to add columns to + partitions after-the-fact using ALTER TABLE. Tables may be + added as a partition with ALTER TABLE ... ATTACH PARTITION only if their columns exactly match the parent, including any oid column. 
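As a minimal sketch of declarative partitioning under the rules just described (names are illustrative; the measurement example that follows is the fuller treatment):

CREATE TABLE logs (
    logdate date NOT NULL,
    payload text
) PARTITION BY RANGE (logdate);

-- Partitions declare their bounds but add no columns of their own:
CREATE TABLE logs_y2017 PARTITION OF logs
    FOR VALUES FROM ('2017-01-01') TO ('2018-01-01');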
@@ -3049,7 +3049,7 @@ CREATE TABLE measurement ( accessing the partitioned table will have to scan fewer partitions if the conditions involve some or all of these columns. For example, consider a table range partitioned using columns - lastname and firstname (in that order) + lastname and firstname (in that order) as the partition key. @@ -3067,7 +3067,7 @@ CREATE TABLE measurement ( Partitions thus created are in every way normal - PostgreSQL + PostgreSQL tables (or, possibly, foreign tables). It is possible to specify a tablespace and storage parameters for each partition separately. @@ -3111,12 +3111,12 @@ CREATE TABLE measurement_y2006m02 PARTITION OF measurement PARTITION BY RANGE (peaktemp); - After creating partitions of measurement_y2006m02, - any data inserted into measurement that is mapped to - measurement_y2006m02 (or data that is directly inserted - into measurement_y2006m02, provided it satisfies its + After creating partitions of measurement_y2006m02, + any data inserted into measurement that is mapped to + measurement_y2006m02 (or data that is directly inserted + into measurement_y2006m02, provided it satisfies its partition constraint) will be further redirected to one of its - partitions based on the peaktemp column. The partition + partitions based on the peaktemp column. The partition key specified may overlap with the parent's partition key, although care should be taken when specifying the bounds of a sub-partition such that the set of data it accepts constitutes a subset of what @@ -3147,7 +3147,7 @@ CREATE INDEX ON measurement_y2008m01 (logdate); Ensure that the - configuration parameter is not disabled in postgresql.conf. + configuration parameter is not disabled in postgresql.conf. If it is, queries will not be optimized as desired. @@ -3197,7 +3197,7 @@ ALTER TABLE measurement DETACH PARTITION measurement_y2006m02; This allows further operations to be performed on the data before it is dropped. For example, this is often a useful time to back up - the data using COPY, pg_dump, or + the data using COPY, pg_dump, or similar tools. It might also be a useful time to aggregate data into smaller formats, perform other data manipulations, or run reports. @@ -3236,14 +3236,14 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 - Before running the ATTACH PARTITION command, it is - recommended to create a CHECK constraint on the table to + Before running the ATTACH PARTITION command, it is + recommended to create a CHECK constraint on the table to be attached describing the desired partition constraint. That way, the system will be able to skip the scan to validate the implicit partition constraint. Without such a constraint, the table will be scanned to validate the partition constraint while holding an ACCESS EXCLUSIVE lock on the parent table. - One may then drop the constraint after ATTACH PARTITION + One may then drop the constraint after ATTACH PARTITION is finished, because it is no longer necessary. @@ -3285,7 +3285,7 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 - An UPDATE that causes a row to move from one partition to + An UPDATE that causes a row to move from one partition to another fails, because the new value of the row fails to satisfy the implicit partition constraint of the original partition. @@ -3376,7 +3376,7 @@ ALTER TABLE measurement ATTACH PARTITION measurement_y2008m02 the master table. Normally, these tables will not add any columns to the set inherited from the master. 
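 For example, a sketch of one child partition for the measurement table used in this chapter; the CHECK constraint carries the partition bounds that constraint exclusion will rely on later:

CREATE TABLE measurement_y2006m02 (
    CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' )
) INHERITS (measurement);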
Just as with declarative partitioning, these partitions are in every way normal - PostgreSQL tables (or foreign tables). + PostgreSQL tables (or foreign tables). @@ -3460,7 +3460,7 @@ CREATE INDEX measurement_y2008m01_logdate ON measurement_y2008m01 (logdate); We want our application to be able to say INSERT INTO - measurement ... and have the data be redirected into the + measurement ... and have the data be redirected into the appropriate partition table. We can arrange that by attaching a suitable trigger function to the master table. If data will be added only to the latest partition, we can @@ -3567,9 +3567,9 @@ DO INSTEAD - Be aware that COPY ignores rules. If you want to - use COPY to insert data, you'll need to copy into the - correct partition table rather than into the master. COPY + Be aware that COPY ignores rules. If you want to + use COPY to insert data, you'll need to copy into the + correct partition table rather than into the master. COPY does fire triggers, so you can use it normally if you use the trigger approach. @@ -3585,7 +3585,7 @@ DO INSTEAD Ensure that the configuration parameter is not disabled in - postgresql.conf. + postgresql.conf. If it is, queries will not be optimized as desired. @@ -3666,8 +3666,8 @@ ALTER TABLE measurement_y2008m02 INHERIT measurement; The schemes shown here assume that the partition key column(s) of a row never change, or at least do not change enough to require - it to move to another partition. An UPDATE that attempts - to do that will fail because of the CHECK constraints. + it to move to another partition. An UPDATE that attempts + to do that will fail because of the CHECK constraints. If you need to handle such cases, you can put suitable update triggers on the partition tables, but it makes management of the structure much more complicated. @@ -3688,8 +3688,8 @@ ANALYZE measurement; - INSERT statements with ON CONFLICT - clauses are unlikely to work as expected, as the ON CONFLICT + INSERT statements with ON CONFLICT + clauses are unlikely to work as expected, as the ON CONFLICT action is only taken in case of unique violations on the specified target relation, not its child relations. @@ -3717,7 +3717,7 @@ ANALYZE measurement; - Constraint exclusion is a query optimization technique + Constraint exclusion is a query optimization technique that improves performance for partitioned tables defined in the fashion described above (both declaratively partitioned tables and those implemented using inheritance). As an example: @@ -3728,17 +3728,17 @@ SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; Without constraint exclusion, the above query would scan each of - the partitions of the measurement table. With constraint + the partitions of the measurement table. With constraint exclusion enabled, the planner will examine the constraints of each partition and try to prove that the partition need not be scanned because it could not contain any rows meeting the query's - WHERE clause. When the planner can prove this, it + WHERE clause. When the planner can prove this, it excludes the partition from the query plan. - You can use the EXPLAIN command to show the difference - between a plan with constraint_exclusion on and a plan + You can use the EXPLAIN command to show the difference + between a plan with constraint_exclusion on and a plan with it off. 
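 For example, a brief sketch reusing the measurement table (run it with the parameter set each way and compare the plans):

SET constraint_exclusion = off;
EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01';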
A typical unoptimized plan for this type of table setup is: @@ -3783,7 +3783,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; - Note that constraint exclusion is driven only by CHECK + Note that constraint exclusion is driven only by CHECK constraints, not by the presence of indexes. Therefore it isn't necessary to define indexes on the key columns. Whether an index needs to be created for a given partition depends on whether you @@ -3795,11 +3795,11 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; The default (and recommended) setting of is actually neither - on nor off, but an intermediate setting - called partition, which causes the technique to be + on nor off, but an intermediate setting + called partition, which causes the technique to be applied only to queries that are likely to be working on partitioned - tables. The on setting causes the planner to examine - CHECK constraints in all queries, even simple ones that + tables. The on setting causes the planner to examine + CHECK constraints in all queries, even simple ones that are unlikely to benefit. @@ -3810,7 +3810,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; - Constraint exclusion only works when the query's WHERE + Constraint exclusion only works when the query's WHERE clause contains constants (or externally supplied parameters). For example, a comparison against a non-immutable function such as CURRENT_TIMESTAMP cannot be optimized, since the @@ -3867,7 +3867,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; PostgreSQL implements portions of the SQL/MED specification, allowing you to access data that resides outside PostgreSQL using regular SQL queries. Such data is referred to as - foreign data. (Note that this usage is not to be confused + foreign data. (Note that this usage is not to be confused with foreign keys, which are a type of constraint within the database.) @@ -3876,7 +3876,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; foreign data wrapper. A foreign data wrapper is a library that can communicate with an external data source, hiding the details of connecting to the data source and obtaining data from it. - There are some foreign data wrappers available as contrib + There are some foreign data wrappers available as contrib modules; see . Other kinds of foreign data wrappers might be found as third party products. If none of the existing foreign data wrappers suit your needs, you can write your own; see - To access foreign data, you need to create a foreign server + To access foreign data, you need to create a foreign server object, which defines how to connect to a particular external data source according to the set of options used by its supporting foreign data wrapper. Then you need to create one or more foreign @@ -3899,7 +3899,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate >= DATE '2008-01-01'; Accessing remote data may require authenticating to the external data source. This information can be provided by a - user mapping, which can provide additional data + user mapping, which can provide additional data such as user names and passwords based on the current PostgreSQL role. @@ -4002,13 +4002,13 @@ DROP TABLE products CASCADE; that depend on them, recursively. In this case, it doesn't remove the orders table, it only removes the foreign key constraint. It stops there because nothing depends on the foreign key constraint. 
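 For example, attempting the drop without CASCADE reports the dependency first (the message text shown is illustrative, and the constraint name depends on how it was created):

DROP TABLE products;
ERROR:  cannot drop table products because other objects depend on it
DETAIL:  constraint orders_product_no_fkey on table orders depends on table products
HINT:  Use DROP ... CASCADE to drop the dependent objects too.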
- (If you want to check what DROP ... CASCADE will do, - run DROP without CASCADE and read the - DETAIL output.) + (If you want to check what DROP ... CASCADE will do, + run DROP without CASCADE and read the + DETAIL output.) - Almost all DROP commands in PostgreSQL support + Almost all DROP commands in PostgreSQL support specifying CASCADE. Of course, the nature of the possible dependencies varies with the type of the object. You can also write RESTRICT instead of @@ -4020,7 +4020,7 @@ DROP TABLE products CASCADE; According to the SQL standard, specifying either RESTRICT or CASCADE is - required in a DROP command. No database system actually + required in a DROP command. No database system actually enforces that rule, but whether the default behavior is RESTRICT or CASCADE varies across systems. @@ -4028,18 +4028,18 @@ DROP TABLE products CASCADE; - If a DROP command lists multiple + If a DROP command lists multiple objects, CASCADE is only required when there are dependencies outside the specified group. For example, when saying DROP TABLE tab1, tab2 the existence of a foreign - key referencing tab1 from tab2 would not mean + key referencing tab1 from tab2 would not mean that CASCADE is needed to succeed. For user-defined functions, PostgreSQL tracks dependencies associated with a function's externally-visible properties, - such as its argument and result types, but not dependencies + such as its argument and result types, but not dependencies that could only be known by examining the function body. As an example, consider this situation: @@ -4056,11 +4056,11 @@ CREATE FUNCTION get_color_note (rainbow) RETURNS text AS (See for an explanation of SQL-language functions.) PostgreSQL will be aware that - the get_color_note function depends on the rainbow + the get_color_note function depends on the rainbow type: dropping the type would force dropping the function, because its - argument type would no longer be defined. But PostgreSQL - will not consider get_color_note to depend on - the my_colors table, and so will not drop the function if + argument type would no longer be defined. But PostgreSQL + will not consider get_color_note to depend on + the my_colors table, and so will not drop the function if the table is dropped. While there are disadvantages to this approach, there are also benefits. The function is still valid in some sense if the table is missing, though executing it would cause an error; creating a new diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml index 23af270e32..7ef996b51f 100644 --- a/doc/src/sgml/dfunc.sgml +++ b/doc/src/sgml/dfunc.sgml @@ -9,7 +9,7 @@ C, they must be compiled and linked in a special way to produce a file that can be dynamically loaded by the server. To be precise, a shared library needs to be - created.shared library + created.shared library @@ -30,7 +30,7 @@ executables: first the source files are compiled into object files, then the object files are linked together. The object files need to be created as position-independent code - (PIC),PIC which + (PIC),PIC which conceptually means that they can be placed at an arbitrary location in memory when they are loaded by the executable. (Object files intended for executables are usually not compiled that way.) The @@ -57,8 +57,8 @@ - FreeBSD - FreeBSDshared library + FreeBSD + FreeBSDshared library @@ -70,15 +70,15 @@ gcc -fPIC -c foo.c gcc -shared -o foo.so foo.o This is applicable as of version 3.0 of - FreeBSD. + FreeBSD. 
- HP-UX - HP-UXshared library + HP-UX + HP-UXshared library @@ -97,7 +97,7 @@ gcc -fPIC -c foo.c ld -b -o foo.sl foo.o - HP-UX uses the extension + HP-UX uses the extension .sl for shared libraries, unlike most other systems. @@ -106,8 +106,8 @@ ld -b -o foo.sl foo.o - Linux - Linuxshared library + Linux + Linuxshared library @@ -125,8 +125,8 @@ cc -shared -o foo.so foo.o - macOS - macOSshared library + macOS + macOSshared library @@ -141,8 +141,8 @@ cc -bundle -flat_namespace -undefined suppress -o foo.so foo.o - NetBSD - NetBSDshared library + NetBSD + NetBSDshared library @@ -161,8 +161,8 @@ gcc -shared -o foo.so foo.o - OpenBSD - OpenBSDshared library + OpenBSD + OpenBSDshared library @@ -179,17 +179,17 @@ ld -Bshareable -o foo.so foo.o - Solaris - Solarisshared library + Solaris + Solarisshared library The compiler flag to create PIC is with the Sun compiler and - with GCC. To + with GCC. To link shared libraries, the compiler option is with either compiler or alternatively - with GCC. + with GCC. cc -KPIC -c foo.c cc -G -o foo.so foo.o diff --git a/doc/src/sgml/dict-int.sgml b/doc/src/sgml/dict-int.sgml index d49f3e2a3a..04cf14a73d 100644 --- a/doc/src/sgml/dict-int.sgml +++ b/doc/src/sgml/dict-int.sgml @@ -8,7 +8,7 @@ - dict_int is an example of an add-on dictionary template + dict_int is an example of an add-on dictionary template for full-text search. The motivation for this example dictionary is to control the indexing of integers (signed and unsigned), allowing such numbers to be indexed while preventing excessive growth in the number of @@ -25,17 +25,17 @@ - The maxlen parameter specifies the maximum number of + The maxlen parameter specifies the maximum number of digits allowed in an integer word. The default value is 6. - The rejectlong parameter specifies whether an overlength - integer should be truncated or ignored. If rejectlong is - false (the default), the dictionary returns the first - maxlen digits of the integer. If rejectlong is - true, the dictionary treats an overlength integer as a stop + The rejectlong parameter specifies whether an overlength + integer should be truncated or ignored. If rejectlong is + false (the default), the dictionary returns the first + maxlen digits of the integer. If rejectlong is + true, the dictionary treats an overlength integer as a stop word, so that it will not be indexed. Note that this also means that such an integer cannot be searched for. @@ -47,8 +47,8 @@ Usage - Installing the dict_int extension creates a text search - template intdict_template and a dictionary intdict + Installing the dict_int extension creates a text search + template intdict_template and a dictionary intdict based on it, with the default parameters. You can alter the parameters, for example diff --git a/doc/src/sgml/dict-xsyn.sgml b/doc/src/sgml/dict-xsyn.sgml index 42362ffbc8..bf4965c36f 100644 --- a/doc/src/sgml/dict-xsyn.sgml +++ b/doc/src/sgml/dict-xsyn.sgml @@ -8,7 +8,7 @@ - dict_xsyn (Extended Synonym Dictionary) is an example of an + dict_xsyn (Extended Synonym Dictionary) is an example of an add-on dictionary template for full-text search. This dictionary type replaces words with groups of their synonyms, and so makes it possible to search for a word using any of its synonyms. @@ -18,41 +18,41 @@ Configuration - A dict_xsyn dictionary accepts the following options: + A dict_xsyn dictionary accepts the following options: - matchorig controls whether the original word is accepted by - the dictionary. Default is true. 
+ matchorig controls whether the original word is accepted by + the dictionary. Default is true. - matchsynonyms controls whether the synonyms are - accepted by the dictionary. Default is false. + matchsynonyms controls whether the synonyms are + accepted by the dictionary. Default is false. - keeporig controls whether the original word is included in - the dictionary's output. Default is true. + keeporig controls whether the original word is included in + the dictionary's output. Default is true. - keepsynonyms controls whether the synonyms are included in - the dictionary's output. Default is true. + keepsynonyms controls whether the synonyms are included in + the dictionary's output. Default is true. - rules is the base name of the file containing the list of + rules is the base name of the file containing the list of synonyms. This file must be stored in - $SHAREDIR/tsearch_data/ (where $SHAREDIR means - the PostgreSQL installation's shared-data directory). - Its name must end in .rules (which is not to be included in - the rules parameter). + $SHAREDIR/tsearch_data/ (where $SHAREDIR means + the PostgreSQL installation's shared-data directory). + Its name must end in .rules (which is not to be included in + the rules parameter). @@ -71,15 +71,15 @@ word syn1 syn2 syn3 - The sharp (#) sign is a comment delimiter. It may appear at + The sharp (#) sign is a comment delimiter. It may appear at any position in a line. The rest of the line will be skipped. - Look at xsyn_sample.rules, which is installed in - $SHAREDIR/tsearch_data/, for an example. + Look at xsyn_sample.rules, which is installed in + $SHAREDIR/tsearch_data/, for an example. @@ -87,8 +87,8 @@ word syn1 syn2 syn3 Usage - Installing the dict_xsyn extension creates a text search - template xsyn_template and a dictionary xsyn + Installing the dict_xsyn extension creates a text search + template xsyn_template and a dictionary xsyn based on it, with default parameters. You can alter the parameters, for example diff --git a/doc/src/sgml/diskusage.sgml b/doc/src/sgml/diskusage.sgml index 461deb9dba..ba23084354 100644 --- a/doc/src/sgml/diskusage.sgml +++ b/doc/src/sgml/diskusage.sgml @@ -5,7 +5,7 @@ This chapter discusses how to monitor the disk usage of a - PostgreSQL database system. + PostgreSQL database system. @@ -18,10 +18,10 @@ Each table has a primary heap disk file where most of the data is stored. If the table has any columns with potentially-wide values, - there also might be a TOAST file associated with the table, + there also might be a TOAST file associated with the table, which is used to store values too wide to fit comfortably in the main table (see ). There will be one valid index - on the TOAST table, if present. There also might be indexes + on the TOAST table, if present. There also might be indexes associated with the base table. Each table and index is stored in a separate disk file — possibly more than one file, if the file would exceed one gigabyte. Naming conventions for these files are described @@ -39,7 +39,7 @@ - Using psql on a recently vacuumed or analyzed database, + Using psql on a recently vacuumed or analyzed database, you can issue queries to see the disk usage of any table: SELECT pg_relation_filepath(oid), relpages FROM pg_class WHERE relname = 'customer'; @@ -49,14 +49,14 @@ SELECT pg_relation_filepath(oid), relpages FROM pg_class WHERE relname = 'custom base/16384/16806 | 60 (1 row) - Each page is typically 8 kilobytes. 
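 So, as a rough illustration with the default 8 kB block size, the 60 pages above amount to roughly 480 kB; the pg_relation_size function reports the same figure directly in bytes:

SELECT pg_relation_size('customer');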
(Remember, relpages - is only updated by VACUUM, ANALYZE, and - a few DDL commands such as CREATE INDEX.) The file path name + Each page is typically 8 kilobytes. (Remember, relpages + is only updated by VACUUM, ANALYZE, and + a few DDL commands such as CREATE INDEX.) The file path name is of interest if you want to examine the table's disk file directly. - To show the space used by TOAST tables, use a query + To show the space used by TOAST tables, use a query like the following: SELECT relname, relpages diff --git a/doc/src/sgml/dml.sgml b/doc/src/sgml/dml.sgml index 071cdb610f..bc016d3cae 100644 --- a/doc/src/sgml/dml.sgml +++ b/doc/src/sgml/dml.sgml @@ -285,42 +285,42 @@ DELETE FROM products; Sometimes it is useful to obtain data from modified rows while they are - being manipulated. The INSERT, UPDATE, - and DELETE commands all have an - optional RETURNING clause that supports this. Use - of RETURNING avoids performing an extra database query to + being manipulated. The INSERT, UPDATE, + and DELETE commands all have an + optional RETURNING clause that supports this. Use + of RETURNING avoids performing an extra database query to collect the data, and is especially valuable when it would otherwise be difficult to identify the modified rows reliably. - The allowed contents of a RETURNING clause are the same as - a SELECT command's output list + The allowed contents of a RETURNING clause are the same as + a SELECT command's output list (see ). It can contain column names of the command's target table, or value expressions using those - columns. A common shorthand is RETURNING *, which selects + columns. A common shorthand is RETURNING *, which selects all columns of the target table in order. - In an INSERT, the data available to RETURNING is + In an INSERT, the data available to RETURNING is the row as it was inserted. This is not so useful in trivial inserts, since it would just repeat the data provided by the client. But it can be very handy when relying on computed default values. For example, - when using a serial - column to provide unique identifiers, RETURNING can return + when using a serial + column to provide unique identifiers, RETURNING can return the ID assigned to a new row: CREATE TABLE users (firstname text, lastname text, id serial primary key); INSERT INTO users (firstname, lastname) VALUES ('Joe', 'Cool') RETURNING id; - The RETURNING clause is also very useful - with INSERT ... SELECT. + The RETURNING clause is also very useful + with INSERT ... SELECT. - In an UPDATE, the data available to RETURNING is + In an UPDATE, the data available to RETURNING is the new content of the modified row. For example: UPDATE products SET price = price * 1.10 @@ -330,7 +330,7 @@ UPDATE products SET price = price * 1.10 - In a DELETE, the data available to RETURNING is + In a DELETE, the data available to RETURNING is the content of the deleted row. For example: DELETE FROM products @@ -341,9 +341,9 @@ DELETE FROM products If there are triggers () on the target table, - the data available to RETURNING is the row as modified by + the data available to RETURNING is the row as modified by the triggers. Thus, inspecting columns computed by triggers is another - common use-case for RETURNING. + common use-case for RETURNING. diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml index ff58a17335..3a5b88ca1c 100644 --- a/doc/src/sgml/docguide.sgml +++ b/doc/src/sgml/docguide.sgml @@ -449,7 +449,7 @@ checking for fop... 
fop To produce HTML documentation with the stylesheet used on postgresql.org instead of the + url="https://www.postgresql.org/docs/current">postgresql.org instead of the default simple style use: doc/src/sgml$ make STYLE=website html diff --git a/doc/src/sgml/earthdistance.sgml b/doc/src/sgml/earthdistance.sgml index 6dedc4a5f4..1bdcf64629 100644 --- a/doc/src/sgml/earthdistance.sgml +++ b/doc/src/sgml/earthdistance.sgml @@ -8,18 +8,18 @@ - The earthdistance module provides two different approaches to + The earthdistance module provides two different approaches to calculating great circle distances on the surface of the Earth. The one - described first depends on the cube module (which - must be installed before earthdistance can be - installed). The second one is based on the built-in point data type, + described first depends on the cube module (which + must be installed before earthdistance can be + installed). The second one is based on the built-in point data type, using longitude and latitude for the coordinates. In this module, the Earth is assumed to be perfectly spherical. (If that's too inaccurate for you, you might want to look at the - PostGIS + PostGIS project.) @@ -29,13 +29,13 @@ Data is stored in cubes that are points (both corners are the same) using 3 coordinates representing the x, y, and z distance from the center of the - Earth. A domain earth over cube is provided, which + Earth. A domain earth over cube is provided, which includes constraint checks that the value meets these restrictions and is reasonably close to the actual surface of the Earth. - The radius of the Earth is obtained from the earth() + The radius of the Earth is obtained from the earth() function. It is given in meters. But by changing this one function you can change the module to use some other units, or to use a different value of the radius that you feel is more appropriate. @@ -43,8 +43,8 @@ This package has applications to astronomical databases as well. - Astronomers will probably want to change earth() to return a - radius of 180/pi() so that distances are in degrees. + Astronomers will probably want to change earth() to return a + radius of 180/pi() so that distances are in degrees. @@ -123,11 +123,11 @@ earth_box(earth, float8)earth_box cube Returns a box suitable for an indexed search using the cube - @> + @> operator for points within a given great circle distance of a location. Some points in this box are further than the specified great circle distance from the location, so a second check using - earth_distance should be included in the query. + earth_distance should be included in the query. @@ -141,7 +141,7 @@ The second part of the module relies on representing Earth locations as - values of type point, in which the first component is taken to + values of type point, in which the first component is taken to represent longitude in degrees, and the second component is taken to represent latitude in degrees. Points are taken as (longitude, latitude) and not vice versa because longitude is closer to the intuitive idea of @@ -165,7 +165,7 @@ - point <@> point + point <@> point float8 Gives the distance in statute miles between two points on the Earth's surface. @@ -176,15 +176,15 @@
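 For example (coordinates are illustrative, given in the (longitude, latitude) order required by this representation):

SELECT point(-87.6, 41.9) <@> point(-73.9, 40.7) AS miles;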
- Note that unlike the cube-based part of the module, units - are hardwired here: changing the earth() function will + Note that unlike the cube-based part of the module, units + are hardwired here: changing the earth() function will not affect the results of this operator. One disadvantage of the longitude/latitude representation is that you need to be careful about the edge conditions near the poles - and near +/- 180 degrees of longitude. The cube-based + and near +/- 180 degrees of longitude. The cube-based representation avoids these discontinuities. diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index 716a101838..0f9ff3a8eb 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -46,7 +46,7 @@ correctness. Third, embedded SQL in C is specified in the SQL standard and supported by many other SQL database systems. The - PostgreSQL implementation is designed to match this + PostgreSQL implementation is designed to match this standard as much as possible, and it is usually possible to port embedded SQL programs written for other SQL databases to PostgreSQL with relative @@ -97,19 +97,19 @@ EXEC SQL CONNECT TO target AS - dbname@hostname:port + dbname@hostname:port - tcp:postgresql://hostname:port/dbname?options + tcp:postgresql://hostname:port/dbname?options - unix:postgresql://hostname:port/dbname?options + unix:postgresql://hostname:port/dbname?options @@ -475,7 +475,7 @@ EXEC SQL COMMIT; In the default mode, statements are committed only when EXEC SQL COMMIT is issued. The embedded SQL interface also supports autocommit of transactions (similar to - psql's default behavior) via the + psql's default behavior) via the command-line option to ecpg (see ) or via the EXEC SQL SET AUTOCOMMIT TO ON statement. In autocommit mode, each command is @@ -507,7 +507,7 @@ EXEC SQL COMMIT; - EXEC SQL PREPARE TRANSACTION transaction_id + EXEC SQL PREPARE TRANSACTION transaction_id Prepare the current transaction for two-phase commit. @@ -516,7 +516,7 @@ EXEC SQL COMMIT; - EXEC SQL COMMIT PREPARED transaction_id + EXEC SQL COMMIT PREPARED transaction_id Commit a transaction that is in prepared state. @@ -525,7 +525,7 @@ EXEC SQL COMMIT; - EXEC SQL ROLLBACK PREPARED transaction_id + EXEC SQL ROLLBACK PREPARED transaction_id Roll back a transaction that is in prepared state. @@ -720,7 +720,7 @@ EXEC SQL int i = 4; The definition of a structure or union also must be listed inside - a DECLARE section. Otherwise the preprocessor cannot + a DECLARE section. Otherwise the preprocessor cannot handle these types since it does not know the definition. @@ -890,8 +890,8 @@ do - character(n), varchar(n), text - char[n+1], VARCHAR[n+1]declared in ecpglib.h + character(n), varchar(n), text + char[n+1], VARCHAR[n+1]declared in ecpglib.h @@ -955,7 +955,7 @@ EXEC SQL END DECLARE SECTION; The other way is using the VARCHAR type, which is a special type provided by ECPG. The definition on an array of type VARCHAR is converted into a - named struct for every variable. A declaration like: + named struct for every variable. A declaration like: VARCHAR var[180]; @@ -994,10 +994,10 @@ struct varchar_var { int len; char arr[180]; } var; ECPG contains some special types that help you to interact easily with some special data types from the PostgreSQL server. In particular, it has implemented support for the - numeric, decimal, date, timestamp, - and interval types. These data types cannot usefully be + numeric, decimal, date, timestamp, + and interval types. 
These data types cannot usefully be mapped to primitive host variable types (such - as int, long long int, + as int, long long int, or char[]), because they have a complex internal structure. Applications deal with these types by declaring host variables in special types and accessing them using functions in @@ -1942,10 +1942,10 @@ free(out); The numeric type offers to do calculations with arbitrary precision. See for the equivalent type in the - PostgreSQL server. Because of the arbitrary precision this + PostgreSQL server. Because of the arbitrary precision this variable needs to be able to expand and shrink dynamically. That's why you can only create numeric variables on the heap, by means of the - PGTYPESnumeric_new and PGTYPESnumeric_free + PGTYPESnumeric_new and PGTYPESnumeric_free functions. The decimal type, which is similar but limited in precision, can be created on the stack as well as on the heap. @@ -2092,17 +2092,17 @@ int PGTYPESnumeric_cmp(numeric *var1, numeric *var2) - 1, if var1 is bigger than var2 + 1, if var1 is bigger than var2 - -1, if var1 is smaller than var2 + -1, if var1 is smaller than var2 - 0, if var1 and var2 are equal + 0, if var1 and var2 are equal @@ -2119,7 +2119,7 @@ int PGTYPESnumeric_cmp(numeric *var1, numeric *var2) int PGTYPESnumeric_from_int(signed int int_val, numeric *var); This function accepts a variable of type signed int and stores it - in the numeric variable var. Upon success, 0 is returned and + in the numeric variable var. Upon success, 0 is returned and -1 in case of a failure. @@ -2134,7 +2134,7 @@ int PGTYPESnumeric_from_int(signed int int_val, numeric *var); int PGTYPESnumeric_from_long(signed long int long_val, numeric *var); This function accepts a variable of type signed long int and stores it - in the numeric variable var. Upon success, 0 is returned and + in the numeric variable var. Upon success, 0 is returned and -1 in case of a failure. @@ -2149,7 +2149,7 @@ int PGTYPESnumeric_from_long(signed long int long_val, numeric *var); int PGTYPESnumeric_copy(numeric *src, numeric *dst); This function copies over the value of the variable that - src points to into the variable that dst + src points to into the variable that dst points to. It returns 0 on success and -1 if an error occurs. @@ -2164,7 +2164,7 @@ int PGTYPESnumeric_copy(numeric *src, numeric *dst); int PGTYPESnumeric_from_double(double d, numeric *dst); This function accepts a variable of type double and stores the result - in the variable that dst points to. It returns 0 on success + in the variable that dst points to. It returns 0 on success and -1 if an error occurs. @@ -2179,10 +2179,10 @@ int PGTYPESnumeric_from_double(double d, numeric *dst); int PGTYPESnumeric_to_double(numeric *nv, double *dp) The function converts the numeric value from the variable that - nv points to into the double variable that dp points + nv points to into the double variable that dp points to. It returns 0 on success and -1 if an error occurs, including - overflow. On overflow, the global variable errno will be set - to PGTYPES_NUM_OVERFLOW additionally. + overflow. On overflow, the global variable errno will be set + to PGTYPES_NUM_OVERFLOW additionally. @@ -2196,10 +2196,10 @@ int PGTYPESnumeric_to_double(numeric *nv, double *dp) int PGTYPESnumeric_to_int(numeric *nv, int *ip); The function converts the numeric value from the variable that - nv points to into the integer variable that ip + nv points to into the integer variable that ip points to. 
It returns 0 on success and -1 if an error occurs, including - overflow. On overflow, the global variable errno will be set - to PGTYPES_NUM_OVERFLOW additionally. + overflow. On overflow, the global variable errno will be set + to PGTYPES_NUM_OVERFLOW additionally. @@ -2213,10 +2213,10 @@ int PGTYPESnumeric_to_int(numeric *nv, int *ip); int PGTYPESnumeric_to_long(numeric *nv, long *lp); The function converts the numeric value from the variable that - nv points to into the long integer variable that - lp points to. It returns 0 on success and -1 if an error + nv points to into the long integer variable that + lp points to. It returns 0 on success and -1 if an error occurs, including overflow. On overflow, the global variable - errno will be set to PGTYPES_NUM_OVERFLOW + errno will be set to PGTYPES_NUM_OVERFLOW additionally. @@ -2231,10 +2231,10 @@ int PGTYPESnumeric_to_long(numeric *nv, long *lp); int PGTYPESnumeric_to_decimal(numeric *src, decimal *dst); The function converts the numeric value from the variable that - src points to into the decimal variable that - dst points to. It returns 0 on success and -1 if an error + src points to into the decimal variable that + dst points to. It returns 0 on success and -1 if an error occurs, including overflow. On overflow, the global variable - errno will be set to PGTYPES_NUM_OVERFLOW + errno will be set to PGTYPES_NUM_OVERFLOW additionally. @@ -2249,8 +2249,8 @@ int PGTYPESnumeric_to_decimal(numeric *src, decimal *dst); int PGTYPESnumeric_from_decimal(decimal *src, numeric *dst); The function converts the decimal value from the variable that - src points to into the numeric variable that - dst points to. It returns 0 on success and -1 if an error + src points to into the numeric variable that + dst points to. It returns 0 on success and -1 if an error occurs. Since the decimal type is implemented as a limited version of the numeric type, overflow cannot occur with this conversion. @@ -2265,7 +2265,7 @@ int PGTYPESnumeric_from_decimal(decimal *src, numeric *dst); The date type in C enables your programs to deal with data of the SQL type date. See for the equivalent type in the - PostgreSQL server. + PostgreSQL server. The following functions can be used to work with the date type: @@ -2292,8 +2292,8 @@ date PGTYPESdate_from_timestamp(timestamp dt); date PGTYPESdate_from_asc(char *str, char **endptr); - The function receives a C char* string str and a pointer to - a C char* string endptr. At the moment ECPG always parses + The function receives a C char* string str and a pointer to + a C char* string endptr. At the moment ECPG always parses the complete string and so it currently does not support to store the address of the first invalid character in *endptr. You can safely set endptr to NULL. @@ -2397,9 +2397,9 @@ date PGTYPESdate_from_asc(char *str, char **endptr); char *PGTYPESdate_to_asc(date dDate); - The function receives the date dDate as its only parameter. - It will output the date in the form 1999-01-18, i.e., in the - YYYY-MM-DD format. + The function receives the date dDate as its only parameter. + It will output the date in the form 1999-01-18, i.e., in the + YYYY-MM-DD format. @@ -2414,11 +2414,11 @@ char *PGTYPESdate_to_asc(date dDate); void PGTYPESdate_julmdy(date d, int *mdy); - The function receives the date d and a pointer to an array - of 3 integer values mdy. 
The variable name indicates - the sequential order: mdy[0] will be set to contain the - number of the month, mdy[1] will be set to the value of the - day and mdy[2] will contain the year. + The function receives the date d and a pointer to an array + of 3 integer values mdy. The variable name indicates + the sequential order: mdy[0] will be set to contain the + number of the month, mdy[1] will be set to the value of the + day and mdy[2] will contain the year. @@ -2432,7 +2432,7 @@ void PGTYPESdate_julmdy(date d, int *mdy); void PGTYPESdate_mdyjul(int *mdy, date *jdate); - The function receives the array of the 3 integers (mdy) as + The function receives the array of the 3 integers (mdy) as its first argument and as its second argument a pointer to a variable of type date that should hold the result of the operation. @@ -2447,7 +2447,7 @@ void PGTYPESdate_mdyjul(int *mdy, date *jdate); int PGTYPESdate_dayofweek(date d); - The function receives the date variable d as its only + The function receives the date variable d as its only argument and returns an integer that indicates the day of the week for this date. @@ -2499,7 +2499,7 @@ int PGTYPESdate_dayofweek(date d); void PGTYPESdate_today(date *d); - The function receives a pointer to a date variable (d) + The function receives a pointer to a date variable (d) that it sets to the current date. @@ -2514,9 +2514,9 @@ void PGTYPESdate_today(date *d); int PGTYPESdate_fmt_asc(date dDate, char *fmtstring, char *outbuf); - The function receives the date to convert (dDate), the - format mask (fmtstring) and the string that will hold the - textual representation of the date (outbuf). + The function receives the date to convert (dDate), the + format mask (fmtstring) and the string that will hold the + textual representation of the date (outbuf). On success, 0 is returned and a negative value if an error occurred. @@ -2637,9 +2637,9 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str); The function receives a pointer to the date value that should hold the - result of the operation (d), the format mask to use for - parsing the date (fmt) and the C char* string containing - the textual representation of the date (str). The textual + result of the operation (d), the format mask to use for + parsing the date (fmt) and the C char* string containing + the textual representation of the date (str). The textual representation is expected to match the format mask. However you do not need to have a 1:1 mapping of the string to the format mask. The function only analyzes the sequential order and looks for the literals @@ -2742,7 +2742,7 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str); The timestamp type in C enables your programs to deal with data of the SQL type timestamp. See for the equivalent - type in the PostgreSQL server. + type in the PostgreSQL server. The following functions can be used to work with the timestamp type: @@ -2756,8 +2756,8 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str); timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr); - The function receives the string to parse (str) and a - pointer to a C char* (endptr). + The function receives the string to parse (str) and a + pointer to a C char* (endptr). At the moment ECPG always parses the complete string and so it currently does not support to store the address of the first invalid character in *endptr. @@ -2765,15 +2765,15 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr); The function returns the parsed timestamp on success. 
On error, - PGTYPESInvalidTimestamp is returned and errno is - set to PGTYPES_TS_BAD_TIMESTAMP. See for important notes on this value. + PGTYPESInvalidTimestamp is returned and errno is + set to PGTYPES_TS_BAD_TIMESTAMP. See for important notes on this value. In general, the input string can contain any combination of an allowed date specification, a whitespace character and an allowed time specification. Note that time zones are not supported by ECPG. It can parse them but does not apply any calculation as the - PostgreSQL server does for example. Timezone + PostgreSQL server does for example. Timezone specifiers are silently discarded. @@ -2819,7 +2819,7 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr); char *PGTYPEStimestamp_to_asc(timestamp tstamp); - The function receives the timestamp tstamp as + The function receives the timestamp tstamp as its only argument and returns an allocated string that contains the textual representation of the timestamp. @@ -2835,7 +2835,7 @@ char *PGTYPEStimestamp_to_asc(timestamp tstamp); void PGTYPEStimestamp_current(timestamp *ts); The function retrieves the current timestamp and saves it into the - timestamp variable that ts points to. + timestamp variable that ts points to. @@ -2849,8 +2849,8 @@ void PGTYPEStimestamp_current(timestamp *ts); int PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmtstr); The function receives a pointer to the timestamp to convert as its - first argument (ts), a pointer to the output buffer - (output), the maximal length that has been allocated for + first argument (ts), a pointer to the output buffer + (output), the maximal length that has been allocated for the output buffer (str_len) and the format mask to use for the conversion (fmtstr). @@ -2861,7 +2861,7 @@ int PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmt You can use the following format specifiers for the format mask. The format specifiers are the same ones that are used in the - strftime function in libc. Any + strftime function in libc. Any non-format specifier will be copied into the output buffer. + ("). The text matching the portion of the pattern between these markers is returned. - Some examples, with #" delimiting the return string: + Some examples, with #" delimiting the return string: substring('foobar' from '%#"o_b#"%' for '#') oob substring('foobar' from '#"o_b#"%' for '#') NULL @@ -4191,7 +4191,7 @@ substring('foobar' from '#"o_b#"%' for '#') NULL POSIX regular expressions provide a more powerful means for pattern matching than the LIKE and - SIMILAR TO operators. + SIMILAR TO operators. Many Unix tools such as egrep, sed, or awk use a pattern matching language that is similar to the one described here. @@ -4228,7 +4228,7 @@ substring('foobar' from '#"o_b#"%' for '#') NULL - The substring function with two parameters, + The substring function with two parameters, substring(string from pattern), provides extraction of a substring @@ -4253,30 +4253,30 @@ substring('foobar' from 'o(.)b') o - The regexp_replace function provides substitution of + The regexp_replace function provides substitution of new text for substrings that match POSIX regular expression patterns. It has the syntax - regexp_replace(source, - pattern, replacement - , flags ). - The source string is returned unchanged if - there is no match to the pattern. If there is a - match, the source string is returned with the - replacement string substituted for the matching - substring. 
The replacement string can contain - \n, where n is 1 + regexp_replace(source, + pattern, replacement + , flags ). + The source string is returned unchanged if + there is no match to the pattern. If there is a + match, the source string is returned with the + replacement string substituted for the matching + substring. The replacement string can contain + \n, where n is 1 through 9, to indicate that the source substring matching the - n'th parenthesized subexpression of the pattern should be - inserted, and it can contain \& to indicate that the + n'th parenthesized subexpression of the pattern should be + inserted, and it can contain \& to indicate that the substring matching the entire pattern should be inserted. Write - \\ if you need to put a literal backslash in the replacement + \\ if you need to put a literal backslash in the replacement text. - The flags parameter is an optional text + The flags parameter is an optional text string containing zero or more single-letter flags that change the - function's behavior. Flag i specifies case-insensitive - matching, while flag g specifies replacement of each matching + function's behavior. Flag i specifies case-insensitive + matching, while flag g specifies replacement of each matching substring rather than only the first one. Supported flags (though - not g) are + not g) are described in . @@ -4293,22 +4293,22 @@ regexp_replace('foobarbaz', 'b(..)', E'X\\1Y', 'g') - The regexp_match function returns a text array of + The regexp_match function returns a text array of captured substring(s) resulting from the first match of a POSIX regular expression pattern to a string. It has the syntax - regexp_match(string, - pattern , flags ). - If there is no match, the result is NULL. - If a match is found, and the pattern contains no + regexp_match(string, + pattern , flags ). + If there is no match, the result is NULL. + If a match is found, and the pattern contains no parenthesized subexpressions, then the result is a single-element text array containing the substring matching the whole pattern. - If a match is found, and the pattern contains + If a match is found, and the pattern contains parenthesized subexpressions, then the result is a text array - whose n'th element is the substring matching - the n'th parenthesized subexpression of - the pattern (not counting non-capturing + whose n'th element is the substring matching + the n'th parenthesized subexpression of + the pattern (not counting non-capturing parentheses; see below for details). - The flags parameter is an optional text string + The flags parameter is an optional text string containing zero or more single-letter flags that change the function's behavior. Supported flags are described in . @@ -4330,7 +4330,7 @@ SELECT regexp_match('foobarbequebaz', '(bar)(beque)'); (1 row) In the common case where you just want the whole matching substring - or NULL for no match, write something like + or NULL for no match, write something like SELECT (regexp_match('foobarbequebaz', 'bar.*que'))[1]; regexp_match @@ -4341,20 +4341,20 @@ SELECT (regexp_match('foobarbequebaz', 'bar.*que'))[1]; - The regexp_matches function returns a set of text arrays + The regexp_matches function returns a set of text arrays of captured substring(s) resulting from matching a POSIX regular expression pattern to a string. It has the same syntax as regexp_match. 
This function returns no rows if there is no match, one row if there is - a match and the g flag is not given, or N - rows if there are N matches and the g flag + a match and the g flag is not given, or N + rows if there are N matches and the g flag is given. Each returned row is a text array containing the whole matched substring or the substrings matching parenthesized - subexpressions of the pattern, just as described above + subexpressions of the pattern, just as described above for regexp_match. - regexp_matches accepts all the flags shown + regexp_matches accepts all the flags shown in , plus - the g flag which commands it to return all matches, not + the g flag which commands it to return all matches, not just the first one. @@ -4377,46 +4377,46 @@ SELECT regexp_matches('foobarbequebazilbarfbonk', '(b[^b]+)(b[^b]+)', 'g'); - In most cases regexp_matches() should be used with - the g flag, since if you only want the first match, it's - easier and more efficient to use regexp_match(). - However, regexp_match() only exists - in PostgreSQL version 10 and up. When working in older - versions, a common trick is to place a regexp_matches() + In most cases regexp_matches() should be used with + the g flag, since if you only want the first match, it's + easier and more efficient to use regexp_match(). + However, regexp_match() only exists + in PostgreSQL version 10 and up. When working in older + versions, a common trick is to place a regexp_matches() call in a sub-select, for example: SELECT col1, (SELECT regexp_matches(col2, '(bar)(beque)')) FROM tab; - This produces a text array if there's a match, or NULL if - not, the same as regexp_match() would do. Without the + This produces a text array if there's a match, or NULL if + not, the same as regexp_match() would do. Without the sub-select, this query would produce no output at all for table rows without a match, which is typically not the desired behavior. - The regexp_split_to_table function splits a string using a POSIX + The regexp_split_to_table function splits a string using a POSIX regular expression pattern as a delimiter. It has the syntax - regexp_split_to_table(string, pattern - , flags ). - If there is no match to the pattern, the function returns the - string. If there is at least one match, for each match it returns + regexp_split_to_table(string, pattern + , flags ). + If there is no match to the pattern, the function returns the + string. If there is at least one match, for each match it returns the text from the end of the last match (or the beginning of the string) to the beginning of the match. When there are no more matches, it returns the text from the end of the last match to the end of the string. - The flags parameter is an optional text string containing + The flags parameter is an optional text string containing zero or more single-letter flags that change the function's behavior. regexp_split_to_table supports the flags described in . - The regexp_split_to_array function behaves the same as - regexp_split_to_table, except that regexp_split_to_array - returns its result as an array of text. It has the syntax - regexp_split_to_array(string, pattern - , flags ). - The parameters are the same as for regexp_split_to_table. + The regexp_split_to_array function behaves the same as + regexp_split_to_table, except that regexp_split_to_array + returns its result as an array of text. It has the syntax + regexp_split_to_array(string, pattern + , flags ). + The parameters are the same as for regexp_split_to_table. 
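 A short sketch of the array form:

SELECT regexp_split_to_array('the quick brown fox', E'\\s+');
 regexp_split_to_array 
-----------------------
 {the,quick,brown,fox}
(1 row)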
@@ -4471,8 +4471,8 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; zero-length matches that occur at the start or end of the string or immediately after a previous match. This is contrary to the strict definition of regexp matching that is implemented by - regexp_match and - regexp_matches, but is usually the most convenient behavior + regexp_match and + regexp_matches, but is usually the most convenient behavior in practice. Other software systems such as Perl use similar definitions. @@ -4491,16 +4491,16 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; Regular expressions (REs), as defined in POSIX 1003.2, come in two forms: - extended REs or EREs + extended REs or EREs (roughly those of egrep), and - basic REs or BREs + basic REs or BREs (roughly those of ed). PostgreSQL supports both forms, and also implements some extensions that are not in the POSIX standard, but have become widely used due to their availability in programming languages such as Perl and Tcl. REs using these non-POSIX extensions are called - advanced REs or AREs + advanced REs or AREs in this documentation. AREs are almost an exact superset of EREs, but BREs have several notational incompatibilities (as well as being much more limited). @@ -4510,9 +4510,9 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - PostgreSQL always initially presumes that a regular + PostgreSQL always initially presumes that a regular expression follows the ARE rules. However, the more limited ERE or - BRE rules can be chosen by prepending an embedded option + BRE rules can be chosen by prepending an embedded option to the RE pattern, as described in . This can be useful for compatibility with applications that expect exactly the POSIX 1003.2 rules. @@ -4527,15 +4527,15 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - A branch is zero or more quantified atoms or - constraints, concatenated. + A branch is zero or more quantified atoms or + constraints, concatenated. It matches a match for the first, followed by a match for the second, etc; an empty branch matches the empty string. - A quantified atom is an atom possibly followed - by a single quantifier. + A quantified atom is an atom possibly followed + by a single quantifier. Without a quantifier, it matches a match for the atom. With a quantifier, it can match some number of matches of the atom. An atom can be any of the possibilities @@ -4545,7 +4545,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - A constraint matches an empty string, but matches only when + A constraint matches an empty string, but matches only when specific conditions are met. A constraint can be used where an atom could be used, except it cannot be followed by a quantifier. The simple constraints are shown in @@ -4567,57 +4567,57 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - (re) - (where re is any regular expression) + (re) + (where re is any regular expression) matches a match for - re, with the match noted for possible reporting + re, with the match noted for possible reporting - (?:re) + (?:re) as above, but the match is not noted for reporting - (a non-capturing set of parentheses) + (a non-capturing set of parentheses) (AREs only) - . + . 
matches any single character - [chars] - a bracket expression, - matching any one of the chars (see + [chars] + a bracket expression, + matching any one of the chars (see for more detail) - \k - (where k is a non-alphanumeric character) + \k + (where k is a non-alphanumeric character) matches that character taken as an ordinary character, - e.g., \\ matches a backslash character + e.g., \\ matches a backslash character - \c - where c is alphanumeric + \c + where c is alphanumeric (possibly followed by other characters) - is an escape, see - (AREs only; in EREs and BREs, this matches c) + is an escape, see + (AREs only; in EREs and BREs, this matches c) - { + { when followed by a character other than a digit, - matches the left-brace character {; + matches the left-brace character {; when followed by a digit, it is the beginning of a - bound (see below) + bound (see below) - x - where x is a single character with no other + x + where x is a single character with no other significance, matches that character @@ -4625,7 +4625,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - An RE cannot end with a backslash (\). + An RE cannot end with a backslash (\). @@ -4649,82 +4649,82 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - * + * a sequence of 0 or more matches of the atom - + + + a sequence of 1 or more matches of the atom - ? + ? a sequence of 0 or 1 matches of the atom - {m} - a sequence of exactly m matches of the atom + {m} + a sequence of exactly m matches of the atom - {m,} - a sequence of m or more matches of the atom + {m,} + a sequence of m or more matches of the atom - {m,n} - a sequence of m through n - (inclusive) matches of the atom; m cannot exceed - n + {m,n} + a sequence of m through n + (inclusive) matches of the atom; m cannot exceed + n - *? - non-greedy version of * + *? + non-greedy version of * - +? - non-greedy version of + + +? + non-greedy version of + - ?? - non-greedy version of ? + ?? + non-greedy version of ? - {m}? - non-greedy version of {m} + {m}? + non-greedy version of {m} - {m,}? - non-greedy version of {m,} + {m,}? + non-greedy version of {m,} - {m,n}? - non-greedy version of {m,n} + {m,n}? + non-greedy version of {m,n} - The forms using {...} - are known as bounds. - The numbers m and n within a bound are + The forms using {...} + are known as bounds. + The numbers m and n within a bound are unsigned decimal integers with permissible values from 0 to 255 inclusive. - Non-greedy quantifiers (available in AREs only) match the - same possibilities as their corresponding normal (greedy) + Non-greedy quantifiers (available in AREs only) match the + same possibilities as their corresponding normal (greedy) counterparts, but prefer the smallest number rather than the largest number of matches. See for more detail. @@ -4733,7 +4733,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; A quantifier cannot immediately follow another quantifier, e.g., - ** is invalid. + ** is invalid. A quantifier cannot begin an expression or subexpression or follow ^ or |. 
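 A small sketch of greedy versus non-greedy bounds:

SELECT regexp_match('aaaa', 'a{2,3}');   -- {aaa}: the greedy bound takes as much as it can
SELECT regexp_match('aaaa', 'a{2,3}?');  -- {aa}: the non-greedy version takes as little as it can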
@@ -4753,40 +4753,40 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - ^ + ^ matches at the beginning of the string - $ + $ matches at the end of the string - (?=re) - positive lookahead matches at any point - where a substring matching re begins + (?=re) + positive lookahead matches at any point + where a substring matching re begins (AREs only) - (?!re) - negative lookahead matches at any point - where no substring matching re begins + (?!re) + negative lookahead matches at any point + where no substring matching re begins (AREs only) - (?<=re) - positive lookbehind matches at any point - where a substring matching re ends + (?<=re) + positive lookbehind matches at any point + where a substring matching re ends (AREs only) - (?<!re) - negative lookbehind matches at any point - where no substring matching re ends + (?<!re) + negative lookbehind matches at any point + where no substring matching re ends (AREs only) @@ -4795,7 +4795,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; Lookahead and lookbehind constraints cannot contain back - references (see ), + references (see ), and all parentheses within them are considered non-capturing. @@ -4808,7 +4808,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; characters enclosed in []. It normally matches any single character from the list (but see below). If the list begins with ^, it matches any single character - not from the rest of the list. + not from the rest of the list. If two characters in the list are separated by -, this is shorthand for the full range of characters between those two @@ -4853,7 +4853,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - PostgreSQL currently does not support multi-character collating + PostgreSQL currently does not support multi-character collating elements. This information describes possible future behavior. @@ -4861,7 +4861,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; Within a bracket expression, a collating element enclosed in [= and =] is an equivalence - class, standing for the sequences of characters of all collating + class, standing for the sequences of characters of all collating elements equivalent to that one, including itself. (If there are no other equivalent collating elements, the treatment is as if the enclosing delimiters were [. and @@ -4896,7 +4896,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; matching empty strings at the beginning and end of a word respectively. A word is defined as a sequence of word characters that is neither preceded nor followed by word - characters. A word character is an alnum character (as + characters. A word character is an alnum character (as defined by ctype3) or an underscore. This is an extension, compatible with but not @@ -4911,44 +4911,44 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; Regular Expression Escapes - Escapes are special sequences beginning with \ + Escapes are special sequences beginning with \ followed by an alphanumeric character. Escapes come in several varieties: character entry, class shorthands, constraint escapes, and back references. - A \ followed by an alphanumeric character but not constituting + A \ followed by an alphanumeric character but not constituting a valid escape is illegal in AREs. 
In EREs, there are no escapes: outside a bracket expression, - a \ followed by an alphanumeric character merely stands for + a \ followed by an alphanumeric character merely stands for that character as an ordinary character, and inside a bracket expression, - \ is an ordinary character. + \ is an ordinary character. (The latter is the one actual incompatibility between EREs and AREs.) - Character-entry escapes exist to make it easier to specify + Character-entry escapes exist to make it easier to specify non-printing and other inconvenient characters in REs. They are shown in . - Class-shorthand escapes provide shorthands for certain + Class-shorthand escapes provide shorthands for certain commonly-used character classes. They are shown in . - A constraint escape is a constraint, + A constraint escape is a constraint, matching the empty string if specific conditions are met, written as an escape. They are shown in . - A back reference (\n) matches the + A back reference (\n) matches the same string matched by the previous parenthesized subexpression specified - by the number n + by the number n (see ). For example, - ([bc])\1 matches bb or cc - but not bc or cb. + ([bc])\1 matches bb or cc + but not bc or cb. The subexpression must entirely precede the back reference in the RE. Subexpressions are numbered in the order of their leading parentheses. Non-capturing parentheses do not define subexpressions. @@ -4967,122 +4967,122 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - \a + \a alert (bell) character, as in C - \b + \b backspace, as in C - \B - synonym for backslash (\) to help reduce the need for backslash + \B + synonym for backslash (\) to help reduce the need for backslash doubling - \cX - (where X is any character) the character whose + \cX + (where X is any character) the character whose low-order 5 bits are the same as those of - X, and whose other bits are all zero + X, and whose other bits are all zero - \e + \e the character whose collating-sequence name - is ESC, - or failing that, the character with octal value 033 + is ESC, + or failing that, the character with octal value 033 - \f + \f form feed, as in C - \n + \n newline, as in C - \r + \r carriage return, as in C - \t + \t horizontal tab, as in C - \uwxyz - (where wxyz is exactly four hexadecimal digits) + \uwxyz + (where wxyz is exactly four hexadecimal digits) the character whose hexadecimal value is - 0xwxyz + 0xwxyz - \Ustuvwxyz - (where stuvwxyz is exactly eight hexadecimal + \Ustuvwxyz + (where stuvwxyz is exactly eight hexadecimal digits) the character whose hexadecimal value is - 0xstuvwxyz + 0xstuvwxyz - \v + \v vertical tab, as in C - \xhhh - (where hhh is any sequence of hexadecimal + \xhhh + (where hhh is any sequence of hexadecimal digits) the character whose hexadecimal value is - 0xhhh + 0xhhh (a single character no matter how many hexadecimal digits are used) - \0 - the character whose value is 0 (the null byte) + \0 + the character whose value is 0 (the null byte) - \xy - (where xy is exactly two octal digits, - and is not a back reference) + \xy + (where xy is exactly two octal digits, + and is not a back reference) the character whose octal value is - 0xy + 0xy - \xyz - (where xyz is exactly three octal digits, - and is not a back reference) + \xyz + (where xyz is exactly three octal digits, + and is not a back reference) the character whose octal value is - 0xyz + 0xyz - Hexadecimal digits are 0-9, - a-f, and A-F. - Octal digits are 0-7. 
+ Hexadecimal digits are 0-9, + a-f, and A-F. + Octal digits are 0-7. Numeric character-entry escapes specifying values outside the ASCII range (0-127) have meanings dependent on the database encoding. When the encoding is UTF-8, escape values are equivalent to Unicode code points, - for example \u1234 means the character U+1234. + for example \u1234 means the character U+1234. For other multibyte encodings, character-entry escapes usually just specify the concatenation of the byte values for the character. If the escape value does not correspond to any legal character in the database @@ -5091,8 +5091,8 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; The character-entry escapes are always taken as ordinary characters. - For example, \135 is ] in ASCII, but - \135 does not terminate a bracket expression. + For example, \135 is ] in ASCII, but + \135 does not terminate a bracket expression. @@ -5108,34 +5108,34 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - \d - [[:digit:]] + \d + [[:digit:]] - \s - [[:space:]] + \s + [[:space:]] - \w - [[:alnum:]_] + \w + [[:alnum:]_] (note underscore is included) - \D - [^[:digit:]] + \D + [^[:digit:]] - \S - [^[:space:]] + \S + [^[:space:]] - \W - [^[:alnum:]_] + \W + [^[:alnum:]_] (note underscore is included) @@ -5143,13 +5143,13 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
- Within bracket expressions, \d, \s, - and \w lose their outer brackets, - and \D, \S, and \W are illegal. - (So, for example, [a-c\d] is equivalent to - [a-c[:digit:]]. - Also, [a-c\D], which is equivalent to - [a-c^[:digit:]], is illegal.) + Within bracket expressions, \d, \s, + and \w lose their outer brackets, + and \D, \S, and \W are illegal. + (So, for example, [a-c\d] is equivalent to + [a-c[:digit:]]. + Also, [a-c\D], which is equivalent to + [a-c^[:digit:]], is illegal.) @@ -5165,38 +5165,38 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - \A + \A matches only at the beginning of the string (see for how this differs from - ^) + ^) - \m + \m matches only at the beginning of a word - \M + \M matches only at the end of a word - \y + \y matches only at the beginning or end of a word - \Y + \Y matches only at a point that is not the beginning or end of a word - \Z + \Z matches only at the end of the string (see for how this differs from - $) + $) @@ -5204,7 +5204,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; A word is defined as in the specification of - [[:<:]] and [[:>:]] above. + [[:<:]] and [[:>:]] above. Constraint escapes are illegal within bracket expressions. @@ -5221,18 +5221,18 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - \m - (where m is a nonzero digit) - a back reference to the m'th subexpression + \m + (where m is a nonzero digit) + a back reference to the m'th subexpression - \mnn - (where m is a nonzero digit, and - nn is some more digits, and the decimal value - mnn is not greater than the number of closing capturing + \mnn + (where m is a nonzero digit, and + nn is some more digits, and the decimal value + mnn is not greater than the number of closing capturing parentheses seen so far) - a back reference to the mnn'th subexpression + a back reference to the mnn'th subexpression @@ -5263,29 +5263,29 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - An RE can begin with one of two special director prefixes. - If an RE begins with ***:, + An RE can begin with one of two special director prefixes. + If an RE begins with ***:, the rest of the RE is taken as an ARE. (This normally has no effect in - PostgreSQL, since REs are assumed to be AREs; + PostgreSQL, since REs are assumed to be AREs; but it does have an effect if ERE or BRE mode had been specified by - the flags parameter to a regex function.) - If an RE begins with ***=, + the flags parameter to a regex function.) + If an RE begins with ***=, the rest of the RE is taken to be a literal string, with all characters considered ordinary characters. - An ARE can begin with embedded options: - a sequence (?xyz) - (where xyz is one or more alphabetic characters) + An ARE can begin with embedded options: + a sequence (?xyz) + (where xyz is one or more alphabetic characters) specifies options affecting the rest of the RE. These options override any previously determined options — in particular, they can override the case-sensitivity behavior implied by - a regex operator, or the flags parameter to a regex + a regex operator, or the flags parameter to a regex function. The available option letters are shown in . - Note that these same option letters are used in the flags + Note that these same option letters are used in the flags parameters of regex functions. 
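 
 As a quick illustration (a hedged sketch, not part of the upstream patch; the operands are arbitrary), an embedded option and a director in action:
 
SELECT 'QUICK' ~ '(?i)quick';
Result: t
SELECT 'abc' ~ '***=a.c';
Result: f
 
 The second pattern is taken as the literal string a.c because of the ***= director, so it does not match abc.
 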
@@ -5302,67 +5302,67 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - b + b rest of RE is a BRE - c + c case-sensitive matching (overrides operator type) - e + e rest of RE is an ERE - i + i case-insensitive matching (see ) (overrides operator type) - m - historical synonym for n + m + historical synonym for n - n + n newline-sensitive matching (see ) - p + p partial newline-sensitive matching (see ) - q - rest of RE is a literal (quoted) string, all ordinary + q + rest of RE is a literal (quoted) string, all ordinary characters - s + s non-newline-sensitive matching (default) - t + t tight syntax (default; see below) - w - inverse partial newline-sensitive (weird) matching + w + inverse partial newline-sensitive (weird) matching (see ) - x + x expanded syntax (see below) @@ -5370,18 +5370,18 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo;
- Embedded options take effect at the ) terminating the sequence. + Embedded options take effect at the ) terminating the sequence. They can appear only at the start of an ARE (after the - ***: director if any). + ***: director if any). - In addition to the usual (tight) RE syntax, in which all - characters are significant, there is an expanded syntax, - available by specifying the embedded x option. + In addition to the usual (tight) RE syntax, in which all + characters are significant, there is an expanded syntax, + available by specifying the embedded x option. In the expanded syntax, white-space characters in the RE are ignored, as are - all characters between a # + all characters between a # and the following newline (or the end of the RE). This permits paragraphing and commenting a complex RE. There are three exceptions to that basic rule: @@ -5389,41 +5389,41 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; - a white-space character or # preceded by \ is + a white-space character or # preceded by \ is retained - white space or # within a bracket expression is retained + white space or # within a bracket expression is retained white space and comments cannot appear within multi-character symbols, - such as (?: + such as (?: For this purpose, white-space characters are blank, tab, newline, and - any character that belongs to the space character class. + any character that belongs to the space character class. Finally, in an ARE, outside bracket expressions, the sequence - (?#ttt) - (where ttt is any text not containing a )) + (?#ttt) + (where ttt is any text not containing a )) is a comment, completely ignored. Again, this is not allowed between the characters of - multi-character symbols, like (?:. + multi-character symbols, like (?:. Such comments are more a historical artifact than a useful facility, and their use is deprecated; use the expanded syntax instead. - None of these metasyntax extensions is available if - an initial ***= director + None of these metasyntax extensions is available if + an initial ***= director has specified that the user's input be treated as a literal string rather than as an RE. @@ -5437,8 +5437,8 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; string, the RE matches the one starting earliest in the string. If the RE could match more than one substring starting at that point, either the longest possible match or the shortest possible match will - be taken, depending on whether the RE is greedy or - non-greedy. + be taken, depending on whether the RE is greedy or + non-greedy.
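 
 For example (a minimal sketch of the earliest-then-longest rule just described; the operands are arbitrary):
 
SELECT SUBSTRING('XYX', 'X.*');
Result: XYX
SELECT SUBSTRING('XYX', 'X.*?');
Result: X
 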
@@ -5458,39 +5458,39 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; A quantified atom with a fixed-repetition quantifier - ({m} + ({m} or - {m}?) + {m}?) has the same greediness (possibly none) as the atom itself. A quantified atom with other normal quantifiers (including - {m,n} - with m equal to n) + {m,n} + with m equal to n) is greedy (prefers longest match). A quantified atom with a non-greedy quantifier (including - {m,n}? - with m equal to n) + {m,n}? + with m equal to n) is non-greedy (prefers shortest match). A branch — that is, an RE that has no top-level - | operator — has the same greediness as the first + | operator — has the same greediness as the first quantified atom in it that has a greediness attribute. An RE consisting of two or more branches connected by the - | operator is always greedy. + | operator is always greedy.
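 
 Applying the last rule (a sketch, assuming the longest-match behavior described above): because an RE with a top-level | is greedy as a whole, the longer branch wins even though it is written second:
 
SELECT SUBSTRING('abcd', 'ab|abc');
Result: abc
 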
@@ -5501,7 +5501,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; quantified atoms, but with branches and entire REs that contain quantified atoms. What that means is that the matching is done in such a way that the branch, or whole RE, matches the longest or shortest possible - substring as a whole. Once the length of the entire match + substring as a whole. Once the length of the entire match is determined, the part of it that matches any particular subexpression is determined on the basis of the greediness attribute of that subexpression, with subexpressions starting earlier in the RE taking @@ -5516,16 +5516,16 @@ SELECT SUBSTRING('XY1234Z', 'Y*([0-9]{1,3})'); SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); Result: 1 - In the first case, the RE as a whole is greedy because Y* - is greedy. It can match beginning at the Y, and it matches - the longest possible string starting there, i.e., Y123. - The output is the parenthesized part of that, or 123. - In the second case, the RE as a whole is non-greedy because Y*? - is non-greedy. It can match beginning at the Y, and it matches - the shortest possible string starting there, i.e., Y1. - The subexpression [0-9]{1,3} is greedy but it cannot change + In the first case, the RE as a whole is greedy because Y* + is greedy. It can match beginning at the Y, and it matches + the longest possible string starting there, i.e., Y123. + The output is the parenthesized part of that, or 123. + In the second case, the RE as a whole is non-greedy because Y*? + is non-greedy. It can match beginning at the Y, and it matches + the shortest possible string starting there, i.e., Y1. + The subexpression [0-9]{1,3} is greedy but it cannot change the decision as to the overall match length; so it is forced to match - just 1. + just 1. @@ -5533,11 +5533,11 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); the total match length is either as long as possible or as short as possible, according to the attribute assigned to the whole RE. The attributes assigned to the subexpressions only affect how much of that - match they are allowed to eat relative to each other. + match they are allowed to eat relative to each other. - The quantifiers {1,1} and {1,1}? + The quantifiers {1,1} and {1,1}? can be used to force greediness or non-greediness, respectively, on a subexpression or a whole RE. This is useful when you need the whole RE to have a greediness attribute @@ -5549,8 +5549,8 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); SELECT regexp_match('abc01234xyz', '(.*)(\d+)(.*)'); Result: {abc0123,4,xyz} - That didn't work: the first .* is greedy so - it eats as much as it can, leaving the \d+ to + That didn't work: the first .* is greedy so + it eats as much as it can, leaving the \d+ to match at the last possible place, the last digit. We might try to fix that by making it non-greedy: @@ -5573,14 +5573,14 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); match lengths are measured in characters, not collating elements. An empty string is considered longer than no match at all. 
For example: - bb* - matches the three middle characters of abbbc; - (week|wee)(night|knights) - matches all ten characters of weeknights; - when (.*).* - is matched against abc the parenthesized subexpression + bb* + matches the three middle characters of abbbc; + (week|wee)(night|knights) + matches all ten characters of weeknights; + when (.*).* + is matched against abc the parenthesized subexpression matches all three characters; and when - (a*)* is matched against bc + (a*)* is matched against bc both the whole RE and the parenthesized subexpression match an empty string. @@ -5592,38 +5592,38 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); When an alphabetic that exists in multiple cases appears as an ordinary character outside a bracket expression, it is effectively transformed into a bracket expression containing both cases, - e.g., x becomes [xX]. + e.g., x becomes [xX]. When it appears inside a bracket expression, all case counterparts of it are added to the bracket expression, e.g., - [x] becomes [xX] - and [^x] becomes [^xX]. + [x] becomes [xX] + and [^x] becomes [^xX]. - If newline-sensitive matching is specified, . - and bracket expressions using ^ + If newline-sensitive matching is specified, . + and bracket expressions using ^ will never match the newline character (so that matches will never cross newlines unless the RE explicitly arranges it) - and ^ and $ + and ^ and $ will match the empty string after and before a newline respectively, in addition to matching at beginning and end of string respectively. - But the ARE escapes \A and \Z - continue to match beginning or end of string only. + But the ARE escapes \A and \Z + continue to match beginning or end of string only. If partial newline-sensitive matching is specified, - this affects . and bracket expressions - as with newline-sensitive matching, but not ^ - and $. + this affects . and bracket expressions + as with newline-sensitive matching, but not ^ + and $. If inverse partial newline-sensitive matching is specified, - this affects ^ and $ - as with newline-sensitive matching, but not . + this affects ^ and $ + as with newline-sensitive matching, but not . and bracket expressions. This isn't very useful but is provided for symmetry. @@ -5642,18 +5642,18 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); The only feature of AREs that is actually incompatible with - POSIX EREs is that \ does not lose its special + POSIX EREs is that \ does not lose its special significance inside bracket expressions. All other ARE features use syntax which is illegal or has undefined or unspecified effects in POSIX EREs; - the *** syntax of directors likewise is outside the POSIX + the *** syntax of directors likewise is outside the POSIX syntax for both BREs and EREs. Many of the ARE extensions are borrowed from Perl, but some have been changed to clean them up, and a few Perl extensions are not present. 
- Incompatibilities of note include \b, \B, + Incompatibilities of note include \b, \B, the lack of special treatment for a trailing newline, the addition of complemented bracket expressions to the things affected by newline-sensitive matching, @@ -5664,12 +5664,12 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Two significant incompatibilities exist between AREs and the ERE syntax - recognized by pre-7.4 releases of PostgreSQL: + recognized by pre-7.4 releases of PostgreSQL: - In AREs, \ followed by an alphanumeric character is either + In AREs, \ followed by an alphanumeric character is either an escape or an error, while in previous releases, it was just another way of writing the alphanumeric. This should not be much of a problem because there was no reason to @@ -5678,9 +5678,9 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); - In AREs, \ remains a special character within - [], so a literal \ within a bracket - expression must be written \\. + In AREs, \ remains a special character within + [], so a literal \ within a bracket + expression must be written \\. @@ -5692,27 +5692,27 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); BREs differ from EREs in several respects. - In BREs, |, +, and ? + In BREs, |, +, and ? are ordinary characters and there is no equivalent for their functionality. The delimiters for bounds are - \{ and \}, - with { and } + \{ and \}, + with { and } by themselves ordinary characters. The parentheses for nested subexpressions are - \( and \), - with ( and ) by themselves ordinary characters. - ^ is an ordinary character except at the beginning of the + \( and \), + with ( and ) by themselves ordinary characters. + ^ is an ordinary character except at the beginning of the RE or the beginning of a parenthesized subexpression, - $ is an ordinary character except at the end of the + $ is an ordinary character except at the end of the RE or the end of a parenthesized subexpression, - and * is an ordinary character if it appears at the beginning + and * is an ordinary character if it appears at the beginning of the RE or the beginning of a parenthesized subexpression - (after a possible leading ^). + (after a possible leading ^). Finally, single-digit back references are available, and - \< and \> + \< and \> are synonyms for - [[:<:]] and [[:>:]] + [[:<:]] and [[:>:]] respectively; no other escapes are available in BREs. @@ -5839,13 +5839,13 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); exist to handle input formats that cannot be converted by simple casting. For most standard date/time formats, simply casting the source string to the required data type works, and is much easier. - Similarly, to_number is unnecessary for standard numeric + Similarly, to_number is unnecessary for standard numeric representations. - In a to_char output template string, there are certain + In a to_char output template string, there are certain patterns that are recognized and replaced with appropriately-formatted data based on the given value. Any text that is not a template pattern is simply copied verbatim. 
Similarly, in an input template string (for the @@ -6022,11 +6022,11 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); D - day of the week, Sunday (1) to Saturday (7) + day of the week, Sunday (1) to Saturday (7) ID - ISO 8601 day of the week, Monday (1) to Sunday (7) + ISO 8601 day of the week, Monday (1) to Sunday (7) W @@ -6063,17 +6063,17 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); TZ upper case time-zone abbreviation - (only supported in to_char) + (only supported in to_char) tz lower case time-zone abbreviation - (only supported in to_char) + (only supported in to_char) OF time-zone offset from UTC - (only supported in to_char) + (only supported in to_char) @@ -6107,12 +6107,12 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); TH suffix upper case ordinal number suffix - DDTH, e.g., 12TH + DDTH, e.g., 12TH th suffix lower case ordinal number suffix - DDth, e.g., 12th + DDth, e.g., 12th FX prefix @@ -6153,7 +6153,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); TM does not include trailing blanks. - to_timestamp and to_date ignore + to_timestamp and to_date ignore the TM modifier. @@ -6179,9 +6179,9 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); even if it contains pattern key words. For example, in '"Hello Year "YYYY', the YYYY will be replaced by the year data, but the single Y in Year - will not be. In to_date, to_number, - and to_timestamp, double-quoted strings skip the number of - input characters contained in the string, e.g. "XX" + will not be. In to_date, to_number, + and to_timestamp, double-quoted strings skip the number of + input characters contained in the string, e.g. "XX" skips two input characters. @@ -6198,9 +6198,9 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); In to_timestamp and to_date, if the year format specification is less than four digits, e.g. - YYY, and the supplied year is less than four digits, + YYY, and the supplied year is less than four digits, the year will be adjusted to be nearest to the year 2020, e.g. - 95 becomes 1995. + 95 becomes 1995. @@ -6269,7 +6269,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); Attempting to enter a date using a mixture of ISO 8601 week-numbering fields and Gregorian date fields is nonsensical, and will cause an error. In the context of an ISO 8601 week-numbering year, the - concept of a month or day of month has no + concept of a month or day of month has no meaning. In the context of a Gregorian year, the ISO week has no meaning. @@ -6278,8 +6278,8 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); While to_date will reject a mixture of Gregorian and ISO week-numbering date fields, to_char will not, since output format - specifications like YYYY-MM-DD (IYYY-IDDD) can be - useful. But avoid writing something like IYYY-MM-DD; + specifications like YYYY-MM-DD (IYYY-IDDD) can be + useful. But avoid writing something like IYYY-MM-DD; that would yield surprising results near the start of the year. (See for more information.) @@ -6323,11 +6323,11 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); - to_char(interval) formats HH and - HH12 as shown on a 12-hour clock, for example zero hours - and 36 hours both output as 12, while HH24 + to_char(interval) formats HH and + HH12 as shown on a 12-hour clock, for example zero hours + and 36 hours both output as 12, while HH24 outputs the full hour value, which can exceed 23 in - an interval value. + an interval value. 
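 
 For instance (a minimal sketch; the 36-hour interval is arbitrary):
 
SELECT to_char(interval '36 hours', 'HH12:MI');
Result: 12:00
SELECT to_char(interval '36 hours', 'HH24:MI');
Result: 36:00
 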
@@ -6423,19 +6423,19 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); - 0 specifies a digit position that will always be printed, - even if it contains a leading/trailing zero. 9 also + 0 specifies a digit position that will always be printed, + even if it contains a leading/trailing zero. 9 also specifies a digit position, but if it is a leading zero then it will be replaced by a space, while if it is a trailing zero and fill mode - is specified then it will be deleted. (For to_number(), + is specified then it will be deleted. (For to_number(), these two pattern characters are equivalent.) - The pattern characters S, L, D, - and G represent the sign, currency symbol, decimal point, + The pattern characters S, L, D, + and G represent the sign, currency symbol, decimal point, and thousands separator characters defined by the current locale (see and ). The pattern characters period @@ -6447,9 +6447,9 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); If no explicit provision is made for a sign - in to_char()'s pattern, one column will be reserved for + in to_char()'s pattern, one column will be reserved for the sign, and it will be anchored to (appear just left of) the - number. If S appears just left of some 9's, + number. If S appears just left of some 9's, it will likewise be anchored to the number. @@ -6742,7 +6742,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); inputs actually come in two variants: one that takes time with time zone or timestamp with time zone, and one that takes time without time zone or timestamp without time zone. For brevity, these variants are not shown separately. Also, the - + and * operators come in commutative pairs (for + + and * operators come in commutative pairs (for example both date + integer and integer + date); we show only one of each such pair. @@ -6899,7 +6899,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); age(timestamp, timestamp) interval - Subtract arguments, producing a symbolic result that + Subtract arguments, producing a symbolic result that uses years and months, rather than just days age(timestamp '2001-04-10', timestamp '1957-06-13') 43 years 9 mons 27 days @@ -7109,7 +7109,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); justify_interval(interval) interval - Adjust interval using justify_days and justify_hours, with additional sign adjustments + Adjust interval using justify_days and justify_hours, with additional sign adjustments justify_interval(interval '1 mon -1 hour') 29 days 23:00:00 @@ -7302,7 +7302,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); text Current date and time - (like clock_timestamp, but as a text string); + (like clock_timestamp, but as a text string); see @@ -7344,7 +7344,7 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); OVERLAPS - In addition to these functions, the SQL OVERLAPS operator is + In addition to these functions, the SQL OVERLAPS operator is supported: (start1, end1) OVERLAPS (start2, end2) @@ -7355,11 +7355,11 @@ SELECT regexp_match('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); can be specified as pairs of dates, times, or time stamps; or as a date, time, or time stamp followed by an interval. When a pair of values is provided, either the start or the end can be written - first; OVERLAPS automatically takes the earlier value + first; OVERLAPS automatically takes the earlier value of the pair as the start. 
Each time period is considered to - represent the half-open interval start <= - time < end, unless - start and end are equal in which case it + represent the half-open interval start <= + time < end, unless + start and end are equal in which case it represents that single time instant. This means for instance that two time periods with only an endpoint in common do not overlap. @@ -7398,31 +7398,31 @@ SELECT (DATE '2001-10-30', DATE '2001-10-30') OVERLAPS - Note there can be ambiguity in the months field returned by - age because different months have different numbers of - days. PostgreSQL's approach uses the month from the + Note there can be ambiguity in the months field returned by + age because different months have different numbers of + days. PostgreSQL's approach uses the month from the earlier of the two dates when calculating partial months. For example, - age('2004-06-01', '2004-04-30') uses April to yield - 1 mon 1 day, while using May would yield 1 mon 2 - days because May has 31 days, while April has only 30. + age('2004-06-01', '2004-04-30') uses April to yield + 1 mon 1 day, while using May would yield 1 mon 2 + days because May has 31 days, while April has only 30. Subtraction of dates and timestamps can also be complex. One conceptually simple way to perform subtraction is to convert each value to a number - of seconds using EXTRACT(EPOCH FROM ...), then subtract the + of seconds using EXTRACT(EPOCH FROM ...), then subtract the results; this produces the - number of seconds between the two values. This will adjust + number of seconds between the two values. This will adjust for the number of days in each month, timezone changes, and daylight saving time adjustments. Subtraction of date or timestamp - values with the - operator + values with the - operator returns the number of days (24-hours) and hours/minutes/seconds - between the values, making the same adjustments. The age + between the values, making the same adjustments. The age function returns years, months, days, and hours/minutes/seconds, performing field-by-field subtraction and then adjusting for negative field values. The following queries illustrate the differences in these approaches. The sample results were produced with timezone - = 'US/Eastern'; there is a daylight saving time change between the + = 'US/Eastern'; there is a daylight saving time change between the two dates used: @@ -7534,8 +7534,8 @@ SELECT EXTRACT(DECADE FROM TIMESTAMP '2001-02-16 20:38:40'); dow - The day of the week as Sunday (0) to - Saturday (6) + The day of the week as Sunday (0) to + Saturday (6) @@ -7587,7 +7587,7 @@ SELECT EXTRACT(EPOCH FROM INTERVAL '5 days 3 hours'); You can convert an epoch value back to a time stamp - with to_timestamp: + with to_timestamp: SELECT to_timestamp(982384720.12); @@ -7614,8 +7614,8 @@ SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40'); isodow - The day of the week as Monday (1) to - Sunday (7) + The day of the week as Monday (1) to + Sunday (7) @@ -7623,8 +7623,8 @@ SELECT EXTRACT(ISODOW FROM TIMESTAMP '2001-02-18 20:38:40'); Result: 7 - This is identical to dow except for Sunday. This - matches the ISO 8601 day of the week numbering. + This is identical to dow except for Sunday. This + matches the ISO 8601 day of the week numbering. 
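 
 The difference is visible only on a Sunday (a minimal sketch; 2001-02-18, used in the example above, is a Sunday):
 
SELECT EXTRACT(DOW FROM DATE '2001-02-18');
Result: 0
SELECT EXTRACT(ISODOW FROM DATE '2001-02-18');
Result: 7
 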
@@ -7819,11 +7819,11 @@ SELECT EXTRACT(SECOND FROM TIME '17:12:28.5'); In the ISO week-numbering system, it is possible for early-January dates to be part of the 52nd or 53rd week of the previous year, and for late-December dates to be part of the first week of the next year. - For example, 2005-01-01 is part of the 53rd week of year - 2004, and 2006-01-01 is part of the 52nd week of year - 2005, while 2012-12-31 is part of the first week of 2013. - It's recommended to use the isoyear field together with - week to get consistent results. + For example, 2005-01-01 is part of the 53rd week of year + 2004, and 2006-01-01 is part of the 52nd week of year + 2005, while 2012-12-31 is part of the first week of 2013. + It's recommended to use the isoyear field together with + week to get consistent results. @@ -7837,8 +7837,8 @@ SELECT EXTRACT(WEEK FROM TIMESTAMP '2001-02-16 20:38:40'); year - The year field. Keep in mind there is no 0 AD, so subtracting - BC years from AD years should be done with care. + The year field. Keep in mind there is no 0 AD, so subtracting + BC years from AD years should be done with care. @@ -7853,11 +7853,11 @@ SELECT EXTRACT(YEAR FROM TIMESTAMP '2001-02-16 20:38:40'); - When the input value is +/-Infinity, extract returns - +/-Infinity for monotonically-increasing fields (epoch, - julian, year, isoyear, - decade, century, and millennium). - For other fields, NULL is returned. PostgreSQL + When the input value is +/-Infinity, extract returns + +/-Infinity for monotonically-increasing fields (epoch, + julian, year, isoyear, + decade, century, and millennium). + For other fields, NULL is returned. PostgreSQL versions before 9.6 returned zero for all cases of infinite input. @@ -7908,13 +7908,13 @@ SELECT date_part('hour', INTERVAL '4 hours 3 minutes'); date_trunc('field', source) source is a value expression of type - timestamp or interval. + timestamp or interval. (Values of type date and time are cast automatically to timestamp or - interval, respectively.) + interval, respectively.) field selects to which precision to truncate the input value. The return value is of type - timestamp or interval + timestamp or interval with all fields that are less significant than the selected one set to zero (or one, for day and month). @@ -7983,34 +7983,34 @@ SELECT date_trunc('year', TIMESTAMP '2001-02-16 20:38:40'); - timestamp without time zone AT TIME ZONE zone + timestamp without time zone AT TIME ZONE zone timestamp with time zone - Treat given time stamp without time zone as located in the specified time zone + Treat given time stamp without time zone as located in the specified time zone - timestamp with time zone AT TIME ZONE zone + timestamp with time zone AT TIME ZONE zone timestamp without time zone - Convert given time stamp with time zone to the new time + Convert given time stamp with time zone to the new time zone, with no time zone designation - time with time zone AT TIME ZONE zone + time with time zone AT TIME ZONE zone time with time zone - Convert given time with time zone to the new time zone + Convert given time with time zone to the new time zone - In these expressions, the desired time zone zone can be + In these expressions, the desired time zone zone can be specified either as a text string (e.g., 'PST') or as an interval (e.g., INTERVAL '-08:00'). 
In the text case, a time zone name can be specified in any of the ways @@ -8018,7 +8018,7 @@ SELECT date_trunc('year', TIMESTAMP '2001-02-16 20:38:40'); - Examples (assuming the local time zone is PST8PDT): + Examples (assuming the local time zone is PST8PDT): SELECT TIMESTAMP '2001-02-16 20:38:40' AT TIME ZONE 'MST'; Result: 2001-02-16 19:38:40-08 @@ -8032,10 +8032,10 @@ SELECT TIMESTAMP WITH TIME ZONE '2001-02-16 20:38:40-05' AT TIME ZONE 'MST'; - The function timezone(zone, - timestamp) is equivalent to the SQL-conforming construct - timestamp AT TIME ZONE - zone. + The function timezone(zone, + timestamp) is equivalent to the SQL-conforming construct + timestamp AT TIME ZONE + zone. @@ -8140,23 +8140,23 @@ now() - transaction_timestamp() is equivalent to + transaction_timestamp() is equivalent to CURRENT_TIMESTAMP, but is named to clearly reflect what it returns. - statement_timestamp() returns the start time of the current + statement_timestamp() returns the start time of the current statement (more specifically, the time of receipt of the latest command message from the client). - statement_timestamp() and transaction_timestamp() + statement_timestamp() and transaction_timestamp() return the same value during the first command of a transaction, but might differ during subsequent commands. - clock_timestamp() returns the actual current time, and + clock_timestamp() returns the actual current time, and therefore its value changes even within a single SQL command. - timeofday() is a historical + timeofday() is a historical PostgreSQL function. Like - clock_timestamp(), it returns the actual current time, - but as a formatted text string rather than a timestamp - with time zone value. - now() is a traditional PostgreSQL + clock_timestamp(), it returns the actual current time, + but as a formatted text string rather than a timestamp + with time zone value. + now() is a traditional PostgreSQL equivalent to transaction_timestamp(). @@ -8174,7 +8174,7 @@ SELECT TIMESTAMP 'now'; -- incorrect for use with DEFAULT - You do not want to use the third form when specifying a DEFAULT + You do not want to use the third form when specifying a DEFAULT clause while creating a table. The system will convert now to a timestamp as soon as the constant is parsed, so that when the default value is needed, @@ -8210,16 +8210,16 @@ SELECT TIMESTAMP 'now'; -- incorrect for use with DEFAULT process: pg_sleep(seconds) -pg_sleep_for(interval) -pg_sleep_until(timestamp with time zone) +pg_sleep_for(interval) +pg_sleep_until(timestamp with time zone) pg_sleep makes the current session's process sleep until seconds seconds have elapsed. seconds is a value of type - double precision, so fractional-second delays can be specified. + double precision, so fractional-second delays can be specified. pg_sleep_for is a convenience function for larger - sleep times specified as an interval. + sleep times specified as an interval. pg_sleep_until is a convenience function for when a specific wake-up time is desired. For example: @@ -8341,7 +8341,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - Notice that except for the two-argument form of enum_range, + Notice that except for the two-argument form of enum_range, these functions disregard the specific value passed to them; they care only about its declared data type. Either null or a specific value of the type can be passed, with the same result. 
It is more common to @@ -8365,13 +8365,13 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - Note that the same as operator, ~=, represents + Note that the same as operator, ~=, represents the usual notion of equality for the point, box, polygon, and circle types. - Some of these types also have an = operator, but - = compares - for equal areas only. The other scalar comparison operators - (<= and so on) likewise compare areas for these types. + Some of these types also have an = operator, but + = compares + for equal areas only. The other scalar comparison operators + (<= and so on) likewise compare areas for these types. @@ -8548,8 +8548,8 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple Before PostgreSQL 8.2, the containment - operators @> and <@ were respectively - called ~ and @. These names are still + operators @> and <@ were respectively + called ~ and @. These names are still available, but are deprecated and will eventually be removed. @@ -8604,67 +8604,67 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - area(object) + area(object) double precision area area(box '((0,0),(1,1))') - center(object) + center(object) point center center(box '((0,0),(1,2))') - diameter(circle) + diameter(circle) double precision diameter of circle diameter(circle '((0,0),2.0)') - height(box) + height(box) double precision vertical size of box height(box '((0,0),(1,1))') - isclosed(path) + isclosed(path) boolean a closed path? isclosed(path '((0,0),(1,1),(2,0))') - isopen(path) + isopen(path) boolean an open path? isopen(path '[(0,0),(1,1),(2,0)]') - length(object) + length(object) double precision length length(path '((-1,0),(1,0))') - npoints(path) + npoints(path) int number of points npoints(path '[(0,0),(1,1),(2,0)]') - npoints(polygon) + npoints(polygon) int number of points npoints(polygon '((1,1),(0,0))') - pclose(path) + pclose(path) path convert path to closed pclose(path '[(0,0),(1,1),(2,0)]') - popen(path) + popen(path) path convert path to open popen(path '((0,0),(1,1),(2,0))') @@ -8676,7 +8676,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple radius(circle '((0,0),2.0)') - width(box) + width(box) double precision horizontal size of box width(box '((0,0),(1,1))') @@ -8859,13 +8859,13 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - It is possible to access the two component numbers of a point + It is possible to access the two component numbers of a point as though the point were an array with indexes 0 and 1. For example, if - t.p is a point column then - SELECT p[0] FROM t retrieves the X coordinate and - UPDATE t SET p[1] = ... changes the Y coordinate. - In the same way, a value of type box or lseg can be treated - as an array of two point values. + t.p is a point column then + SELECT p[0] FROM t retrieves the X coordinate and + UPDATE t SET p[1] = ... changes the Y coordinate. + In the same way, a value of type box or lseg can be treated + as an array of two point values. @@ -9188,19 +9188,19 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - Any cidr value can be cast to inet implicitly + Any cidr value can be cast to inet implicitly or explicitly; therefore, the functions shown above as operating on - inet also work on cidr values. (Where there are - separate functions for inet and cidr, it is because + inet also work on cidr values. 
(Where there are + separate functions for inet and cidr, it is because the behavior should be different for the two cases.) - Also, it is permitted to cast an inet value to cidr. + Also, it is permitted to cast an inet value to cidr. When this is done, any bits to the right of the netmask are silently zeroed - to create a valid cidr value. + to create a valid cidr value. In addition, - you can cast a text value to inet or cidr + you can cast a text value to inet or cidr using normal casting syntax: for example, - inet(expression) or - colname::cidr. + inet(expression) or + colname::cidr. @@ -9345,64 +9345,64 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple @@ - boolean - tsvector matches tsquery ? + boolean + tsvector matches tsquery ? to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat') t @@@ - boolean - deprecated synonym for @@ + boolean + deprecated synonym for @@ to_tsvector('fat cats ate rats') @@@ to_tsquery('cat & rat') t || - tsvector - concatenate tsvectors + tsvector + concatenate tsvectors 'a:1 b:2'::tsvector || 'c:1 d:2 b:3'::tsvector 'a':1 'b':2,5 'c':3 'd':4 && - tsquery - AND tsquerys together + tsquery + AND tsquerys together 'fat | rat'::tsquery && 'cat'::tsquery ( 'fat' | 'rat' ) & 'cat' || - tsquery - OR tsquerys together + tsquery + OR tsquerys together 'fat | rat'::tsquery || 'cat'::tsquery ( 'fat' | 'rat' ) | 'cat' !! - tsquery - negate a tsquery + tsquery + negate a tsquery !! 'cat'::tsquery !'cat' <-> - tsquery - tsquery followed by tsquery + tsquery + tsquery followed by tsquery to_tsquery('fat') <-> to_tsquery('rat') 'fat' <-> 'rat' @> - boolean - tsquery contains another ? + boolean + tsquery contains another ? 'cat'::tsquery @> 'cat & rat'::tsquery f <@ - boolean - tsquery is contained in ? + boolean + tsquery is contained in ? 'cat'::tsquery <@ 'cat & rat'::tsquery t @@ -9412,15 +9412,15 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - The tsquery containment operators consider only the lexemes + The tsquery containment operators consider only the lexemes listed in the two queries, ignoring the combining operators. In addition to the operators shown in the table, the ordinary B-tree - comparison operators (=, <, etc) are defined - for types tsvector and tsquery. These are not very + comparison operators (=, <, etc) are defined + for types tsvector and tsquery. These are not very useful for text searching but allow, for example, unique indexes to be built on columns of these types. 
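 
 For instance (a sketch; the table name phrases is hypothetical, purely for illustration), the B-tree operators are what make a unique index like this possible, even though the ordering has no linguistic meaning:
 
CREATE TABLE phrases (phrase tsvector);
CREATE UNIQUE INDEX phrases_phrase_key ON phrases (phrase);
 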
@@ -9443,7 +9443,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple array_to_tsvector - array_to_tsvector(text[]) + array_to_tsvector(text[]) tsvector convert array of lexemes to tsvector @@ -9467,10 +9467,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple length - length(tsvector) + length(tsvector) integer - number of lexemes in tsvector + number of lexemes in tsvector length('fat:2,4 cat:3 rat:5A'::tsvector) 3 @@ -9479,10 +9479,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple numnode - numnode(tsquery) + numnode(tsquery) integer - number of lexemes plus operators in tsquery + number of lexemes plus operators in tsquery numnode('(fat & rat) | cat'::tsquery) 5 @@ -9491,10 +9491,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple plainto_tsquery - plainto_tsquery( config regconfig , query text) + plainto_tsquery( config regconfig , query text) tsquery - produce tsquery ignoring punctuation + produce tsquery ignoring punctuation plainto_tsquery('english', 'The Fat Rats') 'fat' & 'rat' @@ -9503,10 +9503,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple phraseto_tsquery - phraseto_tsquery( config regconfig , query text) + phraseto_tsquery( config regconfig , query text) tsquery - produce tsquery that searches for a phrase, + produce tsquery that searches for a phrase, ignoring punctuation phraseto_tsquery('english', 'The Fat Rats') 'fat' <-> 'rat' @@ -9516,10 +9516,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple querytree - querytree(query tsquery) + querytree(query tsquery) text - get indexable part of a tsquery + get indexable part of a tsquery querytree('foo & ! 
bar'::tsquery) 'foo' @@ -9528,7 +9528,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple setweight - setweight(vector tsvector, weight "char") + setweight(vector tsvector, weight "char") tsvector assign weight to each element of vector @@ -9541,7 +9541,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple setweight setweight for specific lexeme(s) - setweight(vector tsvector, weight "char", lexemes text[]) + setweight(vector tsvector, weight "char", lexemes text[]) tsvector assign weight to elements of vector that are listed in lexemes @@ -9553,10 +9553,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple strip - strip(tsvector) + strip(tsvector) tsvector - remove positions and weights from tsvector + remove positions and weights from tsvector strip('fat:2,4 cat:3 rat:5A'::tsvector) 'cat' 'fat' 'rat' @@ -9565,10 +9565,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple to_tsquery - to_tsquery( config regconfig , query text) + to_tsquery( config regconfig , query text) tsquery - normalize words and convert to tsquery + normalize words and convert to tsquery to_tsquery('english', 'The & Fat & Rats') 'fat' & 'rat' @@ -9577,21 +9577,21 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple to_tsvector - to_tsvector( config regconfig , document text) + to_tsvector( config regconfig , document text) tsvector - reduce document text to tsvector + reduce document text to tsvector to_tsvector('english', 'The Fat Rats') 'fat':2 'rat':3 - to_tsvector( config regconfig , document json(b)) + to_tsvector( config regconfig , document json(b)) tsvector - reduce each string value in the document to a tsvector, and then - concatenate those in document order to produce a single tsvector + reduce each string value in the document to a tsvector, and then + concatenate those in document order to produce a single tsvector to_tsvector('english', '{"a": "The Fat Rats"}'::json) 'fat':2 'rat':3 @@ -9601,7 +9601,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_delete - ts_delete(vector tsvector, lexeme text) + ts_delete(vector tsvector, lexeme text) tsvector remove given lexeme from vector @@ -9611,7 +9611,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - ts_delete(vector tsvector, lexemes text[]) + ts_delete(vector tsvector, lexemes text[]) tsvector remove any occurrence of lexemes in lexemes from vector @@ -9623,7 +9623,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_filter - ts_filter(vector tsvector, weights "char"[]) + ts_filter(vector tsvector, weights "char"[]) tsvector select only elements with given weights from vector @@ -9635,7 +9635,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_headline - ts_headline( config regconfig, document text, query tsquery , options text ) + ts_headline( config regconfig, document text, query tsquery , options text ) text display a query match @@ -9644,7 +9644,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - ts_headline( config regconfig, document json(b), query tsquery , options text ) + ts_headline( config regconfig, document json(b), query tsquery , options text ) text display a query match @@ -9656,7 +9656,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_rank - ts_rank( weights 
float4[], vector tsvector, query tsquery , normalization integer ) + ts_rank( weights float4[], vector tsvector, query tsquery , normalization integer ) float4 rank document for query @@ -9668,7 +9668,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_rank_cd - ts_rank_cd( weights float4[], vector tsvector, query tsquery , normalization integer ) + ts_rank_cd( weights float4[], vector tsvector, query tsquery , normalization integer ) float4 rank document for query using cover density @@ -9680,18 +9680,18 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_rewrite - ts_rewrite(query tsquery, target tsquery, substitute tsquery) + ts_rewrite(query tsquery, target tsquery, substitute tsquery) tsquery - replace target with substitute + replace target with substitute within query ts_rewrite('a & b'::tsquery, 'a'::tsquery, 'foo|bar'::tsquery) 'b' & ( 'foo' | 'bar' ) - ts_rewrite(query tsquery, select text) + ts_rewrite(query tsquery, select text) tsquery - replace using targets and substitutes from a SELECT command + replace using targets and substitutes from a SELECT command SELECT ts_rewrite('a & b'::tsquery, 'SELECT t,s FROM aliases') 'b' & ( 'foo' | 'bar' ) @@ -9700,22 +9700,22 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple tsquery_phrase - tsquery_phrase(query1 tsquery, query2 tsquery) + tsquery_phrase(query1 tsquery, query2 tsquery) tsquery - make query that searches for query1 followed - by query2 (same as <-> + make query that searches for query1 followed + by query2 (same as <-> operator) tsquery_phrase(to_tsquery('fat'), to_tsquery('cat')) 'fat' <-> 'cat' - tsquery_phrase(query1 tsquery, query2 tsquery, distance integer) + tsquery_phrase(query1 tsquery, query2 tsquery, distance integer) tsquery - make query that searches for query1 followed by - query2 at distance distance + make query that searches for query1 followed by + query2 at distance distance tsquery_phrase(to_tsquery('fat'), to_tsquery('cat'), 10) 'fat' <10> 'cat' @@ -9724,10 +9724,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple tsvector_to_array - tsvector_to_array(tsvector) + tsvector_to_array(tsvector) text[] - convert tsvector to array of lexemes + convert tsvector to array of lexemes tsvector_to_array('fat:2,4 cat:3 rat:5A'::tsvector) {cat,fat,rat} @@ -9739,7 +9739,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple tsvector_update_trigger() trigger - trigger function for automatic tsvector column update + trigger function for automatic tsvector column update CREATE TRIGGER ... tsvector_update_trigger(tsvcol, 'pg_catalog.swedish', title, body) @@ -9751,7 +9751,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple tsvector_update_trigger_column() trigger - trigger function for automatic tsvector column update + trigger function for automatic tsvector column update CREATE TRIGGER ... 
tsvector_update_trigger_column(tsvcol, configcol, title, body) @@ -9761,7 +9761,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple unnest for tsvector - unnest(tsvector, OUT lexeme text, OUT positions smallint[], OUT weights text) + unnest(tsvector, OUT lexeme text, OUT positions smallint[], OUT weights text) setof record expand a tsvector to a set of rows @@ -9774,7 +9774,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple - All the text search functions that accept an optional regconfig + All the text search functions that accept an optional regconfig argument will use the configuration specified by when that argument is omitted. @@ -9807,7 +9807,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_debug - ts_debug( config regconfig, document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) + ts_debug( config regconfig, document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) setof record test a configuration @@ -9819,7 +9819,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_lexize - ts_lexize(dict regdictionary, token text) + ts_lexize(dict regdictionary, token text) text[] test a dictionary @@ -9831,7 +9831,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_parse - ts_parse(parser_name text, document text, OUT tokid integer, OUT token text) + ts_parse(parser_name text, document text, OUT tokid integer, OUT token text) setof record test a parser @@ -9839,7 +9839,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple (1,foo) ... - ts_parse(parser_oid oid, document text, OUT tokid integer, OUT token text) + ts_parse(parser_oid oid, document text, OUT tokid integer, OUT token text) setof record test a parser ts_parse(3722, 'foo - bar') @@ -9850,7 +9850,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_token_type - ts_token_type(parser_name text, OUT tokid integer, OUT alias text, OUT description text) + ts_token_type(parser_name text, OUT tokid integer, OUT alias text, OUT description text) setof record get token types defined by parser @@ -9858,7 +9858,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple (1,asciiword,"Word, all ASCII") ... - ts_token_type(parser_oid oid, OUT tokid integer, OUT alias text, OUT description text) + ts_token_type(parser_oid oid, OUT tokid integer, OUT alias text, OUT description text) setof record get token types defined by parser ts_token_type(3722) @@ -9869,10 +9869,10 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple ts_stat - ts_stat(sqlquery text, weights text, OUT word text, OUT ndoc integer, OUT nentry integer) + ts_stat(sqlquery text, weights text, OUT word text, OUT ndoc integer, OUT nentry integer) setof record - get statistics of a tsvector column + get statistics of a tsvector column ts_stat('SELECT vector from apod') (foo,10,15) ... @@ -9894,7 +9894,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple and xmlserialize for converting to and from type xml are not repeated here. Use of most of these functions requires the installation to have been built - with configure --with-libxml. + with configure --with-libxml. 
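As a quick sketch of the text search testing functions above (assuming the default english_stem dictionary that ships with a standard installation; the exact lexemes returned depend on which dictionaries are installed):

SELECT ts_lexize('english_stem', 'stars');
 ts_lexize
-----------
 {star}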
@@ -10246,7 +10246,7 @@ SELECT xmlagg(x) FROM test; - To determine the order of the concatenation, an ORDER BY + To determine the order of the concatenation, an ORDER BY clause may be added to the aggregate call as described in . For example: @@ -10365,18 +10365,18 @@ SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Tor - These functions check whether a text string is well-formed XML, + These functions check whether a text string is well-formed XML, returning a Boolean result. xml_is_well_formed_document checks for a well-formed document, while xml_is_well_formed_content checks for well-formed content. xml_is_well_formed does the former if the configuration - parameter is set to DOCUMENT, or the latter if it is set to - CONTENT. This means that + parameter is set to DOCUMENT, or the latter if it is set to + CONTENT. This means that xml_is_well_formed is useful for seeing whether - a simple cast to type xml will succeed, whereas the other two + a simple cast to type xml will succeed, whereas the other two functions are useful for seeing whether the corresponding variants of - XMLPARSE will succeed. + XMLPARSE will succeed. @@ -10446,7 +10446,7 @@ SELECT xml_is_well_formed_document(' The optional third argument of the function is an array of namespace - mappings. This array should be a two-dimensional text array with + mappings. This array should be a two-dimensional text array with the length of the second axis being equal to 2 (i.e., it should be an array of arrays, each of which consists of exactly 2 elements). The first element of each array entry is the namespace name (alias), the second the namespace URI. It is not required that aliases provided in this array be the same as those being used in the XML document itself (in other words, both in the XML document and in the xpath - function context, aliases are local). + function context, aliases are local). @@ -10514,7 +10514,7 @@ SELECT xpath('//mydefns:b/text()', 'testxpath function. Instead of returning the individual XML values that satisfy the XPath, this function returns a Boolean indicating whether the query was satisfied or not. This - function is equivalent to the standard XMLEXISTS predicate, + function is equivalent to the standard XMLEXISTS predicate, except that it also offers support for a namespace mapping argument. @@ -10560,21 +10560,21 @@ SELECT xpath_exists('/my:a/text()', 'test - The optional XMLNAMESPACES clause is a comma-separated + The optional XMLNAMESPACES clause is a comma-separated list of namespaces. It specifies the XML namespaces used in the document and their aliases. A default namespace specification is not currently supported. - The required row_expression argument is an XPath + The required row_expression argument is an XPath expression that is evaluated against the supplied XML document to obtain an ordered sequence of XML nodes. This sequence is what - xmltable transforms into output rows. + xmltable transforms into output rows. - document_expression provides the XML document to + document_expression provides the XML document to operate on. The BY REF clauses have no effect in PostgreSQL, but are allowed for SQL conformance and compatibility with other @@ -10586,9 +10586,9 @@ SELECT xpath_exists('/my:a/text()', 'test The mandatory COLUMNS clause specifies the list of columns in the output table. - If the COLUMNS clause is omitted, the rows in the result - set contain a single column of type xml containing the - data matched by row_expression. 
+ If the COLUMNS clause is omitted, the rows in the result + set contain a single column of type xml containing the + data matched by row_expression. If COLUMNS is specified, each entry describes a single column. See the syntax summary above for the format. @@ -10604,10 +10604,10 @@ SELECT xpath_exists('/my:a/text()', 'test - The column_expression for a column is an XPath expression + The column_expression for a column is an XPath expression that is evaluated for each row, relative to the result of the - row_expression, to find the value of the column. - If no column_expression is given, then the column name + row_expression, to find the value of the column. + If no column_expression is given, then the column name is used as an implicit path. @@ -10615,55 +10615,55 @@ SELECT xpath_exists('/my:a/text()', 'testNULL). - Any xsi:nil attributes are ignored. + empty string (not NULL). + Any xsi:nil attributes are ignored. - The text body of the XML matched by the column_expression + The text body of the XML matched by the column_expression is used as the column value. Multiple text() nodes within an element are concatenated in order. Any child elements, processing instructions, and comments are ignored, but the text contents of child elements are concatenated to the result. - Note that the whitespace-only text() node between two non-text - elements is preserved, and that leading whitespace on a text() + Note that the whitespace-only text() node between two non-text + elements is preserved, and that leading whitespace on a text() node is not flattened. If the path expression does not match for a given row but - default_expression is specified, the value resulting + default_expression is specified, the value resulting from evaluating that expression is used. - If no DEFAULT clause is given for the column, - the field will be set to NULL. - It is possible for a default_expression to reference + If no DEFAULT clause is given for the column, + the field will be set to NULL. + It is possible for a default_expression to reference the value of output columns that appear prior to it in the column list, so the default of one column may be based on the value of another column. - Columns may be marked NOT NULL. If the - column_expression for a NOT NULL column - does not match anything and there is no DEFAULT or the - default_expression also evaluates to null, an error + Columns may be marked NOT NULL. If the + column_expression for a NOT NULL column + does not match anything and there is no DEFAULT or the + default_expression also evaluates to null, an error is reported. - Unlike regular PostgreSQL functions, column_expression - and default_expression are not evaluated to a simple + Unlike regular PostgreSQL functions, column_expression + and default_expression are not evaluated to a simple value before calling the function. - column_expression is normally evaluated - exactly once per input row, and default_expression + column_expression is normally evaluated + exactly once per input row, and default_expression is evaluated each time a default is needed for a field. If the expression qualifies as stable or immutable the repeat evaluation may be skipped. - Effectively xmltable behaves more like a subquery than a + Effectively xmltable behaves more like a subquery than a function call. 
This means that you can usefully use volatile functions like - nextval in default_expression, and - column_expression may depend on other parts of the + nextval in default_expression, and + column_expression may depend on other parts of the XML document. @@ -11029,7 +11029,7 @@ table2-mapping - <type>json</> and <type>jsonb</> Operators + <type>json</type> and <type>jsonb</type> Operators @@ -11059,14 +11059,14 @@ table2-mapping ->> int - Get JSON array element as text + Get JSON array element as text '[1,2,3]'::json->>2 3 ->> text - Get JSON object field as text + Get JSON object field as text '{"a":1,"b":2}'::json->>'b' 2 @@ -11080,7 +11080,7 @@ table2-mapping #>> text[] - Get JSON object at specified path as text + Get JSON object at specified path as text '{"a":[1,2,3],"b":[4,5,6]}'::json#>>'{a,2}' 3 @@ -11095,7 +11095,7 @@ table2-mapping The field/element/path extraction operators return the same type as their left-hand input (either json or jsonb), except for those specified as - returning text, which coerce the value to text. + returning text, which coerce the value to text. The field/element/path extraction operators return NULL, rather than failing, if the JSON input does not have the right structure to match the request; for example if no such element exists. The @@ -11115,14 +11115,14 @@ table2-mapping Some further operators also exist only for jsonb, as shown in . Many of these operators can be indexed by - jsonb operator classes. For a full description of - jsonb containment and existence semantics, see jsonb operator classes. For a full description of + jsonb containment and existence semantics, see . describes how these operators can be used to effectively index - jsonb data. + jsonb data.
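A small sketch of the extraction operators just described, showing the json-returning form, the text-returning form, and the NULL result for a missing key:

SELECT '{"a": {"b": 1}}'::json -> 'a';      -- {"b": 1}, returned as json
SELECT '{"a": {"b": 1}}'::json #>> '{a,b}'; -- 1, returned as text
SELECT '{"a": 1}'::json ->> 'missing';      -- NULL rather than an error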
- Additional <type>jsonb</> Operators + Additional <type>jsonb</type> Operators @@ -11211,7 +11211,7 @@ table2-mapping - The || operator concatenates the elements at the top level of + The || operator concatenates the elements at the top level of each of its operands. It does not operate recursively. For example, if both operands are objects with a common key field name, the value of the field in the result will just be the value from the right hand operand. @@ -11221,8 +11221,8 @@ table2-mapping shows the functions that are available for creating json and jsonb values. - (There are no jsonb equivalents of the row_to_json - and array_to_json functions. However, the to_jsonb + (There are no jsonb equivalents of the row_to_json + and array_to_json functions. However, the to_jsonb function supplies much the same functionality as these functions would.) @@ -11274,14 +11274,14 @@ table2-mapping to_jsonb(anyelement) - Returns the value as json or jsonb. + Returns the value as json or jsonb. Arrays and composites are converted (recursively) to arrays and objects; otherwise, if there is a cast from the type to json, the cast function will be used to perform the conversion; otherwise, a scalar value is produced. For any scalar type other than a number, a Boolean, or a null value, the text representation will be used, in such a fashion that it is a - valid json or jsonb value. + valid json or jsonb value. to_json('Fred said "Hi."'::text) "Fred said \"Hi.\"" @@ -11343,8 +11343,8 @@ table2-mapping such that each inner array has exactly two elements, which are taken as a key/value pair. - json_object('{a, 1, b, "def", c, 3.5}') - json_object('{{a, 1},{b, "def"},{c, 3.5}}') + json_object('{a, 1, b, "def", c, 3.5}') + json_object('{{a, 1},{b, "def"},{c, 3.5}}') {"a": "1", "b": "def", "c": "3.5"} @@ -11352,7 +11352,7 @@ table2-mapping jsonb_object(keys text[], values text[]) - This form of json_object takes keys and values pairwise from two separate + This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form. json_object('{a, b}', '{1,2}') @@ -11364,9 +11364,9 @@ table2-mapping - array_to_json and row_to_json have the same - behavior as to_json except for offering a pretty-printing - option. The behavior described for to_json likewise applies + array_to_json and row_to_json have the same + behavior as to_json except for offering a pretty-printing + option. The behavior described for to_json likewise applies to each individual value converted by the other JSON creation functions. @@ -11530,7 +11530,7 @@ table2-mapping setof key text, value text Expands the outermost JSON object into a set of key/value pairs. The - returned values will be of type text. + returned values will be of type text. select * from json_each_text('{"a":"foo", "b":"bar"}') @@ -11562,7 +11562,7 @@ table2-mapping text Returns JSON value pointed to by path_elems - as text + as text (equivalent to #>> operator). json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}','f4', 'f6') @@ -11593,7 +11593,7 @@ table2-mapping anyelement Expands the object in from_json to a row - whose columns match the record type defined by base + whose columns match the record type defined by base (see note below). 
select * from json_populate_record(null::myrowtype, '{"a": 1, "b": ["2", "a b"], "c": {"d": 4, "e": "a b c"}}') @@ -11613,7 +11613,7 @@ table2-mapping Expands the outermost array of objects in from_json to a set of rows whose - columns match the record type defined by base (see + columns match the record type defined by base (see note below). select * from json_populate_recordset(null::myrowtype, '[{"a":1,"b":2},{"a":3,"b":4}]') @@ -11653,7 +11653,7 @@ table2-mapping setof text - Expands a JSON array to a set of text values. + Expands a JSON array to a set of text values. select * from json_array_elements_text('["foo", "bar"]') @@ -11673,8 +11673,8 @@ table2-mapping Returns the type of the outermost JSON value as a text string. Possible types are - object, array, string, number, - boolean, and null. + object, array, string, number, + boolean, and null. json_typeof('-123.4') number @@ -11686,8 +11686,8 @@ table2-mapping record Builds an arbitrary record from a JSON object (see note below). As - with all functions returning record, the caller must - explicitly define the structure of the record with an AS + with all functions returning record, the caller must + explicitly define the structure of the record with an AS clause. select * from json_to_record('{"a":1,"b":[1,2,3],"c":[1,2,3],"e":"bar","r": {"a": 123, "b": "a b c"}}') as x(a int, b text, c int[], d text, r myrowtype) @@ -11706,9 +11706,9 @@ table2-mapping setof record Builds an arbitrary set of records from a JSON array of objects (see - note below). As with all functions returning record, the + note below). As with all functions returning record, the caller must explicitly define the structure of the record with - an AS clause. + an AS clause. select * from json_to_recordset('[{"a":1,"b":"foo"},{"a":"2","c":"bar"}]') as x(a int, b text); @@ -11743,7 +11743,7 @@ table2-mapping replaced by new_value, or with new_value added if create_missing is true ( default is - true) and the item + true) and the item designated by path does not exist. As with the path orientated operators, negative integers that appear in path count from the end @@ -11770,7 +11770,7 @@ table2-mapping path is in a JSONB array, new_value will be inserted before target or after if insert_after is true (default is - false). If target section + false). If target section designated by path is in JSONB object, new_value will be inserted only if target does not exist. As with the path @@ -11820,17 +11820,17 @@ table2-mapping Many of these functions and operators will convert Unicode escapes in JSON strings to the appropriate single character. This is a non-issue - if the input is type jsonb, because the conversion was already - done; but for json input, this may result in throwing an error, + if the input is type jsonb, because the conversion was already + done; but for json input, this may result in throwing an error, as noted in . - In json_populate_record, json_populate_recordset, - json_to_record and json_to_recordset, - type coercion from the JSON is best effort and may not result + In json_populate_record, json_populate_recordset, + json_to_record and json_to_recordset, + type coercion from the JSON is best effort and may not result in desired values for some types. JSON keys are matched to identical column names in the target row type. 
JSON fields that do not appear in the target row type will be omitted from the output, and @@ -11840,18 +11840,18 @@ table2-mapping - All the items of the path parameter of jsonb_set - as well as jsonb_insert except the last item must be present - in the target. If create_missing is false, all - items of the path parameter of jsonb_set must be - present. If these conditions are not met the target is + All the items of the path parameter of jsonb_set + as well as jsonb_insert except the last item must be present + in the target. If create_missing is false, all + items of the path parameter of jsonb_set must be + present. If these conditions are not met the target is returned unchanged. If the last path item is an object key, it will be created if it is absent and given the new value. If the last path item is an array index, if it is positive the item to set is found by counting from - the left, and if negative by counting from the right - -1 + the left, and if negative by counting from the right - -1 designates the rightmost element, and so on. If the item is out of the range -array_length .. array_length -1, and create_missing is true, the new value is added at the beginning @@ -11862,20 +11862,20 @@ table2-mapping - The json_typeof function's null return value + The json_typeof function's null return value should not be confused with a SQL NULL. While - calling json_typeof('null'::json) will - return null, calling json_typeof(NULL::json) + calling json_typeof('null'::json) will + return null, calling json_typeof(NULL::json) will return a SQL NULL. - If the argument to json_strip_nulls contains duplicate + If the argument to json_strip_nulls contains duplicate field names in any object, the result could be semantically somewhat different, depending on the order in which they occur. This is not an - issue for jsonb_strip_nulls since jsonb values never have + issue for jsonb_strip_nulls since jsonb values never have duplicate object field names. @@ -11886,7 +11886,7 @@ table2-mapping values as JSON, and the aggregate function json_object_agg which aggregates pairs of values into a JSON object, and their jsonb equivalents, - jsonb_agg and jsonb_object_agg. + jsonb_agg and jsonb_object_agg. @@ -11963,52 +11963,52 @@ table2-mapping The sequence to be operated on by a sequence function is specified by - a regclass argument, which is simply the OID of the sequence in the - pg_class system catalog. You do not have to look up the - OID by hand, however, since the regclass data type's input + a regclass argument, which is simply the OID of the sequence in the + pg_class system catalog. You do not have to look up the + OID by hand, however, since the regclass data type's input converter will do the work for you. Just write the sequence name enclosed in single quotes so that it looks like a literal constant. For compatibility with the handling of ordinary SQL names, the string will be converted to lower case unless it contains double quotes around the sequence name. 
Thus: -nextval('foo') operates on sequence foo -nextval('FOO') operates on sequence foo -nextval('"Foo"') operates on sequence Foo +nextval('foo') operates on sequence foo +nextval('FOO') operates on sequence foo +nextval('"Foo"') operates on sequence Foo The sequence name can be schema-qualified if necessary: -nextval('myschema.foo') operates on myschema.foo +nextval('myschema.foo') operates on myschema.foo nextval('"myschema".foo') same as above -nextval('foo') searches search path for foo +nextval('foo') searches search path for foo See for more information about - regclass. + regclass. Before PostgreSQL 8.1, the arguments of the - sequence functions were of type text, not regclass, and + sequence functions were of type text, not regclass, and the above-described conversion from a text string to an OID value would happen at run time during each call. For backward compatibility, this facility still exists, but internally it is now handled as an implicit - coercion from text to regclass before the function is + coercion from text to regclass before the function is invoked. When you write the argument of a sequence function as an unadorned - literal string, it becomes a constant of type regclass. + literal string, it becomes a constant of type regclass. Since this is really just an OID, it will track the originally identified sequence despite later renaming, schema reassignment, - etc. This early binding behavior is usually desirable for + etc. This early binding behavior is usually desirable for sequence references in column defaults and views. But sometimes you might - want late binding where the sequence reference is resolved + want late binding where the sequence reference is resolved at run time. To get late-binding behavior, force the constant to be - stored as a text constant instead of regclass: + stored as a text constant instead of regclass: -nextval('foo'::text) foo is looked up at runtime +nextval('foo'::text) foo is looked up at runtime Note that late binding was the only behavior supported in PostgreSQL releases before 8.1, so you @@ -12051,14 +12051,14 @@ nextval('foo'::text) foo is looked up at rolled back; that is, once a value has been fetched it is considered used and will not be returned again. This is true even if the surrounding transaction later aborts, or if the calling query ends - up not using the value. For example an INSERT with - an ON CONFLICT clause will compute the to-be-inserted + up not using the value. For example an INSERT with + an ON CONFLICT clause will compute the to-be-inserted tuple, including doing any required nextval calls, before detecting any conflict that would cause it to follow - the ON CONFLICT rule instead. Such cases will leave + the ON CONFLICT rule instead. Such cases will leave unused holes in the sequence of assigned values. - Thus, PostgreSQL sequence objects cannot - be used to obtain gapless sequences. + Thus, PostgreSQL sequence objects cannot + be used to obtain gapless sequences. @@ -12094,7 +12094,7 @@ nextval('foo'::text) foo is looked up at Return the value most recently returned by - nextval in the current session. This function is + nextval in the current session. 
This function is identical to currval, except that instead of taking the sequence name as an argument it refers to whichever sequence nextval was most recently applied to @@ -12119,20 +12119,20 @@ nextval('foo'::text) foo is looked up at specified value and sets its is_called field to true, meaning that the next nextval will advance the sequence before - returning a value. The value reported by currval is + returning a value. The value reported by currval is also set to the specified value. In the three-parameter form, is_called can be set to either true - or false. true has the same effect as + or false. true has the same effect as the two-parameter form. If it is set to false, the next nextval will return exactly the specified value, and sequence advancement commences with the following nextval. Furthermore, the value reported by - currval is not changed in this case. For example, + currval is not changed in this case. For example, -SELECT setval('foo', 42); Next nextval will return 43 +SELECT setval('foo', 42); Next nextval will return 43 SELECT setval('foo', 42, true); Same as above -SELECT setval('foo', 42, false); Next nextval will return 42 +SELECT setval('foo', 42, false); Next nextval will return 42 The result returned by setval is just the value of its @@ -12183,7 +12183,7 @@ SELECT setval('foo', 42, false); Next nextval wi - <literal>CASE</> + <literal>CASE</literal> The SQL CASE expression is a @@ -12206,7 +12206,7 @@ END condition's result is not true, any subsequent WHEN clauses are examined in the same manner. If no WHEN condition yields true, the value of the - CASE expression is the result of the + CASE expression is the result of the ELSE clause. If the ELSE clause is omitted and no condition is true, the result is null. @@ -12245,7 +12245,7 @@ SELECT a, - There is a simple form of CASE expression + There is a simple form of CASE expression that is a variant of the general form above: @@ -12299,7 +12299,7 @@ SELECT ... WHERE CASE WHEN x <> 0 THEN y/x > 1.5 ELSE false END; situations in which subexpressions of an expression are evaluated at different times, so that the principle that CASE evaluates only necessary subexpressions is not ironclad. For - example a constant 1/0 subexpression will usually result in + example a constant 1/0 subexpression will usually result in a division-by-zero failure at planning time, even if it's within a CASE arm that would never be entered at run time. @@ -12307,7 +12307,7 @@ SELECT ... WHERE CASE WHEN x <> 0 THEN y/x > 1.5 ELSE false END; - <literal>COALESCE</> + <literal>COALESCE</literal> COALESCE @@ -12333,8 +12333,8 @@ SELECT ... WHERE CASE WHEN x <> 0 THEN y/x > 1.5 ELSE false END; SELECT COALESCE(description, short_description, '(none)') ... - This returns description if it is not null, otherwise - short_description if it is not null, otherwise (none). + This returns description if it is not null, otherwise + short_description if it is not null, otherwise (none). @@ -12342,13 +12342,13 @@ SELECT COALESCE(description, short_description, '(none)') ... evaluates the arguments that are needed to determine the result; that is, arguments to the right of the first non-null argument are not evaluated. This SQL-standard function provides capabilities similar - to NVL and IFNULL, which are used in some other + to NVL and IFNULL, which are used in some other database systems. - <literal>NULLIF</> + <literal>NULLIF</literal> NULLIF @@ -12369,7 +12369,7 @@ SELECT NULLIF(value, '(none)') ... 
- In this example, if value is (none), + In this example, if value is (none), null is returned, otherwise the value of value is returned. @@ -12394,7 +12394,7 @@ SELECT NULLIF(value, '(none)') ... - The GREATEST and LEAST functions select the + The GREATEST and LEAST functions select the largest or smallest value from a list of any number of expressions. The expressions must all be convertible to a common data type, which will be the type of the result @@ -12404,7 +12404,7 @@ SELECT NULLIF(value, '(none)') ... - Note that GREATEST and LEAST are not in + Note that GREATEST and LEAST are not in the SQL standard, but are a common extension. Some other databases make them return NULL if any argument is NULL, rather than only when all are NULL. @@ -12534,7 +12534,7 @@ SELECT NULLIF(value, '(none)') ... If the contents of two arrays are equal but the dimensionality is different, the first difference in the dimensionality information determines the sort order. (This is a change from versions of - PostgreSQL prior to 8.2: older versions would claim + PostgreSQL prior to 8.2: older versions would claim that two arrays with the same contents were equal, even if the number of dimensions or subscript ranges were different.) @@ -12833,7 +12833,7 @@ NULL baz(3 rows)
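Returning to GREATEST and LEAST described above, a minimal sketch of the null handling that distinguishes PostgreSQL from the other databases mentioned there:

SELECT GREATEST(1, 2, NULL);   -- 2; null arguments are ignored
SELECT LEAST(NULL, NULL);      -- null, only because all arguments are null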
- In array_position and array_positions, + In array_position and array_positions, each array element is compared to the searched value using IS NOT DISTINCT FROM semantics. @@ -12868,8 +12868,8 @@ NULL baz(3 rows) - There are two differences in the behavior of string_to_array - from pre-9.1 versions of PostgreSQL. + There are two differences in the behavior of string_to_array + from pre-9.1 versions of PostgreSQL. First, it will return an empty (zero-element) array rather than NULL when the input string is of zero length. Second, if the delimiter string is NULL, the function splits the input into individual characters, rather @@ -13198,7 +13198,7 @@ NULL baz(3 rows) - The lower and upper functions return null + The lower and upper functions return null if the range is empty or the requested bound is infinite. The lower_inc, upper_inc, lower_inf, and upper_inf @@ -13550,7 +13550,7 @@ NULL baz(3 rows) smallint, int, bigint, real, double precision, numeric, - interval, or money + interval, or money bigint for smallint or @@ -13647,7 +13647,7 @@ SELECT count(*) FROM sometable; aggregate functions, produce meaningfully different result values depending on the order of the input values. This ordering is unspecified by default, but can be controlled by writing an - ORDER BY clause within the aggregate call, as shown in + ORDER BY clause within the aggregate call, as shown in . Alternatively, supplying the input values from a sorted subquery will usually work. For example: @@ -14082,9 +14082,9 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; shows some - aggregate functions that use the ordered-set aggregate + aggregate functions that use the ordered-set aggregate syntax. These functions are sometimes referred to as inverse - distribution functions. + distribution functions. @@ -14249,7 +14249,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; window function of the same name defined in . In each case, the aggregate result is the value that the associated window function would have - returned for the hypothetical row constructed from + returned for the hypothetical row constructed from args, if such a row had been added to the sorted group of rows computed from the sorted_args. @@ -14280,10 +14280,10 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; rank(args) WITHIN GROUP (ORDER BY sorted_args) - VARIADIC "any" + VARIADIC "any" - VARIADIC "any" + VARIADIC "any" bigint @@ -14303,10 +14303,10 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; dense_rank(args) WITHIN GROUP (ORDER BY sorted_args) - VARIADIC "any" + VARIADIC "any" - VARIADIC "any" + VARIADIC "any" bigint @@ -14326,10 +14326,10 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; percent_rank(args) WITHIN GROUP (ORDER BY sorted_args) - VARIADIC "any" + VARIADIC "any" - VARIADIC "any" + VARIADIC "any" double precision @@ -14349,10 +14349,10 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; cume_dist(args) WITHIN GROUP (ORDER BY sorted_args) - VARIADIC "any" + VARIADIC "any" - VARIADIC "any" + VARIADIC "any" double precision @@ -14360,7 +14360,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; No relative rank of the hypothetical row, ranging from - 1/N to 1 + 1/N to 1 @@ -14374,7 +14374,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; the aggregated arguments given in sorted_args. 
Unlike most built-in aggregates, these aggregates are not strict, that is they do not drop input rows containing nulls. Null values sort according - to the rule specified in the ORDER BY clause. + to the rule specified in the ORDER BY clause. @@ -14413,14 +14413,14 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; Grouping operations are used in conjunction with grouping sets (see ) to distinguish result rows. The - arguments to the GROUPING operation are not actually evaluated, - but they must match exactly expressions given in the GROUP BY + arguments to the GROUPING operation are not actually evaluated, + but they must match exactly expressions given in the GROUP BY clause of the associated query level. Bits are assigned with the rightmost argument being the least-significant bit; each bit is 0 if the corresponding expression is included in the grouping criteria of the grouping set generating the result row, and 1 if it is not. For example: -=> SELECT * FROM items_sold; +=> SELECT * FROM items_sold; make | model | sales -------+-------+------- Foo | GT | 10 @@ -14429,7 +14429,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; Bar | Sport | 5 (4 rows) -=> SELECT make, model, GROUPING(make,model), sum(sales) FROM items_sold GROUP BY ROLLUP(make,model); +=> SELECT make, model, GROUPING(make,model), sum(sales) FROM items_sold GROUP BY ROLLUP(make,model); make | model | grouping | sum -------+-------+----------+----- Foo | GT | 0 | 10 @@ -14464,8 +14464,8 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; The built-in window functions are listed in . Note that these functions - must be invoked using window function syntax, i.e., an - OVER clause is required. + must be invoked using window function syntax, i.e., an + OVER clause is required. @@ -14474,7 +14474,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; aggregate (i.e., not ordered-set or hypothetical-set aggregates) can be used as a window function; see for a list of the built-in aggregates. - Aggregate functions act as window functions only when an OVER + Aggregate functions act as window functions only when an OVER clause follows the call; otherwise they act as non-window aggregates and return a single row for the entire set. 
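To illustrate the distinction just made, a minimal sketch assuming a hypothetical empsalary(depname, empno, salary) table: with an OVER clause, avg acts as a window function and one row is returned for every input row, while the plain aggregate call collapses the set to a single row.

SELECT depname, empno, salary,
       avg(salary) OVER (PARTITION BY depname)   -- per-department average on every row
FROM empsalary;

SELECT avg(salary) FROM empsalary;               -- non-window aggregate: one row total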
@@ -14515,7 +14515,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; bigint - rank of the current row with gaps; same as row_number of its first peer + rank of the current row with gaps; same as row_number of its first peer @@ -14541,7 +14541,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; double precision - relative rank of the current row: (rank - 1) / (total partition rows - 1) + relative rank of the current row: (rank - 1) / (total partition rows - 1) @@ -14562,7 +14562,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; ntile - ntile(num_buckets integer) + ntile(num_buckets integer) integer @@ -14577,9 +14577,9 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; lag - lag(value anyelement - [, offset integer - [, default anyelement ]]) + lag(value anyelement + [, offset integer + [, default anyelement ]]) @@ -14606,9 +14606,9 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; lead - lead(value anyelement - [, offset integer - [, default anyelement ]]) + lead(value anyelement + [, offset integer + [, default anyelement ]]) @@ -14634,7 +14634,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; first_value - first_value(value any) + first_value(value any) same type as value @@ -14650,7 +14650,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; last_value - last_value(value any) + last_value(value any) same type as value @@ -14667,7 +14667,7 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; nth_value - nth_value(value any, nth integer) + nth_value(value any, nth integer) @@ -14686,22 +14686,22 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; All of the functions listed in depend on the sort ordering - specified by the ORDER BY clause of the associated window + specified by the ORDER BY clause of the associated window definition. Rows that are not distinct when considering only the - ORDER BY columns are said to be peers. - The four ranking functions (including cume_dist) are + ORDER BY columns are said to be peers. + The four ranking functions (including cume_dist) are defined so that they give the same answer for all peer rows. - Note that first_value, last_value, and - nth_value consider only the rows within the window - frame, which by default contains the rows from the start of the + Note that first_value, last_value, and + nth_value consider only the rows within the window + frame, which by default contains the rows from the start of the partition through the last peer of the current row. This is - likely to give unhelpful results for last_value and - sometimes also nth_value. You can redefine the frame by - adding a suitable frame specification (RANGE or - ROWS) to the OVER clause. + likely to give unhelpful results for last_value and + sometimes also nth_value. You can redefine the frame by + adding a suitable frame specification (RANGE or + ROWS) to the OVER clause. See for more information about frame specifications. @@ -14709,34 +14709,34 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; When an aggregate function is used as a window function, it aggregates over the rows within the current row's window frame. 
- An aggregate used with ORDER BY and the default window frame - definition produces a running sum type of behavior, which may or + An aggregate used with ORDER BY and the default window frame + definition produces a running sum type of behavior, which may or may not be what's wanted. To obtain - aggregation over the whole partition, omit ORDER BY or use - ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING. + aggregation over the whole partition, omit ORDER BY or use + ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING. Other frame specifications can be used to obtain other effects. - The SQL standard defines a RESPECT NULLS or - IGNORE NULLS option for lead, lag, - first_value, last_value, and - nth_value. This is not implemented in + The SQL standard defines a RESPECT NULLS or + IGNORE NULLS option for lead, lag, + first_value, last_value, and + nth_value. This is not implemented in PostgreSQL: the behavior is always the - same as the standard's default, namely RESPECT NULLS. - Likewise, the standard's FROM FIRST or FROM LAST - option for nth_value is not implemented: only the - default FROM FIRST behavior is supported. (You can achieve - the result of FROM LAST by reversing the ORDER BY + same as the standard's default, namely RESPECT NULLS. + Likewise, the standard's FROM FIRST or FROM LAST + option for nth_value is not implemented: only the + default FROM FIRST behavior is supported. (You can achieve + the result of FROM LAST by reversing the ORDER BY ordering.) - cume_dist computes the fraction of partition rows that + cume_dist computes the fraction of partition rows that are less than or equal to the current row and its peers, while - percent_rank computes the fraction of partition rows that + percent_rank computes the fraction of partition rows that are less than the current row, assuming the current row does not exist in the partition. @@ -14789,12 +14789,12 @@ EXISTS (subquery) - The argument of EXISTS is an arbitrary SELECT statement, + The argument of EXISTS is an arbitrary SELECT statement, or subquery. The subquery is evaluated to determine whether it returns any rows. If it returns at least one row, the result of EXISTS is - true; if the subquery returns no rows, the result of EXISTS - is false. + true; if the subquery returns no rows, the result of EXISTS + is false. @@ -14814,15 +14814,15 @@ EXISTS (subquery) Since the result depends only on whether any rows are returned, and not on the contents of those rows, the output list of the subquery is normally unimportant. A common coding convention is - to write all EXISTS tests in the form + to write all EXISTS tests in the form EXISTS(SELECT 1 WHERE ...). There are exceptions to this rule however, such as subqueries that use INTERSECT. - This simple example is like an inner join on col2, but - it produces at most one output row for each tab1 row, - even if there are several matching tab2 rows: + This simple example is like an inner join on col2, but + it produces at most one output row for each tab1 row, + even if there are several matching tab2 rows: SELECT col1 FROM tab1 @@ -14842,8 +14842,8 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result. - The result of IN is true if any equal subquery row is found. - The result is false if no equal row is found (including the + The result of IN is true if any equal subquery row is found. 
+ The result is false if no equal row is found (including the case where the subquery returns no rows). @@ -14871,8 +14871,8 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); subquery, which must return exactly as many columns as there are expressions in the left-hand row. The left-hand expressions are evaluated and compared row-wise to each row of the subquery result. - The result of IN is true if any equal subquery row is found. - The result is false if no equal row is found (including the + The result of IN is true if any equal subquery row is found. + The result is false if no equal row is found (including the case where the subquery returns no rows). @@ -14898,9 +14898,9 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The right-hand side is a parenthesized subquery, which must return exactly one column. The left-hand expression is evaluated and compared to each row of the subquery result. - The result of NOT IN is true if only unequal subquery rows + The result of NOT IN is true if only unequal subquery rows are found (including the case where the subquery returns no rows). - The result is false if any equal row is found. + The result is false if any equal row is found. @@ -14927,9 +14927,9 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); subquery, which must return exactly as many columns as there are expressions in the left-hand row. The left-hand expressions are evaluated and compared row-wise to each row of the subquery result. - The result of NOT IN is true if only unequal subquery rows + The result of NOT IN is true if only unequal subquery rows are found (including the case where the subquery returns no rows). - The result is false if any equal row is found. + The result is false if any equal row is found. @@ -14957,8 +14957,8 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); is evaluated and compared to each row of the subquery result using the given operator, which must yield a Boolean result. - The result of ANY is true if any true result is obtained. - The result is false if no true result is found (including the + The result of ANY is true if any true result is obtained. + The result is false if no true result is found (including the case where the subquery returns no rows). @@ -14981,8 +14981,8 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); -row_constructor operator ANY (subquery) -row_constructor operator SOME (subquery) +row_constructor operator ANY (subquery) +row_constructor operator SOME (subquery) @@ -14993,9 +14993,9 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); expressions in the left-hand row. The left-hand expressions are evaluated and compared row-wise to each row of the subquery result, using the given operator. - The result of ANY is true if the comparison + The result of ANY is true if the comparison returns true for any subquery row. - The result is false if the comparison returns false for every + The result is false if the comparison returns false for every subquery row (including the case where the subquery returns no rows). The result is NULL if the comparison does not return true for any row, @@ -15021,9 +15021,9 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); is evaluated and compared to each row of the subquery result using the given operator, which must yield a Boolean result. - The result of ALL is true if all rows yield true + The result of ALL is true if all rows yield true (including the case where the subquery returns no rows). 
- The result is false if any false result is found. + The result is false if any false result is found. The result is NULL if the comparison does not return false for any row, and it returns NULL for at least one row. @@ -15049,10 +15049,10 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); expressions in the left-hand row. The left-hand expressions are evaluated and compared row-wise to each row of the subquery result, using the given operator. - The result of ALL is true if the comparison + The result of ALL is true if the comparison returns true for all subquery rows (including the case where the subquery returns no rows). - The result is false if the comparison returns false for any + The result is false if the comparison returns false for any subquery row. The result is NULL if the comparison does not return false for any subquery row, and it returns NULL for at least one row. @@ -15165,7 +15165,7 @@ WHERE EXISTS (SELECT 1 FROM tab2 WHERE col2 = tab1.col2); The right-hand side is a parenthesized list - of scalar expressions. The result is true if the left-hand expression's + of scalar expressions. The result is true if the left-hand expression's result is equal to any of the right-hand expressions. This is a shorthand notation for @@ -15243,8 +15243,8 @@ AND is evaluated and compared to each element of the array using the given operator, which must yield a Boolean result. - The result of ANY is true if any true result is obtained. - The result is false if no true result is found (including the + The result of ANY is true if any true result is obtained. + The result is false if no true result is found (including the case where the array has zero elements). @@ -15279,9 +15279,9 @@ AND is evaluated and compared to each element of the array using the given operator, which must yield a Boolean result. - The result of ALL is true if all comparisons yield true + The result of ALL is true if all comparisons yield true (including the case where the array has zero elements). - The result is false if any false result is found. + The result is false if any false result is found. @@ -15310,12 +15310,12 @@ AND The two row values must have the same number of fields. Each side is evaluated and they are compared row-wise. Row constructor comparisons are allowed when the operator is - =, - <>, - <, - <=, - > or - >=. + =, + <>, + <, + <=, + > or + >=. Every row element must be of a type which has a default B-tree operator class or the attempted comparison may generate an error. @@ -15328,7 +15328,7 @@ AND - The = and <> cases work slightly differently + The = and <> cases work slightly differently from the others. Two rows are considered equal if all their corresponding members are non-null and equal; the rows are unequal if any corresponding members are non-null and unequal; @@ -15336,13 +15336,13 @@ AND - For the <, <=, > and - >= cases, the row elements are compared left-to-right, + For the <, <=, > and + >= cases, the row elements are compared left-to-right, stopping as soon as an unequal or null pair of elements is found. If either of this pair of elements is null, the result of the row comparison is unknown (null); otherwise comparison of this pair of elements determines the result. For example, - ROW(1,2,NULL) < ROW(1,3,0) + ROW(1,2,NULL) < ROW(1,3,0) yields true, not null, because the third pair of elements are not considered. @@ -15350,13 +15350,13 @@ AND Prior to PostgreSQL 8.2, the - <, <=, > and >= + <, <=, > and >= cases were not handled per SQL specification. 
A comparison like - ROW(a,b) < ROW(c,d) + ROW(a,b) < ROW(c,d) was implemented as - a < c AND b < d + a < c AND b < d whereas the correct behavior is equivalent to - a < c OR (a = c AND b < d). + a < c OR (a = c AND b < d). @@ -15409,15 +15409,15 @@ AND Each side is evaluated and they are compared row-wise. Composite type comparisons are allowed when the operator is - =, - <>, - <, - <=, - > or - >=, + =, + <>, + <, + <=, + > or + >=, or has semantics similar to one of these. (To be specific, an operator can be a row comparison operator if it is a member of a B-tree operator - class, or is the negator of the = member of a B-tree operator + class, or is the negator of the = member of a B-tree operator class.) The default behavior of the above operators is the same as for IS [ NOT ] DISTINCT FROM for row constructors (see ). @@ -15427,12 +15427,12 @@ AND To support matching of rows which include elements without a default B-tree operator class, the following operators are defined for composite type comparison: - *=, - *<>, - *<, - *<=, - *>, and - *>=. + *=, + *<>, + *<, + *<=, + *>, and + *>=. These operators compare the internal binary representation of the two rows. Two rows might have a different binary representation even though comparisons of the two rows with the equality operator is true. @@ -15501,7 +15501,7 @@ AND - generate_series(start, stop, step interval) + generate_series(start, stop, step interval) timestamp or timestamp with time zone setof timestamp or setof timestamp with time zone (same as argument type) @@ -15616,7 +15616,7 @@ SELECT * FROM generate_series('2008-03-01 00:00'::timestamp, - generate_subscripts is a convenience function that generates + generate_subscripts is a convenience function that generates the set of valid subscripts for the specified dimension of the given array. Zero rows are returned for arrays that do not have the requested dimension, @@ -15681,7 +15681,7 @@ SELECT * FROM unnest2(ARRAY[[1,2],[3,4]]); by WITH ORDINALITY, a bigint column is appended to the output which starts from 1 and increments by 1 for each row of the function's output. This is most useful in the case of set returning - functions such as unnest(). + functions such as unnest(). -- set returning function WITH ORDINALITY @@ -15825,7 +15825,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); - pg_current_logfile(text) + pg_current_logfile(text) text Primary log file name, or log in the requested format, currently in use by the logging collector @@ -15870,7 +15870,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); pg_trigger_depth() int - current nesting level of PostgreSQL triggers + current nesting level of PostgreSQL triggers (0 if not called, directly or indirectly, from inside a trigger) @@ -15889,7 +15889,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); version() text - PostgreSQL version information. See also for a machine-readable version. + PostgreSQL version information. See also for a machine-readable version. @@ -15979,7 +15979,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); current_role and user are synonyms for current_user. (The SQL standard draws a distinction between current_role - and current_user, but PostgreSQL + and current_user, but PostgreSQL does not, since it unifies users and roles into a single kind of entity.) @@ -15990,7 +15990,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); other named objects that are created without specifying a target schema. 
current_schemas(boolean) returns an array of the names of all schemas presently in the search path. The Boolean option determines whether or not - implicitly included system schemas such as pg_catalog are included in the + implicitly included system schemas such as pg_catalog are included in the returned search path. @@ -15998,7 +15998,7 @@ SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n); The search path can be altered at run time. The command is: -SET search_path TO schema , schema, ... +SET search_path TO schema , schema, ... @@ -16043,7 +16043,7 @@ SET search_path TO schema , schema, .. waiting for a lock that would conflict with the blocked process's lock request and is ahead of it in the wait queue (soft block). When using parallel queries the result always lists client-visible process IDs (that - is, pg_backend_pid results) even if the actual lock is held + is, pg_backend_pid results) even if the actual lock is held or awaited by a child worker process. As a result of that, there may be duplicated PIDs in the result. Also note that when a prepared transaction holds a conflicting lock, it will be represented by a zero process ID in @@ -16095,15 +16095,15 @@ SET search_path TO schema , schema, .. is NULL. When multiple log files exist, each in a different format, pg_current_logfile called without arguments returns the path of the file having the first format - found in the ordered list: stderr, csvlog. + found in the ordered list: stderr, csvlog. NULL is returned when no log file has any of these formats. To request a specific file format supply, as text, - either csvlog or stderr as the value of the + either csvlog or stderr as the value of the optional parameter. The return value is NULL when the log format requested is not a configured . The pg_current_logfiles reflects the contents of the - current_logfiles file. + current_logfiles file. @@ -16460,7 +16460,7 @@ SET search_path TO schema , schema, .. has_table_privilege checks whether a user can access a table in a particular way. The user can be specified by name, by OID (pg_authid.oid), - public to indicate the PUBLIC pseudo-role, or if the argument is + public to indicate the PUBLIC pseudo-role, or if the argument is omitted current_user is assumed. The table can be specified by name or by OID. (Thus, there are actually six variants of @@ -16470,12 +16470,12 @@ SET search_path TO schema , schema, .. The desired access privilege type is specified by a text string, which must evaluate to one of the values SELECT, INSERT, - UPDATE, DELETE, TRUNCATE, + UPDATE, DELETE, TRUNCATE, REFERENCES, or TRIGGER. Optionally, - WITH GRANT OPTION can be added to a privilege type to test + WITH GRANT OPTION can be added to a privilege type to test whether the privilege is held with grant option. Also, multiple privilege types can be listed separated by commas, in which case the result will - be true if any of the listed privileges is held. + be true if any of the listed privileges is held. (Case of the privilege string is not significant, and extra whitespace is allowed between but not within privilege names.) Some examples: @@ -16499,7 +16499,7 @@ SELECT has_table_privilege('joe', 'mytable', 'INSERT, SELECT WITH GRANT OPTION') has_any_column_privilege checks whether a user can access any column of a table in a particular way. 
Its argument possibilities - are analogous to has_table_privilege, + are analogous to has_table_privilege, except that the desired access privilege type must evaluate to some combination of SELECT, @@ -16508,8 +16508,8 @@ SELECT has_table_privilege('joe', 'mytable', 'INSERT, SELECT WITH GRANT OPTION') REFERENCES. Note that having any of these privileges at the table level implicitly grants it for each column of the table, so has_any_column_privilege will always return - true if has_table_privilege does for the same - arguments. But has_any_column_privilege also succeeds if + true if has_table_privilege does for the same + arguments. But has_any_column_privilege also succeeds if there is a column-level grant of the privilege for at least one column. @@ -16547,7 +16547,7 @@ SELECT has_table_privilege('joe', 'mytable', 'INSERT, SELECT WITH GRANT OPTION') Its argument possibilities are analogous to has_table_privilege. When specifying a function by a text string rather than by OID, - the allowed input is the same as for the regprocedure data type + the allowed input is the same as for the regprocedure data type (see ). The desired access privilege type must evaluate to EXECUTE. @@ -16609,7 +16609,7 @@ SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute'); Its argument possibilities are analogous to has_table_privilege. When specifying a type by a text string rather than by OID, - the allowed input is the same as for the regtype data type + the allowed input is the same as for the regtype data type (see ). The desired access privilege type must evaluate to USAGE. @@ -16620,14 +16620,14 @@ SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute'); can access a role in a particular way. Its argument possibilities are analogous to has_table_privilege, - except that public is not allowed as a user name. + except that public is not allowed as a user name. The desired access privilege type must evaluate to some combination of MEMBER or USAGE. MEMBER denotes direct or indirect membership in - the role (that is, the right to do SET ROLE), while + the role (that is, the right to do SET ROLE), while USAGE denotes whether the privileges of the role - are immediately available without doing SET ROLE. + are immediately available without doing SET ROLE. @@ -16639,7 +16639,7 @@ SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute'); shows functions that - determine whether a certain object is visible in the + determine whether a certain object is visible in the current schema search path. For example, a table is said to be visible if its containing schema is in the search path and no table of the same @@ -16793,16 +16793,16 @@ SELECT relname FROM pg_class WHERE pg_table_is_visible(oid); pg_type_is_visible can also be used with domains. For functions and operators, an object in the search path is visible if there is no object of the same name - and argument data type(s) earlier in the path. For operator + and argument data type(s) earlier in the path. For operator classes, both name and associated index access method are considered. All these functions require object OIDs to identify the object to be checked. 
If you want to test an object by name, it is convenient to use - the OID alias types (regclass, regtype, - regprocedure, regoperator, regconfig, - or regdictionary), + the OID alias types (regclass, regtype, + regprocedure, regoperator, regconfig, + or regdictionary), for example: SELECT pg_type_is_visible('myschema.widget'::regtype); @@ -16949,7 +16949,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); - format_type(type_oid, typemod) + format_type(type_oid, typemod) text get SQL name of a data type @@ -16959,18 +16959,18 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); get definition of a constraint - pg_get_constraintdef(constraint_oid, pretty_bool) + pg_get_constraintdef(constraint_oid, pretty_bool) text get definition of a constraint - pg_get_expr(pg_node_tree, relation_oid) + pg_get_expr(pg_node_tree, relation_oid) text decompile internal form of an expression, assuming that any Vars in it refer to the relation indicated by the second parameter - pg_get_expr(pg_node_tree, relation_oid, pretty_bool) + pg_get_expr(pg_node_tree, relation_oid, pretty_bool) text decompile internal form of an expression, assuming that any Vars in it refer to the relation indicated by the second parameter @@ -16993,19 +16993,19 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); pg_get_function_result(func_oid) text - get RETURNS clause for function + get RETURNS clause for function pg_get_indexdef(index_oid) text - get CREATE INDEX command for index + get CREATE INDEX command for index - pg_get_indexdef(index_oid, column_no, pretty_bool) + pg_get_indexdef(index_oid, column_no, pretty_bool) text - get CREATE INDEX command for index, + get CREATE INDEX command for index, or definition of just one index column when - column_no is not zero + column_no is not zero pg_get_keywords() @@ -17015,12 +17015,12 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); pg_get_ruledef(rule_oid) text - get CREATE RULE command for rule + get CREATE RULE command for rule - pg_get_ruledef(rule_oid, pretty_bool) + pg_get_ruledef(rule_oid, pretty_bool) text - get CREATE RULE command for rule + get CREATE RULE command for rule pg_get_serial_sequence(table_name, column_name) @@ -17030,17 +17030,17 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); pg_get_statisticsobjdef(statobj_oid) text - get CREATE STATISTICS command for extended statistics object + get CREATE STATISTICS command for extended statistics object pg_get_triggerdef(trigger_oid) text - get CREATE [ CONSTRAINT ] TRIGGER command for trigger + get CREATE [ CONSTRAINT ] TRIGGER command for trigger - pg_get_triggerdef(trigger_oid, pretty_bool) + pg_get_triggerdef(trigger_oid, pretty_bool) text - get CREATE [ CONSTRAINT ] TRIGGER command for trigger + get CREATE [ CONSTRAINT ] TRIGGER command for trigger pg_get_userbyid(role_oid) @@ -17053,7 +17053,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); get underlying SELECT command for view or materialized view (deprecated) - pg_get_viewdef(view_name, pretty_bool) + pg_get_viewdef(view_name, pretty_bool) text get underlying SELECT command for view or materialized view (deprecated) @@ -17063,29 +17063,29 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); get underlying SELECT command for view or materialized view - pg_get_viewdef(view_oid, pretty_bool) + pg_get_viewdef(view_oid, pretty_bool) text get underlying SELECT command for view or materialized view - pg_get_viewdef(view_oid, wrap_column_int) + pg_get_viewdef(view_oid, wrap_column_int) text get underlying SELECT command 
for view or materialized view; lines with fields are wrapped to specified number of columns, pretty-printing is implied - pg_index_column_has_property(index_oid, column_no, prop_name) + pg_index_column_has_property(index_oid, column_no, prop_name) boolean test whether an index column has a specified property - pg_index_has_property(index_oid, prop_name) + pg_index_has_property(index_oid, prop_name) boolean test whether an index has a specified property - pg_indexam_has_property(am_oid, prop_name) + pg_indexam_has_property(am_oid, prop_name) boolean test whether an index access method has a specified property @@ -17166,11 +17166,11 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); pg_get_keywords returns a set of records describing - the SQL keywords recognized by the server. The word column - contains the keyword. The catcode column contains a - category code: U for unreserved, C for column name, - T for type or function name, or R for reserved. - The catdesc column contains a possibly-localized string + the SQL keywords recognized by the server. The word column + contains the keyword. The catcode column contains a + category code: U for unreserved, C for column name, + T for type or function name, or R for reserved. + The catdesc column contains a possibly-localized string describing the category. @@ -17187,26 +17187,26 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); catalogs. If the expression might contain Vars, specify the OID of the relation they refer to as the second parameter; if no Vars are expected, zero is sufficient. pg_get_viewdef reconstructs the - SELECT query that defines a view. Most of these functions come - in two variants, one of which can optionally pretty-print the + SELECT query that defines a view. Most of these functions come + in two variants, one of which can optionally pretty-print the result. The pretty-printed format is more readable, but the default format is more likely to be interpreted the same way by future versions of - PostgreSQL; avoid using pretty-printed output for dump - purposes. Passing false for the pretty-print parameter yields + PostgreSQL; avoid using pretty-printed output for dump + purposes. Passing false for the pretty-print parameter yields the same result as the variant that does not have the parameter at all. - pg_get_functiondef returns a complete - CREATE OR REPLACE FUNCTION statement for a function. + pg_get_functiondef returns a complete + CREATE OR REPLACE FUNCTION statement for a function. pg_get_function_arguments returns the argument list of a function, in the form it would need to appear in within - CREATE FUNCTION. + CREATE FUNCTION. pg_get_function_result similarly returns the - appropriate RETURNS clause for the function. + appropriate RETURNS clause for the function. pg_get_function_identity_arguments returns the argument list necessary to identify a function, in the form it - would need to appear in within ALTER FUNCTION, for + would need to appear in within ALTER FUNCTION, for instance. This form omits default values. @@ -17219,10 +17219,10 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); (serial, smallserial, bigserial), it is the sequence created for that serial column definition. In the latter case, this association can be modified or removed with ALTER - SEQUENCE OWNED BY. (The function probably should have been called + SEQUENCE OWNED BY. 
(The function probably should have been called pg_get_owned_sequence; its current name reflects the - fact that it has typically been used with serial - or bigserial columns.) The first input parameter is a table name + fact that it has typically been used with serial + or bigserial columns.) The first input parameter is a table name with optional schema, and the second parameter is a column name. Because the first parameter is potentially a schema and table, it is not treated as a double-quoted identifier, meaning it is lower cased by default, while the @@ -17290,8 +17290,8 @@ SELECT currval(pg_get_serial_sequence('sometable', 'id')); distance_orderable - Can the column be scanned in order by a distance - operator, for example ORDER BY col <-> constant ? + Can the column be scanned in order by a distance + operator, for example ORDER BY col <-> constant ? @@ -17301,14 +17301,14 @@ SELECT currval(pg_get_serial_sequence('sometable', 'id')); search_array - Does the column natively support col = ANY(array) + Does the column natively support col = ANY(array) searches? search_nulls - Does the column support IS NULL and - IS NOT NULL searches? + Does the column support IS NULL and + IS NOT NULL searches? @@ -17324,7 +17324,7 @@ SELECT currval(pg_get_serial_sequence('sometable', 'id')); clusterable - Can the index be used in a CLUSTER command? + Can the index be used in a CLUSTER command? @@ -17355,9 +17355,9 @@ SELECT currval(pg_get_serial_sequence('sometable', 'id')); can_order - Does the access method support ASC, - DESC and related keywords in - CREATE INDEX? + Does the access method support ASC, + DESC and related keywords in + CREATE INDEX? @@ -17382,9 +17382,9 @@ SELECT currval(pg_get_serial_sequence('sometable', 'id')); pg_options_to_table returns the set of storage option name/value pairs - (option_name/option_value) when passed - pg_class.reloptions or - pg_attribute.attoptions. + (option_name/option_value) when passed + pg_class.reloptions or + pg_attribute.attoptions. @@ -17394,14 +17394,14 @@ SELECT currval(pg_get_serial_sequence('sometable', 'id')); empty and cannot be dropped. To display the specific objects populating the tablespace, you will need to connect to the databases identified by pg_tablespace_databases and query their - pg_class catalogs. + pg_class catalogs. pg_typeof returns the OID of the data type of the value that is passed to it. This can be helpful for troubleshooting or dynamically constructing SQL queries. The function is declared as - returning regtype, which is an OID alias type (see + returning regtype, which is an OID alias type (see ); this means that it is the same as an OID for comparison purposes but displays as a type name. For example: @@ -17447,10 +17447,10 @@ SELECT collation for ('foo' COLLATE "de_DE"); to_regoperator, to_regtype, to_regnamespace, and to_regrole functions translate relation, function, operator, type, schema, and role - names (given as text) to objects of - type regclass, regproc, regprocedure, - regoper, regoperator, regtype, - regnamespace, and regrole + names (given as text) to objects of + type regclass, regproc, regprocedure, + regoper, regoperator, regtype, + regnamespace, and regrole respectively. 
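For example (mytable is a hypothetical table name here; a name that cannot be found simply yields NULL, as explained below):

SELECT to_regclass('mytable') AS existing,
       to_regclass('no_such_table') AS missing;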
These functions differ from a cast from text in that they don't accept a numeric OID, and that they return null rather than throwing an error if the name is not found (or, for @@ -17493,18 +17493,18 @@ SELECT collation for ('foo' COLLATE "de_DE"); get description of a database object - pg_identify_object(catalog_id oid, object_id oid, object_sub_id integer) - type text, schema text, name text, identity text + pg_identify_object(catalog_id oid, object_id oid, object_sub_id integer) + type text, schema text, name text, identity text get identity of a database object - pg_identify_object_as_address(catalog_id oid, object_id oid, object_sub_id integer) - type text, name text[], args text[] + pg_identify_object_as_address(catalog_id oid, object_id oid, object_sub_id integer) + type text, name text[], args text[] get external representation of a database object's address - pg_get_object_address(type text, name text[], args text[]) - catalog_id oid, object_id oid, object_sub_id int32 + pg_get_object_address(type text, name text[], args text[]) + catalog_id oid, object_id oid, object_sub_id int32 get address of a database object, from its external representation @@ -17525,13 +17525,13 @@ SELECT collation for ('foo' COLLATE "de_DE"); to uniquely identify the database object specified by catalog OID, object OID and a (possibly zero) sub-object ID. This information is intended to be machine-readable, and is never translated. - type identifies the type of database object; - schema is the schema name that the object belongs in, or - NULL for object types that do not belong to schemas; - name is the name of the object, quoted if necessary, only + type identifies the type of database object; + schema is the schema name that the object belongs in, or + NULL for object types that do not belong to schemas; + name is the name of the object, quoted if necessary, only present if it can be used (alongside schema name, if pertinent) as a unique - identifier of the object, otherwise NULL; - identity is the complete object identity, with the precise format + identifier of the object, otherwise NULL; + identity is the complete object identity, with the precise format depending on object type, and each part within the format being schema-qualified and quoted as necessary. @@ -17542,10 +17542,10 @@ SELECT collation for ('foo' COLLATE "de_DE"); catalog OID, object OID and a (possibly zero) sub-object ID. The returned information is independent of the current server, that is, it could be used to identify an identically named object in another server. - type identifies the type of database object; - name and args are text arrays that together + type identifies the type of database object; + name and args are text arrays that together form a reference to the object. These three columns can be passed to - pg_get_object_address to obtain the internal address + pg_get_object_address to obtain the internal address of the object. This function is the inverse of pg_get_object_address. @@ -17554,13 +17554,13 @@ SELECT collation for ('foo' COLLATE "de_DE"); pg_get_object_address returns a row containing enough information to uniquely identify the database object specified by its type and object name and argument arrays. The returned values are the - ones that would be used in system catalogs such as pg_depend + ones that would be used in system catalogs such as pg_depend and can be passed to other system functions such as - pg_identify_object or pg_describe_object. 
- catalog_id is the OID of the system catalog containing the + pg_identify_object or pg_describe_object. + catalog_id is the OID of the system catalog containing the object; - object_id is the OID of the object itself, and - object_sub_id is the object sub-ID, or zero if none. + object_id is the OID of the object itself, and + object_sub_id is the object sub-ID, or zero if none. This function is the inverse of pg_identify_object_as_address. @@ -17739,9 +17739,9 @@ SELECT collation for ('foo' COLLATE "de_DE");
- The internal transaction ID type (xid) is 32 bits wide and + The internal transaction ID type (xid) is 32 bits wide and wraps around every 4 billion transactions. However, these functions - export a 64-bit format that is extended with an epoch counter + export a 64-bit format that is extended with an epoch counter so it will not wrap around during the life of an installation. The data type used by these functions, txid_snapshot, stores information about transaction ID @@ -17782,9 +17782,9 @@ SELECT collation for ('foo' COLLATE "de_DE"); xip_list Active txids at the time of the snapshot. The list - includes only those active txids between xmin - and xmax; there might be active txids higher - than xmax. A txid that is xmin <= txid < + includes only those active txids between xmin + and xmax; there might be active txids higher + than xmax. A txid that is xmin <= txid < xmax and not in this list was already completed at the time of the snapshot, and thus either visible or dead according to its commit status. The list does not @@ -17797,27 +17797,27 @@ SELECT collation for ('foo' COLLATE "de_DE"); - txid_snapshot's textual representation is - xmin:xmax:xip_list. + txid_snapshot's textual representation is + xmin:xmax:xip_list. For example 10:20:10,14,15 means xmin=10, xmax=20, xip_list=10, 14, 15. - txid_status(bigint) reports the commit status of a recent + txid_status(bigint) reports the commit status of a recent transaction. Applications may use it to determine whether a transaction committed or aborted when the application and database server become disconnected while a COMMIT is in progress. The status of a transaction will be reported as either - in progress, - committed, or aborted, provided that the + in progress, + committed, or aborted, provided that the transaction is recent enough that the system retains the commit status of that transaction. If it is old enough that no references to that transaction survive in the system and the commit status information has been discarded, this function will return NULL. Note that prepared - transactions are reported as in progress; applications must + transactions are reported as in progress; applications must check pg_prepared_xacts if they + linkend="view-pg-prepared-xacts">pg_prepared_xacts if they need to determine whether the txid is a prepared transaction. @@ -17852,7 +17852,7 @@ SELECT collation for ('foo' COLLATE "de_DE"); pg_last_committed_xact pg_last_committed_xact() - xid xid, timestamp timestamp with time zone + xid xid, timestamp timestamp with time zone get transaction ID and commit timestamp of latest committed transaction @@ -17861,7 +17861,7 @@ SELECT collation for ('foo' COLLATE "de_DE"); The functions shown in - print information initialized during initdb, such + print information initialized during initdb, such as the catalog version. They also show information about write-ahead logging and checkpoint processing. This information is cluster-wide, and not specific to any one database. They provide most of the same @@ -17927,12 +17927,12 @@ SELECT collation for ('foo' COLLATE "de_DE"); - pg_control_checkpoint returns a record, shown in + pg_control_checkpoint returns a record, shown in - <function>pg_control_checkpoint</> Columns + <function>pg_control_checkpoint</function> Columns @@ -18043,12 +18043,12 @@ SELECT collation for ('foo' COLLATE "de_DE");
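As a quick illustration of pg_control_checkpoint (a sketch; the columns are among those listed above, and the values are cluster-specific):

SELECT checkpoint_lsn, redo_lsn, timeline_id FROM pg_control_checkpoint();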
- pg_control_system returns a record, shown in + pg_control_system returns a record, shown in - <function>pg_control_system</> Columns + <function>pg_control_system</function> Columns @@ -18084,12 +18084,12 @@ SELECT collation for ('foo' COLLATE "de_DE");
- pg_control_init returns a record, shown in + pg_control_init returns a record, shown in - <function>pg_control_init</> Columns + <function>pg_control_init</function> Columns @@ -18165,12 +18165,12 @@ SELECT collation for ('foo' COLLATE "de_DE");
- pg_control_recovery returns a record, shown in + pg_control_recovery returns a record, shown in - <function>pg_control_recovery</> Columns + <function>pg_control_recovery</function> Columns @@ -18217,7 +18217,7 @@ SELECT collation for ('foo' COLLATE "de_DE"); The functions described in this section are used to control and - monitor a PostgreSQL installation. + monitor a PostgreSQL installation. @@ -18357,7 +18357,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_cancel_backend(pid int) + pg_cancel_backend(pid int) boolean Cancel a backend's current query. This is also allowed if the @@ -18382,7 +18382,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_terminate_backend(pid int) + pg_terminate_backend(pid int) boolean Terminate a backend. This is also allowed if the calling role @@ -18401,28 +18401,28 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_cancel_backend and pg_terminate_backend - send signals (SIGINT or SIGTERM + pg_cancel_backend and pg_terminate_backend + send signals (SIGINT or SIGTERM respectively) to backend processes identified by process ID. The process ID of an active backend can be found from the pid column of the pg_stat_activity view, or by listing the postgres processes on the server (using - ps on Unix or the Task - Manager on Windows). + ps on Unix or the Task + Manager on Windows). The role of an active backend can be found from the usename column of the pg_stat_activity view. - pg_reload_conf sends a SIGHUP signal + pg_reload_conf sends a SIGHUP signal to the server, causing configuration files to be reloaded by all server processes. - pg_rotate_logfile signals the log-file manager to switch + pg_rotate_logfile signals the log-file manager to switch to a new output file immediately. This works only when the built-in log collector is running, since otherwise there is no log-file manager subprocess. 
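For example, one might cancel the running queries of a hypothetical role testuser by combining pg_cancel_backend with the pid and usename columns of pg_stat_activity described above:

SELECT pg_cancel_backend(pid)
FROM pg_stat_activity
WHERE usename = 'testuser'          -- 'testuser' is a hypothetical role name
  AND pid <> pg_backend_pid();      -- skip this session's own backend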
@@ -18492,7 +18492,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_create_restore_point(name text) + pg_create_restore_point(name text) pg_lsn Create a named point for performing restore (restricted to superusers by default, but other users can be granted EXECUTE to run the function) @@ -18520,7 +18520,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_start_backup(label text , fast boolean , exclusive boolean ) + pg_start_backup(label text , fast boolean , exclusive boolean ) pg_lsn Prepare for performing on-line backup (restricted to superusers by default, but other users can be granted EXECUTE to run the function) @@ -18534,7 +18534,7 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_stop_backup(exclusive boolean , wait_for_archive boolean ) + pg_stop_backup(exclusive boolean , wait_for_archive boolean ) setof record Finish performing exclusive or non-exclusive on-line backup (restricted to superusers by default, but other users can be granted EXECUTE to run the function) @@ -18562,23 +18562,23 @@ SELECT set_config('log_statement_stats', 'off', false); - pg_walfile_name(lsn pg_lsn) + pg_walfile_name(lsn pg_lsn) text Convert write-ahead log location to file name - pg_walfile_name_offset(lsn pg_lsn) + pg_walfile_name_offset(lsn pg_lsn) - text, integer + text, integer Convert write-ahead log location to file name and decimal byte offset within file - pg_wal_lsn_diff(lsn pg_lsn, lsn pg_lsn) + pg_wal_lsn_diff(lsn pg_lsn, lsn pg_lsn) - numeric + numeric Calculate the difference between two write-ahead log locations @@ -18586,17 +18586,17 @@ SELECT set_config('log_statement_stats', 'off', false);
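As an illustration of the table above, the following sketch reports each standby's flush lag in bytes (it assumes at least one streaming replica is connected; flush_lsn is a column of the pg_stat_replication view):

SELECT application_name,
       pg_wal_lsn_diff(pg_current_wal_lsn(), flush_lsn) AS flush_lag_bytes
FROM pg_stat_replication;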
- pg_start_backup accepts an arbitrary user-defined label for + pg_start_backup accepts an arbitrary user-defined label for the backup. (Typically this would be the name under which the backup dump file will be stored.) When used in exclusive mode, the function writes a - backup label file (backup_label) and, if there are any links - in the pg_tblspc/ directory, a tablespace map file - (tablespace_map) into the database cluster's data directory, + backup label file (backup_label) and, if there are any links + in the pg_tblspc/ directory, a tablespace map file + (tablespace_map) into the database cluster's data directory, performs a checkpoint, and then returns the backup's starting write-ahead log location as text. The user can ignore this result value, but it is provided in case it is useful. When used in non-exclusive mode, the contents of these files are instead returned by the - pg_stop_backup function, and should be written to the backup + pg_stop_backup function, and should be written to the backup by the caller. @@ -18606,29 +18606,29 @@ postgres=# select pg_start_backup('label_goes_here'); 0/D4445B8 (1 row) - There is an optional second parameter of type boolean. If true, - it specifies executing pg_start_backup as quickly as + There is an optional second parameter of type boolean. If true, + it specifies executing pg_start_backup as quickly as possible. This forces an immediate checkpoint which will cause a spike in I/O operations, slowing any concurrently executing queries. - In an exclusive backup, pg_stop_backup removes the label file - and, if it exists, the tablespace_map file created by - pg_start_backup. In a non-exclusive backup, the contents of - the backup_label and tablespace_map are returned + In an exclusive backup, pg_stop_backup removes the label file + and, if it exists, the tablespace_map file created by + pg_start_backup. In a non-exclusive backup, the contents of + the backup_label and tablespace_map are returned in the result of the function, and should be written to files in the backup (and not in the data directory). There is an optional second - parameter of type boolean. If false, the pg_stop_backup + parameter of type boolean. If false, the pg_stop_backup will return immediately after the backup is completed without waiting for WAL to be archived. This behavior is only useful for backup software which independently monitors WAL archiving. Otherwise, WAL required to make the backup consistent might be missing and make the backup - useless. When this parameter is set to true, pg_stop_backup + useless. When this parameter is set to true, pg_stop_backup will wait for WAL to be archived when archiving is enabled; on the standby, - this means that it will wait only when archive_mode = always. + this means that it will wait only when archive_mode = always. If write activity on the primary is low, it may be useful to run - pg_switch_wal on the primary in order to trigger + pg_switch_wal on the primary in order to trigger an immediate segment switch. @@ -18636,7 +18636,7 @@ postgres=# select pg_start_backup('label_goes_here'); When executed on a primary, the function also creates a backup history file in the write-ahead log archive area. The history file includes the label given to - pg_start_backup, the starting and ending write-ahead log locations for + pg_start_backup, the starting and ending write-ahead log locations for the backup, and the starting and ending times of the backup. 
The return value is the backup's ending write-ahead log location (which again can be ignored). After recording the ending location, the current @@ -18646,16 +18646,16 @@ postgres=# select pg_start_backup('label_goes_here');
- pg_switch_wal moves to the next write-ahead log file, allowing the + pg_switch_wal moves to the next write-ahead log file, allowing the current file to be archived (assuming you are using continuous archiving). The return value is the ending write-ahead log location + 1 within the just-completed write-ahead log file. If there has been no write-ahead log activity since the last write-ahead log switch, - pg_switch_wal does nothing and returns the start location + pg_switch_wal does nothing and returns the start location of the write-ahead log file currently in use. - pg_create_restore_point creates a named write-ahead log + pg_create_restore_point creates a named write-ahead log record that can be used as recovery target, and returns the corresponding write-ahead log location. The given name can then be used with to specify the point up to which @@ -18665,11 +18665,11 @@ postgres=# select pg_start_backup('label_goes_here'); - pg_current_wal_lsn displays the current write-ahead log write + pg_current_wal_lsn displays the current write-ahead log write location in the same format used by the above functions. Similarly, - pg_current_wal_insert_lsn displays the current write-ahead log - insertion location and pg_current_wal_flush_lsn displays the - current write-ahead log flush location. The insertion location is the logical + pg_current_wal_insert_lsn displays the current write-ahead log + insertion location and pg_current_wal_flush_lsn displays the + current write-ahead log flush location. The insertion location is the logical end of the write-ahead log at any instant, while the write location is the end of what has actually been written out from the server's internal buffers and flush location is the location guaranteed to be written to durable storage. The write @@ -18681,7 +18681,7 @@ postgres=# select pg_start_backup('label_goes_here'); - You can use pg_walfile_name_offset to extract the + You can use pg_walfile_name_offset to extract the corresponding write-ahead log file name and byte offset from the results of any of the above functions. For example: @@ -18691,7 +18691,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); 00000001000000000000000D | 4039624 (1 row) - Similarly, pg_walfile_name extracts just the write-ahead log file name. + Similarly, pg_walfile_name extracts just the write-ahead log file name. When the given write-ahead log location is exactly at a write-ahead log file boundary, both these functions return the name of the preceding write-ahead log file. This is usually the desired behavior for managing write-ahead log archiving @@ -18700,7 +18700,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_wal_lsn_diff calculates the difference in bytes + pg_wal_lsn_diff calculates the difference in bytes between two write-ahead log locations. It can be used with pg_stat_replication or some functions shown in to get the replication lag. @@ -18878,21 +18878,21 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - PostgreSQL allows database sessions to synchronize their - snapshots. A snapshot determines which data is visible to the + PostgreSQL allows database sessions to synchronize their + snapshots. A snapshot determines which data is visible to the transaction that is using the snapshot. Synchronized snapshots are necessary when two or more sessions need to see identical content in the database. 
If two sessions just start their transactions independently, there is always a possibility that some third transaction commits - between the executions of the two START TRANSACTION commands, + between the executions of the two START TRANSACTION commands, so that one session sees the effects of that transaction and the other does not. - To solve this problem, PostgreSQL allows a transaction to - export the snapshot it is using. As long as the exporting - transaction remains open, other transactions can import its + To solve this problem, PostgreSQL allows a transaction to + export the snapshot it is using. As long as the exporting + transaction remains open, other transactions can import its snapshot, and thereby be guaranteed that they see exactly the same view of the database that the first transaction sees. But note that any database changes made by any one of these transactions remain invisible @@ -18902,7 +18902,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - Snapshots are exported with the pg_export_snapshot function, + Snapshots are exported with the pg_export_snapshot function, shown in , and imported with the command. @@ -18928,13 +18928,13 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - The function pg_export_snapshot saves the current snapshot - and returns a text string identifying the snapshot. This string + The function pg_export_snapshot saves the current snapshot + and returns a text string identifying the snapshot. This string must be passed (outside the database) to clients that want to import the snapshot. The snapshot is available for import only until the end of the transaction that exported it. A transaction can export more than one snapshot, if needed. Note that doing so is only useful in READ - COMMITTED transactions, since in REPEATABLE READ and + COMMITTED transactions, since in REPEATABLE READ and higher isolation levels, transactions use the same snapshot throughout their lifetime. Once a transaction has exported any snapshots, it cannot be prepared with . @@ -18989,7 +18989,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_create_physical_replication_slot - pg_create_physical_replication_slot(slot_name name , immediately_reserve boolean, temporary boolean) + pg_create_physical_replication_slot(slot_name name , immediately_reserve boolean, temporary boolean) (slot_name name, lsn pg_lsn) @@ -18997,13 +18997,13 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); Creates a new physical replication slot named slot_name. The optional second parameter, - when true, specifies that the LSN for this + when true, specifies that the LSN for this replication slot be reserved immediately; otherwise - the LSN is reserved on first connection from a streaming + the LSN is reserved on first connection from a streaming replication client. Streaming changes from a physical slot is only possible with the streaming-replication protocol — see . The optional third - parameter, temporary, when set to true, specifies that + parameter, temporary, when set to true, specifies that the slot should not be permanently stored to disk and is only meant for use by current session. Temporary slots are also released upon any error. This function corresponds @@ -19024,7 +19024,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); Drops the physical or logical replication slot named slot_name. Same as replication protocol - command DROP_REPLICATION_SLOT. 
For logical slots, this must + command DROP_REPLICATION_SLOT. For logical slots, this must be called when connected to the same database the slot was created on. @@ -19034,7 +19034,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_create_logical_replication_slot - pg_create_logical_replication_slot(slot_name name, plugin name , temporary boolean) + pg_create_logical_replication_slot(slot_name name, plugin name , temporary boolean) (slot_name name, lsn pg_lsn) @@ -19043,7 +19043,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); Creates a new logical (decoding) replication slot named slot_name using the output plugin plugin. The optional third - parameter, temporary, when set to true, specifies that + parameter, temporary, when set to true, specifies that the slot should not be permanently stored to disk and is only meant for use by current session. Temporary slots are also released upon any error. A call to this function has the same @@ -19065,9 +19065,9 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); Returns changes in the slot slot_name, starting from the point at which changes were last consumed. If - upto_lsn and upto_nchanges are NULL, + upto_lsn and upto_nchanges are NULL, logical decoding will continue until end of WAL. If - upto_lsn is non-NULL, decoding will include only + upto_lsn is non-NULL, decoding will include only those transactions which commit prior to the specified LSN. If upto_nchanges is non-NULL, decoding will stop when the number of rows produced by decoding exceeds @@ -19155,7 +19155,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_drop(node_name text) - void + void Delete a previously created replication origin, including any @@ -19187,26 +19187,26 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_session_setup(node_name text) - void + void Mark the current session as replaying from the given @@ -19205,7 +19205,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_session_reset() - void + void Cancel the effects @@ -19254,7 +19254,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_xact_setup(origin_lsn pg_lsn, origin_timestamp timestamptz) - void + void Mark the current transaction as replaying a transaction that has @@ -19273,7 +19273,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_xact_reset() - void + void Cancel the effects of @@ -19289,7 +19289,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_replication_origin_advance(node_name text, lsn pg_lsn) - void + void Set replication progress for the given node to the given @@ -19446,7 +19446,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); bigint Disk space used by the specified fork ('main', - 'fsm', 'vm', or 'init') + 'fsm', 'vm', or 'init') of the specified table or index @@ -19519,7 +19519,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); bigint Total disk space used by the specified table, - including all indexes and TOAST data + including all indexes and TOAST data @@ -19527,48 +19527,48 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_column_size shows the space used to store any individual + pg_column_size shows the space used to store any individual data value.
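For example (illustrative only; the exact numbers vary with platform and build options):

SELECT pg_column_size('hello'::text) AS value_bytes,
       pg_size_pretty(pg_database_size(current_database())) AS database_size;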
- pg_total_relation_size accepts the OID or name of a + pg_total_relation_size accepts the OID or name of a table or toast table, and returns the total on-disk space used for that table, including all associated indexes. This function is equivalent to pg_table_size - + pg_indexes_size. + + pg_indexes_size. - pg_table_size accepts the OID or name of a table and + pg_table_size accepts the OID or name of a table and returns the disk space needed for that table, exclusive of indexes. (TOAST space, free space map, and visibility map are included.) - pg_indexes_size accepts the OID or name of a table and + pg_indexes_size accepts the OID or name of a table and returns the total disk space used by all the indexes attached to that table. - pg_database_size and pg_tablespace_size + pg_database_size and pg_tablespace_size accept the OID or name of a database or tablespace, and return the total disk space used therein. To use pg_database_size, - you must have CONNECT permission on the specified database - (which is granted by default), or be a member of the pg_read_all_stats - role. To use pg_tablespace_size, you must have - CREATE permission on the specified tablespace, or be a member - of the pg_read_all_stats role unless it is the default tablespace for + you must have CONNECT permission on the specified database + (which is granted by default), or be a member of the pg_read_all_stats + role. To use pg_tablespace_size, you must have + CREATE permission on the specified tablespace, or be a member + of the pg_read_all_stats role unless it is the default tablespace for the current database. - pg_relation_size accepts the OID or name of a table, index + pg_relation_size accepts the OID or name of a table, index or toast table, and returns the on-disk size in bytes of one fork of that relation. (Note that for most purposes it is more convenient to - use the higher-level functions pg_total_relation_size - or pg_table_size, which sum the sizes of all forks.) + use the higher-level functions pg_total_relation_size + or pg_table_size, which sum the sizes of all forks.) With one argument, it returns the size of the main data fork of the relation. The second argument can be provided to specify which fork to examine: @@ -19601,13 +19601,13 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_size_pretty can be used to format the result of one of + pg_size_pretty can be used to format the result of one of the other functions in a human-readable way, using bytes, kB, MB, GB or TB as appropriate. - pg_size_bytes can be used to get the size in bytes from a + pg_size_bytes can be used to get the size in bytes from a string in human-readable format. The input may have units of bytes, kB, MB, GB or TB, and is parsed case-insensitively. If no units are specified, bytes are assumed. @@ -19616,17 +19616,17 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); The units kB, MB, GB and TB used by the functions - pg_size_pretty and pg_size_bytes are defined + pg_size_pretty and pg_size_bytes are defined using powers of 2 rather than powers of 10, so 1kB is 1024 bytes, 1MB is - 10242 = 1048576 bytes, and so on. + 10242 = 1048576 bytes, and so on. The functions above that operate on tables or indexes accept a - regclass argument, which is simply the OID of the table or index - in the pg_class system catalog. 
You do not have to look up - the OID by hand, however, since the regclass data type's input + regclass argument, which is simply the OID of the table or index + in the pg_class system catalog. You do not have to look up + the OID by hand, however, since the regclass data type's input converter will do the work for you. Just write the table name enclosed in single quotes so that it looks like a literal constant. For compatibility with the handling of ordinary SQL names, the string @@ -19695,28 +19695,28 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_relation_filenode accepts the OID or name of a table, - index, sequence, or toast table, and returns the filenode number + pg_relation_filenode accepts the OID or name of a table, + index, sequence, or toast table, and returns the filenode number currently assigned to it. The filenode is the base component of the file name(s) used for the relation (see for more information). For most tables the result is the same as - pg_class.relfilenode, but for certain - system catalogs relfilenode is zero and this function must + pg_class.relfilenode, but for certain + system catalogs relfilenode is zero and this function must be used to get the correct value. The function returns NULL if passed a relation that does not have storage, such as a view. - pg_relation_filepath is similar to - pg_relation_filenode, but it returns the entire file path name - (relative to the database cluster's data directory PGDATA) of + pg_relation_filepath is similar to + pg_relation_filenode, but it returns the entire file path name + (relative to the database cluster's data directory PGDATA) of the relation. - pg_filenode_relation is the reverse of - pg_relation_filenode. Given a tablespace OID and - a filenode, it returns the associated relation's OID. For a table + pg_filenode_relation is the reverse of + pg_relation_filenode. Given a tablespace OID and + a filenode, it returns the associated relation's OID. For a table in the database's default tablespace, the tablespace can be specified as 0. @@ -19736,7 +19736,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_collation_actual_version - pg_collation_actual_version(oid) + pg_collation_actual_version(oid) text Return actual version of collation from operating system @@ -19744,7 +19744,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_import_system_collations - pg_import_system_collations(schema regnamespace) + pg_import_system_collations(schema regnamespace) integer Import operating system collations @@ -19763,7 +19763,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_import_system_collations adds collations to the system + pg_import_system_collations adds collations to the system catalog pg_collation based on all the locales it finds in the operating system. 
This is what initdb uses; @@ -19818,28 +19818,28 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - brin_summarize_new_values(index regclass) + brin_summarize_new_values(index regclass) integer summarize page ranges not already summarized - brin_summarize_range(index regclass, blockNumber bigint) + brin_summarize_range(index regclass, blockNumber bigint) integer summarize the page range covering the given block, if not already summarized - brin_desummarize_range(index regclass, blockNumber bigint) + brin_desummarize_range(index regclass, blockNumber bigint) integer de-summarize the page range covering the given block, if summarized - gin_clean_pending_list(index regclass) + gin_clean_pending_list(index regclass) bigint move GIN pending list entries into main index structure @@ -19849,25 +19849,25 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - brin_summarize_new_values accepts the OID or name of a + brin_summarize_new_values accepts the OID or name of a BRIN index and inspects the index to find page ranges in the base table that are not currently summarized by the index; for any such range it creates a new summary index tuple by scanning the table pages. It returns the number of new page range summaries that were inserted - into the index. brin_summarize_range does the same, except + into the index. brin_summarize_range does the same, except it only summarizes the range that covers the given block number. - gin_clean_pending_list accepts the OID or name of + gin_clean_pending_list accepts the OID or name of a GIN index and cleans up the pending list of the specified index by moving entries in it to the main GIN data structure in bulk. It returns the number of pages removed from the pending list. Note that if the argument is a GIN index built with - the fastupdate option disabled, no cleanup happens and the + the fastupdate option disabled, no cleanup happens and the return value is 0, because the index doesn't have a pending list. Please see and - for details of the pending list and fastupdate option. + for details of the pending list and fastupdate option. @@ -19879,9 +19879,9 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); The functions shown in provide native access to files on the machine hosting the server. Only files within the - database cluster directory and the log_directory can be + database cluster directory and the log_directory can be accessed. Use a relative path for files in the cluster directory, - and a path matching the log_directory configuration setting + and a path matching the log_directory configuration setting for log files. Use of these functions is restricted to superusers except where stated otherwise. @@ -19897,7 +19897,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_ls_dir(dirname text [, missing_ok boolean, include_dot_dirs boolean]) + pg_ls_dir(dirname text [, missing_ok boolean, include_dot_dirs boolean]) setof text @@ -19911,7 +19911,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); setof record List the name, size, and last modification time of files in the log - directory. Access is granted to members of the pg_monitor + directory. Access is granted to members of the pg_monitor role and may be granted to other non-superuser roles. @@ -19922,13 +19922,13 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); setof record List the name, size, and last modification time of files in the WAL - directory. 
Access is granted to members of the pg_monitor + directory. Access is granted to members of the pg_monitor role and may be granted to other non-superuser roles. - pg_read_file(filename text [, offset bigint, length bigint [, missing_ok boolean] ]) + pg_read_file(filename text [, offset bigint, length bigint [, missing_ok boolean] ]) text @@ -19937,7 +19937,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_read_binary_file(filename text [, offset bigint, length bigint [, missing_ok boolean] ]) + pg_read_binary_file(filename text [, offset bigint, length bigint [, missing_ok boolean] ]) bytea @@ -19946,7 +19946,7 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - pg_stat_file(filename text[, missing_ok boolean]) + pg_stat_file(filename text[, missing_ok boolean]) record @@ -19958,23 +19958,23 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); - Some of these functions take an optional missing_ok parameter, + Some of these functions take an optional missing_ok parameter, which specifies the behavior when the file or directory does not exist. If true, the function returns NULL (except - pg_ls_dir, which returns an empty result set). If - false, an error is raised. The default is false. + pg_ls_dir, which returns an empty result set). If + false, an error is raised. The default is false. pg_ls_dir - pg_ls_dir returns the names of all files (and directories + pg_ls_dir returns the names of all files (and directories and other special files) in the specified directory. The - include_dot_dirs indicates whether . and .. are + include_dot_dirs indicates whether . and .. are included in the result set. The default is to exclude them - (false), but including them can be useful when - missing_ok is true, to distinguish an + (false), but including them can be useful when - missing_ok is true, to distinguish an empty directory from a non-existent directory. pg_ls_logdir - pg_ls_logdir returns the name, size, and last modified time + pg_ls_logdir returns the name, size, and last modified time (mtime) of each file in the log directory. By default, only superusers - and members of the pg_monitor role can use this function. + and members of the pg_monitor role can use this function. Access may be granted to others using GRANT. pg_ls_waldir - pg_ls_waldir returns the name, size, and last modified time + pg_ls_waldir returns the name, size, and last modified time (mtime) of each file in the write-ahead log (WAL) directory. By - default only superusers and members of the pg_monitor role + default only superusers and members of the pg_monitor role can use this function. Access may be granted to others using GRANT. pg_read_file - pg_read_file returns part of a text file, starting - at the given offset, returning at most length - bytes (less if the end of file is reached first). If offset + pg_read_file returns part of a text file, starting + at the given offset, returning at most length + bytes (less if the end of file is reached first). If offset is negative, it is relative to the end of the file. - If offset and length are omitted, the entire + If offset and length are omitted, the entire
The bytes read from the file are interpreted as a string in the server encoding; an error is thrown if they are not valid in that encoding. @@ -20017,10 +20017,10 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup()); pg_read_binary_file - pg_read_binary_file is similar to - pg_read_file, except that the result is a bytea value; + pg_read_binary_file is similar to + pg_read_file, except that the result is a bytea value; accordingly, no encoding checks are performed. - In combination with the convert_from function, this function + In combination with the convert_from function, this function can be used to read a file in a specified encoding: SELECT convert_from(pg_read_binary_file('file_in_utf8.txt'), 'UTF8'); @@ -20031,7 +20031,7 @@ SELECT convert_from(pg_read_binary_file('file_in_utf8.txt'), 'UTF8'); pg_stat_file - pg_stat_file returns a record containing the file + pg_stat_file returns a record containing the file size, last accessed time stamp, last modified time stamp, last file status change time stamp (Unix platforms only), file creation time stamp (Windows only), and a boolean @@ -20064,42 +20064,42 @@ SELECT (pg_stat_file('filename')).modification; - pg_advisory_lock(key bigint) + pg_advisory_lock(key bigint) void Obtain exclusive session level advisory lock - pg_advisory_lock(key1 int, key2 int) + pg_advisory_lock(key1 int, key2 int) void Obtain exclusive session level advisory lock - pg_advisory_lock_shared(key bigint) + pg_advisory_lock_shared(key bigint) void Obtain shared session level advisory lock - pg_advisory_lock_shared(key1 int, key2 int) + pg_advisory_lock_shared(key1 int, key2 int) void Obtain shared session level advisory lock - pg_advisory_unlock(key bigint) + pg_advisory_unlock(key bigint) boolean Release an exclusive session level advisory lock - pg_advisory_unlock(key1 int, key2 int) + pg_advisory_unlock(key1 int, key2 int) boolean Release an exclusive session level advisory lock @@ -20113,98 +20113,98 @@ SELECT (pg_stat_file('filename')).modification; - pg_advisory_unlock_shared(key bigint) + pg_advisory_unlock_shared(key bigint) boolean Release a shared session level advisory lock - pg_advisory_unlock_shared(key1 int, key2 int) + pg_advisory_unlock_shared(key1 int, key2 int) boolean Release a shared session level advisory lock - pg_advisory_xact_lock(key bigint) + pg_advisory_xact_lock(key bigint) void Obtain exclusive transaction level advisory lock - pg_advisory_xact_lock(key1 int, key2 int) + pg_advisory_xact_lock(key1 int, key2 int) void Obtain exclusive transaction level advisory lock - pg_advisory_xact_lock_shared(key bigint) + pg_advisory_xact_lock_shared(key bigint) void Obtain shared transaction level advisory lock - pg_advisory_xact_lock_shared(key1 int, key2 int) + pg_advisory_xact_lock_shared(key1 int, key2 int) void Obtain shared transaction level advisory lock - pg_try_advisory_lock(key bigint) + pg_try_advisory_lock(key bigint) boolean Obtain exclusive session level advisory lock if available - pg_try_advisory_lock(key1 int, key2 int) + pg_try_advisory_lock(key1 int, key2 int) boolean Obtain exclusive session level advisory lock if available - pg_try_advisory_lock_shared(key bigint) + pg_try_advisory_lock_shared(key bigint) boolean Obtain shared session level advisory lock if available - pg_try_advisory_lock_shared(key1 int, key2 int) + pg_try_advisory_lock_shared(key1 int, key2 int) boolean Obtain shared session level advisory lock if available - pg_try_advisory_xact_lock(key bigint) + pg_try_advisory_xact_lock(key bigint) boolean 
Obtain exclusive transaction level advisory lock if available - pg_try_advisory_xact_lock(key1 int, key2 int) + pg_try_advisory_xact_lock(key1 int, key2 int) boolean Obtain exclusive transaction level advisory lock if available - pg_try_advisory_xact_lock_shared(key bigint) + pg_try_advisory_xact_lock_shared(key bigint) boolean Obtain shared transaction level advisory lock if available - pg_try_advisory_xact_lock_shared(key1 int, key2 int) + pg_try_advisory_xact_lock_shared(key1 int, key2 int) boolean Obtain shared transaction level advisory lock if available @@ -20217,7 +20217,7 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_lock - pg_advisory_lock locks an application-defined resource, + pg_advisory_lock locks an application-defined resource, which can be identified either by a single 64-bit key value or two 32-bit key values (note that these two key spaces do not overlap). If another session already holds a lock on the same resource identifier, @@ -20231,8 +20231,8 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_lock_shared - pg_advisory_lock_shared works the same as - pg_advisory_lock, + pg_advisory_lock_shared works the same as + pg_advisory_lock, except the lock can be shared with other sessions requesting shared locks. Only would-be exclusive lockers are locked out. @@ -20241,10 +20241,10 @@ SELECT (pg_stat_file('filename')).modification; pg_try_advisory_lock - pg_try_advisory_lock is similar to - pg_advisory_lock, except the function will not wait for the + pg_try_advisory_lock is similar to + pg_advisory_lock, except the function will not wait for the lock to become available. It will either obtain the lock immediately and - return true, or return false if the lock cannot be + return true, or return false if the lock cannot be acquired immediately. @@ -20252,8 +20252,8 @@ SELECT (pg_stat_file('filename')).modification; pg_try_advisory_lock_shared - pg_try_advisory_lock_shared works the same as - pg_try_advisory_lock, except it attempts to acquire + pg_try_advisory_lock_shared works the same as + pg_try_advisory_lock, except it attempts to acquire a shared rather than an exclusive lock. @@ -20261,10 +20261,10 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_unlock - pg_advisory_unlock will release a previously-acquired + pg_advisory_unlock will release a previously-acquired exclusive session level advisory lock. It - returns true if the lock is successfully released. - If the lock was not held, it will return false, + returns true if the lock is successfully released. + If the lock was not held, it will return false, and in addition, an SQL warning will be reported by the server. @@ -20272,8 +20272,8 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_unlock_shared - pg_advisory_unlock_shared works the same as - pg_advisory_unlock, + pg_advisory_unlock_shared works the same as + pg_advisory_unlock, except it releases a shared session level advisory lock. @@ -20281,7 +20281,7 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_unlock_all - pg_advisory_unlock_all will release all session level advisory + pg_advisory_unlock_all will release all session level advisory locks held by the current session. (This function is implicitly invoked at session end, even if the client disconnects ungracefully.) 
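A minimal session-level usage sketch (the key 12345 is an arbitrary application-defined value):

SELECT pg_try_advisory_lock(12345);   -- true if the lock was acquired
-- ... perform the work the lock protects ...
SELECT pg_advisory_unlock(12345);     -- true if the lock was released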
@@ -20290,8 +20290,8 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_xact_lock - pg_advisory_xact_lock works the same as - pg_advisory_lock, except the lock is automatically released + pg_advisory_xact_lock works the same as + pg_advisory_lock, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. @@ -20299,8 +20299,8 @@ SELECT (pg_stat_file('filename')).modification; pg_advisory_xact_lock_shared - pg_advisory_xact_lock_shared works the same as - pg_advisory_lock_shared, except the lock is automatically released + pg_advisory_xact_lock_shared works the same as + pg_advisory_lock_shared, except the lock is automatically released at the end of the current transaction and cannot be released explicitly. @@ -20308,8 +20308,8 @@ SELECT (pg_stat_file('filename')).modification; pg_try_advisory_xact_lock - pg_try_advisory_xact_lock works the same as - pg_try_advisory_lock, except the lock, if acquired, + pg_try_advisory_xact_lock works the same as + pg_try_advisory_lock, except the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly. @@ -20318,8 +20318,8 @@ SELECT (pg_stat_file('filename')).modification; pg_try_advisory_xact_lock_shared - pg_try_advisory_xact_lock_shared works the same as - pg_try_advisory_lock_shared, except the lock, if acquired, + pg_try_advisory_xact_lock_shared works the same as + pg_try_advisory_lock_shared, except the lock, if acquired, is automatically released at the end of the current transaction and cannot be released explicitly. @@ -20336,8 +20336,8 @@ SELECT (pg_stat_file('filename')).modification; - Currently PostgreSQL provides one built in trigger - function, suppress_redundant_updates_trigger, + Currently PostgreSQL provides one built in trigger + function, suppress_redundant_updates_trigger, which will prevent any update that does not actually change the data in the row from taking place, in contrast to the normal behavior which always performs the update @@ -20354,7 +20354,7 @@ SELECT (pg_stat_file('filename')).modification; However, detecting such situations in client code is not always easy, or even possible, and writing expressions to detect them can be error-prone. An alternative is to use - suppress_redundant_updates_trigger, which will skip + suppress_redundant_updates_trigger, which will skip updates that don't change the data. You should use this with care, however. The trigger takes a small but non-trivial time for each record, so if most of the records affected by an update are actually changed, @@ -20362,7 +20362,7 @@ SELECT (pg_stat_file('filename')).modification; - The suppress_redundant_updates_trigger function can be + The suppress_redundant_updates_trigger function can be added to a table like this: CREATE TRIGGER z_min_update @@ -20384,7 +20384,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); Event Trigger Functions - PostgreSQL provides these helper functions + PostgreSQL provides these helper functions to retrieve information from event triggers. @@ -20401,12 +20401,12 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); - pg_event_trigger_ddl_commands returns a list of + pg_event_trigger_ddl_commands returns a list of DDL commands executed by each user action, when invoked in a function attached to a - ddl_command_end event trigger. If called in any other + ddl_command_end event trigger. If called in any other context, an error is raised. 
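For instance, a function attached to a ddl_command_end event trigger might simply log each command; this is only a sketch, the function and trigger names are hypothetical, and the columns it prints are described below:

CREATE FUNCTION log_ddl_commands() RETURNS event_trigger
LANGUAGE plpgsql AS $$
DECLARE
    obj record;
BEGIN
    -- report the tag, object type, and identity of each DDL command
    FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands()
    LOOP
        RAISE NOTICE 'caught % on %: %',
                     obj.command_tag, obj.object_type, obj.object_identity;
    END LOOP;
END;
$$;
CREATE EVENT TRIGGER log_ddl ON ddl_command_end
    EXECUTE PROCEDURE log_ddl_commands();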
- pg_event_trigger_ddl_commands returns one row for each + pg_event_trigger_ddl_commands returns one row for each base command executed; some commands that are a single SQL sentence may return more than one row. This function returns the following columns: @@ -20451,7 +20451,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); schema_name text - Name of the schema the object belongs in, if any; otherwise NULL. + Name of the schema the object belongs in, if any; otherwise NULL. No quoting is applied. @@ -20492,11 +20492,11 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); - pg_event_trigger_dropped_objects returns a list of all objects - dropped by the command in whose sql_drop event it is called. + pg_event_trigger_dropped_objects returns a list of all objects + dropped by the command in whose sql_drop event it is called. If called in any other context, - pg_event_trigger_dropped_objects raises an error. - pg_event_trigger_dropped_objects returns the following columns: + pg_event_trigger_dropped_objects raises an error. + pg_event_trigger_dropped_objects returns the following columns: @@ -20553,7 +20553,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); schema_name text - Name of the schema the object belonged in, if any; otherwise NULL. + Name of the schema the object belonged in, if any; otherwise NULL. No quoting is applied. @@ -20562,7 +20562,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); text Name of the object, if the combination of schema and name can be - used as a unique identifier for the object; otherwise NULL. + used as a unique identifier for the object; otherwise NULL. No quoting is applied, and name is never schema-qualified. @@ -20598,7 +20598,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); - The pg_event_trigger_dropped_objects function can be used + The pg_event_trigger_dropped_objects function can be used in an event trigger like this: CREATE FUNCTION test_event_trigger_for_drops() @@ -20631,7 +20631,7 @@ CREATE EVENT TRIGGER test_event_trigger_for_drops The functions shown in provide information about a table for which a - table_rewrite event has just been called. + table_rewrite event has just been called. If called in any other context, an error is raised. @@ -20668,7 +20668,7 @@ CREATE EVENT TRIGGER test_event_trigger_for_drops - The pg_event_trigger_table_rewrite_oid function can be used + The pg_event_trigger_table_rewrite_oid function can be used in an event trigger like this: CREATE FUNCTION test_event_trigger_table_rewrite_oid() diff --git a/doc/src/sgml/fuzzystrmatch.sgml b/doc/src/sgml/fuzzystrmatch.sgml index ff5bc08fea..373ac4891d 100644 --- a/doc/src/sgml/fuzzystrmatch.sgml +++ b/doc/src/sgml/fuzzystrmatch.sgml @@ -8,14 +8,14 @@ - The fuzzystrmatch module provides several + The fuzzystrmatch module provides several functions to determine similarities and distance between strings. - At present, the soundex, metaphone, - dmetaphone, and dmetaphone_alt functions do + At present, the soundex, metaphone, + dmetaphone, and dmetaphone_alt functions do not work well with multibyte encodings (such as UTF-8). @@ -31,7 +31,7 @@ - The fuzzystrmatch module provides two functions + The fuzzystrmatch module provides two functions for working with Soundex codes: @@ -49,12 +49,12 @@ difference(text, text) returns int - The soundex function converts a string to its Soundex code. 
- The difference function converts two strings to their Soundex + The soundex function converts a string to its Soundex code. + The difference function converts two strings to their Soundex codes and then reports the number of matching code positions. Since Soundex codes have four characters, the result ranges from zero to four, with zero being no match and four being an exact match. (Thus, the - function is misnamed — similarity would have been + function is misnamed — similarity would have been a better name.) @@ -115,10 +115,10 @@ levenshtein_less_equal(text source, text target, int max_d) returns int levenshtein_less_equal is an accelerated version of the Levenshtein function for use when only small distances are of interest. - If the actual distance is less than or equal to max_d, + If the actual distance is less than or equal to max_d, then levenshtein_less_equal returns the correct - distance; otherwise it returns some value greater than max_d. - If max_d is negative then the behavior is the same as + distance; otherwise it returns some value greater than max_d. + If max_d is negative then the behavior is the same as levenshtein. @@ -198,9 +198,9 @@ test=# SELECT metaphone('GUMBO', 4); Double Metaphone - The Double Metaphone system computes two sounds like strings - for a given input string — a primary and an - alternate. In most cases they are the same, but for non-English + The Double Metaphone system computes two sounds like strings + for a given input string — a primary and an + alternate. In most cases they are the same, but for non-English names especially they can be a bit different, depending on pronunciation. These functions compute the primary and alternate codes: diff --git a/doc/src/sgml/generate-errcodes-table.pl b/doc/src/sgml/generate-errcodes-table.pl index 01fc6166bf..e655703b5b 100644 --- a/doc/src/sgml/generate-errcodes-table.pl +++ b/doc/src/sgml/generate-errcodes-table.pl @@ -30,12 +30,12 @@ while (<$errcodes>) s/-/—/; # Wrap PostgreSQL in - s/PostgreSQL/PostgreSQL<\/>/g; + s/PostgreSQL/PostgreSQL<\/productname>/g; print "\n\n"; print "\n"; print ""; - print "$_\n"; + print "$_\n"; print "\n"; next; diff --git a/doc/src/sgml/generic-wal.sgml b/doc/src/sgml/generic-wal.sgml index dfa78c5ca2..7a0284994c 100644 --- a/doc/src/sgml/generic-wal.sgml +++ b/doc/src/sgml/generic-wal.sgml @@ -13,8 +13,8 @@ The API for constructing generic WAL records is defined in - access/generic_xlog.h and implemented - in access/transam/generic_xlog.c. + access/generic_xlog.h and implemented + in access/transam/generic_xlog.c. @@ -24,24 +24,24 @@ - state = GenericXLogStart(relation) — start + state = GenericXLogStart(relation) — start construction of a generic WAL record for the given relation. - page = GenericXLogRegisterBuffer(state, buffer, flags) + page = GenericXLogRegisterBuffer(state, buffer, flags) — register a buffer to be modified within the current generic WAL record. This function returns a pointer to a temporary copy of the buffer's page, where modifications should be made. (Do not modify the buffer's contents directly.) The third argument is a bit mask of flags applicable to the operation. Currently the only such flag is - GENERIC_XLOG_FULL_IMAGE, which indicates that a full-page + GENERIC_XLOG_FULL_IMAGE, which indicates that a full-page image rather than a delta update should be included in the WAL record. Typically this flag would be set if the page is new or has been rewritten completely. 
- GenericXLogRegisterBuffer can be repeated if the + GenericXLogRegisterBuffer can be repeated if the WAL-logged action needs to modify multiple pages. @@ -54,7 +54,7 @@ - GenericXLogFinish(state) — apply the changes to + GenericXLogFinish(state) — apply the changes to the buffers and emit the generic WAL record. @@ -63,7 +63,7 @@ WAL record construction can be canceled between any of the above steps by - calling GenericXLogAbort(state). This will discard all + calling GenericXLogAbort(state). This will discard all changes to the page image copies. @@ -75,13 +75,13 @@ No direct modifications of buffers are allowed! All modifications must - be done in copies acquired from GenericXLogRegisterBuffer(). + be done in copies acquired from GenericXLogRegisterBuffer(). In other words, code that makes generic WAL records should never call - BufferGetPage() for itself. However, it remains the + BufferGetPage() for itself. However, it remains the caller's responsibility to pin/unpin and lock/unlock the buffers at appropriate times. Exclusive lock must be held on each target buffer - from before GenericXLogRegisterBuffer() until after - GenericXLogFinish(). + from before GenericXLogRegisterBuffer() until after + GenericXLogFinish(). @@ -97,7 +97,7 @@ The maximum number of buffers that can be registered for a generic WAL - record is MAX_GENERIC_XLOG_PAGES. An error will be thrown + record is MAX_GENERIC_XLOG_PAGES. An error will be thrown if this limit is exceeded. @@ -106,26 +106,26 @@ Generic WAL assumes that the pages to be modified have standard layout, and in particular that there is no useful data between - pd_lower and pd_upper. + pd_lower and pd_upper.
Since you are modifying copies of buffer - pages, GenericXLogStart() does not start a critical + pages, GenericXLogStart() does not start a critical section. Thus, you can safely do memory allocation, error throwing, - etc. between GenericXLogStart() and - GenericXLogFinish(). The only actual critical section is - present inside GenericXLogFinish(). There is no need to - worry about calling GenericXLogAbort() during an error + etc. between GenericXLogStart() and + GenericXLogFinish(). The only actual critical section is + present inside GenericXLogFinish(). There is no need to + worry about calling GenericXLogAbort() during an error exit, either. - GenericXLogFinish() takes care of marking buffers dirty + GenericXLogFinish() takes care of marking buffers dirty and setting their LSNs. You do not need to do this explicitly. @@ -148,7 +148,7 @@ - If GENERIC_XLOG_FULL_IMAGE is not specified for a + If GENERIC_XLOG_FULL_IMAGE is not specified for a registered buffer, the generic WAL record contains a delta between the old and the new page images. This delta is based on byte-by-byte comparison. This is not very compact for the case of moving data diff --git a/doc/src/sgml/geqo.sgml b/doc/src/sgml/geqo.sgml index e0f8adcd6e..99ee3ebca0 100644 --- a/doc/src/sgml/geqo.sgml +++ b/doc/src/sgml/geqo.sgml @@ -88,7 +88,7 @@ - According to the comp.ai.genetic FAQ it cannot be stressed too + According to the comp.ai.genetic FAQ it cannot be stressed too strongly that a GA is not a pure random search for a solution to a problem. A GA uses stochastic processes, but the result is distinctly non-random (better than random). @@ -222,7 +222,7 @@ are considered; and all the initially-determined relation scan plans are available. The estimated cost is the cheapest of these possibilities.) Join sequences with lower estimated cost are considered - more fit than those with higher cost. The genetic algorithm + more fit than those with higher cost. The genetic algorithm discards the least fit candidates. Then new candidates are generated by combining genes of more-fit candidates — that is, by using randomly-chosen portions of known low-cost join sequences to create @@ -235,20 +235,20 @@ This process is inherently nondeterministic, because of the randomized choices made during both the initial population selection and subsequent - mutation of the best candidates. To avoid surprising changes + mutation of the best candidates. To avoid surprising changes of the selected plan, each run of the GEQO algorithm restarts its random number generator with the current - parameter setting. As long as geqo_seed and the other + parameter setting. As long as geqo_seed and the other GEQO parameters are kept fixed, the same plan will be generated for a given query (and other planner inputs such as statistics). To experiment - with different search paths, try changing geqo_seed. + with different search paths, try changing geqo_seed. Future Implementation Tasks for - <productname>PostgreSQL</> <acronym>GEQO</acronym> + PostgreSQL GEQO Work is still needed to improve the genetic algorithm parameter diff --git a/doc/src/sgml/gin.sgml b/doc/src/sgml/gin.sgml index 7c2321ec3c..873627a210 100644 --- a/doc/src/sgml/gin.sgml +++ b/doc/src/sgml/gin.sgml @@ -21,15 +21,15 @@ - We use the word item to refer to a composite value that - is to be indexed, and the word key to refer to an element + We use the word item to refer to a composite value that + is to be indexed, and the word key to refer to an element value. 
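For example, with the built-in array_ops operator class shown in the table below, each indexed array is an item and its elements are the keys (a sketch; the table and column names are illustrative):

CREATE TABLE documents (tags integer[]);
CREATE INDEX documents_tags_idx ON documents USING GIN (tags);
-- each row's tags array is one item; the values 1 and 3 are the keys searched
SELECT * FROM documents WHERE tags @> ARRAY[1, 3];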
GIN always stores and searches for keys, not item values per se. A GIN index stores a set of (key, posting list) pairs, - where a posting list is a set of row IDs in which the key + where a posting list is a set of row IDs in which the key occurs. The same row ID can appear in multiple posting lists, since an item can contain more than one key. Each key value is stored only once, so a GIN index is very compact for cases @@ -66,7 +66,7 @@ Built-in Operator Classes - The core PostgreSQL distribution + The core PostgreSQL distribution includes the GIN operator classes shown in . (Some of the optional modules described in @@ -85,38 +85,38 @@ - array_ops - anyarray + array_ops + anyarray - && - <@ - = - @> + && + <@ + = + @> - jsonb_ops - jsonb + jsonb_ops + jsonb - ? - ?& - ?| - @> + ? + ?& + ?| + @> - jsonb_path_ops - jsonb + jsonb_path_ops + jsonb - @> + @> - tsvector_ops - tsvector + tsvector_ops + tsvector - @@ - @@@ + @@ + @@@ @@ -124,8 +124,8 @@ - Of the two operator classes for type jsonb, jsonb_ops - is the default. jsonb_path_ops supports fewer operators but + Of the two operator classes for type jsonb, jsonb_ops + is the default. jsonb_path_ops supports fewer operators but offers better performance for those operators. See for details. @@ -157,15 +157,15 @@ Datum *extractValue(Datum itemValue, int32 *nkeys, - bool **nullFlags) + bool **nullFlags) Returns a palloc'd array of keys given an item to be indexed. The - number of returned keys must be stored into *nkeys. + number of returned keys must be stored into *nkeys. If any of the keys can be null, also palloc an array of - *nkeys bool fields, store its address at - *nullFlags, and set these null flags as needed. - *nullFlags can be left NULL (its initial value) + *nkeys bool fields, store its address at + *nullFlags, and set these null flags as needed. + *nullFlags can be left NULL (its initial value) if all keys are non-null. The return value can be NULL if the item contains no keys. @@ -175,40 +175,40 @@ Datum *extractQuery(Datum query, int32 *nkeys, StrategyNumber n, bool **pmatch, Pointer **extra_data, - bool **nullFlags, int32 *searchMode) + bool **nullFlags, int32 *searchMode) Returns a palloc'd array of keys given a value to be queried; that is, - query is the value on the right-hand side of an + query is the value on the right-hand side of an indexable operator whose left-hand side is the indexed column. - n is the strategy number of the operator within the + n is the strategy number of the operator within the operator class (see ). - Often, extractQuery will need - to consult n to determine the data type of - query and the method it should use to extract key values. - The number of returned keys must be stored into *nkeys. + Often, extractQuery will need + to consult n to determine the data type of + query and the method it should use to extract key values. + The number of returned keys must be stored into *nkeys. If any of the keys can be null, also palloc an array of - *nkeys bool fields, store its address at - *nullFlags, and set these null flags as needed. - *nullFlags can be left NULL (its initial value) + *nkeys bool fields, store its address at + *nullFlags, and set these null flags as needed. + *nullFlags can be left NULL (its initial value) if all keys are non-null. - The return value can be NULL if the query contains no keys. + The return value can be NULL if the query contains no keys. 
- searchMode is an output argument that allows - extractQuery to specify details about how the search + searchMode is an output argument that allows + extractQuery to specify details about how the search will be done. - If *searchMode is set to - GIN_SEARCH_MODE_DEFAULT (which is the value it is + If *searchMode is set to + GIN_SEARCH_MODE_DEFAULT (which is the value it is initialized to before call), only items that match at least one of the returned keys are considered candidate matches. - If *searchMode is set to - GIN_SEARCH_MODE_INCLUDE_EMPTY, then in addition to items + If *searchMode is set to + GIN_SEARCH_MODE_INCLUDE_EMPTY, then in addition to items containing at least one matching key, items that contain no keys at all are considered candidate matches. (This mode is useful for implementing is-subset-of operators, for example.) - If *searchMode is set to GIN_SEARCH_MODE_ALL, + If *searchMode is set to GIN_SEARCH_MODE_ALL, then all non-null items in the index are considered candidate matches, whether they match any of the returned keys or not. (This mode is much slower than the other two choices, since it requires @@ -217,33 +217,33 @@ in most cases is probably not a good candidate for a GIN operator class.) The symbols to use for setting this mode are defined in - access/gin.h. + access/gin.h. - pmatch is an output argument for use when partial match - is supported. To use it, extractQuery must allocate - an array of *nkeys booleans and store its address at - *pmatch. Each element of the array should be set to TRUE + pmatch is an output argument for use when partial match + is supported. To use it, extractQuery must allocate + an array of *nkeys booleans and store its address at + *pmatch. Each element of the array should be set to TRUE if the corresponding key requires partial match, FALSE if not. - If *pmatch is set to NULL then GIN assumes partial match + If *pmatch is set to NULL then GIN assumes partial match is not required. The variable is initialized to NULL before call, so this argument can simply be ignored by operator classes that do not support partial match. - extra_data is an output argument that allows - extractQuery to pass additional data to the - consistent and comparePartial methods. - To use it, extractQuery must allocate - an array of *nkeys pointers and store its address at - *extra_data, then store whatever it wants to into the + extra_data is an output argument that allows + extractQuery to pass additional data to the + consistent and comparePartial methods. + To use it, extractQuery must allocate + an array of *nkeys pointers and store its address at + *extra_data, then store whatever it wants to into the individual pointers. The variable is initialized to NULL before call, so this argument can simply be ignored by operator classes that - do not require extra data. If *extra_data is set, the - whole array is passed to the consistent method, and - the appropriate element to the comparePartial method. + do not require extra data. If *extra_data is set, the + whole array is passed to the consistent method, and + the appropriate element to the comparePartial method. @@ -251,10 +251,10 @@ An operator class must also provide a function to check if an indexed item - matches the query. It comes in two flavors, a boolean consistent - function, and a ternary triConsistent function. - triConsistent covers the functionality of both, so providing - triConsistent alone is sufficient. However, if the boolean + matches the query. 
It comes in two flavors, a boolean consistent + function, and a ternary triConsistent function. + triConsistent covers the functionality of both, so providing + triConsistent alone is sufficient. However, if the boolean variant is significantly cheaper to calculate, it can be advantageous to provide both. If only the boolean variant is provided, some optimizations that depend on refuting index items before fetching all the keys are @@ -264,48 +264,48 @@ bool consistent(bool check[], StrategyNumber n, Datum query, int32 nkeys, Pointer extra_data[], bool *recheck, - Datum queryKeys[], bool nullFlags[]) + Datum queryKeys[], bool nullFlags[]) Returns TRUE if an indexed item satisfies the query operator with - strategy number n (or might satisfy it, if the recheck + strategy number n (or might satisfy it, if the recheck indication is returned). This function does not have direct access to the indexed item's value, since GIN does not store items explicitly. Rather, what is available is knowledge about which key values extracted from the query appear in a given - indexed item. The check array has length - nkeys, which is the same as the number of keys previously - returned by extractQuery for this query datum. + indexed item. The check array has length + nkeys, which is the same as the number of keys previously + returned by extractQuery for this query datum. Each element of the - check array is TRUE if the indexed item contains the + check array is TRUE if the indexed item contains the corresponding query key, i.e., if (check[i] == TRUE) the i-th key of the - extractQuery result array is present in the indexed item. - The original query datum is - passed in case the consistent method needs to consult it, - and so are the queryKeys[] and nullFlags[] - arrays previously returned by extractQuery. - extra_data is the extra-data array returned by - extractQuery, or NULL if none. + extractQuery result array is present in the indexed item. + The original query datum is + passed in case the consistent method needs to consult it, + and so are the queryKeys[] and nullFlags[] + arrays previously returned by extractQuery. + extra_data is the extra-data array returned by + extractQuery, or NULL if none. - When extractQuery returns a null key in - queryKeys[], the corresponding check[] element + When extractQuery returns a null key in + queryKeys[], the corresponding check[] element is TRUE if the indexed item contains a null key; that is, the - semantics of check[] are like IS NOT DISTINCT - FROM. The consistent function can examine the - corresponding nullFlags[] element if it needs to tell + semantics of check[] are like IS NOT DISTINCT + FROM. The consistent function can examine the + corresponding nullFlags[] element if it needs to tell the difference between a regular value match and a null match. - On success, *recheck should be set to TRUE if the heap + On success, *recheck should be set to TRUE if the heap tuple needs to be rechecked against the query operator, or FALSE if the index test is exact. 
That is, a FALSE return value guarantees that the heap tuple does not match the query; a TRUE return value with - *recheck set to FALSE guarantees that the heap tuple does + *recheck set to FALSE guarantees that the heap tuple does match the query; and a TRUE return value with - *recheck set to TRUE means that the heap tuple might match + *recheck set to TRUE means that the heap tuple might match the query, so it needs to be fetched and rechecked by evaluating the query operator directly against the originally indexed item. @@ -315,30 +315,30 @@ GinTernaryValue triConsistent(GinTernaryValue check[], StrategyNumber n, Datum query, int32 nkeys, Pointer extra_data[], - Datum queryKeys[], bool nullFlags[]) + Datum queryKeys[], bool nullFlags[]) - triConsistent is similar to consistent, - but instead of booleans in the check vector, there are + triConsistent is similar to consistent, + but instead of booleans in the check vector, there are three possible values for each - key: GIN_TRUE, GIN_FALSE and - GIN_MAYBE. GIN_FALSE and GIN_TRUE + key: GIN_TRUE, GIN_FALSE and + GIN_MAYBE. GIN_FALSE and GIN_TRUE have the same meaning as regular boolean values, while - GIN_MAYBE means that the presence of that key is not known. - When GIN_MAYBE values are present, the function should only - return GIN_TRUE if the item certainly matches whether or + GIN_MAYBE means that the presence of that key is not known. + When GIN_MAYBE values are present, the function should only + return GIN_TRUE if the item certainly matches whether or not the index item contains the corresponding query keys. Likewise, the - function must return GIN_FALSE only if the item certainly - does not match, whether or not it contains the GIN_MAYBE - keys. If the result depends on the GIN_MAYBE entries, i.e., + function must return GIN_FALSE only if the item certainly + does not match, whether or not it contains the GIN_MAYBE + keys. If the result depends on the GIN_MAYBE entries, i.e., the match cannot be confirmed or refuted based on the known query keys, - the function must return GIN_MAYBE. + the function must return GIN_MAYBE. - When there are no GIN_MAYBE values in the check - vector, a GIN_MAYBE return value is the equivalent of - setting the recheck flag in the - boolean consistent function. + When there are no GIN_MAYBE values in the check + vector, a GIN_MAYBE return value is the equivalent of + setting the recheck flag in the + boolean consistent function. @@ -352,7 +352,7 @@ - int compare(Datum a, Datum b) + int compare(Datum a, Datum b) Compares two keys (not indexed items!) and returns an integer less than @@ -364,13 +364,13 @@ - Alternatively, if the operator class does not provide a compare + Alternatively, if the operator class does not provide a compare method, GIN will look up the default btree operator class for the index key data type, and use its comparison function. It is recommended to specify the comparison function in a GIN operator class that is meant for just one data type, as looking up the btree operator class costs a few cycles. However, polymorphic GIN operator classes (such - as array_ops) typically cannot specify a single comparison + as array_ops) typically cannot specify a single comparison function. @@ -381,7 +381,7 @@ int comparePartial(Datum partial_key, Datum key, StrategyNumber n, - Pointer extra_data) + Pointer extra_data) Compare a partial-match query key to an index key. 
Returns an integer @@ -389,11 +389,11 @@ does not match the query, but the index scan should continue; zero means that the index key does match the query; greater than zero indicates that the index scan should stop because no more matches - are possible. The strategy number n of the operator + are possible. The strategy number n of the operator that generated the partial match query is provided, in case its semantics are needed to determine when to end the scan. Also, - extra_data is the corresponding element of the extra-data - array made by extractQuery, or NULL if none. + extra_data is the corresponding element of the extra-data + array made by extractQuery, or NULL if none. Null keys are never passed to this function. @@ -402,25 +402,25 @@ - To support partial match queries, an operator class must - provide the comparePartial method, and its - extractQuery method must set the pmatch + To support partial match queries, an operator class must + provide the comparePartial method, and its + extractQuery method must set the pmatch parameter when a partial-match query is encountered. See for details. - The actual data types of the various Datum values mentioned + The actual data types of the various Datum values mentioned above vary depending on the operator class. The item values passed to - extractValue are always of the operator class's input type, and - all key values must be of the class's STORAGE type. The type of - the query argument passed to extractQuery, - consistent and triConsistent is whatever is the + extractValue are always of the operator class's input type, and + all key values must be of the class's STORAGE type. The type of + the query argument passed to extractQuery, + consistent and triConsistent is whatever is the right-hand input type of the class member operator identified by the strategy number. This need not be the same as the indexed type, so long as key values of the correct type can be extracted from it. However, it is recommended that the SQL declarations of these three support functions use - the opclass's indexed data type for the query argument, even + the opclass's indexed data type for the query argument, even though the actual type might be something else depending on the operator. @@ -434,8 +434,8 @@ constructed over keys, where each key is an element of one or more indexed items (a member of an array, for example) and where each tuple in a leaf page contains either a pointer to a B-tree of heap pointers (a - posting tree), or a simple list of heap pointers (a posting - list) when the list is small enough to fit into a single index tuple along + posting tree), or a simple list of heap pointers (a posting + list) when the list is small enough to fit into a single index tuple along with the key value. @@ -443,7 +443,7 @@ As of PostgreSQL 9.1, null key values can be included in the index. Also, placeholder nulls are included in the index for indexed items that are null or contain no keys according to - extractValue. This allows searches that should find empty + extractValue. This allows searches that should find empty items to do so. @@ -461,7 +461,7 @@ intrinsic nature of inverted indexes: inserting or updating one heap row can cause many inserts into the index (one for each key extracted from the indexed item). As of PostgreSQL 8.4, - GIN is capable of postponing much of this work by inserting + GIN is capable of postponing much of this work by inserting new tuples into a temporary, unsorted list of pending entries. 
When the table is vacuumed or autoanalyzed, or when gin_clean_pending_list function is called, or if the @@ -479,7 +479,7 @@ of pending entries in addition to searching the regular index, and so a large list of pending entries will slow searches significantly. Another disadvantage is that, while most updates are fast, an update - that causes the pending list to become too large will incur an + that causes the pending list to become too large will incur an immediate cleanup cycle and thus be much slower than other updates. Proper use of autovacuum can minimize both of these problems. @@ -497,15 +497,15 @@ Partial Match Algorithm - GIN can support partial match queries, in which the query + GIN can support partial match queries, in which the query does not determine an exact match for one or more keys, but the possible matches fall within a reasonably narrow range of key values (within the - key sorting order determined by the compare support method). - The extractQuery method, instead of returning a key value + key sorting order determined by the compare support method). + The extractQuery method, instead of returning a key value to be matched exactly, returns a key value that is the lower bound of - the range to be searched, and sets the pmatch flag true. - The key range is then scanned using the comparePartial - method. comparePartial must return zero for a matching + the range to be searched, and sets the pmatch flag true. + The key range is then scanned using the comparePartial + method. comparePartial must return zero for a matching index key, less than zero for a non-match that is still within the range to be searched, or greater than zero if the index key is past the range that could match. @@ -542,7 +542,7 @@ Build time for a GIN index is very sensitive to - the maintenance_work_mem setting; it doesn't pay to + the maintenance_work_mem setting; it doesn't pay to skimp on work memory during index creation. @@ -553,18 +553,18 @@ During a series of insertions into an existing GIN - index that has fastupdate enabled, the system will clean up + index that has fastupdate enabled, the system will clean up the pending-entry list whenever the list grows larger than - gin_pending_list_limit. To avoid fluctuations in observed + gin_pending_list_limit. To avoid fluctuations in observed response time, it's desirable to have pending-list cleanup occur in the background (i.e., via autovacuum). Foreground cleanup operations - can be avoided by increasing gin_pending_list_limit + can be avoided by increasing gin_pending_list_limit or making autovacuum more aggressive. However, enlarging the threshold of the cleanup operation means that if a foreground cleanup does occur, it will take even longer. - gin_pending_list_limit can be overridden for individual + gin_pending_list_limit can be overridden for individual GIN indexes by changing storage parameters, and which allows each GIN index to have its own cleanup threshold. For example, it's possible to increase the threshold only for the GIN @@ -616,7 +616,7 @@ GIN assumes that indexable operators are strict. This - means that extractValue will not be called at all on a null + means that extractValue will not be called at all on a null item value (instead, a placeholder index entry is created automatically), and extractQuery will not be called on a null query value either (instead, the query is presumed to be unsatisfiable). 
Note @@ -629,36 +629,36 @@ Examples - The core PostgreSQL distribution + The core PostgreSQL distribution includes the GIN operator classes previously shown in . - The following contrib modules also contain + The following contrib modules also contain GIN operator classes: - btree_gin + btree_gin B-tree equivalent functionality for several data types - hstore + hstore Module for storing (key, value) pairs - intarray + intarray Enhanced support for int[] - pg_trgm + pg_trgm Text similarity using trigram matching diff --git a/doc/src/sgml/gist.sgml b/doc/src/sgml/gist.sgml index 1648eb3672..4e4470d439 100644 --- a/doc/src/sgml/gist.sgml +++ b/doc/src/sgml/gist.sgml @@ -44,7 +44,7 @@ Built-in Operator Classes - The core PostgreSQL distribution + The core PostgreSQL distribution includes the GiST operator classes shown in . (Some of the optional modules described in @@ -64,142 +64,142 @@ - box_ops - box + box_ops + box - && - &> - &< - &<| - >> - << - <<| - <@ - @> - @ - |&> - |>> - ~ - ~= + && + &> + &< + &<| + >> + << + <<| + <@ + @> + @ + |&> + |>> + ~ + ~= - circle_ops - circle + circle_ops + circle - && - &> - &< - &<| - >> - << - <<| - <@ - @> - @ - |&> - |>> - ~ - ~= + && + &> + &< + &<| + >> + << + <<| + <@ + @> + @ + |&> + |>> + ~ + ~= - <-> + <-> - inet_ops - inet, cidr + inet_ops + inet, cidr - && - >> - >>= - > - >= - <> - << - <<= - < - <= - = + && + >> + >>= + > + >= + <> + << + <<= + < + <= + = - point_ops - point + point_ops + point - >> - >^ - << - <@ - <@ - <@ - <^ - ~= + >> + >^ + << + <@ + <@ + <@ + <^ + ~= - <-> + <-> - poly_ops - polygon + poly_ops + polygon - && - &> - &< - &<| - >> - << - <<| - <@ - @> - @ - |&> - |>> - ~ - ~= + && + &> + &< + &<| + >> + << + <<| + <@ + @> + @ + |&> + |>> + ~ + ~= - <-> + <-> - range_ops + range_ops any range type - && - &> - &< - >> - << - <@ - -|- - = - @> - @> + && + &> + &< + >> + << + <@ + -|- + = + @> + @> - tsquery_ops - tsquery + tsquery_ops + tsquery - <@ - @> + <@ + @> - tsvector_ops - tsvector + tsvector_ops + tsvector - @@ + @@ @@ -209,9 +209,9 @@ - For historical reasons, the inet_ops operator class is - not the default class for types inet and cidr. - To use it, mention the class name in CREATE INDEX, + For historical reasons, the inet_ops operator class is + not the default class for types inet and cidr. + To use it, mention the class name in CREATE INDEX, for example CREATE INDEX ON my_table USING GIST (my_inet_column inet_ops); @@ -270,53 +270,53 @@ CREATE INDEX ON my_table USING GIST (my_inet_column inet_ops); There are five methods that an index operator class for GiST must provide, and four that are optional. Correctness of the index is ensured - by proper implementation of the same, consistent - and union methods, while efficiency (size and speed) of the - index will depend on the penalty and picksplit + by proper implementation of the same, consistent + and union methods, while efficiency (size and speed) of the + index will depend on the penalty and picksplit methods. - Two optional methods are compress and - decompress, which allow an index to have internal tree data of + Two optional methods are compress and + decompress, which allow an index to have internal tree data of a different type than the data it indexes. The leaves are to be of the indexed data type, while the other tree nodes can be of any C struct (but - you still have to follow PostgreSQL data type rules here, - see about varlena for variable sized data). 
If the tree's - internal data type exists at the SQL level, the STORAGE option - of the CREATE OPERATOR CLASS command can be used. - The optional eighth method is distance, which is needed + you still have to follow PostgreSQL data type rules here, + see about varlena for variable sized data). If the tree's + internal data type exists at the SQL level, the STORAGE option + of the CREATE OPERATOR CLASS command can be used. + The optional eighth method is distance, which is needed if the operator class wishes to support ordered scans (nearest-neighbor - searches). The optional ninth method fetch is needed if the + searches). The optional ninth method fetch is needed if the operator class wishes to support index-only scans, except when the - compress method is omitted. + compress method is omitted. - consistent + consistent - Given an index entry p and a query value q, + Given an index entry p and a query value q, this function determines whether the index entry is - consistent with the query; that is, could the predicate - indexed_column - indexable_operator q be true for + consistent with the query; that is, could the predicate + indexed_column + indexable_operator q be true for any row represented by the index entry? For a leaf index entry this is equivalent to testing the indexable condition, while for an internal tree node this determines whether it is necessary to scan the subtree of the index represented by the tree node. When the result is - true, a recheck flag must also be returned. + true, a recheck flag must also be returned. This indicates whether the predicate is certainly true or only possibly - true. If recheck = false then the index has - tested the predicate condition exactly, whereas if recheck - = true the row is only a candidate match. In that case the + true. If recheck = false then the index has + tested the predicate condition exactly, whereas if recheck + = true the row is only a candidate match. In that case the system will automatically evaluate the - indexable_operator against the actual row value to see + indexable_operator against the actual row value to see if it is really a match. This convention allows GiST to support both lossless and lossy index structures. - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_consistent(internal, data_type, smallint, oid, internal) @@ -356,23 +356,23 @@ my_consistent(PG_FUNCTION_ARGS) } - Here, key is an element in the index and query - the value being looked up in the index. The StrategyNumber + Here, key is an element in the index and query + the value being looked up in the index. The StrategyNumber parameter indicates which operator of your operator class is being applied — it matches one of the operator numbers in the - CREATE OPERATOR CLASS command. + CREATE OPERATOR CLASS command. Depending on which operators you have included in the class, the data - type of query could vary with the operator, since it will + type of query could vary with the operator, since it will be whatever type is on the righthand side of the operator, which might be different from the indexed data type appearing on the lefthand side. (The above code skeleton assumes that only one type is possible; if - not, fetching the query argument value would have to depend + not, fetching the query argument value would have to depend on the operator.) 
It is recommended that the SQL declaration of - the consistent function use the opclass's indexed data - type for the query argument, even though the actual type + the consistent function use the opclass's indexed data + type for the query argument, even though the actual type might be something else depending on the operator. @@ -380,7 +380,7 @@ my_consistent(PG_FUNCTION_ARGS) - union + union This method consolidates information in the tree. Given a set of @@ -389,7 +389,7 @@ my_consistent(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_union(internal, internal) @@ -439,44 +439,44 @@ my_union(PG_FUNCTION_ARGS) As you can see, in this skeleton we're dealing with a data type - where union(X, Y, Z) = union(union(X, Y), Z). It's easy + where union(X, Y, Z) = union(union(X, Y), Z). It's easy enough to support data types where this is not the case, by implementing the proper union algorithm in this - GiST support method. + GiST support method. - The result of the union function must be a value of the + The result of the union function must be a value of the index's storage type, whatever that is (it might or might not be - different from the indexed column's type). The union - function should return a pointer to newly palloc()ed + different from the indexed column's type). The union + function should return a pointer to newly palloc()ed memory. You can't just return the input value as-is, even if there is no type change. - As shown above, the union function's - first internal argument is actually - a GistEntryVector pointer. The second argument is a + As shown above, the union function's + first internal argument is actually + a GistEntryVector pointer. The second argument is a pointer to an integer variable, which can be ignored. (It used to be - required that the union function store the size of its + required that the union function store the size of its result value into that variable, but this is no longer necessary.) - compress + compress Converts a data item into a format suitable for physical storage in an index page. - If the compress method is omitted, data items are stored + If the compress method is omitted, data items are stored in the index without modification. - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_compress(internal) @@ -519,7 +519,7 @@ my_compress(PG_FUNCTION_ARGS) - You have to adapt compressed_data_type to the specific + You have to adapt compressed_data_type to the specific type you're converting to in order to compress your leaf nodes, of course. @@ -527,24 +527,24 @@ my_compress(PG_FUNCTION_ARGS) - decompress + decompress Converts the stored representation of a data item into a format that can be manipulated by the other GiST methods in the operator class. - If the decompress method is omitted, it is assumed that + If the decompress method is omitted, it is assumed that the other GiST methods can work directly on the stored data format. - (decompress is not necessarily the reverse of + (decompress is not necessarily the reverse of the compress method; in particular, if compress is lossy then it's impossible - for decompress to exactly reconstruct the original - data. decompress is not necessarily equivalent - to fetch, either, since the other GiST methods might not + for decompress to exactly reconstruct the original + data. 
decompress is not necessarily equivalent + to fetch, either, since the other GiST methods might not require full reconstruction of the data.) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_decompress(internal) @@ -573,7 +573,7 @@ my_decompress(PG_FUNCTION_ARGS) - penalty + penalty Returns a value indicating the cost of inserting the new @@ -584,7 +584,7 @@ my_decompress(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_penalty(internal, internal, internal) @@ -612,15 +612,15 @@ my_penalty(PG_FUNCTION_ARGS) } - For historical reasons, the penalty function doesn't - just return a float result; instead it has to store the value + For historical reasons, the penalty function doesn't + just return a float result; instead it has to store the value at the location indicated by the third argument. The return value per se is ignored, though it's conventional to pass back the address of that argument. - The penalty function is crucial to good performance of + The penalty function is crucial to good performance of the index. It'll get used at insertion time to determine which branch to follow when choosing where to add the new entry in the tree. At query time, the more balanced the index, the quicker the lookup. @@ -629,7 +629,7 @@ my_penalty(PG_FUNCTION_ARGS) - picksplit + picksplit When an index page split is necessary, this function decides which @@ -638,7 +638,7 @@ my_penalty(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_picksplit(internal, internal) @@ -725,33 +725,33 @@ my_picksplit(PG_FUNCTION_ARGS) } - Notice that the picksplit function's result is delivered - by modifying the passed-in v structure. The return + Notice that the picksplit function's result is delivered + by modifying the passed-in v structure. The return value per se is ignored, though it's conventional to pass back the - address of v. + address of v. - Like penalty, the picksplit function + Like penalty, the picksplit function is crucial to good performance of the index. Designing suitable - penalty and picksplit implementations + penalty and picksplit implementations is where the challenge of implementing well-performing - GiST indexes lies. + GiST indexes lies. - same + same Returns true if two index entries are identical, false otherwise. - (An index entry is a value of the index's storage type, + (An index entry is a value of the index's storage type, not necessarily the original indexed column's type.) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_same(storage_type, storage_type, internal) @@ -777,7 +777,7 @@ my_same(PG_FUNCTION_ARGS) } - For historical reasons, the same function doesn't + For historical reasons, the same function doesn't just return a Boolean result; instead it has to store the flag at the location indicated by the third argument. The return value per se is ignored, though it's conventional to pass back the @@ -787,15 +787,15 @@ my_same(PG_FUNCTION_ARGS) - distance + distance - Given an index entry p and a query value q, + Given an index entry p and a query value q, this function determines the index entry's - distance from the query value. 
This function must be + distance from the query value. This function must be supplied if the operator class contains any ordering operators. A query using the ordering operator will be implemented by returning - index entries with the smallest distance values first, + index entries with the smallest distance values first, so the results must be consistent with the operator's semantics. For a leaf index entry the result just represents the distance to the index entry; for an internal tree node, the result must be the @@ -803,7 +803,7 @@ my_same(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_distance(internal, data_type, smallint, oid, internal) @@ -836,8 +836,8 @@ my_distance(PG_FUNCTION_ARGS) } - The arguments to the distance function are identical to - the arguments of the consistent function. + The arguments to the distance function are identical to + the arguments of the consistent function. @@ -847,31 +847,31 @@ my_distance(PG_FUNCTION_ARGS) geometric applications. For an internal tree node, the distance returned must not be greater than the distance to any of the child nodes. If the returned distance is not exact, the function must set - *recheck to true. (This is not necessary for internal tree + *recheck to true. (This is not necessary for internal tree nodes; for them, the calculation is always assumed to be inexact.) In this case the executor will calculate the accurate distance after fetching the tuple from the heap, and reorder the tuples if necessary. - If the distance function returns *recheck = true for any + If the distance function returns *recheck = true for any leaf node, the original ordering operator's return type must - be float8 or float4, and the distance function's + be float8 or float4, and the distance function's result values must be comparable to those of the original ordering operator, since the executor will sort using both distance function results and recalculated ordering-operator results. Otherwise, the - distance function's result values can be any finite float8 + distance function's result values can be any finite float8 values, so long as the relative order of the result values matches the order returned by the ordering operator. (Infinity and minus infinity are used internally to handle cases such as nulls, so it is not - recommended that distance functions return these values.) + recommended that distance functions return these values.) - fetch + fetch Converts the compressed index representation of a data item into the @@ -880,7 +880,7 @@ my_distance(PG_FUNCTION_ARGS) - The SQL declaration of the function must look like this: + The SQL declaration of the function must look like this: CREATE OR REPLACE FUNCTION my_fetch(internal) @@ -889,14 +889,14 @@ AS 'MODULE_PATHNAME' LANGUAGE C STRICT; - The argument is a pointer to a GISTENTRY struct. On - entry, its key field contains a non-NULL leaf datum in - compressed form. The return value is another GISTENTRY - struct, whose key field contains the same datum in its + The argument is a pointer to a GISTENTRY struct. On + entry, its key field contains a non-NULL leaf datum in + compressed form. The return value is another GISTENTRY + struct, whose key field contains the same datum in its original, uncompressed form. If the opclass's compress function does - nothing for leaf entries, the fetch method can return the + nothing for leaf entries, the fetch method can return the argument as-is. 
Or, if the opclass does not have a compress function, - the fetch method can be omitted as well, since it would + the fetch method can be omitted as well, since it would necessarily be a no-op. @@ -933,7 +933,7 @@ my_fetch(PG_FUNCTION_ARGS) If the compress method is lossy for leaf entries, the operator class cannot support index-only scans, and must not define - a fetch function. + a fetch function. @@ -942,15 +942,15 @@ my_fetch(PG_FUNCTION_ARGS) All the GiST support methods are normally called in short-lived memory - contexts; that is, CurrentMemoryContext will get reset after + contexts; that is, CurrentMemoryContext will get reset after each tuple is processed. It is therefore not very important to worry about pfree'ing everything you palloc. However, in some cases it's useful for a support method to cache data across repeated calls. To do that, allocate - the longer-lived data in fcinfo->flinfo->fn_mcxt, and - keep a pointer to it in fcinfo->flinfo->fn_extra. Such + the longer-lived data in fcinfo->flinfo->fn_mcxt, and + keep a pointer to it in fcinfo->flinfo->fn_extra. Such data will survive for the life of the index operation (e.g., a single GiST index scan, index build, or index tuple insertion). Be careful to pfree - the previous value when replacing a fn_extra value, or the leak + the previous value when replacing a fn_extra value, or the leak will accumulate for the duration of the operation. @@ -974,7 +974,7 @@ my_fetch(PG_FUNCTION_ARGS) - However, buffering index build needs to call the penalty + However, buffering index build needs to call the penalty function more often, which consumes some extra CPU resources. Also, the buffers used in the buffering build need temporary disk space, up to the size of the resulting index. Buffering can also influence the quality @@ -1002,57 +1002,57 @@ my_fetch(PG_FUNCTION_ARGS) The PostgreSQL source distribution includes several examples of index methods implemented using GiST. The core system currently provides text search - support (indexing for tsvector and tsquery) as well as + support (indexing for tsvector and tsquery) as well as R-Tree equivalent functionality for some of the built-in geometric data types - (see src/backend/access/gist/gistproc.c). The following - contrib modules also contain GiST + (see src/backend/access/gist/gistproc.c). The following + contrib modules also contain GiST operator classes: - btree_gist + btree_gist B-tree equivalent functionality for several data types - cube + cube Indexing for multidimensional cubes - hstore + hstore Module for storing (key, value) pairs - intarray + intarray RD-Tree for one-dimensional array of int4 values - ltree + ltree Indexing for tree-like structures - pg_trgm + pg_trgm Text similarity using trigram matching - seg + seg Indexing for float ranges diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml index 6c54fbd40d..086d6abb30 100644 --- a/doc/src/sgml/high-availability.sgml +++ b/doc/src/sgml/high-availability.sgml @@ -3,12 +3,12 @@ High Availability, Load Balancing, and Replication - high availability - failover - replication - load balancing - clustering - data partitioning + high availability + failover + replication + load balancing + clustering + data partitioning Database servers can work together to allow a second server to @@ -38,12 +38,12 @@ Some solutions deal with synchronization by allowing only one server to modify the data. Servers that can modify data are - called read/write, master or primary servers. 
- Servers that track changes in the master are called standby - or secondary servers. A standby server that cannot be connected + called read/write, master or primary servers. + Servers that track changes in the master are called standby + or secondary servers. A standby server that cannot be connected to until it is promoted to a master server is called a warm - standby server, and one that can accept connections and serves read-only - queries is called a hot standby server. + standby server, and one that can accept connections and serves read-only + queries is called a hot standby server. @@ -99,7 +99,7 @@ Shared hardware functionality is common in network storage devices. Using a network file system is also possible, though care must be - taken that the file system has full POSIX behavior (see POSIX behavior (see ). One significant limitation of this method is that if the shared disk array fails or becomes corrupt, the primary and standby servers are both nonfunctional. Another issue is @@ -121,7 +121,7 @@ the mirroring must be done in a way that ensures the standby server has a consistent copy of the file system — specifically, writes to the standby must be done in the same order as those on the master. - DRBD is a popular file system replication solution + DRBD is a popular file system replication solution for Linux. @@ -143,7 +143,7 @@ protocol to make nodes agree on a serializable transactional order. Warm and hot standby servers can be kept current by reading a - stream of write-ahead log (WAL) + stream of write-ahead log (WAL) records. If the main server fails, the standby contains almost all of the data of the main server, and can be quickly made the new master database server. This can be synchronous or @@ -189,7 +189,7 @@ protocol to make nodes agree on a serializable transactional order. - Slony-I is an example of this type of replication, with per-table + Slony-I is an example of this type of replication, with per-table granularity, and support for multiple standby servers. Because it updates the standby server asynchronously (in batches), there is possible data loss during fail over. @@ -212,7 +212,7 @@ protocol to make nodes agree on a serializable transactional order. If queries are simply broadcast unmodified, functions like - random(), CURRENT_TIMESTAMP, and + random(), CURRENT_TIMESTAMP, and sequences can have different values on different servers. This is because each server operates independently, and because SQL queries are broadcast (and not actual modified rows). If @@ -226,7 +226,7 @@ protocol to make nodes agree on a serializable transactional order. transactions either commit or abort on all servers, perhaps using two-phase commit ( and ). - Pgpool-II and Continuent Tungsten + Pgpool-II and Continuent Tungsten are examples of this type of replication. @@ -266,12 +266,12 @@ protocol to make nodes agree on a serializable transactional order. there is no need to partition workloads between master and standby servers, and because the data changes are sent from one server to another, there is no problem with non-deterministic - functions like random(). + functions like random(). - PostgreSQL does not offer this type of replication, - though PostgreSQL two-phase commit (PostgreSQL does not offer this type of replication, + though PostgreSQL two-phase commit ( and ) can be used to implement this in application code or middleware. @@ -284,8 +284,8 @@ protocol to make nodes agree on a serializable transactional order. 
- Because PostgreSQL is open source and easily - extended, a number of companies have taken PostgreSQL + Because PostgreSQL is open source and easily + extended, a number of companies have taken PostgreSQL and created commercial closed-source solutions with unique failover, replication, and load balancing capabilities. @@ -475,9 +475,9 @@ protocol to make nodes agree on a serializable transactional order. concurrently on a single query. It is usually accomplished by splitting the data among servers and having each server execute its part of the query and return results to a central server where they - are combined and returned to the user. Pgpool-II + are combined and returned to the user. Pgpool-II has this capability. Also, this can be implemented using the - PL/Proxy tool set. + PL/Proxy tool set. @@ -494,10 +494,10 @@ protocol to make nodes agree on a serializable transactional order. Continuous archiving can be used to create a high - availability (HA) cluster configuration with one or more - standby servers ready to take over operations if the + availability (HA) cluster configuration with one or more + standby servers ready to take over operations if the primary server fails. This capability is widely referred to as - warm standby or log shipping. + warm standby or log shipping. @@ -513,7 +513,7 @@ protocol to make nodes agree on a serializable transactional order. Directly moving WAL records from one database server to another - is typically described as log shipping. PostgreSQL + is typically described as log shipping. PostgreSQL implements file-based log shipping by transferring WAL records one file (WAL segment) at a time. WAL files (16MB) can be shipped easily and cheaply over any distance, whether it be to an @@ -597,7 +597,7 @@ protocol to make nodes agree on a serializable transactional order. In general, log shipping between servers running different major - PostgreSQL release + PostgreSQL release levels is not possible. It is the policy of the PostgreSQL Global Development Group not to make changes to disk formats during minor release upgrades, so it is likely that running different minor release levels @@ -621,32 +621,32 @@ protocol to make nodes agree on a serializable transactional order. (see ) or directly from the master over a TCP connection (streaming replication). The standby server will also attempt to restore any WAL found in the standby cluster's - pg_wal directory. That typically happens after a server + pg_wal directory. That typically happens after a server restart, when the standby replays again WAL that was streamed from the master before the restart, but you can also manually copy files to - pg_wal at any time to have them replayed. + pg_wal at any time to have them replayed. At startup, the standby begins by restoring all WAL available in the - archive location, calling restore_command. Once it - reaches the end of WAL available there and restore_command - fails, it tries to restore any WAL available in the pg_wal directory. + archive location, calling restore_command. Once it + reaches the end of WAL available there and restore_command + fails, it tries to restore any WAL available in the pg_wal directory. If that fails, and streaming replication has been configured, the standby tries to connect to the primary server and start streaming WAL - from the last valid record found in archive or pg_wal. If that fails + from the last valid record found in archive or pg_wal. 
If that fails or streaming replication is not configured, or if the connection is later disconnected, the standby goes back to step 1 and tries to restore the file from the archive again. This loop of retries from the - archive, pg_wal, and via streaming replication goes on until the server + archive, pg_wal, and via streaming replication goes on until the server is stopped or failover is triggered by a trigger file. Standby mode is exited and the server switches to normal operation - when pg_ctl promote is run or a trigger file is found - (trigger_file). Before failover, - any WAL immediately available in the archive or in pg_wal will be + when pg_ctl promote is run or a trigger file is found + (trigger_file). Before failover, + any WAL immediately available in the archive or in pg_wal will be restored, but no attempt is made to connect to the master. @@ -667,8 +667,8 @@ protocol to make nodes agree on a serializable transactional order. If you want to use streaming replication, set up authentication on the primary server to allow replication connections from the standby server(s); that is, create a role and provide a suitable entry or - entries in pg_hba.conf with the database field set to - replication. Also ensure max_wal_senders is set + entries in pg_hba.conf with the database field set to + replication. Also ensure max_wal_senders is set to a sufficiently large value in the configuration file of the primary server. If replication slots will be used, ensure that max_replication_slots is set sufficiently @@ -687,19 +687,19 @@ protocol to make nodes agree on a serializable transactional order. To set up the standby server, restore the base backup taken from primary server (see ). Create a recovery - command file recovery.conf in the standby's cluster data - directory, and turn on standby_mode. Set - restore_command to a simple command to copy files from + command file recovery.conf in the standby's cluster data + directory, and turn on standby_mode. Set + restore_command to a simple command to copy files from the WAL archive. If you plan to have multiple standby servers for high - availability purposes, set recovery_target_timeline to - latest, to make the standby server follow the timeline change + availability purposes, set recovery_target_timeline to + latest, to make the standby server follow the timeline change that occurs at failover to another standby. Do not use pg_standby or similar tools with the built-in standby mode - described here. restore_command should return immediately + described here. restore_command should return immediately if the file does not exist; the server will retry the command again if necessary. See for using tools like pg_standby. @@ -708,11 +708,11 @@ protocol to make nodes agree on a serializable transactional order. If you want to use streaming replication, fill in - primary_conninfo with a libpq connection string, including + primary_conninfo with a libpq connection string, including the host name (or IP address) and any additional details needed to connect to the primary server. If the primary needs a password for authentication, the password needs to be specified in - primary_conninfo as well. + primary_conninfo as well. @@ -726,8 +726,8 @@ protocol to make nodes agree on a serializable transactional order. If you're using a WAL archive, its size can be minimized using the parameter to remove files that are no longer required by the standby server. 
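    For illustration, the replication role and the primary-side settings
    described above could be prepared as follows; the user name and
    password match the examples later in this section, the numeric values
    are arbitrary, and both parameters take effect only after a server
    restart:

CREATE ROLE foo WITH REPLICATION LOGIN PASSWORD 'foopass';
ALTER SYSTEM SET max_wal_senders = 10;
ALTER SYSTEM SET max_replication_slots = 10;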
- The pg_archivecleanup utility is designed specifically to - be used with archive_cleanup_command in typical single-standby + The pg_archivecleanup utility is designed specifically to + be used with archive_cleanup_command in typical single-standby configurations, see . Note however, that if you're using the archive for backup purposes, you need to retain files needed to recover from at least the latest base @@ -735,7 +735,7 @@ protocol to make nodes agree on a serializable transactional order. - A simple example of a recovery.conf is: + A simple example of a recovery.conf is: standby_mode = 'on' primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' @@ -746,7 +746,7 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' You can have any number of standby servers, but if you use streaming - replication, make sure you set max_wal_senders high enough in + replication, make sure you set max_wal_senders high enough in the primary to allow them to be connected simultaneously. @@ -773,7 +773,7 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' changes becoming visible in the standby. This delay is however much smaller than with file-based log shipping, typically under one second assuming the standby is powerful enough to keep up with the load. With - streaming replication, archive_timeout is not required to + streaming replication, archive_timeout is not required to reduce the data loss window. @@ -782,7 +782,7 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' archiving, the server might recycle old WAL segments before the standby has received them. If this occurs, the standby will need to be reinitialized from a new base backup. You can avoid this by setting - wal_keep_segments to a value large enough to ensure that + wal_keep_segments to a value large enough to ensure that WAL segments are not recycled too early, or by configuring a replication slot for the standby. If you set up a WAL archive that's accessible from the standby, these solutions are not required, since the standby can @@ -793,11 +793,11 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' To use streaming replication, set up a file-based log-shipping standby server as described in . The step that turns a file-based log-shipping standby into streaming replication - standby is setting primary_conninfo setting in the - recovery.conf file to point to the primary server. Set + standby is setting primary_conninfo setting in the + recovery.conf file to point to the primary server. Set and authentication options - (see pg_hba.conf) on the primary so that the standby server - can connect to the replication pseudo-database on the primary + (see pg_hba.conf) on the primary so that the standby server + can connect to the replication pseudo-database on the primary server (see ). @@ -815,7 +815,7 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' - When the standby is started and primary_conninfo is set + When the standby is started and primary_conninfo is set correctly, the standby will connect to the primary after replaying all WAL files available in the archive. If the connection is established successfully, you will see a walreceiver process in the standby, and @@ -829,20 +829,20 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r' so that only trusted users can read the WAL stream, because it is easy to extract privileged information from it. 
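    Besides looking for the walreceiver in the process list, a standby's
    streaming connection can be inspected from SQL through the
    pg_stat_wal_receiver view; that view is not discussed further in this
    section, so the query below is only an illustration:

-- On the standby: returns one row while the walreceiver is active
SELECT pid, status, received_lsn, conninfo
FROM pg_stat_wal_receiver;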
Standby servers must authenticate to the primary as a superuser or an account that has the - REPLICATION privilege. It is recommended to create a - dedicated user account with REPLICATION and LOGIN - privileges for replication. While REPLICATION privilege gives + REPLICATION privilege. It is recommended to create a + dedicated user account with REPLICATION and LOGIN + privileges for replication. While REPLICATION privilege gives very high permissions, it does not allow the user to modify any data on - the primary system, which the SUPERUSER privilege does. + the primary system, which the SUPERUSER privilege does. Client authentication for replication is controlled by a - pg_hba.conf record specifying replication in the - database field. For example, if the standby is running on - host IP 192.168.1.100 and the account name for replication - is foo, the administrator can add the following line to the - pg_hba.conf file on the primary: + pg_hba.conf record specifying replication in the + database field. For example, if the standby is running on + host IP 192.168.1.100 and the account name for replication + is foo, the administrator can add the following line to the + pg_hba.conf file on the primary: # Allow the user "foo" from host 192.168.1.100 to connect to the primary @@ -854,14 +854,14 @@ host replication foo 192.168.1.100/32 md5 The host name and port number of the primary, connection user name, - and password are specified in the recovery.conf file. - The password can also be set in the ~/.pgpass file on the - standby (specify replication in the database + and password are specified in the recovery.conf file. + The password can also be set in the ~/.pgpass file on the + standby (specify replication in the database field). - For example, if the primary is running on host IP 192.168.1.50, + For example, if the primary is running on host IP 192.168.1.50, port 5432, the account name for replication is - foo, and the password is foopass, the administrator - can add the following line to the recovery.conf file on the + foo, and the password is foopass, the administrator + can add the following line to the recovery.conf file on the standby: @@ -880,22 +880,22 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' standby. You can calculate this lag by comparing the current WAL write location on the primary with the last WAL location received by the standby. These locations can be retrieved using - pg_current_wal_lsn on the primary and - pg_last_wal_receive_lsn on the standby, + pg_current_wal_lsn on the primary and + pg_last_wal_receive_lsn on the standby, respectively (see and for details). The last WAL receive location in the standby is also displayed in the process status of the WAL receiver process, displayed using the - ps command (see for details). + ps command (see for details). You can retrieve a list of WAL sender processes via the - pg_stat_replication view. Large differences between - pg_current_wal_lsn and the view's sent_lsn field + pg_stat_replication view. Large differences between + pg_current_wal_lsn and the view's sent_lsn field might indicate that the master server is under heavy load, while - differences between sent_lsn and - pg_last_wal_receive_lsn on the standby might indicate + differences between sent_lsn and + pg_last_wal_receive_lsn on the standby might indicate network delay, or that the standby is under heavy load. 
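    For example, the comparisons just described could be written as
    follows, running the first query on the primary and the second on the
    standby:

-- On the primary: WAL generated locally but not yet sent, per standby
SELECT application_name,
       pg_current_wal_lsn() - sent_lsn AS send_lag_bytes
FROM pg_stat_replication;

-- On the standby: last WAL location received and synced to disk
SELECT pg_last_wal_receive_lsn();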
@@ -911,7 +911,7 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' Replication slots provide an automated way to ensure that the master does not remove WAL segments until they have been received by all standbys, and that the master does not remove rows which could cause a - recovery conflict even when the + recovery conflict even when the standby is disconnected. @@ -922,7 +922,7 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass' However, these methods often result in retaining more WAL segments than required, whereas replication slots retain only the number of segments known to be needed. An advantage of these methods is that they bound - the space requirement for pg_wal; there is currently no way + the space requirement for pg_wal; there is currently no way to do this using replication slots. @@ -966,8 +966,8 @@ postgres=# SELECT * FROM pg_replication_slots; node_a_slot | physical | | | f | | | (1 row) - To configure the standby to use this slot, primary_slot_name - should be configured in the standby's recovery.conf. + To configure the standby to use this slot, primary_slot_name + should be configured in the standby's recovery.conf. Here is a simple example: standby_mode = 'on' @@ -1022,7 +1022,7 @@ primary_slot_name = 'node_a_slot' If an upstream standby server is promoted to become new master, downstream servers will continue to stream from the new master if - recovery_target_timeline is set to 'latest'. + recovery_target_timeline is set to 'latest'. @@ -1031,7 +1031,7 @@ primary_slot_name = 'node_a_slot' and , and configure host-based authentication). - You will also need to set primary_conninfo in the downstream + You will also need to set primary_conninfo in the downstream standby to point to the cascading standby. @@ -1044,7 +1044,7 @@ primary_slot_name = 'node_a_slot' - PostgreSQL streaming replication is asynchronous by + PostgreSQL streaming replication is asynchronous by default. If the primary server crashes then some transactions that were committed may not have been replicated to the standby server, causing data loss. The amount @@ -1058,8 +1058,8 @@ primary_slot_name = 'node_a_slot' standby servers. This extends that standard level of durability offered by a transaction commit. This level of protection is referred to as 2-safe replication in computer science theory, and group-1-safe - (group-safe and 1-safe) when synchronous_commit is set to - remote_write. + (group-safe and 1-safe) when synchronous_commit is set to + remote_write. @@ -1104,14 +1104,14 @@ primary_slot_name = 'node_a_slot' Once streaming replication has been configured, configuring synchronous replication requires only one additional configuration step: must be set to - a non-empty value. synchronous_commit must also be set to - on, but since this is the default value, typically no change is + a non-empty value. synchronous_commit must also be set to + on, but since this is the default value, typically no change is required. (See and .) This configuration will cause each commit to wait for confirmation that the standby has written the commit record to durable storage. - synchronous_commit can be set by individual + synchronous_commit can be set by individual users, so it can be configured in the configuration file, for particular users or databases, or dynamically by applications, in order to control the durability guarantee on a per-transaction basis. 
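    For example, assuming an illustrative role and table:

-- Per-role default, e.g. for a bulk-loading application
ALTER ROLE batchloader SET synchronous_commit = off;

-- Per-transaction override, affecting only this transaction
BEGIN;
SET LOCAL synchronous_commit = off;
INSERT INTO audit_log VALUES (now(), 'low-value event');
COMMIT;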
@@ -1121,12 +1121,12 @@ primary_slot_name = 'node_a_slot' After a commit record has been written to disk on the primary, the WAL record is then sent to the standby. The standby sends reply messages each time a new batch of WAL data is written to disk, unless - wal_receiver_status_interval is set to zero on the standby. - In the case that synchronous_commit is set to - remote_apply, the standby sends reply messages when the commit + wal_receiver_status_interval is set to zero on the standby. + In the case that synchronous_commit is set to + remote_apply, the standby sends reply messages when the commit record is replayed, making the transaction visible. If the standby is chosen as a synchronous standby, according to the setting - of synchronous_standby_names on the primary, the reply + of synchronous_standby_names on the primary, the reply messages from that standby will be considered along with those from other synchronous standbys to decide when to release transactions waiting for confirmation that the commit record has been received. These parameters @@ -1138,13 +1138,13 @@ primary_slot_name = 'node_a_slot' - Setting synchronous_commit to remote_write will + Setting synchronous_commit to remote_write will cause each commit to wait for confirmation that the standby has received the commit record and written it out to its own operating system, but not for the data to be flushed to disk on the standby. This - setting provides a weaker guarantee of durability than on + setting provides a weaker guarantee of durability than on does: the standby could lose the data in the event of an operating system - crash, though not a PostgreSQL crash. + crash, though not a PostgreSQL crash. However, it's a useful setting in practice because it can decrease the response time for the transaction. Data loss could only occur if both the primary and the standby crash and @@ -1152,7 +1152,7 @@ primary_slot_name = 'node_a_slot' - Setting synchronous_commit to remote_apply will + Setting synchronous_commit to remote_apply will cause each commit to wait until the current synchronous standbys report that they have replayed the transaction, making it visible to user queries. In simple cases, this allows for load balancing with causal @@ -1176,12 +1176,12 @@ primary_slot_name = 'node_a_slot' transactions will wait until all the standby servers which are considered as synchronous confirm receipt of their data. The number of synchronous standbys that transactions must wait for replies from is specified in - synchronous_standby_names. This parameter also specifies - a list of standby names and the method (FIRST and - ANY) to choose synchronous standbys from the listed ones. + synchronous_standby_names. This parameter also specifies + a list of standby names and the method (FIRST and + ANY) to choose synchronous standbys from the listed ones. - The method FIRST specifies a priority-based synchronous + The method FIRST specifies a priority-based synchronous replication and makes transaction commits wait until their WAL records are replicated to the requested number of synchronous standbys chosen based on their priorities. The standbys whose names appear earlier in the list are @@ -1192,36 +1192,36 @@ primary_slot_name = 'node_a_slot' next-highest-priority standby. 
- An example of synchronous_standby_names for + An example of synchronous_standby_names for a priority-based multiple synchronous standbys is: synchronous_standby_names = 'FIRST 2 (s1, s2, s3)' - In this example, if four standby servers s1, s2, - s3 and s4 are running, the two standbys - s1 and s2 will be chosen as synchronous standbys + In this example, if four standby servers s1, s2, + s3 and s4 are running, the two standbys + s1 and s2 will be chosen as synchronous standbys because their names appear early in the list of standby names. - s3 is a potential synchronous standby and will take over - the role of synchronous standby when either of s1 or - s2 fails. s4 is an asynchronous standby since + s3 is a potential synchronous standby and will take over + the role of synchronous standby when either of s1 or + s2 fails. s4 is an asynchronous standby since its name is not in the list. - The method ANY specifies a quorum-based synchronous + The method ANY specifies a quorum-based synchronous replication and makes transaction commits wait until their WAL records - are replicated to at least the requested number of + are replicated to at least the requested number of synchronous standbys in the list. - An example of synchronous_standby_names for + An example of synchronous_standby_names for a quorum-based multiple synchronous standbys is: synchronous_standby_names = 'ANY 2 (s1, s2, s3)' - In this example, if four standby servers s1, s2, - s3 and s4 are running, transaction commits will - wait for replies from at least any two standbys of s1, - s2 and s3. s4 is an asynchronous + In this example, if four standby servers s1, s2, + s3 and s4 are running, transaction commits will + wait for replies from at least any two standbys of s1, + s2 and s3. s4 is an asynchronous standby since its name is not in the list. @@ -1243,7 +1243,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' - PostgreSQL allows the application developer + PostgreSQL allows the application developer to specify the durability level required via replication. This can be specified for the system overall, though it can also be specified for specific users or connections, or even individual transactions. @@ -1275,10 +1275,10 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' Planning for High Availability - synchronous_standby_names specifies the number and + synchronous_standby_names specifies the number and names of synchronous standbys that transaction commits made when - synchronous_commit is set to on, - remote_apply or remote_write will wait for + synchronous_commit is set to on, + remote_apply or remote_write will wait for responses from. Such transaction commits may never be completed if any one of synchronous standbys should crash. @@ -1286,7 +1286,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' The best solution for high availability is to ensure you keep as many synchronous standbys as requested. This can be achieved by naming multiple - potential synchronous standbys using synchronous_standby_names. + potential synchronous standbys using synchronous_standby_names. @@ -1305,14 +1305,14 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' When a standby first attaches to the primary, it will not yet be properly - synchronized. This is described as catchup mode. Once + synchronized. This is described as catchup mode. Once the lag between standby and primary reaches zero for the first time - we move to real-time streaming state. + we move to real-time streaming state. 
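    As noted just below, this progression can be observed in the
    pg_stat_replication view on the primary, for example:

SELECT application_name, state, sync_state
FROM pg_stat_replication;
-- state is 'catchup' until the standby first catches up, then 'streaming';
-- sync_state reports each standby as 'sync', 'potential', 'quorum' or 'async'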
The catch-up duration may be long immediately after the standby has been created. If the standby is shut down, then the catch-up period will increase according to the length of time the standby has been down. The standby is only able to become a synchronous standby - once it has reached streaming state. + once it has reached streaming state. This state can be viewed using the pg_stat_replication view. @@ -1334,7 +1334,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' If you really cannot keep as many synchronous standbys as requested then you should decrease the number of synchronous standbys that transaction commits must wait for responses from - in synchronous_standby_names (or disable it) and + in synchronous_standby_names (or disable it) and reload the configuration file on the primary server. @@ -1347,7 +1347,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' If you need to re-create a standby server while transactions are waiting, make sure that the commands pg_start_backup() and pg_stop_backup() are run in a session with - synchronous_commit = off, otherwise those + synchronous_commit = off, otherwise those requests will wait forever for the standby to appear. @@ -1381,7 +1381,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' - If archive_mode is set to on, the + If archive_mode is set to on, the archiver is not enabled during recovery or standby mode. If the standby server is promoted, it will start archiving after the promotion, but will not archive any WAL it did not generate itself. To get a complete @@ -1415,7 +1415,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' If the primary server fails and the standby server becomes the new primary, and then the old primary restarts, you must have a mechanism for informing the old primary that it is no longer the primary. This is - sometimes known as STONITH (Shoot The Other Node In The Head), which is + sometimes known as STONITH (Shoot The Other Node In The Head), which is necessary to avoid situations where both systems think they are the primary, which will lead to confusion and ultimately data loss. @@ -1466,10 +1466,10 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' To trigger failover of a log-shipping standby server, - run pg_ctl promote or create a trigger - file with the file name and path specified by the trigger_file - setting in recovery.conf. If you're planning to use - pg_ctl promote to fail over, trigger_file is + run pg_ctl promote or create a trigger + file with the file name and path specified by the trigger_file + setting in recovery.conf. If you're planning to use + pg_ctl promote to fail over, trigger_file is not required. If you're setting up the reporting servers that are only used to offload read-only queries from the primary, not for high availability purposes, you don't need to promote it. @@ -1481,9 +1481,9 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' An alternative to the built-in standby mode described in the previous - sections is to use a restore_command that polls the archive location. + sections is to use a restore_command that polls the archive location. This was the only option available in versions 8.4 and below. In this - setup, set standby_mode off, because you are implementing + setup, set standby_mode off, because you are implementing the polling required for standby operation yourself. See the module for a reference implementation of this. 
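    A sketch of such a recovery.conf, using the pg_standby tool (the
    module referenced above); the archive path is illustrative:

standby_mode = 'off'
restore_command = 'pg_standby /path/to/archive %f %p %r'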
@@ -1494,7 +1494,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' time, so if you use the standby server for queries (see Hot Standby), there is a delay between an action in the master and when the action becomes visible in the standby, corresponding the time it takes - to fill up the WAL file. archive_timeout can be used to make that delay + to fill up the WAL file. archive_timeout can be used to make that delay shorter. Also note that you can't combine streaming replication with this method. @@ -1511,25 +1511,25 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)' The magic that makes the two loosely coupled servers work together is - simply a restore_command used on the standby that, + simply a restore_command used on the standby that, when asked for the next WAL file, waits for it to become available from - the primary. The restore_command is specified in the - recovery.conf file on the standby server. Normal recovery + the primary. The restore_command is specified in the + recovery.conf file on the standby server. Normal recovery processing would request a file from the WAL archive, reporting failure if the file was unavailable. For standby processing it is normal for the next WAL file to be unavailable, so the standby must wait for - it to appear. For files ending in .backup or - .history there is no need to wait, and a non-zero return - code must be returned. A waiting restore_command can be + it to appear. For files ending in .backup or + .history there is no need to wait, and a non-zero return + code must be returned. A waiting restore_command can be written as a custom script that loops after polling for the existence of the next WAL file. There must also be some way to trigger failover, which - should interrupt the restore_command, break the loop and + should interrupt the restore_command, break the loop and return a file-not-found error to the standby server. This ends recovery and the standby will then come up as a normal server. - Pseudocode for a suitable restore_command is: + Pseudocode for a suitable restore_command is: triggered = false; while (!NextWALFileReady() && !triggered) @@ -1544,7 +1544,7 @@ if (!triggered) - A working example of a waiting restore_command is provided + A working example of a waiting restore_command is provided in the module. It should be used as a reference on how to correctly implement the logic described above. It can also be extended as needed to support specific @@ -1553,14 +1553,14 @@ if (!triggered) The method for triggering failover is an important part of planning - and design. One potential option is the restore_command + and design. One potential option is the restore_command command. It is executed once for each WAL file, but the process - running the restore_command is created and dies for + running the restore_command is created and dies for each file, so there is no daemon or server process, and signals or a signal handler cannot be used. Therefore, the - restore_command is not suitable to trigger failover. + restore_command is not suitable to trigger failover. It is possible to use a simple timeout facility, especially if - used in conjunction with a known archive_timeout + used in conjunction with a known archive_timeout setting on the primary. However, this is somewhat error prone since a network problem or busy primary server might be sufficient to initiate failover. 
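    One concrete arrangement for the notification mechanism discussed
    next is pg_standby's trigger-file option: once the named file
    appears, the waiting command stops polling and returns failure, which
    ends recovery (the path is illustrative):

restore_command = 'pg_standby -t /tmp/pgsql.trigger.5432 /path/to/archive %f %p %r'
# failover is then initiated by creating /tmp/pgsql.trigger.5432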
A notification mechanism such as the explicit @@ -1579,7 +1579,7 @@ if (!triggered) Set up primary and standby systems as nearly identical as possible, including two identical copies of - PostgreSQL at the same release level. + PostgreSQL at the same release level. @@ -1602,8 +1602,8 @@ if (!triggered) Begin recovery on the standby server from the local WAL - archive, using a recovery.conf that specifies a - restore_command that waits as described + archive, using a recovery.conf that specifies a + restore_command that waits as described previously (see ). @@ -1637,7 +1637,7 @@ if (!triggered) - An external program can call the pg_walfile_name_offset() + An external program can call the pg_walfile_name_offset() function (see ) to find out the file name and the exact byte offset within it of the current end of WAL. It can then access the WAL file directly @@ -1646,17 +1646,17 @@ if (!triggered) loss is the polling cycle time of the copying program, which can be very small, and there is no wasted bandwidth from forcing partially-used segment files to be archived. Note that the standby servers' - restore_command scripts can only deal with whole WAL files, + restore_command scripts can only deal with whole WAL files, so the incrementally copied data is not ordinarily made available to the standby servers. It is of use only when the primary dies — then the last partial WAL file is fed to the standby before allowing it to come up. The correct implementation of this process requires - cooperation of the restore_command script with the data + cooperation of the restore_command script with the data copying program. - Starting with PostgreSQL version 9.0, you can use + Starting with PostgreSQL version 9.0, you can use streaming replication (see ) to achieve the same benefits with less effort. @@ -1716,17 +1716,17 @@ if (!triggered) - Query access - SELECT, COPY TO + Query access - SELECT, COPY TO - Cursor commands - DECLARE, FETCH, CLOSE + Cursor commands - DECLARE, FETCH, CLOSE - Parameters - SHOW, SET, RESET + Parameters - SHOW, SET, RESET @@ -1735,17 +1735,17 @@ if (!triggered) - BEGIN, END, ABORT, START TRANSACTION + BEGIN, END, ABORT, START TRANSACTION - SAVEPOINT, RELEASE, ROLLBACK TO SAVEPOINT + SAVEPOINT, RELEASE, ROLLBACK TO SAVEPOINT - EXCEPTION blocks and other internal subtransactions + EXCEPTION blocks and other internal subtransactions @@ -1753,19 +1753,19 @@ if (!triggered) - LOCK TABLE, though only when explicitly in one of these modes: - ACCESS SHARE, ROW SHARE or ROW EXCLUSIVE. + LOCK TABLE, though only when explicitly in one of these modes: + ACCESS SHARE, ROW SHARE or ROW EXCLUSIVE. - Plans and resources - PREPARE, EXECUTE, - DEALLOCATE, DISCARD + Plans and resources - PREPARE, EXECUTE, + DEALLOCATE, DISCARD - Plugins and extensions - LOAD + Plugins and extensions - LOAD @@ -1779,9 +1779,9 @@ if (!triggered) - Data Manipulation Language (DML) - INSERT, - UPDATE, DELETE, COPY FROM, - TRUNCATE. + Data Manipulation Language (DML) - INSERT, + UPDATE, DELETE, COPY FROM, + TRUNCATE. Note that there are no allowed actions that result in a trigger being executed during recovery. This restriction applies even to temporary tables, because table rows cannot be read or written without @@ -1791,31 +1791,31 @@ if (!triggered) - Data Definition Language (DDL) - CREATE, - DROP, ALTER, COMMENT. + Data Definition Language (DDL) - CREATE, + DROP, ALTER, COMMENT. This restriction applies even to temporary tables, because carrying out these operations would require updating the system catalog tables. 
- SELECT ... FOR SHARE | UPDATE, because row locks cannot be + SELECT ... FOR SHARE | UPDATE, because row locks cannot be taken without updating the underlying data files. - Rules on SELECT statements that generate DML commands. + Rules on SELECT statements that generate DML commands. - LOCK that explicitly requests a mode higher than ROW EXCLUSIVE MODE. + LOCK that explicitly requests a mode higher than ROW EXCLUSIVE MODE. - LOCK in short default form, since it requests ACCESS EXCLUSIVE MODE. + LOCK in short default form, since it requests ACCESS EXCLUSIVE MODE. @@ -1824,19 +1824,19 @@ if (!triggered) - BEGIN READ WRITE, - START TRANSACTION READ WRITE + BEGIN READ WRITE, + START TRANSACTION READ WRITE - SET TRANSACTION READ WRITE, - SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE + SET TRANSACTION READ WRITE, + SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE - SET transaction_read_only = off + SET transaction_read_only = off @@ -1844,35 +1844,35 @@ if (!triggered) - Two-phase commit commands - PREPARE TRANSACTION, - COMMIT PREPARED, ROLLBACK PREPARED + Two-phase commit commands - PREPARE TRANSACTION, + COMMIT PREPARED, ROLLBACK PREPARED because even read-only transactions need to write WAL in the prepare phase (the first phase of two phase commit). - Sequence updates - nextval(), setval() + Sequence updates - nextval(), setval() - LISTEN, UNLISTEN, NOTIFY + LISTEN, UNLISTEN, NOTIFY - In normal operation, read-only transactions are allowed to - use LISTEN, UNLISTEN, and - NOTIFY, so Hot Standby sessions operate under slightly tighter + In normal operation, read-only transactions are allowed to + use LISTEN, UNLISTEN, and + NOTIFY, so Hot Standby sessions operate under slightly tighter restrictions than ordinary read-only sessions. It is possible that some of these restrictions might be loosened in a