author    Pavan Deolasee  2015-05-05 09:19:18 +0000
committer Pavan Deolasee  2015-05-05 09:19:18 +0000
commit    73fa25c67cbfa24c03e28c96bf356f2592671730 (patch)
tree      10ded7e26abd78d93658cb72fc5cb9d4672eff2a /contrib
parent    da4d108859bcd7a308ca75aba54281e32968822c (diff)
parent    4a9ab6d8619817f9e3989c99b65140e19041dab7 (diff)
Merge branch 'XL_MASTER_MERGE_9_4' into XL_NEW_MASTER
Conflicts:
    src/test/regress/expected/aggregates.out
    src/test/regress/expected/create_index.out
    src/test/regress/expected/inherit.out
    src/test/regress/expected/join.out
    src/test/regress/expected/window.out
    src/test/regress/expected/with.out
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/Makefile | 10
-rw-r--r--  contrib/README | 2
-rw-r--r--  contrib/adminpack/adminpack.c | 10
-rw-r--r--  contrib/auth_delay/auth_delay.c | 2
-rw-r--r--  contrib/auto_explain/auto_explain.c | 18
-rw-r--r--  contrib/btree_gin/btree_gin.c | 5
-rw-r--r--  contrib/btree_gist/btree_bit.c | 21
-rw-r--r--  contrib/btree_gist/btree_bytea.c | 7
-rw-r--r--  contrib/btree_gist/btree_cash.c | 10
-rw-r--r--  contrib/btree_gist/btree_date.c | 10
-rw-r--r--  contrib/btree_gist/btree_float4.c | 10
-rw-r--r--  contrib/btree_gist/btree_float8.c | 10
-rw-r--r--  contrib/btree_gist/btree_gist.c | 2
-rw-r--r--  contrib/btree_gist/btree_inet.c | 8
-rw-r--r--  contrib/btree_gist/btree_int2.c | 20
-rw-r--r--  contrib/btree_gist/btree_int4.c | 20
-rw-r--r--  contrib/btree_gist/btree_int8.c | 10
-rw-r--r--  contrib/btree_gist/btree_interval.c | 17
-rw-r--r--  contrib/btree_gist/btree_macaddr.c | 11
-rw-r--r--  contrib/btree_gist/btree_numeric.c | 7
-rw-r--r--  contrib/btree_gist/btree_oid.c | 10
-rw-r--r--  contrib/btree_gist/btree_text.c | 10
-rw-r--r--  contrib/btree_gist/btree_time.c | 12
-rw-r--r--  contrib/btree_gist/btree_ts.c | 15
-rw-r--r--  contrib/btree_gist/btree_utils_num.c | 8
-rw-r--r--  contrib/btree_gist/btree_utils_num.h | 3
-rw-r--r--  contrib/btree_gist/btree_utils_var.c | 67
-rw-r--r--  contrib/btree_gist/expected/not_equal.out | 1
-rw-r--r--  contrib/chkpass/chkpass.c | 41
-rw-r--r--  contrib/citext/citext.c | 10
-rw-r--r--  contrib/citext/expected/citext.out | 45
-rw-r--r--  contrib/citext/expected/citext_1.out | 45
-rw-r--r--  contrib/citext/sql/citext.sql | 27
-rw-r--r--  contrib/cube/Makefile | 14
-rw-r--r--  contrib/cube/cube.c | 538
-rw-r--r--  contrib/cube/cubedata.h | 39
-rw-r--r--  contrib/cube/cubeparse.y | 69
-rw-r--r--  contrib/cube/cubescan.l | 8
-rw-r--r--  contrib/cube/expected/cube.out | 281
-rw-r--r--  contrib/cube/expected/cube_1.out | 281
-rw-r--r--  contrib/cube/expected/cube_2.out | 281
-rw-r--r--  contrib/cube/expected/cube_3.out | 281
-rw-r--r--  contrib/cube/sql/cube.sql | 59
-rw-r--r--  contrib/dblink/Makefile | 5
-rw-r--r--  contrib/dblink/dblink--1.0--1.1.sql | 14
-rw-r--r--  contrib/dblink/dblink--1.1.sql (renamed from contrib/dblink/dblink--1.0.sql) | 14
-rw-r--r--  contrib/dblink/dblink.c | 453
-rw-r--r--  contrib/dblink/dblink.control | 2
-rw-r--r--  contrib/dblink/dblink.h | 3
-rw-r--r--  contrib/dblink/expected/dblink.out | 189
-rw-r--r--  contrib/dblink/sql/dblink.sql | 105
-rw-r--r--  contrib/dict_int/dict_int.c | 5
-rw-r--r--  contrib/dict_xsyn/dict_xsyn.c | 5
-rw-r--r--  contrib/dummy_seclabel/dummy_seclabel.c | 2
-rw-r--r--  contrib/earthdistance/earthdistance.c | 4
-rw-r--r--  contrib/file_fdw/data/text.csv | 9
-rw-r--r--  contrib/file_fdw/file_fdw.c | 249
-rw-r--r--  contrib/file_fdw/input/file_fdw.source | 16
-rw-r--r--  contrib/file_fdw/output/file_fdw.source | 46
-rw-r--r--  contrib/fuzzystrmatch/dmetaphone.c | 5
-rw-r--r--  contrib/fuzzystrmatch/fuzzystrmatch.c | 14
-rw-r--r--  contrib/fuzzystrmatch/levenshtein.c | 10
-rw-r--r--  contrib/hstore/Makefile | 4
-rw-r--r--  contrib/hstore/crc32.c | 2
-rw-r--r--  contrib/hstore/expected/hstore.out | 54
-rw-r--r--  contrib/hstore/hstore--1.0--1.1.sql | 2
-rw-r--r--  contrib/hstore/hstore--1.1--1.2.sql | 48
-rw-r--r--  contrib/hstore/hstore--1.1.sql | 524
-rw-r--r--  contrib/hstore/hstore--1.2--1.3.sql | 17
-rw-r--r--  contrib/hstore/hstore--1.3.sql (renamed from contrib/hstore/hstore--1.0.sql) | 34
-rw-r--r--  contrib/hstore/hstore.control | 2
-rw-r--r--  contrib/hstore/hstore.h | 22
-rw-r--r--  contrib/hstore/hstore_compat.c | 6
-rw-r--r--  contrib/hstore/hstore_gin.c | 7
-rw-r--r--  contrib/hstore/hstore_gist.c | 41
-rw-r--r--  contrib/hstore/hstore_io.c | 371
-rw-r--r--  contrib/hstore/hstore_op.c | 44
-rw-r--r--  contrib/hstore/sql/hstore.sql | 15
-rw-r--r--  contrib/intarray/_int.h | 20
-rw-r--r--  contrib/intarray/_int_bool.c | 104
-rw-r--r--  contrib/intarray/_int_gin.c | 4
-rw-r--r--  contrib/intarray/_int_gist.c | 22
-rw-r--r--  contrib/intarray/_int_op.c | 21
-rw-r--r--  contrib/intarray/_int_tool.c | 27
-rw-r--r--  contrib/intarray/_intbig_gist.c | 44
-rwxr-xr-x  contrib/intarray/bench/bench.pl | 112
-rwxr-xr-x  contrib/intarray/bench/create_test.pl | 39
-rw-r--r--  contrib/isn/isn.c | 4
-rw-r--r--  contrib/isn/isn.h | 2
-rw-r--r--  contrib/lo/lo.c | 18
-rw-r--r--  contrib/ltree/_ltree_gist.c | 37
-rw-r--r--  contrib/ltree/_ltree_op.c | 8
-rw-r--r--  contrib/ltree/ltree.h | 19
-rw-r--r--  contrib/ltree/ltree_gist.c | 28
-rw-r--r--  contrib/ltree/ltree_io.c | 18
-rw-r--r--  contrib/ltree/ltree_op.c | 30
-rw-r--r--  contrib/ltree/ltxtquery_io.c | 91
-rw-r--r--  contrib/ltree/ltxtquery_op.c | 5
-rw-r--r--  contrib/oid2name/oid2name.c | 172
-rw-r--r--  contrib/pageinspect/Makefile | 3
-rw-r--r--  contrib/pageinspect/btreefuncs.c | 97
-rw-r--r--  contrib/pageinspect/fsmfuncs.c | 12
-rw-r--r--  contrib/pageinspect/heapfuncs.c | 9
-rw-r--r--  contrib/pageinspect/pageinspect--1.0--1.1.sql | 18
-rw-r--r--  contrib/pageinspect/pageinspect--1.1--1.2.sql | 18
-rw-r--r--  contrib/pageinspect/pageinspect--1.2.sql (renamed from contrib/pageinspect/pageinspect--1.0.sql) | 6
-rw-r--r--  contrib/pageinspect/pageinspect.control | 2
-rw-r--r--  contrib/pageinspect/rawpage.c | 25
-rw-r--r--  contrib/passwordcheck/passwordcheck.c | 2
-rw-r--r--  contrib/pg_archivecleanup/pg_archivecleanup.c | 41
-rw-r--r--  contrib/pg_buffercache/pg_buffercache_pages.c | 7
-rw-r--r--  contrib/pg_freespacemap/pg_freespacemap.c | 2
-rw-r--r--  contrib/pg_prewarm/Makefile | 18
-rw-r--r--  contrib/pg_prewarm/pg_prewarm--1.0.sql | 14
-rw-r--r--  contrib/pg_prewarm/pg_prewarm.c | 203
-rw-r--r--  contrib/pg_prewarm/pg_prewarm.control | 5
-rw-r--r--  contrib/pg_standby/pg_standby.c | 42
-rw-r--r--  contrib/pg_stat_statements/Makefile | 4
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql | 2
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.0.sql | 39
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql | 43
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.2.sql (renamed from contrib/pg_stat_statements/pg_stat_statements--1.1.sql) | 11
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.c | 1120
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.control | 2
-rw-r--r--  contrib/pg_test_fsync/pg_test_fsync.c | 31
-rw-r--r--  contrib/pg_test_timing/pg_test_timing.c | 67
-rw-r--r--  contrib/pg_trgm/Makefile | 4
-rw-r--r--  contrib/pg_trgm/expected/pg_trgm.out | 289
-rw-r--r--  contrib/pg_trgm/pg_trgm--1.0--1.1.sql | 12
-rw-r--r--  contrib/pg_trgm/pg_trgm--1.1.sql (renamed from contrib/pg_trgm/pg_trgm--1.0.sql) | 14
-rw-r--r--  contrib/pg_trgm/pg_trgm.control | 2
-rw-r--r--  contrib/pg_trgm/sql/pg_trgm.sql | 53
-rw-r--r--  contrib/pg_trgm/trgm.h | 36
-rw-r--r--  contrib/pg_trgm/trgm_gin.c | 57
-rw-r--r--  contrib/pg_trgm/trgm_gist.c | 229
-rw-r--r--  contrib/pg_trgm/trgm_op.c | 182
-rw-r--r--  contrib/pg_trgm/trgm_regexp.c | 2247
-rw-r--r--  contrib/pg_upgrade/Makefile | 8
-rw-r--r--  contrib/pg_upgrade/TESTING | 10
-rw-r--r--  contrib/pg_upgrade/check.c | 484
-rw-r--r--  contrib/pg_upgrade/controldata.c | 231
-rw-r--r--  contrib/pg_upgrade/dump.c | 118
-rw-r--r--  contrib/pg_upgrade/exec.c | 201
-rw-r--r--  contrib/pg_upgrade/file.c | 139
-rw-r--r--  contrib/pg_upgrade/function.c | 33
-rw-r--r--  contrib/pg_upgrade/info.c | 206
-rw-r--r--  contrib/pg_upgrade/option.c | 199
-rw-r--r--  contrib/pg_upgrade/page.c | 62
-rw-r--r--  contrib/pg_upgrade/parallel.c | 357
-rw-r--r--  contrib/pg_upgrade/pg_upgrade.c | 309
-rw-r--r--  contrib/pg_upgrade/pg_upgrade.h | 132
-rw-r--r--  contrib/pg_upgrade/relfilenode.c | 306
-rw-r--r--  contrib/pg_upgrade/server.c | 173
-rw-r--r--  contrib/pg_upgrade/tablespace.c | 62
-rw-r--r--  contrib/pg_upgrade/test.sh | 98
-rw-r--r--  contrib/pg_upgrade/util.c | 148
-rw-r--r--  contrib/pg_upgrade/version.c | 95
-rw-r--r--  contrib/pg_upgrade/version_old_8_3.c | 76
-rw-r--r--  contrib/pg_upgrade_support/pg_upgrade_support.c | 27
-rw-r--r--  contrib/pg_xlogdump/.gitignore | 19
-rw-r--r--  contrib/pg_xlogdump/Makefile | 31
-rw-r--r--  contrib/pg_xlogdump/compat.c | 99
-rw-r--r--  contrib/pg_xlogdump/pg_xlogdump.c | 735
-rw-r--r--  contrib/pg_xlogdump/rmgrdesc.c | 35
-rw-r--r--  contrib/pg_xlogdump/rmgrdesc.h | 21
-rw-r--r--  contrib/pgbench/pgbench.c | 1282
-rw-r--r--  contrib/pgcrypto/Makefile | 2
-rw-r--r--  contrib/pgcrypto/crypt-blowfish.c | 5
-rw-r--r--  contrib/pgcrypto/crypt-des.c | 2
-rw-r--r--  contrib/pgcrypto/crypt-gensalt.c | 2
-rw-r--r--  contrib/pgcrypto/crypt-md5.c | 4
-rw-r--r--  contrib/pgcrypto/expected/pgp-encrypt.out | 36
-rw-r--r--  contrib/pgcrypto/expected/pgp-pubkey-decrypt.out | 76
-rw-r--r--  contrib/pgcrypto/fortuna.c | 23
-rw-r--r--  contrib/pgcrypto/fortuna.h | 2
-rw-r--r--  contrib/pgcrypto/imath.c | 6
-rw-r--r--  contrib/pgcrypto/imath.h | 26
-rw-r--r--  contrib/pgcrypto/internal-sha2.c | 10
-rw-r--r--  contrib/pgcrypto/internal.c | 12
-rw-r--r--  contrib/pgcrypto/mbuf.c | 14
-rw-r--r--  contrib/pgcrypto/mbuf.h | 2
-rw-r--r--  contrib/pgcrypto/md5.c | 2
-rw-r--r--  contrib/pgcrypto/md5.h | 2
-rw-r--r--  contrib/pgcrypto/openssl.c | 10
-rw-r--r--  contrib/pgcrypto/pgcrypto--1.0--1.1.sql | 9
-rw-r--r--  contrib/pgcrypto/pgcrypto--1.1.sql (renamed from contrib/pgcrypto/pgcrypto--1.0.sql) | 7
-rw-r--r--  contrib/pgcrypto/pgcrypto.c | 29
-rw-r--r--  contrib/pgcrypto/pgcrypto.control | 2
-rw-r--r--  contrib/pgcrypto/pgcrypto.h | 3
-rw-r--r--  contrib/pgcrypto/pgp-armor.c | 3
-rw-r--r--  contrib/pgcrypto/pgp-cfb.c | 7
-rw-r--r--  contrib/pgcrypto/pgp-compress.c | 7
-rw-r--r--  contrib/pgcrypto/pgp-decrypt.c | 21
-rw-r--r--  contrib/pgcrypto/pgp-encrypt.c | 12
-rw-r--r--  contrib/pgcrypto/pgp-info.c | 2
-rw-r--r--  contrib/pgcrypto/pgp-mpi-internal.c | 5
-rw-r--r--  contrib/pgcrypto/pgp-mpi-openssl.c | 5
-rw-r--r--  contrib/pgcrypto/pgp-mpi.c | 5
-rw-r--r--  contrib/pgcrypto/pgp-pgsql.c | 29
-rw-r--r--  contrib/pgcrypto/pgp-pubdec.c | 3
-rw-r--r--  contrib/pgcrypto/pgp-pubenc.c | 9
-rw-r--r--  contrib/pgcrypto/pgp-pubkey.c | 18
-rw-r--r--  contrib/pgcrypto/pgp-s2k.c | 6
-rw-r--r--  contrib/pgcrypto/pgp.c | 5
-rw-r--r--  contrib/pgcrypto/pgp.h | 8
-rw-r--r--  contrib/pgcrypto/px-crypt.c | 4
-rw-r--r--  contrib/pgcrypto/px-crypt.h | 2
-rw-r--r--  contrib/pgcrypto/px-hmac.c | 10
-rw-r--r--  contrib/pgcrypto/px.c | 10
-rw-r--r--  contrib/pgcrypto/px.h | 4
-rw-r--r--  contrib/pgcrypto/random.c | 2
-rw-r--r--  contrib/pgcrypto/rijndael.c | 6
-rw-r--r--  contrib/pgcrypto/rijndael.h | 4
-rw-r--r--  contrib/pgcrypto/sha1.c | 2
-rw-r--r--  contrib/pgcrypto/sha1.h | 2
-rw-r--r--  contrib/pgcrypto/sha2.c | 11
-rw-r--r--  contrib/pgcrypto/sha2.h | 2
-rw-r--r--  contrib/pgcrypto/sql/pgp-encrypt.sql | 36
-rw-r--r--  contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql | 73
-rw-r--r--  contrib/pgrowlocks/Makefile | 2
-rw-r--r--  contrib/pgrowlocks/pgrowlocks--1.0--1.1.sql | 17
-rw-r--r--  contrib/pgrowlocks/pgrowlocks--1.1.sql (renamed from contrib/pgrowlocks/pgrowlocks--1.0.sql) | 4
-rw-r--r--  contrib/pgrowlocks/pgrowlocks.c | 188
-rw-r--r--  contrib/pgrowlocks/pgrowlocks.control | 2
-rw-r--r--  contrib/pgstattuple/Makefile | 2
-rw-r--r--  contrib/pgstattuple/expected/pgstattuple.out | 94
-rw-r--r--  contrib/pgstattuple/pgstatindex.c | 214
-rw-r--r--  contrib/pgstattuple/pgstattuple--1.0--1.1.sql | 11
-rw-r--r--  contrib/pgstattuple/pgstattuple--1.1--1.2.sql | 39
-rw-r--r--  contrib/pgstattuple/pgstattuple--1.2.sql (renamed from contrib/pgstattuple/pgstattuple--1.0.sql) | 42
-rw-r--r--  contrib/pgstattuple/pgstattuple.c | 11
-rw-r--r--  contrib/pgstattuple/pgstattuple.control | 2
-rw-r--r--  contrib/pgstattuple/sql/pgstattuple.sql | 20
-rw-r--r--  contrib/postgres_fdw/.gitignore | 4
-rw-r--r--  contrib/postgres_fdw/Makefile | 27
-rw-r--r--  contrib/postgres_fdw/connection.c | 715
-rw-r--r--  contrib/postgres_fdw/deparse.c | 1842
-rw-r--r--  contrib/postgres_fdw/expected/postgres_fdw.out | 2836
-rw-r--r--  contrib/postgres_fdw/option.c | 295
-rw-r--r--  contrib/postgres_fdw/postgres_fdw--1.0.sql | 18
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.c | 2702
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.control | 5
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.h | 77
-rw-r--r--  contrib/postgres_fdw/sql/postgres_fdw.sql | 611
-rw-r--r--  contrib/seg/Makefile | 14
-rwxr-xr-x  contrib/seg/seg-validate.pl | 56
-rw-r--r--  contrib/seg/seg.c | 7
-rw-r--r--  contrib/seg/segscan.l | 8
-rwxr-xr-x  contrib/seg/sort-segments.pl | 29
-rw-r--r--  contrib/sepgsql/database.c | 52
-rw-r--r--  contrib/sepgsql/dml.c | 19
-rw-r--r--  contrib/sepgsql/expected/alter.out | 223
-rw-r--r--  contrib/sepgsql/expected/ddl.out | 319
-rw-r--r--  contrib/sepgsql/expected/dml.out | 28
-rw-r--r--  contrib/sepgsql/expected/label.out | 52
-rw-r--r--  contrib/sepgsql/expected/misc.out | 67
-rw-r--r--  contrib/sepgsql/hooks.c | 211
-rw-r--r--  contrib/sepgsql/label.c | 31
-rwxr-xr-x  contrib/sepgsql/launcher | 2
-rw-r--r--  contrib/sepgsql/proc.c | 145
-rw-r--r--  contrib/sepgsql/relation.c | 341
-rw-r--r--  contrib/sepgsql/schema.c | 90
-rw-r--r--  contrib/sepgsql/selinux.c | 19
-rw-r--r--  contrib/sepgsql/sepgsql-regtest.te | 41
-rw-r--r--  contrib/sepgsql/sepgsql.h | 24
-rw-r--r--  contrib/sepgsql/sql/alter.sql | 136
-rw-r--r--  contrib/sepgsql/sql/ddl.sql | 18
-rw-r--r--  contrib/sepgsql/sql/dml.sql | 18
-rw-r--r--  contrib/sepgsql/sql/label.sql | 13
-rw-r--r--  contrib/sepgsql/sql/misc.sql | 25
-rwxr-xr-x  contrib/sepgsql/test_sepgsql | 30
-rw-r--r--  contrib/sepgsql/uavc.c | 20
-rw-r--r--  contrib/spi/autoinc.c | 2
-rw-r--r--  contrib/spi/insert_username.c | 2
-rw-r--r--  contrib/spi/moddatetime.c | 2
-rw-r--r--  contrib/spi/refint.c | 7
-rw-r--r--  contrib/spi/timetravel.c | 12
-rw-r--r--  contrib/sslinfo/sslinfo.c | 37
-rwxr-xr-x  contrib/start-scripts/osx/PostgreSQL | 6
-rw-r--r--  contrib/tablefunc/expected/tablefunc.out | 1
-rw-r--r--  contrib/tablefunc/tablefunc.c | 8
-rw-r--r--  contrib/tablefunc/tablefunc.h | 2
-rw-r--r--  contrib/tcn/tcn.c | 13
-rw-r--r--  contrib/test_decoding/.gitignore | 5
-rw-r--r--  contrib/test_decoding/Makefile | 73
-rw-r--r--  contrib/test_decoding/expected/binary.out | 35
-rw-r--r--  contrib/test_decoding/expected/concurrent_ddl_dml.out | 733
-rw-r--r--  contrib/test_decoding/expected/ddl.out | 647
-rw-r--r--  contrib/test_decoding/expected/decoding_in_xact.out | 89
-rw-r--r--  contrib/test_decoding/expected/delayed_startup.out | 38
-rw-r--r--  contrib/test_decoding/expected/mxact.out | 66
-rw-r--r--  contrib/test_decoding/expected/permissions.out | 130
-rw-r--r--  contrib/test_decoding/expected/prepared.out | 82
-rw-r--r--  contrib/test_decoding/expected/rewrite.out | 107
-rw-r--r--  contrib/test_decoding/expected/toast.out | 90
-rw-r--r--  contrib/test_decoding/logical.conf | 2
-rw-r--r--  contrib/test_decoding/specs/concurrent_ddl_dml.spec | 94
-rw-r--r--  contrib/test_decoding/specs/delayed_startup.spec | 24
-rw-r--r--  contrib/test_decoding/specs/mxact.spec | 38
-rw-r--r--  contrib/test_decoding/sql/binary.sql | 14
-rw-r--r--  contrib/test_decoding/sql/ddl.sql | 338
-rw-r--r--  contrib/test_decoding/sql/decoding_in_xact.sql | 41
-rw-r--r--  contrib/test_decoding/sql/permissions.sql | 69
-rw-r--r--  contrib/test_decoding/sql/prepared.sql | 50
-rw-r--r--  contrib/test_decoding/sql/rewrite.sql | 62
-rw-r--r--  contrib/test_decoding/sql/toast.sql | 51
-rw-r--r--  contrib/test_decoding/test_decoding.c | 407
-rw-r--r--  contrib/test_parser/test_parser.c | 14
-rw-r--r--  contrib/test_shm_mq/.gitignore | 4
-rw-r--r--  contrib/test_shm_mq/Makefile | 20
-rw-r--r--  contrib/test_shm_mq/expected/test_shm_mq.out | 36
-rw-r--r--  contrib/test_shm_mq/setup.c | 328
-rw-r--r--  contrib/test_shm_mq/sql/test_shm_mq.sql | 12
-rw-r--r--  contrib/test_shm_mq/test.c | 262
-rw-r--r--  contrib/test_shm_mq/test_shm_mq--1.0.sql | 19
-rw-r--r--  contrib/test_shm_mq/test_shm_mq.control | 4
-rw-r--r--  contrib/test_shm_mq/test_shm_mq.h | 45
-rw-r--r--  contrib/test_shm_mq/worker.c | 224
-rw-r--r--  contrib/tsearch2/tsearch2.c | 25
-rw-r--r--  contrib/unaccent/unaccent.c | 65
-rw-r--r--  contrib/uuid-ossp/.gitignore | 6
-rw-r--r--  contrib/uuid-ossp/Makefile | 16
-rw-r--r--  contrib/uuid-ossp/expected/uuid_ossp.out | 139
-rw-r--r--  contrib/uuid-ossp/sql/uuid_ossp.sql | 75
-rw-r--r--  contrib/uuid-ossp/uuid-ossp--1.0.sql | 2
-rw-r--r--  contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql | 2
-rw-r--r--  contrib/uuid-ossp/uuid-ossp.c | 439
-rw-r--r--  contrib/vacuumlo/vacuumlo.c | 172
-rw-r--r--  contrib/worker_spi/Makefile | 17
-rw-r--r--  contrib/worker_spi/worker_spi--1.0.sql | 9
-rw-r--r--  contrib/worker_spi/worker_spi.c | 407
-rw-r--r--  contrib/worker_spi/worker_spi.control | 5
-rw-r--r--  contrib/xml2/expected/xml2.out | 15
-rw-r--r--  contrib/xml2/expected/xml2_1.out | 15
-rw-r--r--  contrib/xml2/sql/xml2.sql | 15
-rw-r--r--  contrib/xml2/xpath.c | 14
-rw-r--r--  contrib/xml2/xslt_proc.c | 74
337 files changed, 29659 insertions, 4802 deletions
diff --git a/contrib/Makefile b/contrib/Makefile
index 57d4045559..9b6ac2ec5a 100644
--- a/contrib/Makefile
+++ b/contrib/Makefile
@@ -32,6 +32,7 @@ SUBDIRS = \
pg_archivecleanup \
pg_buffercache \
pg_freespacemap \
+ pg_prewarm \
pg_standby \
pg_stat_statements \
pg_test_fsync \
@@ -45,15 +46,20 @@ SUBDIRS = \
pgstattuple \
pgxc_clean \
pgxc_ctl \
+ pg_xlogdump \
+ postgres_fdw \
seg \
spi \
tablefunc \
tcn \
+ test_decoding \
test_parser \
+ test_shm_mq \
tsearch2 \
unaccent \
vacuumlo \
- stormstats
+ stormstats \
+ worker_spi
ifeq ($(with_openssl),yes)
SUBDIRS += sslinfo
@@ -61,7 +67,7 @@ else
ALWAYS_SUBDIRS += sslinfo
endif
-ifeq ($(with_ossp_uuid),yes)
+ifneq ($(with_uuid),no)
SUBDIRS += uuid-ossp
else
ALWAYS_SUBDIRS += uuid-ossp
diff --git a/contrib/README b/contrib/README
index b58d0a60e4..5eaeb2451f 100644
--- a/contrib/README
+++ b/contrib/README
@@ -12,7 +12,7 @@ documentation.
When building from the source distribution, these modules are not
built automatically, unless you build the "world" target. You can
-also build and install them all by running "gmake all" and "gmake
+also build and install them all by running "make all" and "make
install" in this directory; or to build and install just one selected
module, do the same in that module's subdirectory.
diff --git a/contrib/adminpack/adminpack.c b/contrib/adminpack/adminpack.c
index 431a675187..8193b1f1d0 100644
--- a/contrib/adminpack/adminpack.c
+++ b/contrib/adminpack/adminpack.c
@@ -3,7 +3,7 @@
* adminpack.c
*
*
- * Copyright (c) 2002-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2014, PostgreSQL Global Development Group
*
* Author: Andreas Pflug <pgadmin@pse-consulting.de>
*
@@ -40,11 +40,6 @@
PG_MODULE_MAGIC;
-Datum pg_file_write(PG_FUNCTION_ARGS);
-Datum pg_file_rename(PG_FUNCTION_ARGS);
-Datum pg_file_unlink(PG_FUNCTION_ARGS);
-Datum pg_logdir_ls(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(pg_file_write);
PG_FUNCTION_INFO_V1(pg_file_rename);
PG_FUNCTION_INFO_V1(pg_file_unlink);
@@ -376,8 +371,7 @@ pg_logdir_ls(PG_FUNCTION_ARGS)
/* Seems the timestamp is OK; prepare and return tuple */
values[0] = timestampbuf;
- values[1] = palloc(strlen(fctx->location) + strlen(de->d_name) + 2);
- sprintf(values[1], "%s/%s", fctx->location, de->d_name);
+ values[1] = psprintf("%s/%s", fctx->location, de->d_name);
tuple = BuildTupleFromCStrings(funcctx->attinmeta, values);
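
The last hunk above replaces a hand-measured palloc()/sprintf() pair with psprintf(), which sizes and allocates the result buffer itself. A minimal sketch of that idiom follows; the helper name is illustrative, not part of the patch.

#include "postgres.h"    /* psprintf() comes in via utils/palloc.h */

/*
 * Illustrative only: build "location/filename" in a freshly palloc'd
 * buffer of exactly the right size, in the current memory context.
 */
static char *
build_entry_path(const char *location, const char *filename)
{
	return psprintf("%s/%s", location, filename);
}
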
diff --git a/contrib/auth_delay/auth_delay.c b/contrib/auth_delay/auth_delay.c
index 4e0d5959d1..3131e827b8 100644
--- a/contrib/auth_delay/auth_delay.c
+++ b/contrib/auth_delay/auth_delay.c
@@ -59,7 +59,7 @@ _PG_init(void)
NULL,
&auth_delay_milliseconds,
0,
- 0, INT_MAX,
+ 0, INT_MAX / 1000,
PGC_SIGHUP,
GUC_UNIT_MS,
NULL,
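
The tighter upper bound on auth_delay.milliseconds reflects that the module applies the delay in microseconds (the millisecond setting is multiplied by 1000), so INT_MAX / 1000 is the largest setting for which that product still fits a 32-bit integer. A standalone sketch of the arithmetic, assuming 32-bit int:

#include <limits.h>
#include <stdio.h>

int
main(void)
{
	int		max_ms = INT_MAX / 1000;	/* the new GUC maximum */

	/* 1000L * max_ms stays representable even where long is 32 bits;
	 * 1000L * INT_MAX would not. */
	printf("max delay: %d ms (%ld us)\n", max_ms, 1000L * max_ms);
	return 0;
}
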
diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c
index ad333b6644..cbbd25753f 100644
--- a/contrib/auto_explain/auto_explain.c
+++ b/contrib/auto_explain/auto_explain.c
@@ -3,7 +3,7 @@
* auto_explain.c
*
*
- * Copyright (c) 2008-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2008-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/auto_explain/auto_explain.c
@@ -12,6 +12,8 @@
*/
#include "postgres.h"
+#include <limits.h>
+
#include "commands/explain.h"
#include "executor/instrument.h"
#include "utils/guc.h"
@@ -23,6 +25,7 @@ static int auto_explain_log_min_duration = -1; /* msec or -1 */
static bool auto_explain_log_analyze = false;
static bool auto_explain_log_verbose = false;
static bool auto_explain_log_buffers = false;
+static bool auto_explain_log_triggers = false;
static bool auto_explain_log_timing = false;
static int auto_explain_log_format = EXPLAIN_FORMAT_TEXT;
static bool auto_explain_log_nested_statements = false;
@@ -111,6 +114,17 @@ _PG_init(void)
NULL,
NULL);
+ DefineCustomBoolVariable("auto_explain.log_triggers",
+ "Include trigger statistics in plans.",
+ "This has no effect unless log_analyze is also set.",
+ &auto_explain_log_triggers,
+ false,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
DefineCustomEnumVariable("auto_explain.log_format",
"EXPLAIN format to be used for plan logging.",
NULL,
@@ -293,6 +307,8 @@ explain_ExecutorEnd(QueryDesc *queryDesc)
ExplainBeginOutput(&es);
ExplainQueryText(&es, queryDesc);
ExplainPrintPlan(&es, queryDesc);
+ if (es.analyze && auto_explain_log_triggers)
+ ExplainPrintTriggers(&es, queryDesc);
ExplainEndOutput(&es);
/* Remove last line break */
diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c
index 5e6003f63c..87d23e0350 100644
--- a/contrib/btree_gin/btree_gin.c
+++ b/contrib/btree_gin/btree_gin.c
@@ -32,7 +32,6 @@ typedef struct QueryInfo
#define GIN_EXTRACT_VALUE(type) \
PG_FUNCTION_INFO_V1(gin_extract_value_##type); \
-Datum gin_extract_value_##type(PG_FUNCTION_ARGS); \
Datum \
gin_extract_value_##type(PG_FUNCTION_ARGS) \
{ \
@@ -59,7 +58,6 @@ gin_extract_value_##type(PG_FUNCTION_ARGS) \
#define GIN_EXTRACT_QUERY(type) \
PG_FUNCTION_INFO_V1(gin_extract_query_##type); \
-Datum gin_extract_query_##type(PG_FUNCTION_ARGS); \
Datum \
gin_extract_query_##type(PG_FUNCTION_ARGS) \
{ \
@@ -109,7 +107,6 @@ gin_extract_query_##type(PG_FUNCTION_ARGS) \
*/
#define GIN_COMPARE_PREFIX(type) \
PG_FUNCTION_INFO_V1(gin_compare_prefix_##type); \
-Datum gin_compare_prefix_##type(PG_FUNCTION_ARGS); \
Datum \
gin_compare_prefix_##type(PG_FUNCTION_ARGS) \
{ \
@@ -182,7 +179,6 @@ gin_compare_prefix_##type(PG_FUNCTION_ARGS) \
PG_FUNCTION_INFO_V1(gin_btree_consistent);
-Datum gin_btree_consistent(PG_FUNCTION_ARGS);
Datum
gin_btree_consistent(PG_FUNCTION_ARGS)
{
@@ -404,7 +400,6 @@ GIN_SUPPORT(varbit)
#define NUMERIC_IS_LEFTMOST(x) ((x) == NULL)
PG_FUNCTION_INFO_V1(gin_numeric_cmp);
-Datum gin_numeric_cmp(PG_FUNCTION_ARGS);
Datum
gin_numeric_cmp(PG_FUNCTION_ARGS)
diff --git a/contrib/btree_gist/btree_bit.c b/contrib/btree_gist/btree_bit.c
index 5c0d198b09..76297515c5 100644
--- a/contrib/btree_gist/btree_bit.c
+++ b/contrib/btree_gist/btree_bit.c
@@ -19,14 +19,6 @@ PG_FUNCTION_INFO_V1(gbt_bit_consistent);
PG_FUNCTION_INFO_V1(gbt_bit_penalty);
PG_FUNCTION_INFO_V1(gbt_bit_same);
-Datum gbt_bit_compress(PG_FUNCTION_ARGS);
-Datum gbt_bit_union(PG_FUNCTION_ARGS);
-Datum gbt_bit_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_bit_consistent(PG_FUNCTION_ARGS);
-Datum gbt_bit_penalty(PG_FUNCTION_ARGS);
-Datum gbt_bit_same(PG_FUNCTION_ARGS);
-
-
/* define for comparison */
@@ -83,10 +75,14 @@ static bytea *
gbt_bit_xfrm(bytea *leaf)
{
bytea *out = leaf;
- int s = INTALIGN(VARBITBYTES(leaf) + VARHDRSZ);
-
- out = palloc(s);
- SET_VARSIZE(out, s);
+ int sz = VARBITBYTES(leaf) + VARHDRSZ;
+ int padded_sz = INTALIGN(sz);
+
+ out = (bytea *) palloc(padded_sz);
+ /* initialize the padding bytes to zero */
+ while (sz < padded_sz)
+ ((char *) out)[sz++] = 0;
+ SET_VARSIZE(out, padded_sz);
memcpy((void *) VARDATA(out), (void *) VARBITS(leaf), VARBITBYTES(leaf));
return out;
}
@@ -97,7 +93,6 @@ gbt_bit_xfrm(bytea *leaf)
static GBT_VARKEY *
gbt_bit_l2n(GBT_VARKEY *leaf)
{
-
GBT_VARKEY *out = leaf;
GBT_VARKEY_R r = gbt_var_key_readable(leaf);
bytea *o;
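
The gbt_bit_xfrm() change above pads the copied varbit data out to an INTALIGN'd length and explicitly zeroes the padding, so the trailing bytes of the stored key are deterministic rather than whatever the allocator left behind. A standalone sketch of that pattern in plain C, with hypothetical names and 4-byte alignment assumed:

#include <stdlib.h>
#include <string.h>

/* Assumed 4-byte alignment, standing in for PostgreSQL's INTALIGN(). */
#define ALIGN4(len)		(((len) + 3) & ~(size_t) 3)

static unsigned char *
copy_with_zeroed_padding(const void *src, size_t len, size_t *out_len)
{
	size_t			padded = ALIGN4(len);
	unsigned char  *buf = malloc(padded);

	if (buf == NULL)
		return NULL;
	memcpy(buf, src, len);
	memset(buf + len, 0, padded - len);	/* the bytes the old code left uninitialized */
	*out_len = padded;
	return buf;
}
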
diff --git a/contrib/btree_gist/btree_bytea.c b/contrib/btree_gist/btree_bytea.c
index 0dd441964a..dfc25a45c6 100644
--- a/contrib/btree_gist/btree_bytea.c
+++ b/contrib/btree_gist/btree_bytea.c
@@ -18,13 +18,6 @@ PG_FUNCTION_INFO_V1(gbt_bytea_consistent);
PG_FUNCTION_INFO_V1(gbt_bytea_penalty);
PG_FUNCTION_INFO_V1(gbt_bytea_same);
-Datum gbt_bytea_compress(PG_FUNCTION_ARGS);
-Datum gbt_bytea_union(PG_FUNCTION_ARGS);
-Datum gbt_bytea_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_bytea_consistent(PG_FUNCTION_ARGS);
-Datum gbt_bytea_penalty(PG_FUNCTION_ARGS);
-Datum gbt_bytea_same(PG_FUNCTION_ARGS);
-
/* define for comparison */
diff --git a/contrib/btree_gist/btree_cash.c b/contrib/btree_gist/btree_cash.c
index 8e8495ca06..63f86ebeef 100644
--- a/contrib/btree_gist/btree_cash.c
+++ b/contrib/btree_gist/btree_cash.c
@@ -24,14 +24,6 @@ PG_FUNCTION_INFO_V1(gbt_cash_distance);
PG_FUNCTION_INFO_V1(gbt_cash_penalty);
PG_FUNCTION_INFO_V1(gbt_cash_same);
-Datum gbt_cash_compress(PG_FUNCTION_ARGS);
-Datum gbt_cash_union(PG_FUNCTION_ARGS);
-Datum gbt_cash_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_cash_consistent(PG_FUNCTION_ARGS);
-Datum gbt_cash_distance(PG_FUNCTION_ARGS);
-Datum gbt_cash_penalty(PG_FUNCTION_ARGS);
-Datum gbt_cash_same(PG_FUNCTION_ARGS);
-
static bool
gbt_cashgt(const void *a, const void *b)
{
@@ -86,6 +78,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_cash,
sizeof(Cash),
+ 16, /* sizeof(gbtreekey16) */
gbt_cashgt,
gbt_cashge,
gbt_casheq,
@@ -97,7 +90,6 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(cash_dist);
-Datum cash_dist(PG_FUNCTION_ARGS);
Datum
cash_dist(PG_FUNCTION_ARGS)
{
diff --git a/contrib/btree_gist/btree_date.c b/contrib/btree_gist/btree_date.c
index 1c0c3ec20c..7a4c6aa600 100644
--- a/contrib/btree_gist/btree_date.c
+++ b/contrib/btree_gist/btree_date.c
@@ -24,14 +24,6 @@ PG_FUNCTION_INFO_V1(gbt_date_distance);
PG_FUNCTION_INFO_V1(gbt_date_penalty);
PG_FUNCTION_INFO_V1(gbt_date_same);
-Datum gbt_date_compress(PG_FUNCTION_ARGS);
-Datum gbt_date_union(PG_FUNCTION_ARGS);
-Datum gbt_date_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_date_consistent(PG_FUNCTION_ARGS);
-Datum gbt_date_distance(PG_FUNCTION_ARGS);
-Datum gbt_date_penalty(PG_FUNCTION_ARGS);
-Datum gbt_date_same(PG_FUNCTION_ARGS);
-
static bool
gbt_dategt(const void *a, const void *b)
{
@@ -104,6 +96,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_date,
sizeof(DateADT),
+ 8, /* sizeof(gbtreekey8) */
gbt_dategt,
gbt_datege,
gbt_dateeq,
@@ -115,7 +108,6 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(date_dist);
-Datum date_dist(PG_FUNCTION_ARGS);
Datum
date_dist(PG_FUNCTION_ARGS)
{
diff --git a/contrib/btree_gist/btree_float4.c b/contrib/btree_gist/btree_float4.c
index cf1e45a381..778d8dad84 100644
--- a/contrib/btree_gist/btree_float4.c
+++ b/contrib/btree_gist/btree_float4.c
@@ -23,14 +23,6 @@ PG_FUNCTION_INFO_V1(gbt_float4_distance);
PG_FUNCTION_INFO_V1(gbt_float4_penalty);
PG_FUNCTION_INFO_V1(gbt_float4_same);
-Datum gbt_float4_compress(PG_FUNCTION_ARGS);
-Datum gbt_float4_union(PG_FUNCTION_ARGS);
-Datum gbt_float4_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_float4_consistent(PG_FUNCTION_ARGS);
-Datum gbt_float4_distance(PG_FUNCTION_ARGS);
-Datum gbt_float4_penalty(PG_FUNCTION_ARGS);
-Datum gbt_float4_same(PG_FUNCTION_ARGS);
-
static bool
gbt_float4gt(const void *a, const void *b)
{
@@ -85,6 +77,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_float4,
sizeof(float4),
+ 8, /* sizeof(gbtreekey8) */
gbt_float4gt,
gbt_float4ge,
gbt_float4eq,
@@ -96,7 +89,6 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(float4_dist);
-Datum float4_dist(PG_FUNCTION_ARGS);
Datum
float4_dist(PG_FUNCTION_ARGS)
{
diff --git a/contrib/btree_gist/btree_float8.c b/contrib/btree_gist/btree_float8.c
index 3ce87642cb..c898bf2d97 100644
--- a/contrib/btree_gist/btree_float8.c
+++ b/contrib/btree_gist/btree_float8.c
@@ -23,14 +23,6 @@ PG_FUNCTION_INFO_V1(gbt_float8_distance);
PG_FUNCTION_INFO_V1(gbt_float8_penalty);
PG_FUNCTION_INFO_V1(gbt_float8_same);
-Datum gbt_float8_compress(PG_FUNCTION_ARGS);
-Datum gbt_float8_union(PG_FUNCTION_ARGS);
-Datum gbt_float8_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_float8_consistent(PG_FUNCTION_ARGS);
-Datum gbt_float8_distance(PG_FUNCTION_ARGS);
-Datum gbt_float8_penalty(PG_FUNCTION_ARGS);
-Datum gbt_float8_same(PG_FUNCTION_ARGS);
-
static bool
gbt_float8gt(const void *a, const void *b)
@@ -93,6 +85,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_float8,
sizeof(float8),
+ 16, /* sizeof(gbtreekey16) */
gbt_float8gt,
gbt_float8ge,
gbt_float8eq,
@@ -104,7 +97,6 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(float8_dist);
-Datum float8_dist(PG_FUNCTION_ARGS);
Datum
float8_dist(PG_FUNCTION_ARGS)
{
diff --git a/contrib/btree_gist/btree_gist.c b/contrib/btree_gist/btree_gist.c
index f2d2ed2cb2..e1dc253c96 100644
--- a/contrib/btree_gist/btree_gist.c
+++ b/contrib/btree_gist/btree_gist.c
@@ -11,8 +11,6 @@ PG_FUNCTION_INFO_V1(gbt_decompress);
PG_FUNCTION_INFO_V1(gbtreekey_in);
PG_FUNCTION_INFO_V1(gbtreekey_out);
-Datum gbt_decompress(PG_FUNCTION_ARGS);
-
/**************************************************
* In/Out for keys
**************************************************/
diff --git a/contrib/btree_gist/btree_inet.c b/contrib/btree_gist/btree_inet.c
index c136296ab5..822786125d 100644
--- a/contrib/btree_gist/btree_inet.c
+++ b/contrib/btree_gist/btree_inet.c
@@ -25,13 +25,6 @@ PG_FUNCTION_INFO_V1(gbt_inet_consistent);
PG_FUNCTION_INFO_V1(gbt_inet_penalty);
PG_FUNCTION_INFO_V1(gbt_inet_same);
-Datum gbt_inet_compress(PG_FUNCTION_ARGS);
-Datum gbt_inet_union(PG_FUNCTION_ARGS);
-Datum gbt_inet_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_inet_consistent(PG_FUNCTION_ARGS);
-Datum gbt_inet_penalty(PG_FUNCTION_ARGS);
-Datum gbt_inet_same(PG_FUNCTION_ARGS);
-
static bool
gbt_inetgt(const void *a, const void *b)
@@ -81,6 +74,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_inet,
sizeof(double),
+ 16, /* sizeof(gbtreekey16) */
gbt_inetgt,
gbt_inetge,
gbt_ineteq,
diff --git a/contrib/btree_gist/btree_int2.c b/contrib/btree_gist/btree_int2.c
index a40912b13d..a88aae6453 100644
--- a/contrib/btree_gist/btree_int2.c
+++ b/contrib/btree_gist/btree_int2.c
@@ -23,14 +23,6 @@ PG_FUNCTION_INFO_V1(gbt_int2_distance);
PG_FUNCTION_INFO_V1(gbt_int2_penalty);
PG_FUNCTION_INFO_V1(gbt_int2_same);
-Datum gbt_int2_compress(PG_FUNCTION_ARGS);
-Datum gbt_int2_union(PG_FUNCTION_ARGS);
-Datum gbt_int2_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_int2_consistent(PG_FUNCTION_ARGS);
-Datum gbt_int2_distance(PG_FUNCTION_ARGS);
-Datum gbt_int2_penalty(PG_FUNCTION_ARGS);
-Datum gbt_int2_same(PG_FUNCTION_ARGS);
-
static bool
gbt_int2gt(const void *a, const void *b)
{
@@ -77,7 +69,7 @@ gbt_int2key_cmp(const void *a, const void *b)
static float8
gbt_int2_dist(const void *a, const void *b)
{
- return GET_FLOAT_DISTANCE(int2, a, b);
+ return GET_FLOAT_DISTANCE(int16, a, b);
}
@@ -85,6 +77,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_int2,
sizeof(int16),
+ 4, /* sizeof(gbtreekey4) */
gbt_int2gt,
gbt_int2ge,
gbt_int2eq,
@@ -96,14 +89,13 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(int2_dist);
-Datum int2_dist(PG_FUNCTION_ARGS);
Datum
int2_dist(PG_FUNCTION_ARGS)
{
- int2 a = PG_GETARG_INT16(0);
- int2 b = PG_GETARG_INT16(1);
- int2 r;
- int2 ra;
+ int16 a = PG_GETARG_INT16(0);
+ int16 b = PG_GETARG_INT16(1);
+ int16 r;
+ int16 ra;
r = a - b;
ra = Abs(r);
diff --git a/contrib/btree_gist/btree_int4.c b/contrib/btree_gist/btree_int4.c
index 426f23f3fe..889a512078 100644
--- a/contrib/btree_gist/btree_int4.c
+++ b/contrib/btree_gist/btree_int4.c
@@ -23,14 +23,6 @@ PG_FUNCTION_INFO_V1(gbt_int4_distance);
PG_FUNCTION_INFO_V1(gbt_int4_penalty);
PG_FUNCTION_INFO_V1(gbt_int4_same);
-Datum gbt_int4_compress(PG_FUNCTION_ARGS);
-Datum gbt_int4_union(PG_FUNCTION_ARGS);
-Datum gbt_int4_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_int4_consistent(PG_FUNCTION_ARGS);
-Datum gbt_int4_distance(PG_FUNCTION_ARGS);
-Datum gbt_int4_penalty(PG_FUNCTION_ARGS);
-Datum gbt_int4_same(PG_FUNCTION_ARGS);
-
static bool
gbt_int4gt(const void *a, const void *b)
@@ -78,7 +70,7 @@ gbt_int4key_cmp(const void *a, const void *b)
static float8
gbt_int4_dist(const void *a, const void *b)
{
- return GET_FLOAT_DISTANCE(int4, a, b);
+ return GET_FLOAT_DISTANCE(int32, a, b);
}
@@ -86,6 +78,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_int4,
sizeof(int32),
+ 8, /* sizeof(gbtreekey8) */
gbt_int4gt,
gbt_int4ge,
gbt_int4eq,
@@ -97,14 +90,13 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(int4_dist);
-Datum int4_dist(PG_FUNCTION_ARGS);
Datum
int4_dist(PG_FUNCTION_ARGS)
{
- int4 a = PG_GETARG_INT32(0);
- int4 b = PG_GETARG_INT32(1);
- int4 r;
- int4 ra;
+ int32 a = PG_GETARG_INT32(0);
+ int32 b = PG_GETARG_INT32(1);
+ int32 r;
+ int32 ra;
r = a - b;
ra = Abs(r);
diff --git a/contrib/btree_gist/btree_int8.c b/contrib/btree_gist/btree_int8.c
index c05d8687fd..8685cee176 100644
--- a/contrib/btree_gist/btree_int8.c
+++ b/contrib/btree_gist/btree_int8.c
@@ -23,14 +23,6 @@ PG_FUNCTION_INFO_V1(gbt_int8_distance);
PG_FUNCTION_INFO_V1(gbt_int8_penalty);
PG_FUNCTION_INFO_V1(gbt_int8_same);
-Datum gbt_int8_compress(PG_FUNCTION_ARGS);
-Datum gbt_int8_union(PG_FUNCTION_ARGS);
-Datum gbt_int8_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_int8_consistent(PG_FUNCTION_ARGS);
-Datum gbt_int8_distance(PG_FUNCTION_ARGS);
-Datum gbt_int8_penalty(PG_FUNCTION_ARGS);
-Datum gbt_int8_same(PG_FUNCTION_ARGS);
-
static bool
gbt_int8gt(const void *a, const void *b)
@@ -86,6 +78,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_int8,
sizeof(int64),
+ 16, /* sizeof(gbtreekey16) */
gbt_int8gt,
gbt_int8ge,
gbt_int8eq,
@@ -97,7 +90,6 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(int8_dist);
-Datum int8_dist(PG_FUNCTION_ARGS);
Datum
int8_dist(PG_FUNCTION_ARGS)
{
diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c
index bb779adf8e..68d80e8e0a 100644
--- a/contrib/btree_gist/btree_interval.c
+++ b/contrib/btree_gist/btree_interval.c
@@ -26,15 +26,6 @@ PG_FUNCTION_INFO_V1(gbt_intv_distance);
PG_FUNCTION_INFO_V1(gbt_intv_penalty);
PG_FUNCTION_INFO_V1(gbt_intv_same);
-Datum gbt_intv_compress(PG_FUNCTION_ARGS);
-Datum gbt_intv_decompress(PG_FUNCTION_ARGS);
-Datum gbt_intv_union(PG_FUNCTION_ARGS);
-Datum gbt_intv_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_intv_consistent(PG_FUNCTION_ARGS);
-Datum gbt_intv_distance(PG_FUNCTION_ARGS);
-Datum gbt_intv_penalty(PG_FUNCTION_ARGS);
-Datum gbt_intv_same(PG_FUNCTION_ARGS);
-
static bool
gbt_intvgt(const void *a, const void *b)
@@ -95,8 +86,10 @@ gbt_intv_dist(const void *a, const void *b)
/*
* INTERVALSIZE should be the actual size-on-disk of an Interval, as shown
- * in pg_type. This might be less than sizeof(Interval) if the compiler
- * insists on adding alignment padding at the end of the struct.
+ * in pg_type. This might be less than sizeof(Interval) if the compiler
+ * insists on adding alignment padding at the end of the struct. (Note:
+ * this concern is obsolete with the current definition of Interval, but
+ * was real before a separate "day" field was added to it.)
*/
#define INTERVALSIZE 16
@@ -104,6 +97,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_intv,
sizeof(Interval),
+ 32, /* sizeof(gbtreekey32) */
gbt_intvgt,
gbt_intvge,
gbt_intveq,
@@ -129,7 +123,6 @@ abs_interval(Interval *a)
}
PG_FUNCTION_INFO_V1(interval_dist);
-Datum interval_dist(PG_FUNCTION_ARGS);
Datum
interval_dist(PG_FUNCTION_ARGS)
{
diff --git a/contrib/btree_gist/btree_macaddr.c b/contrib/btree_gist/btree_macaddr.c
index 31125beda6..ed58a1b742 100644
--- a/contrib/btree_gist/btree_macaddr.c
+++ b/contrib/btree_gist/btree_macaddr.c
@@ -12,6 +12,7 @@ typedef struct
{
macaddr lower;
macaddr upper;
+ char pad[4]; /* make struct size = sizeof(gbtreekey16) */
} macKEY;
/*
@@ -24,13 +25,6 @@ PG_FUNCTION_INFO_V1(gbt_macad_consistent);
PG_FUNCTION_INFO_V1(gbt_macad_penalty);
PG_FUNCTION_INFO_V1(gbt_macad_same);
-Datum gbt_macad_compress(PG_FUNCTION_ARGS);
-Datum gbt_macad_union(PG_FUNCTION_ARGS);
-Datum gbt_macad_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_macad_consistent(PG_FUNCTION_ARGS);
-Datum gbt_macad_penalty(PG_FUNCTION_ARGS);
-Datum gbt_macad_same(PG_FUNCTION_ARGS);
-
static bool
gbt_macadgt(const void *a, const void *b)
@@ -81,6 +75,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_macad,
sizeof(macaddr),
+ 16, /* sizeof(gbtreekey16) */
gbt_macadgt,
gbt_macadge,
gbt_macadeq,
@@ -149,7 +144,7 @@ Datum
gbt_macad_union(PG_FUNCTION_ARGS)
{
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
- void *out = palloc(sizeof(macKEY));
+ void *out = palloc0(sizeof(macKEY));
*(int *) PG_GETARG_POINTER(1) = sizeof(macKEY);
PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo));
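
The pad[4] member and the switch to palloc0() above address the same concern from two sides: the datum declared to the index is gbtreekey16 (16 bytes), while two 6-byte macaddr bounds cover only 12, so the struct is padded up to the declared key size and the filler bytes are zeroed. A standalone size check with stand-in types (illustrative only):

#include <assert.h>

typedef struct mac_sketch { unsigned char bytes[6]; } mac_sketch;

typedef struct mackey_sketch
{
	mac_sketch	lower;
	mac_sketch	upper;
	char		pad[4];		/* bring sizeof up to the 16-byte key size */
} mackey_sketch;

int
main(void)
{
	assert(sizeof(mackey_sketch) == 16);
	return 0;
}
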
diff --git a/contrib/btree_gist/btree_numeric.c b/contrib/btree_gist/btree_numeric.c
index 37938aff6a..02ccca8647 100644
--- a/contrib/btree_gist/btree_numeric.c
+++ b/contrib/btree_gist/btree_numeric.c
@@ -23,13 +23,6 @@ PG_FUNCTION_INFO_V1(gbt_numeric_consistent);
PG_FUNCTION_INFO_V1(gbt_numeric_penalty);
PG_FUNCTION_INFO_V1(gbt_numeric_same);
-Datum gbt_numeric_compress(PG_FUNCTION_ARGS);
-Datum gbt_numeric_union(PG_FUNCTION_ARGS);
-Datum gbt_numeric_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_numeric_consistent(PG_FUNCTION_ARGS);
-Datum gbt_numeric_penalty(PG_FUNCTION_ARGS);
-Datum gbt_numeric_same(PG_FUNCTION_ARGS);
-
/* define for comparison */
diff --git a/contrib/btree_gist/btree_oid.c b/contrib/btree_gist/btree_oid.c
index e80a23c0b1..f6b7bfa05b 100644
--- a/contrib/btree_gist/btree_oid.c
+++ b/contrib/btree_gist/btree_oid.c
@@ -23,14 +23,6 @@ PG_FUNCTION_INFO_V1(gbt_oid_distance);
PG_FUNCTION_INFO_V1(gbt_oid_penalty);
PG_FUNCTION_INFO_V1(gbt_oid_same);
-Datum gbt_oid_compress(PG_FUNCTION_ARGS);
-Datum gbt_oid_union(PG_FUNCTION_ARGS);
-Datum gbt_oid_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_oid_consistent(PG_FUNCTION_ARGS);
-Datum gbt_oid_distance(PG_FUNCTION_ARGS);
-Datum gbt_oid_penalty(PG_FUNCTION_ARGS);
-Datum gbt_oid_same(PG_FUNCTION_ARGS);
-
static bool
gbt_oidgt(const void *a, const void *b)
@@ -92,6 +84,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_oid,
sizeof(Oid),
+ 8, /* sizeof(gbtreekey8) */
gbt_oidgt,
gbt_oidge,
gbt_oideq,
@@ -103,7 +96,6 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(oid_dist);
-Datum oid_dist(PG_FUNCTION_ARGS);
Datum
oid_dist(PG_FUNCTION_ARGS)
{
diff --git a/contrib/btree_gist/btree_text.c b/contrib/btree_gist/btree_text.c
index 277820dc0a..2e00cb60ba 100644
--- a/contrib/btree_gist/btree_text.c
+++ b/contrib/btree_gist/btree_text.c
@@ -19,15 +19,6 @@ PG_FUNCTION_INFO_V1(gbt_bpchar_consistent);
PG_FUNCTION_INFO_V1(gbt_text_penalty);
PG_FUNCTION_INFO_V1(gbt_text_same);
-Datum gbt_text_compress(PG_FUNCTION_ARGS);
-Datum gbt_bpchar_compress(PG_FUNCTION_ARGS);
-Datum gbt_text_union(PG_FUNCTION_ARGS);
-Datum gbt_text_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_text_consistent(PG_FUNCTION_ARGS);
-Datum gbt_bpchar_consistent(PG_FUNCTION_ARGS);
-Datum gbt_text_penalty(PG_FUNCTION_ARGS);
-Datum gbt_text_same(PG_FUNCTION_ARGS);
-
/* define for comparison */
@@ -121,7 +112,6 @@ gbt_text_compress(PG_FUNCTION_ARGS)
Datum
gbt_bpchar_compress(PG_FUNCTION_ARGS)
{
-
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTENTRY *retval;
diff --git a/contrib/btree_gist/btree_time.c b/contrib/btree_gist/btree_time.c
index a148e5e120..cdf81711e7 100644
--- a/contrib/btree_gist/btree_time.c
+++ b/contrib/btree_gist/btree_time.c
@@ -27,16 +27,6 @@ PG_FUNCTION_INFO_V1(gbt_timetz_consistent);
PG_FUNCTION_INFO_V1(gbt_time_penalty);
PG_FUNCTION_INFO_V1(gbt_time_same);
-Datum gbt_time_compress(PG_FUNCTION_ARGS);
-Datum gbt_timetz_compress(PG_FUNCTION_ARGS);
-Datum gbt_time_union(PG_FUNCTION_ARGS);
-Datum gbt_time_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_time_consistent(PG_FUNCTION_ARGS);
-Datum gbt_time_distance(PG_FUNCTION_ARGS);
-Datum gbt_timetz_consistent(PG_FUNCTION_ARGS);
-Datum gbt_time_penalty(PG_FUNCTION_ARGS);
-Datum gbt_time_same(PG_FUNCTION_ARGS);
-
#ifdef USE_FLOAT8_BYVAL
#define TimeADTGetDatumFast(X) TimeADTGetDatum(X)
@@ -134,6 +124,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_time,
sizeof(TimeADT),
+ 16, /* sizeof(gbtreekey16) */
gbt_timegt,
gbt_timege,
gbt_timeeq,
@@ -145,7 +136,6 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(time_dist);
-Datum time_dist(PG_FUNCTION_ARGS);
Datum
time_dist(PG_FUNCTION_ARGS)
{
diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c
index 05609232d2..a13dcc8bea 100644
--- a/contrib/btree_gist/btree_ts.c
+++ b/contrib/btree_gist/btree_ts.c
@@ -28,17 +28,6 @@ PG_FUNCTION_INFO_V1(gbt_tstz_distance);
PG_FUNCTION_INFO_V1(gbt_ts_penalty);
PG_FUNCTION_INFO_V1(gbt_ts_same);
-Datum gbt_ts_compress(PG_FUNCTION_ARGS);
-Datum gbt_tstz_compress(PG_FUNCTION_ARGS);
-Datum gbt_ts_union(PG_FUNCTION_ARGS);
-Datum gbt_ts_picksplit(PG_FUNCTION_ARGS);
-Datum gbt_ts_consistent(PG_FUNCTION_ARGS);
-Datum gbt_ts_distance(PG_FUNCTION_ARGS);
-Datum gbt_tstz_consistent(PG_FUNCTION_ARGS);
-Datum gbt_tstz_distance(PG_FUNCTION_ARGS);
-Datum gbt_ts_penalty(PG_FUNCTION_ARGS);
-Datum gbt_ts_same(PG_FUNCTION_ARGS);
-
#ifdef USE_FLOAT8_BYVAL
#define TimestampGetDatumFast(X) TimestampGetDatum(X)
@@ -138,6 +127,7 @@ static const gbtree_ninfo tinfo =
{
gbt_t_ts,
sizeof(Timestamp),
+ 16, /* sizeof(gbtreekey16) */
gbt_tsgt,
gbt_tsge,
gbt_tseq,
@@ -149,7 +139,6 @@ static const gbtree_ninfo tinfo =
PG_FUNCTION_INFO_V1(ts_dist);
-Datum ts_dist(PG_FUNCTION_ARGS);
Datum
ts_dist(PG_FUNCTION_ARGS)
{
@@ -178,7 +167,6 @@ ts_dist(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(tstz_dist);
-Datum tstz_dist(PG_FUNCTION_ARGS);
Datum
tstz_dist(PG_FUNCTION_ARGS)
{
@@ -382,7 +370,6 @@ gbt_ts_union(PG_FUNCTION_ARGS)
Datum
gbt_ts_penalty(PG_FUNCTION_ARGS)
{
-
tsKEY *origentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
tsKEY *newentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
float *result = (float *) PG_GETARG_POINTER(2);
diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c
index 832dbc500b..505633c98b 100644
--- a/contrib/btree_gist/btree_utils_num.c
+++ b/contrib/btree_gist/btree_utils_num.c
@@ -28,7 +28,7 @@ gbt_num_compress(GISTENTRY *retval, GISTENTRY *entry, const gbtree_ninfo *tinfo)
Cash ch;
} v;
- GBT_NUMKEY *r = (GBT_NUMKEY *) palloc0(2 * tinfo->size);
+ GBT_NUMKEY *r = (GBT_NUMKEY *) palloc0(tinfo->indexsize);
void *leaf = NULL;
switch (tinfo->t)
@@ -77,6 +77,8 @@ gbt_num_compress(GISTENTRY *retval, GISTENTRY *entry, const gbtree_ninfo *tinfo)
leaf = DatumGetPointer(entry->key);
}
+ Assert(tinfo->indexsize >= 2 * tinfo->size);
+
memcpy((void *) &r[0], leaf, tinfo->size);
memcpy((void *) &r[tinfo->size], leaf, tinfo->size);
retval = palloc(sizeof(GISTENTRY));
@@ -137,7 +139,6 @@ gbt_num_union(GBT_NUMKEY *out, const GistEntryVector *entryvec, const gbtree_nin
bool
gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b, const gbtree_ninfo *tinfo)
{
-
GBT_NUMKEY_R b1,
b2;
@@ -159,7 +160,6 @@ gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b, const gbtree_ninfo *tinfo
void
gbt_num_bin_union(Datum *u, GBT_NUMKEY *e, const gbtree_ninfo *tinfo)
{
-
GBT_NUMKEY_R rd;
rd.lower = &e[0];
@@ -167,7 +167,7 @@ gbt_num_bin_union(Datum *u, GBT_NUMKEY *e, const gbtree_ninfo *tinfo)
if (!DatumGetPointer(*u))
{
- *u = PointerGetDatum(palloc(2 * tinfo->size));
+ *u = PointerGetDatum(palloc0(tinfo->indexsize));
memcpy((void *) &(((GBT_NUMKEY *) DatumGetPointer(*u))[0]), (void *) rd.lower, tinfo->size);
memcpy((void *) &(((GBT_NUMKEY *) DatumGetPointer(*u))[tinfo->size]), (void *) rd.upper, tinfo->size);
}
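
Both allocation changes in this file follow from the new indexsize field: the datum actually stored in the index is one of the fixed-size gbtreekey structs, which can be larger than 2 * size (macaddr is the case in this patch), so the code now allocates indexsize bytes and leaves the tail zeroed. A rough standalone sketch of that layout, with hypothetical names:

#include <stdlib.h>
#include <string.h>

struct numkey_sketch
{
	size_t		size;		/* sizeof the element type, e.g. 2 for int16 */
	size_t		indexsize;	/* sizeof the on-disk key struct, e.g. 4 */
};

static unsigned char *
make_leaf_key(const struct numkey_sketch *t, const void *value)
{
	unsigned char *key = calloc(1, t->indexsize);	/* calloc here ~ palloc0 */

	if (key == NULL)
		return NULL;
	memcpy(key, value, t->size);			/* lower bound */
	memcpy(key + t->size, value, t->size);	/* upper bound */
	/* bytes [2*size, indexsize) stay zero */
	return key;
}
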
diff --git a/contrib/btree_gist/btree_utils_num.h b/contrib/btree_gist/btree_utils_num.h
index d7a61d2242..0d79cd2a7f 100644
--- a/contrib/btree_gist/btree_utils_num.h
+++ b/contrib/btree_gist/btree_utils_num.h
@@ -37,7 +37,8 @@ typedef struct
/* Attribs */
enum gbtree_type t; /* data type */
- int32 size; /* size of type , 0 means variable */
+ int32 size; /* size of type, 0 means variable */
+ int32 indexsize; /* size of datums stored in index */
/* Methods */
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index 9f8a132775..b7dd060a94 100644
--- a/contrib/btree_gist/btree_utils_var.c
+++ b/contrib/btree_gist/btree_utils_var.c
@@ -29,7 +29,6 @@ typedef struct
PG_FUNCTION_INFO_V1(gbt_var_decompress);
-Datum gbt_var_decompress(PG_FUNCTION_ARGS);
Datum
@@ -56,7 +55,6 @@ gbt_var_decompress(PG_FUNCTION_ARGS)
GBT_VARKEY_R
gbt_var_key_readable(const GBT_VARKEY *k)
{
-
GBT_VARKEY_R r;
r.lower = (bytea *) &(((char *) k)[VARHDRSZ]);
@@ -72,19 +70,21 @@ GBT_VARKEY *
gbt_var_key_copy(const GBT_VARKEY_R *u, bool force_node)
{
GBT_VARKEY *r = NULL;
+ int32 lowersize = VARSIZE(u->lower);
+ int32 uppersize = VARSIZE(u->upper);
if (u->lower == u->upper && !force_node)
{ /* leaf key mode */
- r = (GBT_VARKEY *) palloc(VARSIZE(u->lower) + VARHDRSZ);
- memcpy(VARDATA(r), u->lower, VARSIZE(u->lower));
- SET_VARSIZE(r, VARSIZE(u->lower) + VARHDRSZ);
+ r = (GBT_VARKEY *) palloc(lowersize + VARHDRSZ);
+ memcpy(VARDATA(r), u->lower, lowersize);
+ SET_VARSIZE(r, lowersize + VARHDRSZ);
}
else
{ /* node key mode */
- r = (GBT_VARKEY *) palloc(INTALIGN(VARSIZE(u->lower)) + VARSIZE(u->upper) + VARHDRSZ);
- memcpy(VARDATA(r), u->lower, VARSIZE(u->lower));
- memcpy(VARDATA(r) + INTALIGN(VARSIZE(u->lower)), u->upper, VARSIZE(u->upper));
- SET_VARSIZE(r, INTALIGN(VARSIZE(u->lower)) + VARSIZE(u->upper) + VARHDRSZ);
+ r = (GBT_VARKEY *) palloc0(INTALIGN(lowersize) + uppersize + VARHDRSZ);
+ memcpy(VARDATA(r), u->lower, lowersize);
+ memcpy(VARDATA(r) + INTALIGN(lowersize), u->upper, uppersize);
+ SET_VARSIZE(r, INTALIGN(lowersize) + uppersize + VARHDRSZ);
}
return r;
}
@@ -108,14 +108,12 @@ gbt_var_leaf2node(GBT_VARKEY *leaf, const gbtree_vinfo *tinfo)
static int32
gbt_var_node_cp_len(const GBT_VARKEY *node, const gbtree_vinfo *tinfo)
{
-
GBT_VARKEY_R r = gbt_var_key_readable(node);
int32 i = 0;
int32 l = 0;
int32 t1len = VARSIZE(r.lower) - VARHDRSZ;
int32 t2len = VARSIZE(r.upper) - VARHDRSZ;
int32 ml = Min(t1len, t2len);
-
char *p1 = VARDATA(r.lower);
char *p2 = VARDATA(r.upper);
@@ -126,7 +124,6 @@ gbt_var_node_cp_len(const GBT_VARKEY *node, const gbtree_vinfo *tinfo)
{
if (tinfo->eml > 1 && l == 0)
{
-
if ((l = pg_mblen(p1)) != pg_mblen(p2))
{
return i;
@@ -206,7 +203,7 @@ gbt_var_node_truncate(const GBT_VARKEY *node, int32 cpf_length, const gbtree_vin
len2 = Min(len2, (cpf_length + 1));
si = 2 * VARHDRSZ + INTALIGN(len1 + VARHDRSZ) + len2;
- out = (GBT_VARKEY *) palloc(si);
+ out = (GBT_VARKEY *) palloc0(si);
SET_VARSIZE(out, si);
memcpy(VARDATA(out), r.lower, len1 + VARHDRSZ);
@@ -225,13 +222,13 @@ void
gbt_var_bin_union(Datum *u, GBT_VARKEY *e, Oid collation,
const gbtree_vinfo *tinfo)
{
- GBT_VARKEY *nk = NULL;
- GBT_VARKEY *tmp = NULL;
- GBT_VARKEY_R nr;
GBT_VARKEY_R eo = gbt_var_key_readable(e);
+ GBT_VARKEY_R nr;
if (eo.lower == eo.upper) /* leaf */
{
+ GBT_VARKEY *tmp;
+
tmp = gbt_var_leaf2node(e, tinfo);
if (tmp != e)
eo = gbt_var_key_readable(tmp);
@@ -239,25 +236,26 @@ gbt_var_bin_union(Datum *u, GBT_VARKEY *e, Oid collation,
if (DatumGetPointer(*u))
{
-
GBT_VARKEY_R ro = gbt_var_key_readable((GBT_VARKEY *) DatumGetPointer(*u));
+ bool update = false;
+
+ nr.lower = ro.lower;
+ nr.upper = ro.upper;
if ((*tinfo->f_cmp) (ro.lower, eo.lower, collation) > 0)
{
nr.lower = eo.lower;
- nr.upper = ro.upper;
- nk = gbt_var_key_copy(&nr, TRUE);
+ update = true;
}
if ((*tinfo->f_cmp) (ro.upper, eo.upper, collation) < 0)
{
nr.upper = eo.upper;
- nr.lower = ro.lower;
- nk = gbt_var_key_copy(&nr, TRUE);
+ update = true;
}
- if (nk)
- *u = PointerGetDatum(nk);
+ if (update)
+ *u = PointerGetDatum(gbt_var_key_copy(&nr, TRUE));
}
else
{
@@ -272,7 +270,6 @@ gbt_var_bin_union(Datum *u, GBT_VARKEY *e, Oid collation,
GISTENTRY *
gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo)
{
-
GISTENTRY *retval;
if (entry->leafkey)
@@ -301,7 +298,6 @@ GBT_VARKEY *
gbt_var_union(const GistEntryVector *entryvec, int32 *size, Oid collation,
const gbtree_vinfo *tinfo)
{
-
int i = 0,
numranges = entryvec->n;
GBT_VARKEY *cur;
@@ -368,13 +364,14 @@ gbt_var_penalty(float *res, const GISTENTRY *o, const GISTENTRY *n,
GBT_VARKEY *newe = (GBT_VARKEY *) DatumGetPointer(n->key);
GBT_VARKEY_R ok,
nk;
- GBT_VARKEY *tmp = NULL;
*res = 0.0;
nk = gbt_var_key_readable(newe);
if (nk.lower == nk.upper) /* leaf */
{
+ GBT_VARKEY *tmp;
+
tmp = gbt_var_leaf2node(newe, tinfo);
if (tmp != newe)
nk = gbt_var_key_readable(tmp);
@@ -389,7 +386,7 @@ gbt_var_penalty(float *res, const GISTENTRY *o, const GISTENTRY *n,
gbt_bytea_pf_match(ok.upper, nk.upper, tinfo))))
{
Datum d = PointerGetDatum(0);
- double dres = 0.0;
+ double dres;
int32 ol,
ul;
@@ -400,20 +397,18 @@ gbt_var_penalty(float *res, const GISTENTRY *o, const GISTENTRY *n,
if (ul < ol)
{
- dres = (ol - ul); /* lost of common prefix len */
+ dres = (ol - ul); /* reduction of common prefix len */
}
else
{
GBT_VARKEY_R uk = gbt_var_key_readable((GBT_VARKEY *) DatumGetPointer(d));
+ unsigned char tmp[4];
- char tmp[4];
-
- tmp[0] = ((VARSIZE(ok.lower) - VARHDRSZ) == ul) ? (CHAR_MIN) : (VARDATA(ok.lower)[ul]);
- tmp[1] = ((VARSIZE(uk.lower) - VARHDRSZ) == ul) ? (CHAR_MIN) : (VARDATA(uk.lower)[ul]);
- tmp[2] = ((VARSIZE(ok.upper) - VARHDRSZ) == ul) ? (CHAR_MIN) : (VARDATA(ok.upper)[ul]);
- tmp[3] = ((VARSIZE(uk.upper) - VARHDRSZ) == ul) ? (CHAR_MIN) : (VARDATA(uk.upper)[ul]);
- dres = (tmp[0] - tmp[1]) +
- (tmp[3] - tmp[2]);
+ tmp[0] = (unsigned char) (((VARSIZE(ok.lower) - VARHDRSZ) <= ul) ? 0 : (VARDATA(ok.lower)[ul]));
+ tmp[1] = (unsigned char) (((VARSIZE(uk.lower) - VARHDRSZ) <= ul) ? 0 : (VARDATA(uk.lower)[ul]));
+ tmp[2] = (unsigned char) (((VARSIZE(ok.upper) - VARHDRSZ) <= ul) ? 0 : (VARDATA(ok.upper)[ul]));
+ tmp[3] = (unsigned char) (((VARSIZE(uk.upper) - VARHDRSZ) <= ul) ? 0 : (VARDATA(uk.upper)[ul]));
+ dres = Abs(tmp[0] - tmp[1]) + Abs(tmp[3] - tmp[2]);
dres /= 256.0;
}
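
One reason for the unsigned char change in the penalty code above: with plain char (signed on most platforms), bytes at or above 0x80 compare as negative values, so the old subtraction could yield a negative distance; casting to unsigned char and taking absolute differences keeps the penalty non-negative. A tiny standalone demonstration:

#include <stdio.h>

int
main(void)
{
	char			sc = (char) 0xE2;	/* e.g. first byte of UTF-8 'â' */
	unsigned char	uc = (unsigned char) 0xE2;

	printf("plain char:    %d\n", (int) sc);	/* commonly -30 where char is signed */
	printf("unsigned char: %d\n", (int) uc);	/* always 226 */
	return 0;
}
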
diff --git a/contrib/btree_gist/expected/not_equal.out b/contrib/btree_gist/expected/not_equal.out
index d9b91e2d56..1d5b55db5a 100644
--- a/contrib/btree_gist/expected/not_equal.out
+++ b/contrib/btree_gist/expected/not_equal.out
@@ -31,7 +31,6 @@ CREATE TABLE zoo (
animal TEXT,
EXCLUDE USING gist (cage WITH =, animal WITH <>)
);
-NOTICE: CREATE TABLE / EXCLUDE will create implicit index "zoo_cage_animal_excl" for table "zoo"
INSERT INTO zoo VALUES(123, 'zebra');
INSERT INTO zoo VALUES(123, 'zebra');
INSERT INTO zoo VALUES(123, 'lion');
diff --git a/contrib/chkpass/chkpass.c b/contrib/chkpass/chkpass.c
index 0c9fec0e67..283ad9a538 100644
--- a/contrib/chkpass/chkpass.c
+++ b/contrib/chkpass/chkpass.c
@@ -39,18 +39,6 @@ typedef struct chkpass
char password[16];
} chkpass;
-/*
- * Various forward declarations:
- */
-
-Datum chkpass_in(PG_FUNCTION_ARGS);
-Datum chkpass_out(PG_FUNCTION_ARGS);
-Datum chkpass_rout(PG_FUNCTION_ARGS);
-
-/* Only equal or not equal make sense */
-Datum chkpass_eq(PG_FUNCTION_ARGS);
-Datum chkpass_ne(PG_FUNCTION_ARGS);
-
/* This function checks that the password is a good one
* It's just a placeholder for now */
@@ -70,6 +58,7 @@ chkpass_in(PG_FUNCTION_ARGS)
char *str = PG_GETARG_CSTRING(0);
chkpass *result;
char mysalt[4];
+ char *crypt_output;
static char salt_chars[] =
"./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
@@ -92,7 +81,15 @@ chkpass_in(PG_FUNCTION_ARGS)
mysalt[1] = salt_chars[random() & 0x3f];
mysalt[2] = 0; /* technically the terminator is not necessary
* but I like to play safe */
- strcpy(result->password, crypt(str, mysalt));
+
+ crypt_output = crypt(str, mysalt);
+ if (crypt_output == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("crypt() failed")));
+
+ strlcpy(result->password, crypt_output, sizeof(result->password));
+
PG_RETURN_POINTER(result);
}
@@ -141,9 +138,16 @@ chkpass_eq(PG_FUNCTION_ARGS)
chkpass *a1 = (chkpass *) PG_GETARG_POINTER(0);
text *a2 = PG_GETARG_TEXT_PP(1);
char str[9];
+ char *crypt_output;
text_to_cstring_buffer(a2, str, sizeof(str));
- PG_RETURN_BOOL(strcmp(a1->password, crypt(str, a1->password)) == 0);
+ crypt_output = crypt(str, a1->password);
+ if (crypt_output == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("crypt() failed")));
+
+ PG_RETURN_BOOL(strcmp(a1->password, crypt_output) == 0);
}
PG_FUNCTION_INFO_V1(chkpass_ne);
@@ -153,7 +157,14 @@ chkpass_ne(PG_FUNCTION_ARGS)
chkpass *a1 = (chkpass *) PG_GETARG_POINTER(0);
text *a2 = PG_GETARG_TEXT_PP(1);
char str[9];
+ char *crypt_output;
text_to_cstring_buffer(a2, str, sizeof(str));
- PG_RETURN_BOOL(strcmp(a1->password, crypt(str, a1->password)) != 0);
+ crypt_output = crypt(str, a1->password);
+ if (crypt_output == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("crypt() failed")));
+
+ PG_RETURN_BOOL(strcmp(a1->password, crypt_output) != 0);
}
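
The pattern added in all three chkpass functions above is the same: check crypt()'s return value before using it, since crypt() can return NULL on failure (for example when the C library refuses the requested hashing method), and copy the result with an explicit bound. A plain-C sketch of that defensive shape, outside the extension, with hypothetical names:

#define _XOPEN_SOURCE 700
#include <stdio.h>
#include <string.h>
#include <unistd.h>		/* crypt() on many platforms; some need <crypt.h> */

static int
store_password(char *dest, size_t destsz, const char *plain, const char *salt)
{
	char *hashed = crypt(plain, salt);

	if (hashed == NULL)
	{
		fprintf(stderr, "crypt() failed\n");
		return -1;
	}
	/* strncpy + explicit termination plays the role of strlcpy here */
	strncpy(dest, hashed, destsz - 1);
	dest[destsz - 1] = '\0';
	return 0;
}
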
diff --git a/contrib/citext/citext.c b/contrib/citext/citext.c
index a584f57455..1174b70aa7 100644
--- a/contrib/citext/citext.c
+++ b/contrib/citext/citext.c
@@ -19,16 +19,6 @@ PG_MODULE_MAGIC;
*/
static int32 citextcmp(text *left, text *right, Oid collid);
-extern Datum citext_cmp(PG_FUNCTION_ARGS);
-extern Datum citext_hash(PG_FUNCTION_ARGS);
-extern Datum citext_eq(PG_FUNCTION_ARGS);
-extern Datum citext_ne(PG_FUNCTION_ARGS);
-extern Datum citext_gt(PG_FUNCTION_ARGS);
-extern Datum citext_ge(PG_FUNCTION_ARGS);
-extern Datum citext_lt(PG_FUNCTION_ARGS);
-extern Datum citext_le(PG_FUNCTION_ARGS);
-extern Datum citext_smaller(PG_FUNCTION_ARGS);
-extern Datum citext_larger(PG_FUNCTION_ARGS);
/*
* =================
diff --git a/contrib/citext/expected/citext.out b/contrib/citext/expected/citext.out
index 5392a7d1f3..411b689b4b 100644
--- a/contrib/citext/expected/citext.out
+++ b/contrib/citext/expected/citext.out
@@ -218,7 +218,6 @@ SELECT citext_cmp('B'::citext, 'a'::citext) > 0 AS true;
CREATE TEMP TABLE try (
name citext PRIMARY KEY
);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "try_pkey" for table "try"
INSERT INTO try (name)
VALUES ('a'), ('ab'), ('â'), ('aba'), ('b'), ('ba'), ('bab'), ('AZ');
SELECT name, 'a' = name AS eq_a FROM try WHERE name <> 'â';
@@ -326,6 +325,7 @@ VALUES ('aardvark'),
('aba'),
('ABC'),
('abd');
+CREATE INDEX srt_name ON srt (name);
-- Check the min() and max() aggregates, with and without index.
set enable_seqscan = off;
SELECT MIN(name) AS "AAA" FROM srt;
@@ -1691,7 +1691,7 @@ SELECT btrim('xyxtrimyyx'::citext, 'xy'::text ) = 'trim' AS t;
-- chr() takes an int and returns text.
-- convert() and convert_from take bytea and return text.
-SELECT convert_to( name, 'ISO-8859-1' ) = convert_to( name::text, 'ISO-8859-1' ) AS t FROM srt;
+SELECT convert_from( name::bytea, 'SQL_ASCII' ) = convert_from( name::text::bytea, 'SQL_ASCII' ) AS t FROM srt;
t
---
t
@@ -2276,3 +2276,44 @@ SELECT like_escape( name::text, ''::citext ) = like_escape( name::text, '' ) AS
t
(5 rows)
+-- Ensure correct behavior for citext with materialized views.
+CREATE TABLE citext_table (
+ id serial primary key,
+ name citext
+);
+INSERT INTO citext_table (name)
+ VALUES ('one'), ('two'), ('three'), (NULL), (NULL);
+CREATE MATERIALIZED VIEW citext_matview AS
+ SELECT * FROM citext_table;
+CREATE UNIQUE INDEX citext_matview_id
+ ON citext_matview (id);
+SELECT *
+ FROM citext_matview m
+ FULL JOIN citext_table t ON (t.id = m.id AND t *= m)
+ WHERE t.id IS NULL OR m.id IS NULL;
+ id | name | id | name
+----+------+----+------
+(0 rows)
+
+UPDATE citext_table SET name = 'Two' WHERE name = 'TWO';
+SELECT *
+ FROM citext_matview m
+ FULL JOIN citext_table t ON (t.id = m.id AND t *= m)
+ WHERE t.id IS NULL OR m.id IS NULL;
+ id | name | id | name
+----+------+----+------
+ | | 2 | Two
+ 2 | two | |
+(2 rows)
+
+REFRESH MATERIALIZED VIEW CONCURRENTLY citext_matview;
+SELECT * FROM citext_matview ORDER BY id;
+ id | name
+----+-------
+ 1 | one
+ 2 | Two
+ 3 | three
+ 4 |
+ 5 |
+(5 rows)
+
diff --git a/contrib/citext/expected/citext_1.out b/contrib/citext/expected/citext_1.out
index 5316ad0cda..da3862f49b 100644
--- a/contrib/citext/expected/citext_1.out
+++ b/contrib/citext/expected/citext_1.out
@@ -218,7 +218,6 @@ SELECT citext_cmp('B'::citext, 'a'::citext) > 0 AS true;
CREATE TEMP TABLE try (
name citext PRIMARY KEY
);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "try_pkey" for table "try"
INSERT INTO try (name)
VALUES ('a'), ('ab'), ('â'), ('aba'), ('b'), ('ba'), ('bab'), ('AZ');
SELECT name, 'a' = name AS eq_a FROM try WHERE name <> 'â';
@@ -326,6 +325,7 @@ VALUES ('aardvark'),
('aba'),
('ABC'),
('abd');
+CREATE INDEX srt_name ON srt (name);
-- Check the min() and max() aggregates, with and without index.
set enable_seqscan = off;
SELECT MIN(name) AS "AAA" FROM srt;
@@ -1691,7 +1691,7 @@ SELECT btrim('xyxtrimyyx'::citext, 'xy'::text ) = 'trim' AS t;
-- chr() takes an int and returns text.
-- convert() and convert_from take bytea and return text.
-SELECT convert_to( name, 'ISO-8859-1' ) = convert_to( name::text, 'ISO-8859-1' ) AS t FROM srt;
+SELECT convert_from( name::bytea, 'SQL_ASCII' ) = convert_from( name::text::bytea, 'SQL_ASCII' ) AS t FROM srt;
t
---
t
@@ -2276,3 +2276,44 @@ SELECT like_escape( name::text, ''::citext ) = like_escape( name::text, '' ) AS
t
(5 rows)
+-- Ensure correct behavior for citext with materialized views.
+CREATE TABLE citext_table (
+ id serial primary key,
+ name citext
+);
+INSERT INTO citext_table (name)
+ VALUES ('one'), ('two'), ('three'), (NULL), (NULL);
+CREATE MATERIALIZED VIEW citext_matview AS
+ SELECT * FROM citext_table;
+CREATE UNIQUE INDEX citext_matview_id
+ ON citext_matview (id);
+SELECT *
+ FROM citext_matview m
+ FULL JOIN citext_table t ON (t.id = m.id AND t *= m)
+ WHERE t.id IS NULL OR m.id IS NULL;
+ id | name | id | name
+----+------+----+------
+(0 rows)
+
+UPDATE citext_table SET name = 'Two' WHERE name = 'TWO';
+SELECT *
+ FROM citext_matview m
+ FULL JOIN citext_table t ON (t.id = m.id AND t *= m)
+ WHERE t.id IS NULL OR m.id IS NULL;
+ id | name | id | name
+----+------+----+------
+ | | 2 | Two
+ 2 | two | |
+(2 rows)
+
+REFRESH MATERIALIZED VIEW CONCURRENTLY citext_matview;
+SELECT * FROM citext_matview ORDER BY id;
+ id | name
+----+-------
+ 1 | one
+ 2 | Two
+ 3 | three
+ 4 |
+ 5 |
+(5 rows)
+
diff --git a/contrib/citext/sql/citext.sql b/contrib/citext/sql/citext.sql
index 07497401a4..27678fab5d 100644
--- a/contrib/citext/sql/citext.sql
+++ b/contrib/citext/sql/citext.sql
@@ -127,6 +127,8 @@ VALUES ('aardvark'),
('ABC'),
('abd');
+CREATE INDEX srt_name ON srt (name);
+
-- Check the min() and max() aggregates, with and without index.
set enable_seqscan = off;
SELECT MIN(name) AS "AAA" FROM srt;
@@ -570,7 +572,7 @@ SELECT btrim('xyxtrimyyx'::citext, 'xy'::text ) = 'trim' AS t;
-- chr() takes an int and returns text.
-- convert() and convert_from take bytea and return text.
-SELECT convert_to( name, 'ISO-8859-1' ) = convert_to( name::text, 'ISO-8859-1' ) AS t FROM srt;
+SELECT convert_from( name::bytea, 'SQL_ASCII' ) = convert_from( name::text::bytea, 'SQL_ASCII' ) AS t FROM srt;
SELECT decode('MTIzAAE='::citext, 'base64') = decode('MTIzAAE='::text, 'base64') AS t;
-- encode() takes bytea and returns text.
SELECT initcap('hi THOMAS'::citext) = initcap('hi THOMAS'::text) AS t;
@@ -709,3 +711,26 @@ SELECT COUNT(*) = 19::bigint AS t FROM try;
SELECT like_escape( name, '' ) = like_escape( name::text, '' ) AS t FROM srt;
SELECT like_escape( name::text, ''::citext ) = like_escape( name::text, '' ) AS t FROM srt;
+
+-- Ensure correct behavior for citext with materialized views.
+CREATE TABLE citext_table (
+ id serial primary key,
+ name citext
+);
+INSERT INTO citext_table (name)
+ VALUES ('one'), ('two'), ('three'), (NULL), (NULL);
+CREATE MATERIALIZED VIEW citext_matview AS
+ SELECT * FROM citext_table;
+CREATE UNIQUE INDEX citext_matview_id
+ ON citext_matview (id);
+SELECT *
+ FROM citext_matview m
+ FULL JOIN citext_table t ON (t.id = m.id AND t *= m)
+ WHERE t.id IS NULL OR m.id IS NULL;
+UPDATE citext_table SET name = 'Two' WHERE name = 'TWO';
+SELECT *
+ FROM citext_matview m
+ FULL JOIN citext_table t ON (t.id = m.id AND t *= m)
+ WHERE t.id IS NULL OR m.id IS NULL;
+REFRESH MATERIALIZED VIEW CONCURRENTLY citext_matview;
+SELECT * FROM citext_matview ORDER BY id;
diff --git a/contrib/cube/Makefile b/contrib/cube/Makefile
index 19fd7dc658..b5cd5d0f33 100644
--- a/contrib/cube/Makefile
+++ b/contrib/cube/Makefile
@@ -27,20 +27,6 @@ endif
# cubescan is compiled as part of cubeparse
cubeparse.o: cubescan.c
-cubeparse.c: cubeparse.y
-ifdef BISON
- $(BISON) $(BISONFLAGS) -o $@ $<
-else
- @$(missing) bison $< $@
-endif
-
-cubescan.c: cubescan.l
-ifdef FLEX
- $(FLEX) $(FLEXFLAGS) -o'$@' $<
-else
- @$(missing) flex $< $@
-endif
-
distprep: cubeparse.c cubescan.c
maintainer-clean:
diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c
index 37cbfdd20b..b0305ef431 100644
--- a/contrib/cube/cube.c
+++ b/contrib/cube/cube.c
@@ -35,7 +35,6 @@ extern void cube_scanner_finish(void);
** Input/Output routines
*/
PG_FUNCTION_INFO_V1(cube_in);
-PG_FUNCTION_INFO_V1(cube);
PG_FUNCTION_INFO_V1(cube_a_f8_f8);
PG_FUNCTION_INFO_V1(cube_a_f8);
PG_FUNCTION_INFO_V1(cube_out);
@@ -48,20 +47,6 @@ PG_FUNCTION_INFO_V1(cube_ll_coord);
PG_FUNCTION_INFO_V1(cube_ur_coord);
PG_FUNCTION_INFO_V1(cube_subset);
-Datum cube_in(PG_FUNCTION_ARGS);
-Datum cube(PG_FUNCTION_ARGS);
-Datum cube_a_f8_f8(PG_FUNCTION_ARGS);
-Datum cube_a_f8(PG_FUNCTION_ARGS);
-Datum cube_out(PG_FUNCTION_ARGS);
-Datum cube_f8(PG_FUNCTION_ARGS);
-Datum cube_f8_f8(PG_FUNCTION_ARGS);
-Datum cube_c_f8(PG_FUNCTION_ARGS);
-Datum cube_c_f8_f8(PG_FUNCTION_ARGS);
-Datum cube_dim(PG_FUNCTION_ARGS);
-Datum cube_ll_coord(PG_FUNCTION_ARGS);
-Datum cube_ur_coord(PG_FUNCTION_ARGS);
-Datum cube_subset(PG_FUNCTION_ARGS);
-
/*
** GiST support methods
*/
@@ -74,14 +59,6 @@ PG_FUNCTION_INFO_V1(g_cube_picksplit);
PG_FUNCTION_INFO_V1(g_cube_union);
PG_FUNCTION_INFO_V1(g_cube_same);
-Datum g_cube_consistent(PG_FUNCTION_ARGS);
-Datum g_cube_compress(PG_FUNCTION_ARGS);
-Datum g_cube_decompress(PG_FUNCTION_ARGS);
-Datum g_cube_penalty(PG_FUNCTION_ARGS);
-Datum g_cube_picksplit(PG_FUNCTION_ARGS);
-Datum g_cube_union(PG_FUNCTION_ARGS);
-Datum g_cube_same(PG_FUNCTION_ARGS);
-
/*
** B-tree support functions
*/
@@ -93,14 +70,6 @@ PG_FUNCTION_INFO_V1(cube_le);
PG_FUNCTION_INFO_V1(cube_ge);
PG_FUNCTION_INFO_V1(cube_cmp);
-Datum cube_eq(PG_FUNCTION_ARGS);
-Datum cube_ne(PG_FUNCTION_ARGS);
-Datum cube_lt(PG_FUNCTION_ARGS);
-Datum cube_gt(PG_FUNCTION_ARGS);
-Datum cube_le(PG_FUNCTION_ARGS);
-Datum cube_ge(PG_FUNCTION_ARGS);
-Datum cube_cmp(PG_FUNCTION_ARGS);
-
/*
** R-tree support functions
*/
@@ -112,13 +81,6 @@ PG_FUNCTION_INFO_V1(cube_union);
PG_FUNCTION_INFO_V1(cube_inter);
PG_FUNCTION_INFO_V1(cube_size);
-Datum cube_contains(PG_FUNCTION_ARGS);
-Datum cube_contained(PG_FUNCTION_ARGS);
-Datum cube_overlap(PG_FUNCTION_ARGS);
-Datum cube_union(PG_FUNCTION_ARGS);
-Datum cube_inter(PG_FUNCTION_ARGS);
-Datum cube_size(PG_FUNCTION_ARGS);
-
/*
** miscellaneous
*/
@@ -126,10 +88,6 @@ PG_FUNCTION_INFO_V1(cube_distance);
PG_FUNCTION_INFO_V1(cube_is_point);
PG_FUNCTION_INFO_V1(cube_enlarge);
-Datum cube_distance(PG_FUNCTION_ARGS);
-Datum cube_is_point(PG_FUNCTION_ARGS);
-Datum cube_enlarge(PG_FUNCTION_ARGS);
-
/*
** For internal use only
*/
@@ -146,6 +104,7 @@ bool g_cube_internal_consistent(NDBOX *key, NDBOX *query, StrategyNumber strate
 ** Auxiliary functions
*/
static double distance_1D(double a1, double a2, double b1, double b2);
+static bool cube_is_point_internal(NDBOX *cube);
/*****************************************************************************
@@ -183,6 +142,7 @@ cube_a_f8_f8(PG_FUNCTION_ARGS)
int i;
int dim;
int size;
+ bool point;
double *dur,
*dll;
@@ -200,16 +160,32 @@ cube_a_f8_f8(PG_FUNCTION_ARGS)
dur = ARRPTR(ur);
dll = ARRPTR(ll);
- size = offsetof(NDBOX, x[0]) +sizeof(double) * 2 * dim;
+ /* Check if it's a point */
+ point = true;
+ for (i = 0; i < dim; i++)
+ {
+ if (dur[i] != dll[i])
+ {
+ point = false;
+ break;
+ }
+ }
+
+ size = point ? POINT_SIZE(dim) : CUBE_SIZE(dim);
result = (NDBOX *) palloc0(size);
SET_VARSIZE(result, size);
- result->dim = dim;
+ SET_DIM(result, dim);
for (i = 0; i < dim; i++)
- {
result->x[i] = dur[i];
- result->x[i + dim] = dll[i];
+
+ if (!point)
+ {
+ for (i = 0; i < dim; i++)
+ result->x[i + dim] = dll[i];
}
+ else
+ SET_POINT_BIT(result);
PG_RETURN_NDBOX(result);
}
@@ -236,16 +212,14 @@ cube_a_f8(PG_FUNCTION_ARGS)
dur = ARRPTR(ur);
- size = offsetof(NDBOX, x[0]) +sizeof(double) * 2 * dim;
+ size = POINT_SIZE(dim);
result = (NDBOX *) palloc0(size);
SET_VARSIZE(result, size);
- result->dim = dim;
+ SET_DIM(result, dim);
+ SET_POINT_BIT(result);
for (i = 0; i < dim; i++)
- {
result->x[i] = dur[i];
- result->x[i + dim] = dur[i];
- }
PG_RETURN_NDBOX(result);
}
@@ -266,17 +240,20 @@ cube_subset(PG_FUNCTION_ARGS)
(errcode(ERRCODE_ARRAY_ELEMENT_ERROR),
errmsg("cannot work with arrays containing NULLs")));
- dx = (int4 *) ARR_DATA_PTR(idx);
+ dx = (int32 *) ARR_DATA_PTR(idx);
dim = ARRNELEMS(idx);
- size = offsetof(NDBOX, x[0]) +sizeof(double) * 2 * dim;
+ size = IS_POINT(c) ? POINT_SIZE(dim) : CUBE_SIZE(dim);
result = (NDBOX *) palloc0(size);
SET_VARSIZE(result, size);
- result->dim = dim;
+ SET_DIM(result, dim);
+
+ if (IS_POINT(c))
+ SET_POINT_BIT(result);
for (i = 0; i < dim; i++)
{
- if ((dx[i] <= 0) || (dx[i] > c->dim))
+ if ((dx[i] <= 0) || (dx[i] > DIM(c)))
{
pfree(result);
ereport(ERROR,
@@ -284,7 +261,8 @@ cube_subset(PG_FUNCTION_ARGS)
errmsg("Index out of bounds")));
}
result->x[i] = c->x[dx[i] - 1];
- result->x[i + dim] = c->x[dx[i] + c->dim - 1];
+ if (!IS_POINT(c))
+ result->x[i + dim] = c->x[dx[i] + DIM(c) - 1];
}
PG_FREE_IF_COPY(c, 0);
@@ -296,8 +274,7 @@ cube_out(PG_FUNCTION_ARGS)
{
NDBOX *cube = PG_GETARG_NDBOX(0);
StringInfoData buf;
- int dim = cube->dim;
- bool equal = true;
+ int dim = DIM(cube);
int i;
int ndig;
@@ -318,21 +295,19 @@ cube_out(PG_FUNCTION_ARGS)
for (i = 0; i < dim; i++)
{
if (i > 0)
- appendStringInfo(&buf, ", ");
- appendStringInfo(&buf, "%.*g", ndig, cube->x[i]);
- if (cube->x[i] != cube->x[i + dim])
- equal = false;
+ appendStringInfoString(&buf, ", ");
+ appendStringInfo(&buf, "%.*g", ndig, LL_COORD(cube, i));
}
appendStringInfoChar(&buf, ')');
- if (!equal)
+ if (!cube_is_point_internal(cube))
{
- appendStringInfo(&buf, ",(");
+ appendStringInfoString(&buf, ",(");
for (i = 0; i < dim; i++)
{
if (i > 0)
- appendStringInfo(&buf, ", ");
- appendStringInfo(&buf, "%.*g", ndig, cube->x[i + dim]);
+ appendStringInfoString(&buf, ", ");
+ appendStringInfo(&buf, "%.*g", ndig, UR_COORD(cube, i));
}
appendStringInfoChar(&buf, ')');
}
@@ -563,7 +538,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
rt_cube_size(datum_r, &size_r);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@@ -579,7 +554,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
{
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/
@@ -731,51 +706,60 @@ cube_union_v0(NDBOX *a, NDBOX *b)
{
int i;
NDBOX *result;
+ int dim;
+ int size;
- if (a->dim >= b->dim)
- {
- result = palloc0(VARSIZE(a));
- SET_VARSIZE(result, VARSIZE(a));
- result->dim = a->dim;
- }
- else
- {
- result = palloc0(VARSIZE(b));
- SET_VARSIZE(result, VARSIZE(b));
- result->dim = b->dim;
- }
+ /* trivial case */
+ if (a == b)
+ return a;
- /* swap the box pointers if needed */
- if (a->dim < b->dim)
+ /* swap the arguments if needed, so that 'a' is always larger than 'b' */
+ if (DIM(a) < DIM(b))
{
NDBOX *tmp = b;
b = a;
a = tmp;
}
+ dim = DIM(a);
- /*
- * use the potentially smaller of the two boxes (b) to fill in the result,
- * padding absent dimensions with zeroes
- */
- for (i = 0; i < b->dim; i++)
+ size = CUBE_SIZE(dim);
+ result = palloc0(size);
+ SET_VARSIZE(result, size);
+ SET_DIM(result, dim);
+
+ /* First compute the union of the dimensions present in both args */
+ for (i = 0; i < DIM(b); i++)
{
- result->x[i] = Min(b->x[i], b->x[i + b->dim]);
- result->x[i + a->dim] = Max(b->x[i], b->x[i + b->dim]);
+ result->x[i] = Min(
+ Min(LL_COORD(a, i), UR_COORD(a, i)),
+ Min(LL_COORD(b, i), UR_COORD(b, i))
+ );
+ result->x[i + DIM(a)] = Max(
+ Max(LL_COORD(a, i), UR_COORD(a, i)),
+ Max(LL_COORD(b, i), UR_COORD(b, i))
+ );
}
- for (i = b->dim; i < a->dim; i++)
+ /* continue on the higher dimensions only present in 'a' */
+ for (; i < DIM(a); i++)
{
- result->x[i] = 0;
- result->x[i + a->dim] = 0;
+ result->x[i] = Min(0,
+ Min(LL_COORD(a, i), UR_COORD(a, i))
+ );
+ result->x[i + dim] = Max(0,
+ Max(LL_COORD(a, i), UR_COORD(a, i))
+ );
}
- /* compute the union */
- for (i = 0; i < a->dim; i++)
+ /*
+ * Check if the result was in fact a point, and set the flag in the datum
+ * accordingly. (we don't bother to repalloc it smaller)
+ */
+ if (cube_is_point_internal(result))
{
- result->x[i] =
- Min(Min(a->x[i], a->x[i + a->dim]), result->x[i]);
- result->x[i + a->dim] = Max(Max(a->x[i],
- a->x[i + a->dim]), result->x[i + a->dim]);
+ size = POINT_SIZE(dim);
+ SET_VARSIZE(result, size);
+ SET_POINT_BIT(result);
}
return (result);
@@ -804,22 +788,11 @@ cube_inter(PG_FUNCTION_ARGS)
NDBOX *result;
bool swapped = false;
int i;
+ int dim;
+ int size;
- if (a->dim >= b->dim)
- {
- result = palloc0(VARSIZE(a));
- SET_VARSIZE(result, VARSIZE(a));
- result->dim = a->dim;
- }
- else
- {
- result = palloc0(VARSIZE(b));
- SET_VARSIZE(result, VARSIZE(b));
- result->dim = b->dim;
- }
-
- /* swap the box pointers if needed */
- if (a->dim < b->dim)
+ /* swap the arguments if needed, so that 'a' is always larger than 'b' */
+ if (DIM(a) < DIM(b))
{
NDBOX *tmp = b;
@@ -827,29 +800,46 @@ cube_inter(PG_FUNCTION_ARGS)
a = tmp;
swapped = true;
}
+ dim = DIM(a);
- /*
- * use the potentially smaller of the two boxes (b) to fill in the
- * result, padding absent dimensions with zeroes
- */
- for (i = 0; i < b->dim; i++)
+ size = CUBE_SIZE(dim);
+ result = (NDBOX *) palloc0(size);
+ SET_VARSIZE(result, size);
+ SET_DIM(result, dim);
+
+ /* First compute intersection of the dimensions present in both args */
+ for (i = 0; i < DIM(b); i++)
{
- result->x[i] = Min(b->x[i], b->x[i + b->dim]);
- result->x[i + a->dim] = Max(b->x[i], b->x[i + b->dim]);
+ result->x[i] = Max(
+ Min(LL_COORD(a, i), UR_COORD(a, i)),
+ Min(LL_COORD(b, i), UR_COORD(b, i))
+ );
+ result->x[i + DIM(a)] = Min(
+ Max(LL_COORD(a, i), UR_COORD(a, i)),
+ Max(LL_COORD(b, i), UR_COORD(b, i))
+ );
}
- for (i = b->dim; i < a->dim; i++)
+	/* continue on the higher dimensions only present in 'a' */
+ for (; i < DIM(a); i++)
{
- result->x[i] = 0;
- result->x[i + a->dim] = 0;
+ result->x[i] = Max(0,
+ Min(LL_COORD(a, i), UR_COORD(a, i))
+ );
+ result->x[i + DIM(a)] = Min(0,
+ Max(LL_COORD(a, i), UR_COORD(a, i))
+ );
}
- /* compute the intersection */
- for (i = 0; i < a->dim; i++)
+ /*
+ * Check if the result was in fact a point, and set the flag in the datum
+ * accordingly. (we don't bother to repalloc it smaller)
+ */
+ if (cube_is_point_internal(result))
{
- result->x[i] =
- Max(Min(a->x[i], a->x[i + a->dim]), result->x[i]);
- result->x[i + a->dim] = Min(Max(a->x[i],
- a->x[i + a->dim]), result->x[i + a->dim]);
+ size = POINT_SIZE(dim);
+ result = repalloc(result, size);
+ SET_VARSIZE(result, size);
+ SET_POINT_BIT(result);
}
if (swapped)
@@ -875,12 +865,11 @@ cube_size(PG_FUNCTION_ARGS)
{
NDBOX *a = PG_GETARG_NDBOX(0);
double result;
- int i,
- j;
+ int i;
result = 1.0;
- for (i = 0, j = a->dim; i < a->dim; i++, j++)
- result = result * Abs((a->x[j] - a->x[i]));
+ for (i = 0; i < DIM(a); i++)
+ result = result * Abs((LL_COORD(a, i) - UR_COORD(a, i)));
PG_FREE_IF_COPY(a, 0);
PG_RETURN_FLOAT8(result);
@@ -889,16 +878,15 @@ cube_size(PG_FUNCTION_ARGS)
void
rt_cube_size(NDBOX *a, double *size)
{
- int i,
- j;
+ int i;
if (a == (NDBOX *) NULL)
*size = 0.0;
else
{
*size = 1.0;
- for (i = 0, j = a->dim; i < a->dim; i++, j++)
- *size = (*size) * Abs((a->x[j] - a->x[i]));
+ for (i = 0; i < DIM(a); i++)
+ *size = (*size) * Abs(UR_COORD(a, i) - LL_COORD(a, i));
}
return;
}
@@ -911,43 +899,43 @@ cube_cmp_v0(NDBOX *a, NDBOX *b)
int i;
int dim;
- dim = Min(a->dim, b->dim);
+ dim = Min(DIM(a), DIM(b));
/* compare the common dimensions */
for (i = 0; i < dim; i++)
{
- if (Min(a->x[i], a->x[a->dim + i]) >
- Min(b->x[i], b->x[b->dim + i]))
+ if (Min(LL_COORD(a, i), UR_COORD(a, i)) >
+ Min(LL_COORD(b, i), UR_COORD(b, i)))
return 1;
- if (Min(a->x[i], a->x[a->dim + i]) <
- Min(b->x[i], b->x[b->dim + i]))
+ if (Min(LL_COORD(a, i), UR_COORD(a, i)) <
+ Min(LL_COORD(b, i), UR_COORD(b, i)))
return -1;
}
for (i = 0; i < dim; i++)
{
- if (Max(a->x[i], a->x[a->dim + i]) >
- Max(b->x[i], b->x[b->dim + i]))
+ if (Max(LL_COORD(a, i), UR_COORD(a, i)) >
+ Max(LL_COORD(b, i), UR_COORD(b, i)))
return 1;
- if (Max(a->x[i], a->x[a->dim + i]) <
- Max(b->x[i], b->x[b->dim + i]))
+ if (Max(LL_COORD(a, i), UR_COORD(a, i)) <
+ Max(LL_COORD(b, i), UR_COORD(b, i)))
return -1;
}
/* compare extra dimensions to zero */
- if (a->dim > b->dim)
+ if (DIM(a) > DIM(b))
{
- for (i = dim; i < a->dim; i++)
+ for (i = dim; i < DIM(a); i++)
{
- if (Min(a->x[i], a->x[a->dim + i]) > 0)
+ if (Min(LL_COORD(a, i), UR_COORD(a, i)) > 0)
return 1;
- if (Min(a->x[i], a->x[a->dim + i]) < 0)
+ if (Min(LL_COORD(a, i), UR_COORD(a, i)) < 0)
return -1;
}
- for (i = dim; i < a->dim; i++)
+ for (i = dim; i < DIM(a); i++)
{
- if (Max(a->x[i], a->x[a->dim + i]) > 0)
+ if (Max(LL_COORD(a, i), UR_COORD(a, i)) > 0)
return 1;
- if (Max(a->x[i], a->x[a->dim + i]) < 0)
+ if (Max(LL_COORD(a, i), UR_COORD(a, i)) < 0)
return -1;
}
@@ -957,20 +945,20 @@ cube_cmp_v0(NDBOX *a, NDBOX *b)
*/
return 1;
}
- if (a->dim < b->dim)
+ if (DIM(a) < DIM(b))
{
- for (i = dim; i < b->dim; i++)
+ for (i = dim; i < DIM(b); i++)
{
- if (Min(b->x[i], b->x[b->dim + i]) > 0)
+ if (Min(LL_COORD(b, i), UR_COORD(b, i)) > 0)
return -1;
- if (Min(b->x[i], b->x[b->dim + i]) < 0)
+ if (Min(LL_COORD(b, i), UR_COORD(b, i)) < 0)
return 1;
}
- for (i = dim; i < b->dim; i++)
+ for (i = dim; i < DIM(b); i++)
{
- if (Max(b->x[i], b->x[b->dim + i]) > 0)
+ if (Max(LL_COORD(b, i), UR_COORD(b, i)) > 0)
return -1;
- if (Max(b->x[i], b->x[b->dim + i]) < 0)
+ if (Max(LL_COORD(b, i), UR_COORD(b, i)) < 0)
return 1;
}
@@ -1100,30 +1088,30 @@ cube_contains_v0(NDBOX *a, NDBOX *b)
if ((a == NULL) || (b == NULL))
return (FALSE);
- if (a->dim < b->dim)
+ if (DIM(a) < DIM(b))
{
/*
* the further comparisons will make sense if the excess dimensions of
* (b) were zeroes Since both UL and UR coordinates must be zero, we
* can check them all without worrying about which is which.
*/
- for (i = a->dim; i < b->dim; i++)
+ for (i = DIM(a); i < DIM(b); i++)
{
- if (b->x[i] != 0)
+ if (LL_COORD(b, i) != 0)
return (FALSE);
- if (b->x[i + b->dim] != 0)
+ if (UR_COORD(b, i) != 0)
return (FALSE);
}
}
/* Can't care less about the excess dimensions of (a), if any */
- for (i = 0; i < Min(a->dim, b->dim); i++)
+ for (i = 0; i < Min(DIM(a), DIM(b)); i++)
{
- if (Min(a->x[i], a->x[a->dim + i]) >
- Min(b->x[i], b->x[b->dim + i]))
+ if (Min(LL_COORD(a, i), UR_COORD(a, i)) >
+ Min(LL_COORD(b, i), UR_COORD(b, i)))
return (FALSE);
- if (Max(a->x[i], a->x[a->dim + i]) <
- Max(b->x[i], b->x[b->dim + i]))
+ if (Max(LL_COORD(a, i), UR_COORD(a, i)) <
+ Max(LL_COORD(b, i), UR_COORD(b, i)))
return (FALSE);
}
@@ -1175,7 +1163,7 @@ cube_overlap_v0(NDBOX *a, NDBOX *b)
return (FALSE);
/* swap the box pointers if needed */
- if (a->dim < b->dim)
+ if (DIM(a) < DIM(b))
{
NDBOX *tmp = b;
@@ -1184,22 +1172,20 @@ cube_overlap_v0(NDBOX *a, NDBOX *b)
}
/* compare within the dimensions of (b) */
- for (i = 0; i < b->dim; i++)
+ for (i = 0; i < DIM(b); i++)
{
- if (Min(a->x[i], a->x[a->dim + i]) >
- Max(b->x[i], b->x[b->dim + i]))
+ if (Min(LL_COORD(a, i), UR_COORD(a, i)) > Max(LL_COORD(b, i), UR_COORD(b, i)))
return (FALSE);
- if (Max(a->x[i], a->x[a->dim + i]) <
- Min(b->x[i], b->x[b->dim + i]))
+ if (Max(LL_COORD(a, i), UR_COORD(a, i)) < Min(LL_COORD(b, i), UR_COORD(b, i)))
return (FALSE);
}
/* compare to zero those dimensions in (a) absent in (b) */
- for (i = b->dim; i < a->dim; i++)
+ for (i = DIM(b); i < DIM(a); i++)
{
- if (Min(a->x[i], a->x[a->dim + i]) > 0)
+ if (Min(LL_COORD(a, i), UR_COORD(a, i)) > 0)
return (FALSE);
- if (Max(a->x[i], a->x[a->dim + i]) < 0)
+ if (Max(LL_COORD(a, i), UR_COORD(a, i)) < 0)
return (FALSE);
}
@@ -1238,7 +1224,7 @@ cube_distance(PG_FUNCTION_ARGS)
int i;
/* swap the box pointers if needed */
- if (a->dim < b->dim)
+ if (DIM(a) < DIM(b))
{
NDBOX *tmp = b;
@@ -1249,16 +1235,16 @@ cube_distance(PG_FUNCTION_ARGS)
distance = 0.0;
/* compute within the dimensions of (b) */
- for (i = 0; i < b->dim; i++)
+ for (i = 0; i < DIM(b); i++)
{
- d = distance_1D(a->x[i], a->x[i + a->dim], b->x[i], b->x[i + b->dim]);
+ d = distance_1D(LL_COORD(a, i), UR_COORD(a, i), LL_COORD(b, i), UR_COORD(b, i));
distance += d * d;
}
/* compute distance to zero for those dimensions in (a) absent in (b) */
- for (i = b->dim; i < a->dim; i++)
+ for (i = DIM(b); i < DIM(a); i++)
{
- d = distance_1D(a->x[i], a->x[i + a->dim], 0.0, 0.0);
+ d = distance_1D(LL_COORD(a, i), UR_COORD(a, i), 0.0, 0.0);
distance += d * d;
}
@@ -1295,18 +1281,35 @@ distance_1D(double a1, double a2, double b1, double b2)
Datum
cube_is_point(PG_FUNCTION_ARGS)
{
- NDBOX *a = PG_GETARG_NDBOX(0);
- int i,
- j;
+ NDBOX *cube = PG_GETARG_NDBOX(0);
+ bool result;
+
+ result = cube_is_point_internal(cube);
+ PG_FREE_IF_COPY(cube, 0);
+ PG_RETURN_BOOL(result);
+}
- for (i = 0, j = a->dim; i < a->dim; i++, j++)
+static bool
+cube_is_point_internal(NDBOX *cube)
+{
+ int i;
+
+ if (IS_POINT(cube))
+ return true;
+
+ /*
+ * Even if the point-flag is not set, all the lower-left coordinates might
+ * match the upper-right coordinates, so that the value is in fact a
+ * point. Such values don't arise with current code - the point flag is
+ * always set if appropriate - but they might be present on-disk in
+ * clusters upgraded from pre-9.4 versions.
+ */
+ for (i = 0; i < DIM(cube); i++)
{
- if (a->x[i] != a->x[j])
- PG_RETURN_BOOL(FALSE);
+ if (LL_COORD(cube, i) != UR_COORD(cube, i))
+ return false;
}
-
- PG_FREE_IF_COPY(a, 0);
- PG_RETURN_BOOL(TRUE);
+ return true;
}
/* Return dimensions in use in the data structure */
@@ -1314,7 +1317,7 @@ Datum
cube_dim(PG_FUNCTION_ARGS)
{
NDBOX *c = PG_GETARG_NDBOX(0);
- int dim = c->dim;
+ int dim = DIM(c);
PG_FREE_IF_COPY(c, 0);
PG_RETURN_INT32(dim);
@@ -1328,8 +1331,8 @@ cube_ll_coord(PG_FUNCTION_ARGS)
int n = PG_GETARG_INT16(1);
double result;
- if (c->dim >= n && n > 0)
- result = Min(c->x[n - 1], c->x[c->dim + n - 1]);
+ if (DIM(c) >= n && n > 0)
+ result = Min(LL_COORD(c, n - 1), UR_COORD(c, n - 1));
else
result = 0;
@@ -1345,8 +1348,8 @@ cube_ur_coord(PG_FUNCTION_ARGS)
int n = PG_GETARG_INT16(1);
double result;
- if (c->dim >= n && n > 0)
- result = Max(c->x[n - 1], c->x[c->dim + n - 1]);
+ if (DIM(c) >= n && n > 0)
+ result = Max(LL_COORD(c, n - 1), UR_COORD(c, n - 1));
else
result = 0;
@@ -1360,35 +1363,36 @@ cube_enlarge(PG_FUNCTION_ARGS)
{
NDBOX *a = PG_GETARG_NDBOX(0);
double r = PG_GETARG_FLOAT8(1);
- int4 n = PG_GETARG_INT32(2);
+ int32 n = PG_GETARG_INT32(2);
NDBOX *result;
int dim = 0;
int size;
int i,
- j,
- k;
+ j;
if (n > CUBE_MAX_DIM)
n = CUBE_MAX_DIM;
if (r > 0 && n > 0)
dim = n;
- if (a->dim > dim)
- dim = a->dim;
- size = offsetof(NDBOX, x[0]) +sizeof(double) * dim * 2;
+ if (DIM(a) > dim)
+ dim = DIM(a);
+
+ size = CUBE_SIZE(dim);
result = (NDBOX *) palloc0(size);
SET_VARSIZE(result, size);
- result->dim = dim;
- for (i = 0, j = dim, k = a->dim; i < a->dim; i++, j++, k++)
+ SET_DIM(result, dim);
+
+ for (i = 0, j = dim; i < DIM(a); i++, j++)
{
- if (a->x[i] >= a->x[k])
+ if (LL_COORD(a, i) >= UR_COORD(a, i))
{
- result->x[i] = a->x[k] - r;
- result->x[j] = a->x[i] + r;
+ result->x[i] = UR_COORD(a, i) - r;
+ result->x[j] = LL_COORD(a, i) + r;
}
else
{
- result->x[i] = a->x[i] - r;
- result->x[j] = a->x[k] + r;
+ result->x[i] = LL_COORD(a, i) - r;
+ result->x[j] = UR_COORD(a, i) + r;
}
if (result->x[i] > result->x[j])
{
@@ -1403,6 +1407,17 @@ cube_enlarge(PG_FUNCTION_ARGS)
result->x[j] = r;
}
+ /*
+ * Check if the result was in fact a point, and set the flag in the datum
+ * accordingly. (we don't bother to repalloc it smaller)
+ */
+ if (cube_is_point_internal(result))
+ {
+ size = POINT_SIZE(dim);
+ SET_VARSIZE(result, size);
+ SET_POINT_BIT(result);
+ }
+
PG_FREE_IF_COPY(a, 0);
PG_RETURN_NDBOX(result);
}
@@ -1415,11 +1430,12 @@ cube_f8(PG_FUNCTION_ARGS)
NDBOX *result;
int size;
- size = offsetof(NDBOX, x[0]) +sizeof(double) * 2;
+ size = POINT_SIZE(1);
result = (NDBOX *) palloc0(size);
SET_VARSIZE(result, size);
- result->dim = 1;
- result->x[0] = result->x[1] = x;
+ SET_DIM(result, 1);
+ SET_POINT_BIT(result);
+ result->x[0] = x;
PG_RETURN_NDBOX(result);
}
@@ -1433,12 +1449,24 @@ cube_f8_f8(PG_FUNCTION_ARGS)
NDBOX *result;
int size;
- size = offsetof(NDBOX, x[0]) +sizeof(double) * 2;
- result = (NDBOX *) palloc0(size);
- SET_VARSIZE(result, size);
- result->dim = 1;
- result->x[0] = x0;
- result->x[1] = x1;
+ if (x0 == x1)
+ {
+ size = POINT_SIZE(1);
+ result = (NDBOX *) palloc0(size);
+ SET_VARSIZE(result, size);
+ SET_DIM(result, 1);
+ SET_POINT_BIT(result);
+ result->x[0] = x0;
+ }
+ else
+ {
+ size = CUBE_SIZE(1);
+ result = (NDBOX *) palloc0(size);
+ SET_VARSIZE(result, size);
+ SET_DIM(result, 1);
+ result->x[0] = x0;
+ result->x[1] = x1;
+ }
PG_RETURN_NDBOX(result);
}
@@ -1448,25 +1476,39 @@ cube_f8_f8(PG_FUNCTION_ARGS)
Datum
cube_c_f8(PG_FUNCTION_ARGS)
{
- NDBOX *c = PG_GETARG_NDBOX(0);
+ NDBOX *cube = PG_GETARG_NDBOX(0);
double x = PG_GETARG_FLOAT8(1);
NDBOX *result;
int size;
int i;
- size = offsetof(NDBOX, x[0]) +sizeof(double) * (c->dim + 1) *2;
- result = (NDBOX *) palloc0(size);
- SET_VARSIZE(result, size);
- result->dim = c->dim + 1;
- for (i = 0; i < c->dim; i++)
+ if (IS_POINT(cube))
{
- result->x[i] = c->x[i];
- result->x[result->dim + i] = c->x[c->dim + i];
+ size = POINT_SIZE((DIM(cube) + 1));
+ result = (NDBOX *) palloc0(size);
+ SET_VARSIZE(result, size);
+ SET_DIM(result, DIM(cube) + 1);
+ SET_POINT_BIT(result);
+ for (i = 0; i < DIM(cube); i++)
+ result->x[i] = cube->x[i];
+ result->x[DIM(result) - 1] = x;
+ }
+ else
+ {
+ size = CUBE_SIZE((DIM(cube) + 1));
+ result = (NDBOX *) palloc0(size);
+ SET_VARSIZE(result, size);
+ SET_DIM(result, DIM(cube) + 1);
+ for (i = 0; i < DIM(cube); i++)
+ {
+ result->x[i] = cube->x[i];
+ result->x[DIM(result) + i] = cube->x[DIM(cube) + i];
+ }
+ result->x[DIM(result) - 1] = x;
+ result->x[2 * DIM(result) - 1] = x;
}
- result->x[result->dim - 1] = x;
- result->x[2 * result->dim - 1] = x;
- PG_FREE_IF_COPY(c, 0);
+ PG_FREE_IF_COPY(cube, 0);
PG_RETURN_NDBOX(result);
}
@@ -1474,25 +1516,39 @@ cube_c_f8(PG_FUNCTION_ARGS)
Datum
cube_c_f8_f8(PG_FUNCTION_ARGS)
{
- NDBOX *c = PG_GETARG_NDBOX(0);
+ NDBOX *cube = PG_GETARG_NDBOX(0);
double x1 = PG_GETARG_FLOAT8(1);
double x2 = PG_GETARG_FLOAT8(2);
NDBOX *result;
int size;
int i;
- size = offsetof(NDBOX, x[0]) +sizeof(double) * (c->dim + 1) *2;
- result = (NDBOX *) palloc0(size);
- SET_VARSIZE(result, size);
- result->dim = c->dim + 1;
- for (i = 0; i < c->dim; i++)
+ if (IS_POINT(cube) && (x1 == x2))
{
- result->x[i] = c->x[i];
- result->x[result->dim + i] = c->x[c->dim + i];
+ size = POINT_SIZE((DIM(cube) + 1));
+ result = (NDBOX *) palloc0(size);
+ SET_VARSIZE(result, size);
+ SET_DIM(result, DIM(cube) + 1);
+ SET_POINT_BIT(result);
+ for (i = 0; i < DIM(cube); i++)
+ result->x[i] = cube->x[i];
+ result->x[DIM(result) - 1] = x1;
+ }
+ else
+ {
+ size = CUBE_SIZE((DIM(cube) + 1));
+ result = (NDBOX *) palloc0(size);
+ SET_VARSIZE(result, size);
+ SET_DIM(result, DIM(cube) + 1);
+ for (i = 0; i < DIM(cube); i++)
+ {
+ result->x[i] = LL_COORD(cube, i);
+ result->x[DIM(result) + i] = UR_COORD(cube, i);
+ }
+ result->x[DIM(result) - 1] = x1;
+ result->x[2 * DIM(result) - 1] = x2;
}
- result->x[result->dim - 1] = x1;
- result->x[2 * result->dim - 1] = x2;
- PG_FREE_IF_COPY(c, 0);
+ PG_FREE_IF_COPY(cube, 0);
PG_RETURN_NDBOX(result);
}
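
A note on the union/intersection rewrite above: cube_union_v0() and cube_inter() treat dimensions present only in the higher-dimensional argument as if the other argument held the single coordinate 0 there, and afterwards collapse the result to the point representation when all corners coincide. The following standalone sketch illustrates only the zero-padding rule, assuming a plain array layout (lower-left coordinates followed by upper-right ones); it is an illustration, not the extension's code, and box_union/MIN/MAX are names invented for the example.

/* Sketch of the dimension-padding rule used by cube_union_v0() above:
 * dimensions missing from the smaller box are treated as the single
 * coordinate 0, so the union must cover 0 in those dimensions.
 */
#include <stdio.h>

#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))

/*
 * 'a' has dim_a dimensions, 'b' has dim_b <= dim_a dimensions.  Both are
 * laid out as lower-left coordinates followed by upper-right coordinates.
 * The result uses dim_a dimensions.
 */
static void
box_union(const double *a, int dim_a, const double *b, int dim_b,
		  double *out)
{
	int			i;

	for (i = 0; i < dim_b; i++)
	{
		out[i] = MIN(MIN(a[i], a[i + dim_a]), MIN(b[i], b[i + dim_b]));
		out[i + dim_a] = MAX(MAX(a[i], a[i + dim_a]), MAX(b[i], b[i + dim_b]));
	}
	for (; i < dim_a; i++)
	{
		/* pad the missing dimensions of 'b' with zero */
		out[i] = MIN(0.0, MIN(a[i], a[i + dim_a]));
		out[i + dim_a] = MAX(0.0, MAX(a[i], a[i + dim_a]));
	}
}

int
main(void)
{
	double		a[] = {5.0, 6.0, 8.0, 9.0};	/* (5,6),(8,9) */
	double		b[] = {1.0, 3.0};			/* (1),(3): one dimension only */
	double		out[4];

	box_union(a, 2, b, 1, out);
	printf("(%g, %g),(%g, %g)\n", out[0], out[1], out[2], out[3]);	/* (1, 0),(8, 9) */
	return 0;
}

The regression tests added below exercise the same rule, e.g. cube_union('(1,2),(3,4)'::cube, '(5,6,7),(8,9,10)'::cube) returning (1, 2, 0),(8, 9, 10).
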
diff --git a/contrib/cube/cubedata.h b/contrib/cube/cubedata.h
index fd0c26a381..5d44e11081 100644
--- a/contrib/cube/cubedata.h
+++ b/contrib/cube/cubedata.h
@@ -4,11 +4,46 @@
typedef struct NDBOX
{
- int32 vl_len_; /* varlena header (do not touch directly!) */
- unsigned int dim;
+ /* varlena header (do not touch directly!) */
+ int32 vl_len_;
+
+ /*----------
+ * Header contains info about NDBOX. For binary compatibility with old
+ * versions, it is defined as "unsigned int".
+ *
+ * Following information is stored:
+ *
+ * bits 0-7 : number of cube dimensions;
+ * bits 8-30 : unused, initialize to zero;
+ * bit 31 : point flag. If set, the upper right coordinates are not
+ * stored, and are implicitly the same as the lower left
+ * coordinates.
+ *----------
+ */
+ unsigned int header;
+
+ /*
+ * Variable length array. The lower left coordinates for each dimension
+ * come first, followed by upper right coordinates unless the point flag
+ * is set.
+ */
double x[1];
} NDBOX;
+#define POINT_BIT 0x80000000
+#define DIM_MASK 0x7fffffff
+
+#define IS_POINT(cube) ( ((cube)->header & POINT_BIT) != 0 )
+#define SET_POINT_BIT(cube) ( (cube)->header |= POINT_BIT )
+#define DIM(cube) ( (cube)->header & DIM_MASK )
+#define SET_DIM(cube, _dim) ( (cube)->header = ((cube)->header & ~DIM_MASK) | (_dim) )
+
+#define LL_COORD(cube, i) ( (cube)->x[i] )
+#define UR_COORD(cube, i) ( IS_POINT(cube) ? (cube)->x[i] : (cube)->x[(i) + DIM(cube)] )
+
+#define POINT_SIZE(_dim) (offsetof(NDBOX, x[0]) + sizeof(double)*(_dim))
+#define CUBE_SIZE(_dim) (offsetof(NDBOX, x[0]) + sizeof(double)*(_dim)*2)
+
#define DatumGetNDBOX(x) ((NDBOX*)DatumGetPointer(x))
#define PG_GETARG_NDBOX(x) DatumGetNDBOX( PG_DETOAST_DATUM(PG_GETARG_DATUM(x)) )
#define PG_RETURN_NDBOX(x) PG_RETURN_POINTER(x)
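
The rewritten NDBOX above packs both the dimension count and the point flag into the single unsigned int header, as the comment block describes. A minimal standalone sketch of that encoding, using only the POINT_BIT/DIM_MASK values defined above (the program itself is illustrative and not part of the extension):

/* Standalone illustration of the header encoding used by the new NDBOX
 * layout above: bit 31 is the point flag, the low bits hold the number
 * of dimensions.
 */
#include <stdio.h>

#define POINT_BIT 0x80000000u
#define DIM_MASK  0x7fffffffu

int
main(void)
{
	unsigned int header = 0;

	/* SET_DIM(cube, 3): store the dimension count in the low bits */
	header = (header & ~DIM_MASK) | 3;
	/* SET_POINT_BIT(cube): mark the value as a point */
	header |= POINT_BIT;

	/* DIM(cube) and IS_POINT(cube) read the fields back */
	printf("dim = %u, is_point = %d\n",
		   header & DIM_MASK,
		   (header & POINT_BIT) != 0);

	return 0;
}

Because the dimension count is masked rather than shifted, a header written by the old struct (which held only the dimension count) still reads back correctly and has the point flag clear, which is how the on-disk format stays binary-compatible with cubes written by pre-9.4 versions (see the cube_is_point_internal() comment above).
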
diff --git a/contrib/cube/cubeparse.y b/contrib/cube/cubeparse.y
index d7205b824c..0baee8e132 100644
--- a/contrib/cube/cubeparse.y
+++ b/contrib/cube/cubeparse.y
@@ -175,11 +175,12 @@ write_box(unsigned int dim, char *str1, char *str2)
NDBOX *bp;
char *s;
int i;
- int size = offsetof(NDBOX, x[0]) + sizeof(double) * dim * 2;
+ int size = CUBE_SIZE(dim);
+ bool point = true;
bp = palloc0(size);
SET_VARSIZE(bp, size);
- bp->dim = dim;
+ SET_DIM(bp, dim);
s = str1;
bp->x[i=0] = strtod(s, NULL);
@@ -191,10 +192,28 @@ write_box(unsigned int dim, char *str1, char *str2)
s = str2;
bp->x[i=dim] = strtod(s, NULL);
+ if (bp->x[dim] != bp->x[0])
+ point = false;
while ((s = strchr(s, ',')) != NULL)
{
s++; i++;
bp->x[i] = strtod(s, NULL);
+ if (bp->x[i] != bp->x[i-dim])
+ point = false;
+ }
+
+ if (point)
+ {
+ /*
+ * The value turned out to be a point, ie. all the upper-right
+ * coordinates were equal to the lower-left coordinates. Resize the
+	 * cube we constructed. Note: we don't bother to repalloc() it
+ * smaller, it's unlikely that the tiny amount of memory free'd that
+ * way would be useful.
+ */
+ size = POINT_SIZE(dim);
+ SET_VARSIZE(bp, size);
+ SET_POINT_BIT(bp);
}
return(bp);
@@ -203,31 +222,29 @@ write_box(unsigned int dim, char *str1, char *str2)
static NDBOX *
write_point_as_box(char *str, int dim)
{
- NDBOX *bp;
- int i,
+ NDBOX *bp;
+ int i,
size;
- double x;
- char *s = str;
-
- size = offsetof(NDBOX, x[0]) + sizeof(double) * dim * 2;
-
- bp = palloc0(size);
- SET_VARSIZE(bp, size);
- bp->dim = dim;
-
- i = 0;
- x = strtod(s, NULL);
- bp->x[0] = x;
- bp->x[dim] = x;
- while ((s = strchr(s, ',')) != NULL)
- {
- s++; i++;
- x = strtod(s, NULL);
- bp->x[i] = x;
- bp->x[i+dim] = x;
- }
-
- return(bp);
+ double x;
+ char *s = str;
+
+ size = POINT_SIZE(dim);
+ bp = palloc0(size);
+ SET_VARSIZE(bp, size);
+ SET_DIM(bp, dim);
+ SET_POINT_BIT(bp);
+
+ i = 0;
+ x = strtod(s, NULL);
+ bp->x[0] = x;
+ while ((s = strchr(s, ',')) != NULL)
+ {
+ s++; i++;
+ x = strtod(s, NULL);
+ bp->x[i] = x;
+ }
+
+ return(bp);
}
#include "cubescan.c"
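
write_box() above now allocates the full two-corner representation first and, once parsing shows every upper-right coordinate equal to its lower-left counterpart, simply rewrites the varlena size to POINT_SIZE(dim) and sets the point bit instead of reallocating. A standalone sketch of that detection step, assuming a plain coordinate array (corners_are_equal is a name made up for the example):

/* Sketch of the point-detection step in write_box() above: after both
 * corners are parsed, compare each upper-right coordinate with its
 * lower-left counterpart; if all match, the value can be stored in the
 * smaller point representation.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool
corners_are_equal(const double *x, int dim)
{
	int			i;

	for (i = 0; i < dim; i++)
	{
		if (x[i + dim] != x[i])
			return false;
	}
	return true;
}

int
main(void)
{
	double		coords[] = {1.0, 2.0, 5.0, 1.0, 2.0, 5.0};	/* (1,2,5),(1,2,5) */
	int			dim = 3;

	/* mirrors CUBE_SIZE(dim) vs POINT_SIZE(dim): two corners vs one */
	size_t		full_size = sizeof(double) * dim * 2;
	size_t		stored = corners_are_equal(coords, dim)
		? sizeof(double) * dim : full_size;

	printf("stored %zu of %zu coordinate bytes\n", stored, full_size);
	return 0;
}

The cube_in regression test added below shows the visible effect: SELECT cube('(1,2),(1,2)') now prints simply (1, 2).
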
diff --git a/contrib/cube/cubescan.l b/contrib/cube/cubescan.l
index 17b39fd6f4..e383b59d3d 100644
--- a/contrib/cube/cubescan.l
+++ b/contrib/cube/cubescan.l
@@ -11,7 +11,13 @@
/* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */
#undef fprintf
-#define fprintf(file, fmt, msg) ereport(ERROR, (errmsg_internal("%s", msg)))
+#define fprintf(file, fmt, msg) fprintf_to_ereport(fmt, msg)
+
+static void
+fprintf_to_ereport(const char *fmt, const char *msg)
+{
+ ereport(ERROR, (errmsg_internal("%s", msg)));
+}
/* Handles to the buffer that the lexer uses internally */
static YY_BUFFER_STATE scanbufhandle;
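
The cubescan.l change above routes flex's internal fprintf(stderr, ...) calls through a real helper function, so fatal scanner errors become ereport(ERROR) calls instead of writes to stderr followed by exit(). The same macro-redirection pattern can be sketched outside PostgreSQL with a stand-in handler (fprintf_to_handler below mirrors the helper's shape but simply prints and exits; it is illustrative only):

/* Sketch of the macro redirection used in cubescan.l above: code generated
 * to call fprintf(file, fmt, msg) ends up in our own handler, which can
 * report the error however we like (here: stderr + exit; in the scanner:
 * ereport(ERROR, ...)).
 */
#include <stdio.h>
#include <stdlib.h>

static void
fprintf_to_handler(const char *fmt, const char *msg)
{
	/* stand-in for ereport(ERROR, (errmsg_internal("%s", msg))) */
	fprintf(stderr, "scanner error: ");
	fprintf(stderr, fmt, msg);
	exit(1);
}

/* from here on, code that "calls fprintf" with three arguments hits the handler */
#undef fprintf
#define fprintf(file, fmt, msg) fprintf_to_handler(fmt, msg)

int
main(void)
{
	/* a generated scanner would do something like this on a fatal error */
	fprintf(stderr, "%s\n", "flex scanner jammed");
	return 0;
}
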
diff --git a/contrib/cube/expected/cube.out b/contrib/cube/expected/cube.out
index 05cf3eae3c..ca9555ed4b 100644
--- a/contrib/cube/expected/cube.out
+++ b/contrib/cube/expected/cube.out
@@ -473,8 +473,85 @@ SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
+SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]);
+ cube_subset
+--------------
+ (5, 3, 1, 1)
+(1 row)
+
SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
+SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]);
+ERROR: Index out of bounds
+--
+-- Test point processing
+--
+SELECT cube('(1,2),(1,2)'); -- cube_in
+ cube
+--------
+ (1, 2)
+(1 row)
+
+SELECT cube('{0,1,2}'::float[], '{0,1,2}'::float[]); -- cube_a_f8_f8
+ cube
+-----------
+ (0, 1, 2)
+(1 row)
+
+SELECT cube('{5,6,7,8}'::float[]); -- cube_a_f8
+ cube
+--------------
+ (5, 6, 7, 8)
+(1 row)
+
+SELECT cube(1.37); -- cube_f8
+ cube
+--------
+ (1.37)
+(1 row)
+
+SELECT cube(1.37, 1.37); -- cube_f8_f8
+ cube
+--------
+ (1.37)
+(1 row)
+
+SELECT cube(cube(1,1), 42); -- cube_c_f8
+ cube
+---------
+ (1, 42)
+(1 row)
+
+SELECT cube(cube(1,2), 42); -- cube_c_f8
+ cube
+-----------------
+ (1, 42),(2, 42)
+(1 row)
+
+SELECT cube(cube(1,1), 42, 42); -- cube_c_f8_f8
+ cube
+---------
+ (1, 42)
+(1 row)
+
+SELECT cube(cube(1,1), 42, 24); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(1, 24)
+(1 row)
+
+SELECT cube(cube(1,2), 42, 42); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(2, 42)
+(1 row)
+
+SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(2, 24)
+(1 row)
+
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
--
@@ -878,6 +955,24 @@ SELECT cube_distance('(0)'::cube,'(.3,.4)'::cube);
0.5
(1 row)
+SELECT cube_distance('(2,3,4)'::cube,'(2,3,4)'::cube);
+ cube_distance
+---------------
+ 0
+(1 row)
+
+SELECT cube_distance('(42,42,42,42)'::cube,'(137,137,137,137)'::cube);
+ cube_distance
+---------------
+ 190
+(1 row)
+
+SELECT cube_distance('(42,42,42)'::cube,'(137,137)'::cube);
+ cube_distance
+------------------
+ 140.762210837994
+(1 row)
+
-- Test of cube function (text to cube)
--
SELECT cube('(1,1.2)'::text);
@@ -912,6 +1007,18 @@ SELECT cube_dim('(0,0,0)'::cube);
3
(1 row)
+SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
+ cube_dim
+----------
+ 3
+(1 row)
+
+SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
+ cube_dim
+----------
+ 5
+(1 row)
+
-- Test of cube_ll_coord function (retrieves LL coodinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
@@ -932,6 +1039,42 @@ SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 3);
0
(1 row)
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 1);
+ cube_ll_coord
+---------------
+ 1
+(1 row)
+
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 2);
+ cube_ll_coord
+---------------
+ 2
+(1 row)
+
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 3);
+ cube_ll_coord
+---------------
+ 0
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 1);
+ cube_ll_coord
+---------------
+ 42
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 2);
+ cube_ll_coord
+---------------
+ 137
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 3);
+ cube_ll_coord
+---------------
+ 0
+(1 row)
+
-- Test of cube_ur_coord function (retrieves UR coodinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
@@ -952,6 +1095,42 @@ SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 3);
0
(1 row)
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 1);
+ cube_ur_coord
+---------------
+ 1
+(1 row)
+
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 2);
+ cube_ur_coord
+---------------
+ 2
+(1 row)
+
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 3);
+ cube_ur_coord
+---------------
+ 0
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 1);
+ cube_ur_coord
+---------------
+ 42
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 2);
+ cube_ur_coord
+---------------
+ 137
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 3);
+ cube_ur_coord
+---------------
+ 0
+(1 row)
+
-- Test of cube_is_point
--
SELECT cube_is_point('(0)'::cube);
@@ -1100,6 +1279,108 @@ SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);
(-0.5, 1),(-0.5, 4)
(1 row)
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -23, 5);
+ cube_enlarge
+--------------
+ (42, 0, 0)
+(1 row)
+
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -24, 5);
+ cube_enlarge
+--------------
+ (42, 0, 0)
+(1 row)
+
+-- Test of cube_union (MBR for two cubes)
+--
+SELECT cube_union('(1,2),(3,4)'::cube, '(5,6,7),(8,9,10)'::cube);
+ cube_union
+----------------------
+ (1, 2, 0),(8, 9, 10)
+(1 row)
+
+SELECT cube_union('(1,2)'::cube, '(4,2,0,0)'::cube);
+ cube_union
+---------------------------
+ (1, 2, 0, 0),(4, 2, 0, 0)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(4,2),(4,2)'::cube);
+ cube_union
+---------------
+ (1, 2),(4, 2)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2),(1,2)'::cube);
+ cube_union
+------------
+ (1, 2)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2,0),(1,2,0)'::cube);
+ cube_union
+------------
+ (1, 2, 0)
+(1 row)
+
+-- Test of cube_inter
+--
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (16,15)'::cube); -- intersects
+ cube_inter
+-----------------
+ (3, 4),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (6,5)'::cube); -- includes
+ cube_inter
+---------------
+ (3, 4),(6, 5)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(13,14), (16,15)'::cube); -- no intersection
+ cube_inter
+-------------------
+ (13, 14),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,14), (16,15)'::cube); -- no intersection, but one dimension intersects
+ cube_inter
+------------------
+ (3, 14),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(10,11), (16,15)'::cube); -- point intersection
+ cube_inter
+------------
+ (10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2,3)'::cube, '(1,2,3)'::cube); -- point args
+ cube_inter
+------------
+ (1, 2, 3)
+(1 row)
+
+SELECT cube_inter('(1,2,3)'::cube, '(5,6,3)'::cube); -- point args
+ cube_inter
+---------------------
+ (5, 6, 3),(1, 2, 3)
+(1 row)
+
+-- Test of cube_size
+--
+SELECT cube_size('(4,8),(15,16)'::cube);
+ cube_size
+-----------
+ 88
+(1 row)
+
+SELECT cube_size('(42,137)'::cube);
+ cube_size
+-----------
+ 0
+(1 row)
+
-- Load some example data and build the index
--
CREATE TABLE test_cube (c cube);
diff --git a/contrib/cube/expected/cube_1.out b/contrib/cube/expected/cube_1.out
index fefebf5fb9..c07d61d0b0 100644
--- a/contrib/cube/expected/cube_1.out
+++ b/contrib/cube/expected/cube_1.out
@@ -473,8 +473,85 @@ SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
+SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]);
+ cube_subset
+--------------
+ (5, 3, 1, 1)
+(1 row)
+
SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
+SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]);
+ERROR: Index out of bounds
+--
+-- Test point processing
+--
+SELECT cube('(1,2),(1,2)'); -- cube_in
+ cube
+--------
+ (1, 2)
+(1 row)
+
+SELECT cube('{0,1,2}'::float[], '{0,1,2}'::float[]); -- cube_a_f8_f8
+ cube
+-----------
+ (0, 1, 2)
+(1 row)
+
+SELECT cube('{5,6,7,8}'::float[]); -- cube_a_f8
+ cube
+--------------
+ (5, 6, 7, 8)
+(1 row)
+
+SELECT cube(1.37); -- cube_f8
+ cube
+--------
+ (1.37)
+(1 row)
+
+SELECT cube(1.37, 1.37); -- cube_f8_f8
+ cube
+--------
+ (1.37)
+(1 row)
+
+SELECT cube(cube(1,1), 42); -- cube_c_f8
+ cube
+---------
+ (1, 42)
+(1 row)
+
+SELECT cube(cube(1,2), 42); -- cube_c_f8
+ cube
+-----------------
+ (1, 42),(2, 42)
+(1 row)
+
+SELECT cube(cube(1,1), 42, 42); -- cube_c_f8_f8
+ cube
+---------
+ (1, 42)
+(1 row)
+
+SELECT cube(cube(1,1), 42, 24); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(1, 24)
+(1 row)
+
+SELECT cube(cube(1,2), 42, 42); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(2, 42)
+(1 row)
+
+SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(2, 24)
+(1 row)
+
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
--
@@ -878,6 +955,24 @@ SELECT cube_distance('(0)'::cube,'(.3,.4)'::cube);
0.5
(1 row)
+SELECT cube_distance('(2,3,4)'::cube,'(2,3,4)'::cube);
+ cube_distance
+---------------
+ 0
+(1 row)
+
+SELECT cube_distance('(42,42,42,42)'::cube,'(137,137,137,137)'::cube);
+ cube_distance
+---------------
+ 190
+(1 row)
+
+SELECT cube_distance('(42,42,42)'::cube,'(137,137)'::cube);
+ cube_distance
+------------------
+ 140.762210837994
+(1 row)
+
-- Test of cube function (text to cube)
--
SELECT cube('(1,1.2)'::text);
@@ -912,6 +1007,18 @@ SELECT cube_dim('(0,0,0)'::cube);
3
(1 row)
+SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
+ cube_dim
+----------
+ 3
+(1 row)
+
+SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
+ cube_dim
+----------
+ 5
+(1 row)
+
-- Test of cube_ll_coord function (retrieves LL coodinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
@@ -932,6 +1039,42 @@ SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 3);
0
(1 row)
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 1);
+ cube_ll_coord
+---------------
+ 1
+(1 row)
+
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 2);
+ cube_ll_coord
+---------------
+ 2
+(1 row)
+
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 3);
+ cube_ll_coord
+---------------
+ 0
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 1);
+ cube_ll_coord
+---------------
+ 42
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 2);
+ cube_ll_coord
+---------------
+ 137
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 3);
+ cube_ll_coord
+---------------
+ 0
+(1 row)
+
-- Test of cube_ur_coord function (retrieves UR coodinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
@@ -952,6 +1095,42 @@ SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 3);
0
(1 row)
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 1);
+ cube_ur_coord
+---------------
+ 1
+(1 row)
+
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 2);
+ cube_ur_coord
+---------------
+ 2
+(1 row)
+
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 3);
+ cube_ur_coord
+---------------
+ 0
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 1);
+ cube_ur_coord
+---------------
+ 42
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 2);
+ cube_ur_coord
+---------------
+ 137
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 3);
+ cube_ur_coord
+---------------
+ 0
+(1 row)
+
-- Test of cube_is_point
--
SELECT cube_is_point('(0)'::cube);
@@ -1100,6 +1279,108 @@ SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);
(-0.5, 1),(-0.5, 4)
(1 row)
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -23, 5);
+ cube_enlarge
+--------------
+ (42, 0, 0)
+(1 row)
+
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -24, 5);
+ cube_enlarge
+--------------
+ (42, 0, 0)
+(1 row)
+
+-- Test of cube_union (MBR for two cubes)
+--
+SELECT cube_union('(1,2),(3,4)'::cube, '(5,6,7),(8,9,10)'::cube);
+ cube_union
+----------------------
+ (1, 2, 0),(8, 9, 10)
+(1 row)
+
+SELECT cube_union('(1,2)'::cube, '(4,2,0,0)'::cube);
+ cube_union
+---------------------------
+ (1, 2, 0, 0),(4, 2, 0, 0)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(4,2),(4,2)'::cube);
+ cube_union
+---------------
+ (1, 2),(4, 2)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2),(1,2)'::cube);
+ cube_union
+------------
+ (1, 2)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2,0),(1,2,0)'::cube);
+ cube_union
+------------
+ (1, 2, 0)
+(1 row)
+
+-- Test of cube_inter
+--
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (16,15)'::cube); -- intersects
+ cube_inter
+-----------------
+ (3, 4),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (6,5)'::cube); -- includes
+ cube_inter
+---------------
+ (3, 4),(6, 5)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(13,14), (16,15)'::cube); -- no intersection
+ cube_inter
+-------------------
+ (13, 14),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,14), (16,15)'::cube); -- no intersection, but one dimension intersects
+ cube_inter
+------------------
+ (3, 14),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(10,11), (16,15)'::cube); -- point intersection
+ cube_inter
+------------
+ (10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2,3)'::cube, '(1,2,3)'::cube); -- point args
+ cube_inter
+------------
+ (1, 2, 3)
+(1 row)
+
+SELECT cube_inter('(1,2,3)'::cube, '(5,6,3)'::cube); -- point args
+ cube_inter
+---------------------
+ (5, 6, 3),(1, 2, 3)
+(1 row)
+
+-- Test of cube_size
+--
+SELECT cube_size('(4,8),(15,16)'::cube);
+ cube_size
+-----------
+ 88
+(1 row)
+
+SELECT cube_size('(42,137)'::cube);
+ cube_size
+-----------
+ 0
+(1 row)
+
-- Load some example data and build the index
--
CREATE TABLE test_cube (c cube);
diff --git a/contrib/cube/expected/cube_2.out b/contrib/cube/expected/cube_2.out
index 6d15d63570..3767d0ef9b 100644
--- a/contrib/cube/expected/cube_2.out
+++ b/contrib/cube/expected/cube_2.out
@@ -473,8 +473,85 @@ SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
+SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]);
+ cube_subset
+--------------
+ (5, 3, 1, 1)
+(1 row)
+
SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
+SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]);
+ERROR: Index out of bounds
+--
+-- Test point processing
+--
+SELECT cube('(1,2),(1,2)'); -- cube_in
+ cube
+--------
+ (1, 2)
+(1 row)
+
+SELECT cube('{0,1,2}'::float[], '{0,1,2}'::float[]); -- cube_a_f8_f8
+ cube
+-----------
+ (0, 1, 2)
+(1 row)
+
+SELECT cube('{5,6,7,8}'::float[]); -- cube_a_f8
+ cube
+--------------
+ (5, 6, 7, 8)
+(1 row)
+
+SELECT cube(1.37); -- cube_f8
+ cube
+--------
+ (1.37)
+(1 row)
+
+SELECT cube(1.37, 1.37); -- cube_f8_f8
+ cube
+--------
+ (1.37)
+(1 row)
+
+SELECT cube(cube(1,1), 42); -- cube_c_f8
+ cube
+---------
+ (1, 42)
+(1 row)
+
+SELECT cube(cube(1,2), 42); -- cube_c_f8
+ cube
+-----------------
+ (1, 42),(2, 42)
+(1 row)
+
+SELECT cube(cube(1,1), 42, 42); -- cube_c_f8_f8
+ cube
+---------
+ (1, 42)
+(1 row)
+
+SELECT cube(cube(1,1), 42, 24); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(1, 24)
+(1 row)
+
+SELECT cube(cube(1,2), 42, 42); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(2, 42)
+(1 row)
+
+SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(2, 24)
+(1 row)
+
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
--
@@ -878,6 +955,24 @@ SELECT cube_distance('(0)'::cube,'(.3,.4)'::cube);
0.5
(1 row)
+SELECT cube_distance('(2,3,4)'::cube,'(2,3,4)'::cube);
+ cube_distance
+---------------
+ 0
+(1 row)
+
+SELECT cube_distance('(42,42,42,42)'::cube,'(137,137,137,137)'::cube);
+ cube_distance
+---------------
+ 190
+(1 row)
+
+SELECT cube_distance('(42,42,42)'::cube,'(137,137)'::cube);
+ cube_distance
+------------------
+ 140.762210837994
+(1 row)
+
-- Test of cube function (text to cube)
--
SELECT cube('(1,1.2)'::text);
@@ -912,6 +1007,18 @@ SELECT cube_dim('(0,0,0)'::cube);
3
(1 row)
+SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
+ cube_dim
+----------
+ 3
+(1 row)
+
+SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
+ cube_dim
+----------
+ 5
+(1 row)
+
-- Test of cube_ll_coord function (retrieves LL coodinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
@@ -932,6 +1039,42 @@ SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 3);
0
(1 row)
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 1);
+ cube_ll_coord
+---------------
+ 1
+(1 row)
+
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 2);
+ cube_ll_coord
+---------------
+ 2
+(1 row)
+
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 3);
+ cube_ll_coord
+---------------
+ 0
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 1);
+ cube_ll_coord
+---------------
+ 42
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 2);
+ cube_ll_coord
+---------------
+ 137
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 3);
+ cube_ll_coord
+---------------
+ 0
+(1 row)
+
-- Test of cube_ur_coord function (retrieves UR coodinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
@@ -952,6 +1095,42 @@ SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 3);
0
(1 row)
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 1);
+ cube_ur_coord
+---------------
+ 1
+(1 row)
+
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 2);
+ cube_ur_coord
+---------------
+ 2
+(1 row)
+
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 3);
+ cube_ur_coord
+---------------
+ 0
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 1);
+ cube_ur_coord
+---------------
+ 42
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 2);
+ cube_ur_coord
+---------------
+ 137
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 3);
+ cube_ur_coord
+---------------
+ 0
+(1 row)
+
-- Test of cube_is_point
--
SELECT cube_is_point('(0)'::cube);
@@ -1100,6 +1279,108 @@ SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);
(-0.5, 1),(-0.5, 4)
(1 row)
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -23, 5);
+ cube_enlarge
+--------------
+ (42, 0, 0)
+(1 row)
+
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -24, 5);
+ cube_enlarge
+--------------
+ (42, 0, 0)
+(1 row)
+
+-- Test of cube_union (MBR for two cubes)
+--
+SELECT cube_union('(1,2),(3,4)'::cube, '(5,6,7),(8,9,10)'::cube);
+ cube_union
+----------------------
+ (1, 2, 0),(8, 9, 10)
+(1 row)
+
+SELECT cube_union('(1,2)'::cube, '(4,2,0,0)'::cube);
+ cube_union
+---------------------------
+ (1, 2, 0, 0),(4, 2, 0, 0)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(4,2),(4,2)'::cube);
+ cube_union
+---------------
+ (1, 2),(4, 2)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2),(1,2)'::cube);
+ cube_union
+------------
+ (1, 2)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2,0),(1,2,0)'::cube);
+ cube_union
+------------
+ (1, 2, 0)
+(1 row)
+
+-- Test of cube_inter
+--
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (16,15)'::cube); -- intersects
+ cube_inter
+-----------------
+ (3, 4),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (6,5)'::cube); -- includes
+ cube_inter
+---------------
+ (3, 4),(6, 5)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(13,14), (16,15)'::cube); -- no intersection
+ cube_inter
+-------------------
+ (13, 14),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,14), (16,15)'::cube); -- no intersection, but one dimension intersects
+ cube_inter
+------------------
+ (3, 14),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(10,11), (16,15)'::cube); -- point intersection
+ cube_inter
+------------
+ (10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2,3)'::cube, '(1,2,3)'::cube); -- point args
+ cube_inter
+------------
+ (1, 2, 3)
+(1 row)
+
+SELECT cube_inter('(1,2,3)'::cube, '(5,6,3)'::cube); -- point args
+ cube_inter
+---------------------
+ (5, 6, 3),(1, 2, 3)
+(1 row)
+
+-- Test of cube_size
+--
+SELECT cube_size('(4,8),(15,16)'::cube);
+ cube_size
+-----------
+ 88
+(1 row)
+
+SELECT cube_size('(42,137)'::cube);
+ cube_size
+-----------
+ 0
+(1 row)
+
-- Load some example data and build the index
--
CREATE TABLE test_cube (c cube);
diff --git a/contrib/cube/expected/cube_3.out b/contrib/cube/expected/cube_3.out
index 22b4e1f207..2aa42beb86 100644
--- a/contrib/cube/expected/cube_3.out
+++ b/contrib/cube/expected/cube_3.out
@@ -473,8 +473,85 @@ SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
(5, 3, 1, 1),(8, 7, 6, 6)
(1 row)
+SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]);
+ cube_subset
+--------------
+ (5, 3, 1, 1)
+(1 row)
+
SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
ERROR: Index out of bounds
+SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]);
+ERROR: Index out of bounds
+--
+-- Test point processing
+--
+SELECT cube('(1,2),(1,2)'); -- cube_in
+ cube
+--------
+ (1, 2)
+(1 row)
+
+SELECT cube('{0,1,2}'::float[], '{0,1,2}'::float[]); -- cube_a_f8_f8
+ cube
+-----------
+ (0, 1, 2)
+(1 row)
+
+SELECT cube('{5,6,7,8}'::float[]); -- cube_a_f8
+ cube
+--------------
+ (5, 6, 7, 8)
+(1 row)
+
+SELECT cube(1.37); -- cube_f8
+ cube
+--------
+ (1.37)
+(1 row)
+
+SELECT cube(1.37, 1.37); -- cube_f8_f8
+ cube
+--------
+ (1.37)
+(1 row)
+
+SELECT cube(cube(1,1), 42); -- cube_c_f8
+ cube
+---------
+ (1, 42)
+(1 row)
+
+SELECT cube(cube(1,2), 42); -- cube_c_f8
+ cube
+-----------------
+ (1, 42),(2, 42)
+(1 row)
+
+SELECT cube(cube(1,1), 42, 42); -- cube_c_f8_f8
+ cube
+---------
+ (1, 42)
+(1 row)
+
+SELECT cube(cube(1,1), 42, 24); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(1, 24)
+(1 row)
+
+SELECT cube(cube(1,2), 42, 42); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(2, 42)
+(1 row)
+
+SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8
+ cube
+-----------------
+ (1, 42),(2, 24)
+(1 row)
+
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
--
@@ -878,6 +955,24 @@ SELECT cube_distance('(0)'::cube,'(.3,.4)'::cube);
0.5
(1 row)
+SELECT cube_distance('(2,3,4)'::cube,'(2,3,4)'::cube);
+ cube_distance
+---------------
+ 0
+(1 row)
+
+SELECT cube_distance('(42,42,42,42)'::cube,'(137,137,137,137)'::cube);
+ cube_distance
+---------------
+ 190
+(1 row)
+
+SELECT cube_distance('(42,42,42)'::cube,'(137,137)'::cube);
+ cube_distance
+------------------
+ 140.762210837994
+(1 row)
+
-- Test of cube function (text to cube)
--
SELECT cube('(1,1.2)'::text);
@@ -912,6 +1007,18 @@ SELECT cube_dim('(0,0,0)'::cube);
3
(1 row)
+SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
+ cube_dim
+----------
+ 3
+(1 row)
+
+SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
+ cube_dim
+----------
+ 5
+(1 row)
+
-- Test of cube_ll_coord function (retrieves LL coodinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
@@ -932,6 +1039,42 @@ SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 3);
0
(1 row)
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 1);
+ cube_ll_coord
+---------------
+ 1
+(1 row)
+
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 2);
+ cube_ll_coord
+---------------
+ 2
+(1 row)
+
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 3);
+ cube_ll_coord
+---------------
+ 0
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 1);
+ cube_ll_coord
+---------------
+ 42
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 2);
+ cube_ll_coord
+---------------
+ 137
+(1 row)
+
+SELECT cube_ll_coord('(42,137)'::cube, 3);
+ cube_ll_coord
+---------------
+ 0
+(1 row)
+
-- Test of cube_ur_coord function (retrieves UR coodinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
@@ -952,6 +1095,42 @@ SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 3);
0
(1 row)
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 1);
+ cube_ur_coord
+---------------
+ 1
+(1 row)
+
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 2);
+ cube_ur_coord
+---------------
+ 2
+(1 row)
+
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 3);
+ cube_ur_coord
+---------------
+ 0
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 1);
+ cube_ur_coord
+---------------
+ 42
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 2);
+ cube_ur_coord
+---------------
+ 137
+(1 row)
+
+SELECT cube_ur_coord('(42,137)'::cube, 3);
+ cube_ur_coord
+---------------
+ 0
+(1 row)
+
-- Test of cube_is_point
--
SELECT cube_is_point('(0)'::cube);
@@ -1100,6 +1279,108 @@ SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);
(-0.5, 1),(-0.5, 4)
(1 row)
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -23, 5);
+ cube_enlarge
+--------------
+ (42, 0, 0)
+(1 row)
+
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -24, 5);
+ cube_enlarge
+--------------
+ (42, 0, 0)
+(1 row)
+
+-- Test of cube_union (MBR for two cubes)
+--
+SELECT cube_union('(1,2),(3,4)'::cube, '(5,6,7),(8,9,10)'::cube);
+ cube_union
+----------------------
+ (1, 2, 0),(8, 9, 10)
+(1 row)
+
+SELECT cube_union('(1,2)'::cube, '(4,2,0,0)'::cube);
+ cube_union
+---------------------------
+ (1, 2, 0, 0),(4, 2, 0, 0)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(4,2),(4,2)'::cube);
+ cube_union
+---------------
+ (1, 2),(4, 2)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2),(1,2)'::cube);
+ cube_union
+------------
+ (1, 2)
+(1 row)
+
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2,0),(1,2,0)'::cube);
+ cube_union
+------------
+ (1, 2, 0)
+(1 row)
+
+-- Test of cube_inter
+--
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (16,15)'::cube); -- intersects
+ cube_inter
+-----------------
+ (3, 4),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (6,5)'::cube); -- includes
+ cube_inter
+---------------
+ (3, 4),(6, 5)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(13,14), (16,15)'::cube); -- no intersection
+ cube_inter
+-------------------
+ (13, 14),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,14), (16,15)'::cube); -- no intersection, but one dimension intersects
+ cube_inter
+------------------
+ (3, 14),(10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2),(10,11)'::cube, '(10,11), (16,15)'::cube); -- point intersection
+ cube_inter
+------------
+ (10, 11)
+(1 row)
+
+SELECT cube_inter('(1,2,3)'::cube, '(1,2,3)'::cube); -- point args
+ cube_inter
+------------
+ (1, 2, 3)
+(1 row)
+
+SELECT cube_inter('(1,2,3)'::cube, '(5,6,3)'::cube); -- point args
+ cube_inter
+---------------------
+ (5, 6, 3),(1, 2, 3)
+(1 row)
+
+-- Test of cube_size
+--
+SELECT cube_size('(4,8),(15,16)'::cube);
+ cube_size
+-----------
+ 88
+(1 row)
+
+SELECT cube_size('(42,137)'::cube);
+ cube_size
+-----------
+ 0
+(1 row)
+
-- Load some example data and build the index
--
CREATE TABLE test_cube (c cube);
diff --git a/contrib/cube/sql/cube.sql b/contrib/cube/sql/cube.sql
index 02e068edf4..d58974c408 100644
--- a/contrib/cube/sql/cube.sql
+++ b/contrib/cube/sql/cube.sql
@@ -112,7 +112,24 @@ SELECT cube('{0,1,2}'::float[], '{3}'::float[]);
SELECT cube(NULL::float[], '{3}'::float[]);
SELECT cube('{0,1,2}'::float[]);
SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[3,2,1,1]);
+SELECT cube_subset(cube('(1,3,5),(1,3,5)'), ARRAY[3,2,1,1]);
SELECT cube_subset(cube('(1,3,5),(6,7,8)'), ARRAY[4,0]);
+SELECT cube_subset(cube('(6,7,8),(6,7,8)'), ARRAY[4,0]);
+
+--
+-- Test point processing
+--
+SELECT cube('(1,2),(1,2)'); -- cube_in
+SELECT cube('{0,1,2}'::float[], '{0,1,2}'::float[]); -- cube_a_f8_f8
+SELECT cube('{5,6,7,8}'::float[]); -- cube_a_f8
+SELECT cube(1.37); -- cube_f8
+SELECT cube(1.37, 1.37); -- cube_f8_f8
+SELECT cube(cube(1,1), 42); -- cube_c_f8
+SELECT cube(cube(1,2), 42); -- cube_c_f8
+SELECT cube(cube(1,1), 42, 42); -- cube_c_f8_f8
+SELECT cube(cube(1,1), 42, 24); -- cube_c_f8_f8
+SELECT cube(cube(1,2), 42, 42); -- cube_c_f8_f8
+SELECT cube(cube(1,2), 42, 24); -- cube_c_f8_f8
--
-- Testing limit of CUBE_MAX_DIM dimensions check in cube_in.
@@ -212,6 +229,9 @@ SELECT '(-1,-1),(1,1)'::cube @> '(-2),(1)'::cube AS bool;
--
SELECT cube_distance('(0)'::cube,'(2,2,2,2)'::cube);
SELECT cube_distance('(0)'::cube,'(.3,.4)'::cube);
+SELECT cube_distance('(2,3,4)'::cube,'(2,3,4)'::cube);
+SELECT cube_distance('(42,42,42,42)'::cube,'(137,137,137,137)'::cube);
+SELECT cube_distance('(42,42,42)'::cube,'(137,137)'::cube);
-- Test of cube function (text to cube)
--
@@ -223,18 +243,32 @@ SELECT cube(NULL);
SELECT cube_dim('(0)'::cube);
SELECT cube_dim('(0,0)'::cube);
SELECT cube_dim('(0,0,0)'::cube);
+SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
+SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
-- Test of cube_ll_coord function (retrieves LL coordinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2);
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 3);
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 1);
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 2);
+SELECT cube_ll_coord('(1,2),(1,2)'::cube, 3);
+SELECT cube_ll_coord('(42,137)'::cube, 1);
+SELECT cube_ll_coord('(42,137)'::cube, 2);
+SELECT cube_ll_coord('(42,137)'::cube, 3);
-- Test of cube_ur_coord function (retrieves UR coordinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2);
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 3);
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 1);
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 2);
+SELECT cube_ur_coord('(1,2),(1,2)'::cube, 3);
+SELECT cube_ur_coord('(42,137)'::cube, 1);
+SELECT cube_ur_coord('(42,137)'::cube, 2);
+SELECT cube_ur_coord('(42,137)'::cube, 3);
-- Test of cube_is_point
--
@@ -265,6 +299,31 @@ SELECT cube_enlarge('(2,-2),(-3,7)'::cube, 1, 2);
SELECT cube_enlarge('(2,-2),(-3,7)'::cube, 3, 2);
SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -1, 2);
SELECT cube_enlarge('(2,-2),(-3,7)'::cube, -3, 2);
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -23, 5);
+SELECT cube_enlarge('(42,-23,-23),(42,23,23)'::cube, -24, 5);
+
+-- Test of cube_union (MBR for two cubes)
+--
+SELECT cube_union('(1,2),(3,4)'::cube, '(5,6,7),(8,9,10)'::cube);
+SELECT cube_union('(1,2)'::cube, '(4,2,0,0)'::cube);
+SELECT cube_union('(1,2),(1,2)'::cube, '(4,2),(4,2)'::cube);
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2),(1,2)'::cube);
+SELECT cube_union('(1,2),(1,2)'::cube, '(1,2,0),(1,2,0)'::cube);
+
+-- Test of cube_inter
+--
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (16,15)'::cube); -- intersects
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,4), (6,5)'::cube); -- includes
+SELECT cube_inter('(1,2),(10,11)'::cube, '(13,14), (16,15)'::cube); -- no intersection
+SELECT cube_inter('(1,2),(10,11)'::cube, '(3,14), (16,15)'::cube); -- no intersection, but one dimension intersects
+SELECT cube_inter('(1,2),(10,11)'::cube, '(10,11), (16,15)'::cube); -- point intersection
+SELECT cube_inter('(1,2,3)'::cube, '(1,2,3)'::cube); -- point args
+SELECT cube_inter('(1,2,3)'::cube, '(5,6,3)'::cube); -- point args
+
+-- Test of cube_size
+--
+SELECT cube_size('(4,8),(15,16)'::cube);
+SELECT cube_size('(42,137)'::cube);
-- Load some example data and build the index
--
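The union tests added above also pin down how mismatched dimensionalities are handled: the result takes the higher dimensionality and the lower-dimensional argument is padded with zeros, as the expected output (1, 2, 0, 0),(4, 2, 0, 0) shows. A sketch confirming that via cube_dim (not part of the patch):

SELECT cube_dim(cube_union('(1,2)'::cube, '(4,2,0,0)'::cube));               -- 4
SELECT cube_dim(cube_union('(1,2),(1,2)'::cube, '(1,2,0),(1,2,0)'::cube));   -- 3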
diff --git a/contrib/dblink/Makefile b/contrib/dblink/Makefile
index ac637480eb..32314a0abb 100644
--- a/contrib/dblink/Makefile
+++ b/contrib/dblink/Makefile
@@ -7,10 +7,13 @@ SHLIB_LINK = $(libpq)
SHLIB_PREREQS = submake-libpq
EXTENSION = dblink
-DATA = dblink--1.0.sql dblink--unpackaged--1.0.sql
+DATA = dblink--1.1.sql dblink--1.0--1.1.sql dblink--unpackaged--1.0.sql
REGRESS = dblink
+# the db name is hard-coded in the tests
+override USE_MODULE_DB =
+
ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
diff --git a/contrib/dblink/dblink--1.0--1.1.sql b/contrib/dblink/dblink--1.0--1.1.sql
new file mode 100644
index 0000000000..67293f0ef8
--- /dev/null
+++ b/contrib/dblink/dblink--1.0--1.1.sql
@@ -0,0 +1,14 @@
+/* contrib/dblink/dblink--1.0--1.1.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION dblink UPDATE TO '1.1'" to load this file. \quit
+
+CREATE FUNCTION dblink_fdw_validator(
+ options text[],
+ catalog oid
+)
+RETURNS void
+AS 'MODULE_PATHNAME', 'dblink_fdw_validator'
+LANGUAGE C STRICT;
+
+CREATE FOREIGN DATA WRAPPER dblink_fdw VALIDATOR dblink_fdw_validator;
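As the \echo guard indicates, this script is applied through the extension machinery rather than sourced directly. A sketch of the two install paths (assuming a database where the extension is, or is not yet, present):

-- an existing 1.0 installation runs dblink--1.0--1.1.sql via:
ALTER EXTENSION dblink UPDATE TO '1.1';
-- a fresh install picks up dblink--1.1.sql, the new default_version, directly:
CREATE EXTENSION dblink;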
diff --git a/contrib/dblink/dblink--1.0.sql b/contrib/dblink/dblink--1.1.sql
index 1fec9e3944..873355382c 100644
--- a/contrib/dblink/dblink--1.0.sql
+++ b/contrib/dblink/dblink--1.1.sql
@@ -1,4 +1,4 @@
-/* contrib/dblink/dblink--1.0.sql */
+/* contrib/dblink/dblink--1.1.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION dblink" to load this file. \quit
@@ -221,3 +221,15 @@ CREATE FUNCTION dblink_get_notify(
RETURNS setof record
AS 'MODULE_PATHNAME', 'dblink_get_notify'
LANGUAGE C STRICT;
+
+/* New stuff in 1.1 begins here */
+
+CREATE FUNCTION dblink_fdw_validator(
+ options text[],
+ catalog oid
+)
+RETURNS void
+AS 'MODULE_PATHNAME', 'dblink_fdw_validator'
+LANGUAGE C STRICT;
+
+CREATE FOREIGN DATA WRAPPER dblink_fdw VALIDATOR dblink_fdw_validator;
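The dblink_fdw wrapper created here is what the updated regression test connects through further down. A usage sketch with illustrative object names (per the validator added in dblink.c, connection options such as host/dbname go on the server, credentials on the user mapping):

CREATE SERVER remote_srv FOREIGN DATA WRAPPER dblink_fdw
    OPTIONS (host 'localhost', dbname 'postgres');
CREATE USER MAPPING FOR CURRENT_USER SERVER remote_srv
    OPTIONS (user 'app_user', password 'app_password');
SELECT dblink_connect('conn1', 'remote_srv');            -- connect by foreign server name
SELECT * FROM dblink('conn1', 'SELECT 1') AS t(x int);
SELECT dblink_disconnect('conn1');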
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 1e62d8091a..a81853fa91 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -9,7 +9,7 @@
* Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
*
* contrib/dblink/dblink.c
- * Copyright (c) 2001-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2014, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
* Permission to use, copy, modify, and distribute this software and its
@@ -35,18 +35,25 @@
#include <limits.h>
#include "libpq-fe.h"
-#include "funcapi.h"
+
+#include "access/htup_details.h"
+#include "access/reloptions.h"
#include "catalog/indexing.h"
#include "catalog/namespace.h"
+#include "catalog/pg_foreign_server.h"
#include "catalog/pg_type.h"
+#include "catalog/pg_user_mapping.h"
#include "executor/spi.h"
#include "foreign/foreign.h"
+#include "funcapi.h"
+#include "lib/stringinfo.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "parser/scansup.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
+#include "utils/guc.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
@@ -70,6 +77,9 @@ typedef struct storeInfo
AttInMetadata *attinmeta;
MemoryContext tmpcontext;
char **cstrs;
+ /* temp storage for results to avoid leaks on exception */
+ PGresult *last_res;
+ PGresult *cur_res;
} storeInfo;
/*
@@ -77,14 +87,15 @@ typedef struct storeInfo
*/
static Datum dblink_record_internal(FunctionCallInfo fcinfo, bool is_async);
static void prepTuplestoreResult(FunctionCallInfo fcinfo);
-static void materializeResult(FunctionCallInfo fcinfo, PGresult *res);
+static void materializeResult(FunctionCallInfo fcinfo, PGconn *conn,
+ PGresult *res);
static void materializeQueryResult(FunctionCallInfo fcinfo,
PGconn *conn,
const char *conname,
const char *sql,
bool fail);
-static int storeHandler(PGresult *res, const PGdataValue *columns,
- const char **errmsgp, void *param);
+static PGresult *storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql);
+static void storeRow(storeInfo *sinfo, PGresult *res, bool first);
static remoteConn *getConnectionByName(const char *name);
static HTAB *createConnHash(void);
static void createNewConnection(const char *name, remoteConn *rconn);
@@ -107,6 +118,10 @@ static char *escape_param_str(const char *from);
static void validate_pkattnums(Relation rel,
int2vector *pkattnums_arg, int32 pknumatts_arg,
int **pkattnums, int *pknumatts);
+static bool is_valid_dblink_option(const PQconninfoOption *options,
+ const char *option, Oid context);
+static int applyRemoteGucs(PGconn *conn);
+static void restoreLocalGucs(int nestlevel);
/* Global */
static remoteConn *pconn = NULL;
@@ -194,7 +209,8 @@ typedef struct remoteConnHashEnt
errdetail_internal("%s", msg))); \
} \
dblink_security_check(conn, rconn); \
- PQsetClientEncoding(conn, GetDatabaseEncodingName()); \
+ if (PQclientEncoding(conn) != GetDatabaseEncoding()) \
+ PQsetClientEncoding(conn, GetDatabaseEncodingName()); \
freeconn = true; \
} \
} while (0)
@@ -273,8 +289,9 @@ dblink_connect(PG_FUNCTION_ARGS)
/* check password actually used if not superuser */
dblink_security_check(conn, rconn);
- /* attempt to set client encoding to match server encoding */
- PQsetClientEncoding(conn, GetDatabaseEncodingName());
+ /* attempt to set client encoding to match server encoding, if needed */
+ if (PQclientEncoding(conn) != GetDatabaseEncoding())
+ PQsetClientEncoding(conn, GetDatabaseEncodingName());
if (connname)
{
@@ -594,7 +611,7 @@ dblink_fetch(PG_FUNCTION_ARGS)
errmsg("cursor \"%s\" does not exist", curname)));
}
- materializeResult(fcinfo, res);
+ materializeResult(fcinfo, conn, res);
return (Datum) 0;
}
@@ -630,7 +647,7 @@ dblink_send_query(PG_FUNCTION_ARGS)
/* async query send */
retval = PQsendQuery(conn, sql);
if (retval != 1)
- elog(NOTICE, "%s", PQerrorMessage(conn));
+ elog(NOTICE, "could not send query: %s", PQerrorMessage(conn));
PG_RETURN_INT32(retval);
}
@@ -739,7 +756,7 @@ dblink_record_internal(FunctionCallInfo fcinfo, bool is_async)
}
else
{
- materializeResult(fcinfo, res);
+ materializeResult(fcinfo, conn, res);
}
}
}
@@ -795,7 +812,7 @@ prepTuplestoreResult(FunctionCallInfo fcinfo)
* The PGresult will be released in this function.
*/
static void
-materializeResult(FunctionCallInfo fcinfo, PGresult *res)
+materializeResult(FunctionCallInfo fcinfo, PGconn *conn, PGresult *res)
{
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
@@ -805,7 +822,7 @@ materializeResult(FunctionCallInfo fcinfo, PGresult *res)
PG_TRY();
{
TupleDesc tupdesc;
- bool is_sql_cmd = false;
+ bool is_sql_cmd;
int ntuples;
int nfields;
@@ -866,6 +883,7 @@ materializeResult(FunctionCallInfo fcinfo, PGresult *res)
if (ntuples > 0)
{
AttInMetadata *attinmeta;
+ int nestlevel = -1;
Tuplestorestate *tupstore;
MemoryContext oldcontext;
int row;
@@ -873,6 +891,10 @@ materializeResult(FunctionCallInfo fcinfo, PGresult *res)
attinmeta = TupleDescGetAttInMetadata(tupdesc);
+ /* Set GUCs to ensure we read GUC-sensitive data types correctly */
+ if (!is_sql_cmd)
+ nestlevel = applyRemoteGucs(conn);
+
oldcontext = MemoryContextSwitchTo(
rsinfo->econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(true, false, work_mem);
@@ -909,6 +931,9 @@ materializeResult(FunctionCallInfo fcinfo, PGresult *res)
tuplestore_puttuple(tupstore, tuple);
}
+ /* clean up GUC settings, if we changed any */
+ restoreLocalGucs(nestlevel);
+
/* clean up and return the tuplestore */
tuplestore_donestoring(tupstore);
}
@@ -927,8 +952,10 @@ materializeResult(FunctionCallInfo fcinfo, PGresult *res)
/*
* Execute the given SQL command and store its results into a tuplestore
* to be returned as the result of the current function.
+ *
* This is equivalent to PQexec followed by materializeResult, but we make
- * use of libpq's "row processor" API to reduce per-row overhead.
+ * use of libpq's single-row mode to avoid accumulating the whole result
+ * inside libpq before it gets transferred to the tuplestore.
*/
static void
materializeQueryResult(FunctionCallInfo fcinfo,
@@ -944,19 +971,14 @@ materializeQueryResult(FunctionCallInfo fcinfo,
/* prepTuplestoreResult must have been called previously */
Assert(rsinfo->returnMode == SFRM_Materialize);
+ /* initialize storeInfo to empty */
+ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.fcinfo = fcinfo;
+
PG_TRY();
{
- /* initialize storeInfo to empty */
- memset(&sinfo, 0, sizeof(sinfo));
- sinfo.fcinfo = fcinfo;
-
- /* We'll collect tuples using storeHandler */
- PQsetRowProcessor(conn, storeHandler, &sinfo);
-
- res = PQexec(conn, sql);
-
- /* We don't keep the custom row processor installed permanently */
- PQsetRowProcessor(conn, NULL, NULL);
+ /* execute query, collecting any tuples into the tuplestore */
+ res = storeQueryResult(&sinfo, conn, sql);
if (!res ||
(PQresultStatus(res) != PGRES_COMMAND_OK &&
@@ -975,8 +997,8 @@ materializeQueryResult(FunctionCallInfo fcinfo,
else if (PQresultStatus(res) == PGRES_COMMAND_OK)
{
/*
- * storeHandler didn't get called, so we need to convert the
- * command status string to a tuple manually
+ * storeRow didn't get called, so we need to convert the command
+ * status string to a tuple manually
*/
TupleDesc tupdesc;
AttInMetadata *attinmeta;
@@ -1008,25 +1030,30 @@ materializeQueryResult(FunctionCallInfo fcinfo,
tuplestore_puttuple(tupstore, tuple);
PQclear(res);
+ res = NULL;
}
else
{
Assert(PQresultStatus(res) == PGRES_TUPLES_OK);
- /* storeHandler should have created a tuplestore */
+ /* storeRow should have created a tuplestore */
Assert(rsinfo->setResult != NULL);
PQclear(res);
+ res = NULL;
}
+ PQclear(sinfo.last_res);
+ sinfo.last_res = NULL;
+ PQclear(sinfo.cur_res);
+ sinfo.cur_res = NULL;
}
PG_CATCH();
{
- /* be sure to unset the custom row processor */
- PQsetRowProcessor(conn, NULL, NULL);
/* be sure to release any libpq result we collected */
- if (res)
- PQclear(res);
+ PQclear(res);
+ PQclear(sinfo.last_res);
+ PQclear(sinfo.cur_res);
/* and clear out any pending data in libpq */
- while ((res = PQskipResult(conn)) != NULL)
+ while ((res = PQgetResult(conn)) != NULL)
PQclear(res);
PG_RE_THROW();
}
@@ -1034,23 +1061,85 @@ materializeQueryResult(FunctionCallInfo fcinfo,
}
/*
- * Custom row processor for materializeQueryResult.
- * Prototype of this function must match PQrowProcessor.
+ * Execute query, and send any result rows to sinfo->tuplestore.
*/
-static int
-storeHandler(PGresult *res, const PGdataValue *columns,
- const char **errmsgp, void *param)
+static PGresult *
+storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql)
+{
+ bool first = true;
+ int nestlevel = -1;
+ PGresult *res;
+
+ if (!PQsendQuery(conn, sql))
+ elog(ERROR, "could not send query: %s", PQerrorMessage(conn));
+
+ if (!PQsetSingleRowMode(conn)) /* shouldn't fail */
+ elog(ERROR, "failed to set single-row mode for dblink query");
+
+ for (;;)
+ {
+ CHECK_FOR_INTERRUPTS();
+
+ sinfo->cur_res = PQgetResult(conn);
+ if (!sinfo->cur_res)
+ break;
+
+ if (PQresultStatus(sinfo->cur_res) == PGRES_SINGLE_TUPLE)
+ {
+ /* got one row from possibly-bigger resultset */
+
+ /*
+ * Set GUCs to ensure we read GUC-sensitive data types correctly.
+ * We shouldn't do this until we have a row in hand, to ensure
+ * libpq has seen any earlier ParameterStatus protocol messages.
+ */
+ if (first && nestlevel < 0)
+ nestlevel = applyRemoteGucs(conn);
+
+ storeRow(sinfo, sinfo->cur_res, first);
+
+ PQclear(sinfo->cur_res);
+ sinfo->cur_res = NULL;
+ first = false;
+ }
+ else
+ {
+ /* if empty resultset, fill tuplestore header */
+ if (first && PQresultStatus(sinfo->cur_res) == PGRES_TUPLES_OK)
+ storeRow(sinfo, sinfo->cur_res, first);
+
+ /* store completed result at last_res */
+ PQclear(sinfo->last_res);
+ sinfo->last_res = sinfo->cur_res;
+ sinfo->cur_res = NULL;
+ first = true;
+ }
+ }
+
+ /* clean up GUC settings, if we changed any */
+ restoreLocalGucs(nestlevel);
+
+ /* return last_res */
+ res = sinfo->last_res;
+ sinfo->last_res = NULL;
+ return res;
+}
+
+/*
+ * Send single row to sinfo->tuplestore.
+ *
+ * If "first" is true, create the tuplestore using PGresult's metadata
+ * (in this case the PGresult might contain either zero or one row).
+ */
+static void
+storeRow(storeInfo *sinfo, PGresult *res, bool first)
{
- storeInfo *sinfo = (storeInfo *) param;
int nfields = PQnfields(res);
- char **cstrs = sinfo->cstrs;
HeapTuple tuple;
- char *pbuf;
- int pbuflen;
int i;
MemoryContext oldcontext;
- if (columns == NULL)
+ if (first)
{
/* Prepare for new result set */
ReturnSetInfo *rsinfo = (ReturnSetInfo *) sinfo->fcinfo->resultinfo;
@@ -1098,13 +1187,16 @@ storeHandler(PGresult *res, const PGdataValue *columns,
sinfo->attinmeta = TupleDescGetAttInMetadata(tupdesc);
/* Create a new, empty tuplestore */
- oldcontext = MemoryContextSwitchTo(
- rsinfo->econtext->ecxt_per_query_memory);
+ oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
sinfo->tuplestore = tuplestore_begin_heap(true, false, work_mem);
rsinfo->setResult = sinfo->tuplestore;
rsinfo->setDesc = tupdesc;
MemoryContextSwitchTo(oldcontext);
+ /* Done if empty resultset */
+ if (PQntuples(res) == 0)
+ return;
+
/*
* Set up sufficiently-wide string pointers array; this won't change
* in size so it's easy to preallocate.
@@ -1121,11 +1213,10 @@ storeHandler(PGresult *res, const PGdataValue *columns,
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
-
- return 1;
}
- CHECK_FOR_INTERRUPTS();
+ /* Should have a single-row result if we get here */
+ Assert(PQntuples(res) == 1);
/*
* Do the following work in a temp context that we reset after each tuple.
@@ -1135,46 +1226,24 @@ storeHandler(PGresult *res, const PGdataValue *columns,
oldcontext = MemoryContextSwitchTo(sinfo->tmpcontext);
/*
- * The strings passed to us are not null-terminated, but the datatype
- * input functions we're about to call require null termination. Copy the
- * strings and add null termination. As a micro-optimization, allocate
- * all the strings with one palloc.
+ * Fill cstrs with null-terminated strings of column values.
*/
- pbuflen = nfields; /* count the null terminators themselves */
for (i = 0; i < nfields; i++)
{
- int len = columns[i].len;
-
- if (len > 0)
- pbuflen += len;
- }
- pbuf = (char *) palloc(pbuflen);
-
- for (i = 0; i < nfields; i++)
- {
- int len = columns[i].len;
-
- if (len < 0)
- cstrs[i] = NULL;
+ if (PQgetisnull(res, 0, i))
+ sinfo->cstrs[i] = NULL;
else
- {
- cstrs[i] = pbuf;
- memcpy(pbuf, columns[i].value, len);
- pbuf += len;
- *pbuf++ = '\0';
- }
+ sinfo->cstrs[i] = PQgetvalue(res, 0, i);
}
/* Convert row to a tuple, and add it to the tuplestore */
- tuple = BuildTupleFromCStrings(sinfo->attinmeta, cstrs);
+ tuple = BuildTupleFromCStrings(sinfo->attinmeta, sinfo->cstrs);
tuplestore_puttuple(sinfo->tuplestore, tuple);
/* Clean up */
MemoryContextSwitchTo(oldcontext);
MemoryContextReset(sinfo->tmpcontext);
-
- return 1;
}
/*
@@ -1494,10 +1563,7 @@ dblink_get_pkey(PG_FUNCTION_ARGS)
Datum result;
values = (char **) palloc(2 * sizeof(char *));
- values[0] = (char *) palloc(12); /* sign, 10 digits, '\0' */
-
- sprintf(values[0], "%d", call_cntr + 1);
-
+ values[0] = psprintf("%d", call_cntr + 1);
values[1] = results[call_cntr];
/* build the tuple */
@@ -1875,6 +1941,75 @@ dblink_get_notify(PG_FUNCTION_ARGS)
return (Datum) 0;
}
+/*
+ * Validate the options given to a dblink foreign server or user mapping.
+ * Raise an error if any option is invalid.
+ *
+ * We just check the names of options here, so semantic errors in options,
+ * such as invalid numeric format, will be detected at the attempt to connect.
+ */
+PG_FUNCTION_INFO_V1(dblink_fdw_validator);
+Datum
+dblink_fdw_validator(PG_FUNCTION_ARGS)
+{
+ List *options_list = untransformRelOptions(PG_GETARG_DATUM(0));
+ Oid context = PG_GETARG_OID(1);
+ ListCell *cell;
+
+ static const PQconninfoOption *options = NULL;
+
+ /*
+ * Get list of valid libpq options.
+ *
+ * To avoid unnecessary work, we get the list once and use it throughout
+ * the lifetime of this backend process. We don't need to care about
+ * memory context issues, because PQconndefaults allocates with malloc.
+ */
+ if (!options)
+ {
+ options = PQconndefaults();
+ if (!options) /* assume reason for failure is OOM */
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_OUT_OF_MEMORY),
+ errmsg("out of memory"),
+ errdetail("could not get libpq's default connection options")));
+ }
+
+ /* Validate each supplied option. */
+ foreach(cell, options_list)
+ {
+ DefElem *def = (DefElem *) lfirst(cell);
+
+ if (!is_valid_dblink_option(options, def->defname, context))
+ {
+ /*
+ * Unknown option, or invalid option for the context specified, so
+ * complain about it. Provide a hint with list of valid options
+ * for the context.
+ */
+ StringInfoData buf;
+ const PQconninfoOption *opt;
+
+ initStringInfo(&buf);
+ for (opt = options; opt->keyword; opt++)
+ {
+ if (is_valid_dblink_option(options, opt->keyword, context))
+ appendStringInfo(&buf, "%s%s",
+ (buf.len > 0) ? ", " : "",
+ opt->keyword);
+ }
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_OPTION_NAME_NOT_FOUND),
+ errmsg("invalid option \"%s\"", def->defname),
+ errhint("Valid options in this context are: %s",
+ buf.data)));
+ }
+ }
+
+ PG_RETURN_VOID();
+}
+
+
/*************************************************************
* internal functions
*/
@@ -1910,7 +2045,7 @@ get_pkey_attnames(Relation rel, int16 *numatts)
ObjectIdGetDatum(RelationGetRelid(rel)));
scan = systable_beginscan(indexRelation, IndexIndrelidIndexId, true,
- SnapshotNow, 1, &skey);
+ NULL, 1, &skey);
while (HeapTupleIsValid(indexTuple = systable_getnext(scan)))
{
@@ -2033,14 +2168,14 @@ get_sql_insert(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals
continue;
if (needComma)
- appendStringInfo(&buf, ",");
+ appendStringInfoChar(&buf, ',');
appendStringInfoString(&buf,
quote_ident_cstr(NameStr(tupdesc->attrs[i]->attname)));
needComma = true;
}
- appendStringInfo(&buf, ") VALUES(");
+ appendStringInfoString(&buf, ") VALUES(");
/*
* Note: i is physical column number (counting from 0).
@@ -2052,7 +2187,7 @@ get_sql_insert(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals
continue;
if (needComma)
- appendStringInfo(&buf, ",");
+ appendStringInfoChar(&buf, ',');
key = get_attnum_pk_pos(pkattnums, pknumatts, i);
@@ -2067,10 +2202,10 @@ get_sql_insert(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals
pfree(val);
}
else
- appendStringInfo(&buf, "NULL");
+ appendStringInfoString(&buf, "NULL");
needComma = true;
}
- appendStringInfo(&buf, ")");
+ appendStringInfoChar(&buf, ')');
return (buf.data);
}
@@ -2096,7 +2231,7 @@ get_sql_delete(Relation rel, int *pkattnums, int pknumatts, char **tgt_pkattvals
int pkattnum = pkattnums[i];
if (i > 0)
- appendStringInfo(&buf, " AND ");
+ appendStringInfoString(&buf, " AND ");
appendStringInfoString(&buf,
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
@@ -2105,7 +2240,7 @@ get_sql_delete(Relation rel, int *pkattnums, int pknumatts, char **tgt_pkattvals
appendStringInfo(&buf, " = %s",
quote_literal_cstr(tgt_pkattvals[i]));
else
- appendStringInfo(&buf, " IS NULL");
+ appendStringInfoString(&buf, " IS NULL");
}
return (buf.data);
@@ -2150,7 +2285,7 @@ get_sql_update(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals
continue;
if (needComma)
- appendStringInfo(&buf, ", ");
+ appendStringInfoString(&buf, ", ");
appendStringInfo(&buf, "%s = ",
quote_ident_cstr(NameStr(tupdesc->attrs[i]->attname)));
@@ -2172,16 +2307,16 @@ get_sql_update(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals
needComma = true;
}
- appendStringInfo(&buf, " WHERE ");
+ appendStringInfoString(&buf, " WHERE ");
for (i = 0; i < pknumatts; i++)
{
int pkattnum = pkattnums[i];
if (i > 0)
- appendStringInfo(&buf, " AND ");
+ appendStringInfoString(&buf, " AND ");
- appendStringInfo(&buf, "%s",
+ appendStringInfoString(&buf,
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
val = tgt_pkattvals[i];
@@ -2189,7 +2324,7 @@ get_sql_update(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals
if (val != NULL)
appendStringInfo(&buf, " = %s", quote_literal_cstr(val));
else
- appendStringInfo(&buf, " IS NULL");
+ appendStringInfoString(&buf, " IS NULL");
}
return (buf.data);
@@ -2259,7 +2394,7 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk
* Build sql statement to look up tuple of interest, ie, the one matching
* src_pkattvals. We used to use "SELECT *" here, but it's simpler to
* generate a result tuple that matches the table's physical structure,
- * with NULLs for any dropped columns. Otherwise we have to deal with two
+ * with NULLs for any dropped columns. Otherwise we have to deal with two
* different tupdescs and everything's very confusing.
*/
appendStringInfoString(&buf, "SELECT ");
@@ -2283,7 +2418,7 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk
int pkattnum = pkattnums[i];
if (i > 0)
- appendStringInfo(&buf, " AND ");
+ appendStringInfoString(&buf, " AND ");
appendStringInfoString(&buf,
quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
@@ -2292,7 +2427,7 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk
appendStringInfo(&buf, " = %s",
quote_literal_cstr(src_pkattvals[i]));
else
- appendStringInfo(&buf, " IS NULL");
+ appendStringInfoString(&buf, " IS NULL");
}
/*
@@ -2485,7 +2620,7 @@ dblink_security_check(PGconn *conn, remoteConn *rconn)
}
/*
- * For non-superusers, insist that the connstr specify a password. This
+ * For non-superusers, insist that the connstr specify a password. This
* prevents a password from being picked up from .pgpass, a service file,
* the environment, etc. We don't want the postgres user's passwords
* to be accessible to non-superusers.
@@ -2731,3 +2866,129 @@ validate_pkattnums(Relation rel,
errmsg("invalid attribute number %d", pkattnum)));
}
}
+
+/*
+ * Check if the specified connection option is valid.
+ *
+ * We basically allow whatever libpq thinks is an option, with these
+ * restrictions:
+ * debug options: disallowed
+ * "client_encoding": disallowed
+ * "user": valid only in USER MAPPING options
+ * secure options (eg password): valid only in USER MAPPING options
+ * others: valid only in FOREIGN SERVER options
+ *
+ * We disallow client_encoding because it would be overridden anyway via
+ * PQclientEncoding; allowing it to be specified would merely promote
+ * confusion.
+ */
+static bool
+is_valid_dblink_option(const PQconninfoOption *options, const char *option,
+ Oid context)
+{
+ const PQconninfoOption *opt;
+
+ /* Look up the option in libpq result */
+ for (opt = options; opt->keyword; opt++)
+ {
+ if (strcmp(opt->keyword, option) == 0)
+ break;
+ }
+ if (opt->keyword == NULL)
+ return false;
+
+ /* Disallow debug options (particularly "replication") */
+ if (strchr(opt->dispchar, 'D'))
+ return false;
+
+ /* Disallow "client_encoding" */
+ if (strcmp(opt->keyword, "client_encoding") == 0)
+ return false;
+
+ /*
+ * If the option is "user" or marked secure, it should be specified only
+ * in USER MAPPING. Others should be specified only in SERVER.
+ */
+ if (strcmp(opt->keyword, "user") == 0 || strchr(opt->dispchar, '*'))
+ {
+ if (context != UserMappingRelationId)
+ return false;
+ }
+ else
+ {
+ if (context != ForeignServerRelationId)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Copy the remote session's values of GUCs that affect datatype I/O
+ * and apply them locally in a new GUC nesting level. Returns the new
+ * nestlevel (which is needed by restoreLocalGucs to undo the settings),
+ * or -1 if no new nestlevel was needed.
+ *
+ * We use the equivalent of a function SET option to allow the settings to
+ * persist only until the caller calls restoreLocalGucs. If an error is
+ * thrown in between, guc.c will take care of undoing the settings.
+ */
+static int
+applyRemoteGucs(PGconn *conn)
+{
+ static const char *const GUCsAffectingIO[] = {
+ "DateStyle",
+ "IntervalStyle"
+ };
+
+ int nestlevel = -1;
+ int i;
+
+ for (i = 0; i < lengthof(GUCsAffectingIO); i++)
+ {
+ const char *gucName = GUCsAffectingIO[i];
+ const char *remoteVal = PQparameterStatus(conn, gucName);
+ const char *localVal;
+
+ /*
+ * If the remote server is pre-8.4, it won't have IntervalStyle, but
+ * that's okay because its output format won't be ambiguous. So just
+ * skip the GUC if we don't get a value for it. (We might eventually
+ * need more complicated logic with remote-version checks here.)
+ */
+ if (remoteVal == NULL)
+ continue;
+
+ /*
+ * Avoid GUC-setting overhead if the remote and local GUCs already
+ * have the same value.
+ */
+ localVal = GetConfigOption(gucName, false, false);
+ Assert(localVal != NULL);
+
+ if (strcmp(remoteVal, localVal) == 0)
+ continue;
+
+ /* Create new GUC nest level if we didn't already */
+ if (nestlevel < 0)
+ nestlevel = NewGUCNestLevel();
+
+ /* Apply the option (this will throw error on failure) */
+ (void) set_config_option(gucName, remoteVal,
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0);
+ }
+
+ return nestlevel;
+}
+
+/*
+ * Restore local GUCs after they have been overlaid with remote settings.
+ */
+static void
+restoreLocalGucs(int nestlevel)
+{
+ /* Do nothing if no new nestlevel was created */
+ if (nestlevel > 0)
+ AtEOXact_GUC(true, nestlevel);
+}
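At the SQL level, is_valid_dblink_option() above determines which options ALTER SERVER and ALTER USER MAPPING accept for dblink_fdw objects; a sketch against an assumed server name:

ALTER SERVER remote_srv OPTIONS (ADD connect_timeout '5');   -- ok: ordinary libpq connection option
ALTER SERVER remote_srv OPTIONS (ADD password 'x');          -- ERROR: secure options are USER MAPPING only
ALTER USER MAPPING FOR CURRENT_USER SERVER remote_srv
    OPTIONS (ADD client_encoding 'LATIN1');                  -- ERROR: dblink forces the client encoding itself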
diff --git a/contrib/dblink/dblink.control b/contrib/dblink/dblink.control
index 4333a9b618..39f439affc 100644
--- a/contrib/dblink/dblink.control
+++ b/contrib/dblink/dblink.control
@@ -1,5 +1,5 @@
# dblink extension
comment = 'connect to other PostgreSQL databases from within a database'
-default_version = '1.0'
+default_version = '1.1'
module_pathname = '$libdir/dblink'
relocatable = true
diff --git a/contrib/dblink/dblink.h b/contrib/dblink/dblink.h
index 935d283d31..270d619ed6 100644
--- a/contrib/dblink/dblink.h
+++ b/contrib/dblink/dblink.h
@@ -9,7 +9,7 @@
* Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
*
* contrib/dblink/dblink.h
- * Copyright (c) 2001-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2014, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
* Permission to use, copy, modify, and distribute this software and its
@@ -58,5 +58,6 @@ extern Datum dblink_build_sql_delete(PG_FUNCTION_ARGS);
extern Datum dblink_build_sql_update(PG_FUNCTION_ARGS);
extern Datum dblink_current_query(PG_FUNCTION_ARGS);
extern Datum dblink_get_notify(PG_FUNCTION_ARGS);
+extern Datum dblink_fdw_validator(PG_FUNCTION_ARGS);
#endif /* DBLINK_H */
diff --git a/contrib/dblink/expected/dblink.out b/contrib/dblink/expected/dblink.out
index 1db153aeb1..f237c43d3d 100644
--- a/contrib/dblink/expected/dblink.out
+++ b/contrib/dblink/expected/dblink.out
@@ -1,6 +1,5 @@
CREATE EXTENSION dblink;
CREATE TABLE foo(f1 int, f2 text, f3 text[], primary key (f1,f2));
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "foo_pkey" for table "foo"
INSERT INTO foo VALUES (0,'a','{"a0","b0","c0"}');
INSERT INTO foo VALUES (1,'b','{"a1","b1","c1"}');
INSERT INTO foo VALUES (2,'c','{"a2","b2","c2"}');
@@ -56,7 +55,6 @@ ERROR: invalid attribute number 4
-- retest using a quoted and schema qualified table
CREATE SCHEMA "MySchema";
CREATE TABLE "MySchema"."Foo"(f1 int, f2 text, f3 text[], primary key (f1,f2));
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "Foo_pkey" for table "Foo"
INSERT INTO "MySchema"."Foo" VALUES (0,'a','{"a0","b0","c0"}');
-- list the primary key fields
SELECT *
@@ -785,8 +783,12 @@ SELECT dblink_disconnect('dtest1');
-- test foreign data wrapper functionality
CREATE USER dblink_regression_test;
-CREATE FOREIGN DATA WRAPPER postgresql;
-CREATE SERVER fdtest FOREIGN DATA WRAPPER postgresql OPTIONS (dbname 'contrib_regression');
+CREATE SERVER fdtest FOREIGN DATA WRAPPER dblink_fdw
+ OPTIONS (dbname 'contrib_regression');
+CREATE USER MAPPING FOR public SERVER fdtest
+ OPTIONS (server 'localhost'); -- fail, can't specify server here
+ERROR: invalid option "server"
+HINT: Valid options in this context are: user, password
CREATE USER MAPPING FOR public SERVER fdtest;
GRANT USAGE ON FOREIGN SERVER fdtest TO dblink_regression_test;
GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO dblink_regression_test;
@@ -825,7 +827,6 @@ REVOKE EXECUTE ON FUNCTION dblink_connect_u(text, text) FROM dblink_regression_t
DROP USER dblink_regression_test;
DROP USER MAPPING FOR public SERVER fdtest;
DROP SERVER fdtest;
-DROP FOREIGN DATA WRAPPER postgresql;
-- test asynchronous notifications
SELECT dblink_connect('dbname=contrib_regression');
dblink_connect
@@ -885,8 +886,6 @@ CREATE TEMP TABLE test_dropped
col2 INT NOT NULL DEFAULT 112,
col2b INT NOT NULL DEFAULT 113
);
-NOTICE: CREATE TABLE will create implicit sequence "test_dropped_id_seq" for serial column "test_dropped.id"
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_dropped_pkey" for table "test_dropped"
INSERT INTO test_dropped VALUES(default);
ALTER TABLE test_dropped
DROP COLUMN col1,
@@ -914,3 +913,179 @@ SELECT dblink_build_sql_delete('test_dropped', '1', 1,
DELETE FROM test_dropped WHERE id = '2'
(1 row)
+-- test local mimicry of remote GUC values that affect datatype I/O
+SET datestyle = ISO, MDY;
+SET intervalstyle = postgres;
+SET timezone = UTC;
+SELECT dblink_connect('myconn','dbname=contrib_regression');
+ dblink_connect
+----------------
+ OK
+(1 row)
+
+SELECT dblink_exec('myconn', 'SET datestyle = GERMAN, DMY;');
+ dblink_exec
+-------------
+ SET
+(1 row)
+
+-- single row synchronous case
+SELECT *
+FROM dblink('myconn',
+ 'SELECT * FROM (VALUES (''12.03.2013 00:00:00+00'')) t')
+ AS t(a timestamptz);
+ a
+------------------------
+ 2013-03-12 00:00:00+00
+(1 row)
+
+-- multi-row synchronous case
+SELECT *
+FROM dblink('myconn',
+ 'SELECT * FROM
+ (VALUES (''12.03.2013 00:00:00+00''),
+ (''12.03.2013 00:00:00+00'')) t')
+ AS t(a timestamptz);
+ a
+------------------------
+ 2013-03-12 00:00:00+00
+ 2013-03-12 00:00:00+00
+(2 rows)
+
+-- single-row asynchronous case
+SELECT *
+FROM dblink_send_query('myconn',
+ 'SELECT * FROM
+ (VALUES (''12.03.2013 00:00:00+00'')) t');
+ dblink_send_query
+-------------------
+ 1
+(1 row)
+
+CREATE TEMPORARY TABLE result AS
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz))
+UNION ALL
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz));
+SELECT * FROM result;
+ t
+------------------------
+ 2013-03-12 00:00:00+00
+(1 row)
+
+DROP TABLE result;
+-- multi-row asynchronous case
+SELECT *
+FROM dblink_send_query('myconn',
+ 'SELECT * FROM
+ (VALUES (''12.03.2013 00:00:00+00''),
+ (''12.03.2013 00:00:00+00'')) t');
+ dblink_send_query
+-------------------
+ 1
+(1 row)
+
+CREATE TEMPORARY TABLE result AS
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz))
+UNION ALL
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz))
+UNION ALL
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz));
+SELECT * FROM result;
+ t
+------------------------
+ 2013-03-12 00:00:00+00
+ 2013-03-12 00:00:00+00
+(2 rows)
+
+DROP TABLE result;
+-- Try an ambiguous interval
+SELECT dblink_exec('myconn', 'SET intervalstyle = sql_standard;');
+ dblink_exec
+-------------
+ SET
+(1 row)
+
+SELECT *
+FROM dblink('myconn',
+ 'SELECT * FROM (VALUES (''-1 2:03:04'')) i')
+ AS i(i interval);
+ i
+-------------------
+ -1 days -02:03:04
+(1 row)
+
+-- Try swapping to another format to ensure the GUCs are tracked
+-- properly through a change.
+CREATE TEMPORARY TABLE result (t timestamptz);
+SELECT dblink_exec('myconn', 'SET datestyle = ISO, MDY;');
+ dblink_exec
+-------------
+ SET
+(1 row)
+
+INSERT INTO result
+ SELECT *
+ FROM dblink('myconn',
+ 'SELECT * FROM (VALUES (''03.12.2013 00:00:00+00'')) t')
+ AS t(a timestamptz);
+SELECT dblink_exec('myconn', 'SET datestyle = GERMAN, DMY;');
+ dblink_exec
+-------------
+ SET
+(1 row)
+
+INSERT INTO result
+ SELECT *
+ FROM dblink('myconn',
+ 'SELECT * FROM (VALUES (''12.03.2013 00:00:00+00'')) t')
+ AS t(a timestamptz);
+SELECT * FROM result;
+ t
+------------------------
+ 2013-03-12 00:00:00+00
+ 2013-03-12 00:00:00+00
+(2 rows)
+
+DROP TABLE result;
+-- Check error throwing in dblink_fetch
+SELECT dblink_open('myconn','error_cursor',
+ 'SELECT * FROM (VALUES (''1''), (''not an int'')) AS t(text);');
+ dblink_open
+-------------
+ OK
+(1 row)
+
+SELECT *
+FROM dblink_fetch('myconn','error_cursor', 1) AS t(i int);
+ i
+---
+ 1
+(1 row)
+
+SELECT *
+FROM dblink_fetch('myconn','error_cursor', 1) AS t(i int);
+ERROR: invalid input syntax for integer: "not an int"
+-- Make sure that the local settings have retained their values in spite
+-- of shenanigans on the connection.
+SHOW datestyle;
+ DateStyle
+-----------
+ ISO, MDY
+(1 row)
+
+SHOW intervalstyle;
+ IntervalStyle
+---------------
+ postgres
+(1 row)
+
+-- Clean up GUC-setting tests
+SELECT dblink_disconnect('myconn');
+ dblink_disconnect
+-------------------
+ OK
+(1 row)
+
+RESET datestyle;
+RESET intervalstyle;
+RESET timezone;
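What these tests guard against is that '12.03.2013' is ambiguous text: the remote session emits it under GERMAN, DMY, and without the new applyRemoteGucs() mimicry the local session would re-parse it under its own DateStyle. The ambiguity itself, independent of dblink (a sketch):

SET datestyle = GERMAN, DMY;
SELECT '12.03.2013'::date;    -- 12 March 2013
SET datestyle = ISO, MDY;
SELECT '12.03.2013'::date;    -- 3 December 2013
RESET datestyle;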
diff --git a/contrib/dblink/sql/dblink.sql b/contrib/dblink/sql/dblink.sql
index 8c8ffe233c..2a107601c5 100644
--- a/contrib/dblink/sql/dblink.sql
+++ b/contrib/dblink/sql/dblink.sql
@@ -360,10 +360,12 @@ SELECT dblink_disconnect('dtest1');
-- test foreign data wrapper functionality
CREATE USER dblink_regression_test;
-
-CREATE FOREIGN DATA WRAPPER postgresql;
-CREATE SERVER fdtest FOREIGN DATA WRAPPER postgresql OPTIONS (dbname 'contrib_regression');
+CREATE SERVER fdtest FOREIGN DATA WRAPPER dblink_fdw
+ OPTIONS (dbname 'contrib_regression');
+CREATE USER MAPPING FOR public SERVER fdtest
+ OPTIONS (server 'localhost'); -- fail, can't specify server here
CREATE USER MAPPING FOR public SERVER fdtest;
+
GRANT USAGE ON FOREIGN SERVER fdtest TO dblink_regression_test;
GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO dblink_regression_test;
@@ -381,7 +383,6 @@ REVOKE EXECUTE ON FUNCTION dblink_connect_u(text, text) FROM dblink_regression_t
DROP USER dblink_regression_test;
DROP USER MAPPING FOR public SERVER fdtest;
DROP SERVER fdtest;
-DROP FOREIGN DATA WRAPPER postgresql;
-- test asynchronous notifications
SELECT dblink_connect('dbname=contrib_regression');
@@ -425,3 +426,99 @@ SELECT dblink_build_sql_update('test_dropped', '1', 1,
SELECT dblink_build_sql_delete('test_dropped', '1', 1,
ARRAY['2'::TEXT]);
+
+-- test local mimicry of remote GUC values that affect datatype I/O
+SET datestyle = ISO, MDY;
+SET intervalstyle = postgres;
+SET timezone = UTC;
+SELECT dblink_connect('myconn','dbname=contrib_regression');
+SELECT dblink_exec('myconn', 'SET datestyle = GERMAN, DMY;');
+
+-- single row synchronous case
+SELECT *
+FROM dblink('myconn',
+ 'SELECT * FROM (VALUES (''12.03.2013 00:00:00+00'')) t')
+ AS t(a timestamptz);
+
+-- multi-row synchronous case
+SELECT *
+FROM dblink('myconn',
+ 'SELECT * FROM
+ (VALUES (''12.03.2013 00:00:00+00''),
+ (''12.03.2013 00:00:00+00'')) t')
+ AS t(a timestamptz);
+
+-- single-row asynchronous case
+SELECT *
+FROM dblink_send_query('myconn',
+ 'SELECT * FROM
+ (VALUES (''12.03.2013 00:00:00+00'')) t');
+CREATE TEMPORARY TABLE result AS
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz))
+UNION ALL
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz));
+SELECT * FROM result;
+DROP TABLE result;
+
+-- multi-row asynchronous case
+SELECT *
+FROM dblink_send_query('myconn',
+ 'SELECT * FROM
+ (VALUES (''12.03.2013 00:00:00+00''),
+ (''12.03.2013 00:00:00+00'')) t');
+CREATE TEMPORARY TABLE result AS
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz))
+UNION ALL
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz))
+UNION ALL
+(SELECT * from dblink_get_result('myconn') as t(t timestamptz));
+SELECT * FROM result;
+DROP TABLE result;
+
+-- Try an ambiguous interval
+SELECT dblink_exec('myconn', 'SET intervalstyle = sql_standard;');
+SELECT *
+FROM dblink('myconn',
+ 'SELECT * FROM (VALUES (''-1 2:03:04'')) i')
+ AS i(i interval);
+
+-- Try swapping to another format to ensure the GUCs are tracked
+-- properly through a change.
+CREATE TEMPORARY TABLE result (t timestamptz);
+
+SELECT dblink_exec('myconn', 'SET datestyle = ISO, MDY;');
+INSERT INTO result
+ SELECT *
+ FROM dblink('myconn',
+ 'SELECT * FROM (VALUES (''03.12.2013 00:00:00+00'')) t')
+ AS t(a timestamptz);
+
+SELECT dblink_exec('myconn', 'SET datestyle = GERMAN, DMY;');
+INSERT INTO result
+ SELECT *
+ FROM dblink('myconn',
+ 'SELECT * FROM (VALUES (''12.03.2013 00:00:00+00'')) t')
+ AS t(a timestamptz);
+
+SELECT * FROM result;
+
+DROP TABLE result;
+
+-- Check error throwing in dblink_fetch
+SELECT dblink_open('myconn','error_cursor',
+ 'SELECT * FROM (VALUES (''1''), (''not an int'')) AS t(text);');
+SELECT *
+FROM dblink_fetch('myconn','error_cursor', 1) AS t(i int);
+SELECT *
+FROM dblink_fetch('myconn','error_cursor', 1) AS t(i int);
+
+-- Make sure that the local settings have retained their values in spite
+-- of shenanigans on the connection.
+SHOW datestyle;
+SHOW intervalstyle;
+
+-- Clean up GUC-setting tests
+SELECT dblink_disconnect('myconn');
+RESET datestyle;
+RESET intervalstyle;
+RESET timezone;
diff --git a/contrib/dict_int/dict_int.c b/contrib/dict_int/dict_int.c
index 4beaf92987..79067a86f0 100644
--- a/contrib/dict_int/dict_int.c
+++ b/contrib/dict_int/dict_int.c
@@ -3,7 +3,7 @@
* dict_int.c
* Text search dictionary for integers
*
- * Copyright (c) 2007-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/dict_int/dict_int.c
@@ -26,10 +26,7 @@ typedef struct
PG_FUNCTION_INFO_V1(dintdict_init);
-Datum dintdict_init(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(dintdict_lexize);
-Datum dintdict_lexize(PG_FUNCTION_ARGS);
Datum
dintdict_init(PG_FUNCTION_ARGS)
diff --git a/contrib/dict_xsyn/dict_xsyn.c b/contrib/dict_xsyn/dict_xsyn.c
index 59f977e503..1c27565f5e 100644
--- a/contrib/dict_xsyn/dict_xsyn.c
+++ b/contrib/dict_xsyn/dict_xsyn.c
@@ -3,7 +3,7 @@
* dict_xsyn.c
* Extended synonym dictionary
*
- * Copyright (c) 2007-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/dict_xsyn/dict_xsyn.c
@@ -40,10 +40,7 @@ typedef struct
PG_FUNCTION_INFO_V1(dxsyn_init);
-Datum dxsyn_init(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(dxsyn_lexize);
-Datum dxsyn_lexize(PG_FUNCTION_ARGS);
static char *
find_word(char *in, char **end)
diff --git a/contrib/dummy_seclabel/dummy_seclabel.c b/contrib/dummy_seclabel/dummy_seclabel.c
index 75e4f8cda4..b5753cc908 100644
--- a/contrib/dummy_seclabel/dummy_seclabel.c
+++ b/contrib/dummy_seclabel/dummy_seclabel.c
@@ -7,7 +7,7 @@
* perspective, but allows regression testing independent of platform-specific
* features like SELinux.
*
- * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*/
#include "postgres.h"
diff --git a/contrib/earthdistance/earthdistance.c b/contrib/earthdistance/earthdistance.c
index 2f344a7011..6bbebdfd1a 100644
--- a/contrib/earthdistance/earthdistance.c
+++ b/contrib/earthdistance/earthdistance.c
@@ -91,7 +91,7 @@ geo_distance_internal(Point *pt1, Point *pt2)
* distance between the points in miles on earth's surface
*
* If float8 is passed-by-value, the oldstyle version-0 calling convention
- * is unportable, so we use version-1. However, if it's passed-by-reference,
+ * is unportable, so we use version-1. However, if it's passed-by-reference,
* continue to use oldstyle. This is just because we'd like earthdistance
* to serve as a canary for any unintentional breakage of version-0 functions
* with float8 results.
@@ -99,8 +99,6 @@ geo_distance_internal(Point *pt1, Point *pt2)
#ifdef USE_FLOAT8_BYVAL
-Datum geo_distance(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(geo_distance);
Datum
diff --git a/contrib/file_fdw/data/text.csv b/contrib/file_fdw/data/text.csv
index ed348a9e84..f55d9cf14d 100644
--- a/contrib/file_fdw/data/text.csv
+++ b/contrib/file_fdw/data/text.csv
@@ -1,4 +1,5 @@
-AAA,aaa
-XYZ,xyz
-NULL,NULL
-ABC,abc
+AAA,aaa,123,""
+XYZ,xyz,"",321
+NULL,NULL,NULL,NULL
+NULL,NULL,"NULL",NULL
+ABC,abc,"",""
diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c
index e3b9223b3e..5a4d5aac21 100644
--- a/contrib/file_fdw/file_fdw.c
+++ b/contrib/file_fdw/file_fdw.c
@@ -3,7 +3,7 @@
* file_fdw.c
* foreign-data wrapper for server-side flat files.
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/file_fdw/file_fdw.c
@@ -15,7 +15,9 @@
#include <sys/stat.h>
#include <unistd.h>
+#include "access/htup_details.h"
#include "access/reloptions.h"
+#include "access/sysattr.h"
#include "catalog/pg_foreign_table.h"
#include "commands/copy.h"
#include "commands/defrem.h"
@@ -29,6 +31,7 @@
#include "optimizer/pathnode.h"
#include "optimizer/planmain.h"
#include "optimizer/restrictinfo.h"
+#include "optimizer/var.h"
#include "utils/memutils.h"
#include "utils/rel.h"
@@ -45,9 +48,9 @@ struct FileFdwOption
/*
* Valid options for file_fdw.
- * These options are based on the options for COPY FROM command.
- * But note that force_not_null is handled as a boolean option attached to
- * each column, not as a table option.
+ * These options are based on the options for the COPY FROM command.
+ * But note that force_not_null and force_null are handled as boolean options
+ * attached to a column, not as table options.
*
* Note: If you are adding new option for user mapping, you need to modify
* fileGetOptions(), which currently doesn't bother to look at user mappings.
@@ -66,6 +69,7 @@ static const struct FileFdwOption valid_options[] = {
{"null", ForeignTableRelationId},
{"encoding", ForeignTableRelationId},
{"force_not_null", AttributeRelationId},
+ {"force_null", AttributeRelationId},
/*
* force_quote is not supported by file_fdw because it's for COPY TO.
@@ -99,9 +103,6 @@ typedef struct FileFdwExecutionState
/*
* SQL functions
*/
-extern Datum file_fdw_handler(PG_FUNCTION_ARGS);
-extern Datum file_fdw_validator(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(file_fdw_handler);
PG_FUNCTION_INFO_V1(file_fdw_validator);
@@ -136,6 +137,9 @@ static bool is_valid_option(const char *option, Oid context);
static void fileGetOptions(Oid foreigntableid,
char **filename, List **other_options);
static List *get_file_fdw_attribute_options(Oid relid);
+static bool check_selective_binary_conversion(RelOptInfo *baserel,
+ Oid foreigntableid,
+ List **columns);
static void estimate_size(PlannerInfo *root, RelOptInfo *baserel,
FileFdwPlanState *fdw_private);
static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
@@ -181,6 +185,7 @@ file_fdw_validator(PG_FUNCTION_ARGS)
Oid catalog = PG_GETARG_OID(1);
char *filename = NULL;
DefElem *force_not_null = NULL;
+ DefElem *force_null = NULL;
List *other_options = NIL;
ListCell *cell;
@@ -237,10 +242,10 @@ file_fdw_validator(PG_FUNCTION_ARGS)
}
/*
- * Separate out filename and force_not_null, since ProcessCopyOptions
- * won't accept them. (force_not_null only comes in a boolean
- * per-column flavor here.)
+ * Separate out filename and column-specific options, since
+ * ProcessCopyOptions won't accept them.
*/
+
if (strcmp(def->defname, "filename") == 0)
{
if (filename)
@@ -249,16 +254,33 @@ file_fdw_validator(PG_FUNCTION_ARGS)
errmsg("conflicting or redundant options")));
filename = defGetString(def);
}
+
+ /*
+ * force_not_null is a boolean option; after validation we can discard
+ * it - it will be retrieved later in get_file_fdw_attribute_options()
+ */
else if (strcmp(def->defname, "force_not_null") == 0)
{
if (force_not_null)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting or redundant options")));
+ errmsg("conflicting or redundant options"),
+ errhint("option \"force_not_null\" supplied more than once for a column")));
force_not_null = def;
/* Don't care what the value is, as long as it's a legal boolean */
(void) defGetBoolean(def);
}
+ /* See comments for force_not_null above */
+ else if (strcmp(def->defname, "force_null") == 0)
+ {
+ if (force_null)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting or redundant options"),
+ errhint("option \"force_null\" supplied more than once for a column")));
+ force_null = def;
+ (void) defGetBoolean(def);
+ }
else
other_options = lappend(other_options, def);
}
@@ -363,8 +385,9 @@ fileGetOptions(Oid foreigntableid,
* Retrieve per-column generic options from pg_attribute and construct a list
* of DefElems representing them.
*
- * At the moment we only have "force_not_null", which should be combined into
- * a single DefElem listing all such columns, since that's what COPY expects.
+ * At the moment we only have "force_not_null", and "force_null",
+ * which should each be combined into a single DefElem listing all such
+ * columns, since that's what COPY expects.
*/
static List *
get_file_fdw_attribute_options(Oid relid)
@@ -374,6 +397,9 @@ get_file_fdw_attribute_options(Oid relid)
AttrNumber natts;
AttrNumber attnum;
List *fnncolumns = NIL;
+ List *fncolumns = NIL;
+
+ List *options = NIL;
rel = heap_open(relid, AccessShareLock);
tupleDesc = RelationGetDescr(rel);
@@ -404,17 +430,32 @@ get_file_fdw_attribute_options(Oid relid)
fnncolumns = lappend(fnncolumns, makeString(attname));
}
}
+ else if (strcmp(def->defname, "force_null") == 0)
+ {
+ if (defGetBoolean(def))
+ {
+ char *attname = pstrdup(NameStr(attr->attname));
+
+ fncolumns = lappend(fncolumns, makeString(attname));
+ }
+ }
/* maybe in future handle other options here */
}
}
heap_close(rel, AccessShareLock);
- /* Return DefElem only when some column(s) have force_not_null */
+ /*
+ * Return DefElem only when some column(s) have force_not_null /
+ * force_null options set
+ */
if (fnncolumns != NIL)
- return list_make1(makeDefElem("force_not_null", (Node *) fnncolumns));
- else
- return NIL;
+ options = lappend(options, makeDefElem("force_not_null", (Node *) fnncolumns));
+
+ if (fncolumns != NIL)
+ options = lappend(options, makeDefElem("force_null", (Node *) fncolumns));
+
+ return options;
}
/*
@@ -457,12 +498,25 @@ fileGetForeignPaths(PlannerInfo *root,
FileFdwPlanState *fdw_private = (FileFdwPlanState *) baserel->fdw_private;
Cost startup_cost;
Cost total_cost;
+ List *columns;
+ List *coptions = NIL;
+
+ /* Decide whether to selectively perform binary conversion */
+ if (check_selective_binary_conversion(baserel,
+ foreigntableid,
+ &columns))
+ coptions = list_make1(makeDefElem("convert_selectively",
+ (Node *) columns));
/* Estimate costs */
estimate_costs(root, baserel, fdw_private,
&startup_cost, &total_cost);
- /* Create a ForeignPath node and add it as only possible path */
+ /*
+ * Create a ForeignPath node and add it as only possible path. We use the
+ * fdw_private list of the path to carry the convert_selectively option;
+ * it will be propagated into the fdw_private list of the Plan node.
+ */
add_path(baserel, (Path *)
create_foreignscan_path(root, baserel,
baserel->rows,
@@ -470,7 +524,7 @@ fileGetForeignPaths(PlannerInfo *root,
total_cost,
NIL, /* no pathkeys */
NULL, /* no outer rel either */
- NIL)); /* no fdw_private data */
+ coptions));
/*
* If data file was sorted, and we knew it somehow, we could insert
@@ -507,7 +561,7 @@ fileGetForeignPlan(PlannerInfo *root,
scan_clauses,
scan_relid,
NIL, /* no expressions to evaluate */
- NIL); /* no private state either */
+ best_path->fdw_private);
}
/*
@@ -544,6 +598,7 @@ fileExplainForeignScan(ForeignScanState *node, ExplainState *es)
static void
fileBeginForeignScan(ForeignScanState *node, int eflags)
{
+ ForeignScan *plan = (ForeignScan *) node->ss.ps.plan;
char *filename;
List *options;
CopyState cstate;
@@ -559,12 +614,16 @@ fileBeginForeignScan(ForeignScanState *node, int eflags)
fileGetOptions(RelationGetRelid(node->ss.ss_currentRelation),
&filename, &options);
+ /* Add any options from the plan (currently only convert_selectively) */
+ options = list_concat(options, plan->fdw_private);
+
/*
* Create CopyState from FDW options. We always acquire all columns, so
* as to match the expected ScanTupleSlot signature.
*/
cstate = BeginCopyFrom(node->ss.ss_currentRelation,
filename,
+ false,
NIL,
options);
@@ -591,13 +650,13 @@ fileIterateForeignScan(ForeignScanState *node)
FileFdwExecutionState *festate = (FileFdwExecutionState *) node->fdw_state;
TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;
bool found;
- ErrorContextCallback errcontext;
+ ErrorContextCallback errcallback;
/* Set up callback to identify error line number. */
- errcontext.callback = CopyFromErrorCallback;
- errcontext.arg = (void *) festate->cstate;
- errcontext.previous = error_context_stack;
- error_context_stack = &errcontext;
+ errcallback.callback = CopyFromErrorCallback;
+ errcallback.arg = (void *) festate->cstate;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
/*
* The protocol for loading a virtual tuple into a slot is first
@@ -619,7 +678,7 @@ fileIterateForeignScan(ForeignScanState *node)
ExecStoreVirtualTuple(slot);
/* Remove error callback. */
- error_context_stack = errcontext.previous;
+ error_context_stack = errcallback.previous;
return slot;
}
@@ -637,6 +696,7 @@ fileReScanForeignScan(ForeignScanState *node)
festate->cstate = BeginCopyFrom(node->ss.ss_currentRelation,
festate->filename,
+ false,
NIL,
festate->options);
}
@@ -695,6 +755,125 @@ fileAnalyzeForeignTable(Relation relation,
}
/*
+ * check_selective_binary_conversion
+ *
+ * Check to see if it's useful to convert only a subset of the file's columns
+ * to binary. If so, construct a list of the column names to be converted,
+ * return that at *columns, and return TRUE. (Note that it's possible to
+ * determine that no columns need be converted, for instance with a COUNT(*)
+ * query. So we can't use returning a NIL list to indicate failure.)
+ */
+static bool
+check_selective_binary_conversion(RelOptInfo *baserel,
+ Oid foreigntableid,
+ List **columns)
+{
+ ForeignTable *table;
+ ListCell *lc;
+ Relation rel;
+ TupleDesc tupleDesc;
+ AttrNumber attnum;
+ Bitmapset *attrs_used = NULL;
+ bool has_wholerow = false;
+ int numattrs;
+ int i;
+
+ *columns = NIL; /* default result */
+
+ /*
+ * Check format of the file. If binary format, this is irrelevant.
+ */
+ table = GetForeignTable(foreigntableid);
+ foreach(lc, table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "format") == 0)
+ {
+ char *format = defGetString(def);
+
+ if (strcmp(format, "binary") == 0)
+ return false;
+ break;
+ }
+ }
+
+ /* Collect all the attributes needed for joins or final output. */
+ pull_varattnos((Node *) baserel->reltargetlist, baserel->relid,
+ &attrs_used);
+
+ /* Add all the attributes used by restriction clauses. */
+ foreach(lc, baserel->baserestrictinfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ pull_varattnos((Node *) rinfo->clause, baserel->relid,
+ &attrs_used);
+ }
+
+ /* Convert attribute numbers to column names. */
+ rel = heap_open(foreigntableid, AccessShareLock);
+ tupleDesc = RelationGetDescr(rel);
+
+ while ((attnum = bms_first_member(attrs_used)) >= 0)
+ {
+ /* Adjust for system attributes. */
+ attnum += FirstLowInvalidHeapAttributeNumber;
+
+ if (attnum == 0)
+ {
+ has_wholerow = true;
+ break;
+ }
+
+ /* Ignore system attributes. */
+ if (attnum < 0)
+ continue;
+
+ /* Get user attributes. */
+ if (attnum > 0)
+ {
+ Form_pg_attribute attr = tupleDesc->attrs[attnum - 1];
+ char *attname = NameStr(attr->attname);
+
+ /* Skip dropped attributes (probably shouldn't see any here). */
+ if (attr->attisdropped)
+ continue;
+ *columns = lappend(*columns, makeString(pstrdup(attname)));
+ }
+ }
+
+ /* Count non-dropped user attributes while we have the tupdesc. */
+ numattrs = 0;
+ for (i = 0; i < tupleDesc->natts; i++)
+ {
+ Form_pg_attribute attr = tupleDesc->attrs[i];
+
+ if (attr->attisdropped)
+ continue;
+ numattrs++;
+ }
+
+ heap_close(rel, AccessShareLock);
+
+ /* If there's a whole-row reference, fail: we need all the columns. */
+ if (has_wholerow)
+ {
+ *columns = NIL;
+ return false;
+ }
+
+ /* If all the user attributes are needed, fail. */
+ if (numattrs == list_length(*columns))
+ {
+ *columns = NIL;
+ return false;
+ }
+
+ return true;
+}
+
+/*
* Estimate size of a foreign table.
*
* The main result is returned in baserel->rows. We also set
@@ -747,7 +926,7 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel,
* planner's idea of the relation width; which is bogus if not all
* columns are being read, not to mention that the text representation
* of a row probably isn't the same size as its internal
- * representation. Possibly we could do something better, but the
+ * representation. Possibly we could do something better, but the
* real answer to anyone who complains is "ANALYZE" ...
*/
int tuple_width;
@@ -812,7 +991,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
* which must have at least targrows entries.
* The actual number of rows selected is returned as the function result.
* We also count the total number of rows in the file and return it into
- * *totalrows. Note that *totaldeadrows is always set to 0.
+ * *totalrows. Note that *totaldeadrows is always set to 0.
*
* Note that the returned list of rows is not always in order by physical
* position in the file. Therefore, correlation estimates derived later
@@ -834,7 +1013,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
char *filename;
List *options;
CopyState cstate;
- ErrorContextCallback errcontext;
+ ErrorContextCallback errcallback;
MemoryContext oldcontext = CurrentMemoryContext;
MemoryContext tupcontext;
@@ -851,7 +1030,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
/*
* Create CopyState from FDW options.
*/
- cstate = BeginCopyFrom(onerel, filename, NIL, options);
+ cstate = BeginCopyFrom(onerel, filename, false, NIL, options);
/*
* Use per-tuple memory context to prevent leak of memory used to read
@@ -867,10 +1046,10 @@ file_acquire_sample_rows(Relation onerel, int elevel,
rstate = anl_init_selection_state(targrows);
/* Set up callback to identify error line number. */
- errcontext.callback = CopyFromErrorCallback;
- errcontext.arg = (void *) cstate;
- errcontext.previous = error_context_stack;
- error_context_stack = &errcontext;
+ errcallback.callback = CopyFromErrorCallback;
+ errcallback.arg = (void *) cstate;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
*totalrows = 0;
*totaldeadrows = 0;
@@ -930,7 +1109,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
}
/* Remove error callback. */
- error_context_stack = errcontext.previous;
+ error_context_stack = errcallback.previous;
/* Clean up. */
MemoryContextDelete(tupcontext);
diff --git a/contrib/file_fdw/input/file_fdw.source b/contrib/file_fdw/input/file_fdw.source
index 8e3d553f90..b608372825 100644
--- a/contrib/file_fdw/input/file_fdw.source
+++ b/contrib/file_fdw/input/file_fdw.source
@@ -81,19 +81,32 @@ OPTIONS (format 'csv', filename '@abs_srcdir@/data/agg.bad', header 'true', deli
-- per-column options tests
CREATE FOREIGN TABLE text_csv (
word1 text OPTIONS (force_not_null 'true'),
- word2 text OPTIONS (force_not_null 'off')
+ word2 text OPTIONS (force_not_null 'off'),
+ word3 text OPTIONS (force_null 'true'),
+ word4 text OPTIONS (force_null 'off')
) SERVER file_server
OPTIONS (format 'text', filename '@abs_srcdir@/data/text.csv', null 'NULL');
SELECT * FROM text_csv; -- ERROR
ALTER FOREIGN TABLE text_csv OPTIONS (SET format 'csv');
+\pset null _null_
SELECT * FROM text_csv;
+-- force_not_null and force_null can be used together on the same column
+ALTER FOREIGN TABLE text_csv ALTER COLUMN word1 OPTIONS (force_null 'true');
+ALTER FOREIGN TABLE text_csv ALTER COLUMN word3 OPTIONS (force_not_null 'true');
+
-- force_not_null is not allowed to be specified at any foreign object level:
ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_not_null '*'); -- ERROR
ALTER SERVER file_server OPTIONS (ADD force_not_null '*'); -- ERROR
CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
+-- force_null is not allowed to be specified at any foreign object level:
+ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_null '*'); -- ERROR
+ALTER SERVER file_server OPTIONS (ADD force_null '*'); -- ERROR
+CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_null '*'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_null '*'); -- ERROR
+
-- basic query tests
SELECT * FROM agg_text WHERE b > 10.0 ORDER BY a;
SELECT * FROM agg_csv ORDER BY a;
@@ -118,7 +131,6 @@ SELECT tableoid::regclass, b FROM agg_csv;
INSERT INTO agg_csv VALUES(1,2.0);
UPDATE agg_csv SET a = 1;
DELETE FROM agg_csv WHERE a = 100;
-SELECT * FROM agg_csv FOR UPDATE OF agg_csv;
-- but this should be ignored
SELECT * FROM agg_csv FOR UPDATE;
diff --git a/contrib/file_fdw/output/file_fdw.source b/contrib/file_fdw/output/file_fdw.source
index 6f906e1fc8..bc183b8874 100644
--- a/contrib/file_fdw/output/file_fdw.source
+++ b/contrib/file_fdw/output/file_fdw.source
@@ -96,21 +96,28 @@ OPTIONS (format 'csv', filename '@abs_srcdir@/data/agg.bad', header 'true', deli
-- per-column options tests
CREATE FOREIGN TABLE text_csv (
word1 text OPTIONS (force_not_null 'true'),
- word2 text OPTIONS (force_not_null 'off')
+ word2 text OPTIONS (force_not_null 'off'),
+ word3 text OPTIONS (force_null 'true'),
+ word4 text OPTIONS (force_null 'off')
) SERVER file_server
OPTIONS (format 'text', filename '@abs_srcdir@/data/text.csv', null 'NULL');
SELECT * FROM text_csv; -- ERROR
ERROR: COPY force not null available only in CSV mode
ALTER FOREIGN TABLE text_csv OPTIONS (SET format 'csv');
+\pset null _null_
SELECT * FROM text_csv;
- word1 | word2
--------+-------
- AAA | aaa
- XYZ | xyz
- NULL |
- ABC | abc
-(4 rows)
+ word1 | word2 | word3 | word4
+-------+--------+--------+--------
+ AAA | aaa | 123 |
+ XYZ | xyz | | 321
+ NULL | _null_ | _null_ | _null_
+ NULL | _null_ | _null_ | _null_
+ ABC | abc | |
+(5 rows)
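Reading the output above: word1, declared force_not_null, reads the null string back as the literal text NULL, while word2 (no option) reads it as SQL NULL (_null_); word3, declared force_null, comes back NULL even where the file quotes the value, per the COPY semantics these options map onto. A per-column option can be added later in the same style the test uses; a hypothetical follow-up:

    ALTER FOREIGN TABLE text_csv ALTER COLUMN word4 OPTIONS (force_not_null 'true');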
+-- force_not_null and force_null can be used together on the same column
+ALTER FOREIGN TABLE text_csv ALTER COLUMN word1 OPTIONS (force_null 'true');
+ALTER FOREIGN TABLE text_csv ALTER COLUMN word3 OPTIONS (force_not_null 'true');
-- force_not_null is not allowed to be specified at any foreign object level:
ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_not_null '*'); -- ERROR
ERROR: invalid option "force_not_null"
@@ -124,6 +131,19 @@ HINT: There are no valid options in this context.
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
ERROR: invalid option "force_not_null"
HINT: Valid options in this context are: filename, format, header, delimiter, quote, escape, null, encoding
+-- force_null is not allowed to be specified at any foreign object level:
+ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_null '*'); -- ERROR
+ERROR: invalid option "force_null"
+HINT: There are no valid options in this context.
+ALTER SERVER file_server OPTIONS (ADD force_null '*'); -- ERROR
+ERROR: invalid option "force_null"
+HINT: There are no valid options in this context.
+CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_null '*'); -- ERROR
+ERROR: invalid option "force_null"
+HINT: There are no valid options in this context.
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_null '*'); -- ERROR
+ERROR: invalid option "force_null"
+HINT: Valid options in this context are: filename, format, header, delimiter, quote, escape, null, encoding
-- basic query tests
SELECT * FROM agg_text WHERE b > 10.0 ORDER BY a;
a | b
@@ -185,15 +205,11 @@ SELECT tableoid::regclass, b FROM agg_csv;
-- updates aren't supported
INSERT INTO agg_csv VALUES(1,2.0);
-ERROR: cannot change foreign table "agg_csv"
+ERROR: cannot insert into foreign table "agg_csv"
UPDATE agg_csv SET a = 1;
-ERROR: cannot change foreign table "agg_csv"
+ERROR: cannot update foreign table "agg_csv"
DELETE FROM agg_csv WHERE a = 100;
-ERROR: cannot change foreign table "agg_csv"
-SELECT * FROM agg_csv FOR UPDATE OF agg_csv;
-ERROR: SELECT FOR UPDATE/SHARE cannot be used with foreign table "agg_csv"
-LINE 1: SELECT * FROM agg_csv FOR UPDATE OF agg_csv;
- ^
+ERROR: cannot delete from foreign table "agg_csv"
-- but this should be ignored
SELECT * FROM agg_csv FOR UPDATE;
a | b
diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c
index f562f5484c..5001288bb6 100644
--- a/contrib/fuzzystrmatch/dmetaphone.c
+++ b/contrib/fuzzystrmatch/dmetaphone.c
@@ -10,7 +10,7 @@
* Information on using Double Metaphone can be found at
* http://www.codeproject.com/string/dmetaphone1.asp
* and the original article describing it can be found at
- * http://www.cuj.com/documents/s=8038/cuj0006philips/
+ * http://drdobbs.com/184401251
*
* For PostgreSQL we provide 2 functions - one for the primary and one for
* the alternate. That way the functions are pure text->text mappings that
@@ -114,9 +114,6 @@ The remaining code is authored by Andrew Dunstan <amdunstan@ncshp.org> and
#include <stdarg.h>
#include <assert.h>
-extern Datum dmetaphone(PG_FUNCTION_ARGS);
-extern Datum dmetaphone_alt(PG_FUNCTION_ARGS);
-
/* prototype for the main function we got from the perl module */
static void DoubleMetaphone(char *, char **);
diff --git a/contrib/fuzzystrmatch/fuzzystrmatch.c b/contrib/fuzzystrmatch/fuzzystrmatch.c
index 8a1771b970..7a53d8a008 100644
--- a/contrib/fuzzystrmatch/fuzzystrmatch.c
+++ b/contrib/fuzzystrmatch/fuzzystrmatch.c
@@ -6,7 +6,7 @@
* Joe Conway <mail@joeconway.com>
*
* contrib/fuzzystrmatch/fuzzystrmatch.c
- * Copyright (c) 2001-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2014, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
* metaphone()
@@ -45,18 +45,6 @@
PG_MODULE_MAGIC;
-
-/*
- * External declarations for exported functions
- */
-extern Datum levenshtein_with_costs(PG_FUNCTION_ARGS);
-extern Datum levenshtein(PG_FUNCTION_ARGS);
-extern Datum levenshtein_less_equal_with_costs(PG_FUNCTION_ARGS);
-extern Datum levenshtein_less_equal(PG_FUNCTION_ARGS);
-extern Datum metaphone(PG_FUNCTION_ARGS);
-extern Datum soundex(PG_FUNCTION_ARGS);
-extern Datum difference(PG_FUNCTION_ARGS);
-
/*
* Soundex
*/
diff --git a/contrib/fuzzystrmatch/levenshtein.c b/contrib/fuzzystrmatch/levenshtein.c
index 32827cb5f6..4f37a54b1e 100644
--- a/contrib/fuzzystrmatch/levenshtein.c
+++ b/contrib/fuzzystrmatch/levenshtein.c
@@ -5,7 +5,7 @@
*
* Joe Conway <mail@joeconway.com>
*
- * Copyright (c) 2001-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2014, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
* levenshtein()
@@ -50,7 +50,7 @@ static int levenshtein_internal(text *s, text *t,
* array.
*
* If max_d >= 0, we only need to provide an accurate answer when that answer
- * is less than or equal to the bound. From any cell in the matrix, there is
+ * is less than or equal to the bound. From any cell in the matrix, there is
* theoretical "minimum residual distance" from that cell to the last column
* of the final row. This minimum residual distance is zero when the
* untransformed portions of the strings are of equal length (because we might
@@ -141,7 +141,7 @@ levenshtein_internal(text *s, text *t,
stop_column = m + 1;
/*
- * If max_d >= 0, determine whether the bound is impossibly tight. If so,
+ * If max_d >= 0, determine whether the bound is impossibly tight. If so,
* return max_d + 1 immediately. Otherwise, determine whether it's tight
* enough to limit the computation we must perform. If so, figure out
* initial stop column.
@@ -168,7 +168,7 @@ levenshtein_internal(text *s, text *t,
* need to fill in. If the string is growing, the theoretical
* minimum distance already incorporates the cost of deleting the
* number of characters necessary to make the two strings equal in
- * length. Each additional deletion forces another insertion, so
+ * length. Each additional deletion forces another insertion, so
* the best-case total cost increases by ins_c + del_c. If the
* string is shrinking, the minimum theoretical cost assumes no
* excess deletions; that is, we're starting no further right than
@@ -246,7 +246,7 @@ levenshtein_internal(text *s, text *t,
/*
* The main loop fills in curr, but curr[0] needs a special case: to
* transform the first 0 characters of s into the first j characters
- * of t, we must perform j insertions. However, if start_column > 0,
+ * of t, we must perform j insertions. However, if start_column > 0,
* this special case does not apply.
*/
if (start_column == 0)
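For orientation, a small usage sketch of the bounded variant these comments describe (the function names are the ones whose extern declarations are removed above; the kitten/sitting pair is the classic example with distance 3):

    SELECT levenshtein('kitten', 'sitting');                 -- 3
    SELECT levenshtein_less_equal('kitten', 'sitting', 2);   -- bound exceeded: returns some value > 2,
                                                             -- max_d + 1 in the impossibly-tight case above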
diff --git a/contrib/hstore/Makefile b/contrib/hstore/Makefile
index 1236e7958f..2b60fbed0e 100644
--- a/contrib/hstore/Makefile
+++ b/contrib/hstore/Makefile
@@ -5,7 +5,9 @@ OBJS = hstore_io.o hstore_op.o hstore_gist.o hstore_gin.o hstore_compat.o \
crc32.o
EXTENSION = hstore
-DATA = hstore--1.1.sql hstore--1.0--1.1.sql hstore--unpackaged--1.0.sql
+DATA = hstore--1.3.sql hstore--1.2--1.3.sql \
+ hstore--1.1--1.2.sql hstore--1.0--1.1.sql \
+ hstore--unpackaged--1.0.sql
REGRESS = hstore
diff --git a/contrib/hstore/crc32.c b/contrib/hstore/crc32.c
index d541d0cc95..c82fc66472 100644
--- a/contrib/hstore/crc32.c
+++ b/contrib/hstore/crc32.c
@@ -13,7 +13,7 @@
* This code implements the AUTODIN II polynomial
* The variable corresponding to the macro argument "crc" should
* be an unsigned long.
- * Oroginal code by Spencer Garrett <srg@quick.com>
+ * Original code by Spencer Garrett <srg@quick.com>
*/
#define _CRC32_(crc, ch) (crc = (crc >> 8) ^ crc32tab[(crc ^ (ch)) & 0xff])
diff --git a/contrib/hstore/expected/hstore.out b/contrib/hstore/expected/hstore.out
index 813b9c0478..9749e45c14 100644
--- a/contrib/hstore/expected/hstore.out
+++ b/contrib/hstore/expected/hstore.out
@@ -1453,3 +1453,57 @@ select count(*) from testhstore where h = 'pos=>98, line=>371, node=>CBA, indexe
1
(1 row)
+-- json and jsonb
+select hstore_to_json('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4');
+ hstore_to_json
+-------------------------------------------------------------------------------------------------
+ {"b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4", "a key": "1"}
+(1 row)
+
+select cast( hstore '"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4' as json);
+ json
+-------------------------------------------------------------------------------------------------
+ {"b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4", "a key": "1"}
+(1 row)
+
+select hstore_to_json_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4');
+ hstore_to_json_loose
+------------------------------------------------------------------------------------------
+ {"b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 2.345e+4, "a key": 1}
+(1 row)
+
+select hstore_to_jsonb('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4');
+ hstore_to_jsonb
+-------------------------------------------------------------------------------------------------
+ {"b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4", "a key": "1"}
+(1 row)
+
+select cast( hstore '"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4' as jsonb);
+ jsonb
+-------------------------------------------------------------------------------------------------
+ {"b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4", "a key": "1"}
+(1 row)
+
+select hstore_to_jsonb_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4');
+ hstore_to_jsonb_loose
+---------------------------------------------------------------------------------------
+ {"b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 23450, "a key": 1}
+(1 row)
+
+create table test_json_agg (f1 text, f2 hstore);
+insert into test_json_agg values ('rec1','"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'),
+ ('rec2','"a key" =>2, b => f, c => "null", d=> -12345, e => 012345.6, f=> -1.234, g=> 0.345e-4');
+select json_agg(q) from test_json_agg q;
+ json_agg
+----------------------------------------------------------------------------------------------------------------------------
+ [{"f1":"rec1","f2":{"b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4", "a key": "1"}}, +
+ {"f1":"rec2","f2":{"b": "f", "c": "null", "d": "-12345", "e": "012345.6", "f": "-1.234", "g": "0.345e-4", "a key": "2"}}]
+(1 row)
+
+select json_agg(q) from (select f1, hstore_to_json_loose(f2) as f2 from test_json_agg) q;
+ json_agg
+----------------------------------------------------------------------------------------------------------------------
+ [{"f1":"rec1","f2":{"b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 2.345e+4, "a key": 1}}, +
+ {"f1":"rec2","f2":{"b": false, "c": "null", "d": -12345, "e": "012345.6", "f": -1.234, "g": 0.345e-4, "a key": 2}}]
+(1 row)
+
diff --git a/contrib/hstore/hstore--1.0--1.1.sql b/contrib/hstore/hstore--1.0--1.1.sql
index 3027ecfaa1..4e32a575c5 100644
--- a/contrib/hstore/hstore--1.0--1.1.sql
+++ b/contrib/hstore/hstore--1.0--1.1.sql
@@ -1,6 +1,6 @@
/* contrib/hstore/hstore--1.0--1.1.sql */
--- complain if script is sourced in psql, rather than via CREATE EXTENSION
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
\echo Use "ALTER EXTENSION hstore UPDATE TO '1.1'" to load this file. \quit
ALTER EXTENSION hstore DROP OPERATOR => (text, text);
diff --git a/contrib/hstore/hstore--1.1--1.2.sql b/contrib/hstore/hstore--1.1--1.2.sql
new file mode 100644
index 0000000000..a868ffe48e
--- /dev/null
+++ b/contrib/hstore/hstore--1.1--1.2.sql
@@ -0,0 +1,48 @@
+/* contrib/hstore/hstore--1.1--1.2.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION hstore UPDATE TO '1.2'" to load this file. \quit
+
+
+-- A version of 1.1 was shipped with these objects mistakenly in 9.3.0.
+-- Therefore we only add them if we detect that they aren't already there and
+-- dependent on the extension.
+
+DO LANGUAGE plpgsql
+
+$$
+
+BEGIN
+
+ PERFORM 1
+ FROM pg_proc p
+ JOIN pg_depend d
+ ON p.proname = 'hstore_to_json_loose'
+ AND d.classid = 'pg_proc'::regclass
+ AND d.objid = p.oid
+ AND d.refclassid = 'pg_extension'::regclass
+ JOIN pg_extension x
+ ON d.refobjid = x.oid
+ AND x.extname = 'hstore';
+
+ IF NOT FOUND
+ THEN
+
+ CREATE FUNCTION hstore_to_json(hstore)
+ RETURNS json
+ AS 'MODULE_PATHNAME', 'hstore_to_json'
+ LANGUAGE C IMMUTABLE STRICT;
+
+ CREATE CAST (hstore AS json)
+ WITH FUNCTION hstore_to_json(hstore);
+
+ CREATE FUNCTION hstore_to_json_loose(hstore)
+ RETURNS json
+ AS 'MODULE_PATHNAME', 'hstore_to_json_loose'
+ LANGUAGE C IMMUTABLE STRICT;
+
+ END IF;
+
+END;
+
+$$;
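Presumably the update chain is applied with the usual extension commands named by the \echo guards; a sketch, starting from an installed hstore 1.1:

    ALTER EXTENSION hstore UPDATE TO '1.2';
    ALTER EXTENSION hstore UPDATE TO '1.3';
    -- or, with the new default_version later in this patch, simply:
    ALTER EXTENSION hstore UPDATE;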
diff --git a/contrib/hstore/hstore--1.1.sql b/contrib/hstore/hstore--1.1.sql
deleted file mode 100644
index e95ad328aa..0000000000
--- a/contrib/hstore/hstore--1.1.sql
+++ /dev/null
@@ -1,524 +0,0 @@
-/* contrib/hstore/hstore--1.1.sql */
-
--- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "CREATE EXTENSION hstore" to load this file. \quit
-
-CREATE TYPE hstore;
-
-CREATE FUNCTION hstore_in(cstring)
-RETURNS hstore
-AS 'MODULE_PATHNAME'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hstore_out(hstore)
-RETURNS cstring
-AS 'MODULE_PATHNAME'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hstore_recv(internal)
-RETURNS hstore
-AS 'MODULE_PATHNAME'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hstore_send(hstore)
-RETURNS bytea
-AS 'MODULE_PATHNAME'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE TYPE hstore (
- INTERNALLENGTH = -1,
- INPUT = hstore_in,
- OUTPUT = hstore_out,
- RECEIVE = hstore_recv,
- SEND = hstore_send,
- STORAGE = extended
-);
-
-CREATE FUNCTION hstore_version_diag(hstore)
-RETURNS integer
-AS 'MODULE_PATHNAME','hstore_version_diag'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION fetchval(hstore,text)
-RETURNS text
-AS 'MODULE_PATHNAME','hstore_fetchval'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR -> (
- LEFTARG = hstore,
- RIGHTARG = text,
- PROCEDURE = fetchval
-);
-
-CREATE FUNCTION slice_array(hstore,text[])
-RETURNS text[]
-AS 'MODULE_PATHNAME','hstore_slice_to_array'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR -> (
- LEFTARG = hstore,
- RIGHTARG = text[],
- PROCEDURE = slice_array
-);
-
-CREATE FUNCTION slice(hstore,text[])
-RETURNS hstore
-AS 'MODULE_PATHNAME','hstore_slice_to_hstore'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION isexists(hstore,text)
-RETURNS bool
-AS 'MODULE_PATHNAME','hstore_exists'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION exist(hstore,text)
-RETURNS bool
-AS 'MODULE_PATHNAME','hstore_exists'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR ? (
- LEFTARG = hstore,
- RIGHTARG = text,
- PROCEDURE = exist,
- RESTRICT = contsel,
- JOIN = contjoinsel
-);
-
-CREATE FUNCTION exists_any(hstore,text[])
-RETURNS bool
-AS 'MODULE_PATHNAME','hstore_exists_any'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR ?| (
- LEFTARG = hstore,
- RIGHTARG = text[],
- PROCEDURE = exists_any,
- RESTRICT = contsel,
- JOIN = contjoinsel
-);
-
-CREATE FUNCTION exists_all(hstore,text[])
-RETURNS bool
-AS 'MODULE_PATHNAME','hstore_exists_all'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR ?& (
- LEFTARG = hstore,
- RIGHTARG = text[],
- PROCEDURE = exists_all,
- RESTRICT = contsel,
- JOIN = contjoinsel
-);
-
-CREATE FUNCTION isdefined(hstore,text)
-RETURNS bool
-AS 'MODULE_PATHNAME','hstore_defined'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION defined(hstore,text)
-RETURNS bool
-AS 'MODULE_PATHNAME','hstore_defined'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION delete(hstore,text)
-RETURNS hstore
-AS 'MODULE_PATHNAME','hstore_delete'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION delete(hstore,text[])
-RETURNS hstore
-AS 'MODULE_PATHNAME','hstore_delete_array'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION delete(hstore,hstore)
-RETURNS hstore
-AS 'MODULE_PATHNAME','hstore_delete_hstore'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR - (
- LEFTARG = hstore,
- RIGHTARG = text,
- PROCEDURE = delete
-);
-
-CREATE OPERATOR - (
- LEFTARG = hstore,
- RIGHTARG = text[],
- PROCEDURE = delete
-);
-
-CREATE OPERATOR - (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = delete
-);
-
-CREATE FUNCTION hs_concat(hstore,hstore)
-RETURNS hstore
-AS 'MODULE_PATHNAME','hstore_concat'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR || (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hs_concat
-);
-
-CREATE FUNCTION hs_contains(hstore,hstore)
-RETURNS bool
-AS 'MODULE_PATHNAME','hstore_contains'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hs_contained(hstore,hstore)
-RETURNS bool
-AS 'MODULE_PATHNAME','hstore_contained'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR @> (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hs_contains,
- COMMUTATOR = '<@',
- RESTRICT = contsel,
- JOIN = contjoinsel
-);
-
-CREATE OPERATOR <@ (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hs_contained,
- COMMUTATOR = '@>',
- RESTRICT = contsel,
- JOIN = contjoinsel
-);
-
--- obsolete:
-CREATE OPERATOR @ (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hs_contains,
- COMMUTATOR = '~',
- RESTRICT = contsel,
- JOIN = contjoinsel
-);
-
-CREATE OPERATOR ~ (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hs_contained,
- COMMUTATOR = '@',
- RESTRICT = contsel,
- JOIN = contjoinsel
-);
-
-CREATE FUNCTION tconvert(text,text)
-RETURNS hstore
-AS 'MODULE_PATHNAME','hstore_from_text'
-LANGUAGE C IMMUTABLE; -- not STRICT; needs to allow (key,NULL)
-
-CREATE FUNCTION hstore(text,text)
-RETURNS hstore
-AS 'MODULE_PATHNAME','hstore_from_text'
-LANGUAGE C IMMUTABLE; -- not STRICT; needs to allow (key,NULL)
-
-CREATE FUNCTION hstore(text[],text[])
-RETURNS hstore
-AS 'MODULE_PATHNAME', 'hstore_from_arrays'
-LANGUAGE C IMMUTABLE; -- not STRICT; allows (keys,null)
-
-CREATE FUNCTION hstore(text[])
-RETURNS hstore
-AS 'MODULE_PATHNAME', 'hstore_from_array'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE CAST (text[] AS hstore)
- WITH FUNCTION hstore(text[]);
-
-CREATE FUNCTION hstore(record)
-RETURNS hstore
-AS 'MODULE_PATHNAME', 'hstore_from_record'
-LANGUAGE C IMMUTABLE; -- not STRICT; allows (null::recordtype)
-
-CREATE FUNCTION hstore_to_array(hstore)
-RETURNS text[]
-AS 'MODULE_PATHNAME','hstore_to_array'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR %% (
- RIGHTARG = hstore,
- PROCEDURE = hstore_to_array
-);
-
-CREATE FUNCTION hstore_to_matrix(hstore)
-RETURNS text[]
-AS 'MODULE_PATHNAME','hstore_to_matrix'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR %# (
- RIGHTARG = hstore,
- PROCEDURE = hstore_to_matrix
-);
-
-CREATE FUNCTION akeys(hstore)
-RETURNS text[]
-AS 'MODULE_PATHNAME','hstore_akeys'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION avals(hstore)
-RETURNS text[]
-AS 'MODULE_PATHNAME','hstore_avals'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION skeys(hstore)
-RETURNS setof text
-AS 'MODULE_PATHNAME','hstore_skeys'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION svals(hstore)
-RETURNS setof text
-AS 'MODULE_PATHNAME','hstore_svals'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION each(IN hs hstore,
- OUT key text,
- OUT value text)
-RETURNS SETOF record
-AS 'MODULE_PATHNAME','hstore_each'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION populate_record(anyelement,hstore)
-RETURNS anyelement
-AS 'MODULE_PATHNAME', 'hstore_populate_record'
-LANGUAGE C IMMUTABLE; -- not STRICT; allows (null::rectype,hstore)
-
-CREATE OPERATOR #= (
- LEFTARG = anyelement,
- RIGHTARG = hstore,
- PROCEDURE = populate_record
-);
-
--- btree support
-
-CREATE FUNCTION hstore_eq(hstore,hstore)
-RETURNS boolean
-AS 'MODULE_PATHNAME','hstore_eq'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hstore_ne(hstore,hstore)
-RETURNS boolean
-AS 'MODULE_PATHNAME','hstore_ne'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hstore_gt(hstore,hstore)
-RETURNS boolean
-AS 'MODULE_PATHNAME','hstore_gt'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hstore_ge(hstore,hstore)
-RETURNS boolean
-AS 'MODULE_PATHNAME','hstore_ge'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hstore_lt(hstore,hstore)
-RETURNS boolean
-AS 'MODULE_PATHNAME','hstore_lt'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hstore_le(hstore,hstore)
-RETURNS boolean
-AS 'MODULE_PATHNAME','hstore_le'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION hstore_cmp(hstore,hstore)
-RETURNS integer
-AS 'MODULE_PATHNAME','hstore_cmp'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR = (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hstore_eq,
- COMMUTATOR = =,
- NEGATOR = <>,
- RESTRICT = eqsel,
- JOIN = eqjoinsel,
- MERGES,
- HASHES
-);
-CREATE OPERATOR <> (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hstore_ne,
- COMMUTATOR = <>,
- NEGATOR = =,
- RESTRICT = neqsel,
- JOIN = neqjoinsel
-);
-
--- the comparison operators have funky names (and are undocumented)
--- in an attempt to discourage anyone from actually using them. they
--- only exist to support the btree opclass
-
-CREATE OPERATOR #<# (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hstore_lt,
- COMMUTATOR = #>#,
- NEGATOR = #>=#,
- RESTRICT = scalarltsel,
- JOIN = scalarltjoinsel
-);
-CREATE OPERATOR #<=# (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hstore_le,
- COMMUTATOR = #>=#,
- NEGATOR = #>#,
- RESTRICT = scalarltsel,
- JOIN = scalarltjoinsel
-);
-CREATE OPERATOR #># (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hstore_gt,
- COMMUTATOR = #<#,
- NEGATOR = #<=#,
- RESTRICT = scalargtsel,
- JOIN = scalargtjoinsel
-);
-CREATE OPERATOR #>=# (
- LEFTARG = hstore,
- RIGHTARG = hstore,
- PROCEDURE = hstore_ge,
- COMMUTATOR = #<=#,
- NEGATOR = #<#,
- RESTRICT = scalargtsel,
- JOIN = scalargtjoinsel
-);
-
-CREATE OPERATOR CLASS btree_hstore_ops
-DEFAULT FOR TYPE hstore USING btree
-AS
- OPERATOR 1 #<# ,
- OPERATOR 2 #<=# ,
- OPERATOR 3 = ,
- OPERATOR 4 #>=# ,
- OPERATOR 5 #># ,
- FUNCTION 1 hstore_cmp(hstore,hstore);
-
--- hash support
-
-CREATE FUNCTION hstore_hash(hstore)
-RETURNS integer
-AS 'MODULE_PATHNAME','hstore_hash'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE OPERATOR CLASS hash_hstore_ops
-DEFAULT FOR TYPE hstore USING hash
-AS
- OPERATOR 1 = ,
- FUNCTION 1 hstore_hash(hstore);
-
--- GiST support
-
-CREATE TYPE ghstore;
-
-CREATE FUNCTION ghstore_in(cstring)
-RETURNS ghstore
-AS 'MODULE_PATHNAME'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE FUNCTION ghstore_out(ghstore)
-RETURNS cstring
-AS 'MODULE_PATHNAME'
-LANGUAGE C STRICT IMMUTABLE;
-
-CREATE TYPE ghstore (
- INTERNALLENGTH = -1,
- INPUT = ghstore_in,
- OUTPUT = ghstore_out
-);
-
-CREATE FUNCTION ghstore_compress(internal)
-RETURNS internal
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE FUNCTION ghstore_decompress(internal)
-RETURNS internal
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE FUNCTION ghstore_penalty(internal,internal,internal)
-RETURNS internal
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE FUNCTION ghstore_picksplit(internal, internal)
-RETURNS internal
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE FUNCTION ghstore_union(internal, internal)
-RETURNS internal
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE FUNCTION ghstore_same(internal, internal, internal)
-RETURNS internal
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE FUNCTION ghstore_consistent(internal,internal,int,oid,internal)
-RETURNS bool
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE OPERATOR CLASS gist_hstore_ops
-DEFAULT FOR TYPE hstore USING gist
-AS
- OPERATOR 7 @> ,
- OPERATOR 9 ?(hstore,text) ,
- OPERATOR 10 ?|(hstore,text[]) ,
- OPERATOR 11 ?&(hstore,text[]) ,
- --OPERATOR 8 <@ ,
- OPERATOR 13 @ ,
- --OPERATOR 14 ~ ,
- FUNCTION 1 ghstore_consistent (internal, internal, int, oid, internal),
- FUNCTION 2 ghstore_union (internal, internal),
- FUNCTION 3 ghstore_compress (internal),
- FUNCTION 4 ghstore_decompress (internal),
- FUNCTION 5 ghstore_penalty (internal, internal, internal),
- FUNCTION 6 ghstore_picksplit (internal, internal),
- FUNCTION 7 ghstore_same (internal, internal, internal),
- STORAGE ghstore;
-
--- GIN support
-
-CREATE FUNCTION gin_extract_hstore(internal, internal)
-RETURNS internal
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE FUNCTION gin_extract_hstore_query(internal, internal, int2, internal, internal)
-RETURNS internal
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE FUNCTION gin_consistent_hstore(internal, int2, internal, int4, internal, internal)
-RETURNS bool
-AS 'MODULE_PATHNAME'
-LANGUAGE C IMMUTABLE STRICT;
-
-CREATE OPERATOR CLASS gin_hstore_ops
-DEFAULT FOR TYPE hstore USING gin
-AS
- OPERATOR 7 @>,
- OPERATOR 9 ?(hstore,text),
- OPERATOR 10 ?|(hstore,text[]),
- OPERATOR 11 ?&(hstore,text[]),
- FUNCTION 1 bttextcmp(text,text),
- FUNCTION 2 gin_extract_hstore(internal, internal),
- FUNCTION 3 gin_extract_hstore_query(internal, internal, int2, internal, internal),
- FUNCTION 4 gin_consistent_hstore(internal, int2, internal, int4, internal, internal),
- STORAGE text;
diff --git a/contrib/hstore/hstore--1.2--1.3.sql b/contrib/hstore/hstore--1.2--1.3.sql
new file mode 100644
index 0000000000..0a7056015b
--- /dev/null
+++ b/contrib/hstore/hstore--1.2--1.3.sql
@@ -0,0 +1,17 @@
+/* contrib/hstore/hstore--1.2--1.3.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION hstore UPDATE TO '1.3'" to load this file. \quit
+
+CREATE FUNCTION hstore_to_jsonb(hstore)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'hstore_to_jsonb'
+LANGUAGE C IMMUTABLE STRICT;
+
+CREATE CAST (hstore AS jsonb)
+ WITH FUNCTION hstore_to_jsonb(hstore);
+
+CREATE FUNCTION hstore_to_jsonb_loose(hstore)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'hstore_to_jsonb_loose'
+LANGUAGE C IMMUTABLE STRICT;
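A brief usage sketch of what 1.3 adds (the expected output in hstore.out earlier in this diff shows the same calls together with their results):

    SELECT hstore_to_jsonb('a => 1, b => t');
    SELECT 'a => 1, b => t'::hstore::jsonb;          -- via the new CAST (hstore AS jsonb)
    SELECT hstore_to_jsonb_loose('a => 1, b => t');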
diff --git a/contrib/hstore/hstore--1.0.sql b/contrib/hstore/hstore--1.3.sql
index 8b211c4669..995ade1b3c 100644
--- a/contrib/hstore/hstore--1.0.sql
+++ b/contrib/hstore/hstore--1.3.sql
@@ -1,4 +1,4 @@
-/* contrib/hstore/hstore--1.0.sql */
+/* contrib/hstore/hstore--1.3.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION hstore" to load this file. \quit
@@ -221,12 +221,6 @@ RETURNS hstore
AS 'MODULE_PATHNAME','hstore_from_text'
LANGUAGE C IMMUTABLE; -- not STRICT; needs to allow (key,NULL)
-CREATE OPERATOR => (
- LEFTARG = text,
- RIGHTARG = text,
- PROCEDURE = hstore
-);
-
CREATE FUNCTION hstore(text[],text[])
RETURNS hstore
AS 'MODULE_PATHNAME', 'hstore_from_arrays'
@@ -240,6 +234,32 @@ LANGUAGE C IMMUTABLE STRICT;
CREATE CAST (text[] AS hstore)
WITH FUNCTION hstore(text[]);
+CREATE FUNCTION hstore_to_json(hstore)
+RETURNS json
+AS 'MODULE_PATHNAME', 'hstore_to_json'
+LANGUAGE C IMMUTABLE STRICT;
+
+CREATE CAST (hstore AS json)
+ WITH FUNCTION hstore_to_json(hstore);
+
+CREATE FUNCTION hstore_to_json_loose(hstore)
+RETURNS json
+AS 'MODULE_PATHNAME', 'hstore_to_json_loose'
+LANGUAGE C IMMUTABLE STRICT;
+
+CREATE FUNCTION hstore_to_jsonb(hstore)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'hstore_to_jsonb'
+LANGUAGE C IMMUTABLE STRICT;
+
+CREATE CAST (hstore AS jsonb)
+ WITH FUNCTION hstore_to_jsonb(hstore);
+
+CREATE FUNCTION hstore_to_jsonb_loose(hstore)
+RETURNS jsonb
+AS 'MODULE_PATHNAME', 'hstore_to_jsonb_loose'
+LANGUAGE C IMMUTABLE STRICT;
+
CREATE FUNCTION hstore(record)
RETURNS hstore
AS 'MODULE_PATHNAME', 'hstore_from_record'
diff --git a/contrib/hstore/hstore.control b/contrib/hstore/hstore.control
index 4104e17e29..dcc3b687cf 100644
--- a/contrib/hstore/hstore.control
+++ b/contrib/hstore/hstore.control
@@ -1,5 +1,5 @@
# hstore extension
comment = 'data type for storing sets of (key, value) pairs'
-default_version = '1.1'
+default_version = '1.3'
module_pathname = '$libdir/hstore'
relocatable = true
diff --git a/contrib/hstore/hstore.h b/contrib/hstore/hstore.h
index 8906397ad2..f9088f8b1b 100644
--- a/contrib/hstore/hstore.h
+++ b/contrib/hstore/hstore.h
@@ -12,7 +12,7 @@
* HEntry: there is one of these for each key _and_ value in an hstore
*
* the position offset points to the _end_ so that we can get the length
- * by subtraction from the previous entry. the ISFIRST flag lets us tell
+ * by subtraction from the previous entry. the ISFIRST flag lets us tell
* whether there is a previous entry.
*/
typedef struct
@@ -49,9 +49,12 @@ typedef struct
} HStore;
/*
- * it's not possible to get more than 2^28 items into an hstore,
- * so we reserve the top few bits of the size field. See hstore_compat.c
- * for one reason why. Some bits are left for future use here.
+ * It's not possible to get more than 2^28 items into an hstore, so we reserve
+ * the top few bits of the size field. See hstore_compat.c for one reason
+ * why. Some bits are left for future use here. MaxAllocSize makes the
+ * practical count limit slightly more than 2^28 / 3, or INT_MAX / 24, the
+ * limit for an hstore full of 4-byte keys and null values. Therefore, we
+ * don't explicitly check the format-imposed limit.
*/
#define HS_FLAG_NEWVERSION 0x80000000
@@ -59,6 +62,12 @@ typedef struct
#define HS_SETCOUNT(hsp_,c_) ((hsp_)->size_ = (c_) | HS_FLAG_NEWVERSION)
+/*
+ * "x" comes from an existing HS_COUNT() (as discussed, <= INT_MAX/24) or a
+ * Pairs array length (due to MaxAllocSize, <= INT_MAX/40). "lenstr" is no
+ * more than INT_MAX, that extreme case arising in hstore_from_arrays().
+ * Therefore, this calculation is limited to about INT_MAX / 5 + INT_MAX.
+ */
#define HSHRDSIZE (sizeof(HStore))
#define CALCDATASIZE(x, lenstr) ( (x) * 2 * sizeof(HEntry) + HSHRDSIZE + (lenstr) )
@@ -159,8 +168,8 @@ typedef struct
bool needfree; /* need to pfree the value? */
} Pairs;
-extern int hstoreUniquePairs(Pairs *a, int4 l, int4 *buflen);
-extern HStore *hstorePairs(Pairs *pairs, int4 pcount, int4 buflen);
+extern int hstoreUniquePairs(Pairs *a, int32 l, int32 *buflen);
+extern HStore *hstorePairs(Pairs *pairs, int32 pcount, int32 buflen);
extern size_t hstoreCheckKeyLen(size_t len);
extern size_t hstoreCheckValLen(size_t len);
@@ -185,7 +194,6 @@ extern Pairs *hstoreArrayToPairs(ArrayType *a, int *npairs);
#if HSTORE_POLLUTE_NAMESPACE
#define HSTORE_POLLUTE(newname_,oldname_) \
PG_FUNCTION_INFO_V1(oldname_); \
- Datum oldname_(PG_FUNCTION_ARGS); \
Datum newname_(PG_FUNCTION_ARGS); \
Datum oldname_(PG_FUNCTION_ARGS) { return newname_(fcinfo); } \
extern int no_such_variable
diff --git a/contrib/hstore/hstore_compat.c b/contrib/hstore/hstore_compat.c
index 88764b1b69..6364f032f9 100644
--- a/contrib/hstore/hstore_compat.c
+++ b/contrib/hstore/hstore_compat.c
@@ -94,7 +94,7 @@
* etc. are compatible.
*
* If the above statement isn't true on some bizarre platform, we're
- * a bit hosed (see Assert in hstoreValidOldFormat).
+ * a bit hosed (see StaticAssertStmt in hstoreValidOldFormat).
*/
typedef struct
{
@@ -180,7 +180,8 @@ hstoreValidOldFormat(HStore *hs)
return 0;
/* New format uses an HEntry for key and another for value */
- Assert(sizeof(HOldEntry) == (2 * sizeof(HEntry)));
+ StaticAssertStmt(sizeof(HOldEntry) == 2 * sizeof(HEntry),
+ "old hstore format is not upward-compatible");
if (count == 0)
return 2;
@@ -356,7 +357,6 @@ hstoreUpgrade(Datum orig)
PG_FUNCTION_INFO_V1(hstore_version_diag);
-Datum hstore_version_diag(PG_FUNCTION_ARGS);
Datum
hstore_version_diag(PG_FUNCTION_ARGS)
{
diff --git a/contrib/hstore/hstore_gin.c b/contrib/hstore/hstore_gin.c
index 2007801cf0..68f9061db1 100644
--- a/contrib/hstore/hstore_gin.c
+++ b/contrib/hstore/hstore_gin.c
@@ -13,7 +13,7 @@
/*
* When using a GIN index for hstore, we choose to index both keys and values.
* The storage format is "text" values, with K, V, or N prepended to the string
- * to indicate key, value, or null values. (As of 9.1 it might be better to
+ * to indicate key, value, or null values. (As of 9.1 it might be better to
* store null values as nulls, but we'll keep it this way for on-disk
* compatibility.)
*/
@@ -22,7 +22,6 @@
#define NULLFLAG 'N'
PG_FUNCTION_INFO_V1(gin_extract_hstore);
-Datum gin_extract_hstore(PG_FUNCTION_ARGS);
/* Build an indexable text value */
static text *
@@ -76,7 +75,6 @@ gin_extract_hstore(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(gin_extract_hstore_query);
-Datum gin_extract_hstore_query(PG_FUNCTION_ARGS);
Datum
gin_extract_hstore_query(PG_FUNCTION_ARGS)
@@ -148,7 +146,6 @@ gin_extract_hstore_query(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(gin_consistent_hstore);
-Datum gin_consistent_hstore(PG_FUNCTION_ARGS);
Datum
gin_consistent_hstore(PG_FUNCTION_ARGS)
@@ -168,7 +165,7 @@ gin_consistent_hstore(PG_FUNCTION_ARGS)
{
/*
* Index doesn't have information about correspondence of keys and
- * values, so we need recheck. However, if not all the keys are
+ * values, so we need recheck. However, if not all the keys are
* present, we can fail at once.
*/
*recheck = true;
diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c
index f5c4b71eaf..d4a9aaa4c1 100644
--- a/contrib/hstore/hstore_gist.c
+++ b/contrib/hstore/hstore_gist.c
@@ -40,7 +40,7 @@ typedef char *BITVECP;
typedef struct
{
int32 vl_len_; /* varlena header (do not touch directly!) */
- int4 flag;
+ int32 flag;
char data[1];
} GISTTYPE;
@@ -48,7 +48,7 @@ typedef struct
#define ISALLTRUE(x) ( ((GISTTYPE*)x)->flag & ALLISTRUE )
-#define GTHDRSIZE (VARHDRSZ + sizeof(int4))
+#define GTHDRSIZE (VARHDRSZ + sizeof(int32))
#define CALCGTSIZE(flag) ( GTHDRSIZE+(((flag) & ALLISTRUE) ? 0 : SIGLEN) )
#define GETSIGN(x) ( (BITVECP)( (char*)x+GTHDRSIZE ) )
@@ -69,10 +69,7 @@ typedef struct
#define WISH_F(a,b,c) (double)( -(double)(((a)-(b))*((a)-(b))*((a)-(b)))*(c) )
PG_FUNCTION_INFO_V1(ghstore_in);
-Datum ghstore_in(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ghstore_out);
-Datum ghstore_out(PG_FUNCTION_ARGS);
Datum
@@ -97,14 +94,6 @@ PG_FUNCTION_INFO_V1(ghstore_picksplit);
PG_FUNCTION_INFO_V1(ghstore_union);
PG_FUNCTION_INFO_V1(ghstore_same);
-Datum ghstore_consistent(PG_FUNCTION_ARGS);
-Datum ghstore_compress(PG_FUNCTION_ARGS);
-Datum ghstore_decompress(PG_FUNCTION_ARGS);
-Datum ghstore_penalty(PG_FUNCTION_ARGS);
-Datum ghstore_picksplit(PG_FUNCTION_ARGS);
-Datum ghstore_union(PG_FUNCTION_ARGS);
-Datum ghstore_same(PG_FUNCTION_ARGS);
-
Datum
ghstore_compress(PG_FUNCTION_ARGS)
{
@@ -143,7 +132,7 @@ ghstore_compress(PG_FUNCTION_ARGS)
}
else if (!ISALLTRUE(DatumGetPointer(entry->key)))
{
- int4 i;
+ int32 i;
GISTTYPE *res;
BITVECP sign = GETSIGN(DatumGetPointer(entry->key));
@@ -192,7 +181,7 @@ ghstore_same(PG_FUNCTION_ARGS)
*result = false;
else
{
- int4 i;
+ int32 i;
BITVECP sa = GETSIGN(a),
sb = GETSIGN(b);
@@ -209,10 +198,10 @@ ghstore_same(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(result);
}
-static int4
+static int32
sizebitvec(BITVECP sign)
{
- int4 size = 0,
+ int32 size = 0,
i;
LOOPBYTE
@@ -253,10 +242,10 @@ hemdist(GISTTYPE *a, GISTTYPE *b)
return hemdistsign(GETSIGN(a), GETSIGN(b));
}
-static int4
+static int32
unionkey(BITVECP sbase, GISTTYPE *add)
{
- int4 i;
+ int32 i;
BITVECP sadd = GETSIGN(add);
if (ISALLTRUE(add))
@@ -270,12 +259,12 @@ Datum
ghstore_union(PG_FUNCTION_ARGS)
{
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
- int4 len = entryvec->n;
+ int32 len = entryvec->n;
int *size = (int *) PG_GETARG_POINTER(1);
BITVEC base;
- int4 i;
- int4 flag = 0;
+ int32 i;
+ int32 flag = 0;
GISTTYPE *result;
MemSet((void *) base, 0, sizeof(BITVEC));
@@ -316,7 +305,7 @@ ghstore_penalty(PG_FUNCTION_ARGS)
typedef struct
{
OffsetNumber pos;
- int4 cost;
+ int32 cost;
} SPLITCOST;
static int
@@ -339,11 +328,11 @@ ghstore_picksplit(PG_FUNCTION_ARGS)
*datum_r;
BITVECP union_l,
union_r;
- int4 size_alpha,
+ int32 size_alpha,
size_beta;
- int4 size_waste,
+ int32 size_waste,
waste = -1;
- int4 nbytes;
+ int32 nbytes;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
diff --git a/contrib/hstore/hstore_io.c b/contrib/hstore/hstore_io.c
index a20d05516a..6acd18fdb0 100644
--- a/contrib/hstore/hstore_io.c
+++ b/contrib/hstore/hstore_io.c
@@ -5,10 +5,16 @@
#include <ctype.h>
+#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
+#include "lib/stringinfo.h"
#include "libpq/pqformat.h"
+#include "utils/builtins.h"
+#include "utils/json.h"
+#include "utils/jsonb.h"
#include "utils/lsyscache.h"
+#include "utils/memutils.h"
#include "utils/typcache.h"
#include "hstore.h"
@@ -36,7 +42,7 @@ typedef struct
do { \
if ( state->cur - state->word + 1 >= state->wordlen ) \
{ \
- int4 clen = state->cur - state->word; \
+ int32 clen = state->cur - state->word; \
state->wordlen *= 2; \
state->word = (char*)repalloc( (void*)state->word, state->wordlen ); \
state->cur = state->word + clen; \
@@ -74,7 +80,7 @@ get_val(HSParser *state, bool ignoreeq, bool *escaped)
}
else if (*(state->ptr) == '=' && !ignoreeq)
{
- elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int4) (state->ptr - state->begin));
+ elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin));
}
else if (*(state->ptr) == '\\')
{
@@ -163,8 +169,6 @@ get_val(HSParser *state, bool ignoreeq, bool *escaped)
state->ptr++;
}
-
- return false;
}
#define WKEY 0
@@ -215,7 +219,7 @@ parse_hstore(HSParser *state)
}
else if (!isspace((unsigned char) *(state->ptr)))
{
- elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int4) (state->ptr - state->begin));
+ elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin));
}
}
else if (st == WGT)
@@ -230,7 +234,7 @@ parse_hstore(HSParser *state)
}
else
{
- elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int4) (state->ptr - state->begin));
+ elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin));
}
}
else if (st == WVAL)
@@ -263,7 +267,7 @@ parse_hstore(HSParser *state)
}
else if (!isspace((unsigned char) *(state->ptr)))
{
- elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int4) (state->ptr - state->begin));
+ elog(ERROR, "Syntax error near '%c' at position %d", *(state->ptr), (int32) (state->ptr - state->begin));
}
}
else
@@ -304,7 +308,7 @@ comparePairs(const void *a, const void *b)
* and (b) who knows whether they might be needed by some caller.
*/
int
-hstoreUniquePairs(Pairs *a, int4 l, int4 *buflen)
+hstoreUniquePairs(Pairs *a, int32 l, int32 *buflen)
{
Pairs *ptr,
*res;
@@ -367,14 +371,14 @@ hstoreCheckValLen(size_t len)
HStore *
-hstorePairs(Pairs *pairs, int4 pcount, int4 buflen)
+hstorePairs(Pairs *pairs, int32 pcount, int32 buflen)
{
HStore *out;
HEntry *entry;
char *ptr;
char *buf;
- int4 len;
- int4 i;
+ int32 len;
+ int32 i;
len = CALCDATASIZE(pcount, buflen);
out = palloc(len);
@@ -397,12 +401,11 @@ hstorePairs(Pairs *pairs, int4 pcount, int4 buflen)
PG_FUNCTION_INFO_V1(hstore_in);
-Datum hstore_in(PG_FUNCTION_ARGS);
Datum
hstore_in(PG_FUNCTION_ARGS)
{
HSParser state;
- int4 buflen;
+ int32 buflen;
HStore *out;
state.begin = PG_GETARG_CSTRING(0);
@@ -418,15 +421,14 @@ hstore_in(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_recv);
-Datum hstore_recv(PG_FUNCTION_ARGS);
Datum
hstore_recv(PG_FUNCTION_ARGS)
{
- int4 buflen;
+ int32 buflen;
HStore *out;
Pairs *pairs;
- int4 i;
- int4 pcount;
+ int32 i;
+ int32 pcount;
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
pcount = pq_getmsgint(buf, 4);
@@ -437,6 +439,11 @@ hstore_recv(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(out);
}
+ if (pcount < 0 || pcount > MaxAllocSize / sizeof(Pairs))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of pairs (%d) exceeds the maximum allowed (%d)",
+ pcount, (int) (MaxAllocSize / sizeof(Pairs)))));
pairs = palloc(pcount * sizeof(Pairs));
for (i = 0; i < pcount; ++i)
@@ -477,7 +484,6 @@ hstore_recv(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_from_text);
-Datum hstore_from_text(PG_FUNCTION_ARGS);
Datum
hstore_from_text(PG_FUNCTION_ARGS)
{
@@ -514,11 +520,10 @@ hstore_from_text(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_from_arrays);
-Datum hstore_from_arrays(PG_FUNCTION_ARGS);
Datum
hstore_from_arrays(PG_FUNCTION_ARGS)
{
- int4 buflen;
+ int32 buflen;
HStore *out;
Pairs *pairs;
Datum *key_datums;
@@ -552,6 +557,13 @@ hstore_from_arrays(PG_FUNCTION_ARGS)
TEXTOID, -1, false, 'i',
&key_datums, &key_nulls, &key_count);
+ /* see discussion in hstoreArrayToPairs() */
+ if (key_count > MaxAllocSize / sizeof(Pairs))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of pairs (%d) exceeds the maximum allowed (%d)",
+ key_count, (int) (MaxAllocSize / sizeof(Pairs)))));
+
/* value_array might be NULL */
if (PG_ARGISNULL(1))
@@ -625,14 +637,13 @@ hstore_from_arrays(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_from_array);
-Datum hstore_from_array(PG_FUNCTION_ARGS);
Datum
hstore_from_array(PG_FUNCTION_ARGS)
{
ArrayType *in_array = PG_GETARG_ARRAYTYPE_P(0);
int ndims = ARR_NDIM(in_array);
int count;
- int4 buflen;
+ int32 buflen;
HStore *out;
Pairs *pairs;
Datum *in_datums;
@@ -674,6 +685,13 @@ hstore_from_array(PG_FUNCTION_ARGS)
count = in_count / 2;
+ /* see discussion in hstoreArrayToPairs() */
+ if (count > MaxAllocSize / sizeof(Pairs))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of pairs (%d) exceeds the maximum allowed (%d)",
+ count, (int) (MaxAllocSize / sizeof(Pairs)))));
+
pairs = palloc(count * sizeof(Pairs));
for (i = 0; i < count; ++i)
@@ -732,12 +750,11 @@ typedef struct RecordIOData
} RecordIOData;
PG_FUNCTION_INFO_V1(hstore_from_record);
-Datum hstore_from_record(PG_FUNCTION_ARGS);
Datum
hstore_from_record(PG_FUNCTION_ARGS)
{
HeapTupleHeader rec;
- int4 buflen;
+ int32 buflen;
HStore *out;
Pairs *pairs;
Oid tupType;
@@ -805,6 +822,7 @@ hstore_from_record(PG_FUNCTION_ARGS)
my_extra->ncolumns = ncolumns;
}
+ Assert(ncolumns <= MaxTupleAttributeNumber); /* thus, no overflow */
pairs = palloc(ncolumns * sizeof(Pairs));
if (rec)
@@ -888,7 +906,6 @@ hstore_from_record(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_populate_record);
-Datum hstore_populate_record(PG_FUNCTION_ARGS);
Datum
hstore_populate_record(PG_FUNCTION_ARGS)
{
@@ -1104,7 +1121,6 @@ cpw(char *dst, char *src, int len)
}
PG_FUNCTION_INFO_V1(hstore_out);
-Datum hstore_out(PG_FUNCTION_ARGS);
Datum
hstore_out(PG_FUNCTION_ARGS)
{
@@ -1118,11 +1134,7 @@ hstore_out(PG_FUNCTION_ARGS)
HEntry *entries = ARRPTR(in);
if (count == 0)
- {
- out = palloc(1);
- *out = '\0';
- PG_RETURN_CSTRING(out);
- }
+ PG_RETURN_CSTRING(pstrdup(""));
buflen = 0;
@@ -1180,7 +1192,6 @@ hstore_out(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_send);
-Datum hstore_send(PG_FUNCTION_ARGS);
Datum
hstore_send(PG_FUNCTION_ARGS)
{
@@ -1216,3 +1227,299 @@ hstore_send(PG_FUNCTION_ARGS)
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
+
+
+/*
+ * hstore_to_json_loose
+ *
+ * This is a heuristic conversion to json which treats
+ * 't' and 'f' as booleans and strings that look like numbers as numbers,
+ * as long as they don't start with a leading zero followed by another digit
+ * (think zip codes or phone numbers starting with 0).
+ */
+PG_FUNCTION_INFO_V1(hstore_to_json_loose);
+Datum
+hstore_to_json_loose(PG_FUNCTION_ARGS)
+{
+ HStore *in = PG_GETARG_HS(0);
+ int i;
+ int count = HS_COUNT(in);
+ char *base = STRPTR(in);
+ HEntry *entries = ARRPTR(in);
+ bool is_number;
+ StringInfoData tmp,
+ dst;
+
+ if (count == 0)
+ PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
+
+ initStringInfo(&tmp);
+ initStringInfo(&dst);
+
+ appendStringInfoChar(&dst, '{');
+
+ for (i = 0; i < count; i++)
+ {
+ resetStringInfo(&tmp);
+ appendBinaryStringInfo(&tmp, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
+ escape_json(&dst, tmp.data);
+ appendStringInfoString(&dst, ": ");
+ if (HS_VALISNULL(entries, i))
+ appendStringInfoString(&dst, "null");
+ /* guess that values of 't' or 'f' are booleans */
+ else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 't')
+ appendStringInfoString(&dst, "true");
+ else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 'f')
+ appendStringInfoString(&dst, "false");
+ else
+ {
+ is_number = false;
+ resetStringInfo(&tmp);
+ appendBinaryStringInfo(&tmp, HS_VAL(entries, base, i), HS_VALLEN(entries, i));
+
+ /*
+ * don't treat something with a leading zero followed by another
+ * digit as numeric - could be a zip code or similar
+ */
+ if (tmp.len > 0 &&
+ !(tmp.data[0] == '0' &&
+ isdigit((unsigned char) tmp.data[1])) &&
+ strspn(tmp.data, "+-0123456789Ee.") == tmp.len)
+ {
+ /*
+ * might be a number. See if we can input it as a numeric
+ * value. Ignore any actual parsed value.
+ */
+ char *endptr = "junk";
+ long lval;
+
+ lval = strtol(tmp.data, &endptr, 10);
+ (void) lval;
+ if (*endptr == '\0')
+ {
+ /*
+				 * strtol man page says this means the whole string is
+				 * valid
+ */
+ is_number = true;
+ }
+ else
+ {
+ /* not an int - try a double */
+ double dval;
+
+ dval = strtod(tmp.data, &endptr);
+ (void) dval;
+ if (*endptr == '\0')
+ is_number = true;
+ }
+ }
+ if (is_number)
+ appendBinaryStringInfo(&dst, tmp.data, tmp.len);
+ else
+ escape_json(&dst, tmp.data);
+ }
+
+ if (i + 1 != count)
+ appendStringInfoString(&dst, ", ");
+ }
+ appendStringInfoChar(&dst, '}');
+
+ PG_RETURN_TEXT_P(cstring_to_text(dst.data));
+}
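The heuristic is easiest to see from the regression output earlier in this diff (hstore.out): b => t becomes true, g => 2.345e+4 is emitted as a bare number, but e => 012345 keeps its quotes because of the leading zero. For example:

    SELECT hstore_to_json_loose('b => t, e => 012345, g => 2.345e+4');
    -- per the expected output above: {"b": true, "e": "012345", "g": 2.345e+4}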
+
+PG_FUNCTION_INFO_V1(hstore_to_json);
+Datum
+hstore_to_json(PG_FUNCTION_ARGS)
+{
+ HStore *in = PG_GETARG_HS(0);
+ int i;
+ int count = HS_COUNT(in);
+ char *base = STRPTR(in);
+ HEntry *entries = ARRPTR(in);
+ StringInfoData tmp,
+ dst;
+
+ if (count == 0)
+ PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
+
+ initStringInfo(&tmp);
+ initStringInfo(&dst);
+
+ appendStringInfoChar(&dst, '{');
+
+ for (i = 0; i < count; i++)
+ {
+ resetStringInfo(&tmp);
+ appendBinaryStringInfo(&tmp, HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
+ escape_json(&dst, tmp.data);
+ appendStringInfoString(&dst, ": ");
+ if (HS_VALISNULL(entries, i))
+ appendStringInfoString(&dst, "null");
+ else
+ {
+ resetStringInfo(&tmp);
+ appendBinaryStringInfo(&tmp, HS_VAL(entries, base, i), HS_VALLEN(entries, i));
+ escape_json(&dst, tmp.data);
+ }
+
+ if (i + 1 != count)
+ appendStringInfoString(&dst, ", ");
+ }
+ appendStringInfoChar(&dst, '}');
+
+ PG_RETURN_TEXT_P(cstring_to_text(dst.data));
+}
+
+PG_FUNCTION_INFO_V1(hstore_to_jsonb);
+Datum
+hstore_to_jsonb(PG_FUNCTION_ARGS)
+{
+ HStore *in = PG_GETARG_HS(0);
+ int i;
+ int count = HS_COUNT(in);
+ char *base = STRPTR(in);
+ HEntry *entries = ARRPTR(in);
+ JsonbParseState *state = NULL;
+ JsonbValue *res;
+
+ res = pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL);
+
+ for (i = 0; i < count; i++)
+ {
+ JsonbValue key,
+ val;
+
+ key.type = jbvString;
+ key.val.string.len = HS_KEYLEN(entries, i);
+ key.val.string.val = HS_KEY(entries, base, i);
+
+ res = pushJsonbValue(&state, WJB_KEY, &key);
+
+ if (HS_VALISNULL(entries, i))
+ {
+ val.type = jbvNull;
+ }
+ else
+ {
+ val.type = jbvString;
+ val.val.string.len = HS_VALLEN(entries, i);
+ val.val.string.val = HS_VAL(entries, base, i);
+ }
+ res = pushJsonbValue(&state, WJB_VALUE, &val);
+ }
+
+ res = pushJsonbValue(&state, WJB_END_OBJECT, NULL);
+
+ PG_RETURN_POINTER(JsonbValueToJsonb(res));
+}
+
+PG_FUNCTION_INFO_V1(hstore_to_jsonb_loose);
+Datum
+hstore_to_jsonb_loose(PG_FUNCTION_ARGS)
+{
+ HStore *in = PG_GETARG_HS(0);
+ int i;
+ int count = HS_COUNT(in);
+ char *base = STRPTR(in);
+ HEntry *entries = ARRPTR(in);
+ JsonbParseState *state = NULL;
+ JsonbValue *res;
+ StringInfoData tmp;
+ bool is_number;
+
+ initStringInfo(&tmp);
+
+ res = pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL);
+
+ for (i = 0; i < count; i++)
+ {
+ JsonbValue key,
+ val;
+
+ key.type = jbvString;
+ key.val.string.len = HS_KEYLEN(entries, i);
+ key.val.string.val = HS_KEY(entries, base, i);
+
+ res = pushJsonbValue(&state, WJB_KEY, &key);
+
+ if (HS_VALISNULL(entries, i))
+ {
+ val.type = jbvNull;
+ }
+ /* guess that values of 't' or 'f' are booleans */
+ else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 't')
+ {
+ val.type = jbvBool;
+ val.val.boolean = true;
+ }
+ else if (HS_VALLEN(entries, i) == 1 && *(HS_VAL(entries, base, i)) == 'f')
+ {
+ val.type = jbvBool;
+ val.val.boolean = false;
+ }
+ else
+ {
+ is_number = false;
+ resetStringInfo(&tmp);
+
+ appendBinaryStringInfo(&tmp, HS_VAL(entries, base, i), HS_VALLEN(entries, i));
+
+ /*
+ * don't treat something with a leading zero followed by another
+ * digit as numeric - could be a zip code or similar
+ */
+ if (tmp.len > 0 &&
+ !(tmp.data[0] == '0' &&
+ isdigit((unsigned char) tmp.data[1])) &&
+ strspn(tmp.data, "+-0123456789Ee.") == tmp.len)
+ {
+ /*
+ * might be a number. See if we can input it as a numeric
+ * value. Ignore any actual parsed value.
+ */
+ char *endptr = "junk";
+ long lval;
+
+ lval = strtol(tmp.data, &endptr, 10);
+ (void) lval;
+ if (*endptr == '\0')
+ {
+ /*
+				 * strtol man page says this means the whole string is
+				 * valid
+ */
+ is_number = true;
+ }
+ else
+ {
+ /* not an int - try a double */
+ double dval;
+
+ dval = strtod(tmp.data, &endptr);
+ (void) dval;
+ if (*endptr == '\0')
+ is_number = true;
+ }
+ }
+ if (is_number)
+ {
+ val.type = jbvNumeric;
+ val.val.numeric = DatumGetNumeric(
+ DirectFunctionCall3(numeric_in, CStringGetDatum(tmp.data), 0, -1));
+
+ }
+ else
+ {
+ val.type = jbvString;
+ val.val.string.len = HS_VALLEN(entries, i);
+ val.val.string.val = HS_VAL(entries, base, i);
+ }
+ }
+ res = pushJsonbValue(&state, WJB_VALUE, &val);
+ }
+
+ res = pushJsonbValue(&state, WJB_END_OBJECT, NULL);
+
+ PG_RETURN_POINTER(JsonbValueToJsonb(res));
+}
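And the jsonb-flavoured loose conversion, where recognized numbers go through numeric_in and so are normalized (hstore.out above shows 2.345e+4 coming back as 23450):

    SELECT hstore_to_jsonb_loose('d => 12345, e => 012345, g => 2.345e+4');
    -- per the expected output above: {"d": 12345, "e": "012345", "g": 23450}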
diff --git a/contrib/hstore/hstore_op.c b/contrib/hstore/hstore_op.c
index fee2c3c5ae..9e18a2b512 100644
--- a/contrib/hstore/hstore_op.c
+++ b/contrib/hstore/hstore_op.c
@@ -4,9 +4,11 @@
#include "postgres.h"
#include "access/hash.h"
+#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "utils/builtins.h"
+#include "utils/memutils.h"
#include "hstore.h"
@@ -89,6 +91,19 @@ hstoreArrayToPairs(ArrayType *a, int *npairs)
return NULL;
}
+ /*
+ * A text array uses at least eight bytes per element, so any overflow in
+ * "key_count * sizeof(Pairs)" is small enough for palloc() to catch.
+ * However, credible improvements to the array format could invalidate
+ * that assumption. Therefore, use an explicit check rather than relying
+ * on palloc() to complain.
+ */
+ if (key_count > MaxAllocSize / sizeof(Pairs))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of pairs (%d) exceeds the maximum allowed (%d)",
+ key_count, (int) (MaxAllocSize / sizeof(Pairs)))));
+
key_pairs = palloc(sizeof(Pairs) * key_count);
for (i = 0, j = 0; i < key_count; i++)
@@ -112,7 +127,6 @@ hstoreArrayToPairs(ArrayType *a, int *npairs)
PG_FUNCTION_INFO_V1(hstore_fetchval);
-Datum hstore_fetchval(PG_FUNCTION_ARGS);
Datum
hstore_fetchval(PG_FUNCTION_ARGS)
{
@@ -134,7 +148,6 @@ hstore_fetchval(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_exists);
-Datum hstore_exists(PG_FUNCTION_ARGS);
Datum
hstore_exists(PG_FUNCTION_ARGS)
{
@@ -148,7 +161,6 @@ hstore_exists(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_exists_any);
-Datum hstore_exists_any(PG_FUNCTION_ARGS);
Datum
hstore_exists_any(PG_FUNCTION_ARGS)
{
@@ -183,7 +195,6 @@ hstore_exists_any(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_exists_all);
-Datum hstore_exists_all(PG_FUNCTION_ARGS);
Datum
hstore_exists_all(PG_FUNCTION_ARGS)
{
@@ -218,7 +229,6 @@ hstore_exists_all(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_defined);
-Datum hstore_defined(PG_FUNCTION_ARGS);
Datum
hstore_defined(PG_FUNCTION_ARGS)
{
@@ -234,7 +244,6 @@ hstore_defined(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_delete);
-Datum hstore_delete(PG_FUNCTION_ARGS);
Datum
hstore_delete(PG_FUNCTION_ARGS)
{
@@ -281,7 +290,6 @@ hstore_delete(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_delete_array);
-Datum hstore_delete_array(PG_FUNCTION_ARGS);
Datum
hstore_delete_array(PG_FUNCTION_ARGS)
{
@@ -361,7 +369,6 @@ hstore_delete_array(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_delete_hstore);
-Datum hstore_delete_hstore(PG_FUNCTION_ARGS);
Datum
hstore_delete_hstore(PG_FUNCTION_ARGS)
{
@@ -461,7 +468,6 @@ hstore_delete_hstore(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_concat);
-Datum hstore_concat(PG_FUNCTION_ARGS);
Datum
hstore_concat(PG_FUNCTION_ARGS)
{
@@ -560,7 +566,6 @@ hstore_concat(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_slice_to_array);
-Datum hstore_slice_to_array(PG_FUNCTION_ARGS);
Datum
hstore_slice_to_array(PG_FUNCTION_ARGS)
{
@@ -624,7 +629,6 @@ hstore_slice_to_array(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_slice_to_hstore);
-Datum hstore_slice_to_hstore(PG_FUNCTION_ARGS);
Datum
hstore_slice_to_hstore(PG_FUNCTION_ARGS)
{
@@ -647,6 +651,7 @@ hstore_slice_to_hstore(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(out);
}
+ /* hstoreArrayToPairs() checked overflow */
out_pairs = palloc(sizeof(Pairs) * nkeys);
bufsiz = 0;
@@ -686,7 +691,6 @@ hstore_slice_to_hstore(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_akeys);
-Datum hstore_akeys(PG_FUNCTION_ARGS);
Datum
hstore_akeys(PG_FUNCTION_ARGS)
{
@@ -722,7 +726,6 @@ hstore_akeys(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_avals);
-Datum hstore_avals(PG_FUNCTION_ARGS);
Datum
hstore_avals(PG_FUNCTION_ARGS)
{
@@ -819,7 +822,6 @@ hstore_to_array_internal(HStore *hs, int ndims)
}
PG_FUNCTION_INFO_V1(hstore_to_array);
-Datum hstore_to_array(PG_FUNCTION_ARGS);
Datum
hstore_to_array(PG_FUNCTION_ARGS)
{
@@ -830,7 +832,6 @@ hstore_to_array(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(hstore_to_matrix);
-Datum hstore_to_matrix(PG_FUNCTION_ARGS);
Datum
hstore_to_matrix(PG_FUNCTION_ARGS)
{
@@ -879,7 +880,6 @@ setup_firstcall(FuncCallContext *funcctx, HStore *hs,
PG_FUNCTION_INFO_V1(hstore_skeys);
-Datum hstore_skeys(PG_FUNCTION_ARGS);
Datum
hstore_skeys(PG_FUNCTION_ARGS)
{
@@ -914,7 +914,6 @@ hstore_skeys(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_svals);
-Datum hstore_svals(PG_FUNCTION_ARGS);
Datum
hstore_svals(PG_FUNCTION_ARGS)
{
@@ -963,7 +962,6 @@ hstore_svals(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_contains);
-Datum hstore_contains(PG_FUNCTION_ARGS);
Datum
hstore_contains(PG_FUNCTION_ARGS)
{
@@ -1010,7 +1008,6 @@ hstore_contains(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_contained);
-Datum hstore_contained(PG_FUNCTION_ARGS);
Datum
hstore_contained(PG_FUNCTION_ARGS)
{
@@ -1022,7 +1019,6 @@ hstore_contained(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_each);
-Datum hstore_each(PG_FUNCTION_ARGS);
Datum
hstore_each(PG_FUNCTION_ARGS)
{
@@ -1084,7 +1080,6 @@ hstore_each(PG_FUNCTION_ARGS)
*/
PG_FUNCTION_INFO_V1(hstore_cmp);
-Datum hstore_cmp(PG_FUNCTION_ARGS);
Datum
hstore_cmp(PG_FUNCTION_ARGS)
{
@@ -1166,7 +1161,6 @@ hstore_cmp(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_eq);
-Datum hstore_eq(PG_FUNCTION_ARGS);
Datum
hstore_eq(PG_FUNCTION_ARGS)
{
@@ -1178,7 +1172,6 @@ hstore_eq(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(hstore_ne);
-Datum hstore_ne(PG_FUNCTION_ARGS);
Datum
hstore_ne(PG_FUNCTION_ARGS)
{
@@ -1190,7 +1183,6 @@ hstore_ne(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(hstore_gt);
-Datum hstore_gt(PG_FUNCTION_ARGS);
Datum
hstore_gt(PG_FUNCTION_ARGS)
{
@@ -1202,7 +1194,6 @@ hstore_gt(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(hstore_ge);
-Datum hstore_ge(PG_FUNCTION_ARGS);
Datum
hstore_ge(PG_FUNCTION_ARGS)
{
@@ -1214,7 +1205,6 @@ hstore_ge(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(hstore_lt);
-Datum hstore_lt(PG_FUNCTION_ARGS);
Datum
hstore_lt(PG_FUNCTION_ARGS)
{
@@ -1226,7 +1216,6 @@ hstore_lt(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(hstore_le);
-Datum hstore_le(PG_FUNCTION_ARGS);
Datum
hstore_le(PG_FUNCTION_ARGS)
{
@@ -1239,7 +1228,6 @@ hstore_le(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(hstore_hash);
-Datum hstore_hash(PG_FUNCTION_ARGS);
Datum
hstore_hash(PG_FUNCTION_ARGS)
{
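The new check added to hstoreArrayToPairs() above bounds the key count before sizeof(Pairs) * key_count is computed, so the multiplication cannot overflow before palloc() sees it; hstore_slice_to_hstore() then relies on that check, as its new comment notes. The same guard pattern reduced to its essentials (a sketch with a hypothetical helper, assuming backend headers):

    /* Sketch of the overflow-guard pattern used above. */
    #include "postgres.h"
    #include "utils/memutils.h"     /* MaxAllocSize */

    static void *
    palloc_array_checked(int nelems, Size elemsize, const char *what)
    {
        if (nelems < 0 || (Size) nelems > MaxAllocSize / elemsize)
            ereport(ERROR,
                    (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                     errmsg("number of %s (%d) exceeds the maximum allowed (%d)",
                            what, nelems, (int) (MaxAllocSize / elemsize))));
        /* the multiplication is now known not to overflow */
        return palloc(nelems * elemsize);
    }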
diff --git a/contrib/hstore/sql/hstore.sql b/contrib/hstore/sql/hstore.sql
index d046a7f384..5a9e9ee5ae 100644
--- a/contrib/hstore/sql/hstore.sql
+++ b/contrib/hstore/sql/hstore.sql
@@ -330,3 +330,18 @@ set enable_seqscan=off;
select count(*) from testhstore where h #># 'p=>1';
select count(*) from testhstore where h = 'pos=>98, line=>371, node=>CBA, indexed=>t';
+
+-- json and jsonb
+select hstore_to_json('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4');
+select cast( hstore '"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4' as json);
+select hstore_to_json_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4');
+
+select hstore_to_jsonb('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4');
+select cast( hstore '"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4' as jsonb);
+select hstore_to_jsonb_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4');
+
+create table test_json_agg (f1 text, f2 hstore);
+insert into test_json_agg values ('rec1','"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'),
+ ('rec2','"a key" =>2, b => f, c => "null", d=> -12345, e => 012345.6, f=> -1.234, g=> 0.345e-4');
+select json_agg(q) from test_json_agg q;
+select json_agg(q) from (select f1, hstore_to_json_loose(f2) as f2 from test_json_agg) q;
diff --git a/contrib/intarray/_int.h b/contrib/intarray/_int.h
index 11c069890e..7f93206e89 100644
--- a/contrib/intarray/_int.h
+++ b/contrib/intarray/_int.h
@@ -5,12 +5,13 @@
#define ___INT_H__
#include "utils/array.h"
+#include "utils/memutils.h"
/* number ranges for compression */
#define MAXNUMRANGE 100
/* useful macros for accessing int4 arrays */
-#define ARRPTR(x) ( (int4 *) ARR_DATA_PTR(x) )
+#define ARRPTR(x) ( (int32 *) ARR_DATA_PTR(x) )
#define ARRNELEMS(x) ArrayGetNItems(ARR_NDIM(x), ARR_DIMS(x))
/* reject arrays we can't handle; to wit, those containing nulls */
@@ -71,7 +72,7 @@ typedef char *BITVECP;
typedef struct
{
int32 vl_len_; /* varlena header (do not touch directly!) */
- int4 flag;
+ int32 flag;
char data[1];
} GISTTYPE;
@@ -79,7 +80,7 @@ typedef struct
#define ISALLTRUE(x) ( ((GISTTYPE*)x)->flag & ALLISTRUE )
-#define GTHDRSIZE (VARHDRSZ + sizeof(int4))
+#define GTHDRSIZE (VARHDRSZ + sizeof(int32))
#define CALCGTSIZE(flag) ( GTHDRSIZE+(((flag) & ALLISTRUE) ? 0 : SIGLEN) )
#define GETSIGN(x) ( (BITVECP)( (char*)x+GTHDRSIZE ) )
@@ -93,7 +94,7 @@ typedef void (*formfloat) (ArrayType *, float *);
/*
* useful functions
*/
-bool isort(int4 *a, int len);
+bool isort(int32 *a, int len);
ArrayType *new_intArrayType(int num);
ArrayType *copy_intArrayType(ArrayType *a);
ArrayType *resize_intArrayType(ArrayType *a, int num);
@@ -123,20 +124,21 @@ void gensign(BITVEC sign, int *a, int len);
*/
typedef struct ITEM
{
- int2 type;
- int2 left;
- int4 val;
+ int16 type;
+ int16 left;
+ int32 val;
} ITEM;
typedef struct QUERYTYPE
{
int32 vl_len_; /* varlena header (do not touch directly!) */
- int4 size; /* number of ITEMs */
+ int32 size; /* number of ITEMs */
ITEM items[1]; /* variable length array */
} QUERYTYPE;
#define HDRSIZEQT offsetof(QUERYTYPE, items)
#define COMPUTESIZE(size) ( HDRSIZEQT + (size) * sizeof(ITEM) )
+#define QUERYTYPEMAXITEMS ((MaxAllocSize - HDRSIZEQT) / sizeof(ITEM))
#define GETQUERY(x) ( (x)->items )
/* "type" codes for ITEM */
@@ -167,7 +169,7 @@ int compDESC(const void *a, const void *b);
do { \
int _nelems_ = ARRNELEMS(a); \
if (_nelems_ > 1) \
- qsort((void*) ARRPTR(a), _nelems_, sizeof(int4), \
+ qsort((void*) ARRPTR(a), _nelems_, sizeof(int32), \
(direction) ? compASC : compDESC ); \
} while(0)
diff --git a/contrib/intarray/_int_bool.c b/contrib/intarray/_int_bool.c
index 4e63f6d66c..c3c39d194b 100644
--- a/contrib/intarray/_int_bool.c
+++ b/contrib/intarray/_int_bool.c
@@ -10,17 +10,9 @@
PG_FUNCTION_INFO_V1(bqarr_in);
PG_FUNCTION_INFO_V1(bqarr_out);
-Datum bqarr_in(PG_FUNCTION_ARGS);
-Datum bqarr_out(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(boolop);
-Datum boolop(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(rboolop);
-Datum rboolop(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(querytree);
-Datum querytree(PG_FUNCTION_ARGS);
/* parser's states */
@@ -34,27 +26,27 @@ Datum querytree(PG_FUNCTION_ARGS);
*/
typedef struct NODE
{
- int4 type;
- int4 val;
+ int32 type;
+ int32 val;
struct NODE *next;
} NODE;
typedef struct
{
char *buf;
- int4 state;
- int4 count;
+ int32 state;
+ int32 count;
/* reverse polish notation in list (for temporary usage) */
NODE *str;
/* number in str */
- int4 num;
+ int32 num;
} WORKSTATE;
/*
* get token from query string
*/
-static int4
-gettoken(WORKSTATE *state, int4 *val)
+static int32
+gettoken(WORKSTATE *state, int32 *val)
{
char nnn[16];
int innn;
@@ -79,7 +71,7 @@ gettoken(WORKSTATE *state, int4 *val)
else if (*(state->buf) == '!')
{
(state->buf)++;
- *val = (int4) '!';
+ *val = (int32) '!';
return OPR;
}
else if (*(state->buf) == '(')
@@ -103,7 +95,7 @@ gettoken(WORKSTATE *state, int4 *val)
nnn[innn] = '\0';
errno = 0;
lval = strtol(nnn, NULL, 0);
- *val = (int4) lval;
+ *val = (int32) lval;
if (errno != 0 || (long) *val != lval)
return ERR;
state->state = WAITOPERATOR;
@@ -115,7 +107,7 @@ gettoken(WORKSTATE *state, int4 *val)
if (*(state->buf) == '&' || *(state->buf) == '|')
{
state->state = WAITOPERAND;
- *val = (int4) *(state->buf);
+ *val = (int32) *(state->buf);
(state->buf)++;
return OPR;
}
@@ -136,14 +128,13 @@ gettoken(WORKSTATE *state, int4 *val)
}
(state->buf)++;
}
- return END;
}
/*
* push new one in polish notation reverse view
*/
static void
-pushquery(WORKSTATE *state, int4 type, int4 val)
+pushquery(WORKSTATE *state, int32 type, int32 val)
{
NODE *tmp = (NODE *) palloc(sizeof(NODE));
@@ -159,13 +150,13 @@ pushquery(WORKSTATE *state, int4 type, int4 val)
/*
* make polish notation of query
*/
-static int4
+static int32
makepol(WORKSTATE *state)
{
- int4 val,
+ int32 val,
type;
- int4 stack[STACKDEPTH];
- int4 lenstack = 0;
+ int32 stack[STACKDEPTH];
+ int32 lenstack = 0;
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
@@ -176,15 +167,15 @@ makepol(WORKSTATE *state)
{
case VAL:
pushquery(state, type, val);
- while (lenstack && (stack[lenstack - 1] == (int4) '&' ||
- stack[lenstack - 1] == (int4) '!'))
+ while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
+ stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
}
break;
case OPR:
- if (lenstack && val == (int4) '|')
+ if (lenstack && val == (int32) '|')
pushquery(state, OPR, val);
else
{
@@ -199,8 +190,8 @@ makepol(WORKSTATE *state)
case OPEN:
if (makepol(state) == ERR)
return ERR;
- while (lenstack && (stack[lenstack - 1] == (int4) '&' ||
- stack[lenstack - 1] == (int4) '!'))
+ while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
+ stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack]);
@@ -234,8 +225,8 @@ makepol(WORKSTATE *state)
typedef struct
{
- int4 *arrb;
- int4 *arre;
+ int32 *arrb;
+ int32 *arre;
} CHKVAL;
/*
@@ -244,9 +235,9 @@ typedef struct
static bool
checkcondition_arr(void *checkval, ITEM *item)
{
- int4 *StopLow = ((CHKVAL *) checkval)->arrb;
- int4 *StopHigh = ((CHKVAL *) checkval)->arre;
- int4 *StopMiddle;
+ int32 *StopLow = ((CHKVAL *) checkval)->arrb;
+ int32 *StopHigh = ((CHKVAL *) checkval)->arre;
+ int32 *StopMiddle;
/* Loop invariant: StopLow <= val < StopHigh */
@@ -281,13 +272,13 @@ execute(ITEM *curitem, void *checkval, bool calcnot,
if (curitem->type == VAL)
return (*chkcond) (checkval, curitem);
- else if (curitem->val == (int4) '!')
+ else if (curitem->val == (int32) '!')
{
return (calcnot) ?
((execute(curitem - 1, checkval, calcnot, chkcond)) ? false : true)
: true;
}
- else if (curitem->val == (int4) '&')
+ else if (curitem->val == (int32) '&')
{
if (execute(curitem + curitem->left, checkval, calcnot, chkcond))
return execute(curitem - 1, checkval, calcnot, chkcond);
@@ -301,7 +292,6 @@ execute(ITEM *curitem, void *checkval, bool calcnot,
else
return execute(curitem - 1, checkval, calcnot, chkcond);
}
- return false;
}
/*
@@ -355,7 +345,7 @@ gin_bool_consistent(QUERYTYPE *query, bool *check)
return FALSE;
/*
- * Set up data for checkcondition_gin. This must agree with the query
+ * Set up data for checkcondition_gin. This must agree with the query
* extraction code in ginint4_queryextract.
*/
gcv.first = items;
@@ -379,7 +369,7 @@ contains_required_value(ITEM *curitem)
if (curitem->type == VAL)
return true;
- else if (curitem->val == (int4) '!')
+ else if (curitem->val == (int32) '!')
{
/*
* Assume anything under a NOT is non-required. For some cases with
@@ -388,7 +378,7 @@ contains_required_value(ITEM *curitem)
*/
return false;
}
- else if (curitem->val == (int4) '&')
+ else if (curitem->val == (int32) '&')
{
/* If either side has a required value, we're good */
if (contains_required_value(curitem + curitem->left))
@@ -404,7 +394,6 @@ contains_required_value(ITEM *curitem)
else
return false;
}
- return false;
}
bool
@@ -449,8 +438,11 @@ boolop(PG_FUNCTION_ARGS)
}
static void
-findoprnd(ITEM *ptr, int4 *pos)
+findoprnd(ITEM *ptr, int32 *pos)
{
+ /* since this function recurses, it could be driven to stack overflow. */
+ check_stack_depth();
+
#ifdef BS_DEBUG
elog(DEBUG3, (ptr[*pos].type == OPR) ?
"%d %c" : "%d %d", *pos, ptr[*pos].val);
@@ -460,7 +452,7 @@ findoprnd(ITEM *ptr, int4 *pos)
ptr[*pos].left = 0;
(*pos)--;
}
- else if (ptr[*pos].val == (int4) '!')
+ else if (ptr[*pos].val == (int32) '!')
{
ptr[*pos].left = -1;
(*pos)--;
@@ -469,7 +461,7 @@ findoprnd(ITEM *ptr, int4 *pos)
else
{
ITEM *curitem = &ptr[*pos];
- int4 tmp = *pos;
+ int32 tmp = *pos;
(*pos)--;
findoprnd(ptr, pos);
@@ -487,12 +479,12 @@ bqarr_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
WORKSTATE state;
- int4 i;
+ int32 i;
QUERYTYPE *query;
- int4 commonlen;
+ int32 commonlen;
ITEM *ptr;
NODE *tmp;
- int4 pos = 0;
+ int32 pos = 0;
#ifdef BS_DEBUG
StringInfoData pbuf;
@@ -511,7 +503,13 @@ bqarr_in(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("empty query")));
+ if (state.num > QUERYTYPEMAXITEMS)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of query items (%d) exceeds the maximum allowed (%d)",
+ state.num, (int) QUERYTYPEMAXITEMS)));
commonlen = COMPUTESIZE(state.num);
+
query = (QUERYTYPE *) palloc(commonlen);
SET_VARSIZE(query, commonlen);
query->size = state.num;
@@ -553,11 +551,11 @@ typedef struct
ITEM *curpol;
char *buf;
char *cur;
- int4 buflen;
+ int32 buflen;
} INFIX;
#define RESIZEBUF(inf,addsize) while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) { \
- int4 len = inf->cur - inf->buf; \
+ int32 len = inf->cur - inf->buf; \
inf->buflen *= 2; \
inf->buf = (char*) repalloc( (void*)inf->buf, inf->buflen ); \
inf->cur = inf->buf + len; \
@@ -573,7 +571,7 @@ infix(INFIX *in, bool first)
in->cur = strchr(in->cur, '\0');
in->curpol--;
}
- else if (in->curpol->val == (int4) '!')
+ else if (in->curpol->val == (int32) '!')
{
bool isopr = false;
@@ -599,11 +597,11 @@ infix(INFIX *in, bool first)
}
else
{
- int4 op = in->curpol->val;
+ int32 op = in->curpol->val;
INFIX nrm;
in->curpol--;
- if (op == (int4) '|' && !first)
+ if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
@@ -627,7 +625,7 @@ infix(INFIX *in, bool first)
in->cur = strchr(in->cur, '\0');
pfree(nrm.buf);
- if (op == (int4) '|' && !first)
+ if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
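findoprnd() above (and makepol() beside it) now calls check_stack_depth() because it recurses once per level of operator nesting in the user-supplied query, so a deeply nested input could otherwise overrun the C stack. The shape of that defense in isolation (sketch, hypothetical node type):

    #include "postgres.h"
    #include "miscadmin.h"          /* check_stack_depth() */

    typedef struct QueryNode
    {
        struct QueryNode *left;
        struct QueryNode *right;
    } QueryNode;

    /* Sketch: any function that recurses to a depth controlled by user input
     * should check the stack before descending further. */
    static int
    count_nodes(QueryNode *node)
    {
        check_stack_depth();        /* ereport()s if recursion is too deep */

        if (node == NULL)
            return 0;
        return 1 + count_nodes(node->left) + count_nodes(node->right);
    }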
diff --git a/contrib/intarray/_int_gin.c b/contrib/intarray/_int_gin.c
index 9abe54e55f..58352cac80 100644
--- a/contrib/intarray/_int_gin.c
+++ b/contrib/intarray/_int_gin.c
@@ -10,7 +10,6 @@
#include "_int.h"
PG_FUNCTION_INFO_V1(ginint4_queryextract);
-Datum ginint4_queryextract(PG_FUNCTION_ARGS);
Datum
ginint4_queryextract(PG_FUNCTION_ARGS)
@@ -65,7 +64,7 @@ ginint4_queryextract(PG_FUNCTION_ARGS)
*nentries = ARRNELEMS(query);
if (*nentries > 0)
{
- int4 *arr;
+ int32 *arr;
int32 i;
res = (Datum *) palloc(sizeof(Datum) * (*nentries));
@@ -108,7 +107,6 @@ ginint4_queryextract(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(ginint4_consistent);
-Datum ginint4_consistent(PG_FUNCTION_ARGS);
Datum
ginint4_consistent(PG_FUNCTION_ARGS)
diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c
index 0123906be8..53abcc45a5 100644
--- a/contrib/intarray/_int_gist.c
+++ b/contrib/intarray/_int_gist.c
@@ -21,14 +21,6 @@ PG_FUNCTION_INFO_V1(g_int_picksplit);
PG_FUNCTION_INFO_V1(g_int_union);
PG_FUNCTION_INFO_V1(g_int_same);
-Datum g_int_consistent(PG_FUNCTION_ARGS);
-Datum g_int_compress(PG_FUNCTION_ARGS);
-Datum g_int_decompress(PG_FUNCTION_ARGS);
-Datum g_int_penalty(PG_FUNCTION_ARGS);
-Datum g_int_picksplit(PG_FUNCTION_ARGS);
-Datum g_int_union(PG_FUNCTION_ARGS);
-Datum g_int_same(PG_FUNCTION_ARGS);
-
/*
** The GiST Consistent method for _intments
@@ -106,7 +98,7 @@ g_int_union(PG_FUNCTION_ARGS)
{
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
int *size = (int *) PG_GETARG_POINTER(1);
- int4 i,
+ int32 i,
*ptr;
ArrayType *res;
int totlen = 0;
@@ -128,7 +120,7 @@ g_int_union(PG_FUNCTION_ARGS)
int nel;
nel = ARRNELEMS(ent);
- memcpy(ptr, ARRPTR(ent), nel * sizeof(int4));
+ memcpy(ptr, ARRPTR(ent), nel * sizeof(int32));
ptr += nel;
}
@@ -217,8 +209,6 @@ g_int_compress(PG_FUNCTION_ARGS)
}
else
PG_RETURN_POINTER(entry);
-
- PG_RETURN_POINTER(entry);
}
Datum
@@ -317,8 +307,8 @@ g_int_same(PG_FUNCTION_ARGS)
ArrayType *a = PG_GETARG_ARRAYTYPE_P(0);
ArrayType *b = PG_GETARG_ARRAYTYPE_P(1);
bool *result = (bool *) PG_GETARG_POINTER(2);
- int4 n = ARRNELEMS(a);
- int4 *da,
+ int32 n = ARRNELEMS(a);
+ int32 *da,
*db;
CHECKARRVALID(a);
@@ -482,7 +472,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
@@ -500,7 +490,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/
diff --git a/contrib/intarray/_int_op.c b/contrib/intarray/_int_op.c
index 4c2aa7af8b..70849be57f 100644
--- a/contrib/intarray/_int_op.c
+++ b/contrib/intarray/_int_op.c
@@ -17,14 +17,6 @@ PG_FUNCTION_INFO_V1(_int_overlap);
PG_FUNCTION_INFO_V1(_int_union);
PG_FUNCTION_INFO_V1(_int_inter);
-Datum _int_different(PG_FUNCTION_ARGS);
-Datum _int_same(PG_FUNCTION_ARGS);
-Datum _int_contains(PG_FUNCTION_ARGS);
-Datum _int_contained(PG_FUNCTION_ARGS);
-Datum _int_overlap(PG_FUNCTION_ARGS);
-Datum _int_union(PG_FUNCTION_ARGS);
-Datum _int_inter(PG_FUNCTION_ARGS);
-
Datum
_int_contained(PG_FUNCTION_ARGS)
{
@@ -188,19 +180,6 @@ PG_FUNCTION_INFO_V1(intarray_push_array);
PG_FUNCTION_INFO_V1(intarray_del_elem);
PG_FUNCTION_INFO_V1(intset_union_elem);
PG_FUNCTION_INFO_V1(intset_subtract);
-Datum intset(PG_FUNCTION_ARGS);
-Datum icount(PG_FUNCTION_ARGS);
-Datum sort(PG_FUNCTION_ARGS);
-Datum sort_asc(PG_FUNCTION_ARGS);
-Datum sort_desc(PG_FUNCTION_ARGS);
-Datum uniq(PG_FUNCTION_ARGS);
-Datum idx(PG_FUNCTION_ARGS);
-Datum subarray(PG_FUNCTION_ARGS);
-Datum intarray_push_elem(PG_FUNCTION_ARGS);
-Datum intarray_push_array(PG_FUNCTION_ARGS);
-Datum intarray_del_elem(PG_FUNCTION_ARGS);
-Datum intset_union_elem(PG_FUNCTION_ARGS);
-Datum intset_subtract(PG_FUNCTION_ARGS);
Datum
intset(PG_FUNCTION_ARGS)
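The blocks of explicit "Datum foo(PG_FUNCTION_ARGS);" declarations removed above are redundant because PG_FUNCTION_INFO_V1() itself now emits the extern prototype (a 9.4 change). A complete V1 function therefore needs only the macro plus the definition, roughly as follows (sketch, hypothetical function name):

    #include "postgres.h"
    #include "fmgr.h"

    PG_MODULE_MAGIC;

    /* The macro also declares: extern Datum add_one(PG_FUNCTION_ARGS); */
    PG_FUNCTION_INFO_V1(add_one);

    Datum
    add_one(PG_FUNCTION_ARGS)
    {
        int32   arg = PG_GETARG_INT32(0);

        PG_RETURN_INT32(arg + 1);
    }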
diff --git a/contrib/intarray/_int_tool.c b/contrib/intarray/_int_tool.c
index 132d153160..511c7acb54 100644
--- a/contrib/intarray/_int_tool.c
+++ b/contrib/intarray/_int_tool.c
@@ -184,19 +184,19 @@ rt__int_size(ArrayType *a, float *size)
*size = (float) ARRNELEMS(a);
}
-/* Sort the given data (len >= 2). Return true if any duplicates found */
+/* Sort the given data (len >= 2). Return true if any duplicates found */
bool
-isort(int4 *a, int len)
+isort(int32 *a, int len)
{
- int4 cur,
+ int32 cur,
prev;
- int4 *pcur,
+ int32 *pcur,
*pprev,
*end;
bool r = FALSE;
/*
- * We use a simple insertion sort. While this is O(N^2) in the worst
+ * We use a simple insertion sort. While this is O(N^2) in the worst
* case, it's quite fast if the input is already sorted or nearly so.
* Also, for not-too-large inputs it's faster than more complex methods
* anyhow.
@@ -246,6 +246,13 @@ resize_intArrayType(ArrayType *a, int num)
int nbytes = ARR_DATA_OFFSET(a) + sizeof(int) * num;
int i;
+ /* if no elements, return a zero-dimensional array */
+ if (num == 0)
+ {
+ ARR_NDIM(a) = 0;
+ return a;
+ }
+
if (num == ARRNELEMS(a))
return a;
@@ -268,7 +275,7 @@ copy_intArrayType(ArrayType *a)
int n = ARRNELEMS(a);
r = new_intArrayType(n);
- memcpy(ARRPTR(r), ARRPTR(a), n * sizeof(int4));
+ memcpy(ARRPTR(r), ARRPTR(a), n * sizeof(int32));
return r;
}
@@ -389,15 +396,15 @@ int_to_intset(int32 n)
int
compASC(const void *a, const void *b)
{
- if (*(const int4 *) a == *(const int4 *) b)
+ if (*(const int32 *) a == *(const int32 *) b)
return 0;
- return (*(const int4 *) a > *(const int4 *) b) ? 1 : -1;
+ return (*(const int32 *) a > *(const int32 *) b) ? 1 : -1;
}
int
compDESC(const void *a, const void *b)
{
- if (*(const int4 *) a == *(const int4 *) b)
+ if (*(const int32 *) a == *(const int32 *) b)
return 0;
- return (*(const int4 *) a < *(const int4 *) b) ? 1 : -1;
+ return (*(const int32 *) a < *(const int32 *) b) ? 1 : -1;
}
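resize_intArrayType() above now treats num == 0 specially by setting the array's ndim to 0, because PostgreSQL's canonical empty array is zero-dimensional rather than a one-dimensional array of length zero. Outside this module, the usual way to obtain such a value is construct_empty_array() (sketch):

    #include "postgres.h"
    #include "catalog/pg_type.h"    /* INT4OID */
    #include "utils/array.h"

    /* Sketch: build the canonical empty integer array (ndim == 0). */
    static ArrayType *
    make_empty_int4_array(void)
    {
        ArrayType *a = construct_empty_array(INT4OID);

        Assert(ARR_NDIM(a) == 0);
        return a;
    }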
diff --git a/contrib/intarray/_intbig_gist.c b/contrib/intarray/_intbig_gist.c
index c6b00eaeff..235db38957 100644
--- a/contrib/intarray/_intbig_gist.c
+++ b/contrib/intarray/_intbig_gist.c
@@ -20,14 +20,6 @@ PG_FUNCTION_INFO_V1(g_intbig_picksplit);
PG_FUNCTION_INFO_V1(g_intbig_union);
PG_FUNCTION_INFO_V1(g_intbig_same);
-Datum g_intbig_consistent(PG_FUNCTION_ARGS);
-Datum g_intbig_compress(PG_FUNCTION_ARGS);
-Datum g_intbig_decompress(PG_FUNCTION_ARGS);
-Datum g_intbig_penalty(PG_FUNCTION_ARGS);
-Datum g_intbig_picksplit(PG_FUNCTION_ARGS);
-Datum g_intbig_union(PG_FUNCTION_ARGS);
-Datum g_intbig_same(PG_FUNCTION_ARGS);
-
/* Number of one-bits in an unsigned byte */
static const uint8 number_of_ones[256] = {
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
@@ -49,11 +41,7 @@ static const uint8 number_of_ones[256] = {
};
PG_FUNCTION_INFO_V1(_intbig_in);
-Datum _intbig_in(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(_intbig_out);
-Datum _intbig_out(PG_FUNCTION_ARGS);
-
Datum
_intbig_in(PG_FUNCTION_ARGS)
@@ -81,7 +69,7 @@ static bool
_intbig_overlap(GISTTYPE *a, ArrayType *b)
{
int num = ARRNELEMS(b);
- int4 *ptr = ARRPTR(b);
+ int32 *ptr = ARRPTR(b);
CHECKARRVALID(b);
@@ -99,7 +87,7 @@ static bool
_intbig_contains(GISTTYPE *a, ArrayType *b)
{
int num = ARRNELEMS(b);
- int4 *ptr = ARRPTR(b);
+ int32 *ptr = ARRPTR(b);
CHECKARRVALID(b);
@@ -128,7 +116,7 @@ g_intbig_same(PG_FUNCTION_ARGS)
*result = false;
else
{
- int4 i;
+ int32 i;
BITVECP sa = GETSIGN(a),
sb = GETSIGN(b);
@@ -154,7 +142,7 @@ g_intbig_compress(PG_FUNCTION_ARGS)
{
GISTENTRY *retval;
ArrayType *in = DatumGetArrayTypeP(entry->key);
- int4 *ptr;
+ int32 *ptr;
int num;
GISTTYPE *res = (GISTTYPE *) palloc0(CALCGTSIZE(0));
@@ -216,10 +204,10 @@ g_intbig_compress(PG_FUNCTION_ARGS)
}
-static int4
+static int32
sizebitvec(BITVECP sign)
{
- int4 size = 0,
+ int32 size = 0,
i;
LOOPBYTE
@@ -264,10 +252,10 @@ g_intbig_decompress(PG_FUNCTION_ARGS)
PG_RETURN_DATUM(PG_GETARG_DATUM(0));
}
-static int4
+static int32
unionkey(BITVECP sbase, GISTTYPE *add)
{
- int4 i;
+ int32 i;
BITVECP sadd = GETSIGN(add);
if (ISALLTRUE(add))
@@ -283,9 +271,9 @@ g_intbig_union(PG_FUNCTION_ARGS)
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
int *size = (int *) PG_GETARG_POINTER(1);
BITVEC base;
- int4 i,
+ int32 i,
len;
- int4 flag = 0;
+ int32 flag = 0;
GISTTYPE *result;
MemSet((void *) base, 0, sizeof(BITVEC));
@@ -326,7 +314,7 @@ g_intbig_penalty(PG_FUNCTION_ARGS)
typedef struct
{
OffsetNumber pos;
- int4 cost;
+ int32 cost;
} SPLITCOST;
static int
@@ -347,11 +335,11 @@ g_intbig_picksplit(PG_FUNCTION_ARGS)
*datum_r;
BITVECP union_l,
union_r;
- int4 size_alpha,
+ int32 size_alpha,
size_beta;
- int4 size_waste,
+ int32 size_waste,
waste = -1;
- int4 nbytes;
+ int32 nbytes;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
@@ -538,7 +526,7 @@ g_intbig_consistent(PG_FUNCTION_ARGS)
{
int i,
num = ARRNELEMS(query);
- int4 *ptr = ARRPTR(query);
+ int32 *ptr = ARRPTR(query);
BITVEC qp;
BITVECP dq,
de;
@@ -577,7 +565,7 @@ g_intbig_consistent(PG_FUNCTION_ARGS)
{
int i,
num = ARRNELEMS(query);
- int4 *ptr = ARRPTR(query);
+ int32 *ptr = ARRPTR(query);
BITVEC qp;
BITVECP dq,
de;
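sizebitvec() in this file (now returning int32) counts the set bits of a GiST signature by summing per-byte lookups in the number_of_ones[] table shown near the top of the diff. The same idea in isolation, written as a nibble-table variant rather than the module's 256-entry table (sketch):

    #include <stddef.h>

    /* Sketch: popcount over a byte array via table lookup, the technique
     * behind number_of_ones[] / sizebitvec() above. */
    static int
    count_set_bits(const unsigned char *sig, size_t len)
    {
        static const unsigned char ones_in_nibble[16] =
            {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
        int     count = 0;
        size_t  i;

        for (i = 0; i < len; i++)
            count += ones_in_nibble[sig[i] >> 4] + ones_in_nibble[sig[i] & 0x0F];
        return count;
    }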
diff --git a/contrib/intarray/bench/bench.pl b/contrib/intarray/bench/bench.pl
index 4e18624b9c..8746291114 100755
--- a/contrib/intarray/bench/bench.pl
+++ b/contrib/intarray/bench/bench.pl
@@ -1,6 +1,7 @@
#!/usr/bin/perl
use strict;
+
# make sure we are in a sane environment.
use DBI();
use DBD::Pg();
@@ -10,7 +11,8 @@ use Getopt::Std;
my %opt;
getopts('d:b:s:veorauc', \%opt);
-if ( !( scalar %opt && defined $opt{s} ) ) {
+if (!(scalar %opt && defined $opt{s}))
+{
print <<EOT;
Usage:
$0 -d DATABASE -s SECTIONS [-b NUMBER] [-v] [-e] [-o] [-r] [-a] [-u]
@@ -30,27 +32,37 @@ EOT
}
$opt{d} ||= '_int4';
-my $dbi=DBI->connect('DBI:Pg:dbname='.$opt{d});
+my $dbi = DBI->connect('DBI:Pg:dbname=' . $opt{d});
my %table;
my @where;
-$table{message}=1;
+$table{message} = 1;
-if ( $opt{a} ) {
- if ( $opt{r} ) {
+if ($opt{a})
+{
+ if ($opt{r})
+ {
push @where, "message.sections @ '{$opt{s}}'";
- } else {
- foreach my $sid ( split(/[,\s]+/, $opt{s} )) {
+ }
+ else
+ {
+ foreach my $sid (split(/[,\s]+/, $opt{s}))
+ {
push @where, "message.mid = msp$sid.mid";
push @where, "msp$sid.sid = $sid";
- $table{"message_section_map msp$sid"}=1;
+ $table{"message_section_map msp$sid"} = 1;
}
}
-} else {
- if ( $opt{r} ) {
+}
+else
+{
+ if ($opt{r})
+ {
push @where, "message.sections && '{$opt{s}}'";
- } else {
+ }
+ else
+ {
$table{message_section_map} = 1;
push @where, "message.mid = message_section_map.mid";
push @where, "message_section_map.sid in ($opt{s})";
@@ -58,48 +70,66 @@ if ( $opt{a} ) {
}
my $outf;
-if ( $opt{c} ) {
- $outf = ( $opt{u} ) ? 'count( distinct message.mid )' : 'count( message.mid )';
-} else {
- $outf = ( $opt{u} ) ? 'distinct( message.mid )' : 'message.mid';
+if ($opt{c})
+{
+ $outf =
+ ($opt{u}) ? 'count( distinct message.mid )' : 'count( message.mid )';
+}
+else
+{
+ $outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
}
-my $sql = "select $outf from ".join(', ', keys %table)." where ".join(' AND ', @where).';';
+my $sql =
+ "select $outf from "
+ . join(', ', keys %table)
+ . " where "
+ . join(' AND ', @where) . ';';
-if ( $opt{v} ) {
+if ($opt{v})
+{
print "$sql\n";
}
-if ( $opt{e} ) {
+if ($opt{e})
+{
$dbi->do("explain $sql");
}
-my $t0 = [gettimeofday];
-my $count=0;
-my $b=$opt{b};
-$b||=1;
+my $t0 = [gettimeofday];
+my $count = 0;
+my $b = $opt{b};
+$b ||= 1;
my @a;
-foreach ( 1..$b ) {
- @a=exec_sql($dbi,$sql);
- $count=$#a;
+foreach (1 .. $b)
+{
+ @a = exec_sql($dbi, $sql);
+ $count = $#a;
}
-my $elapsed = tv_interval ( $t0, [gettimeofday]);
-if ( $opt{o} ) {
- foreach ( @a ) {
+my $elapsed = tv_interval($t0, [gettimeofday]);
+if ($opt{o})
+{
+ foreach (@a)
+ {
print "$_->{mid}\t$_->{sections}\n";
}
}
-print sprintf("total: %.02f sec; number: %d; for one: %.03f sec; found %d docs\n", $elapsed, $b, $elapsed/$b, $count+1 );
-$dbi -> disconnect;
+print sprintf(
+ "total: %.02f sec; number: %d; for one: %.03f sec; found %d docs\n",
+ $elapsed, $b, $elapsed / $b,
+ $count + 1);
+$dbi->disconnect;
-sub exec_sql {
- my ($dbi, $sql, @keys) = @_;
- my $sth=$dbi->prepare($sql) || die;
- $sth->execute( @keys ) || die;
- my $r;
- my @row;
- while ( defined ( $r=$sth->fetchrow_hashref ) ) {
- push @row, $r;
- }
- $sth->finish;
- return @row;
+sub exec_sql
+{
+ my ($dbi, $sql, @keys) = @_;
+ my $sth = $dbi->prepare($sql) || die;
+ $sth->execute(@keys) || die;
+ my $r;
+ my @row;
+ while (defined($r = $sth->fetchrow_hashref))
+ {
+ push @row, $r;
+ }
+ $sth->finish;
+ return @row;
}
diff --git a/contrib/intarray/bench/create_test.pl b/contrib/intarray/bench/create_test.pl
index 67394f87b7..1323b31e4d 100755
--- a/contrib/intarray/bench/create_test.pl
+++ b/contrib/intarray/bench/create_test.pl
@@ -15,28 +15,38 @@ create table message_section_map (
EOT
-open(MSG,">message.tmp") || die;
-open(MAP,">message_section_map.tmp") || die;
+open(MSG, ">message.tmp") || die;
+open(MAP, ">message_section_map.tmp") || die;
+
+srand(1);
-srand( 1 );
#foreach my $i ( 1..1778 ) {
#foreach my $i ( 1..3443 ) {
#foreach my $i ( 1..5000 ) {
#foreach my $i ( 1..29362 ) {
#foreach my $i ( 1..33331 ) {
#foreach my $i ( 1..83268 ) {
-foreach my $i ( 1..200000 ) {
+foreach my $i (1 .. 200000)
+{
my @sect;
- if ( rand() < 0.7 ) {
- $sect[0] = int( (rand()**4)*100 );
- } else {
+ if (rand() < 0.7)
+ {
+ $sect[0] = int((rand()**4) * 100);
+ }
+ else
+ {
my %hash;
- @sect = grep { $hash{$_}++; $hash{$_} <= 1 } map { int( (rand()**4)*100) } 0..( int(rand()*5) );
+ @sect =
+ grep { $hash{$_}++; $hash{$_} <= 1 }
+ map { int((rand()**4) * 100) } 0 .. (int(rand() * 5));
}
- if ( $#sect < 0 || rand() < 0.1 ) {
+ if ($#sect < 0 || rand() < 0.1)
+ {
print MSG "$i\t\\N\n";
- } else {
- print MSG "$i\t{".join(',',@sect)."}\n";
+ }
+ else
+ {
+ print MSG "$i\t{" . join(',', @sect) . "}\n";
map { print MAP "$i\t$_\n" } @sect;
}
}
@@ -64,12 +74,13 @@ EOT
unlink 'message.tmp', 'message_section_map.tmp';
-sub copytable {
+sub copytable
+{
my $t = shift;
print "COPY $t from stdin;\n";
- open( FFF, "$t.tmp") || die;
- while(<FFF>) { print; }
+ open(FFF, "$t.tmp") || die;
+ while (<FFF>) { print; }
close FFF;
print "\\.\n";
}
diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c
index 181bbd4072..1124744979 100644
--- a/contrib/isn/isn.c
+++ b/contrib/isn/isn.c
@@ -4,7 +4,7 @@
* PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
*
* Author: German Mendez Bravo (Kronuz)
- * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/isn/isn.c
@@ -827,7 +827,7 @@ string2ean(const char *str, bool errorOK, ean13 *result,
case ISMN:
strncpy(buf, "9790", 4); /* this isn't for sure yet, for now
* ISMN it's only 9790 */
- valid = (valid && ((rcheck = checkdig(buf + 3, 10)) == check || magic));
+ valid = (valid && ((rcheck = checkdig(buf, 13)) == check || magic));
break;
case ISBN:
strncpy(buf, "978", 3);
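The isn.c change above makes ISMN validation compute the check digit over the full 13-digit EAN string (checkdig(buf, 13)) rather than a 10-digit slice, matching the other EAN-13-based types. For reference, the standard EAN-13 check-digit computation looks roughly like this (sketch; checkdig()'s actual signature and internals in isn.c may differ):

    /* Sketch: compute the EAN-13 check digit for the first 12 digits of
     * 'digits' ('0'..'9' characters); the result should equal digits[12]. */
    static int
    ean13_check_digit(const char *digits)
    {
        int     sum = 0;
        int     i;

        for (i = 0; i < 12; i++)
        {
            int     d = digits[i] - '0';

            /* weights alternate 1,3,1,3,... starting from the leftmost digit */
            sum += (i % 2 == 0) ? d : 3 * d;
        }
        return (10 - (sum % 10)) % 10;
    }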
diff --git a/contrib/isn/isn.h b/contrib/isn/isn.h
index 7a4b3ca102..aca00d8ffa 100644
--- a/contrib/isn/isn.h
+++ b/contrib/isn/isn.h
@@ -4,7 +4,7 @@
* PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC)
*
* Author: German Mendez Bravo (Kronuz)
- * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/isn/isn.h
diff --git a/contrib/lo/lo.c b/contrib/lo/lo.c
index 9dbbbcebd2..4dee64724d 100644
--- a/contrib/lo/lo.c
+++ b/contrib/lo/lo.c
@@ -18,10 +18,6 @@ PG_MODULE_MAGIC;
#define atooid(x) ((Oid) strtoul((x), NULL, 10))
-/* forward declarations */
-Datum lo_manage(PG_FUNCTION_ARGS);
-
-
/*
* This is the trigger that protects us from orphaned large objects
*/
@@ -40,7 +36,12 @@ lo_manage(PG_FUNCTION_ARGS)
HeapTuple trigtuple; /* The original value of tuple */
if (!CALLED_AS_TRIGGER(fcinfo)) /* internal error */
- elog(ERROR, "not fired by trigger manager");
+ elog(ERROR, "%s: not fired by trigger manager",
+ trigdata->tg_trigger->tgname);
+
+ if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) /* internal error */
+ elog(ERROR, "%s: must be fired for row",
+ trigdata->tg_trigger->tgname);
/*
* Fetch some values from trigdata
@@ -50,6 +51,10 @@ lo_manage(PG_FUNCTION_ARGS)
tupdesc = trigdata->tg_relation->rd_att;
args = trigdata->tg_trigger->tgargs;
+ if (args == NULL) /* internal error */
+ elog(ERROR, "%s: no column name provided in the trigger definition",
+ trigdata->tg_trigger->tgname);
+
/* tuple to return to Executor */
if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
rettuple = newtuple;
@@ -63,7 +68,8 @@ lo_manage(PG_FUNCTION_ARGS)
attnum = SPI_fnumber(tupdesc, args[0]);
if (attnum <= 0)
- elog(ERROR, "column \"%s\" does not exist", args[0]);
+ elog(ERROR, "%s: column \"%s\" does not exist",
+ trigdata->tg_trigger->tgname, args[0]);
/*
* Handle updates
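lo_manage() above now verifies not only that it was called by the trigger manager, but also that it was fired FOR EACH ROW and that a column-name argument was supplied, naming the trigger in each error message. A stripped-down version of that guard sequence (sketch, not the full lo_manage body):

    #include "postgres.h"
    #include "fmgr.h"
    #include "commands/trigger.h"

    /* Sketch of the defensive checks a row-level trigger function makes. */
    static void
    check_trigger_call(FunctionCallInfo fcinfo)
    {
        TriggerData *trigdata = (TriggerData *) fcinfo->context;

        /* trigdata is not known to be valid until this check passes */
        if (!CALLED_AS_TRIGGER(fcinfo))
            elog(ERROR, "not fired by trigger manager");

        if (!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
            elog(ERROR, "%s: must be fired for row",
                 trigdata->tg_trigger->tgname);

        if (trigdata->tg_trigger->tgargs == NULL)
            elog(ERROR, "%s: no column name provided in the trigger definition",
                 trigdata->tg_trigger->tgname);
    }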
diff --git a/contrib/ltree/_ltree_gist.c b/contrib/ltree/_ltree_gist.c
index 8afc2bd540..41be68d7ee 100644
--- a/contrib/ltree/_ltree_gist.c
+++ b/contrib/ltree/_ltree_gist.c
@@ -14,22 +14,11 @@
PG_FUNCTION_INFO_V1(_ltree_compress);
-Datum _ltree_compress(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(_ltree_same);
-Datum _ltree_same(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(_ltree_union);
-Datum _ltree_union(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(_ltree_penalty);
-Datum _ltree_penalty(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(_ltree_picksplit);
-Datum _ltree_picksplit(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(_ltree_consistent);
-Datum _ltree_consistent(PG_FUNCTION_ARGS);
#define GETENTRY(vec,pos) ((ltree_gist *) DatumGetPointer((vec)->vector[(pos)].key))
#define NEXTVAL(x) ( (ltree*)( (char*)(x) + INTALIGN( VARSIZE(x) ) ) )
@@ -83,7 +72,7 @@ _ltree_compress(PG_FUNCTION_ARGS)
{ /* ltree */
ltree_gist *key;
ArrayType *val = DatumGetArrayTypeP(entry->key);
- int4 len = LTG_HDRSIZE + ASIGLEN;
+ int32 len = LTG_HDRSIZE + ASIGLEN;
int num = ArrayGetNItems(ARR_NDIM(val), ARR_DIMS(val));
ltree *item = (ltree *) ARR_DATA_PTR(val);
@@ -115,7 +104,7 @@ _ltree_compress(PG_FUNCTION_ARGS)
}
else if (!LTG_ISALLTRUE(entry->key))
{
- int4 i,
+ int32 i,
len;
ltree_gist *key;
@@ -154,7 +143,7 @@ _ltree_same(PG_FUNCTION_ARGS)
*result = false;
else
{
- int4 i;
+ int32 i;
BITVECP sa = LTG_SIGN(a),
sb = LTG_SIGN(b);
@@ -171,10 +160,10 @@ _ltree_same(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(result);
}
-static int4
+static int32
unionkey(BITVECP sbase, ltree_gist *add)
{
- int4 i;
+ int32 i;
BITVECP sadd = LTG_SIGN(add);
if (LTG_ISALLTRUE(add))
@@ -191,9 +180,9 @@ _ltree_union(PG_FUNCTION_ARGS)
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
int *size = (int *) PG_GETARG_POINTER(1);
ABITVEC base;
- int4 i,
+ int32 i,
len;
- int4 flag = 0;
+ int32 flag = 0;
ltree_gist *result;
MemSet((void *) base, 0, sizeof(ABITVEC));
@@ -217,10 +206,10 @@ _ltree_union(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(result);
}
-static int4
+static int32
sizebitvec(BITVECP sign)
{
- int4 size = 0,
+ int32 size = 0,
i;
ALOOPBYTE
@@ -274,7 +263,7 @@ _ltree_penalty(PG_FUNCTION_ARGS)
typedef struct
{
OffsetNumber pos;
- int4 cost;
+ int32 cost;
} SPLITCOST;
static int
@@ -294,11 +283,11 @@ _ltree_picksplit(PG_FUNCTION_ARGS)
*datum_r;
BITVECP union_l,
union_r;
- int4 size_alpha,
+ int32 size_alpha,
size_beta;
- int4 size_waste,
+ int32 size_waste,
waste = -1;
- int4 nbytes;
+ int32 nbytes;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
diff --git a/contrib/ltree/_ltree_op.c b/contrib/ltree/_ltree_op.c
index 1b53af816d..44270d4614 100644
--- a/contrib/ltree/_ltree_op.c
+++ b/contrib/ltree/_ltree_op.c
@@ -22,20 +22,12 @@ PG_FUNCTION_INFO_V1(_lt_q_rregex);
PG_FUNCTION_INFO_V1(_ltxtq_exec);
PG_FUNCTION_INFO_V1(_ltxtq_rexec);
-Datum _ltree_r_isparent(PG_FUNCTION_ARGS);
-Datum _ltree_r_risparent(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(_ltree_extract_isparent);
PG_FUNCTION_INFO_V1(_ltree_extract_risparent);
PG_FUNCTION_INFO_V1(_ltq_extract_regex);
PG_FUNCTION_INFO_V1(_ltxtq_extract_exec);
-Datum _ltree_extract_isparent(PG_FUNCTION_ARGS);
-Datum _ltree_extract_risparent(PG_FUNCTION_ARGS);
-Datum _ltq_extract_regex(PG_FUNCTION_ARGS);
-Datum _ltxtq_extract_exec(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(_lca);
-Datum _lca(PG_FUNCTION_ARGS);
typedef Datum (*PGCALL2) (PG_FUNCTION_ARGS);
diff --git a/contrib/ltree/ltree.h b/contrib/ltree/ltree.h
index aec445847f..1b1305b483 100644
--- a/contrib/ltree/ltree.h
+++ b/contrib/ltree/ltree.h
@@ -5,6 +5,7 @@
#include "fmgr.h"
#include "tsearch/ts_locale.h"
+#include "utils/memutils.h"
typedef struct
{
@@ -30,7 +31,7 @@ typedef struct
typedef struct
{
- int4 val;
+ int32 val;
uint16 len;
uint8 flag;
char name[1];
@@ -89,9 +90,9 @@ typedef struct
*/
typedef struct ITEM
{
- int2 type;
- int2 left;
- int4 val;
+ int16 type;
+ int16 left;
+ int32 val;
uint8 flag;
/* user-friendly value */
uint8 length;
@@ -105,12 +106,14 @@ typedef struct ITEM
typedef struct
{
int32 vl_len_; /* varlena header (do not touch directly!) */
- int4 size;
+ int32 size;
char data[1];
} ltxtquery;
-#define HDRSIZEQT MAXALIGN(VARHDRSZ + sizeof(int4))
+#define HDRSIZEQT MAXALIGN(VARHDRSZ + sizeof(int32))
#define COMPUTESIZE(size,lenofoperand) ( HDRSIZEQT + (size) * sizeof(ITEM) + (lenofoperand) )
+#define LTXTQUERY_TOO_BIG(size,lenofoperand) \
+ ((size) > (MaxAllocSize - HDRSIZEQT - (lenofoperand)) / sizeof(ITEM))
#define GETQUERY(x) (ITEM*)( (char*)(x)+HDRSIZEQT )
#define GETOPERAND(x) ( (char*)GETQUERY(x) + ((ltxtquery*)x)->size * sizeof(ITEM) )
@@ -173,7 +176,7 @@ int ltree_strncasecmp(const char *a, const char *b, size_t s);
#define BITBYTE 8
#define SIGLENINT 2
-#define SIGLEN ( sizeof(int4)*SIGLENINT )
+#define SIGLEN ( sizeof(int32)*SIGLENINT )
#define SIGLENBIT (SIGLEN*BITBYTE)
typedef unsigned char BITVEC[SIGLEN];
typedef unsigned char *BITVECP;
@@ -229,7 +232,7 @@ typedef struct
/* GiST support for ltree[] */
#define ASIGLENINT (7)
-#define ASIGLEN (sizeof(int4)*ASIGLENINT)
+#define ASIGLEN (sizeof(int32)*ASIGLENINT)
#define ASIGLENBIT (ASIGLEN*BITBYTE)
typedef unsigned char ABITVEC[ASIGLEN];
diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c
index 13d96656d2..2d89f1aed4 100644
--- a/contrib/ltree/ltree_gist.c
+++ b/contrib/ltree/ltree_gist.c
@@ -13,10 +13,7 @@
#define NEXTVAL(x) ( (lquery*)( (char*)(x) + INTALIGN( VARSIZE(x) ) ) )
PG_FUNCTION_INFO_V1(ltree_gist_in);
-Datum ltree_gist_in(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ltree_gist_out);
-Datum ltree_gist_out(PG_FUNCTION_ARGS);
Datum
ltree_gist_in(PG_FUNCTION_ARGS)
@@ -37,25 +34,12 @@ ltree_gist_out(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(ltree_compress);
-Datum ltree_compress(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ltree_decompress);
-Datum ltree_decompress(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ltree_same);
-Datum ltree_same(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ltree_union);
-Datum ltree_union(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ltree_penalty);
-Datum ltree_penalty(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ltree_picksplit);
-Datum ltree_picksplit(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ltree_consistent);
-Datum ltree_consistent(PG_FUNCTION_ARGS);
#define ISEQ(a,b) ( (a)->numlevel == (b)->numlevel && ltree_compare(a,b)==0 )
#define GETENTRY(vec,pos) ((ltree_gist *) DatumGetPointer((vec)->vector[(pos)].key))
@@ -70,7 +54,7 @@ ltree_compress(PG_FUNCTION_ARGS)
{ /* ltree */
ltree_gist *key;
ltree *val = (ltree *) DatumGetPointer(PG_DETOAST_DATUM(entry->key));
- int4 len = LTG_HDRSIZE + VARSIZE(val);
+ int32 len = LTG_HDRSIZE + VARSIZE(val);
key = (ltree_gist *) palloc(len);
SET_VARSIZE(key, len);
@@ -118,7 +102,7 @@ ltree_same(PG_FUNCTION_ARGS)
*result = (ISEQ(LTG_NODE(a), LTG_NODE(b))) ? true : false;
else
{
- int4 i;
+ int32 i;
BITVECP sa = LTG_SIGN(a),
sb = LTG_SIGN(b);
@@ -169,7 +153,7 @@ ltree_union(PG_FUNCTION_ARGS)
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
int *size = (int *) PG_GETARG_POINTER(1);
BITVEC base;
- int4 i,
+ int32 i,
j;
ltree_gist *result,
*cur;
@@ -253,7 +237,7 @@ ltree_penalty(PG_FUNCTION_ARGS)
ltree_gist *origval = (ltree_gist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
ltree_gist *newval = (ltree_gist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
float *penalty = (float *) PG_GETARG_POINTER(2);
- int4 cmpr,
+ int32 cmpr,
cmpl;
cmpl = ltree_compare(LTG_GETLNODE(origval), LTG_GETLNODE(newval));
@@ -287,7 +271,7 @@ ltree_picksplit(PG_FUNCTION_ARGS)
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1);
OffsetNumber j;
- int4 i;
+ int32 i;
RIX *array;
OffsetNumber maxoff;
int nbytes;
@@ -441,7 +425,7 @@ ltree_picksplit(PG_FUNCTION_ARGS)
static bool
gist_isparent(ltree_gist *key, ltree *query)
{
- int4 numlevel = query->numlevel;
+ int32 numlevel = query->numlevel;
int i;
for (i = query->numlevel; i >= 0; i--)
diff --git a/contrib/ltree/ltree_io.c b/contrib/ltree/ltree_io.c
index 3e88b81c16..a1d4a0d38f 100644
--- a/contrib/ltree/ltree_io.c
+++ b/contrib/ltree/ltree_io.c
@@ -8,19 +8,13 @@
#include <ctype.h>
#include "ltree.h"
+#include "utils/memutils.h"
#include "crc32.h"
PG_FUNCTION_INFO_V1(ltree_in);
-Datum ltree_in(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ltree_out);
-Datum ltree_out(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(lquery_in);
-Datum lquery_in(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(lquery_out);
-Datum lquery_out(PG_FUNCTION_ARGS);
#define UNCHAR ereport(ERROR, \
@@ -64,6 +58,11 @@ ltree_in(PG_FUNCTION_ARGS)
ptr += charlen;
}
+ if (num + 1 > MaxAllocSize / sizeof(nodeitem))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of levels (%d) exceeds the maximum allowed (%d)",
+ num + 1, (int) (MaxAllocSize / sizeof(nodeitem)))));
list = lptr = (nodeitem *) palloc(sizeof(nodeitem) * (num + 1));
ptr = buf;
while (*ptr)
@@ -228,6 +227,11 @@ lquery_in(PG_FUNCTION_ARGS)
}
num++;
+ if (num > MaxAllocSize / ITEMSIZE)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of levels (%d) exceeds the maximum allowed (%d)",
+ num, (int) (MaxAllocSize / ITEMSIZE))));
curqlevel = tmpql = (lquery_level *) palloc0(ITEMSIZE * num);
ptr = buf;
while (*ptr)
diff --git a/contrib/ltree/ltree_op.c b/contrib/ltree/ltree_op.c
index 4290ad4e63..4561073fa0 100644
--- a/contrib/ltree/ltree_op.c
+++ b/contrib/ltree/ltree_op.c
@@ -7,6 +7,7 @@
#include <ctype.h>
+#include "access/htup_details.h"
#include "catalog/pg_statistic.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
@@ -37,25 +38,6 @@ PG_FUNCTION_INFO_V1(ltree2text);
PG_FUNCTION_INFO_V1(text2ltree);
PG_FUNCTION_INFO_V1(ltreeparentsel);
-Datum ltree_cmp(PG_FUNCTION_ARGS);
-Datum ltree_lt(PG_FUNCTION_ARGS);
-Datum ltree_le(PG_FUNCTION_ARGS);
-Datum ltree_eq(PG_FUNCTION_ARGS);
-Datum ltree_ne(PG_FUNCTION_ARGS);
-Datum ltree_ge(PG_FUNCTION_ARGS);
-Datum ltree_gt(PG_FUNCTION_ARGS);
-Datum nlevel(PG_FUNCTION_ARGS);
-Datum subltree(PG_FUNCTION_ARGS);
-Datum subpath(PG_FUNCTION_ARGS);
-Datum ltree_index(PG_FUNCTION_ARGS);
-Datum ltree_addltree(PG_FUNCTION_ARGS);
-Datum ltree_addtext(PG_FUNCTION_ARGS);
-Datum ltree_textadd(PG_FUNCTION_ARGS);
-Datum lca(PG_FUNCTION_ARGS);
-Datum ltree2text(PG_FUNCTION_ARGS);
-Datum text2ltree(PG_FUNCTION_ARGS);
-Datum ltreeparentsel(PG_FUNCTION_ARGS);
-
int
ltree_compare(const ltree *a, const ltree *b)
{
@@ -200,7 +182,7 @@ ltree_risparent(PG_FUNCTION_ARGS)
static ltree *
-inner_subltree(ltree *t, int4 startpos, int4 endpos)
+inner_subltree(ltree *t, int32 startpos, int32 endpos)
{
char *start = NULL,
*end = NULL;
@@ -252,9 +234,9 @@ Datum
subpath(PG_FUNCTION_ARGS)
{
ltree *t = PG_GETARG_LTREE(0);
- int4 start = PG_GETARG_INT32(1);
- int4 len = (fcinfo->nargs == 3) ? PG_GETARG_INT32(2) : 0;
- int4 end;
+ int32 start = PG_GETARG_INT32(1);
+ int32 len = (fcinfo->nargs == 3) ? PG_GETARG_INT32(2) : 0;
+ int32 end;
ltree *res;
end = start + len;
@@ -611,7 +593,7 @@ ltreeparentsel(PG_FUNCTION_ARGS)
/*
* If the histogram is large enough, see what fraction of it the
* constant is "<@" to, and assume that's representative of the
- * non-MCV population. Otherwise use the default selectivity for the
+ * non-MCV population. Otherwise use the default selectivity for the
* non-MCV population.
*/
selec = histogram_selectivity(&vardata, &contproc,
diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c
index 826f4e1c9d..ddc63d7b66 100644
--- a/contrib/ltree/ltxtquery_io.c
+++ b/contrib/ltree/ltxtquery_io.c
@@ -9,12 +9,10 @@
#include "crc32.h"
#include "ltree.h"
+#include "miscadmin.h"
PG_FUNCTION_INFO_V1(ltxtq_in);
-Datum ltxtq_in(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(ltxtq_out);
-Datum ltxtq_out(PG_FUNCTION_ARGS);
/* parser's states */
@@ -28,10 +26,10 @@ Datum ltxtq_out(PG_FUNCTION_ARGS);
*/
typedef struct NODE
{
- int4 type;
- int4 val;
- int2 distance;
- int2 length;
+ int32 type;
+ int32 val;
+ int16 distance;
+ int16 length;
uint16 flag;
struct NODE *next;
} NODE;
@@ -39,16 +37,16 @@ typedef struct NODE
typedef struct
{
char *buf;
- int4 state;
- int4 count;
+ int32 state;
+ int32 count;
/* reverse polish notation in list (for temporary usage) */
NODE *str;
/* number in str */
- int4 num;
+ int32 num;
/* user-friendly operand */
- int4 lenop;
- int4 sumlen;
+ int32 lenop;
+ int32 sumlen;
char *op;
char *curop;
} QPRS_STATE;
@@ -56,8 +54,8 @@ typedef struct
/*
* get token from query string
*/
-static int4
-gettoken_query(QPRS_STATE *state, int4 *val, int4 *lenval, char **strval, uint16 *flag)
+static int32
+gettoken_query(QPRS_STATE *state, int32 *val, int32 *lenval, char **strval, uint16 *flag)
{
int charlen;
@@ -71,7 +69,7 @@ gettoken_query(QPRS_STATE *state, int4 *val, int4 *lenval, char **strval, uint16
if (charlen == 1 && t_iseq(state->buf, '!'))
{
(state->buf)++;
- *val = (int4) '!';
+ *val = (int32) '!';
return OPR;
}
else if (charlen == 1 && t_iseq(state->buf, '('))
@@ -117,7 +115,7 @@ gettoken_query(QPRS_STATE *state, int4 *val, int4 *lenval, char **strval, uint16
if (charlen == 1 && (t_iseq(state->buf, '&') || t_iseq(state->buf, '|')))
{
state->state = WAITOPERAND;
- *val = (int4) *(state->buf);
+ *val = (int32) *(state->buf);
(state->buf)++;
return OPR;
}
@@ -139,14 +137,13 @@ gettoken_query(QPRS_STATE *state, int4 *val, int4 *lenval, char **strval, uint16
state->buf += charlen;
}
- return END;
}
/*
* push new one in polish notation reverse view
*/
static void
-pushquery(QPRS_STATE *state, int4 type, int4 val, int4 distance, int4 lenval, uint16 flag)
+pushquery(QPRS_STATE *state, int32 type, int32 val, int32 distance, int32 lenval, uint16 flag)
{
NODE *tmp = (NODE *) palloc(sizeof(NODE));
@@ -184,7 +181,7 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
while (state->curop - state->op + lenval + 1 >= state->lenop)
{
- int4 tmp = state->curop - state->op;
+ int32 tmp = state->curop - state->op;
state->lenop *= 2;
state->op = (char *) repalloc((void *) state->op, state->lenop);
@@ -202,32 +199,35 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
/*
* make polish notation of query
*/
-static int4
+static int32
makepol(QPRS_STATE *state)
{
- int4 val = 0,
+ int32 val = 0,
type;
- int4 lenval = 0;
+ int32 lenval = 0;
char *strval = NULL;
- int4 stack[STACKDEPTH];
- int4 lenstack = 0;
+ int32 stack[STACKDEPTH];
+ int32 lenstack = 0;
uint16 flag = 0;
+ /* since this function recurses, it could be driven to stack overflow */
+ check_stack_depth();
+
while ((type = gettoken_query(state, &val, &lenval, &strval, &flag)) != END)
{
switch (type)
{
case VAL:
pushval_asis(state, VAL, strval, lenval, flag);
- while (lenstack && (stack[lenstack - 1] == (int4) '&' ||
- stack[lenstack - 1] == (int4) '!'))
+ while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
+ stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
}
break;
case OPR:
- if (lenstack && val == (int4) '|')
+ if (lenstack && val == (int32) '|')
pushquery(state, OPR, val, 0, 0, 0);
else
{
@@ -241,8 +241,8 @@ makepol(QPRS_STATE *state)
case OPEN:
if (makepol(state) == ERR)
return ERR;
- while (lenstack && (stack[lenstack - 1] == (int4) '&' ||
- stack[lenstack - 1] == (int4) '!'))
+ while (lenstack && (stack[lenstack - 1] == (int32) '&' ||
+ stack[lenstack - 1] == (int32) '!'))
{
lenstack--;
pushquery(state, OPR, stack[lenstack], 0, 0, 0);
@@ -275,14 +275,17 @@ makepol(QPRS_STATE *state)
}
static void
-findoprnd(ITEM *ptr, int4 *pos)
+findoprnd(ITEM *ptr, int32 *pos)
{
+ /* since this function recurses, it could be driven to stack overflow. */
+ check_stack_depth();
+
if (ptr[*pos].type == VAL || ptr[*pos].type == VALTRUE)
{
ptr[*pos].left = 0;
(*pos)++;
}
- else if (ptr[*pos].val == (int4) '!')
+ else if (ptr[*pos].val == (int32) '!')
{
ptr[*pos].left = 1;
(*pos)++;
@@ -291,7 +294,7 @@ findoprnd(ITEM *ptr, int4 *pos)
else
{
ITEM *curitem = &ptr[*pos];
- int4 tmp = *pos;
+ int32 tmp = *pos;
(*pos)++;
findoprnd(ptr, pos);
@@ -308,12 +311,12 @@ static ltxtquery *
queryin(char *buf)
{
QPRS_STATE state;
- int4 i;
+ int32 i;
ltxtquery *query;
- int4 commonlen;
+ int32 commonlen;
ITEM *ptr;
NODE *tmp;
- int4 pos = 0;
+ int32 pos = 0;
#ifdef BS_DEBUG
char pbuf[16384],
@@ -341,8 +344,12 @@ queryin(char *buf)
errmsg("syntax error"),
errdetail("Empty query.")));
- /* make finish struct */
+ if (LTXTQUERY_TOO_BIG(state.num, state.sumlen))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("ltxtquery is too large")));
commonlen = COMPUTESIZE(state.num, state.sumlen);
+
query = (ltxtquery *) palloc(commonlen);
SET_VARSIZE(query, commonlen);
query->size = state.num;
@@ -390,13 +397,13 @@ typedef struct
char *buf;
char *cur;
char *op;
- int4 buflen;
+ int32 buflen;
} INFIX;
#define RESIZEBUF(inf,addsize) \
while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) \
{ \
- int4 len = (inf)->cur - (inf)->buf; \
+ int32 len = (inf)->cur - (inf)->buf; \
(inf)->buflen *= 2; \
(inf)->buf = (char*) repalloc( (void*)(inf)->buf, (inf)->buflen ); \
(inf)->cur = (inf)->buf + len; \
@@ -438,7 +445,7 @@ infix(INFIX *in, bool first)
*(in->cur) = '\0';
in->curpol++;
}
- else if (in->curpol->val == (int4) '!')
+ else if (in->curpol->val == (int32) '!')
{
bool isopr = false;
@@ -464,11 +471,11 @@ infix(INFIX *in, bool first)
}
else
{
- int4 op = in->curpol->val;
+ int32 op = in->curpol->val;
INFIX nrm;
in->curpol++;
- if (op == (int4) '|' && !first)
+ if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, "( ");
@@ -493,7 +500,7 @@ infix(INFIX *in, bool first)
in->cur = strchr(in->cur, '\0');
pfree(nrm.buf);
- if (op == (int4) '|' && !first)
+ if (op == (int32) '|' && !first)
{
RESIZEBUF(in, 2);
sprintf(in->cur, " )");
diff --git a/contrib/ltree/ltxtquery_op.c b/contrib/ltree/ltxtquery_op.c
index 1c13888605..64f9d219f7 100644
--- a/contrib/ltree/ltxtquery_op.c
+++ b/contrib/ltree/ltxtquery_op.c
@@ -20,13 +20,13 @@ ltree_execute(ITEM *curitem, void *checkval, bool calcnot, bool (*chkcond) (void
{
if (curitem->type == VAL)
return (*chkcond) (checkval, curitem);
- else if (curitem->val == (int4) '!')
+ else if (curitem->val == (int32) '!')
{
return (calcnot) ?
((ltree_execute(curitem + 1, checkval, calcnot, chkcond)) ? false : true)
: true;
}
- else if (curitem->val == (int4) '&')
+ else if (curitem->val == (int32) '&')
{
if (ltree_execute(curitem + curitem->left, checkval, calcnot, chkcond))
return ltree_execute(curitem + 1, checkval, calcnot, chkcond);
@@ -40,7 +40,6 @@ ltree_execute(ITEM *curitem, void *checkval, bool calcnot, bool (*chkcond) (void
else
return ltree_execute(curitem + 1, checkval, calcnot, chkcond);
}
- return false;
}
typedef struct
diff --git a/contrib/oid2name/oid2name.c b/contrib/oid2name/oid2name.c
index c7ba1bd101..e5eeec21c1 100644
--- a/contrib/oid2name/oid2name.c
+++ b/contrib/oid2name/oid2name.c
@@ -9,14 +9,8 @@
*/
#include "postgres_fe.h"
-#include <unistd.h>
-#ifdef HAVE_GETOPT_H
-#include <getopt.h>
-#endif
-
-extern char *optarg;
-
#include "libpq-fe.h"
+#include "pg_getopt.h"
/* an extensible array to keep track of elements to show */
typedef struct
@@ -44,13 +38,12 @@ struct options
char *hostname;
char *port;
char *username;
+ const char *progname;
};
/* function prototypes */
static void help(const char *progname);
void get_opts(int, char **, struct options *);
-void *myalloc(size_t size);
-char *mystrdup(const char *str);
void add_one_elt(char *eltname, eary *eary);
char *get_comma_elts(eary *eary);
PGconn *sql_conn(struct options *);
@@ -80,6 +73,7 @@ get_opts(int argc, char **argv, struct options * my_opts)
my_opts->hostname = NULL;
my_opts->port = NULL;
my_opts->username = NULL;
+ my_opts->progname = progname;
if (argc > 1)
{
@@ -102,7 +96,7 @@ get_opts(int argc, char **argv, struct options * my_opts)
{
/* specify the database */
case 'd':
- my_opts->dbname = mystrdup(optarg);
+ my_opts->dbname = pg_strdup(optarg);
break;
/* specify one tablename to show */
@@ -127,17 +121,17 @@ get_opts(int argc, char **argv, struct options * my_opts)
/* host to connect to */
case 'H':
- my_opts->hostname = mystrdup(optarg);
+ my_opts->hostname = pg_strdup(optarg);
break;
/* port to connect to on remote host */
case 'p':
- my_opts->port = mystrdup(optarg);
+ my_opts->port = pg_strdup(optarg);
break;
/* username */
case 'U':
- my_opts->username = mystrdup(optarg);
+ my_opts->username = pg_strdup(optarg);
break;
/* display system tables */
@@ -179,51 +173,25 @@ help(const char *progname)
"Usage:\n"
" %s [OPTION]...\n"
"\nOptions:\n"
- " -d DBNAME database to connect to\n"
- " -f FILENODE show info for table with given file node\n"
- " -H HOSTNAME database server host or socket directory\n"
- " -i show indexes and sequences too\n"
- " -o OID show info for table with given OID\n"
- " -p PORT database server port number\n"
- " -q quiet (don't show headers)\n"
- " -s show all tablespaces\n"
- " -S show system objects too\n"
- " -t TABLE show info for named table\n"
- " -U NAME connect as specified database user\n"
- " -x extended (show additional columns)\n"
- " --help show this help, then exit\n"
- " --version output version information, then exit\n"
+ " -d DBNAME database to connect to\n"
+ " -f FILENODE show info for table with given file node\n"
+ " -H HOSTNAME database server host or socket directory\n"
+ " -i show indexes and sequences too\n"
+ " -o OID show info for table with given OID\n"
+ " -p PORT database server port number\n"
+ " -q quiet (don't show headers)\n"
+ " -s show all tablespaces\n"
+ " -S show system objects too\n"
+ " -t TABLE show info for named table\n"
+ " -U NAME connect as specified database user\n"
+ " -V, --version output version information, then exit\n"
+ " -x extended (show additional columns)\n"
+ " -?, --help show this help, then exit\n"
"\nThe default action is to show all database OIDs.\n\n"
"Report bugs to <pgsql-bugs@postgresql.org>.\n",
progname, progname);
}
-void *
-myalloc(size_t size)
-{
- void *ptr = malloc(size);
-
- if (!ptr)
- {
- fprintf(stderr, "out of memory");
- exit(1);
- }
- return ptr;
-}
-
-char *
-mystrdup(const char *str)
-{
- char *result = strdup(str);
-
- if (!result)
- {
- fprintf(stderr, "out of memory");
- exit(1);
- }
- return result;
-}
-
/*
* add_one_elt
*
@@ -235,22 +203,16 @@ add_one_elt(char *eltname, eary *eary)
if (eary->alloc == 0)
{
eary ->alloc = 8;
- eary ->array = (char **) myalloc(8 * sizeof(char *));
+ eary ->array = (char **) pg_malloc(8 * sizeof(char *));
}
else if (eary->num >= eary->alloc)
{
eary ->alloc *= 2;
- eary ->array = (char **)
- realloc(eary->array, eary->alloc * sizeof(char *));
-
- if (!eary->array)
- {
- fprintf(stderr, "out of memory");
- exit(1);
- }
+ eary ->array = (char **) pg_realloc(eary->array,
+ eary->alloc * sizeof(char *));
}
- eary ->array[eary->num] = mystrdup(eltname);
+ eary ->array[eary->num] = pg_strdup(eltname);
eary ->num++;
}
@@ -270,7 +232,7 @@ get_comma_elts(eary *eary)
length = 0;
if (eary->num == 0)
- return mystrdup("");
+ return pg_strdup("");
/*
* PQescapeString wants 2 * length + 1 bytes of breath space. Add two
@@ -279,7 +241,7 @@ get_comma_elts(eary *eary)
for (i = 0; i < eary->num; i++)
length += strlen(eary->array[i]);
- ret = (char *) myalloc(length * 2 + 4 * eary->num);
+ ret = (char *) pg_malloc(length * 2 + 4 * eary->num);
ptr = ret;
for (i = 0; i < eary->num; i++)
@@ -308,14 +270,29 @@ sql_conn(struct options * my_opts)
*/
do
{
+#define PARAMS_ARRAY_SIZE 7
+
+ const char *keywords[PARAMS_ARRAY_SIZE];
+ const char *values[PARAMS_ARRAY_SIZE];
+
+ keywords[0] = "host";
+ values[0] = my_opts->hostname;
+ keywords[1] = "port";
+ values[1] = my_opts->port;
+ keywords[2] = "user";
+ values[2] = my_opts->username;
+ keywords[3] = "password";
+ values[3] = password;
+ keywords[4] = "dbname";
+ values[4] = my_opts->dbname;
+ keywords[5] = "fallback_application_name";
+ values[5] = my_opts->progname;
+ keywords[6] = NULL;
+ values[6] = NULL;
+
new_pass = false;
- conn = PQsetdbLogin(my_opts->hostname,
- my_opts->port,
- NULL, /* options */
- NULL, /* tty */
- my_opts->dbname,
- my_opts->username,
- password);
+ conn = PQconnectdbParams(keywords, values, true);
+
if (!conn)
{
fprintf(stderr, "%s: could not connect to database %s\n",
@@ -384,7 +361,7 @@ sql_exec(PGconn *conn, const char *todo, bool quiet)
nfields = PQnfields(res);
/* for each field, get the needed width */
- length = (int *) myalloc(sizeof(int) * nfields);
+ length = (int *) pg_malloc(sizeof(int) * nfields);
for (j = 0; j < nfields; j++)
length[j] = strlen(PQfname(res, j));
@@ -407,7 +384,7 @@ sql_exec(PGconn *conn, const char *todo, bool quiet)
l += length[j] + 2;
}
fprintf(stdout, "\n");
- pad = (char *) myalloc(l + 1);
+ pad = (char *) pg_malloc(l + 1);
MemSet(pad, '-', l);
pad[l] = '\0';
fprintf(stdout, "%s\n", pad);
@@ -430,7 +407,7 @@ sql_exec(PGconn *conn, const char *todo, bool quiet)
}
/*
- * Dump all databases. There are no system objects to worry about.
+ * Dump all databases. There are no system objects to worry about.
*/
void
sql_exec_dumpalldbs(PGconn *conn, struct options * opts)
@@ -461,7 +438,7 @@ sql_exec_dumpalltables(PGconn *conn, struct options * opts)
" LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace "
" LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),"
" pg_catalog.pg_tablespace t "
- "WHERE relkind IN ('r'%s%s) AND "
+ "WHERE relkind IN ('r', 'm'%s%s) AND "
" %s"
" t.oid = CASE"
" WHEN reltablespace <> 0 THEN reltablespace"
@@ -498,8 +475,8 @@ sql_exec_searchtables(PGconn *conn, struct options * opts)
comma_filenodes = get_comma_elts(opts->filenodes);
/* 80 extra chars for SQL expression */
- qualifiers = (char *) myalloc(strlen(comma_oids) + strlen(comma_tables) +
- strlen(comma_filenodes) + 80);
+ qualifiers = (char *) pg_malloc(strlen(comma_oids) + strlen(comma_tables) +
+ strlen(comma_filenodes) + 80);
ptr = qualifiers;
if (opts->oids->num > 0)
@@ -525,22 +502,21 @@ sql_exec_searchtables(PGconn *conn, struct options * opts)
free(comma_filenodes);
/* now build the query */
- todo = (char *) myalloc(650 + strlen(qualifiers));
- snprintf(todo, 650 + strlen(qualifiers),
- "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
- "FROM pg_catalog.pg_class c \n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
- " LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
- " pg_catalog.pg_tablespace t \n"
- "WHERE relkind IN ('r', 'i', 'S', 't') AND \n"
- " t.oid = CASE\n"
- " WHEN reltablespace <> 0 THEN reltablespace\n"
- " ELSE dattablespace\n"
- " END AND \n"
- " (%s) \n"
- "ORDER BY relname\n",
- opts->extended ? addfields : "",
- qualifiers);
+ todo = psprintf(
+ "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
+ "FROM pg_catalog.pg_class c \n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
+ " LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
+ " pg_catalog.pg_tablespace t \n"
+ "WHERE relkind IN ('r', 'm', 'i', 'S', 't') AND \n"
+ " t.oid = CASE\n"
+ " WHEN reltablespace <> 0 THEN reltablespace\n"
+ " ELSE dattablespace\n"
+ " END AND \n"
+ " (%s) \n"
+ "ORDER BY relname\n",
+ opts->extended ? addfields : "",
+ qualifiers);
free(qualifiers);
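
Editor's note: psprintf() (new backend API in 9.4) computes the required length itself and returns a palloc'd, exactly-sized string, which is what allows dropping the hand-computed "650 + strlen(qualifiers)" buffer above. A hedged one-function sketch of the usage pattern:

#include "postgres.h"

/* Backend-context sketch: format into a freshly palloc'd buffer of exactly
 * the right size; no precomputed length, no truncation risk. */
static char *
describe_block(int blkno, int free_bytes)
{
    return psprintf("block %d has %d bytes free", blkno, free_bytes);
}
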
@@ -565,11 +541,11 @@ main(int argc, char **argv)
struct options *my_opts;
PGconn *pgconn;
- my_opts = (struct options *) myalloc(sizeof(struct options));
+ my_opts = (struct options *) pg_malloc(sizeof(struct options));
- my_opts->oids = (eary *) myalloc(sizeof(eary));
- my_opts->tables = (eary *) myalloc(sizeof(eary));
- my_opts->filenodes = (eary *) myalloc(sizeof(eary));
+ my_opts->oids = (eary *) pg_malloc(sizeof(eary));
+ my_opts->tables = (eary *) pg_malloc(sizeof(eary));
+ my_opts->filenodes = (eary *) pg_malloc(sizeof(eary));
my_opts->oids->num = my_opts->oids->alloc = 0;
my_opts->tables->num = my_opts->tables->alloc = 0;
diff --git a/contrib/pageinspect/Makefile b/contrib/pageinspect/Makefile
index 13ba6d3911..ee78cb2989 100644
--- a/contrib/pageinspect/Makefile
+++ b/contrib/pageinspect/Makefile
@@ -4,7 +4,8 @@ MODULE_big = pageinspect
OBJS = rawpage.o heapfuncs.o btreefuncs.o fsmfuncs.o
EXTENSION = pageinspect
-DATA = pageinspect--1.0.sql pageinspect--unpackaged--1.0.sql
+DATA = pageinspect--1.2.sql pageinspect--1.0--1.1.sql \
+ pageinspect--1.1--1.2.sql pageinspect--unpackaged--1.0.sql
ifdef USE_PGXS
PG_CONFIG = pg_config
diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c
index dbb2158ba8..c1e83f301b 100644
--- a/contrib/pageinspect/btreefuncs.c
+++ b/contrib/pageinspect/btreefuncs.c
@@ -35,10 +35,6 @@
#include "utils/rel.h"
-extern Datum bt_metap(PG_FUNCTION_ARGS);
-extern Datum bt_page_items(PG_FUNCTION_ARGS);
-extern Datum bt_page_stats(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(bt_metap);
PG_FUNCTION_INFO_V1(bt_page_items);
PG_FUNCTION_INFO_V1(bt_page_stats);
@@ -156,9 +152,9 @@ GetBTPageStatistics(BlockNumber blkno, Buffer buffer, BTPageStat *stat)
}
/* -----------------------------------------------
- * bt_page()
+ * bt_page_stats()
*
- * Usage: SELECT * FROM bt_page('t1_pkey', 1);
+ * Usage: SELECT * FROM bt_page_stats('t1_pkey', 1);
* -----------------------------------------------
*/
Datum
@@ -204,6 +200,7 @@ bt_page_stats(PG_FUNCTION_ARGS)
CHECK_RELATION_BLOCK_RANGE(rel, blkno);
buffer = ReadBuffer(rel, blkno);
+ LockBuffer(buffer, BUFFER_LOCK_SHARE);
/* keep compiler quiet */
stat.btpo_prev = stat.btpo_next = InvalidBlockNumber;
@@ -211,46 +208,31 @@ bt_page_stats(PG_FUNCTION_ARGS)
GetBTPageStatistics(blkno, buffer, &stat);
+ UnlockReleaseBuffer(buffer);
+ relation_close(rel, AccessShareLock);
+
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
j = 0;
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", stat.blkno);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%c", stat.type);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", stat.live_items);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", stat.dead_items);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", stat.avg_item_size);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", stat.page_size);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", stat.free_size);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", stat.btpo_prev);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", stat.btpo_next);
- values[j] = palloc(32);
- if (stat.type == 'd')
- snprintf(values[j++], 32, "%d", stat.btpo.xact);
- else
- snprintf(values[j++], 32, "%d", stat.btpo.level);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", stat.btpo_flags);
+ values[j++] = psprintf("%d", stat.blkno);
+ values[j++] = psprintf("%c", stat.type);
+ values[j++] = psprintf("%d", stat.live_items);
+ values[j++] = psprintf("%d", stat.dead_items);
+ values[j++] = psprintf("%d", stat.avg_item_size);
+ values[j++] = psprintf("%d", stat.page_size);
+ values[j++] = psprintf("%d", stat.free_size);
+ values[j++] = psprintf("%d", stat.btpo_prev);
+ values[j++] = psprintf("%d", stat.btpo_next);
+ values[j++] = psprintf("%d", (stat.type == 'd') ? stat.btpo.xact : stat.btpo.level);
+ values[j++] = psprintf("%d", stat.btpo_flags);
tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc),
values);
result = HeapTupleGetDatum(tuple);
- ReleaseBuffer(buffer);
-
- relation_close(rel, AccessShareLock);
-
PG_RETURN_DATUM(result);
}
@@ -322,6 +304,7 @@ bt_page_items(PG_FUNCTION_ARGS)
CHECK_RELATION_BLOCK_RANGE(rel, blkno);
buffer = ReadBuffer(rel, blkno);
+ LockBuffer(buffer, BUFFER_LOCK_SHARE);
/*
* We copy the page into local storage to avoid holding pin on the
@@ -335,7 +318,7 @@ bt_page_items(PG_FUNCTION_ARGS)
uargs->page = palloc(BLCKSZ);
memcpy(uargs->page, BufferGetPage(buffer), BLCKSZ);
- ReleaseBuffer(buffer);
+ UnlockReleaseBuffer(buffer);
relation_close(rel, AccessShareLock);
uargs->offset = FirstOffsetNumber;
@@ -379,18 +362,13 @@ bt_page_items(PG_FUNCTION_ARGS)
itup = (IndexTuple) PageGetItem(uargs->page, id);
j = 0;
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", uargs->offset);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "(%u,%u)",
- BlockIdGetBlockNumber(&(itup->t_tid.ip_blkid)),
- itup->t_tid.ip_posid);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", (int) IndexTupleSize(itup));
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%c", IndexTupleHasNulls(itup) ? 't' : 'f');
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%c", IndexTupleHasVarwidths(itup) ? 't' : 'f');
+ values[j++] = psprintf("%d", uargs->offset);
+ values[j++] = psprintf("(%u,%u)",
+ BlockIdGetBlockNumber(&(itup->t_tid.ip_blkid)),
+ itup->t_tid.ip_posid);
+ values[j++] = psprintf("%d", (int) IndexTupleSize(itup));
+ values[j++] = psprintf("%c", IndexTupleHasNulls(itup) ? 't' : 'f');
+ values[j++] = psprintf("%c", IndexTupleHasVarwidths(itup) ? 't' : 'f');
ptr = (char *) itup + IndexInfoFindDataOffset(itup->t_info);
dlen = IndexTupleSize(itup) - IndexInfoFindDataOffset(itup->t_info);
@@ -466,6 +444,8 @@ bt_metap(PG_FUNCTION_ARGS)
errmsg("cannot access temporary tables of other sessions")));
buffer = ReadBuffer(rel, 0);
+ LockBuffer(buffer, BUFFER_LOCK_SHARE);
+
page = BufferGetPage(buffer);
metad = BTPageGetMeta(page);
@@ -474,26 +454,19 @@ bt_metap(PG_FUNCTION_ARGS)
elog(ERROR, "return type must be a row type");
j = 0;
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", metad->btm_magic);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", metad->btm_version);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", metad->btm_root);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", metad->btm_level);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", metad->btm_fastroot);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", metad->btm_fastlevel);
+ values[j++] = psprintf("%d", metad->btm_magic);
+ values[j++] = psprintf("%d", metad->btm_version);
+ values[j++] = psprintf("%d", metad->btm_root);
+ values[j++] = psprintf("%d", metad->btm_level);
+ values[j++] = psprintf("%d", metad->btm_fastroot);
+ values[j++] = psprintf("%d", metad->btm_fastlevel);
tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc),
values);
result = HeapTupleGetDatum(tuple);
- ReleaseBuffer(buffer);
-
+ UnlockReleaseBuffer(buffer);
relation_close(rel, AccessShareLock);
PG_RETURN_DATUM(result);
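
Editor's note: the recurring change in this file is a correctness fix. ReadBuffer() only pins a page, so the functions now also take a shared content lock before inspecting or copying it, then drop pin and lock together. A hedged sketch of that discipline (the helper itself is not part of the patch):

#include "postgres.h"

#include "storage/bufmgr.h"
#include "utils/rel.h"

/* Illustrative only: copy one block while holding both a pin and a shared
 * content lock, as bt_page_stats()/bt_page_items()/bt_metap() now do. */
static void
copy_block_locked(Relation rel, BlockNumber blkno, char *dest)
{
    Buffer      buffer = ReadBuffer(rel, blkno);

    LockBuffer(buffer, BUFFER_LOCK_SHARE);
    memcpy(dest, BufferGetPage(buffer), BLCKSZ);
    UnlockReleaseBuffer(buffer);    /* releases lock and pin */
}
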
diff --git a/contrib/pageinspect/fsmfuncs.c b/contrib/pageinspect/fsmfuncs.c
index c96d4be2b9..8c1960445a 100644
--- a/contrib/pageinspect/fsmfuncs.c
+++ b/contrib/pageinspect/fsmfuncs.c
@@ -9,7 +9,7 @@
* there's hardly any use case for using these without superuser-rights
* anyway.
*
- * Copyright (c) 2007-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/fsmfuncs.c
@@ -18,12 +18,12 @@
*/
#include "postgres.h"
+
+#include "funcapi.h"
+#include "lib/stringinfo.h"
+#include "miscadmin.h"
#include "storage/fsm_internals.h"
#include "utils/builtins.h"
-#include "miscadmin.h"
-#include "funcapi.h"
-
-Datum fsm_page_contents(PG_FUNCTION_ARGS);
/*
* Dumps the contents of a FSM page.
@@ -54,5 +54,5 @@ fsm_page_contents(PG_FUNCTION_ARGS)
}
appendStringInfo(&sinfo, "fp_next_slot: %d\n", fsmpage->fp_next_slot);
- PG_RETURN_TEXT_P(cstring_to_text(sinfo.data));
+ PG_RETURN_TEXT_P(cstring_to_text_with_len(sinfo.data, sinfo.len));
}
diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c
index 260ccffdc7..dedc8feaeb 100644
--- a/contrib/pageinspect/heapfuncs.c
+++ b/contrib/pageinspect/heapfuncs.c
@@ -15,7 +15,7 @@
* there's hardly any use case for using these without superuser-rights
* anyway.
*
- * Copyright (c) 2007-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/heapfuncs.c
@@ -25,12 +25,11 @@
#include "postgres.h"
+#include "access/htup_details.h"
#include "funcapi.h"
#include "utils/builtins.h"
#include "miscadmin.h"
-Datum heap_page_items(PG_FUNCTION_ARGS);
-
/*
* bits_to_text
@@ -161,8 +160,8 @@ heap_page_items(PG_FUNCTION_ARGS)
tuphdr = (HeapTupleHeader) PageGetItem(page, id);
- values[4] = UInt32GetDatum(HeapTupleHeaderGetXmin(tuphdr));
- values[5] = UInt32GetDatum(HeapTupleHeaderGetXmax(tuphdr));
+ values[4] = UInt32GetDatum(HeapTupleHeaderGetRawXmin(tuphdr));
+ values[5] = UInt32GetDatum(HeapTupleHeaderGetRawXmax(tuphdr));
values[6] = UInt32GetDatum(HeapTupleHeaderGetRawCommandId(tuphdr)); /* shared with xvac */
values[7] = PointerGetDatum(&tuphdr->t_ctid);
values[8] = UInt32GetDatum(tuphdr->t_infomask2);
diff --git a/contrib/pageinspect/pageinspect--1.0--1.1.sql b/contrib/pageinspect/pageinspect--1.0--1.1.sql
new file mode 100644
index 0000000000..49e83264d3
--- /dev/null
+++ b/contrib/pageinspect/pageinspect--1.0--1.1.sql
@@ -0,0 +1,18 @@
+/* contrib/pageinspect/pageinspect--1.0--1.1.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pageinspect UPDATE TO 1.1" to load this file. \quit
+
+DROP FUNCTION page_header(bytea);
+CREATE FUNCTION page_header(IN page bytea,
+ OUT lsn text,
+ OUT checksum smallint,
+ OUT flags smallint,
+ OUT lower smallint,
+ OUT upper smallint,
+ OUT special smallint,
+ OUT pagesize smallint,
+ OUT version smallint,
+ OUT prune_xid xid)
+AS 'MODULE_PATHNAME', 'page_header'
+LANGUAGE C STRICT;
diff --git a/contrib/pageinspect/pageinspect--1.1--1.2.sql b/contrib/pageinspect/pageinspect--1.1--1.2.sql
new file mode 100644
index 0000000000..5e23ca4dd5
--- /dev/null
+++ b/contrib/pageinspect/pageinspect--1.1--1.2.sql
@@ -0,0 +1,18 @@
+/* contrib/pageinspect/pageinspect--1.1--1.2.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pageinspect UPDATE TO 1.2" to load this file. \quit
+
+DROP FUNCTION page_header(bytea);
+CREATE FUNCTION page_header(IN page bytea,
+ OUT lsn pg_lsn,
+ OUT checksum smallint,
+ OUT flags smallint,
+ OUT lower smallint,
+ OUT upper smallint,
+ OUT special smallint,
+ OUT pagesize smallint,
+ OUT version smallint,
+ OUT prune_xid xid)
+AS 'MODULE_PATHNAME', 'page_header'
+LANGUAGE C STRICT;
diff --git a/contrib/pageinspect/pageinspect--1.0.sql b/contrib/pageinspect/pageinspect--1.2.sql
index 5613956fd8..15e8e1e381 100644
--- a/contrib/pageinspect/pageinspect--1.0.sql
+++ b/contrib/pageinspect/pageinspect--1.2.sql
@@ -1,4 +1,4 @@
-/* contrib/pageinspect/pageinspect--1.0.sql */
+/* contrib/pageinspect/pageinspect--1.2.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION pageinspect" to load this file. \quit
@@ -20,8 +20,8 @@ LANGUAGE C STRICT;
-- page_header()
--
CREATE FUNCTION page_header(IN page bytea,
- OUT lsn text,
- OUT tli smallint,
+ OUT lsn pg_lsn,
+ OUT checksum smallint,
OUT flags smallint,
OUT lower smallint,
OUT upper smallint,
diff --git a/contrib/pageinspect/pageinspect.control b/contrib/pageinspect/pageinspect.control
index f9da0e86ed..aecd91a711 100644
--- a/contrib/pageinspect/pageinspect.control
+++ b/contrib/pageinspect/pageinspect.control
@@ -1,5 +1,5 @@
# pageinspect extension
comment = 'inspect the contents of database pages at a low level'
-default_version = '1.0'
+default_version = '1.2'
module_pathname = '$libdir/pageinspect'
relocatable = true
diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c
index f51a4e31f5..cc66fc8b04 100644
--- a/contrib/pageinspect/rawpage.c
+++ b/contrib/pageinspect/rawpage.c
@@ -5,7 +5,7 @@
*
* Access-method specific inspection functions are in separate files.
*
- * Copyright (c) 2007-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pageinspect/rawpage.c
@@ -15,20 +15,19 @@
#include "postgres.h"
+#include "access/htup_details.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
+#include "catalog/pg_type.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "utils/builtins.h"
+#include "utils/pg_lsn.h"
#include "utils/rel.h"
PG_MODULE_MAGIC;
-Datum get_raw_page(PG_FUNCTION_ARGS);
-Datum get_raw_page_fork(PG_FUNCTION_ARGS);
-Datum page_header(PG_FUNCTION_ARGS);
-
static bytea *get_raw_page_internal(text *relname, ForkNumber forknum,
BlockNumber blkno);
@@ -179,7 +178,6 @@ page_header(PG_FUNCTION_ARGS)
PageHeader page;
XLogRecPtr lsn;
- char lsnchar[64];
if (!superuser())
ereport(ERROR,
@@ -206,10 +204,19 @@ page_header(PG_FUNCTION_ARGS)
/* Extract information from the page header */
lsn = PageGetLSN(page);
- snprintf(lsnchar, sizeof(lsnchar), "%X/%X", lsn.xlogid, lsn.xrecoff);
- values[0] = CStringGetTextDatum(lsnchar);
- values[1] = UInt16GetDatum(PageGetTLI(page));
+ /* pageinspect >= 1.2 uses pg_lsn instead of text for the LSN field. */
+ if (tupdesc->attrs[0]->atttypid == TEXTOID)
+ {
+ char lsnchar[64];
+
+ snprintf(lsnchar, sizeof(lsnchar), "%X/%X",
+ (uint32) (lsn >> 32), (uint32) lsn);
+ values[0] = CStringGetTextDatum(lsnchar);
+ }
+ else
+ values[0] = LSNGetDatum(lsn);
+ values[1] = UInt16GetDatum(page->pd_checksum);
values[2] = UInt16GetDatum(page->pd_flags);
values[3] = UInt16GetDatum(page->pd_lower);
values[4] = UInt16GetDatum(page->pd_upper);
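
Editor's note: since XLogRecPtr is now a plain 64-bit integer (the old xlogid/xrecoff struct is gone), the text form kept for pre-1.2 callers is built from its two 32-bit halves, as the hunk above shows. A sketch of just that formatting step:

#include "postgres.h"

#include "access/xlogdefs.h"

/* Sketch: render an LSN in the customary "%X/%X" form from the high and low
 * halves of the 64-bit XLogRecPtr. */
static void
format_lsn(XLogRecPtr lsn, char *buf, size_t buflen)
{
    snprintf(buf, buflen, "%X/%X", (uint32) (lsn >> 32), (uint32) lsn);
}
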
diff --git a/contrib/passwordcheck/passwordcheck.c b/contrib/passwordcheck/passwordcheck.c
index 8d6dea2a06..405896d40a 100644
--- a/contrib/passwordcheck/passwordcheck.c
+++ b/contrib/passwordcheck/passwordcheck.c
@@ -3,7 +3,7 @@
* passwordcheck.c
*
*
- * Copyright (c) 2009-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2009-2014, PostgreSQL Global Development Group
*
* Author: Laurenz Albe <laurenz.albe@wien.gv.at>
*
diff --git a/contrib/pg_archivecleanup/pg_archivecleanup.c b/contrib/pg_archivecleanup/pg_archivecleanup.c
index a226101bbc..212b267fcf 100644
--- a/contrib/pg_archivecleanup/pg_archivecleanup.c
+++ b/contrib/pg_archivecleanup/pg_archivecleanup.c
@@ -17,20 +17,9 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <signal.h>
-
-#ifndef WIN32
#include <sys/time.h>
-#include <unistd.h>
-
-#ifdef HAVE_GETOPT_H
-#include <getopt.h>
-#endif
-#else /* WIN32 */
-extern int getopt(int argc, char *const argv[], const char *optstring);
-#endif /* ! WIN32 */
-extern char *optarg;
-extern int optind;
+#include "pg_getopt.h"
const char *progname;
@@ -117,14 +106,14 @@ CleanupPriorWALFiles(void)
if ((xldir = opendir(archiveLocation)) != NULL)
{
- while ((xlde = readdir(xldir)) != NULL)
+ while (errno = 0, (xlde = readdir(xldir)) != NULL)
{
strncpy(walfile, xlde->d_name, MAXPGPATH);
TrimExtension(walfile, additional_ext);
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that
+ * deciding whether a segment is still needed. This ensures that
* we won't prematurely remove a segment from a parent timeline.
* We could probably be a little more proactive about removing
* segments of non-parent timelines, but that would be a whole lot
@@ -151,7 +140,7 @@ CleanupPriorWALFiles(void)
{
/*
* Prints the name of the file to be removed and skips the
- * actual removal. The regular printout is so that the
+ * actual removal. The regular printout is so that the
* user can pipe the output into some other program.
*/
printf("%s\n", WALFilePath);
@@ -175,7 +164,13 @@ CleanupPriorWALFiles(void)
}
}
}
- closedir(xldir);
+
+ if (errno)
+ fprintf(stderr, "%s: could not read archive location \"%s\": %s\n",
+ progname, archiveLocation, strerror(errno));
+ if (closedir(xldir))
+ fprintf(stderr, "%s: could not close archive location \"%s\": %s\n",
+ progname, archiveLocation, strerror(errno));
}
else
fprintf(stderr, "%s: could not open archive location \"%s\": %s\n",
@@ -249,11 +244,11 @@ usage(void)
printf("Usage:\n");
printf(" %s [OPTION]... ARCHIVELOCATION OLDESTKEPTWALFILE\n", progname);
printf("\nOptions:\n");
- printf(" -d generate debug output (verbose mode)\n");
- printf(" -n dry run, show the names of the files that would be removed\n");
- printf(" -x EXT clean up files if they have this extension\n");
- printf(" --help show this help, then exit\n");
- printf(" --version output version information, then exit\n");
+ printf(" -d generate debug output (verbose mode)\n");
+ printf(" -n dry run, show the names of the files that would be removed\n");
+ printf(" -V, --version output version information, then exit\n");
+ printf(" -x EXT clean up files if they have this extension\n");
+ printf(" -?, --help show this help, then exit\n");
printf("\n"
"For use as archive_cleanup_command in recovery.conf when standby_mode = on:\n"
" archive_cleanup_command = 'pg_archivecleanup [OPTION]... ARCHIVELOCATION %%r'\n"
@@ -299,8 +294,8 @@ main(int argc, char **argv)
dryrun = true;
break;
case 'x':
- additional_ext = optarg; /* Extension to remove from
- * xlogfile names */
+ additional_ext = strdup(optarg); /* Extension to remove
+ * from xlogfile names */
break;
default:
fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);
diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c
index 27e52b3b35..f39fe255db 100644
--- a/contrib/pg_buffercache/pg_buffercache_pages.c
+++ b/contrib/pg_buffercache/pg_buffercache_pages.c
@@ -8,6 +8,7 @@
*/
#include "postgres.h"
+#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "storage/buf_internals.h"
@@ -18,8 +19,6 @@
PG_MODULE_MAGIC;
-Datum pg_buffercache_pages(PG_FUNCTION_ARGS);
-
/*
* Record structure holding the to be exposed cache data.
@@ -115,7 +114,7 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
* possible deadlocks.
*/
for (i = 0; i < NUM_BUFFER_PARTITIONS; i++)
- LWLockAcquire(FirstBufMappingLock + i, LW_SHARED);
+ LWLockAcquire(BufMappingPartitionLockByIndex(i), LW_SHARED);
/*
* Scan though all the buffers, saving the relevant fields in the
@@ -156,7 +155,7 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
* avoids O(N^2) behavior inside LWLockRelease.
*/
for (i = NUM_BUFFER_PARTITIONS; --i >= 0;)
- LWLockRelease(FirstBufMappingLock + i);
+ LWLockRelease(BufMappingPartitionLockByIndex(i));
}
funcctx = SRF_PERCALL_SETUP();
diff --git a/contrib/pg_freespacemap/pg_freespacemap.c b/contrib/pg_freespacemap/pg_freespacemap.c
index f6f7d2e743..7805345add 100644
--- a/contrib/pg_freespacemap/pg_freespacemap.c
+++ b/contrib/pg_freespacemap/pg_freespacemap.c
@@ -14,8 +14,6 @@
PG_MODULE_MAGIC;
-Datum pg_freespace(PG_FUNCTION_ARGS);
-
/*
* Returns the amount of free space on a given page, according to the
* free space map.
diff --git a/contrib/pg_prewarm/Makefile b/contrib/pg_prewarm/Makefile
new file mode 100644
index 0000000000..176a29a003
--- /dev/null
+++ b/contrib/pg_prewarm/Makefile
@@ -0,0 +1,18 @@
+# contrib/pg_prewarm/Makefile
+
+MODULE_big = pg_prewarm
+OBJS = pg_prewarm.o
+
+EXTENSION = pg_prewarm
+DATA = pg_prewarm--1.0.sql
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/pg_prewarm
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/pg_prewarm/pg_prewarm--1.0.sql b/contrib/pg_prewarm/pg_prewarm--1.0.sql
new file mode 100644
index 0000000000..2bec7765c0
--- /dev/null
+++ b/contrib/pg_prewarm/pg_prewarm--1.0.sql
@@ -0,0 +1,14 @@
+/* contrib/pg_prewarm/pg_prewarm--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION pg_prewarm" to load this file. \quit
+
+-- Register the function.
+CREATE FUNCTION pg_prewarm(regclass,
+ mode text default 'buffer',
+ fork text default 'main',
+ first_block int8 default null,
+ last_block int8 default null)
+RETURNS int8
+AS 'MODULE_PATHNAME', 'pg_prewarm'
+LANGUAGE C;
diff --git a/contrib/pg_prewarm/pg_prewarm.c b/contrib/pg_prewarm/pg_prewarm.c
new file mode 100644
index 0000000000..df20e888ef
--- /dev/null
+++ b/contrib/pg_prewarm/pg_prewarm.c
@@ -0,0 +1,203 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_prewarm.c
+ * prewarming utilities
+ *
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/pg_prewarm/pg_prewarm.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "access/heapam.h"
+#include "catalog/catalog.h"
+#include "fmgr.h"
+#include "miscadmin.h"
+#include "storage/bufmgr.h"
+#include "storage/smgr.h"
+#include "utils/acl.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/rel.h"
+
+PG_MODULE_MAGIC;
+
+PG_FUNCTION_INFO_V1(pg_prewarm);
+
+typedef enum
+{
+ PREWARM_PREFETCH,
+ PREWARM_READ,
+ PREWARM_BUFFER
+} PrewarmType;
+
+static char blockbuffer[BLCKSZ];
+
+/*
+ * pg_prewarm(regclass, mode text, fork text,
+ * first_block int8, last_block int8)
+ *
+ * The first argument is the relation to be prewarmed; the second controls
+ * how prewarming is done; legal options are 'prefetch', 'read', and 'buffer'.
+ * The third is the name of the relation fork to be prewarmed. The fourth
+ * and fifth arguments specify the first and last block to be prewarmed.
+ * If the fourth argument is NULL, it will be taken as 0; if the fifth argument
+ * is NULL, it will be taken as the number of blocks in the relation. The
+ * return value is the number of blocks successfully prewarmed.
+ */
+Datum
+pg_prewarm(PG_FUNCTION_ARGS)
+{
+ Oid relOid;
+ text *forkName;
+ text *type;
+ int64 first_block;
+ int64 last_block;
+ int64 nblocks;
+ int64 blocks_done = 0;
+ int64 block;
+ Relation rel;
+ ForkNumber forkNumber;
+ char *forkString;
+ char *ttype;
+ PrewarmType ptype;
+ AclResult aclresult;
+
+ /* Basic sanity checking. */
+ if (PG_ARGISNULL(0))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("relation cannot be null")));
+ relOid = PG_GETARG_OID(0);
+ if (PG_ARGISNULL(1))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ (errmsg("prewarm type cannot be null"))));
+ type = PG_GETARG_TEXT_P(1);
+ ttype = text_to_cstring(type);
+ if (strcmp(ttype, "prefetch") == 0)
+ ptype = PREWARM_PREFETCH;
+ else if (strcmp(ttype, "read") == 0)
+ ptype = PREWARM_READ;
+ else if (strcmp(ttype, "buffer") == 0)
+ ptype = PREWARM_BUFFER;
+ else
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid prewarm type"),
+ errhint("Valid prewarm types are \"prefetch\", \"read\", and \"buffer\".")));
+ PG_RETURN_INT64(0); /* Placate compiler. */
+ }
+ if (PG_ARGISNULL(2))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ (errmsg("relation fork cannot be null"))));
+ forkName = PG_GETARG_TEXT_P(2);
+ forkString = text_to_cstring(forkName);
+ forkNumber = forkname_to_number(forkString);
+
+ /* Open relation and check privileges. */
+ rel = relation_open(relOid, AccessShareLock);
+ aclresult = pg_class_aclcheck(relOid, GetUserId(), ACL_SELECT);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_CLASS, get_rel_name(relOid));
+
+ /* Check that the fork exists. */
+ RelationOpenSmgr(rel);
+ if (!smgrexists(rel->rd_smgr, forkNumber))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("fork \"%s\" does not exist for this relation",
+ forkString)));
+
+ /* Validate block numbers, or handle nulls. */
+ nblocks = RelationGetNumberOfBlocksInFork(rel, forkNumber);
+ if (PG_ARGISNULL(3))
+ first_block = 0;
+ else
+ {
+ first_block = PG_GETARG_INT64(3);
+ if (first_block < 0 || first_block >= nblocks)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("starting block number must be between 0 and " INT64_FORMAT,
+ nblocks - 1)));
+ }
+ if (PG_ARGISNULL(4))
+ last_block = nblocks - 1;
+ else
+ {
+ last_block = PG_GETARG_INT64(4);
+ if (last_block < 0 || last_block >= nblocks)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("ending block number must be between 0 and " INT64_FORMAT,
+ nblocks - 1)));
+ }
+
+ /* Now we're ready to do the real work. */
+ if (ptype == PREWARM_PREFETCH)
+ {
+#ifdef USE_PREFETCH
+
+ /*
+ * In prefetch mode, we just hint the OS to read the blocks, but we
+ * don't know whether it really does it, and we don't wait for it to
+ * finish.
+ *
+ * It would probably be better to pass our prefetch requests in chunks
+ * of a megabyte or maybe even a whole segment at a time, but there's
+ * no practical way to do that at present without a gross modularity
+ * violation, so we just do this.
+ */
+ for (block = first_block; block <= last_block; ++block)
+ {
+ PrefetchBuffer(rel, forkNumber, block);
+ ++blocks_done;
+ }
+#else
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("prefetch is not supported by this build")));
+#endif
+ }
+ else if (ptype == PREWARM_READ)
+ {
+ /*
+ * In read mode, we actually read the blocks, but not into shared
+ * buffers. This is more portable than prefetch mode (it works
+ * everywhere) and is synchronous.
+ */
+ for (block = first_block; block <= last_block; ++block)
+ {
+ smgrread(rel->rd_smgr, forkNumber, block, blockbuffer);
+ ++blocks_done;
+ }
+ }
+ else if (ptype == PREWARM_BUFFER)
+ {
+ /*
+ * In buffer mode, we actually pull the data into shared_buffers.
+ */
+ for (block = first_block; block <= last_block; ++block)
+ {
+ Buffer buf;
+
+ buf = ReadBufferExtended(rel, forkNumber, block, RBM_NORMAL, NULL);
+ ReleaseBuffer(buf);
+ ++blocks_done;
+ }
+ }
+
+ /* Close relation, release lock. */
+ relation_close(rel, AccessShareLock);
+
+ PG_RETURN_INT64(blocks_done);
+}
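
Editor's note (illustrative object name): once "CREATE EXTENSION pg_prewarm" has been run, SELECT pg_prewarm('my_table') loads the main fork of my_table into shared buffers, since mode defaults to 'buffer', fork to 'main', and the block range to the whole relation; SELECT pg_prewarm('my_table', 'prefetch') instead only hints the blocks into the OS cache, per the mode descriptions in the function comment above.
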
diff --git a/contrib/pg_prewarm/pg_prewarm.control b/contrib/pg_prewarm/pg_prewarm.control
new file mode 100644
index 0000000000..8e5dee5068
--- /dev/null
+++ b/contrib/pg_prewarm/pg_prewarm.control
@@ -0,0 +1,5 @@
+# pg_prewarm extension
+comment = 'prewarm relation data'
+default_version = '1.0'
+module_pathname = '$libdir/pg_prewarm'
+relocatable = true
diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c
index 57241ff897..d6b169264c 100644
--- a/contrib/pg_standby/pg_standby.c
+++ b/contrib/pg_standby/pg_standby.c
@@ -28,20 +28,9 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <signal.h>
-
-#ifdef WIN32
-int getopt(int argc, char *const argv[], const char *optstring);
-#else
#include <sys/time.h>
-#include <unistd.h>
-
-#ifdef HAVE_GETOPT_H
-#include <getopt.h>
-#endif
-#endif /* ! WIN32 */
-extern char *optarg;
-extern int optind;
+#include "pg_getopt.h"
const char *progname;
@@ -256,7 +245,7 @@ CustomizableCleanupPriorWALFiles(void)
*/
if ((xldir = opendir(archiveLocation)) != NULL)
{
- while ((xlde = readdir(xldir)) != NULL)
+ while (errno = 0, (xlde = readdir(xldir)) != NULL)
{
/*
* We ignore the timeline part of the XLOG segment identifiers
@@ -294,6 +283,10 @@ CustomizableCleanupPriorWALFiles(void)
}
}
}
+
+ if (errno)
+ fprintf(stderr, "%s: could not read archive location \"%s\": %s\n",
+ progname, archiveLocation, strerror(errno));
if (debug)
fprintf(stderr, "\n");
}
@@ -301,7 +294,10 @@ CustomizableCleanupPriorWALFiles(void)
fprintf(stderr, "%s: could not open archive location \"%s\": %s\n",
progname, archiveLocation, strerror(errno));
- closedir(xldir);
+ if (closedir(xldir))
+ fprintf(stderr, "%s: could not close archive location \"%s\": %s\n",
+ progname, archiveLocation, strerror(errno));
+
fflush(stderr);
}
}
@@ -338,14 +334,14 @@ SetWALFileNameForCleanup(void)
if (strcmp(restartWALFileName, nextWALFileName) > 0)
return false;
- strcpy(exclusiveCleanupFileName, restartWALFileName);
+ strlcpy(exclusiveCleanupFileName, restartWALFileName, sizeof(exclusiveCleanupFileName));
return true;
}
if (keepfiles > 0)
{
sscanf(nextWALFileName, "%08X%08X%08X", &tli, &log, &seg);
- if (tli > 0 && log >= 0 && seg > 0)
+ if (tli > 0 && seg > 0)
{
log_diff = keepfiles / MaxSegmentsPerLogFile;
seg_diff = keepfiles % MaxSegmentsPerLogFile;
@@ -527,9 +523,9 @@ usage(void)
printf(" -s SLEEPTIME seconds to wait between file checks (min=1, max=60,\n"
" default=5)\n");
printf(" -t TRIGGERFILE trigger file to initiate failover (no default)\n");
+ printf(" -V, --version output version information, then exit\n");
printf(" -w MAXWAITTIME max seconds to wait for a file (0=no limit) (default=0)\n");
- printf(" --help show this help, then exit\n");
- printf(" --version output version information, then exit\n");
+ printf(" -?, --help show this help, then exit\n");
printf("\n"
"Main intended use as restore_command in recovery.conf:\n"
" restore_command = 'pg_standby [OPTION]... ARCHIVELOCATION %%f %%p %%r'\n"
@@ -549,7 +545,7 @@ sighandler(int sig)
static void
sigquit_handler(int sig)
{
- signal(SIGINT, SIG_DFL);
+ pqsignal(SIGINT, SIG_DFL);
kill(getpid(), SIGINT);
}
#endif
@@ -592,9 +588,9 @@ main(int argc, char **argv)
*
* There's no way to trigger failover via signal on Windows.
*/
- (void) signal(SIGUSR1, sighandler);
- (void) signal(SIGINT, sighandler); /* deprecated, use SIGUSR1 */
- (void) signal(SIGQUIT, sigquit_handler);
+ (void) pqsignal(SIGUSR1, sighandler);
+ (void) pqsignal(SIGINT, sighandler); /* deprecated, use SIGUSR1 */
+ (void) pqsignal(SIGQUIT, sigquit_handler);
#endif
while ((c = getopt(argc, argv, "cdk:lr:s:t:w:")) != -1)
@@ -643,7 +639,7 @@ main(int argc, char **argv)
}
break;
case 't': /* Trigger file */
- triggerPath = optarg;
+ triggerPath = strdup(optarg);
break;
case 'w': /* Max wait time */
maxwaittime = atoi(optarg);
diff --git a/contrib/pg_stat_statements/Makefile b/contrib/pg_stat_statements/Makefile
index e8aed61216..95a2767006 100644
--- a/contrib/pg_stat_statements/Makefile
+++ b/contrib/pg_stat_statements/Makefile
@@ -4,8 +4,8 @@ MODULE_big = pg_stat_statements
OBJS = pg_stat_statements.o
EXTENSION = pg_stat_statements
-DATA = pg_stat_statements--1.1.sql pg_stat_statements--1.0--1.1.sql \
- pg_stat_statements--unpackaged--1.0.sql
+DATA = pg_stat_statements--1.2.sql pg_stat_statements--1.1--1.2.sql \
+ pg_stat_statements--1.0--1.1.sql pg_stat_statements--unpackaged--1.0.sql
ifdef USE_PGXS
PG_CONFIG = pg_config
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql b/contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql
index 5662273e16..5be281ea47 100644
--- a/contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql
@@ -1,7 +1,7 @@
/* contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql */
-- complain if script is sourced in psql, rather than via ALTER EXTENSION
-\echo Use "ALTER EXTENSION pg_stat_statements UPDATE" to load this file. \quit
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.1'" to load this file. \quit
/* First we have to remove them from the extension */
ALTER EXTENSION pg_stat_statements DROP VIEW pg_stat_statements;
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.0.sql b/contrib/pg_stat_statements/pg_stat_statements--1.0.sql
deleted file mode 100644
index 5294a01dd5..0000000000
--- a/contrib/pg_stat_statements/pg_stat_statements--1.0.sql
+++ /dev/null
@@ -1,39 +0,0 @@
-/* contrib/pg_stat_statements/pg_stat_statements--1.0.sql */
-
--- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "CREATE EXTENSION pg_stat_statements" to load this file. \quit
-
--- Register functions.
-CREATE FUNCTION pg_stat_statements_reset()
-RETURNS void
-AS 'MODULE_PATHNAME'
-LANGUAGE C;
-
-CREATE FUNCTION pg_stat_statements(
- OUT userid oid,
- OUT dbid oid,
- OUT query text,
- OUT calls int8,
- OUT total_time float8,
- OUT rows int8,
- OUT shared_blks_hit int8,
- OUT shared_blks_read int8,
- OUT shared_blks_written int8,
- OUT local_blks_hit int8,
- OUT local_blks_read int8,
- OUT local_blks_written int8,
- OUT temp_blks_read int8,
- OUT temp_blks_written int8
-)
-RETURNS SETOF record
-AS 'MODULE_PATHNAME'
-LANGUAGE C;
-
--- Register a view on the function for ease of use.
-CREATE VIEW pg_stat_statements AS
- SELECT * FROM pg_stat_statements();
-
-GRANT SELECT ON pg_stat_statements TO PUBLIC;
-
--- Don't want this to be available to non-superusers.
-REVOKE ALL ON FUNCTION pg_stat_statements_reset() FROM PUBLIC;
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql b/contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql
new file mode 100644
index 0000000000..74ae43868d
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql
@@ -0,0 +1,43 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.2'" to load this file. \quit
+
+/* First we have to remove them from the extension */
+ALTER EXTENSION pg_stat_statements DROP VIEW pg_stat_statements;
+ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements();
+
+/* Then we can drop them */
+DROP VIEW pg_stat_statements;
+DROP FUNCTION pg_stat_statements();
+
+/* Now redefine */
+CREATE FUNCTION pg_stat_statements(IN showtext boolean,
+ OUT userid oid,
+ OUT dbid oid,
+ OUT queryid bigint,
+ OUT query text,
+ OUT calls int8,
+ OUT total_time float8,
+ OUT rows int8,
+ OUT shared_blks_hit int8,
+ OUT shared_blks_read int8,
+ OUT shared_blks_dirtied int8,
+ OUT shared_blks_written int8,
+ OUT local_blks_hit int8,
+ OUT local_blks_read int8,
+ OUT local_blks_dirtied int8,
+ OUT local_blks_written int8,
+ OUT temp_blks_read int8,
+ OUT temp_blks_written int8,
+ OUT blk_read_time float8,
+ OUT blk_write_time float8
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'pg_stat_statements_1_2'
+LANGUAGE C STRICT VOLATILE;
+
+CREATE VIEW pg_stat_statements AS
+ SELECT * FROM pg_stat_statements(true);
+
+GRANT SELECT ON pg_stat_statements TO PUBLIC;
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.1.sql b/contrib/pg_stat_statements/pg_stat_statements--1.2.sql
index 42e4d689ca..5bfa9a55d0 100644
--- a/contrib/pg_stat_statements/pg_stat_statements--1.1.sql
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.2.sql
@@ -1,4 +1,4 @@
-/* contrib/pg_stat_statements/pg_stat_statements--1.1.sql */
+/* contrib/pg_stat_statements/pg_stat_statements--1.2.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION pg_stat_statements" to load this file. \quit
@@ -9,9 +9,10 @@ RETURNS void
AS 'MODULE_PATHNAME'
LANGUAGE C;
-CREATE FUNCTION pg_stat_statements(
+CREATE FUNCTION pg_stat_statements(IN showtext boolean,
OUT userid oid,
OUT dbid oid,
+ OUT queryid bigint,
OUT query text,
OUT calls int8,
OUT total_time float8,
@@ -30,12 +31,12 @@ CREATE FUNCTION pg_stat_statements(
OUT blk_write_time float8
)
RETURNS SETOF record
-AS 'MODULE_PATHNAME'
-LANGUAGE C;
+AS 'MODULE_PATHNAME', 'pg_stat_statements_1_2'
+LANGUAGE C STRICT VOLATILE;
-- Register a view on the function for ease of use.
CREATE VIEW pg_stat_statements AS
- SELECT * FROM pg_stat_statements();
+ SELECT * FROM pg_stat_statements(true);
GRANT SELECT ON pg_stat_statements TO PUBLIC;
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 28a7359edf..da128efb04 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -4,7 +4,7 @@
* Track statement execution times across a whole database cluster.
*
* Execution costs are totalled for each distinct source query, and kept in
- * a shared hashtable. (We track only as many distinct queries as will fit
+ * a shared hashtable. (We track only as many distinct queries as will fit
* in the designated amount of shared memory.)
*
* As of Postgres 9.2, this module normalizes query entries. Normalization
@@ -15,7 +15,7 @@
*
* Normalization is implemented by fingerprinting queries, selectively
* serializing those fields of each query tree's nodes that are judged to be
- * essential to the query. This is referred to as a query jumble. This is
+ * essential to the query. This is referred to as a query jumble. This is
* distinct from a regular serialization in that various extraneous
* information is ignored as irrelevant or not essential to the query, such
* as the collations of Vars and, most notably, the values of constants.
@@ -26,15 +26,29 @@
* tree(s) generated from the query. The executor can then use this value
* to blame query costs on the proper queryId.
*
+ * To facilitate presenting entries to users, we create "representative" query
+ * strings in which constants are replaced with '?' characters, to make it
+ * clearer what a normalized entry can represent. To save on shared memory,
+ * and to avoid having to truncate oversized query strings, we store these
+ * strings in a temporary external query-texts file. Offsets into this
+ * file are kept in shared memory.
+ *
* Note about locking issues: to create or delete an entry in the shared
* hashtable, one must hold pgss->lock exclusively. Modifying any field
* in an entry except the counters requires the same. To look up an entry,
* one must hold the lock shared. To read or update the counters within
* an entry, one must hold the lock shared or exclusive (so the entry doesn't
* disappear!) and also take the entry's mutex spinlock.
+ * The shared state variable pgss->extent (the next free spot in the external
+ * query-text file) should be accessed only while holding either the
+ * pgss->mutex spinlock, or exclusive lock on pgss->lock. We use the mutex to
+ * allow reserving file space while holding only shared lock on pgss->lock.
+ * Rewriting the entire external query-text file, eg for garbage collection,
+ * requires holding pgss->lock exclusively; this allows individual entries
+ * in the file to be read or written while holding only shared lock.
*
*
- * Copyright (c) 2008-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2008-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/pg_stat_statements/pg_stat_statements.c
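
Editor's note: a hedged sketch of the reservation step described in the locking comment above. Struct and field names are taken from later hunks in this file; the body is illustrative, not the actual qtext_store() code. Space in the external query-text file is claimed by bumping pgss->extent under the spinlock while holding only shared pgss->lock:

/* Illustrative only -- claim query_len + 1 bytes of the query-text file
 * while holding shared pgss->lock, serializing on the mutex spinlock. */
static Size
reserve_qtext_space(int query_len)
{
    volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
    Size        off;

    SpinLockAcquire(&s->mutex);
    off = s->extent;
    s->extent += query_len + 1;     /* +1 for the trailing NUL */
    s->n_writers++;                 /* caller writes at 'off', then decrements */
    SpinLockRelease(&s->mutex);

    return off;
}
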
@@ -43,6 +57,7 @@
*/
#include "postgres.h"
+#include <sys/stat.h>
#include <unistd.h>
#include "access/hash.h"
@@ -59,20 +74,35 @@
#include "storage/spin.h"
#include "tcop/utility.h"
#include "utils/builtins.h"
+#include "utils/memutils.h"
PG_MODULE_MAGIC;
-/* Location of stats file */
-#define PGSS_DUMP_FILE "global/pg_stat_statements.stat"
+/* Location of permanent stats file (valid when database is shut down) */
+#define PGSS_DUMP_FILE PGSTAT_STAT_PERMANENT_DIRECTORY "/pg_stat_statements.stat"
+
+/*
+ * Location of external query text file. We don't keep it in the core
+ * system's stats_temp_directory. The core system can safely use that GUC
+ * setting, because the statistics collector temp file paths are set only once
+ * as part of changing the GUC, but pg_stat_statements has no way of avoiding
+ * race conditions. Besides, we only expect modest, infrequent I/O for query
+ * strings, so placing the file on a faster filesystem is not compelling.
+ */
+#define PGSS_TEXT_FILE PG_STAT_TMP_DIR "/pgss_query_texts.stat"
+
+/* Magic number identifying the stats file format */
+static const uint32 PGSS_FILE_HEADER = 0x20140125;
-/* This constant defines the magic number in the stats file header */
-static const uint32 PGSS_FILE_HEADER = 0x20120328;
+/* PostgreSQL major version number, changes in which invalidate all entries */
+static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100;
/* XXX: Should USAGE_EXEC reflect execution time and/or buffer usage? */
#define USAGE_EXEC(duration) (1.0)
#define USAGE_INIT (1.0) /* including initial planning */
#define ASSUMED_MEDIAN_INIT (10.0) /* initial assumed median usage */
+#define ASSUMED_LENGTH_INIT 1024 /* initial assumed mean query length */
#define USAGE_DECREASE_FACTOR (0.99) /* decreased every entry_dealloc */
#define STICKY_DECREASE_FACTOR (0.50) /* factor for sticky entries */
#define USAGE_DEALLOC_PERCENT 5 /* free this % of entries at once */
@@ -80,18 +110,23 @@ static const uint32 PGSS_FILE_HEADER = 0x20120328;
#define JUMBLE_SIZE 1024 /* query serialization buffer size */
/*
+ * Extension version number, for supporting older extension versions' objects
+ */
+typedef enum pgssVersion
+{
+ PGSS_V1_0 = 0,
+ PGSS_V1_1,
+ PGSS_V1_2
+} pgssVersion;
+
+/*
* Hashtable key that defines the identity of a hashtable entry. We separate
* queries by user and by database even if they are otherwise identical.
- *
- * Presently, the query encoding is fully determined by the source database
- * and so we don't really need it to be in the key. But that might not always
- * be true. Anyway it's notationally convenient to pass it as part of the key.
*/
typedef struct pgssHashKey
{
Oid userid; /* user OID */
Oid dbid; /* database OID */
- int encoding; /* query encoding */
uint32 queryid; /* query identifier */
} pgssHashKey;
@@ -121,16 +156,18 @@ typedef struct Counters
/*
* Statistics per statement
*
- * NB: see the file read/write code before changing field order here.
+ * Note: in event of a failure in garbage collection of the query text file,
+ * we reset query_offset to zero and query_len to -1. This will be seen as
+ * an invalid state by qtext_fetch().
*/
typedef struct pgssEntry
{
pgssHashKey key; /* hash key of entry - MUST BE FIRST */
Counters counters; /* the statistics for this query */
+ Size query_offset; /* query text offset in external file */
int query_len; /* # of valid bytes in query string */
+ int encoding; /* query text encoding */
slock_t mutex; /* protects the counters only */
- char query[1]; /* VARIABLE LENGTH ARRAY - MUST BE LAST */
- /* Note: the allocated length of query[] is actually pgss->query_size */
} pgssEntry;
/*
@@ -138,9 +175,13 @@ typedef struct pgssEntry
*/
typedef struct pgssSharedState
{
- LWLockId lock; /* protects hashtable search/modification */
- int query_size; /* max query length in bytes */
+ LWLock *lock; /* protects hashtable search/modification */
double cur_median_usage; /* current median usage in hashtable */
+ Size mean_query_len; /* current mean entry text length */
+ slock_t mutex; /* protects following fields only: */
+ Size extent; /* current extent of query file */
+ int n_writers; /* number of active writers to query file */
+ int gc_count; /* query file garbage collection cycle count */
} pgssSharedState;
/*
@@ -219,15 +260,21 @@ static bool pgss_save; /* whether to save stats across shutdown */
(pgss_track == PGSS_TRACK_ALL || \
(pgss_track == PGSS_TRACK_TOP && nested_level == 0))
+#define record_gc_qtexts() \
+ do { \
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss; \
+ SpinLockAcquire(&s->mutex); \
+ s->gc_count++; \
+ SpinLockRelease(&s->mutex); \
+ } while(0)
+
/*---- Function declarations ----*/
void _PG_init(void);
void _PG_fini(void);
-Datum pg_stat_statements_reset(PG_FUNCTION_ARGS);
-Datum pg_stat_statements(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(pg_stat_statements_reset);
+PG_FUNCTION_INFO_V1(pg_stat_statements_1_2);
PG_FUNCTION_INFO_V1(pg_stat_statements);
static void pgss_shmem_startup(void);
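
Editor's note: the two entry points registered above (pg_stat_statements and the new pg_stat_statements_1_2) let the 1.0/1.1 and 1.2 SQL definitions coexist; both are expected to route into pg_stat_statements_internal(), declared further down, tagged with the API version being served. A hedged sketch of that dispatch (bodies illustrative; only the symbols appear in this diff):

/* Illustrative dispatch stubs; the real bodies live later in this file. */
Datum
pg_stat_statements_1_2(PG_FUNCTION_ARGS)
{
    bool        showtext = PG_GETARG_BOOL(0);

    pg_stat_statements_internal(fcinfo, PGSS_V1_2, showtext);

    return (Datum) 0;
}

Datum
pg_stat_statements(PG_FUNCTION_ARGS)
{
    /* legacy 1.0/1.1 signature: no showtext argument, always show text */
    pg_stat_statements_internal(fcinfo, PGSS_V1_0, true);

    return (Datum) 0;
}
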
@@ -239,8 +286,8 @@ static void pgss_ExecutorRun(QueryDesc *queryDesc,
long count);
static void pgss_ExecutorFinish(QueryDesc *queryDesc);
static void pgss_ExecutorEnd(QueryDesc *queryDesc);
-static void pgss_ProcessUtility(Node *parsetree,
- const char *queryString, ParamListInfo params, bool isTopLevel,
+static void pgss_ProcessUtility(Node *parsetree, const char *queryString,
+ ProcessUtilityContext context, ParamListInfo params,
DestReceiver *dest,
#ifdef PGXC
bool sentToRemote,
@@ -253,10 +300,20 @@ static void pgss_store(const char *query, uint32 queryId,
double total_time, uint64 rows,
const BufferUsage *bufusage,
pgssJumbleState *jstate);
+static void pg_stat_statements_internal(FunctionCallInfo fcinfo,
+ pgssVersion api_version,
+ bool showtext);
static Size pgss_memsize(void);
-static pgssEntry *entry_alloc(pgssHashKey *key, const char *query,
- int query_len, bool sticky);
+static pgssEntry *entry_alloc(pgssHashKey *key, Size query_offset, int query_len,
+ int encoding, bool sticky);
static void entry_dealloc(void);
+static bool qtext_store(const char *query, int query_len,
+ Size *query_offset, int *gc_count);
+static char *qtext_load_file(Size *buffer_size);
+static char *qtext_fetch(Size query_offset, int query_len,
+ char *buffer, Size buffer_size);
+static bool need_gc_qtexts(void);
+static void gc_qtexts(void);
static void entry_reset(void);
static void AppendJumble(pgssJumbleState *jstate,
const unsigned char *item, Size size);
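
Editor's note: given the earlier comment that a failed garbage-collection pass resets query_offset to zero and query_len to -1, qtext_fetch() is where such entries are screened out. A hedged sketch of the validation it is described as doing (signature from the declaration above; the body is illustrative):

/* Illustrative only: return a pointer into the loaded query-text buffer, or
 * NULL if the offset/length pair is marked invalid or out of range. */
static char *
qtext_fetch(Size query_offset, int query_len,
            char *buffer, Size buffer_size)
{
    if (buffer == NULL)
        return NULL;                /* file could not be loaded */
    if (query_len < 0 ||
        query_offset + query_len >= buffer_size)
        return NULL;                /* invalid marker or bogus offset/length */
    if (buffer[query_offset + query_len] != '\0')
        return NULL;                /* expect a trailing NUL */
    return buffer + query_offset;
}
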
@@ -294,7 +351,7 @@ _PG_init(void)
"Sets the maximum number of statements tracked by pg_stat_statements.",
NULL,
&pgss_max,
- 1000,
+ 5000,
100,
INT_MAX,
PGC_POSTMASTER,
@@ -385,17 +442,20 @@ _PG_fini(void)
/*
* shmem_startup hook: allocate or attach to shared memory,
* then load any pre-existing statistics from file.
+ * Also create and load the query-texts file, which is expected to exist
+ * (even if empty) while the module is enabled.
*/
static void
pgss_shmem_startup(void)
{
bool found;
HASHCTL info;
- FILE *file;
+ FILE *file = NULL;
+ FILE *qfile = NULL;
uint32 header;
int32 num;
+ int32 pgver;
int32 i;
- int query_size;
int buffer_size;
char *buffer = NULL;
@@ -419,16 +479,17 @@ pgss_shmem_startup(void)
{
/* First time through ... */
pgss->lock = LWLockAssign();
- pgss->query_size = pgstat_track_activity_query_size;
pgss->cur_median_usage = ASSUMED_MEDIAN_INIT;
+ pgss->mean_query_len = ASSUMED_LENGTH_INIT;
+ SpinLockInit(&pgss->mutex);
+ pgss->extent = 0;
+ pgss->n_writers = 0;
+ pgss->gc_count = 0;
}
- /* Be sure everyone agrees on the hash table entry size */
- query_size = pgss->query_size;
-
memset(&info, 0, sizeof(info));
info.keysize = sizeof(pgssHashKey);
- info.entrysize = offsetof(pgssEntry, query) +query_size;
+ info.entrysize = sizeof(pgssEntry);
info.hash = pgss_hash_fn;
info.match = pgss_match_fn;
pgss_hash = ShmemInitHash("pg_stat_statements hash",
@@ -446,68 +507,100 @@ pgss_shmem_startup(void)
on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);
/*
- * Attempt to load old statistics from the dump file, if this is the first
- * time through and we weren't told not to.
+ * Done if some other process already completed our initialization.
*/
- if (found || !pgss_save)
+ if (found)
return;
/*
* Note: we don't bother with locks here, because there should be no other
* processes running when this code is reached.
*/
+
+ /* Unlink query text file possibly left over from crash */
+ unlink(PGSS_TEXT_FILE);
+
+ /* Allocate new query text temp file */
+ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
+ if (qfile == NULL)
+ goto write_error;
+
+ /*
+ * If we were told not to load old statistics, we're done. (Note we do
+ * not try to unlink any old dump file in this case. This seems a bit
+ * questionable but it's the historical behavior.)
+ */
+ if (!pgss_save)
+ {
+ FreeFile(qfile);
+ return;
+ }
+
+ /*
+ * Attempt to load old statistics from the dump file.
+ */
file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
if (file == NULL)
{
- if (errno == ENOENT)
- return; /* ignore not-found error */
- goto error;
+ if (errno != ENOENT)
+ goto read_error;
+ /* No existing persisted stats file, so we're done */
+ FreeFile(qfile);
+ return;
}
- buffer_size = query_size;
+ buffer_size = 2048;
buffer = (char *) palloc(buffer_size);
if (fread(&header, sizeof(uint32), 1, file) != 1 ||
- header != PGSS_FILE_HEADER ||
+ fread(&pgver, sizeof(uint32), 1, file) != 1 ||
fread(&num, sizeof(int32), 1, file) != 1)
- goto error;
+ goto read_error;
+
+ if (header != PGSS_FILE_HEADER ||
+ pgver != PGSS_PG_MAJOR_VERSION)
+ goto data_error;
for (i = 0; i < num; i++)
{
pgssEntry temp;
pgssEntry *entry;
+ Size query_offset;
- if (fread(&temp, offsetof(pgssEntry, mutex), 1, file) != 1)
- goto error;
+ if (fread(&temp, sizeof(pgssEntry), 1, file) != 1)
+ goto read_error;
/* Encoding is the only field we can easily sanity-check */
- if (!PG_VALID_BE_ENCODING(temp.key.encoding))
- goto error;
+ if (!PG_VALID_BE_ENCODING(temp.encoding))
+ goto data_error;
- /* Previous incarnation might have had a larger query_size */
+ /* Resize buffer as needed */
if (temp.query_len >= buffer_size)
{
- buffer = (char *) repalloc(buffer, temp.query_len + 1);
- buffer_size = temp.query_len + 1;
+ buffer_size = Max(buffer_size * 2, temp.query_len + 1);
+ buffer = repalloc(buffer, buffer_size);
}
- if (fread(buffer, 1, temp.query_len, file) != temp.query_len)
- goto error;
+ if (fread(buffer, 1, temp.query_len + 1, file) != temp.query_len + 1)
+ goto read_error;
+
+ /* Should have a trailing null, but let's make sure */
buffer[temp.query_len] = '\0';
/* Skip loading "sticky" entries */
if (temp.counters.calls == 0)
continue;
- /* Clip to available length if needed */
- if (temp.query_len >= query_size)
- temp.query_len = pg_encoding_mbcliplen(temp.key.encoding,
- buffer,
- temp.query_len,
- query_size - 1);
+ /* Store the query text */
+ query_offset = pgss->extent;
+ if (fwrite(buffer, 1, temp.query_len + 1, qfile) != temp.query_len + 1)
+ goto write_error;
+ pgss->extent += temp.query_len + 1;
/* make the hashtable entry (discards old entries if too many) */
- entry = entry_alloc(&temp.key, buffer, temp.query_len, false);
+ entry = entry_alloc(&temp.key, query_offset, temp.query_len,
+ temp.encoding,
+ false);
/* copy in the actual stats */
entry->counters = temp.counters;
@@ -515,26 +608,56 @@ pgss_shmem_startup(void)
pfree(buffer);
FreeFile(file);
+ FreeFile(qfile);
/*
- * Remove the file so it's not included in backups/replication slaves,
- * etc. A new file will be written on next shutdown.
+ * Remove the persisted stats file so it's not included in
+ * backups/replication slaves, etc. A new file will be written on next
+ * shutdown.
+ *
+ * Note: it's okay if the PGSS_TEXT_FILE is included in a basebackup,
+ * because we remove that file on startup; it acts inversely to
+ * PGSS_DUMP_FILE, in that it is only supposed to be around when the
+ * server is running, whereas PGSS_DUMP_FILE is only supposed to be around
+ * when the server is not running. Leaving the file creates no danger of
+ * a newly restored database having a spurious record of execution costs,
+ * which is what we're really concerned about here.
*/
unlink(PGSS_DUMP_FILE);
return;
-error:
+read_error:
ereport(LOG,
(errcode_for_file_access(),
errmsg("could not read pg_stat_statement file \"%s\": %m",
PGSS_DUMP_FILE)));
+ goto fail;
+data_error:
+ ereport(LOG,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("ignoring invalid data in pg_stat_statement file \"%s\"",
+ PGSS_DUMP_FILE)));
+ goto fail;
+write_error:
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+fail:
if (buffer)
pfree(buffer);
if (file)
FreeFile(file);
+ if (qfile)
+ FreeFile(qfile);
/* If possible, throw away the bogus file; ignore any error */
unlink(PGSS_DUMP_FILE);
+
+ /*
+ * Don't unlink PGSS_TEXT_FILE here; it should always be around while the
+ * server is running with pg_stat_statements enabled
+ */
}
/*
@@ -547,6 +670,8 @@ static void
pgss_shmem_shutdown(int code, Datum arg)
{
FILE *file;
+ char *qbuffer = NULL;
+ Size qbuffer_size = 0;
HASH_SEQ_STATUS hash_seq;
int32 num_entries;
pgssEntry *entry;
@@ -569,20 +694,42 @@ pgss_shmem_shutdown(int code, Datum arg)
if (fwrite(&PGSS_FILE_HEADER, sizeof(uint32), 1, file) != 1)
goto error;
+ if (fwrite(&PGSS_PG_MAJOR_VERSION, sizeof(uint32), 1, file) != 1)
+ goto error;
num_entries = hash_get_num_entries(pgss_hash);
if (fwrite(&num_entries, sizeof(int32), 1, file) != 1)
goto error;
+ qbuffer = qtext_load_file(&qbuffer_size);
+ if (qbuffer == NULL)
+ goto error;
+
+ /*
+ * When serializing to disk, we store query texts immediately after their
+ * entry data. Any orphaned query texts are thereby excluded.
+ */
hash_seq_init(&hash_seq, pgss_hash);
while ((entry = hash_seq_search(&hash_seq)) != NULL)
{
int len = entry->query_len;
+ char *qstr = qtext_fetch(entry->query_offset, len,
+ qbuffer, qbuffer_size);
+
+ if (qstr == NULL)
+ continue; /* Ignore any entries with bogus texts */
- if (fwrite(entry, offsetof(pgssEntry, mutex), 1, file) != 1 ||
- fwrite(entry->query, 1, len, file) != len)
+ if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 ||
+ fwrite(qstr, 1, len + 1, file) != len + 1)
+ {
+ /* note: we assume hash_seq_term won't change errno */
+ hash_seq_term(&hash_seq);
goto error;
+ }
}
+ free(qbuffer);
+ qbuffer = NULL;
+
if (FreeFile(file))
{
file = NULL;
@@ -590,7 +737,7 @@ pgss_shmem_shutdown(int code, Datum arg)
}
/*
- * Rename file into place, so we atomically replace the old one.
+ * Rename file into place, so we atomically replace any old one.
*/
if (rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE) != 0)
ereport(LOG,
@@ -598,6 +745,9 @@ pgss_shmem_shutdown(int code, Datum arg)
errmsg("could not rename pg_stat_statement file \"%s\": %m",
PGSS_DUMP_FILE ".tmp")));
+ /* Unlink query-texts file; it's not needed while the server is shut down */
+ unlink(PGSS_TEXT_FILE);
+
return;
error:
@@ -605,9 +755,12 @@ error:
(errcode_for_file_access(),
errmsg("could not write pg_stat_statement file \"%s\": %m",
PGSS_DUMP_FILE ".tmp")));
+ if (qbuffer)
+ free(qbuffer);
if (file)
FreeFile(file);
unlink(PGSS_DUMP_FILE ".tmp");
+ unlink(PGSS_TEXT_FILE);
}
/*
@@ -618,6 +771,9 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query)
{
pgssJumbleState jstate;
+ if (prev_post_parse_analyze_hook)
+ prev_post_parse_analyze_hook(pstate, query);
+
/* Assert we didn't do this already */
Assert(query->queryId == 0);
@@ -789,7 +945,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc)
*/
static void
pgss_ProcessUtility(Node *parsetree, const char *queryString,
- ParamListInfo params, bool isTopLevel,
+ ProcessUtilityContext context, ParamListInfo params,
DestReceiver *dest,
#ifdef PGXC
bool sentToRemote,
@@ -814,7 +970,7 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
{
instr_time start;
instr_time duration;
- uint64 rows = 0;
+ uint64 rows;
BufferUsage bufusage_start,
bufusage;
uint32 queryId;
@@ -826,15 +982,17 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
PG_TRY();
{
if (prev_ProcessUtility)
- prev_ProcessUtility(parsetree, queryString, params,
- isTopLevel, dest,
+ prev_ProcessUtility(parsetree, queryString,
+ context, params,
+ dest,
#ifdef PGXC
sentToRemote,
#endif /* PGXC */
completionTag);
else
- standard_ProcessUtility(parsetree, queryString, params,
- isTopLevel, dest,
+ standard_ProcessUtility(parsetree, queryString,
+ context, params,
+ dest,
#ifdef PGXC
sentToRemote,
#endif /* PGXC */
@@ -853,7 +1011,15 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
/* parse command tag to retrieve the number of affected rows. */
if (completionTag &&
- sscanf(completionTag, "COPY " UINT64_FORMAT, &rows) != 1)
+ strncmp(completionTag, "COPY ", 5) == 0)
+ {
+#ifdef HAVE_STRTOULL
+ rows = strtoull(completionTag + 5, NULL, 10);
+#else
+ rows = strtoul(completionTag + 5, NULL, 10);
+#endif
+ }
+ else
rows = 0;
/* calc differences of buffer counters. */
@@ -895,15 +1061,17 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
else
{
if (prev_ProcessUtility)
- prev_ProcessUtility(parsetree, queryString, params,
- isTopLevel, dest,
+ prev_ProcessUtility(parsetree, queryString,
+ context, params,
+ dest,
#ifdef PGXC
sentToRemote,
#endif /* PGXC */
completionTag);
else
- standard_ProcessUtility(parsetree, queryString, params,
- isTopLevel, dest,
+ standard_ProcessUtility(parsetree, queryString,
+ context, params,
+ dest,
#ifdef PGXC
sentToRemote,
#endif /* PGXC */
@@ -919,7 +1087,6 @@ pgss_hash_fn(const void *key, Size keysize)
{
const pgssHashKey *k = (const pgssHashKey *) key;
- /* we don't bother to include encoding in the hash */
return hash_uint32((uint32) k->userid) ^
hash_uint32((uint32) k->dbid) ^
hash_uint32((uint32) k->queryid);
@@ -936,7 +1103,6 @@ pgss_match_fn(const void *key1, const void *key2, Size keysize)
if (k1->userid == k2->userid &&
k1->dbid == k2->dbid &&
- k1->encoding == k2->encoding &&
k1->queryid == k2->queryid)
return 0;
else
@@ -970,6 +1136,8 @@ pgss_store(const char *query, uint32 queryId,
pgssHashKey key;
pgssEntry *entry;
char *norm_query = NULL;
+ int encoding = GetDatabaseEncoding();
+ int query_len;
Assert(query != NULL);
@@ -977,10 +1145,11 @@ pgss_store(const char *query, uint32 queryId,
if (!pgss || !pgss_hash)
return;
+ query_len = strlen(query);
+
/* Set up key for hashtable search */
key.userid = GetUserId();
key.dbid = MyDatabaseId;
- key.encoding = GetDatabaseEncoding();
key.queryid = queryId;
/* Lookup the hash table entry with shared lock. */
@@ -991,45 +1160,64 @@ pgss_store(const char *query, uint32 queryId,
/* Create new entry, if not present */
if (!entry)
{
- int query_len;
+ Size query_offset;
+ int gc_count;
+ bool stored;
+ bool do_gc;
/*
- * We'll need exclusive lock to make a new entry. There is no point
- * in holding shared lock while we normalize the string, though.
+ * Create a new, normalized query string if caller asked. We don't
+ * need to hold the lock while doing this work. (Note: in any case,
+ * it's possible that someone else creates a duplicate hashtable entry
+ * in the interval where we don't hold the lock below. That case is
+ * handled by entry_alloc.)
*/
- LWLockRelease(pgss->lock);
-
- query_len = strlen(query);
-
if (jstate)
{
- /* Normalize the string if enabled */
+ LWLockRelease(pgss->lock);
norm_query = generate_normalized_query(jstate, query,
&query_len,
- key.encoding);
+ encoding);
+ LWLockAcquire(pgss->lock, LW_SHARED);
+ }
- /* Acquire exclusive lock as required by entry_alloc() */
- LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
+ /* Append new query text to file with only shared lock held */
+ stored = qtext_store(norm_query ? norm_query : query, query_len,
+ &query_offset, &gc_count);
- entry = entry_alloc(&key, norm_query, query_len, true);
- }
- else
- {
- /*
- * We're just going to store the query string as-is; but we have
- * to truncate it if over-length.
- */
- if (query_len >= pgss->query_size)
- query_len = pg_encoding_mbcliplen(key.encoding,
- query,
- query_len,
- pgss->query_size - 1);
+ /*
+ * Determine whether we need to garbage collect external query texts
+ * while the shared lock is still held. This micro-optimization
+ * avoids taking the time to decide this while holding exclusive lock.
+ */
+ do_gc = need_gc_qtexts();
- /* Acquire exclusive lock as required by entry_alloc() */
- LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
+ /* Need exclusive lock to make a new hashtable entry - promote */
+ LWLockRelease(pgss->lock);
+ LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
- entry = entry_alloc(&key, query, query_len, false);
- }
+ /*
+ * A garbage collection may have occurred while we weren't holding the
+ * lock. In the unlikely event that this happens, the query text we
+ * stored above will have been garbage collected, so write it again.
+ * This should be infrequent enough that doing it while holding
+ * exclusive lock isn't a performance problem.
+ */
+ if (!stored || pgss->gc_count != gc_count)
+ stored = qtext_store(norm_query ? norm_query : query, query_len,
+ &query_offset, NULL);
+
+ /* If we failed to write to the text file, give up */
+ if (!stored)
+ goto done;
+
+ /* OK to create a new hashtable entry */
+ entry = entry_alloc(&key, query_offset, query_len, encoding,
+ jstate != NULL);
+
+ /* If needed, perform garbage collection while exclusive lock held */
+ if (do_gc)
+ gc_qtexts();
}
/* Increment the counts, except when jstate is not NULL */
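The pgss_store() changes above append the query text to the external file while holding only a shared lock, remember the garbage-collection counter seen at that moment, and re-append the text after promoting to an exclusive lock if a collection ran in the unlocked window. A minimal standalone sketch of that recheck pattern, using invented helper names (store_text() and plain global counters) rather than the extension's real qtext_store() and LWLock calls:

    /*
     * Standalone sketch of the store-then-recheck pattern; store_text() and the
     * globals are invented stand-ins, not the extension's real functions.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static long extent = 0;    /* pretend query-text file extent */
    static int gc_count = 0;   /* pretend garbage-collection counter */

    /* Hypothetical qtext_store(): reserve space and report the counter seen. */
    static bool store_text(const char *text, long *offset, int *observed_gc)
    {
        *offset = extent;
        extent += (long) strlen(text) + 1;
        if (observed_gc)
            *observed_gc = gc_count;
        return true;           /* a real version can fail on I/O errors */
    }

    int main(void)
    {
        const char *query = "SELECT ?";
        long offset;
        int observed_gc;
        bool stored;

        /* ... shared lock held here ... */
        stored = store_text(query, &offset, &observed_gc);

        gc_count++;            /* simulate a collection in the unlocked window */

        /* ... shared lock released, exclusive lock acquired here ... */
        if (!stored || gc_count != observed_gc)
            stored = store_text(query, &offset, NULL);   /* write it again */

        if (stored)
            printf("query text stored at offset %ld\n", offset);
        return 0;
    }

The point of the pattern is that the second write only happens when the counter actually moved, so the common path never writes the text while holding the exclusive lock.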
@@ -1067,9 +1255,10 @@ pgss_store(const char *query, uint32 queryId,
SpinLockRelease(&e->mutex);
}
+done:
LWLockRelease(pgss->lock);
- /* We postpone this pfree until we're out of the lock */
+ /* We postpone this clean-up until we're out of the lock */
if (norm_query)
pfree(norm_query);
}
@@ -1088,15 +1277,51 @@ pg_stat_statements_reset(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
+/* Number of output arguments (columns) for various API versions */
#define PG_STAT_STATEMENTS_COLS_V1_0 14
-#define PG_STAT_STATEMENTS_COLS 18
+#define PG_STAT_STATEMENTS_COLS_V1_1 18
+#define PG_STAT_STATEMENTS_COLS_V1_2 19
+#define PG_STAT_STATEMENTS_COLS 19 /* maximum of above */
/*
* Retrieve statement statistics.
+ *
+ * The SQL API of this function has changed multiple times, and will likely
+ * do so again in future. To support the case where a newer version of this
+ * loadable module is being used with an old SQL declaration of the function,
+ * we continue to support the older API versions. For 1.2 and later, the
+ * expected API version is identified by embedding it in the C name of the
+ * function. Unfortunately we weren't bright enough to do that for 1.1.
+ */
+Datum
+pg_stat_statements_1_2(PG_FUNCTION_ARGS)
+{
+ bool showtext = PG_GETARG_BOOL(0);
+
+ pg_stat_statements_internal(fcinfo, PGSS_V1_2, showtext);
+
+ return (Datum) 0;
+}
+
+/*
+ * Legacy entry point for pg_stat_statements() API versions 1.0 and 1.1.
+ * This can be removed someday, perhaps.
*/
Datum
pg_stat_statements(PG_FUNCTION_ARGS)
{
+ /* If it's really API 1.1, we'll figure that out below */
+ pg_stat_statements_internal(fcinfo, PGSS_V1_0, true);
+
+ return (Datum) 0;
+}
+
+/* Common code for all versions of pg_stat_statements() */
+static void
+pg_stat_statements_internal(FunctionCallInfo fcinfo,
+ pgssVersion api_version,
+ bool showtext)
+{
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc;
Tuplestorestate *tupstore;
@@ -1104,10 +1329,14 @@ pg_stat_statements(PG_FUNCTION_ARGS)
MemoryContext oldcontext;
Oid userid = GetUserId();
bool is_superuser = superuser();
+ char *qbuffer = NULL;
+ Size qbuffer_size = 0;
+ Size extent = 0;
+ int gc_count = 0;
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
- bool sql_supports_v1_1_counters = true;
+ /* hash table must exist already */
if (!pgss || !pgss_hash)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
@@ -1124,14 +1353,38 @@ pg_stat_statements(PG_FUNCTION_ARGS)
errmsg("materialize mode required, but it is not " \
"allowed in this context")));
+ /* Switch into long-lived context to construct returned data structures */
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
/* Build a tuple descriptor for our result type */
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
elog(ERROR, "return type must be a row type");
- if (tupdesc->natts == PG_STAT_STATEMENTS_COLS_V1_0)
- sql_supports_v1_1_counters = false;
- per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
- oldcontext = MemoryContextSwitchTo(per_query_ctx);
+ /*
+ * Check we have the expected number of output arguments. Aside from
+ * being a good safety check, we need a kluge here to detect API version
+ * 1.1, which was wedged into the code in an ill-considered way.
+ */
+ switch (tupdesc->natts)
+ {
+ case PG_STAT_STATEMENTS_COLS_V1_0:
+ if (api_version != PGSS_V1_0)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ case PG_STAT_STATEMENTS_COLS_V1_1:
+ /* pg_stat_statements() should have told us 1.0 */
+ if (api_version != PGSS_V1_0)
+ elog(ERROR, "incorrect number of output arguments");
+ api_version = PGSS_V1_1;
+ break;
+ case PG_STAT_STATEMENTS_COLS_V1_2:
+ if (api_version != PGSS_V1_2)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ default:
+ elog(ERROR, "incorrect number of output arguments");
+ }
tupstore = tuplestore_begin_heap(true, false, work_mem);
rsinfo->returnMode = SFRM_Materialize;
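The hunk above infers the effective SQL API version from the number of output columns in the function's result type, because the 1.1 SQL declaration was wired to the same C entry point as 1.0. A standalone sketch of that column-count dispatch, using local constants and an enum that merely mirror the #defines above:

    #include <stdio.h>

    /* Column counts per API version, mirroring the #defines above. */
    #define COLS_V1_0 14
    #define COLS_V1_1 18
    #define COLS_V1_2 19

    typedef enum { V1_0, V1_1, V1_2 } ApiVersion;

    /*
     * Map the result row's column count plus the requested version to the
     * effective version, or -1 on a mismatch; mirrors the switch on
     * tupdesc->natts above.
     */
    static int effective_version(int natts, ApiVersion requested)
    {
        switch (natts)
        {
            case COLS_V1_0:
                return (requested == V1_0) ? (int) V1_0 : -1;
            case COLS_V1_1:
                /* the legacy entry point always requests V1_0; upgrade it */
                return (requested == V1_0) ? (int) V1_1 : -1;
            case COLS_V1_2:
                return (requested == V1_2) ? (int) V1_2 : -1;
            default:
                return -1;
        }
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               effective_version(14, V1_0),
               effective_version(18, V1_0),
               effective_version(19, V1_2),
               effective_version(19, V1_0));   /* mismatch, prints -1 */
        return 0;
    }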
@@ -1140,8 +1393,71 @@ pg_stat_statements(PG_FUNCTION_ARGS)
MemoryContextSwitchTo(oldcontext);
+ /*
+ * We'd like to load the query text file (if needed) while not holding any
+ * lock on pgss->lock. In the worst case we'll have to do this again
+ * after we have the lock, but it's unlikely enough to make this a win
+ * despite occasional duplicated work. We need to reload if anybody
+ * writes to the file (either a retail qtext_store(), or a garbage
+ * collection) between this point and where we've gotten shared lock. If
+ * a qtext_store is actually in progress when we look, we might as well
+ * skip the speculative load entirely.
+ */
+ if (showtext)
+ {
+ int n_writers;
+
+ /* Take the mutex so we can examine variables */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ extent = s->extent;
+ n_writers = s->n_writers;
+ gc_count = s->gc_count;
+ SpinLockRelease(&s->mutex);
+ }
+
+ /* No point in loading file now if there are active writers */
+ if (n_writers == 0)
+ qbuffer = qtext_load_file(&qbuffer_size);
+ }
+
+ /*
+ * Get shared lock, load or reload the query text file if we must, and
+ * iterate over the hashtable entries.
+ *
+ * With a large hash table, we might be holding the lock rather longer
+ * than one could wish. However, this only blocks creation of new hash
+ * table entries, and the larger the hash table the less likely that is to
+ * be needed. So we can hope this is okay. Perhaps someday we'll decide
+ * we need to partition the hash table to limit the time spent holding any
+ * one lock.
+ */
LWLockAcquire(pgss->lock, LW_SHARED);
+ if (showtext)
+ {
+ /*
+ * Here it is safe to examine extent and gc_count without taking the
+ * mutex. Note that although other processes might change
+ * pgss->extent just after we look at it, the strings they then write
+ * into the file cannot yet be referenced in the hashtable, so we
+ * don't care whether we see them or not.
+ *
+ * If qtext_load_file fails, we just press on; we'll return NULL for
+ * every query text.
+ */
+ if (qbuffer == NULL ||
+ pgss->extent != extent ||
+ pgss->gc_count != gc_count)
+ {
+ if (qbuffer)
+ free(qbuffer);
+ qbuffer = qtext_load_file(&qbuffer_size);
+ }
+ }
+
hash_seq_init(&hash_seq, pgss_hash);
while ((entry = hash_seq_search(&hash_seq)) != NULL)
{
@@ -1149,6 +1465,7 @@ pg_stat_statements(PG_FUNCTION_ARGS)
bool nulls[PG_STAT_STATEMENTS_COLS];
int i = 0;
Counters tmp;
+ int64 queryid = entry->key.queryid;
memset(values, 0, sizeof(values));
memset(nulls, 0, sizeof(nulls));
@@ -1158,19 +1475,56 @@ pg_stat_statements(PG_FUNCTION_ARGS)
if (is_superuser || entry->key.userid == userid)
{
- char *qstr;
-
- qstr = (char *)
- pg_do_encoding_conversion((unsigned char *) entry->query,
- entry->query_len,
- entry->key.encoding,
- GetDatabaseEncoding());
- values[i++] = CStringGetTextDatum(qstr);
- if (qstr != entry->query)
- pfree(qstr);
+ if (api_version >= PGSS_V1_2)
+ values[i++] = Int64GetDatumFast(queryid);
+
+ if (showtext)
+ {
+ char *qstr = qtext_fetch(entry->query_offset,
+ entry->query_len,
+ qbuffer,
+ qbuffer_size);
+
+ if (qstr)
+ {
+ char *enc;
+
+ enc = pg_any_to_server(qstr,
+ entry->query_len,
+ entry->encoding);
+
+ values[i++] = CStringGetTextDatum(enc);
+
+ if (enc != qstr)
+ pfree(enc);
+ }
+ else
+ {
+ /* Just return a null if we fail to find the text */
+ nulls[i++] = true;
+ }
+ }
+ else
+ {
+ /* Query text not requested */
+ nulls[i++] = true;
+ }
}
else
- values[i++] = CStringGetTextDatum("<insufficient privilege>");
+ {
+ /* Don't show queryid */
+ if (api_version >= PGSS_V1_2)
+ nulls[i++] = true;
+
+ /*
+ * Don't show query text, but hint as to the reason for not doing
+ * so if it was requested
+ */
+ if (showtext)
+ values[i++] = CStringGetTextDatum("<insufficient privilege>");
+ else
+ nulls[i++] = true;
+ }
/* copy counters to a local variable to keep locking time short */
{
@@ -1190,34 +1544,37 @@ pg_stat_statements(PG_FUNCTION_ARGS)
values[i++] = Int64GetDatumFast(tmp.rows);
values[i++] = Int64GetDatumFast(tmp.shared_blks_hit);
values[i++] = Int64GetDatumFast(tmp.shared_blks_read);
- if (sql_supports_v1_1_counters)
+ if (api_version >= PGSS_V1_1)
values[i++] = Int64GetDatumFast(tmp.shared_blks_dirtied);
values[i++] = Int64GetDatumFast(tmp.shared_blks_written);
values[i++] = Int64GetDatumFast(tmp.local_blks_hit);
values[i++] = Int64GetDatumFast(tmp.local_blks_read);
- if (sql_supports_v1_1_counters)
+ if (api_version >= PGSS_V1_1)
values[i++] = Int64GetDatumFast(tmp.local_blks_dirtied);
values[i++] = Int64GetDatumFast(tmp.local_blks_written);
values[i++] = Int64GetDatumFast(tmp.temp_blks_read);
values[i++] = Int64GetDatumFast(tmp.temp_blks_written);
- if (sql_supports_v1_1_counters)
+ if (api_version >= PGSS_V1_1)
{
values[i++] = Float8GetDatumFast(tmp.blk_read_time);
values[i++] = Float8GetDatumFast(tmp.blk_write_time);
}
- Assert(i == (sql_supports_v1_1_counters ?
- PG_STAT_STATEMENTS_COLS : PG_STAT_STATEMENTS_COLS_V1_0));
+ Assert(i == (api_version == PGSS_V1_0 ? PG_STAT_STATEMENTS_COLS_V1_0 :
+ api_version == PGSS_V1_1 ? PG_STAT_STATEMENTS_COLS_V1_1 :
+ api_version == PGSS_V1_2 ? PG_STAT_STATEMENTS_COLS_V1_2 :
+ -1 /* fail if you forget to update this assert */ ));
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
}
+ /* clean up and return the tuplestore */
LWLockRelease(pgss->lock);
- /* clean up and return the tuplestore */
- tuplestore_donestoring(tupstore);
+ if (qbuffer)
+ free(qbuffer);
- return (Datum) 0;
+ tuplestore_donestoring(tupstore);
}
/*
@@ -1227,11 +1584,9 @@ static Size
pgss_memsize(void)
{
Size size;
- Size entrysize;
size = MAXALIGN(sizeof(pgssSharedState));
- entrysize = offsetof(pgssEntry, query) +pgstat_track_activity_query_size;
- size = add_size(size, hash_estimate_size(pgss_max, entrysize));
+ size = add_size(size, hash_estimate_size(pgss_max, sizeof(pgssEntry)));
return size;
}
@@ -1249,12 +1604,13 @@ pgss_memsize(void)
* would be difficult to demonstrate this even under artificial conditions.)
*
* Note: despite needing exclusive lock, it's not an error for the target
- * entry to already exist. This is because pgss_store releases and
+ * entry to already exist. This is because pgss_store releases and
* reacquires lock after failing to find a match; so someone else could
* have made the entry while we waited to get exclusive lock.
*/
static pgssEntry *
-entry_alloc(pgssHashKey *key, const char *query, int query_len, bool sticky)
+entry_alloc(pgssHashKey *key, Size query_offset, int query_len, int encoding,
+ bool sticky)
{
pgssEntry *entry;
bool found;
@@ -1276,11 +1632,11 @@ entry_alloc(pgssHashKey *key, const char *query, int query_len, bool sticky)
entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
/* re-initialize the mutex each time ... we assume no one using it */
SpinLockInit(&entry->mutex);
- /* ... and don't forget the query text */
- Assert(query_len >= 0 && query_len < pgss->query_size);
+ /* ... and don't forget the query text metadata */
+ Assert(query_len >= 0);
+ entry->query_offset = query_offset;
entry->query_len = query_len;
- memcpy(entry->query, query, query_len);
- entry->query[query_len] = '\0';
+ entry->encoding = encoding;
}
return entry;
@@ -1315,6 +1671,7 @@ entry_dealloc(void)
pgssEntry *entry;
int nvictims;
int i;
+ Size totlen = 0;
/*
* Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them.
@@ -1334,13 +1691,19 @@ entry_dealloc(void)
entry->counters.usage *= STICKY_DECREASE_FACTOR;
else
entry->counters.usage *= USAGE_DECREASE_FACTOR;
+ /* Accumulate total size, too. */
+ totlen += entry->query_len + 1;
}
qsort(entries, i, sizeof(pgssEntry *), entry_cmp);
- /* Also, record the (approximate) median usage */
if (i > 0)
+ {
+ /* Record the (approximate) median usage */
pgss->cur_median_usage = entries[i / 2]->counters.usage;
+ /* Record the mean query length */
+ pgss->mean_query_len = totlen / i;
+ }
nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100);
nvictims = Min(nvictims, i);
@@ -1354,6 +1717,396 @@ entry_dealloc(void)
}
/*
+ * Given a null-terminated string, allocate a new entry in the external query
+ * text file and store the string there.
+ *
+ * Although we could compute the string length via strlen(), callers already
+ * have it handy, so we require them to pass it too.
+ *
+ * If successful, returns true, and stores the new entry's offset in the file
+ * into *query_offset. Also, if gc_count isn't NULL, *gc_count is set to the
+ * number of garbage collections that have occurred so far.
+ *
+ * On failure, returns false.
+ *
+ * At least a shared lock on pgss->lock must be held by the caller, so as
+ * to prevent a concurrent garbage collection. Share-lock-holding callers
+ * should pass a gc_count pointer to obtain the number of garbage collections,
+ * so that they can recheck the count after obtaining exclusive lock to
+ * detect whether a garbage collection occurred (and removed this entry).
+ */
+static bool
+qtext_store(const char *query, int query_len,
+ Size *query_offset, int *gc_count)
+{
+ Size off;
+ int fd;
+
+ /*
+ * We use a spinlock to protect extent/n_writers/gc_count, so that
+ * multiple processes may execute this function concurrently.
+ */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ off = s->extent;
+ s->extent += query_len + 1;
+ s->n_writers++;
+ if (gc_count)
+ *gc_count = s->gc_count;
+ SpinLockRelease(&s->mutex);
+ }
+
+ *query_offset = off;
+
+ /* Now write the data into the successfully-reserved part of the file */
+ fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDWR | O_CREAT | PG_BINARY,
+ S_IRUSR | S_IWUSR);
+ if (fd < 0)
+ goto error;
+
+ if (lseek(fd, off, SEEK_SET) != off)
+ goto error;
+
+ if (write(fd, query, query_len + 1) != query_len + 1)
+ goto error;
+
+ CloseTransientFile(fd);
+
+ /* Mark our write complete */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ s->n_writers--;
+ SpinLockRelease(&s->mutex);
+ }
+
+ return true;
+
+error:
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+
+ if (fd >= 0)
+ CloseTransientFile(fd);
+
+ /* Mark our write complete */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ s->n_writers--;
+ SpinLockRelease(&s->mutex);
+ }
+
+ return false;
+}
+
+/*
+ * Read the external query text file into a malloc'd buffer.
+ *
+ * Returns NULL (without throwing an error) if unable to read, eg
+ * file not there or insufficient memory.
+ *
+ * On success, the buffer size is also returned into *buffer_size.
+ *
+ * This can be called without any lock on pgss->lock, but in that case
+ * the caller is responsible for verifying that the result is sane.
+ */
+static char *
+qtext_load_file(Size *buffer_size)
+{
+ char *buf;
+ int fd;
+ struct stat stat;
+
+ fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY, 0);
+ if (fd < 0)
+ {
+ if (errno != ENOENT)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not read pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ return NULL;
+ }
+
+ /* Get file length */
+ if (fstat(fd, &stat))
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not stat pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ CloseTransientFile(fd);
+ return NULL;
+ }
+
+ /* Allocate buffer; beware that off_t might be wider than size_t */
+ if (stat.st_size <= MaxAllocSize)
+ buf = (char *) malloc(stat.st_size);
+ else
+ buf = NULL;
+ if (buf == NULL)
+ {
+ ereport(LOG,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+ CloseTransientFile(fd);
+ return NULL;
+ }
+
+ /*
+ * OK, slurp in the file. If we get a short read and errno doesn't get
+ * set, the reason is probably that garbage collection truncated the file
+ * since we did the fstat(), so we don't log a complaint --- but we don't
+ * return the data, either, since it's most likely corrupt due to
+ * concurrent writes from garbage collection.
+ */
+ errno = 0;
+ if (read(fd, buf, stat.st_size) != stat.st_size)
+ {
+ if (errno)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not read pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ free(buf);
+ CloseTransientFile(fd);
+ return NULL;
+ }
+
+ CloseTransientFile(fd);
+
+ *buffer_size = stat.st_size;
+ return buf;
+}
+
+/*
+ * Locate a query text in the file image previously read by qtext_load_file().
+ *
+ * We validate the given offset/length, and return NULL if bogus. Otherwise,
+ * the result points to a null-terminated string within the buffer.
+ */
+static char *
+qtext_fetch(Size query_offset, int query_len,
+ char *buffer, Size buffer_size)
+{
+ /* File read failed? */
+ if (buffer == NULL)
+ return NULL;
+ /* Bogus offset/length? */
+ if (query_len < 0 ||
+ query_offset + query_len >= buffer_size)
+ return NULL;
+ /* As a further sanity check, make sure there's a trailing null */
+ if (buffer[query_offset + query_len] != '\0')
+ return NULL;
+ /* Looks OK */
+ return buffer + query_offset;
+}
+
+/*
+ * Do we need to garbage-collect the external query text file?
+ *
+ * Caller should hold at least a shared lock on pgss->lock.
+ */
+static bool
+need_gc_qtexts(void)
+{
+ Size extent;
+
+ /* Read shared extent pointer */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ extent = s->extent;
+ SpinLockRelease(&s->mutex);
+ }
+
+ /* Don't proceed if file does not exceed 512 bytes per possible entry */
+ if (extent < 512 * pgss_max)
+ return false;
+
+ /*
+ * Don't proceed if file is less than about 50% bloat. Nothing can or
+ * should be done in the event of unusually large query texts accounting
+ * for file's large size. We go to the trouble of maintaining the mean
+ * query length in order to prevent garbage collection from thrashing
+ * uselessly.
+ */
+ if (extent < pgss->mean_query_len * pgss_max * 2)
+ return false;
+
+ return true;
+}
+
+/*
+ * Garbage-collect orphaned query texts in external file.
+ *
+ * This won't be called often in the typical case, since it's likely that
+ * there won't be too much churn, and besides, a similar compaction process
+ * occurs when serializing to disk at shutdown or as part of resetting.
+ * Despite this, it seems prudent to plan for the edge case where the file
+ * becomes unreasonably large, with no other method of compaction likely to
+ * occur in the foreseeable future.
+ *
+ * The caller must hold an exclusive lock on pgss->lock.
+ */
+static void
+gc_qtexts(void)
+{
+ char *qbuffer;
+ Size qbuffer_size;
+ FILE *qfile;
+ HASH_SEQ_STATUS hash_seq;
+ pgssEntry *entry;
+ Size extent;
+ int nentries;
+
+ /*
+ * When called from pgss_store, some other session might have proceeded
+ * with garbage collection in the no-lock-held interim of lock strength
+ * escalation. Check once more that this is actually necessary.
+ */
+ if (!need_gc_qtexts())
+ return;
+
+ /*
+ * Load the old texts file. If we fail (out of memory, for instance) just
+ * skip the garbage collection.
+ */
+ qbuffer = qtext_load_file(&qbuffer_size);
+ if (qbuffer == NULL)
+ return;
+
+ /*
+ * We overwrite the query texts file in place, so as to reduce the risk of
+ * an out-of-disk-space failure. Since the file is guaranteed not to get
+ * larger, this should always work on traditional filesystems; though we
+ * could still lose on copy-on-write filesystems.
+ */
+ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
+ if (qfile == NULL)
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ goto gc_fail;
+ }
+
+ extent = 0;
+ nentries = 0;
+
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ int query_len = entry->query_len;
+ char *qry = qtext_fetch(entry->query_offset,
+ query_len,
+ qbuffer,
+ qbuffer_size);
+
+ if (qry == NULL)
+ {
+ /* Trouble ... drop the text */
+ entry->query_offset = 0;
+ entry->query_len = -1;
+ continue;
+ }
+
+ if (fwrite(qry, 1, query_len + 1, qfile) != query_len + 1)
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ hash_seq_term(&hash_seq);
+ goto gc_fail;
+ }
+
+ entry->query_offset = extent;
+ extent += query_len + 1;
+ nentries++;
+ }
+
+ /*
+ * Truncate away any now-unused space. If this fails for some odd reason,
+ * we log it, but there's no need to fail.
+ */
+ if (ftruncate(fileno(qfile), extent) != 0)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not truncate pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+
+ if (FreeFile(qfile))
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ qfile = NULL;
+ goto gc_fail;
+ }
+
+ elog(DEBUG1, "pgss gc of queries file shrunk size from %zu to %zu",
+ pgss->extent, extent);
+
+ /* Reset the shared extent pointer */
+ pgss->extent = extent;
+
+ /*
+ * Also update the mean query length, to be sure that need_gc_qtexts()
+ * won't still think we have a problem.
+ */
+ if (nentries > 0)
+ pgss->mean_query_len = extent / nentries;
+ else
+ pgss->mean_query_len = ASSUMED_LENGTH_INIT;
+
+ free(qbuffer);
+
+ /*
+ * OK, count a garbage collection cycle. (Note: even though we have
+ * exclusive lock on pgss->lock, we must take pgss->mutex for this, since
+ * other processes may examine gc_count while holding only the mutex.
+ * Also, we have to advance the count *after* we've rewritten the file,
+ * else other processes might not realize they read a stale file.)
+ */
+ record_gc_qtexts();
+
+ return;
+
+gc_fail:
+ /* clean up resources */
+ if (qfile)
+ FreeFile(qfile);
+ if (qbuffer)
+ free(qbuffer);
+
+ /*
+ * Since the contents of the external file are now uncertain, mark all
+ * hashtable entries as having invalid texts.
+ */
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ entry->query_offset = 0;
+ entry->query_len = -1;
+ }
+
+ /* Seems like a good idea to bump the GC count even though we failed */
+ record_gc_qtexts();
+}
+
+/*
* Release all entries.
*/
static void
@@ -1361,6 +2114,7 @@ entry_reset(void)
{
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
+ FILE *qfile;
LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
@@ -1370,6 +2124,34 @@ entry_reset(void)
hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL);
}
+ /*
+ * Write new empty query file, perhaps even creating a new one to recover
+ * if the file was missing.
+ */
+ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
+ if (qfile == NULL)
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not create pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ goto done;
+ }
+
+ /* If ftruncate fails, log it, but it's not a fatal problem */
+ if (ftruncate(fileno(qfile), 0) != 0)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not truncate pg_stat_statement file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+
+ FreeFile(qfile);
+
+done:
+ pgss->extent = 0;
+ /* This counts as a query text garbage collection for our purposes */
+ record_gc_qtexts();
+
LWLockRelease(pgss->lock);
}
@@ -1476,7 +2258,7 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
APP_JUMB(rte->jointype);
break;
case RTE_FUNCTION:
- JumbleExpr(jstate, rte->funcexpr);
+ JumbleExpr(jstate, (Node *) rte->functions);
break;
case RTE_VALUES:
JumbleExpr(jstate, (Node *) rte->values_lists);
@@ -1508,7 +2290,7 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
*
* Note: the reason we don't simply use expression_tree_walker() is that the
* point of that function is to support tree walkers that don't care about
- * most tree node types, but here we care about all types. We should complain
+ * most tree node types, but here we care about all types. We should complain
* about any unrecognized node type.
*/
static void
@@ -1563,9 +2345,11 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
Aggref *expr = (Aggref *) node;
APP_JUMB(expr->aggfnoid);
+ JumbleExpr(jstate, (Node *) expr->aggdirectargs);
JumbleExpr(jstate, (Node *) expr->args);
JumbleExpr(jstate, (Node *) expr->aggorder);
JumbleExpr(jstate, (Node *) expr->aggdistinct);
+ JumbleExpr(jstate, (Node *) expr->aggfilter);
}
break;
case T_WindowFunc:
@@ -1575,6 +2359,7 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
APP_JUMB(expr->winfnoid);
APP_JUMB(expr->winref);
JumbleExpr(jstate, (Node *) expr->args);
+ JumbleExpr(jstate, (Node *) expr->aggfilter);
}
break;
case T_ArrayRef:
@@ -1884,6 +2669,13 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
JumbleExpr(jstate, setop->rarg);
}
break;
+ case T_RangeTblFunction:
+ {
+ RangeTblFunction *rtfunc = (RangeTblFunction *) node;
+
+ JumbleExpr(jstate, rtfunc->funcexpr);
+ }
+ break;
default:
/* Only a warning, since we can stumble along anyway */
elog(WARNING, "unrecognized node type: %d",
@@ -1929,7 +2721,7 @@ RecordConstLocation(pgssJumbleState *jstate, int location)
* *query_len_p contains the input string length, and is updated with
* the result string length (which cannot be longer) on exit.
*
- * Returns a palloc'd string, which is not necessarily null-terminated.
+ * Returns a palloc'd string.
*/
static char *
generate_normalized_query(pgssJumbleState *jstate, const char *query,
@@ -1937,7 +2729,6 @@ generate_normalized_query(pgssJumbleState *jstate, const char *query,
{
char *norm_query;
int query_len = *query_len_p;
- int max_output_len;
int i,
len_to_wrt, /* Length (in bytes) to write */
quer_loc = 0, /* Source query byte location */
@@ -1951,9 +2742,8 @@ generate_normalized_query(pgssJumbleState *jstate, const char *query,
*/
fill_in_constant_lengths(jstate, query);
- /* Allocate result buffer, ensuring we limit result to allowed size */
- max_output_len = Min(query_len, pgss->query_size - 1);
- norm_query = palloc(max_output_len);
+ /* Allocate result buffer */
+ norm_query = palloc(query_len + 1);
for (i = 0; i < jstate->clocations_count; i++)
{
@@ -1966,52 +2756,36 @@ generate_normalized_query(pgssJumbleState *jstate, const char *query,
if (tok_len < 0)
continue; /* ignore any duplicates */
- /* Copy next chunk, or as much as will fit */
+ /* Copy next chunk (what precedes the next constant) */
len_to_wrt = off - last_off;
len_to_wrt -= last_tok_len;
- len_to_wrt = Min(len_to_wrt, max_output_len - n_quer_loc);
Assert(len_to_wrt >= 0);
memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
n_quer_loc += len_to_wrt;
- if (n_quer_loc < max_output_len)
- norm_query[n_quer_loc++] = '?';
+ /* And insert a '?' in place of the constant token */
+ norm_query[n_quer_loc++] = '?';
quer_loc = off + tok_len;
last_off = off;
last_tok_len = tok_len;
-
- /* If we run out of space, might as well stop iterating */
- if (n_quer_loc >= max_output_len)
- break;
}
/*
* We've copied up until the last ignorable constant. Copy over the
- * remaining bytes of the original query string, or at least as much as
- * will fit.
+ * remaining bytes of the original query string.
*/
len_to_wrt = query_len - quer_loc;
- len_to_wrt = Min(len_to_wrt, max_output_len - n_quer_loc);
Assert(len_to_wrt >= 0);
memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
n_quer_loc += len_to_wrt;
- /*
- * If we ran out of space, we need to do an encoding-aware truncation,
- * just to make sure we don't have an incomplete character at the end.
- */
- if (n_quer_loc >= max_output_len)
- query_len = pg_encoding_mbcliplen(encoding,
- norm_query,
- n_quer_loc,
- pgss->query_size - 1);
- else
- query_len = n_quer_loc;
+ Assert(n_quer_loc <= query_len);
+ norm_query[n_quer_loc] = '\0';
- *query_len_p = query_len;
+ *query_len_p = n_quer_loc;
return norm_query;
}
@@ -2030,7 +2804,7 @@ generate_normalized_query(pgssJumbleState *jstate, const char *query,
* a problem.
*
* Duplicate constant pointers are possible, and will have their lengths
- * marked as '-1', so that they are later ignored. (Actually, we assume the
+ * marked as '-1', so that they are later ignored. (Actually, we assume the
* lengths were initialized as -1 to start with, and don't change them here.)
*
* N.B. There is an assumption that a '-' character at a Const location begins
@@ -2099,7 +2873,7 @@ fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
* adjustment of location to that of the leading '-'
* operator in the event of a negative constant. It is
* also useful for our purposes to start from the minus
- * symbol. In this way, queries like "select * from foo
+ * symbol. In this way, queries like "select * from foo
* where bar = 1" and "select * from foo where bar = -2"
* will have identical normalized query strings.
*/
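Taken together, the pg_stat_statements.c hunks above stop storing query strings inside hash entries and instead record an (offset, length) reference into PGSS_TEXT_FILE, whose image is loaded back into memory and validated per entry. A standalone sketch of that validation, assuming an invented two-entry buffer; only the range and trailing-NUL checks mirror qtext_fetch():

    /*
     * Sketch of qtext_fetch()-style validation of an (offset, length) reference
     * into a loaded text buffer; the sample buffer below is invented.
     */
    #include <stdio.h>

    static const char *fetch(size_t query_offset, int query_len,
                             const char *buffer, size_t buffer_size)
    {
        if (buffer == NULL)
            return NULL;                      /* file could not be read */
        if (query_len < 0 ||
            query_offset + query_len >= buffer_size)
            return NULL;                      /* reference is out of range */
        if (buffer[query_offset + query_len] != '\0')
            return NULL;                      /* missing trailing NUL */
        return buffer + query_offset;         /* NUL-terminated string */
    }

    int main(void)
    {
        /* Two texts stored back to back, each NUL-terminated, as in the file. */
        const char buf[] = "SELECT ?\0UPDATE t SET x = ?";
        size_t buf_size = sizeof(buf);        /* includes both terminators */

        const char *first = fetch(0, 8, buf, buf_size);
        const char *second = fetch(9, 18, buf, buf_size);
        const char *bogus = fetch(100, 5, buf, buf_size);

        printf("%s | %s | %s\n",
               first ? first : "(null)",
               second ? second : "(null)",
               bogus ? bogus : "(null)");
        return 0;
    }

Keeping the trailing-NUL check means a stale reference, for example one left behind by a failed garbage collection, comes back as NULL rather than as a garbled string.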
diff --git a/contrib/pg_stat_statements/pg_stat_statements.control b/contrib/pg_stat_statements/pg_stat_statements.control
index 428fbb2374..6ecf2b6d1b 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.control
+++ b/contrib/pg_stat_statements/pg_stat_statements.control
@@ -1,5 +1,5 @@
# pg_stat_statements extension
comment = 'track execution statistics of all SQL statements executed'
-default_version = '1.1'
+default_version = '1.2'
module_pathname = '$libdir/pg_stat_statements'
relocatable = true
diff --git a/contrib/pg_test_fsync/pg_test_fsync.c b/contrib/pg_test_fsync/pg_test_fsync.c
index 9fe2301e41..842295ae3d 100644
--- a/contrib/pg_test_fsync/pg_test_fsync.c
+++ b/contrib/pg_test_fsync/pg_test_fsync.c
@@ -24,8 +24,9 @@
#define XLOG_BLCKSZ_K (XLOG_BLCKSZ / 1024)
#define LABEL_FORMAT " %-32s"
-#define NA_FORMAT "%18s"
-#define OPS_FORMAT "%9.3f ops/sec"
+#define NA_FORMAT "%20s"
+#define OPS_FORMAT "%11.3f ops/sec %6.0f usecs/op"
+#define USECS_SEC 1000000
/* These are macros to avoid timing the function call overhead. */
#ifndef WIN32
@@ -59,7 +60,7 @@ do { \
static const char *progname;
-static int secs_per_test = 2;
+static int secs_per_test = 5;
static int needs_unlink = 0;
static char full_buf[XLOG_SEG_SIZE],
*buf,
@@ -100,14 +101,14 @@ main(int argc, char *argv[])
handle_args(argc, argv);
/* Prevent leaving behind the test file */
- signal(SIGINT, signal_cleanup);
- signal(SIGTERM, signal_cleanup);
+ pqsignal(SIGINT, signal_cleanup);
+ pqsignal(SIGTERM, signal_cleanup);
#ifndef WIN32
- signal(SIGALRM, process_alarm);
+ pqsignal(SIGALRM, process_alarm);
#endif
#ifdef SIGHUP
/* Not defined on win32 */
- signal(SIGHUP, signal_cleanup);
+ pqsignal(SIGHUP, signal_cleanup);
#endif
prepare_buf();
@@ -139,13 +140,13 @@ handle_args(int argc, char *argv[])
{"secs-per-test", required_argument, NULL, 's'},
{NULL, 0, NULL, 0}
};
+
int option; /* Command line option */
int optindex = 0; /* used by getopt_long */
if (argc > 1)
{
- if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0 ||
- strcmp(argv[1], "-?") == 0)
+ if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
printf("Usage: %s [-f FILENAME] [-s SECS-PER-TEST]\n", progname);
exit(0);
@@ -205,7 +206,7 @@ prepare_buf(void)
for (ops = 0; ops < XLOG_SEG_SIZE; ops++)
full_buf[ops] = random();
- buf = (char *) TYPEALIGN(ALIGNOF_XLOG_BUFFER, full_buf);
+ buf = (char *) TYPEALIGN(XLOG_BLCKSZ, full_buf);
}
static void
@@ -368,6 +369,13 @@ test_sync(int writes_per_op)
{
for (writes = 0; writes < writes_per_op; writes++)
if (write(tmpfile, buf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
+
+ /*
+ * This can generate write failures if the filesystem has
+ * a large block size, e.g. 4k, and there is no support
+ * for O_DIRECT writes smaller than the file system block
+ * size, e.g. XFS.
+ */
die("write failed");
if (lseek(tmpfile, 0, SEEK_SET) == -1)
die("seek failed");
@@ -568,8 +576,9 @@ print_elapse(struct timeval start_t, struct timeval stop_t, int ops)
double total_time = (stop_t.tv_sec - start_t.tv_sec) +
(stop_t.tv_usec - start_t.tv_usec) * 0.000001;
double per_second = ops / total_time;
+ double avg_op_time_us = (total_time / ops) * USECS_SEC;
- printf(OPS_FORMAT "\n", per_second);
+ printf(OPS_FORMAT "\n", per_second, avg_op_time_us);
}
#ifndef WIN32
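The pg_test_fsync changes above extend OPS_FORMAT so each result line reports both operations per second and the average microseconds per operation. A standalone sketch of that arithmetic with invented sample timings; the formulas mirror print_elapse():

    /*
     * Sketch of the arithmetic behind the new OPS_FORMAT output: operations per
     * second plus average microseconds per operation.  The sample timings are
     * invented; the formulas mirror print_elapse().
     */
    #include <stdio.h>
    #include <sys/time.h>

    #define USECS_SEC 1000000

    int main(void)
    {
        struct timeval start_t = {10, 0};       /* pretend start: t = 10 s */
        struct timeval stop_t = {15, 250000};   /* pretend stop: 5.25 s later */
        int ops = 1000;                         /* pretend operations completed */

        double total_time = (stop_t.tv_sec - start_t.tv_sec) +
            (stop_t.tv_usec - start_t.tv_usec) * 0.000001;
        double per_second = ops / total_time;
        double avg_op_time_us = (total_time / ops) * USECS_SEC;

        printf("%11.3f ops/sec  %6.0f usecs/op\n", per_second, avg_op_time_us);
        return 0;
    }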
diff --git a/contrib/pg_test_timing/pg_test_timing.c b/contrib/pg_test_timing/pg_test_timing.c
index b3f98abe5c..e44c535d09 100644
--- a/contrib/pg_test_timing/pg_test_timing.c
+++ b/contrib/pg_test_timing/pg_test_timing.c
@@ -14,16 +14,24 @@ static const char *progname;
static int32 test_duration = 3;
static void handle_args(int argc, char *argv[]);
-static void test_timing(int32);
+static uint64 test_timing(int32);
+static void output(uint64 loop_count);
+
+/* record duration in powers of 2 microseconds */
+int64 histogram[32];
int
main(int argc, char *argv[])
{
+ uint64 loop_count;
+
progname = get_progname(argv[0]);
handle_args(argc, argv);
- test_timing(test_duration);
+ loop_count = test_timing(test_duration);
+
+ output(loop_count);
return 0;
}
@@ -35,13 +43,13 @@ handle_args(int argc, char *argv[])
{"duration", required_argument, NULL, 'd'},
{NULL, 0, NULL, 0}
};
+
int option; /* Command line option */
int optindex = 0; /* used by getopt_long */
if (argc > 1)
{
- if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0 ||
- strcmp(argv[1], "-?") == 0)
+ if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
printf("Usage: %s [-d DURATION]\n", progname);
exit(0);
@@ -95,7 +103,7 @@ handle_args(int argc, char *argv[])
}
}
-static void
+static uint64
test_timing(int32 duration)
{
uint64 total_time;
@@ -103,18 +111,10 @@ test_timing(int32 duration)
uint64 loop_count = 0;
uint64 prev,
cur;
- int32 diff,
- i,
- bits,
- found;
-
instr_time start_time,
end_time,
temp;
- static int64 histogram[32];
- char buf[100];
-
total_time = duration > 0 ? duration * 1000000 : 0;
INSTR_TIME_SET_CURRENT(start_time);
@@ -122,11 +122,15 @@ test_timing(int32 duration)
while (time_elapsed < total_time)
{
+ int32 diff,
+ bits = 0;
+
prev = cur;
INSTR_TIME_SET_CURRENT(temp);
cur = INSTR_TIME_GET_MICROSEC(temp);
diff = cur - prev;
+ /* Did time go backwards? */
if (diff < 0)
{
printf("Detected clock going backwards in time.\n");
@@ -134,12 +138,14 @@ test_timing(int32 duration)
exit(1);
}
- bits = 0;
+ /* What is the highest bit in the time diff? */
while (diff)
{
diff >>= 1;
bits++;
}
+
+ /* Update appropriate duration bucket */
histogram[bits]++;
loop_count++;
@@ -153,19 +159,30 @@ test_timing(int32 duration)
printf("Per loop time including overhead: %0.2f nsec\n",
INSTR_TIME_GET_DOUBLE(end_time) * 1e9 / loop_count);
+
+ return loop_count;
+}
+
+static void
+output(uint64 loop_count)
+{
+ int64 max_bit = 31,
+ i;
+
+ /* find highest bit value */
+ while (max_bit > 0 && histogram[max_bit] == 0)
+ max_bit--;
+
printf("Histogram of timing durations:\n");
- printf("%9s: %10s %9s\n", "< usec", "count", "percent");
+ printf("%6s %10s %10s\n", "< usec", "% of total", "count");
- found = 0;
- for (i = 31; i >= 0; i--)
+ for (i = 0; i <= max_bit; i++)
{
- if (found || histogram[i])
- {
- found = 1;
- /* lame hack to work around INT64_FORMAT deficiencies */
- snprintf(buf, sizeof(buf), INT64_FORMAT, histogram[i]);
- printf("%9ld: %10s %8.5f%%\n", 1l << i, buf,
- (double) histogram[i] * 100 / loop_count);
- }
+ char buf[100];
+
+ /* lame hack to work around INT64_FORMAT deficiencies */
+ snprintf(buf, sizeof(buf), INT64_FORMAT, histogram[i]);
+ printf("%6ld %9.5f %10s\n", 1l << i,
+ (double) histogram[i] * 100 / loop_count, buf);
}
}
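The rewritten pg_test_timing loop above buckets every inter-call delta by the number of bits needed to represent it, so bucket i collects deltas below 2^i microseconds, and output() later prints only up to the highest non-empty bucket. A standalone sketch of that bucketing with invented sample deltas:

    /*
     * Sketch of the per-loop bucketing used above: each delta is bucketed by the
     * number of bits needed to represent it, so bucket i collects deltas below
     * 2^i microseconds.  The sample deltas are invented.
     */
    #include <stdio.h>
    #include <stdint.h>

    static int64_t histogram[32];

    static void bucket(int32_t diff)
    {
        int32_t bits = 0;

        /* Count how many bits the delta needs. */
        while (diff)
        {
            diff >>= 1;
            bits++;
        }
        histogram[bits]++;
    }

    int main(void)
    {
        int32_t samples[] = {0, 1, 2, 3, 7, 8, 40, 1000};
        int i, max_bit = 31;

        for (i = 0; i < (int) (sizeof(samples) / sizeof(samples[0])); i++)
            bucket(samples[i]);

        /* Find the highest non-empty bucket, as output() does. */
        while (max_bit > 0 && histogram[max_bit] == 0)
            max_bit--;

        printf("%6s %10s\n", "< usec", "count");
        for (i = 0; i <= max_bit; i++)
            printf("%6ld %10lld\n", 1L << i, (long long) histogram[i]);
        return 0;
    }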
diff --git a/contrib/pg_trgm/Makefile b/contrib/pg_trgm/Makefile
index 64fd69f2cb..0d549f8b6c 100644
--- a/contrib/pg_trgm/Makefile
+++ b/contrib/pg_trgm/Makefile
@@ -1,10 +1,10 @@
# contrib/pg_trgm/Makefile
MODULE_big = pg_trgm
-OBJS = trgm_op.o trgm_gist.o trgm_gin.o
+OBJS = trgm_op.o trgm_gist.o trgm_gin.o trgm_regexp.o
EXTENSION = pg_trgm
-DATA = pg_trgm--1.0.sql pg_trgm--unpackaged--1.0.sql
+DATA = pg_trgm--1.1.sql pg_trgm--1.0--1.1.sql pg_trgm--unpackaged--1.0.sql
REGRESS = pg_trgm
diff --git a/contrib/pg_trgm/expected/pg_trgm.out b/contrib/pg_trgm/expected/pg_trgm.out
index e7af7d4890..13b1fde1b8 100644
--- a/contrib/pg_trgm/expected/pg_trgm.out
+++ b/contrib/pg_trgm/expected/pg_trgm.out
@@ -53,8 +53,14 @@ select similarity('wow',' WOW ');
1
(1 row)
+select similarity('---', '####---');
+ similarity
+------------
+ 0
+(1 row)
+
CREATE TABLE test_trgm(t text);
-\copy test_trgm from 'data/trgm.data
+\copy test_trgm from 'data/trgm.data'
select t,similarity(t,'qwertyu0988') as sml from test_trgm where t % 'qwertyu0988' order by sml desc, t;
t | sml
-------------+----------
@@ -3464,6 +3470,7 @@ select t,similarity(t,'gwertyu1988') as sml from test_trgm where t % 'gwertyu198
create table test2(t text);
insert into test2 values ('abcdef');
insert into test2 values ('quark');
+insert into test2 values (' z foo bar');
create index test2_idx_gin on test2 using gin (t gin_trgm_ops);
set enable_seqscan=off;
explain (costs off)
@@ -3497,6 +3504,12 @@ select * from test2 where t like '%bcd%';
abcdef
(1 row)
+select * from test2 where t like E'%\\bcd%';
+ t
+--------
+ abcdef
+(1 row)
+
select * from test2 where t ilike '%BCD%';
t
--------
@@ -3509,6 +3522,142 @@ select * from test2 where t ilike 'qua%';
quark
(1 row)
+select * from test2 where t like '%z foo bar%';
+ t
+-------------
+ z foo bar
+(1 row)
+
+select * from test2 where t like ' z foo%';
+ t
+-------------
+ z foo bar
+(1 row)
+
+explain (costs off)
+ select * from test2 where t ~ '[abc]{3}';
+ QUERY PLAN
+--------------------------------------------
+ Bitmap Heap Scan on test2
+ Recheck Cond: (t ~ '[abc]{3}'::text)
+ -> Bitmap Index Scan on test2_idx_gin
+ Index Cond: (t ~ '[abc]{3}'::text)
+(4 rows)
+
+explain (costs off)
+ select * from test2 where t ~* 'DEF';
+ QUERY PLAN
+------------------------------------------
+ Bitmap Heap Scan on test2
+ Recheck Cond: (t ~* 'DEF'::text)
+ -> Bitmap Index Scan on test2_idx_gin
+ Index Cond: (t ~* 'DEF'::text)
+(4 rows)
+
+select * from test2 where t ~ '[abc]{3}';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~ 'a[bc]+d';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~ '(abc)*$';
+ t
+-------------
+ abcdef
+ quark
+ z foo bar
+(3 rows)
+
+select * from test2 where t ~* 'DEF';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~ 'dEf';
+ t
+---
+(0 rows)
+
+select * from test2 where t ~* '^q';
+ t
+-------
+ quark
+(1 row)
+
+select * from test2 where t ~* '[abc]{3}[def]{3}';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~* 'ab[a-z]{3}';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~* '(^| )qua';
+ t
+-------
+ quark
+(1 row)
+
+select * from test2 where t ~ 'q.*rk$';
+ t
+-------
+ quark
+(1 row)
+
+select * from test2 where t ~ 'q';
+ t
+-------
+ quark
+(1 row)
+
+select * from test2 where t ~ '[a-z]{3}';
+ t
+-------------
+ abcdef
+ quark
+ z foo bar
+(3 rows)
+
+select * from test2 where t ~* '(a{10}|b{10}|c{10}){10}';
+ t
+---
+(0 rows)
+
+select * from test2 where t ~ 'z foo bar';
+ t
+-------------
+ z foo bar
+(1 row)
+
+select * from test2 where t ~ ' z foo bar';
+ t
+-------------
+ z foo bar
+(1 row)
+
+select * from test2 where t ~ ' z foo bar';
+ t
+-------------
+ z foo bar
+(1 row)
+
+select * from test2 where t ~ ' z foo';
+ t
+-------------
+ z foo bar
+(1 row)
+
drop index test2_idx_gin;
create index test2_idx_gist on test2 using gist (t gist_trgm_ops);
set enable_seqscan=off;
@@ -3539,6 +3688,12 @@ select * from test2 where t like '%bcd%';
abcdef
(1 row)
+select * from test2 where t like E'%\\bcd%';
+ t
+--------
+ abcdef
+(1 row)
+
select * from test2 where t ilike '%BCD%';
t
--------
@@ -3551,3 +3706,135 @@ select * from test2 where t ilike 'qua%';
quark
(1 row)
+select * from test2 where t like '%z foo bar%';
+ t
+-------------
+ z foo bar
+(1 row)
+
+select * from test2 where t like ' z foo%';
+ t
+-------------
+ z foo bar
+(1 row)
+
+explain (costs off)
+ select * from test2 where t ~ '[abc]{3}';
+ QUERY PLAN
+------------------------------------------
+ Index Scan using test2_idx_gist on test2
+ Index Cond: (t ~ '[abc]{3}'::text)
+(2 rows)
+
+explain (costs off)
+ select * from test2 where t ~* 'DEF';
+ QUERY PLAN
+------------------------------------------
+ Index Scan using test2_idx_gist on test2
+ Index Cond: (t ~* 'DEF'::text)
+(2 rows)
+
+select * from test2 where t ~ '[abc]{3}';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~ 'a[bc]+d';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~ '(abc)*$';
+ t
+-------------
+ abcdef
+ quark
+ z foo bar
+(3 rows)
+
+select * from test2 where t ~* 'DEF';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~ 'dEf';
+ t
+---
+(0 rows)
+
+select * from test2 where t ~* '^q';
+ t
+-------
+ quark
+(1 row)
+
+select * from test2 where t ~* '[abc]{3}[def]{3}';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~* 'ab[a-z]{3}';
+ t
+--------
+ abcdef
+(1 row)
+
+select * from test2 where t ~* '(^| )qua';
+ t
+-------
+ quark
+(1 row)
+
+select * from test2 where t ~ 'q.*rk$';
+ t
+-------
+ quark
+(1 row)
+
+select * from test2 where t ~ 'q';
+ t
+-------
+ quark
+(1 row)
+
+select * from test2 where t ~ '[a-z]{3}';
+ t
+-------------
+ abcdef
+ quark
+ z foo bar
+(3 rows)
+
+select * from test2 where t ~* '(a{10}|b{10}|c{10}){10}';
+ t
+---
+(0 rows)
+
+select * from test2 where t ~ 'z foo bar';
+ t
+-------------
+ z foo bar
+(1 row)
+
+select * from test2 where t ~ ' z foo bar';
+ t
+-------------
+ z foo bar
+(1 row)
+
+select * from test2 where t ~ ' z foo bar';
+ t
+-------------
+ z foo bar
+(1 row)
+
+select * from test2 where t ~ ' z foo';
+ t
+-------------
+ z foo bar
+(1 row)
+
diff --git a/contrib/pg_trgm/pg_trgm--1.0--1.1.sql b/contrib/pg_trgm/pg_trgm--1.0--1.1.sql
new file mode 100644
index 0000000000..b4e3e26037
--- /dev/null
+++ b/contrib/pg_trgm/pg_trgm--1.0--1.1.sql
@@ -0,0 +1,12 @@
+/* contrib/pg_trgm/pg_trgm--1.0--1.1.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_trgm UPDATE TO '1.1'" to load this file. \quit
+
+ALTER OPERATOR FAMILY gist_trgm_ops USING gist ADD
+ OPERATOR 5 pg_catalog.~ (text, text),
+ OPERATOR 6 pg_catalog.~* (text, text);
+
+ALTER OPERATOR FAMILY gin_trgm_ops USING gin ADD
+ OPERATOR 5 pg_catalog.~ (text, text),
+ OPERATOR 6 pg_catalog.~* (text, text);
diff --git a/contrib/pg_trgm/pg_trgm--1.0.sql b/contrib/pg_trgm/pg_trgm--1.1.sql
index 8067bd6033..1fff7af2c4 100644
--- a/contrib/pg_trgm/pg_trgm--1.0.sql
+++ b/contrib/pg_trgm/pg_trgm--1.1.sql
@@ -1,4 +1,4 @@
-/* contrib/pg_trgm/pg_trgm--1.0.sql */
+/* contrib/pg_trgm/pg_trgm--1.1.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION pg_trgm" to load this file. \quit
@@ -132,6 +132,12 @@ ALTER OPERATOR FAMILY gist_trgm_ops USING gist ADD
OPERATOR 4 pg_catalog.~~* (text, text),
FUNCTION 8 (text, text) gtrgm_distance (internal, text, int, oid);
+-- Add operators that are new in 9.3.
+
+ALTER OPERATOR FAMILY gist_trgm_ops USING gist ADD
+ OPERATOR 5 pg_catalog.~ (text, text),
+ OPERATOR 6 pg_catalog.~* (text, text);
+
-- support functions for gin
CREATE FUNCTION gin_extract_value_trgm(text, internal)
RETURNS internal
@@ -164,3 +170,9 @@ AS
ALTER OPERATOR FAMILY gin_trgm_ops USING gin ADD
OPERATOR 3 pg_catalog.~~ (text, text),
OPERATOR 4 pg_catalog.~~* (text, text);
+
+-- Add operators that are new in 9.3.
+
+ALTER OPERATOR FAMILY gin_trgm_ops USING gin ADD
+ OPERATOR 5 pg_catalog.~ (text, text),
+ OPERATOR 6 pg_catalog.~* (text, text);
diff --git a/contrib/pg_trgm/pg_trgm.control b/contrib/pg_trgm/pg_trgm.control
index 70404d881d..2ac51e6890 100644
--- a/contrib/pg_trgm/pg_trgm.control
+++ b/contrib/pg_trgm/pg_trgm.control
@@ -1,5 +1,5 @@
# pg_trgm extension
comment = 'text similarity measurement and index searching based on trigrams'
-default_version = '1.0'
+default_version = '1.1'
module_pathname = '$libdir/pg_trgm'
relocatable = true
diff --git a/contrib/pg_trgm/sql/pg_trgm.sql b/contrib/pg_trgm/sql/pg_trgm.sql
index ea902f602f..7b02d98818 100644
--- a/contrib/pg_trgm/sql/pg_trgm.sql
+++ b/contrib/pg_trgm/sql/pg_trgm.sql
@@ -11,9 +11,11 @@ select show_trgm('a b C0*%^');
select similarity('wow','WOWa ');
select similarity('wow',' WOW ');
+select similarity('---', '####---');
+
CREATE TABLE test_trgm(t text);
-\copy test_trgm from 'data/trgm.data
+\copy test_trgm from 'data/trgm.data'
select t,similarity(t,'qwertyu0988') as sml from test_trgm where t % 'qwertyu0988' order by sml desc, t;
select t,similarity(t,'gwertyu0988') as sml from test_trgm where t % 'gwertyu0988' order by sml desc, t;
@@ -41,6 +43,7 @@ select t,similarity(t,'gwertyu1988') as sml from test_trgm where t % 'gwertyu198
create table test2(t text);
insert into test2 values ('abcdef');
insert into test2 values ('quark');
+insert into test2 values (' z foo bar');
create index test2_idx_gin on test2 using gin (t gin_trgm_ops);
set enable_seqscan=off;
explain (costs off)
@@ -49,8 +52,32 @@ explain (costs off)
select * from test2 where t ilike '%BCD%';
select * from test2 where t like '%BCD%';
select * from test2 where t like '%bcd%';
+select * from test2 where t like E'%\\bcd%';
select * from test2 where t ilike '%BCD%';
select * from test2 where t ilike 'qua%';
+select * from test2 where t like '%z foo bar%';
+select * from test2 where t like ' z foo%';
+explain (costs off)
+ select * from test2 where t ~ '[abc]{3}';
+explain (costs off)
+ select * from test2 where t ~* 'DEF';
+select * from test2 where t ~ '[abc]{3}';
+select * from test2 where t ~ 'a[bc]+d';
+select * from test2 where t ~ '(abc)*$';
+select * from test2 where t ~* 'DEF';
+select * from test2 where t ~ 'dEf';
+select * from test2 where t ~* '^q';
+select * from test2 where t ~* '[abc]{3}[def]{3}';
+select * from test2 where t ~* 'ab[a-z]{3}';
+select * from test2 where t ~* '(^| )qua';
+select * from test2 where t ~ 'q.*rk$';
+select * from test2 where t ~ 'q';
+select * from test2 where t ~ '[a-z]{3}';
+select * from test2 where t ~* '(a{10}|b{10}|c{10}){10}';
+select * from test2 where t ~ 'z foo bar';
+select * from test2 where t ~ ' z foo bar';
+select * from test2 where t ~ ' z foo bar';
+select * from test2 where t ~ ' z foo';
drop index test2_idx_gin;
create index test2_idx_gist on test2 using gist (t gist_trgm_ops);
set enable_seqscan=off;
@@ -60,5 +87,29 @@ explain (costs off)
select * from test2 where t ilike '%BCD%';
select * from test2 where t like '%BCD%';
select * from test2 where t like '%bcd%';
+select * from test2 where t like E'%\\bcd%';
select * from test2 where t ilike '%BCD%';
select * from test2 where t ilike 'qua%';
+select * from test2 where t like '%z foo bar%';
+select * from test2 where t like ' z foo%';
+explain (costs off)
+ select * from test2 where t ~ '[abc]{3}';
+explain (costs off)
+ select * from test2 where t ~* 'DEF';
+select * from test2 where t ~ '[abc]{3}';
+select * from test2 where t ~ 'a[bc]+d';
+select * from test2 where t ~ '(abc)*$';
+select * from test2 where t ~* 'DEF';
+select * from test2 where t ~ 'dEf';
+select * from test2 where t ~* '^q';
+select * from test2 where t ~* '[abc]{3}[def]{3}';
+select * from test2 where t ~* 'ab[a-z]{3}';
+select * from test2 where t ~* '(^| )qua';
+select * from test2 where t ~ 'q.*rk$';
+select * from test2 where t ~ 'q';
+select * from test2 where t ~ '[a-z]{3}';
+select * from test2 where t ~* '(a{10}|b{10}|c{10}){10}';
+select * from test2 where t ~ 'z foo bar';
+select * from test2 where t ~ ' z foo bar';
+select * from test2 where t ~ ' z foo bar';
+select * from test2 where t ~ ' z foo';
diff --git a/contrib/pg_trgm/trgm.h b/contrib/pg_trgm/trgm.h
index 067f29d4da..ed649b8dcc 100644
--- a/contrib/pg_trgm/trgm.h
+++ b/contrib/pg_trgm/trgm.h
@@ -7,18 +7,20 @@
#include "access/gist.h"
#include "access/itup.h"
#include "storage/bufpage.h"
-#include "utils/builtins.h"
-/* options */
+/*
+ * Options ... but note that trgm_regexp.c effectively assumes these values
+ * of LPADDING and RPADDING.
+ */
#define LPADDING 2
#define RPADDING 1
#define KEEPONLYALNUM
/*
* Caution: IGNORECASE macro means that trigrams are case-insensitive.
- * If this macro is disabled, the ~~* operator must be removed from the
- * operator classes, because we can't handle case-insensitive wildcard search
- * with case-sensitive trigrams. Failure to do this will result in "cannot
- * handle ~~* with case-sensitive trigrams" errors.
+ * If this macro is disabled, the ~* and ~~* operators must be removed from
+ * the operator classes, because we can't handle case-insensitive wildcard
+ * search with case-sensitive trigrams. Failure to do this will result in
+ * "cannot handle ~*(~~*) with case-sensitive trigrams" errors.
*/
#define IGNORECASE
#define DIVUNION
@@ -28,6 +30,8 @@
#define DistanceStrategyNumber 2
#define LikeStrategyNumber 3
#define ILikeStrategyNumber 4
+#define RegExpStrategyNumber 5
+#define RegExpICaseStrategyNumber 6
typedef char trgm[3];
@@ -42,11 +46,11 @@ typedef char trgm[3];
*(((char*)(a))+2) = *(((char*)(b))+2); \
} while(0);
-uint32 trgm2int(trgm *ptr);
-
#ifdef KEEPONLYALNUM
+#define ISWORDCHR(c) (t_isalpha(c) || t_isdigit(c))
#define ISPRINTABLECHAR(a) ( isascii( *(unsigned char*)(a) ) && (isalnum( *(unsigned char*)(a) ) || *(unsigned char*)(a)==' ') )
#else
+#define ISWORDCHR(c) (!t_isspace(c))
#define ISPRINTABLECHAR(a) ( isascii( *(unsigned char*)(a) ) && isprint( *(unsigned char*)(a) ) )
#endif
#define ISPRINTABLETRGM(t) ( ISPRINTABLECHAR( ((char*)(t)) ) && ISPRINTABLECHAR( ((char*)(t))+1 ) && ISPRINTABLECHAR( ((char*)(t))+2 ) )
@@ -99,11 +103,19 @@ typedef char *BITVECP;
#define GETARR(x) ( (trgm*)( (char*)x+TRGMHDRSIZE ) )
#define ARRNELEM(x) ( ( VARSIZE(x) - TRGMHDRSIZE )/sizeof(trgm) )
+typedef struct TrgmPackedGraph TrgmPackedGraph;
+
extern float4 trgm_limit;
-TRGM *generate_trgm(char *str, int slen);
-TRGM *generate_wildcard_trgm(const char *str, int slen);
-float4 cnt_sml(TRGM *trg1, TRGM *trg2);
-bool trgm_contained_by(TRGM *trg1, TRGM *trg2);
+extern uint32 trgm2int(trgm *ptr);
+extern void compact_trigram(trgm *tptr, char *str, int bytelen);
+extern TRGM *generate_trgm(char *str, int slen);
+extern TRGM *generate_wildcard_trgm(const char *str, int slen);
+extern float4 cnt_sml(TRGM *trg1, TRGM *trg2);
+extern bool trgm_contained_by(TRGM *trg1, TRGM *trg2);
+extern bool *trgm_presence_map(TRGM *query, TRGM *key);
+extern TRGM *createTrgmNFA(text *text_re, Oid collation,
+ TrgmPackedGraph **graph, MemoryContext rcontext);
+extern bool trigramsMatchGraph(TrgmPackedGraph *graph, bool *check);
#endif /* __TRGM_H__ */
diff --git a/contrib/pg_trgm/trgm_gin.c b/contrib/pg_trgm/trgm_gin.c
index 114fb784c4..c59925c575 100644
--- a/contrib/pg_trgm/trgm_gin.c
+++ b/contrib/pg_trgm/trgm_gin.c
@@ -10,16 +10,9 @@
PG_FUNCTION_INFO_V1(gin_extract_trgm);
-Datum gin_extract_trgm(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(gin_extract_value_trgm);
-Datum gin_extract_value_trgm(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(gin_extract_query_trgm);
-Datum gin_extract_query_trgm(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(gin_trgm_consistent);
-Datum gin_trgm_consistent(PG_FUNCTION_ARGS);
/*
* This function can only be called if a pre-9.1 version of the GIN operator
@@ -80,13 +73,15 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS)
StrategyNumber strategy = PG_GETARG_UINT16(2);
/* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */
- /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
+ Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4);
+
/* bool **nullFlags = (bool **) PG_GETARG_POINTER(5); */
int32 *searchMode = (int32 *) PG_GETARG_POINTER(6);
Datum *entries = NULL;
TRGM *trg;
int32 trglen;
trgm *ptr;
+ TrgmPackedGraph *graph;
int32 i;
switch (strategy)
@@ -107,6 +102,34 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS)
*/
trg = generate_wildcard_trgm(VARDATA(val), VARSIZE(val) - VARHDRSZ);
break;
+ case RegExpICaseStrategyNumber:
+#ifndef IGNORECASE
+ elog(ERROR, "cannot handle ~* with case-sensitive trigrams");
+#endif
+ /* FALL THRU */
+ case RegExpStrategyNumber:
+ trg = createTrgmNFA(val, PG_GET_COLLATION(),
+ &graph, CurrentMemoryContext);
+ if (trg && ARRNELEM(trg) > 0)
+ {
+ /*
+ * Successful regex processing: store NFA-like graph as
+ * extra_data. GIN API requires an array of nentries
+ * Pointers, but we just put the same value in each element.
+ */
+ trglen = ARRNELEM(trg);
+ *extra_data = (Pointer *) palloc(sizeof(Pointer) * trglen);
+ for (i = 0; i < trglen; i++)
+ (*extra_data)[i] = (Pointer) graph;
+ }
+ else
+ {
+ /* No result: have to do full index scan. */
+ *nentries = 0;
+ *searchMode = GIN_SEARCH_MODE_ALL;
+ PG_RETURN_POINTER(entries);
+ }
+ break;
default:
elog(ERROR, "unrecognized strategy number: %d", strategy);
trg = NULL; /* keep compiler quiet */
@@ -146,8 +169,7 @@ gin_trgm_consistent(PG_FUNCTION_ARGS)
/* text *query = PG_GETARG_TEXT_P(2); */
int32 nkeys = PG_GETARG_INT32(3);
-
- /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
+ Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4);
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res;
int32 i,
@@ -189,6 +211,21 @@ gin_trgm_consistent(PG_FUNCTION_ARGS)
}
}
break;
+ case RegExpICaseStrategyNumber:
+#ifndef IGNORECASE
+ elog(ERROR, "cannot handle ~* with case-sensitive trigrams");
+#endif
+ /* FALL THRU */
+ case RegExpStrategyNumber:
+ if (nkeys < 1)
+ {
+ /* Regex processing gave no result: do full index scan */
+ res = true;
+ }
+ else
+ res = trigramsMatchGraph((TrgmPackedGraph *) extra_data[0],
+ check);
+ break;
default:
elog(ERROR, "unrecognized strategy number: %d", strategy);
res = false; /* keep compiler quiet */
diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c
index d59c8eb670..69dc7f71f0 100644
--- a/contrib/pg_trgm/trgm_gist.c
+++ b/contrib/pg_trgm/trgm_gist.c
@@ -8,37 +8,35 @@
#include "access/skey.h"
-PG_FUNCTION_INFO_V1(gtrgm_in);
-Datum gtrgm_in(PG_FUNCTION_ARGS);
+typedef struct
+{
+ /* most recent inputs to gtrgm_consistent */
+ StrategyNumber strategy;
+ text *query;
+ /* extracted trigrams for query */
+ TRGM *trigrams;
+ /* if a regex operator, the extracted graph */
+ TrgmPackedGraph *graph;
-PG_FUNCTION_INFO_V1(gtrgm_out);
-Datum gtrgm_out(PG_FUNCTION_ARGS);
+ /*
+ * The "query" and "trigrams" are stored in the same palloc block as this
+ * cache struct, at MAXALIGN'ed offsets. The graph however isn't.
+ */
+} gtrgm_consistent_cache;
-PG_FUNCTION_INFO_V1(gtrgm_compress);
-Datum gtrgm_compress(PG_FUNCTION_ARGS);
+#define GETENTRY(vec,pos) ((TRGM *) DatumGetPointer((vec)->vector[(pos)].key))
-PG_FUNCTION_INFO_V1(gtrgm_decompress);
-Datum gtrgm_decompress(PG_FUNCTION_ARGS);
+PG_FUNCTION_INFO_V1(gtrgm_in);
+PG_FUNCTION_INFO_V1(gtrgm_out);
+PG_FUNCTION_INFO_V1(gtrgm_compress);
+PG_FUNCTION_INFO_V1(gtrgm_decompress);
PG_FUNCTION_INFO_V1(gtrgm_consistent);
-Datum gtrgm_consistent(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(gtrgm_distance);
-Datum gtrgm_distance(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(gtrgm_union);
-Datum gtrgm_union(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(gtrgm_same);
-Datum gtrgm_same(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(gtrgm_penalty);
-Datum gtrgm_penalty(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(gtrgm_picksplit);
-Datum gtrgm_picksplit(PG_FUNCTION_ARGS);
-
-#define GETENTRY(vec,pos) ((TRGM *) DatumGetPointer((vec)->vector[(pos)].key))
/* Number of one-bits in an unsigned byte */
static const uint8 number_of_ones[256] = {
@@ -78,10 +76,10 @@ gtrgm_out(PG_FUNCTION_ARGS)
static void
makesign(BITVECP sign, TRGM *a)
{
- int4 k,
+ int32 k,
len = ARRNELEM(a);
trgm *ptr = GETARR(a);
- int4 tmp = 0;
+ int32 tmp = 0;
MemSet((void *) sign, 0, sizeof(BITVEC));
SETBIT(sign, SIGLENBIT); /* set last unused bit */
@@ -112,7 +110,7 @@ gtrgm_compress(PG_FUNCTION_ARGS)
else if (ISSIGNKEY(DatumGetPointer(entry->key)) &&
!ISALLTRUE(DatumGetPointer(entry->key)))
{
- int4 i,
+ int32 i,
len;
TRGM *res;
BITVECP sign = GETSIGN(DatumGetPointer(entry->key));
@@ -160,14 +158,14 @@ gtrgm_decompress(PG_FUNCTION_ARGS)
}
}
-static int4
+static int32
cnt_sml_sign_common(TRGM *qtrg, BITVECP sign)
{
- int4 count = 0;
- int4 k,
+ int32 count = 0;
+ int32 k,
len = ARRNELEM(qtrg);
trgm *ptr = GETARR(qtrg);
- int4 tmp = 0;
+ int32 tmp = 0;
for (k = 0; k < len; k++)
{
@@ -191,24 +189,30 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
TRGM *qtrg;
bool res;
Size querysize = VARSIZE(query);
- char *cache = (char *) fcinfo->flinfo->fn_extra,
- *cachedQuery = cache + MAXALIGN(sizeof(StrategyNumber));
+ gtrgm_consistent_cache *cache;
/*
- * Store both the strategy number and extracted trigrams in cache, because
- * trigram extraction is relatively CPU-expensive. We must include
- * strategy number because trigram extraction depends on strategy.
+ * We keep the extracted trigrams in cache, because trigram extraction is
+ * relatively CPU-expensive. When trying to reuse a cached value, check
+ * strategy number not just query itself, because trigram extraction
+ * depends on strategy.
*
- * The cached structure contains the strategy number, then the input query
- * (starting at a MAXALIGN boundary), then the TRGM value (also starting
- * at a MAXALIGN boundary).
+ * The cached structure is a single palloc chunk containing the
+ * gtrgm_consistent_cache header, then the input query (starting at a
+ * MAXALIGN boundary), then the TRGM value (also starting at a MAXALIGN
+ * boundary). However we don't try to include the regex graph (if any) in
+ * that struct. (XXX currently, this approach can leak regex graphs
+ * across index rescans. Not clear if that's worth fixing.)
*/
+ cache = (gtrgm_consistent_cache *) fcinfo->flinfo->fn_extra;
if (cache == NULL ||
- strategy != *((StrategyNumber *) cache) ||
- VARSIZE(cachedQuery) != querysize ||
- memcmp(cachedQuery, query, querysize) != 0)
+ cache->strategy != strategy ||
+ VARSIZE(cache->query) != querysize ||
+ memcmp((char *) cache->query, (char *) query, querysize) != 0)
{
- char *newcache;
+ gtrgm_consistent_cache *newcache;
+ TrgmPackedGraph *graph = NULL;
+ Size qtrgsize;
switch (strategy)
{
@@ -225,28 +229,58 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
qtrg = generate_wildcard_trgm(VARDATA(query),
querysize - VARHDRSZ);
break;
+ case RegExpICaseStrategyNumber:
+#ifndef IGNORECASE
+ elog(ERROR, "cannot handle ~* with case-sensitive trigrams");
+#endif
+ /* FALL THRU */
+ case RegExpStrategyNumber:
+ qtrg = createTrgmNFA(query, PG_GET_COLLATION(),
+ &graph, fcinfo->flinfo->fn_mcxt);
+ /* just in case an empty array is returned ... */
+ if (qtrg && ARRNELEM(qtrg) <= 0)
+ {
+ pfree(qtrg);
+ qtrg = NULL;
+ }
+ break;
default:
elog(ERROR, "unrecognized strategy number: %d", strategy);
qtrg = NULL; /* keep compiler quiet */
break;
}
- newcache = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- MAXALIGN(sizeof(StrategyNumber)) +
- MAXALIGN(querysize) +
- VARSIZE(qtrg));
- cachedQuery = newcache + MAXALIGN(sizeof(StrategyNumber));
+ qtrgsize = qtrg ? VARSIZE(qtrg) : 0;
- *((StrategyNumber *) newcache) = strategy;
- memcpy(cachedQuery, query, querysize);
- memcpy(cachedQuery + MAXALIGN(querysize), qtrg, VARSIZE(qtrg));
+ newcache = (gtrgm_consistent_cache *)
+ MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
+ MAXALIGN(sizeof(gtrgm_consistent_cache)) +
+ MAXALIGN(querysize) +
+ qtrgsize);
+
+ newcache->strategy = strategy;
+ newcache->query = (text *)
+ ((char *) newcache + MAXALIGN(sizeof(gtrgm_consistent_cache)));
+ memcpy((char *) newcache->query, (char *) query, querysize);
+ if (qtrg)
+ {
+ newcache->trigrams = (TRGM *)
+ ((char *) newcache->query + MAXALIGN(querysize));
+ memcpy((char *) newcache->trigrams, (char *) qtrg, qtrgsize);
+ /* release qtrg in case it was made in fn_mcxt */
+ pfree(qtrg);
+ }
+ else
+ newcache->trigrams = NULL;
+ newcache->graph = graph;
if (cache)
pfree(cache);
- fcinfo->flinfo->fn_extra = newcache;
+ fcinfo->flinfo->fn_extra = (void *) newcache;
+ cache = newcache;
}
- qtrg = (TRGM *) (cachedQuery + MAXALIGN(querysize));
+ qtrg = cache->trigrams;
switch (strategy)
{
@@ -267,8 +301,8 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
}
else
{ /* non-leaf contains signature */
- int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
- int4 len = ARRNELEM(qtrg);
+ int32 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
+ int32 len = ARRNELEM(qtrg);
if (len == 0)
res = false;
@@ -317,6 +351,63 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
}
}
break;
+ case RegExpICaseStrategyNumber:
+#ifndef IGNORECASE
+ elog(ERROR, "cannot handle ~* with case-sensitive trigrams");
+#endif
+ /* FALL THRU */
+ case RegExpStrategyNumber:
+ /* Regexp search is inexact */
+ *recheck = true;
+
+ /* Check regex match as much as we can with available info */
+ if (qtrg)
+ {
+ if (GIST_LEAF(entry))
+ { /* all leaves contain orig trgm */
+ bool *check;
+
+ check = trgm_presence_map(qtrg, key);
+ res = trigramsMatchGraph(cache->graph, check);
+ pfree(check);
+ }
+ else if (ISALLTRUE(key))
+ { /* non-leaf contains signature */
+ res = true;
+ }
+ else
+ { /* non-leaf contains signature */
+ int32 k,
+ tmp = 0,
+ len = ARRNELEM(qtrg);
+ trgm *ptr = GETARR(qtrg);
+ BITVECP sign = GETSIGN(key);
+ bool *check;
+
+ /*
+ * GETBIT() tests may give false positives, due to limited
+ * size of the sign array. But since trigramsMatchGraph()
+ * implements a monotone boolean function, false positives
+ * in the check array can't lead to a false-negative answer.
+ * So we can apply trigramsMatchGraph despite uncertainty,
+ * and that usefully improves the quality of the search.
+ */
+ check = (bool *) palloc(len * sizeof(bool));
+ for (k = 0; k < len; k++)
+ {
+ CPTRGM(((char *) &tmp), ptr + k);
+ check[k] = GETBIT(sign, HASHVAL(tmp));
+ }
+ res = trigramsMatchGraph(cache->graph, check);
+ pfree(check);
+ }
+ }
+ else
+ {
+ /* trigram-free query must be rechecked everywhere */
+ res = true;
+ }
+ break;
default:
elog(ERROR, "unrecognized strategy number: %d", strategy);
res = false; /* keep compiler quiet */
@@ -379,8 +470,8 @@ gtrgm_distance(PG_FUNCTION_ARGS)
}
else
{ /* non-leaf contains signature */
- int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
- int4 len = ARRNELEM(qtrg);
+ int32 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
+ int32 len = ARRNELEM(qtrg);
res = (len == 0) ? -1.0 : 1.0 - ((float8) count) / ((float8) len);
}
@@ -394,10 +485,10 @@ gtrgm_distance(PG_FUNCTION_ARGS)
PG_RETURN_FLOAT8(res);
}
-static int4
+static int32
unionkey(BITVECP sbase, TRGM *add)
{
- int4 i;
+ int32 i;
if (ISSIGNKEY(add))
{
@@ -412,7 +503,7 @@ unionkey(BITVECP sbase, TRGM *add)
else
{
trgm *ptr = GETARR(add);
- int4 tmp = 0;
+ int32 tmp = 0;
for (i = 0; i < ARRNELEM(add); i++)
{
@@ -428,11 +519,11 @@ Datum
gtrgm_union(PG_FUNCTION_ARGS)
{
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
- int4 len = entryvec->n;
+ int32 len = entryvec->n;
int *size = (int *) PG_GETARG_POINTER(1);
BITVEC base;
- int4 i;
- int4 flag = 0;
+ int32 i;
+ int32 flag = 0;
TRGM *result;
MemSet((void *) base, 0, sizeof(BITVEC));
@@ -474,7 +565,7 @@ gtrgm_same(PG_FUNCTION_ARGS)
*result = false;
else
{
- int4 i;
+ int32 i;
BITVECP sa = GETSIGN(a),
sb = GETSIGN(b);
@@ -491,7 +582,7 @@ gtrgm_same(PG_FUNCTION_ARGS)
}
else
{ /* a and b ISARRKEY */
- int4 lena = ARRNELEM(a),
+ int32 lena = ARRNELEM(a),
lenb = ARRNELEM(b);
if (lena != lenb)
@@ -500,7 +591,7 @@ gtrgm_same(PG_FUNCTION_ARGS)
{
trgm *ptra = GETARR(a),
*ptrb = GETARR(b);
- int4 i;
+ int32 i;
*result = true;
for (i = 0; i < lena; i++)
@@ -515,10 +606,10 @@ gtrgm_same(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(result);
}
-static int4
+static int32
sizebitvec(BITVECP sign)
{
- int4 size = 0,
+ int32 size = 0,
i;
LOOPBYTE
@@ -634,7 +725,7 @@ fillcache(CACHESIGN *item, TRGM *key)
typedef struct
{
OffsetNumber pos;
- int4 cost;
+ int32 cost;
} SPLITCOST;
static int
@@ -675,11 +766,11 @@ gtrgm_picksplit(PG_FUNCTION_ARGS)
*datum_r;
BITVECP union_l,
union_r;
- int4 size_alpha,
+ int32 size_alpha,
size_beta;
- int4 size_waste,
+ int32 size_waste,
waste = -1;
- int4 nbytes;
+ int32 nbytes;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c
index 4e32c6f654..c385e09edd 100644
--- a/contrib/pg_trgm/trgm_op.c
+++ b/contrib/pg_trgm/trgm_op.c
@@ -9,6 +9,7 @@
#include "catalog/pg_type.h"
#include "tsearch/ts_locale.h"
+#include "utils/memutils.h"
PG_MODULE_MAGIC;
@@ -16,22 +17,11 @@ PG_MODULE_MAGIC;
float4 trgm_limit = 0.3f;
PG_FUNCTION_INFO_V1(set_limit);
-Datum set_limit(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(show_limit);
-Datum show_limit(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(show_trgm);
-Datum show_trgm(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(similarity);
-Datum similarity(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(similarity_dist);
-Datum similarity_dist(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(similarity_op);
-Datum similarity_op(PG_FUNCTION_ARGS);
Datum
@@ -77,12 +67,6 @@ unique_array(trgm *a, int len)
return curend + 1 - a;
}
-#ifdef KEEPONLYALNUM
-#define iswordchr(c) (t_isalpha(c) || t_isdigit(c))
-#else
-#define iswordchr(c) (!t_isspace(c))
-#endif
-
/*
* Finds first word in string, returns pointer to the word,
* endword points to the character after word
@@ -92,7 +76,7 @@ find_word(char *str, int lenstr, char **endword, int *charlen)
{
char *beginword = str;
- while (beginword - str < lenstr && !iswordchr(beginword))
+ while (beginword - str < lenstr && !ISWORDCHR(beginword))
beginword += pg_mblen(beginword);
if (beginword - str >= lenstr)
@@ -100,7 +84,7 @@ find_word(char *str, int lenstr, char **endword, int *charlen)
*endword = beginword;
*charlen = 0;
- while (*endword - str < lenstr && iswordchr(*endword))
+ while (*endword - str < lenstr && ISWORDCHR(*endword))
{
*endword += pg_mblen(*endword);
(*charlen)++;
@@ -109,9 +93,13 @@ find_word(char *str, int lenstr, char **endword, int *charlen)
return beginword;
}
-#ifdef USE_WIDE_UPPER_LOWER
-static void
-cnt_trigram(trgm *tptr, char *str, int bytelen)
+/*
+ * Reduce a trigram (three possibly multi-byte characters) to a trgm,
+ * which is always exactly three bytes. If we have three single-byte
+ * characters, we just use them as-is; otherwise we form a hash value.
+ */
+void
+compact_trigram(trgm *tptr, char *str, int bytelen)
{
if (bytelen == 3)
{
@@ -131,7 +119,6 @@ cnt_trigram(trgm *tptr, char *str, int bytelen)
CPTRGM(tptr, &crc);
}
}
-#endif
/*
* Adds trigrams from words (already padded).
@@ -144,16 +131,16 @@ make_trigrams(trgm *tptr, char *str, int bytelen, int charlen)
if (charlen < 3)
return tptr;
-#ifdef USE_WIDE_UPPER_LOWER
- if (pg_database_encoding_max_length() > 1)
+ if (bytelen > charlen)
{
+ /* Find multibyte character boundaries and apply compact_trigram */
int lenfirst = pg_mblen(str),
lenmiddle = pg_mblen(str + lenfirst),
lenlast = pg_mblen(str + lenfirst + lenmiddle);
while ((ptr - str) + lenfirst + lenmiddle + lenlast <= bytelen)
{
- cnt_trigram(tptr, ptr, lenfirst + lenmiddle + lenlast);
+ compact_trigram(tptr, ptr, lenfirst + lenmiddle + lenlast);
ptr += lenfirst;
tptr++;
@@ -164,8 +151,8 @@ make_trigrams(trgm *tptr, char *str, int bytelen, int charlen)
}
}
else
-#endif
{
+ /* Fast path when there are no multibyte characters */
Assert(bytelen == charlen);
while (ptr - str < bytelen - 2 /* number of trigrams = strlen - 2 */ )
@@ -191,6 +178,18 @@ generate_trgm(char *str, int slen)
char *bword,
*eword;
+ /*
+ * Guard against possible overflow in the palloc requests below. (We
+ * don't worry about the additive constants, since palloc can detect
+ * requests that are a little above MaxAllocSize --- we just need to
+ * prevent integer overflow in the multiplications.)
+ */
+ if ((Size) (slen / 2) >= (MaxAllocSize / (sizeof(trgm) * 3)) ||
+ (Size) slen >= (MaxAllocSize / pg_database_encoding_max_length()))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("out of memory")));
+
trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) *3);
trg->flag = ARRKEY;
SET_VARSIZE(trg, TRGMHDRSIZE);
@@ -200,7 +199,8 @@ generate_trgm(char *str, int slen)
tptr = GETARR(trg);
- buf = palloc(sizeof(char) * (slen + 4));
+ /* Allocate a buffer for case-folded, blank-padded words */
+ buf = (char *) palloc(slen * pg_database_encoding_max_length() + 4);
if (LPADDING > 0)
{
@@ -224,6 +224,7 @@ generate_trgm(char *str, int slen)
#ifdef IGNORECASE
pfree(bword);
#endif
+
buf[LPADDING + bytelen] = ' ';
buf[LPADDING + bytelen + 1] = ' ';
@@ -239,7 +240,10 @@ generate_trgm(char *str, int slen)
if ((len = tptr - GETARR(trg)) == 0)
return trg;
- if (len > 0)
+ /*
+ * Make trigrams unique.
+ */
+ if (len > 1)
{
qsort((void *) GETARR(trg), len, sizeof(trgm), comp_trgm);
len = unique_array(GETARR(trg), len);
@@ -272,33 +276,36 @@ get_wildcard_part(const char *str, int lenstr,
const char *beginword = str;
const char *endword;
char *s = buf;
- bool in_wildcard_meta = false;
+ bool in_leading_wildcard_meta = false;
+ bool in_trailing_wildcard_meta = false;
bool in_escape = false;
int clen;
/*
- * Find the first word character remembering whether last character was
- * wildcard meta-character.
+ * Find the first word character, remembering whether preceding character
+ * was wildcard meta-character. Note that the in_escape state persists
+ * from this loop to the next one, since we may exit at a word character
+ * that is in_escape.
*/
while (beginword - str < lenstr)
{
if (in_escape)
{
- in_escape = false;
- in_wildcard_meta = false;
- if (iswordchr(beginword))
+ if (ISWORDCHR(beginword))
break;
+ in_escape = false;
+ in_leading_wildcard_meta = false;
}
else
{
if (ISESCAPECHAR(beginword))
in_escape = true;
else if (ISWILDCARDCHAR(beginword))
- in_wildcard_meta = true;
- else if (iswordchr(beginword))
+ in_leading_wildcard_meta = true;
+ else if (ISWORDCHR(beginword))
break;
else
- in_wildcard_meta = false;
+ in_leading_wildcard_meta = false;
}
beginword += pg_mblen(beginword);
}
@@ -310,11 +317,11 @@ get_wildcard_part(const char *str, int lenstr,
return NULL;
/*
- * Add left padding spaces if last character wasn't wildcard
+ * Add left padding spaces if preceding character wasn't wildcard
* meta-character.
*/
*charlen = 0;
- if (!in_wildcard_meta)
+ if (!in_leading_wildcard_meta)
{
if (LPADDING > 0)
{
@@ -333,23 +340,29 @@ get_wildcard_part(const char *str, int lenstr,
* string boundary. Strip escapes during copy.
*/
endword = beginword;
- in_wildcard_meta = false;
- in_escape = false;
while (endword - str < lenstr)
{
clen = pg_mblen(endword);
if (in_escape)
{
- in_escape = false;
- in_wildcard_meta = false;
- if (iswordchr(endword))
+ if (ISWORDCHR(endword))
{
memcpy(s, endword, clen);
(*charlen)++;
s += clen;
}
else
+ {
+ /*
+ * Back up endword to the escape character when stopping at an
+ * escaped char, so that subsequent get_wildcard_part will
+ * restart from the escape character. We assume here that
+ * escape chars are single-byte.
+ */
+ endword--;
break;
+ }
+ in_escape = false;
}
else
{
@@ -357,29 +370,26 @@ get_wildcard_part(const char *str, int lenstr,
in_escape = true;
else if (ISWILDCARDCHAR(endword))
{
- in_wildcard_meta = true;
+ in_trailing_wildcard_meta = true;
break;
}
- else if (iswordchr(endword))
+ else if (ISWORDCHR(endword))
{
memcpy(s, endword, clen);
(*charlen)++;
s += clen;
}
else
- {
- in_wildcard_meta = false;
break;
- }
}
endword += clen;
}
/*
- * Add right padding spaces if last character wasn't wildcard
+ * Add right padding spaces if next character isn't wildcard
* meta-character.
*/
- if (!in_wildcard_meta)
+ if (!in_trailing_wildcard_meta)
{
if (RPADDING > 0)
{
@@ -416,6 +426,18 @@ generate_wildcard_trgm(const char *str, int slen)
bytelen;
const char *eword;
+ /*
+ * Guard against possible overflow in the palloc requests below. (We
+ * don't worry about the additive constants, since palloc can detect
+ * requests that are a little above MaxAllocSize --- we just need to
+ * prevent integer overflow in the multiplications.)
+ */
+ if ((Size) (slen / 2) >= (MaxAllocSize / (sizeof(trgm) * 3)) ||
+ (Size) slen >= (MaxAllocSize / pg_database_encoding_max_length()))
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("out of memory")));
+
trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) *3);
trg->flag = ARRKEY;
SET_VARSIZE(trg, TRGMHDRSIZE);
@@ -425,6 +447,7 @@ generate_wildcard_trgm(const char *str, int slen)
tptr = GETARR(trg);
+ /* Allocate a buffer for blank-padded, but not yet case-folded, words */
buf = palloc(sizeof(char) * (slen + 4));
/*
@@ -445,6 +468,7 @@ generate_wildcard_trgm(const char *str, int slen)
* count trigrams
*/
tptr = make_trigrams(tptr, buf2, bytelen, charlen);
+
#ifdef IGNORECASE
pfree(buf2);
#endif
@@ -458,7 +482,7 @@ generate_wildcard_trgm(const char *str, int slen)
/*
* Make trigrams unique.
*/
- if (len > 0)
+ if (len > 1)
{
qsort((void *) GETARR(trg), len, sizeof(trgm), comp_trgm);
len = unique_array(GETARR(trg), len);
@@ -547,6 +571,10 @@ cnt_sml(TRGM *trg1, TRGM *trg2)
len1 = ARRNELEM(trg1);
len2 = ARRNELEM(trg2);
+ /* explicit test is needed to avoid 0/0 division when both lengths are 0 */
+ if (len1 <= 0 || len2 <= 0)
+ return (float4) 0.0;
+
while (ptr1 - GETARR(trg1) < len1 && ptr2 - GETARR(trg2) < len2)
{
int res = CMPTRGM(ptr1, ptr2);
@@ -564,9 +592,9 @@ cnt_sml(TRGM *trg1, TRGM *trg2)
}
#ifdef DIVUNION
- return ((((float4) count) / ((float4) (len1 + len2 - count))));
+ return ((float4) count) / ((float4) (len1 + len2 - count));
#else
- return (((float) count) / ((float) ((len1 > len2) ? len1 : len2)));
+ return ((float4) count) / ((float4) ((len1 > len2) ? len1 : len2));
#endif
}
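With DIVUNION defined (as it is in trgm.h above), the similarity returned here is the count of shared trigrams divided by the size of the trigram union. A minimal standalone C sketch of that arithmetic, for illustration only and not part of the patch; the trigram counts in the comment assume the default LPADDING=2 / RPADDING=1 padding:

#include <stdio.h>

/* Shared trigrams over the union of the two trigram sets (DIVUNION case). */
static float
trgm_similarity_sketch(int len1, int len2, int count)
{
	if (len1 <= 0 || len2 <= 0)
		return 0.0f;			/* mirrors the 0/0 guard added above */
	return (float) count / (float) (len1 + len2 - count);
}

int
main(void)
{
	/*
	 * 'word'  pads to "  word "  -> 5 trigrams
	 * 'words' pads to "  words " -> 6 trigrams, 4 of them shared,
	 * so similarity = 4 / (5 + 6 - 4) = 4/7, about 0.571.
	 */
	printf("%f\n", trgm_similarity_sketch(5, 6, 4));
	return 0;
}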
@@ -609,6 +637,50 @@ trgm_contained_by(TRGM *trg1, TRGM *trg2)
return true;
}
+/*
+ * Return a palloc'd boolean array showing, for each trigram in "query",
+ * whether it is present in the trigram array "key".
+ * This relies on the "key" array being sorted, but "query" need not be.
+ */
+bool *
+trgm_presence_map(TRGM *query, TRGM *key)
+{
+ bool *result;
+ trgm *ptrq = GETARR(query),
+ *ptrk = GETARR(key);
+ int lenq = ARRNELEM(query),
+ lenk = ARRNELEM(key),
+ i;
+
+ result = (bool *) palloc0(lenq * sizeof(bool));
+
+ /* for each query trigram, do a binary search in the key array */
+ for (i = 0; i < lenq; i++)
+ {
+ int lo = 0;
+ int hi = lenk;
+
+ while (lo < hi)
+ {
+ int mid = (lo + hi) / 2;
+ int res = CMPTRGM(ptrq, ptrk + mid);
+
+ if (res < 0)
+ hi = mid;
+ else if (res > 0)
+ lo = mid + 1;
+ else
+ {
+ result[i] = true;
+ break;
+ }
+ }
+ ptrq++;
+ }
+
+ return result;
+}
+
Datum
similarity(PG_FUNCTION_ARGS)
{
diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c
new file mode 100644
index 0000000000..9f050533c5
--- /dev/null
+++ b/contrib/pg_trgm/trgm_regexp.c
@@ -0,0 +1,2247 @@
+/*-------------------------------------------------------------------------
+ *
+ * trgm_regexp.c
+ * Regular expression matching using trigrams.
+ *
+ * The general idea of trigram index support for a regular expression (regex)
+ * search is to transform the regex into a logical expression on trigrams.
+ * For example:
+ *
+ * (ab|cd)efg => ((abe & bef) | (cde & def)) & efg
+ *
+ * If a string matches the regex, then it must match the logical expression on
+ * trigrams. The opposite is not necessarily true, however: a string that
+ * matches the logical expression might not match the original regex. Such
+ * false positives are removed via recheck, by running the regular regex match
+ * operator on the retrieved heap tuple.
+ *
+ * Since the trigram expression involves both AND and OR operators, we can't
+ * expect the core index machinery to evaluate it completely. Instead, the
+ * result of regex analysis is a list of trigrams to be sought in the index,
+ * plus a simplified graph that is used by trigramsMatchGraph() to determine
+ * whether a particular indexed value matches the expression.
+ *
+ * Converting a regex to a trigram expression is based on analysis of an
+ * automaton corresponding to the regex. The algorithm consists of four
+ * stages:
+ *
+ * 1) Compile the regexp to NFA form. This is handled by the PostgreSQL
+ * regexp library, which provides accessors for its opaque regex_t struct
+ * to expose the NFA state graph and the "colors" (sets of equivalent
+ * characters) used as state transition labels.
+ *
+ * 2) Transform the original NFA into an expanded graph, where arcs
+ * are labeled with trigrams that must be present in order to move from
+ * one state to another via the arcs. The trigrams used in this stage
+ * consist of colors, not characters, as in the original NFA.
+ *
+ * 3) Expand the color trigrams into regular trigrams consisting of
+ * characters. If too many distinct trigrams are produced, trigrams are
+ * eliminated and the graph is simplified until it's simple enough.
+ *
+ * 4) Finally, the resulting graph is packed into a TrgmPackedGraph struct,
+ * and returned to the caller.
+ *
+ * 1) Compile the regexp to NFA form
+ * ---------------------------------
+ * The automaton returned by the regexp compiler is a graph where vertices
+ * are "states" and arcs are labeled with colors. Each color represents
+ * a set of characters, so that all characters assigned to the same color
+ * are interchangeable, so far as matching the regexp is concerned. There
+ * are two special states: "initial" and "final". A state can have multiple
+ * outgoing arcs labeled with the same color, which makes the automaton
+ * non-deterministic, because it can be in many states simultaneously.
+ *
+ * Note that this NFA is already lossy compared to the original regexp,
+ * since it ignores some regex features such as lookahead constraints and
+ * backref matching. This is OK for our purposes since it's still the case
+ * that only strings matching the NFA can possibly satisfy the regexp.
+ *
+ * 2) Transform the original NFA into an expanded graph
+ * ----------------------------------------------------
+ * In the 2nd stage, the automaton is transformed into a graph based on the
+ * original NFA. Each state in the expanded graph represents a state from
+ * the original NFA, plus a prefix identifying the last two characters
+ * (colors, to be precise) seen before entering the state. There can be
+ * multiple states in the expanded graph for each state in the original NFA,
+ * depending on what characters can precede it. A prefix position can be
+ * "unknown" if it's uncertain what the preceding character was, or "blank"
+ * if the character was a non-word character (we don't need to distinguish
+ * which non-word character it was, so just think of all of them as blanks).
+ *
+ * For convenience in description, call an expanded-state identifier
+ * (two prefix colors plus a state number from the original NFA) an
+ * "enter key".
+ *
+ * Each arc of the expanded graph is labelled with a trigram that must be
+ * present in the string to match. We can construct this from an out-arc of
+ * the underlying NFA state by combining the expanded state's prefix with the
+ * color label of the underlying out-arc, if neither prefix position is
+ * "unknown". But note that some of the colors in the trigram might be
+ * "blank". This is OK since we want to generate word-boundary trigrams as
+ * the regular trigram machinery would, if we know that some word characters
+ * must be adjacent to a word boundary in all strings matching the NFA.
+ *
+ * The expanded graph can also have fewer states than the original NFA,
+ * because we don't bother to make a separate state entry unless the state
+ * is reachable by a valid arc. When an enter key is reachable from a state
+ * of the expanded graph, but we do not know a complete trigram associated
+ * with that transition, we cannot make a valid arc; instead we insert the
+ * enter key into the enterKeys list of the source state. This effectively
+ * means that the two expanded states are not reliably distinguishable based
+ * on examining trigrams.
+ *
+ * So the expanded graph resembles the original NFA, but the arcs are
+ * labeled with trigrams instead of individual characters, and there may be
+ * more or fewer states. It is a lossy representation of the original NFA:
+ * any string that matches the original regexp must match the expanded graph,
+ * but the reverse is not true.
+ *
+ * We build the expanded graph through a breadth-first traversal of states
+ * reachable from the initial state. At each reachable state, we identify the
+ * states reachable from it without traversing a predictable trigram, and add
+ * those states' enter keys to the current state. Then we generate all
+ * out-arcs leading out of this collection of states that have predictable
+ * trigrams, adding their target states to the queue of states to examine.
+ *
+ * When building the graph, if the number of states or arcs exceeds the pre-defined
+ * limits, we give up and simply mark any states not yet processed as final
+ * states. Roughly speaking, that means that we make use of some portion from
+ * the beginning of the regexp. Also, any colors that have too many member
+ * characters are treated as "unknown", so that we can't derive trigrams
+ * from them.
+ *
+ * 3) Expand the color trigrams into regular trigrams
+ * --------------------------------------------------
+ * The trigrams in the expanded graph are "color trigrams", consisting
+ * of three consecutive colors that must be present in the string. But for
+ * search, we need regular trigrams consisting of characters. In the 3rd
+ * stage, the color trigrams are expanded into regular trigrams. Since each
+ * color can represent many characters, the total number of regular trigrams
+ * after expansion could be very large. Because searching the index for
+ * thousands of trigrams would be slow, and would likely produce so many
+ * false positives that we would have to traverse a large fraction of the
+ * index, the graph is simplified further in a lossy fashion by removing
+ * color trigrams. When a color trigram is removed, the states connected by
+ * any arcs labelled with that trigram are merged.
+ *
+ * Trigrams do not all have equivalent value for searching: some of them are
+ * more frequent and some of them are less frequent. Ideally, we would like
+ * to know the distribution of trigrams, but we don't. However, because of
+ * padding we know for sure that the blank (space) character is more frequent
+ * than any other, so we can penalize trigrams according to their whitespace
+ * content. The
+ * penalty assigned to each color trigram is the number of simple trigrams
+ * it would produce, times the penalties[] multiplier associated with its
+ * whitespace content. (The penalties[] constants were calculated by analysis
+ * of some real-life text.) We eliminate color trigrams starting with the
+ * highest-penalty one, until we get to a total penalty of no more than
+ * WISH_TRGM_PENALTY. However, we cannot remove a color trigram if that would
+ * lead to merging the initial and final states, so we may not be able to
+ * reach WISH_TRGM_PENALTY. It's still okay so long as we have no more than
+ * MAX_TRGM_COUNT simple trigrams in total, otherwise we fail.
+ *
+ * 4) Pack the graph into a compact representation
+ * -----------------------------------------------
+ * The 2nd and 3rd stages might have eliminated or merged many of the states
+ * and trigrams created earlier, so in this final stage, the graph is
+ * compacted and packed into a simpler struct that contains only the
+ * information needed to evaluate it.
+ *
+ * ALGORITHM EXAMPLE:
+ *
+ * Consider the example regex "ab[cd]". This regex is transformed into the
+ * following NFA (for simplicity we show colors as their single members):
+ *
+ *                  4#
+ *                c/
+ *     a      b   /
+ * 1* --- 2 ---- 3
+ *                \
+ *               d\
+ *                 5#
+ *
+ * We use * to mark the initial state and # to mark the final state. It's not
+ * depicted, but states 1, 4, 5 have self-referencing arcs for all possible
+ * characters, because this pattern can match any part of a string.
+ *
+ * As the result of stage 2 we will have the following graph:
+ *
+ *     abc     abd
+ * 2# <---- 1* ----> 3#
+ *
+ * The process for generating this graph is:
+ * 1) Create state 1 with enter key (UNKNOWN, UNKNOWN, 1).
+ * 2) Add key (UNKNOWN, "a", 2) to state 1.
+ * 3) Add key ("a", "b", 3) to state 1.
+ * 4) Create new state 2 with enter key ("b", "c", 4). Add an arc
+ * from state 1 to state 2 with label trigram "abc".
+ * 5) Mark state 2 final because state 4 of source NFA is marked as final.
+ * 6) Create new state 3 with enter key ("b", "d", 5). Add an arc
+ * from state 1 to state 3 with label trigram "abd".
+ * 7) Mark state 3 final because state 5 of source NFA is marked as final.
+ *
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ * contrib/pg_trgm/trgm_regexp.c
+ *
+ *-------------------------------------------------------------------------
+ */
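To make the example in the comment above concrete, here is a self-contained C sketch that evaluates the trigram expression ((abe & bef) | (cde & def)) & efg derived from the regex (ab|cd)efg. It is purely illustrative and independent of this file's code: has_trgm() is a hypothetical helper that simply looks for a literal trigram in a string, ignoring the word-boundary padding the real extraction applies.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: does the string contain this literal trigram? */
static bool
has_trgm(const char *s, const char *trgm)
{
	return strstr(s, trgm) != NULL;
}

/* The trigram expression for (ab|cd)efg from the comment above. */
static bool
might_match(const char *s)
{
	return ((has_trgm(s, "abe") && has_trgm(s, "bef")) ||
			(has_trgm(s, "cde") && has_trgm(s, "def"))) &&
		has_trgm(s, "efg");
}

int
main(void)
{
	printf("%d\n", might_match("xxabefgxx"));	/* 1: also matches the regex */
	printf("%d\n", might_match("abcdefgxx"));	/* 1: also matches the regex */
	printf("%d\n", might_match("abe bef efg"));	/* 1: false positive */
	printf("%d\n", might_match("abefxx"));		/* 0: pruned by the index */
	return 0;
}

The third call illustrates why the regex strategies always request a recheck: a string can contain every required trigram and still not match the regex, so the operator itself is re-applied to each retrieved row.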
+#include "postgres.h"
+
+#include "trgm.h"
+
+#include "regex/regexport.h"
+#include "tsearch/ts_locale.h"
+#include "utils/hsearch.h"
+#include "utils/memutils.h"
+
+
+/*
+ * Uncomment to print intermediate stages, for exploring and debugging the
+ * algorithm implementation. This produces three graph files in /tmp,
+ * in Graphviz .dot format.
+ */
+/* #define TRGM_REGEXP_DEBUG */
+
+/*
+ * These parameters are used to limit the amount of work done.
+ * Otherwise regex processing could be too slow and memory-consuming.
+ *
+ * MAX_EXPANDED_STATES - How many states we allow in expanded graph
+ * MAX_EXPANDED_ARCS - How many arcs we allow in expanded graph
+ * MAX_TRGM_COUNT - How many simple trigrams we allow to be extracted
+ * WISH_TRGM_PENALTY - Maximum desired sum of color trigram penalties
+ * COLOR_COUNT_LIMIT - Maximum number of characters per color
+ */
+#define MAX_EXPANDED_STATES 128
+#define MAX_EXPANDED_ARCS 1024
+#define MAX_TRGM_COUNT 256
+#define WISH_TRGM_PENALTY 16
+#define COLOR_COUNT_LIMIT 256
+
+/*
+ * Penalty multipliers for trigram counts depending on whitespace contents.
+ * Numbers based on analysis of real-life texts.
+ */
+const float4 penalties[8] = {
+ 1.0f, /* "aaa" */
+ 3.5f, /* "aa " */
+ 0.0f, /* "a a" (impossible) */
+ 0.0f, /* "a  " (impossible) */
+ 4.2f, /* " aa" */
+ 2.1f, /* " a " */
+ 25.0f, /* "  a" */
+ 0.0f /* "   " (impossible) */
+};
+
+/* Struct representing a single pg_wchar, converted back to multibyte form */
+typedef struct
+{
+ char bytes[MAX_MULTIBYTE_CHAR_LEN];
+} trgm_mb_char;
+
+/*
+ * Attributes of NFA colors:
+ *
+ * expandable - we know the character expansion of this color
+ * containsNonWord - color contains non-word characters
+ * (which will not be extracted into trigrams)
+ * wordCharsCount - count of word characters in color
+ * wordChars - array of this color's word characters
+ * (which can be extracted into trigrams)
+ *
+ * When expandable is false, the other attributes don't matter; we just
+ * assume this color represents unknown character(s).
+ */
+typedef struct
+{
+ bool expandable;
+ bool containsNonWord;
+ int wordCharsCount;
+ trgm_mb_char *wordChars;
+} TrgmColorInfo;
+
+/*
+ * A "prefix" is information about the colors of the last two characters read
+ * before reaching a specific NFA state. These colors can have special values
+ * COLOR_UNKNOWN and COLOR_BLANK. COLOR_UNKNOWN means that we have no
+ * information, for example because we read some character of an unexpandable
+ * color. COLOR_BLANK means that we read a non-word character.
+ *
+ * We call a prefix ambiguous if at least one of its colors is unknown. It's
+ * fully ambiguous if both are unknown, partially ambiguous if only the first
+ * is unknown. (The case of first color known, second unknown is not valid.)
+ *
+ * Wholly- or partly-blank prefixes are mostly handled the same as regular
+ * color prefixes. This allows us to generate appropriate partly-blank
+ * trigrams when the NFA requires word character(s) to appear adjacent to
+ * non-word character(s).
+ */
+typedef int TrgmColor;
+
+/* We assume that colors returned by the regexp engine cannot be these: */
+#define COLOR_UNKNOWN (-1)
+#define COLOR_BLANK (-2)
+
+typedef struct
+{
+ TrgmColor colors[2];
+} TrgmPrefix;
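addKey(), further down in this file, discards enter keys that are redundant with a more general key already present; the containment relation it needs follows directly from the prefix semantics described above. A sketch of that test, relying on the invariant that a known first color with an unknown second color cannot occur (the real prefixContains(), declared below, may differ in detail):

static bool
prefix_contains_sketch(const TrgmPrefix *prefix1, const TrgmPrefix *prefix2)
{
	/*
	 * Fully ambiguous prefix covers everything.  (If colors[1] is unknown,
	 * colors[0] must be unknown too, per the invariant above.)
	 */
	if (prefix1->colors[1] == COLOR_UNKNOWN)
		return true;

	/* Partially ambiguous prefix: only the second color has to agree. */
	if (prefix1->colors[0] == COLOR_UNKNOWN)
		return prefix1->colors[1] == prefix2->colors[1];

	/* Exact prefix covers only an identical prefix. */
	return prefix1->colors[0] == prefix2->colors[0] &&
		prefix1->colors[1] == prefix2->colors[1];
}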
+
+/*
+ * Color-trigram data type. Note that some elements of the trigram can be
+ * COLOR_BLANK, but we don't allow COLOR_UNKNOWN.
+ */
+typedef struct
+{
+ TrgmColor colors[3];
+} ColorTrgm;
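Tying the penalties[] table to these definitions: the table is indexed by which of the three trigram positions are blank, with the first position as the most significant bit (so, for example, index 6 = binary 110 corresponds to "  a"). A sketch of how one color trigram's penalty could be derived from that; the actual computation happens in selectColorTrigrams(), declared below, and may differ in detail:

static float4
color_trgm_penalty_sketch(const ColorTrgm *ct, const TrgmColorInfo *colorInfo)
{
	int			count = 1;		/* simple trigrams this color trigram expands to */
	int			typeIndex = 0;	/* blank-position bits, first position is the MSB */
	int			i;

	for (i = 0; i < 3; i++)
	{
		typeIndex *= 2;
		if (ct->colors[i] == COLOR_BLANK)
			typeIndex++;
		else
			count *= colorInfo[ct->colors[i]].wordCharsCount;
	}
	return penalties[typeIndex] * (float4) count;
}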
+
+/*
+ * Key identifying a state of our expanded graph: color prefix, and number
+ * of the corresponding state in the underlying regex NFA. The color prefix
+ * shows how we reached the regex state (to the extent that we know it).
+ */
+typedef struct
+{
+ TrgmPrefix prefix;
+ int nstate;
+} TrgmStateKey;
+
+/*
+ * One state of the expanded graph.
+ *
+ * stateKey - ID of this state
+ * arcs - outgoing arcs of this state (List of TrgmArc)
+ * enterKeys - enter keys reachable from this state without reading any
+ * predictable trigram (List of TrgmStateKey)
+ * fin - flag indicating this state is final
+ * init - flag indicating this state is initial
+ * parent - parent state, if this state has been merged into another
+ * children - child states (states that have been merged into this one)
+ * number - number of this state (used at the packaging stage)
+ */
+typedef struct TrgmState
+{
+ TrgmStateKey stateKey; /* hashtable key: must be first field */
+ List *arcs;
+ List *enterKeys;
+ bool fin;
+ bool init;
+ struct TrgmState *parent;
+ List *children;
+ int number;
+} TrgmState;
+
+/*
+ * One arc in the expanded graph.
+ */
+typedef struct
+{
+ ColorTrgm ctrgm; /* trigram needed to traverse arc */
+ TrgmState *target; /* next state */
+} TrgmArc;
+
+/*
+ * Information about arc of specific color trigram (used in stage 3)
+ *
+ * Contains pointers to the source and target states.
+ */
+typedef struct
+{
+ TrgmState *source;
+ TrgmState *target;
+} TrgmArcInfo;
+
+/*
+ * Information about color trigram (used in stage 3)
+ *
+ * ctrgm - trigram itself
+ * number - number of this trigram (used in the packaging stage)
+ * count - number of simple trigrams created from this color trigram
+ * expanded - indicates this color trigram is expanded into simple trigrams
+ * arcs - list of all arcs labeled with this color trigram.
+ */
+typedef struct
+{
+ ColorTrgm ctrgm;
+ int number;
+ int count;
+ float4 penalty;
+ bool expanded;
+ List *arcs;
+} ColorTrgmInfo;
+
+/*
+ * Data structure representing all the data we need during regex processing.
+ *
+ * regex - compiled regex
+ * colorInfo - extracted information about regex's colors
+ * ncolors - number of colors in colorInfo[]
+ * states - hashtable of TrgmStates (states of expanded graph)
+ * initState - pointer to initial state of expanded graph
+ * queue - queue of to-be-processed TrgmStates
+ * keysQueue - queue of to-be-processed TrgmStateKeys
+ * arcsCount - total number of arcs of expanded graph (for resource
+ * limiting)
+ * overflowed - we have exceeded resource limit for transformation
+ * colorTrgms - array of all color trigrams present in graph
+ * colorTrgmsCount - count of those color trigrams
+ * totalTrgmCount - total count of extracted simple trigrams
+ */
+typedef struct
+{
+ /* Source regexp, and color information extracted from it (stage 1) */
+ regex_t *regex;
+ TrgmColorInfo *colorInfo;
+ int ncolors;
+
+ /* Expanded graph (stage 2) */
+ HTAB *states;
+ TrgmState *initState;
+
+ /* Workspace for stage 2 */
+ List *queue;
+ List *keysQueue;
+ int arcsCount;
+ bool overflowed;
+
+ /* Information about distinct color trigrams in the graph (stage 3) */
+ ColorTrgmInfo *colorTrgms;
+ int colorTrgmsCount;
+ int totalTrgmCount;
+} TrgmNFA;
+
+/*
+ * Final, compact representation of expanded graph.
+ */
+typedef struct
+{
+ int targetState; /* index of target state (zero-based) */
+ int colorTrgm; /* index of color trigram for transition */
+} TrgmPackedArc;
+
+typedef struct
+{
+ int arcsCount; /* number of out-arcs for this state */
+ TrgmPackedArc *arcs; /* array of arcsCount packed arcs */
+} TrgmPackedState;
+
+/* "typedef struct TrgmPackedGraph TrgmPackedGraph" appears in trgm.h */
+struct TrgmPackedGraph
+{
+ /*
+ * colorTrigramsCount and colorTrigramGroups contain information about
+ * how trigrams are grouped into color trigrams. "colorTrigramsCount" is
+ * the count of color trigrams and "colorTrigramGroups" contains the number of
+ * simple trigrams for each color trigram. The array of simple trigrams
+ * (stored separately from this struct) is ordered so that the simple
+ * trigrams for each color trigram are consecutive, and they're in order
+ * by color trigram number.
+ */
+ int colorTrigramsCount;
+ int *colorTrigramGroups; /* array of size colorTrigramsCount */
+
+ /*
+ * The states of the simplified NFA. State number 0 is always initial
+ * state and state number 1 is always final state.
+ */
+ int statesCount;
+ TrgmPackedState *states; /* array of size statesCount */
+
+ /* Temporary work space for trigramsMatchGraph() */
+ bool *colorTrigramsActive; /* array of size colorTrigramsCount */
+ bool *statesActive; /* array of size statesCount */
+ int *statesQueue; /* array of size statesCount */
+};
+
+/*
+ * Temporary structure for representing an arc during packaging.
+ */
+typedef struct
+{
+ int sourceState;
+ int targetState;
+ int colorTrgm;
+} TrgmPackArcInfo;
+
+
+/* prototypes for private functions */
+static TRGM *createTrgmNFAInternal(regex_t *regex, TrgmPackedGraph **graph,
+ MemoryContext rcontext);
+static void RE_compile(regex_t *regex, text *text_re,
+ int cflags, Oid collation);
+static void getColorInfo(regex_t *regex, TrgmNFA *trgmNFA);
+static bool convertPgWchar(pg_wchar c, trgm_mb_char *result);
+static void transformGraph(TrgmNFA *trgmNFA);
+static void processState(TrgmNFA *trgmNFA, TrgmState *state);
+static void addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key);
+static void addKeyToQueue(TrgmNFA *trgmNFA, TrgmStateKey *key);
+static void addArcs(TrgmNFA *trgmNFA, TrgmState *state);
+static void addArc(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key,
+ TrgmColor co, TrgmStateKey *destKey);
+static bool validArcLabel(TrgmStateKey *key, TrgmColor co);
+static TrgmState *getState(TrgmNFA *trgmNFA, TrgmStateKey *key);
+static bool prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2);
+static bool selectColorTrigrams(TrgmNFA *trgmNFA);
+static TRGM *expandColorTrigrams(TrgmNFA *trgmNFA, MemoryContext rcontext);
+static void fillTrgm(trgm *ptrgm, trgm_mb_char s[3]);
+static void mergeStates(TrgmState *state1, TrgmState *state2);
+static int colorTrgmInfoCmp(const void *p1, const void *p2);
+static int colorTrgmInfoPenaltyCmp(const void *p1, const void *p2);
+static TrgmPackedGraph *packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext);
+static int packArcInfoCmp(const void *a1, const void *a2);
+
+#ifdef TRGM_REGEXP_DEBUG
+static void printSourceNFA(regex_t *regex, TrgmColorInfo *colors, int ncolors);
+static void printTrgmNFA(TrgmNFA *trgmNFA);
+static void printTrgmColor(StringInfo buf, TrgmColor co);
+static void printTrgmPackedGraph(TrgmPackedGraph *packedGraph, TRGM *trigrams);
+#endif
+
+
+/*
+ * Main entry point to process a regular expression.
+ *
+ * Returns an array of trigrams required by the regular expression, or NULL if
+ * the regular expression was too complex to analyze. In addition, a packed
+ * graph representation of the regex is returned into *graph. The results
+ * must be allocated in rcontext (which might or might not be the current
+ * context).
+ */
+TRGM *
+createTrgmNFA(text *text_re, Oid collation,
+ TrgmPackedGraph **graph, MemoryContext rcontext)
+{
+ TRGM *trg;
+ regex_t regex;
+ MemoryContext tmpcontext;
+ MemoryContext oldcontext;
+
+ /*
+ * This processing generates a great deal of cruft, which we'd like to
+ * clean up before returning (since this function may be called in a
+ * query-lifespan memory context). Make a temp context we can work in so
+ * that cleanup is easy.
+ */
+ tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
+ "createTrgmNFA temporary context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
+ oldcontext = MemoryContextSwitchTo(tmpcontext);
+
+ /*
+ * Stage 1: Compile the regexp into a NFA, using the regexp library.
+ */
+#ifdef IGNORECASE
+ RE_compile(&regex, text_re, REG_ADVANCED | REG_ICASE, collation);
+#else
+ RE_compile(&regex, text_re, REG_ADVANCED, collation);
+#endif
+
+ /*
+ * Since the regexp library allocates its internal data structures with
+ * malloc, we need to use a PG_TRY block to ensure that pg_regfree() gets
+ * done even if there's an error.
+ */
+ PG_TRY();
+ {
+ trg = createTrgmNFAInternal(&regex, graph, rcontext);
+ }
+ PG_CATCH();
+ {
+ pg_regfree(&regex);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ pg_regfree(&regex);
+
+ /* Clean up all the cruft we created */
+ MemoryContextSwitchTo(oldcontext);
+ MemoryContextDelete(tmpcontext);
+
+ return trg;
+}
+
+/*
+ * Body of createTrgmNFA, exclusive of regex compilation/freeing.
+ */
+static TRGM *
+createTrgmNFAInternal(regex_t *regex, TrgmPackedGraph **graph,
+ MemoryContext rcontext)
+{
+ TRGM *trg;
+ TrgmNFA trgmNFA;
+
+ trgmNFA.regex = regex;
+
+ /* Collect color information from the regex */
+ getColorInfo(regex, &trgmNFA);
+
+#ifdef TRGM_REGEXP_DEBUG
+ printSourceNFA(regex, trgmNFA.colorInfo, trgmNFA.ncolors);
+#endif
+
+ /*
+ * Stage 2: Create an expanded graph from the source NFA.
+ */
+ transformGraph(&trgmNFA);
+
+#ifdef TRGM_REGEXP_DEBUG
+ printTrgmNFA(&trgmNFA);
+#endif
+
+ /*
+ * Fail if we were unable to make a nontrivial graph, ie it is possible to
+ * get from the initial state to the final state without reading any
+ * predictable trigram.
+ */
+ if (trgmNFA.initState->fin)
+ return NULL;
+
+ /*
+ * Stage 3: Select color trigrams to expand. Fail if too many trigrams.
+ */
+ if (!selectColorTrigrams(&trgmNFA))
+ return NULL;
+
+ /*
+ * Stage 4: Expand color trigrams and pack graph into final
+ * representation.
+ */
+ trg = expandColorTrigrams(&trgmNFA, rcontext);
+
+ *graph = packGraph(&trgmNFA, rcontext);
+
+#ifdef TRGM_REGEXP_DEBUG
+ printTrgmPackedGraph(*graph, trg);
+#endif
+
+ return trg;
+}
+
+/*
+ * Main entry point for evaluating a graph during index scanning.
+ *
+ * The check[] array is indexed by trigram number (in the array of simple
+ * trigrams returned by createTrgmNFA), and holds TRUE for those trigrams
+ * that are present in the index entry being checked.
+ */
+bool
+trigramsMatchGraph(TrgmPackedGraph *graph, bool *check)
+{
+ int i,
+ j,
+ k,
+ queueIn,
+ queueOut;
+
+ /*
+ * Reset temporary working areas.
+ */
+ memset(graph->colorTrigramsActive, 0,
+ sizeof(bool) * graph->colorTrigramsCount);
+ memset(graph->statesActive, 0, sizeof(bool) * graph->statesCount);
+
+ /*
+ * Check which color trigrams were matched. A match for any simple
+ * trigram associated with a color trigram counts as a match of the color
+ * trigram.
+ */
+ j = 0;
+ for (i = 0; i < graph->colorTrigramsCount; i++)
+ {
+ int cnt = graph->colorTrigramGroups[i];
+
+ for (k = j; k < j + cnt; k++)
+ {
+ if (check[k])
+ {
+ /*
+ * Found one matched trigram in the group. Can skip the rest
+ * of them and go to the next group.
+ */
+ graph->colorTrigramsActive[i] = true;
+ break;
+ }
+ }
+ j = j + cnt;
+ }
+
+ /*
+ * Initialize the statesQueue to hold just the initial state. Note:
+ * statesQueue has room for statesCount entries, which is certainly enough
+ * since no state will be put in the queue more than once. The
+ * statesActive array marks which states have been queued.
+ */
+ graph->statesActive[0] = true;
+ graph->statesQueue[0] = 0;
+ queueIn = 0;
+ queueOut = 1;
+
+ /* Process queued states as long as there are any. */
+ while (queueIn < queueOut)
+ {
+ int stateno = graph->statesQueue[queueIn++];
+ TrgmPackedState *state = &graph->states[stateno];
+ int cnt = state->arcsCount;
+
+ /* Loop over state's out-arcs */
+ for (i = 0; i < cnt; i++)
+ {
+ TrgmPackedArc *arc = &state->arcs[i];
+
+ /*
+ * If corresponding color trigram is present then activate the
+ * corresponding state. We're done if that's the final state,
+ * otherwise queue the state if it's not been queued already.
+ */
+ if (graph->colorTrigramsActive[arc->colorTrgm])
+ {
+ int nextstate = arc->targetState;
+
+ if (nextstate == 1)
+ return true; /* success: final state is reachable */
+
+ if (!graph->statesActive[nextstate])
+ {
+ graph->statesActive[nextstate] = true;
+ graph->statesQueue[queueOut++] = nextstate;
+ }
+ }
+ }
+ }
+
+ /* Queue is empty, so match fails. */
+ return false;
+}
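For illustration, a tiny hand-built graph exercising trigramsMatchGraph(): initial state 0 reaches final state 1 through state 2 only if both color trigrams 0 and 1 were seen in the index entry. Real graphs are of course produced by packGraph() below; this is only a usage sketch.

static bool
trigrams_match_graph_demo(bool saw_trgm0, bool saw_trgm1)
{
	TrgmPackedArc arc0 = {2, 0};	/* state 0 -> state 2 via color trigram 0 */
	TrgmPackedArc arc1 = {1, 1};	/* state 2 -> state 1 via color trigram 1 */
	TrgmPackedState states[3];
	int			groups[2] = {1, 1};	/* one simple trigram per color trigram */
	bool		ctActive[2];
	bool		stActive[3];
	int			stQueue[3];
	bool		check[2];
	TrgmPackedGraph g;

	states[0].arcsCount = 1;		/* state 0: initial */
	states[0].arcs = &arc0;
	states[1].arcsCount = 0;		/* state 1: final, no out-arcs */
	states[1].arcs = NULL;
	states[2].arcsCount = 1;		/* state 2: intermediate */
	states[2].arcs = &arc1;

	g.colorTrigramsCount = 2;
	g.colorTrigramGroups = groups;
	g.statesCount = 3;
	g.states = states;
	g.colorTrigramsActive = ctActive;
	g.statesActive = stActive;
	g.statesQueue = stQueue;

	check[0] = saw_trgm0;
	check[1] = saw_trgm1;
	return trigramsMatchGraph(&g, check);
}

Only trigrams_match_graph_demo(true, true) returns true; clearing either flag disconnects the path to the final state and the call returns false.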
+
+/*
+ * Compile regex string into struct at *regex.
+ * NB: pg_regfree must be applied to regex if this completes successfully.
+ */
+static void
+RE_compile(regex_t *regex, text *text_re, int cflags, Oid collation)
+{
+ int text_re_len = VARSIZE_ANY_EXHDR(text_re);
+ char *text_re_val = VARDATA_ANY(text_re);
+ pg_wchar *pattern;
+ int pattern_len;
+ int regcomp_result;
+ char errMsg[100];
+
+ /* Convert pattern string to wide characters */
+ pattern = (pg_wchar *) palloc((text_re_len + 1) * sizeof(pg_wchar));
+ pattern_len = pg_mb2wchar_with_len(text_re_val,
+ pattern,
+ text_re_len);
+
+ /* Compile regex */
+ regcomp_result = pg_regcomp(regex,
+ pattern,
+ pattern_len,
+ cflags,
+ collation);
+
+ pfree(pattern);
+
+ if (regcomp_result != REG_OKAY)
+ {
+ /* re didn't compile (no need for pg_regfree, if so) */
+ pg_regerror(regcomp_result, regex, errMsg, sizeof(errMsg));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
+ errmsg("invalid regular expression: %s", errMsg)));
+ }
+}
+
+
+/*---------------------
+ * Subroutines for pre-processing the color map (stage 1).
+ *---------------------
+ */
+
+/*
+ * Fill TrgmColorInfo structure for each color using regex export functions.
+ */
+static void
+getColorInfo(regex_t *regex, TrgmNFA *trgmNFA)
+{
+ int colorsCount = pg_reg_getnumcolors(regex);
+ int i;
+
+ trgmNFA->ncolors = colorsCount;
+ trgmNFA->colorInfo = (TrgmColorInfo *)
+ palloc0(colorsCount * sizeof(TrgmColorInfo));
+
+ /*
+ * Loop over colors, filling TrgmColorInfo about each.
+ */
+ for (i = 0; i < colorsCount; i++)
+ {
+ TrgmColorInfo *colorInfo = &trgmNFA->colorInfo[i];
+ int charsCount = pg_reg_getnumcharacters(regex, i);
+ pg_wchar *chars;
+ int j;
+
+ if (charsCount < 0 || charsCount > COLOR_COUNT_LIMIT)
+ {
+ /* Non-expandable, or too large to work with */
+ colorInfo->expandable = false;
+ continue;
+ }
+
+ colorInfo->expandable = true;
+ colorInfo->containsNonWord = false;
+ colorInfo->wordChars = (trgm_mb_char *)
+ palloc(sizeof(trgm_mb_char) * charsCount);
+ colorInfo->wordCharsCount = 0;
+
+ /* Extract all the chars in this color */
+ chars = (pg_wchar *) palloc(sizeof(pg_wchar) * charsCount);
+ pg_reg_getcharacters(regex, i, chars, charsCount);
+
+ /*
+ * Convert characters back to multibyte form, and save only those that
+ * are word characters. Set "containsNonWord" if we find any non-word
+ * character. (Note: it'd probably be nicer to keep the chars in
+ * pg_wchar format for now, but ISWORDCHR wants to see multibyte.)
+ */
+ for (j = 0; j < charsCount; j++)
+ {
+ trgm_mb_char c;
+
+ if (!convertPgWchar(chars[j], &c))
+ continue; /* ok to ignore it altogether */
+ if (ISWORDCHR(c.bytes))
+ colorInfo->wordChars[colorInfo->wordCharsCount++] = c;
+ else
+ colorInfo->containsNonWord = true;
+ }
+
+ pfree(chars);
+ }
+}
+
+/*
+ * Convert pg_wchar to multibyte format.
+ * Returns false if the character should be ignored completely.
+ */
+static bool
+convertPgWchar(pg_wchar c, trgm_mb_char *result)
+{
+ /* "s" has enough space for a multibyte character and a trailing NUL */
+ char s[MAX_MULTIBYTE_CHAR_LEN + 1];
+
+ /*
+ * We can ignore the NUL character, since it can never appear in a PG text
+ * string. This avoids the need for various special cases when
+ * reconstructing trigrams.
+ */
+ if (c == 0)
+ return false;
+
+ /* Do the conversion, making sure the result is NUL-terminated */
+ memset(s, 0, sizeof(s));
+ pg_wchar2mb_with_len(&c, s, 1);
+
+ /*
+ * In IGNORECASE mode, we can ignore uppercase characters. We assume that
+ * the regex engine generated both uppercase and lowercase equivalents
+ * within each color, since we used the REG_ICASE option; so there's no
+ * need to process the uppercase version.
+ *
+ * XXX this code is dependent on the assumption that lowerstr() works the
+ * same as the regex engine's internal case folding machinery. Might be
+ * wiser to expose pg_wc_tolower and test whether c == pg_wc_tolower(c).
+ * On the other hand, the trigrams in the index were created using
+ * lowerstr(), so we're probably screwed if there's any incompatibility
+ * anyway.
+ */
+#ifdef IGNORECASE
+ {
+ char *lowerCased = lowerstr(s);
+
+ if (strcmp(lowerCased, s) != 0)
+ {
+ pfree(lowerCased);
+ return false;
+ }
+ pfree(lowerCased);
+ }
+#endif
+
+ /* Fill result with exactly MAX_MULTIBYTE_CHAR_LEN bytes */
+ strncpy(result->bytes, s, MAX_MULTIBYTE_CHAR_LEN);
+ return true;
+}
+
+
+/*---------------------
+ * Subroutines for expanding original NFA graph into a trigram graph (stage 2).
+ *---------------------
+ */
+
+/*
+ * Transform the graph, given a regex and extracted color information.
+ *
+ * We create and process a queue of expanded-graph states until all the states
+ * are processed.
+ *
+ * This algorithm may be stopped due to resource limitations. In this case we
+ * force every unprocessed branch to immediately finish with a match (this
+ * can give us false positives but no false negatives) by marking all
+ * unprocessed states as final.
+ */
+static void
+transformGraph(TrgmNFA *trgmNFA)
+{
+ HASHCTL hashCtl;
+ TrgmStateKey initkey;
+ TrgmState *initstate;
+
+ /* Initialize this stage's workspace in trgmNFA struct */
+ trgmNFA->queue = NIL;
+ trgmNFA->keysQueue = NIL;
+ trgmNFA->arcsCount = 0;
+ trgmNFA->overflowed = false;
+
+ /* Create hashtable for states */
+ hashCtl.keysize = sizeof(TrgmStateKey);
+ hashCtl.entrysize = sizeof(TrgmState);
+ hashCtl.hcxt = CurrentMemoryContext;
+ hashCtl.hash = tag_hash;
+ trgmNFA->states = hash_create("Trigram NFA",
+ 1024,
+ &hashCtl,
+ HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION);
+
+ /* Create initial state: ambiguous prefix, NFA's initial state */
+ MemSet(&initkey, 0, sizeof(initkey));
+ initkey.prefix.colors[0] = COLOR_UNKNOWN;
+ initkey.prefix.colors[1] = COLOR_UNKNOWN;
+ initkey.nstate = pg_reg_getinitialstate(trgmNFA->regex);
+
+ initstate = getState(trgmNFA, &initkey);
+ initstate->init = true;
+ trgmNFA->initState = initstate;
+
+ /*
+ * Recursively build the expanded graph by processing queue of states
+ * (breadth-first search). getState already put initstate in the queue.
+ */
+ while (trgmNFA->queue != NIL)
+ {
+ TrgmState *state = (TrgmState *) linitial(trgmNFA->queue);
+
+ trgmNFA->queue = list_delete_first(trgmNFA->queue);
+
+ /*
+ * If we overflowed then just mark state as final. Otherwise do
+ * actual processing.
+ */
+ if (trgmNFA->overflowed)
+ state->fin = true;
+ else
+ processState(trgmNFA, state);
+
+ /* Did we overflow? */
+ if (trgmNFA->arcsCount > MAX_EXPANDED_ARCS ||
+ hash_get_num_entries(trgmNFA->states) > MAX_EXPANDED_STATES)
+ trgmNFA->overflowed = true;
+ }
+}
+
+/*
+ * Process one state: add enter keys and then add outgoing arcs.
+ */
+static void
+processState(TrgmNFA *trgmNFA, TrgmState *state)
+{
+ /* keysQueue should be NIL already, but make sure */
+ trgmNFA->keysQueue = NIL;
+
+ /*
+ * Add state's own key, and then process all keys added to keysQueue until
+ * queue is empty. But we can quit if the state gets marked final.
+ */
+ addKey(trgmNFA, state, &state->stateKey);
+ while (trgmNFA->keysQueue != NIL && !state->fin)
+ {
+ TrgmStateKey *key = (TrgmStateKey *) linitial(trgmNFA->keysQueue);
+
+ trgmNFA->keysQueue = list_delete_first(trgmNFA->keysQueue);
+ addKey(trgmNFA, state, key);
+ }
+
+ /*
+ * Add outgoing arcs only if state isn't final (we have no interest in
+ * outgoing arcs if we already match)
+ */
+ if (!state->fin)
+ addArcs(trgmNFA, state);
+}
+
+/*
+ * Add the given enter key into the state's enterKeys list, and determine
+ * whether this should result in any further enter keys being added.
+ * If so, add those keys to keysQueue so that processState will handle them.
+ *
+ * If the enter key is for the NFA's final state, set state->fin = TRUE.
+ * This situation means that we can reach the final state from this expanded
+ * state without reading any predictable trigram, so we must consider this
+ * state as an accepting one.
+ *
+ * The given key could be a duplicate of one already in enterKeys, or be
+ * redundant with some enterKeys. So we check that before doing anything.
+ *
+ * Note that we don't generate any actual arcs here. addArcs will do that
+ * later, after we have identified all the enter keys for this state.
+ */
+static void
+addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key)
+{
+ regex_arc_t *arcs;
+ TrgmStateKey destKey;
+ ListCell *cell,
+ *prev,
+ *next;
+ int i,
+ arcsCount;
+
+ /*
+ * Ensure any pad bytes in destKey are zero, since it may get used as a
+ * hashtable key by getState.
+ */
+ MemSet(&destKey, 0, sizeof(destKey));
+
+ /*
+ * Compare key to each existing enter key of the state to check for
+ * redundancy. We can drop either old key(s) or the new key if we find
+ * redundancy.
+ */
+ prev = NULL;
+ cell = list_head(state->enterKeys);
+ while (cell)
+ {
+ TrgmStateKey *existingKey = (TrgmStateKey *) lfirst(cell);
+
+ next = lnext(cell);
+ if (existingKey->nstate == key->nstate)
+ {
+ if (prefixContains(&existingKey->prefix, &key->prefix))
+ {
+ /* This old key already covers the new key. Nothing to do */
+ return;
+ }
+ if (prefixContains(&key->prefix, &existingKey->prefix))
+ {
+ /*
+ * The new key covers this old key. Remove the old key; it's
+ * no longer needed once we add this key to the list.
+ */
+ state->enterKeys = list_delete_cell(state->enterKeys,
+ cell, prev);
+ }
+ else
+ prev = cell;
+ }
+ else
+ prev = cell;
+ cell = next;
+ }
+
+ /* No redundancy, so add this key to the state's list */
+ state->enterKeys = lappend(state->enterKeys, key);
+
+ /* If state is now known final, mark it and we're done */
+ if (key->nstate == pg_reg_getfinalstate(trgmNFA->regex))
+ {
+ state->fin = true;
+ return;
+ }
+
+ /*
+ * Loop through all outgoing arcs of the corresponding state in the
+ * original NFA.
+ */
+ arcsCount = pg_reg_getnumoutarcs(trgmNFA->regex, key->nstate);
+ arcs = (regex_arc_t *) palloc(sizeof(regex_arc_t) * arcsCount);
+ pg_reg_getoutarcs(trgmNFA->regex, key->nstate, arcs, arcsCount);
+
+ for (i = 0; i < arcsCount; i++)
+ {
+ regex_arc_t *arc = &arcs[i];
+
+ if (pg_reg_colorisbegin(trgmNFA->regex, arc->co))
+ {
+ /*
+ * Start of line/string (^). Trigram extraction treats start of
+ * line same as start of word: double space prefix is added.
+ * Hence, make an enter key showing we can reach the arc
+ * destination with all-blank prefix.
+ */
+ destKey.prefix.colors[0] = COLOR_BLANK;
+ destKey.prefix.colors[1] = COLOR_BLANK;
+ destKey.nstate = arc->to;
+
+ /* Add enter key to this state */
+ addKeyToQueue(trgmNFA, &destKey);
+ }
+ else if (pg_reg_colorisend(trgmNFA->regex, arc->co))
+ {
+ /*
+ * End of line/string ($). We must consider this arc as a
+ * transition that doesn't read anything. The reason for adding
+ * this enter key to the state is that if the arc leads to the
+ * NFA's final state, we must mark this expanded state as final.
+ */
+ destKey.prefix.colors[0] = COLOR_UNKNOWN;
+ destKey.prefix.colors[1] = COLOR_UNKNOWN;
+ destKey.nstate = arc->to;
+
+ /* Add enter key to this state */
+ addKeyToQueue(trgmNFA, &destKey);
+ }
+ else
+ {
+ /* Regular color */
+ TrgmColorInfo *colorInfo = &trgmNFA->colorInfo[arc->co];
+
+ if (colorInfo->expandable)
+ {
+ if (colorInfo->containsNonWord &&
+ !validArcLabel(key, COLOR_BLANK))
+ {
+ /*
+ * We can reach the arc destination after reading a
+ * non-word character, but the prefix is not something
+ * that addArc will accept with COLOR_BLANK, so no trigram
+ * arc can get made for this transition. We must make an
+ * enter key to show that the arc destination is
+ * reachable. Set it up with an all-blank prefix, since
+ * that corresponds to what the trigram extraction code
+ * will do at a word starting boundary.
+ */
+ destKey.prefix.colors[0] = COLOR_BLANK;
+ destKey.prefix.colors[1] = COLOR_BLANK;
+ destKey.nstate = arc->to;
+ addKeyToQueue(trgmNFA, &destKey);
+ }
+
+ if (colorInfo->wordCharsCount > 0 &&
+ !validArcLabel(key, arc->co))
+ {
+ /*
+ * We can reach the arc destination after reading a word
+ * character, but the prefix is not something that addArc
+ * will accept, so no trigram arc can get made for this
+ * transition. We must make an enter key to show that the
+ * arc destination is reachable. The prefix for the enter
+ * key should reflect the info we have for this arc.
+ */
+ destKey.prefix.colors[0] = key->prefix.colors[1];
+ destKey.prefix.colors[1] = arc->co;
+ destKey.nstate = arc->to;
+ addKeyToQueue(trgmNFA, &destKey);
+ }
+ }
+ else
+ {
+ /*
+ * Unexpandable color. Add enter key with ambiguous prefix,
+ * showing we can reach the destination from this state, but
+ * the preceding colors will be uncertain. (We do not set the
+ * first prefix color to key->prefix.colors[1], because a
+ * prefix of known followed by unknown is invalid.)
+ */
+ destKey.prefix.colors[0] = COLOR_UNKNOWN;
+ destKey.prefix.colors[1] = COLOR_UNKNOWN;
+ destKey.nstate = arc->to;
+ addKeyToQueue(trgmNFA, &destKey);
+ }
+ }
+ }
+
+ pfree(arcs);
+}
+
+/*
+ * Add copy of given key to keysQueue for later processing.
+ */
+static void
+addKeyToQueue(TrgmNFA *trgmNFA, TrgmStateKey *key)
+{
+ TrgmStateKey *keyCopy = (TrgmStateKey *) palloc(sizeof(TrgmStateKey));
+
+ memcpy(keyCopy, key, sizeof(TrgmStateKey));
+ trgmNFA->keysQueue = lappend(trgmNFA->keysQueue, keyCopy);
+}
+
+/*
+ * Add outgoing arcs from given state, whose enter keys are all now known.
+ */
+static void
+addArcs(TrgmNFA *trgmNFA, TrgmState *state)
+{
+ TrgmStateKey destKey;
+ ListCell *cell;
+ regex_arc_t *arcs;
+ int arcsCount,
+ i;
+
+ /*
+ * Ensure any pad bytes in destKey are zero, since it may get used as a
+ * hashtable key by getState.
+ */
+ MemSet(&destKey, 0, sizeof(destKey));
+
+ /*
+ * Iterate over enter keys associated with this expanded-graph state. This
+ * includes both the state's own stateKey, and any enter keys we added to
+ * it during addKey (which represent expanded-graph states that are not
+ * distinguishable from this one by means of trigrams). For each such
+ * enter key, examine all the out-arcs of the key's underlying NFA state,
+ * and try to make a trigram arc leading to where the out-arc leads.
+ * (addArc will deal with whether the arc is valid or not.)
+ */
+ foreach(cell, state->enterKeys)
+ {
+ TrgmStateKey *key = (TrgmStateKey *) lfirst(cell);
+
+ arcsCount = pg_reg_getnumoutarcs(trgmNFA->regex, key->nstate);
+ arcs = (regex_arc_t *) palloc(sizeof(regex_arc_t) * arcsCount);
+ pg_reg_getoutarcs(trgmNFA->regex, key->nstate, arcs, arcsCount);
+
+ for (i = 0; i < arcsCount; i++)
+ {
+ regex_arc_t *arc = &arcs[i];
+ TrgmColorInfo *colorInfo = &trgmNFA->colorInfo[arc->co];
+
+ /*
+ * Ignore non-expandable colors; addKey already handled the case.
+ *
+ * We need no special check for begin/end pseudocolors here. We
+ * don't need to do any processing for them, and they will be
+ * marked non-expandable since the regex engine will have reported
+ * them that way.
+ */
+ if (!colorInfo->expandable)
+ continue;
+
+ if (colorInfo->containsNonWord)
+ {
+ /*
+ * Color includes non-word character(s).
+ *
+ * Generate an arc, treating this transition as occurring on
+ * BLANK. This allows word-ending trigrams to be manufactured
+ * if possible.
+ */
+ destKey.prefix.colors[0] = key->prefix.colors[1];
+ destKey.prefix.colors[1] = COLOR_BLANK;
+ destKey.nstate = arc->to;
+
+ addArc(trgmNFA, state, key, COLOR_BLANK, &destKey);
+ }
+
+ if (colorInfo->wordCharsCount > 0)
+ {
+ /*
+ * Color includes word character(s).
+ *
+ * Generate an arc. Color is pushed into prefix of target
+ * state.
+ */
+ destKey.prefix.colors[0] = key->prefix.colors[1];
+ destKey.prefix.colors[1] = arc->co;
+ destKey.nstate = arc->to;
+
+ addArc(trgmNFA, state, key, arc->co, &destKey);
+ }
+ }
+
+ pfree(arcs);
+ }
+}
+
+/*
+ * Generate an out-arc of the expanded graph, if it's valid and not redundant.
+ *
+ * state: expanded-graph state we want to add an out-arc to
+ * key: provides prefix colors (key->nstate is not used)
+ * co: transition color
+ * destKey: identifier for destination state of expanded graph
+ */
+static void
+addArc(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key,
+ TrgmColor co, TrgmStateKey *destKey)
+{
+ TrgmArc *arc;
+ ListCell *cell;
+
+ /* Do nothing if this wouldn't be a valid arc label trigram */
+ if (!validArcLabel(key, co))
+ return;
+
+ /*
+ * Check if the destination key is covered by a key that is already listed
+ * in this state. If so, the arc is useless: the NFA can bypass it through
+ * a path that doesn't require any predictable trigram, so whether the
+ * arc's trigram is present or not doesn't really matter.
+ */
+ foreach(cell, state->enterKeys)
+ {
+ TrgmStateKey *existingKey = (TrgmStateKey *) lfirst(cell);
+
+ if (existingKey->nstate == destKey->nstate &&
+ prefixContains(&existingKey->prefix, &destKey->prefix))
+ return;
+ }
+
+ /* Checks were successful, add new arc */
+ arc = (TrgmArc *) palloc(sizeof(TrgmArc));
+ arc->target = getState(trgmNFA, destKey);
+ arc->ctrgm.colors[0] = key->prefix.colors[0];
+ arc->ctrgm.colors[1] = key->prefix.colors[1];
+ arc->ctrgm.colors[2] = co;
+
+ state->arcs = lappend(state->arcs, arc);
+ trgmNFA->arcsCount++;
+}
+
+/*
+ * Can we make a valid trigram arc label from the given prefix and arc color?
+ *
+ * This is split out so that tests in addKey and addArc will stay in sync.
+ */
+static bool
+validArcLabel(TrgmStateKey *key, TrgmColor co)
+{
+ /*
+ * We have to know the full trigram in order to add an outgoing arc, so we
+ * can't do it if the prefix is ambiguous.
+ */
+ if (key->prefix.colors[0] == COLOR_UNKNOWN)
+ return false;
+
+ /* If key->prefix.colors[0] isn't unknown, its second color isn't either */
+ Assert(key->prefix.colors[1] != COLOR_UNKNOWN);
+ /* And we should not be called with an unknown arc color anytime */
+ Assert(co != COLOR_UNKNOWN);
+
+ /*
+ * We don't bother with making arcs representing three non-word
+ * characters, since that's useless for trigram extraction.
+ */
+ if (key->prefix.colors[0] == COLOR_BLANK &&
+ key->prefix.colors[1] == COLOR_BLANK &&
+ co == COLOR_BLANK)
+ return false;
+
+ /*
+ * We also reject nonblank-blank-anything. The nonblank-blank-nonblank
+ * case doesn't correspond to any trigram the trigram extraction code
+ * would make. The nonblank-blank-blank case is also not possible with
+ * RPADDING = 1. (Note that in many cases we'd fail to generate such a
+ * trigram even if it were valid, for example processing "foo bar" will
+ * not result in considering the trigram "o ". So if you want to support
+ * RPADDING = 2, there's more to do than just twiddle this test.)
+ */
+ if (key->prefix.colors[0] != COLOR_BLANK &&
+ key->prefix.colors[1] == COLOR_BLANK)
+ return false;
+
+ /*
+ * Other combinations involving blank are valid, in particular we assume
+ * blank-blank-nonblank is valid, which presumes that LPADDING is 2.
+ *
+ * Note: Using again the example "foo bar", we will not consider the
+ * trigram " b", though this trigram would be found by the trigram
+ * extraction code. Since we will find " ba", it doesn't seem worth
+ * trying to hack the algorithm to generate the additional trigram.
+ */
+
+ /* arc label is valid */
+ return true;
+}
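
A stand-alone enumeration (toy enum, hypothetical names) of the blank/non-blank cases that the tests above accept or reject, assuming LPADDING = 2 and RPADDING = 1 as stated in the comments.

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum {UNKNOWN, BLANK, NONBLANK} Col;

    static bool
    valid(Col p0, Col p1, Col co)
    {
        if (p0 == UNKNOWN)
            return false;       /* ambiguous prefix: no full trigram known */
        if (p0 == BLANK && p1 == BLANK && co == BLANK)
            return false;       /* three blanks are useless */
        if (p0 != BLANK && p1 == BLANK)
            return false;       /* nonblank-blank-anything is never extracted */
        return true;
    }

    int
    main(void)
    {
        static const char *name[] = {"unknown", "blank", "nonblank"};
        Col         p0, p1, co;

        for (p0 = BLANK; p0 <= NONBLANK; p0++)
            for (p1 = BLANK; p1 <= NONBLANK; p1++)
                for (co = BLANK; co <= NONBLANK; co++)
                    printf("%-8s %-8s %-8s -> %s\n",
                           name[p0], name[p1], name[co],
                           valid(p0, p1, co) ? "valid" : "rejected");
        return 0;
    }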
+
+/*
+ * Get state of expanded graph for given state key,
+ * and queue the state for processing if it didn't already exist.
+ */
+static TrgmState *
+getState(TrgmNFA *trgmNFA, TrgmStateKey *key)
+{
+ TrgmState *state;
+ bool found;
+
+ state = (TrgmState *) hash_search(trgmNFA->states, key, HASH_ENTER,
+ &found);
+ if (!found)
+ {
+ /* New state: initialize and queue it */
+ state->arcs = NIL;
+ state->enterKeys = NIL;
+ state->init = false;
+ state->fin = false;
+ state->parent = NULL;
+ state->children = NIL;
+ state->number = -1;
+
+ trgmNFA->queue = lappend(trgmNFA->queue, state);
+ }
+ return state;
+}
+
+/*
+ * Check if prefix1 "contains" prefix2.
+ *
+ * "contains" means that any exact prefix (with no ambiguity) that satisfies
+ * prefix2 also satisfies prefix1.
+ */
+static bool
+prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2)
+{
+ if (prefix1->colors[1] == COLOR_UNKNOWN)
+ {
+ /* Fully ambiguous prefix contains everything */
+ return true;
+ }
+ else if (prefix1->colors[0] == COLOR_UNKNOWN)
+ {
+ /*
+ * A prefix with only the first color unknown contains every prefix with
+ * the same second color.
+ */
+ if (prefix1->colors[1] == prefix2->colors[1])
+ return true;
+ else
+ return false;
+ }
+ else
+ {
+ /* Exact prefix contains only the exact same prefix */
+ if (prefix1->colors[0] == prefix2->colors[0] &&
+ prefix1->colors[1] == prefix2->colors[1])
+ return true;
+ else
+ return false;
+ }
+}
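
A stand-alone sketch of the same containment relation with prefixes reduced to two ints, where -1 plays the role of COLOR_UNKNOWN (hypothetical names, purely illustrative).

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    contains(const int a[2], const int b[2])
    {
        if (a[1] == -1)
            return true;                    /* fully ambiguous covers anything */
        if (a[0] == -1)
            return a[1] == b[1];            /* only the second color is pinned down */
        return a[0] == b[0] && a[1] == b[1];    /* exact covers only itself */
    }

    int
    main(void)
    {
        int         anything[2] = {-1, -1};
        int         ends_in_7[2] = {-1, 7};
        int         exact_5_7[2] = {5, 7};
        int         exact_6_7[2] = {6, 7};

        printf("%d %d %d %d\n",
               contains(anything, exact_5_7),   /* 1 */
               contains(ends_in_7, exact_5_7),  /* 1 */
               contains(exact_5_7, exact_6_7),  /* 0 */
               contains(exact_5_7, exact_5_7)); /* 1 */
        return 0;
    }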
+
+
+/*---------------------
+ * Subroutines for expanding color trigrams into regular trigrams (stage 3).
+ *---------------------
+ */
+
+/*
+ * Get vector of all color trigrams in graph and select which of them
+ * to expand into simple trigrams.
+ *
+ * Returns TRUE if OK, FALSE if exhausted resource limits.
+ */
+static bool
+selectColorTrigrams(TrgmNFA *trgmNFA)
+{
+ HASH_SEQ_STATUS scan_status;
+ int arcsCount = trgmNFA->arcsCount,
+ i;
+ TrgmState *state;
+ ColorTrgmInfo *colorTrgms;
+ int64 totalTrgmCount;
+ float4 totalTrgmPenalty;
+ int number;
+
+ /* Collect color trigrams from all arcs */
+ colorTrgms = (ColorTrgmInfo *) palloc(sizeof(ColorTrgmInfo) * arcsCount);
+ trgmNFA->colorTrgms = colorTrgms;
+
+ i = 0;
+ hash_seq_init(&scan_status, trgmNFA->states);
+ while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+ {
+ ListCell *cell;
+
+ foreach(cell, state->arcs)
+ {
+ TrgmArc *arc = (TrgmArc *) lfirst(cell);
+ TrgmArcInfo *arcInfo = (TrgmArcInfo *) palloc(sizeof(TrgmArcInfo));
+
+ arcInfo->source = state;
+ arcInfo->target = arc->target;
+ colorTrgms[i].arcs = list_make1(arcInfo);
+ colorTrgms[i].expanded = true;
+ colorTrgms[i].ctrgm = arc->ctrgm;
+ i++;
+ }
+ }
+ Assert(i == arcsCount);
+
+ /* Remove duplicates, merging their arcs lists */
+ if (arcsCount >= 2)
+ {
+ ColorTrgmInfo *p1,
+ *p2;
+
+ /* Sort trigrams to ease duplicate detection */
+ qsort(colorTrgms, arcsCount, sizeof(ColorTrgmInfo), colorTrgmInfoCmp);
+
+ /* p1 is probe point, p2 is last known non-duplicate. */
+ p2 = colorTrgms;
+ for (p1 = colorTrgms + 1; p1 < colorTrgms + arcsCount; p1++)
+ {
+ if (colorTrgmInfoCmp(p1, p2) > 0)
+ {
+ p2++;
+ *p2 = *p1;
+ }
+ else
+ {
+ p2->arcs = list_concat(p2->arcs, p1->arcs);
+ }
+ }
+ trgmNFA->colorTrgmsCount = (p2 - colorTrgms) + 1;
+ }
+ else
+ {
+ trgmNFA->colorTrgmsCount = arcsCount;
+ }
+
+ /*
+ * Count number of simple trigrams generated by each color trigram, and
+ * also compute a penalty value, which is the number of simple trigrams
+ * times a multiplier that depends on its whitespace content.
+ *
+ * Note: per-color-trigram counts cannot overflow an int so long as
+ * COLOR_COUNT_LIMIT is not more than the cube root of INT_MAX, ie about
+ * 1290. However, the grand total totalTrgmCount might conceivably
+ * overflow an int, so we use int64 for that within this routine. Also,
+ * penalties are calculated in float4 arithmetic to avoid any overflow
+ * worries.
+ */
+ totalTrgmCount = 0;
+ totalTrgmPenalty = 0.0f;
+ for (i = 0; i < trgmNFA->colorTrgmsCount; i++)
+ {
+ ColorTrgmInfo *trgmInfo = &colorTrgms[i];
+ int j,
+ count = 1,
+ typeIndex = 0;
+
+ for (j = 0; j < 3; j++)
+ {
+ TrgmColor c = trgmInfo->ctrgm.colors[j];
+
+ typeIndex *= 2;
+ if (c == COLOR_BLANK)
+ typeIndex++;
+ else
+ count *= trgmNFA->colorInfo[c].wordCharsCount;
+ }
+ trgmInfo->count = count;
+ totalTrgmCount += count;
+ trgmInfo->penalty = penalties[typeIndex] * (float4) count;
+ totalTrgmPenalty += trgmInfo->penalty;
+ }
+
+ /* Sort color trigrams in descending order of their penalties */
+ qsort(colorTrgms, trgmNFA->colorTrgmsCount, sizeof(ColorTrgmInfo),
+ colorTrgmInfoPenaltyCmp);
+
+ /*
+ * Remove color trigrams from the graph so long as total penalty of color
+ * trigrams exceeds WISH_TRGM_PENALTY. (If we fail to get down to
+ * WISH_TRGM_PENALTY, it's OK so long as total count is no more than
+ * MAX_TRGM_COUNT.) We prefer to remove color trigrams with higher
+ * penalty, since those are the most promising for reducing the total
+ * penalty. When removing a color trigram we have to merge states
+ * connected by arcs labeled with that trigram. It's necessary to not
+ * merge initial and final states, because our graph becomes useless if
+ * that happens; so we cannot always remove the trigram we'd prefer to.
+ */
+ for (i = 0; i < trgmNFA->colorTrgmsCount; i++)
+ {
+ ColorTrgmInfo *trgmInfo = &colorTrgms[i];
+ bool canRemove = true;
+ ListCell *cell;
+
+ /* Done if we've reached the target */
+ if (totalTrgmPenalty <= WISH_TRGM_PENALTY)
+ break;
+
+ /*
+ * Does any arc of this color trigram connect initial and final
+ * states? If so we can't remove it.
+ */
+ foreach(cell, trgmInfo->arcs)
+ {
+ TrgmArcInfo *arcInfo = (TrgmArcInfo *) lfirst(cell);
+ TrgmState *source = arcInfo->source,
+ *target = arcInfo->target;
+
+ /* examine parent states, if any merging has already happened */
+ while (source->parent)
+ source = source->parent;
+ while (target->parent)
+ target = target->parent;
+
+ if ((source->init || target->init) &&
+ (source->fin || target->fin))
+ {
+ canRemove = false;
+ break;
+ }
+ }
+ if (!canRemove)
+ continue;
+
+ /* OK, merge states linked by each arc labeled by the trigram */
+ foreach(cell, trgmInfo->arcs)
+ {
+ TrgmArcInfo *arcInfo = (TrgmArcInfo *) lfirst(cell);
+ TrgmState *source = arcInfo->source,
+ *target = arcInfo->target;
+
+ while (source->parent)
+ source = source->parent;
+ while (target->parent)
+ target = target->parent;
+ if (source != target)
+ mergeStates(source, target);
+ }
+
+ /* Mark trigram unexpanded, and update totals */
+ trgmInfo->expanded = false;
+ totalTrgmCount -= trgmInfo->count;
+ totalTrgmPenalty -= trgmInfo->penalty;
+ }
+
+ /* Did we succeed in fitting into MAX_TRGM_COUNT? */
+ if (totalTrgmCount > MAX_TRGM_COUNT)
+ return false;
+
+ trgmNFA->totalTrgmCount = (int) totalTrgmCount;
+
+ /*
+ * Sort color trigrams by colors (will be useful for bsearch in packGraph)
+ * and enumerate the color trigrams that are expanded.
+ */
+ number = 0;
+ qsort(colorTrgms, trgmNFA->colorTrgmsCount, sizeof(ColorTrgmInfo),
+ colorTrgmInfoCmp);
+ for (i = 0; i < trgmNFA->colorTrgmsCount; i++)
+ {
+ if (colorTrgms[i].expanded)
+ {
+ colorTrgms[i].number = number;
+ number++;
+ }
+ }
+
+ return true;
+}
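
A stand-alone sketch of the greedy trimming step described above, with color trigrams reduced to bare penalty values and the init/final-merge restriction reduced to a boolean veto (hypothetical names and numbers): items are dropped in descending penalty order until the total falls under a target.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Item
    {
        float       penalty;
        int         removable;      /* stands in for the init/final-merge veto */
        int         kept;
    } Item;

    static int
    penalty_desc(const void *a, const void *b)
    {
        float       pa = ((const Item *) a)->penalty;
        float       pb = ((const Item *) b)->penalty;

        return (pa < pb) - (pa > pb);   /* descending */
    }

    int
    main(void)
    {
        Item        items[] = {
            {9.0f, 1, 1}, {7.5f, 0, 1}, {4.0f, 1, 1}, {2.0f, 1, 1}, {1.0f, 1, 1}
        };
        int         n = sizeof(items) / sizeof(items[0]);
        float       total = 0.0f,
                    target = 8.0f;
        int         i;

        for (i = 0; i < n; i++)
            total += items[i].penalty;

        qsort(items, n, sizeof(Item), penalty_desc);
        for (i = 0; i < n && total > target; i++)
        {
            if (!items[i].removable)
                continue;           /* cannot remove this one, try the next */
            items[i].kept = 0;
            total -= items[i].penalty;
        }

        for (i = 0; i < n; i++)
            printf("penalty %.1f kept=%d\n", items[i].penalty, items[i].kept);
        printf("total penalty now %.1f (target %.1f)\n", total, target);
        return 0;
    }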
+
+/*
+ * Expand selected color trigrams into regular trigrams.
+ *
+ * Returns the TRGM array to be passed to the index machinery.
+ * The array must be allocated in rcontext.
+ */
+static TRGM *
+expandColorTrigrams(TrgmNFA *trgmNFA, MemoryContext rcontext)
+{
+ TRGM *trg;
+ trgm *p;
+ int i;
+ TrgmColorInfo blankColor;
+ trgm_mb_char blankChar;
+
+ /* Set up "blank" color structure containing a single zero character */
+ memset(blankChar.bytes, 0, sizeof(blankChar.bytes));
+ blankColor.wordCharsCount = 1;
+ blankColor.wordChars = &blankChar;
+
+ /* Construct the trgm array */
+ trg = (TRGM *)
+ MemoryContextAllocZero(rcontext,
+ TRGMHDRSIZE +
+ trgmNFA->totalTrgmCount * sizeof(trgm));
+ trg->flag = ARRKEY;
+ SET_VARSIZE(trg, CALCGTSIZE(ARRKEY, trgmNFA->totalTrgmCount));
+ p = GETARR(trg);
+ for (i = 0; i < trgmNFA->colorTrgmsCount; i++)
+ {
+ ColorTrgmInfo *colorTrgm = &trgmNFA->colorTrgms[i];
+ TrgmColorInfo *c[3];
+ trgm_mb_char s[3];
+ int j,
+ i1,
+ i2,
+ i3;
+
+ /* Ignore any unexpanded trigrams ... */
+ if (!colorTrgm->expanded)
+ continue;
+
+ /* Get colors, substituting the dummy struct for COLOR_BLANK */
+ for (j = 0; j < 3; j++)
+ {
+ if (colorTrgm->ctrgm.colors[j] != COLOR_BLANK)
+ c[j] = &trgmNFA->colorInfo[colorTrgm->ctrgm.colors[j]];
+ else
+ c[j] = &blankColor;
+ }
+
+ /* Iterate over all possible combinations of colors' characters */
+ for (i1 = 0; i1 < c[0]->wordCharsCount; i1++)
+ {
+ s[0] = c[0]->wordChars[i1];
+ for (i2 = 0; i2 < c[1]->wordCharsCount; i2++)
+ {
+ s[1] = c[1]->wordChars[i2];
+ for (i3 = 0; i3 < c[2]->wordCharsCount; i3++)
+ {
+ s[2] = c[2]->wordChars[i3];
+ fillTrgm(p, s);
+ p++;
+ }
+ }
+ }
+ }
+
+ return trg;
+}
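
A stand-alone sketch of expanding one color trigram into plain trigrams: each color contributes a small character set (a blank color contributing the single character ' '), and the triple loop emits every combination, exactly n1 * n2 * n3 strings. Toy character sets, not part of the patch.

    #include <stdio.h>

    int
    main(void)
    {
        /* first "color" is blank, the other two are small word-character sets */
        const char *color[3] = {" ", "ab", "xyz"};
        int         i1,
                    i2,
                    i3,
                    n = 0;

        for (i1 = 0; color[0][i1]; i1++)
            for (i2 = 0; color[1][i2]; i2++)
                for (i3 = 0; color[2][i3]; i3++)
                {
                    printf("\"%c%c%c\" ",
                           color[0][i1], color[1][i2], color[2][i3]);
                    n++;
                }
        printf("\n%d trigrams = 1 * 2 * 3 combinations\n", n);
        return 0;
    }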
+
+/*
+ * Convert trigram into trgm datatype.
+ */
+static void
+fillTrgm(trgm *ptrgm, trgm_mb_char s[3])
+{
+ char str[3 * MAX_MULTIBYTE_CHAR_LEN],
+ *p;
+ int i,
+ j;
+
+ /* Write multibyte string into "str" (we don't need null termination) */
+ p = str;
+
+ for (i = 0; i < 3; i++)
+ {
+ if (s[i].bytes[0] != 0)
+ {
+ for (j = 0; j < MAX_MULTIBYTE_CHAR_LEN && s[i].bytes[j]; j++)
+ *p++ = s[i].bytes[j];
+ }
+ else
+ {
+ /* Emit a space in place of COLOR_BLANK */
+ *p++ = ' ';
+ }
+ }
+
+ /* Convert "str" to a standard trigram (possibly hashing it) */
+ compact_trigram(ptrgm, str, p - str);
+}
+
+/*
+ * Merge two states of graph.
+ */
+static void
+mergeStates(TrgmState *state1, TrgmState *state2)
+{
+ ListCell *cell;
+
+ Assert(state1 != state2);
+ Assert(!state1->parent);
+ Assert(!state2->parent);
+
+ /* state1 absorbs state2's init/fin flags */
+ state1->init |= state2->init;
+ state1->fin |= state2->fin;
+
+ /* state2, and all its children, become children of state1 */
+ foreach(cell, state2->children)
+ {
+ TrgmState *state = (TrgmState *) lfirst(cell);
+
+ state->parent = state1;
+ }
+ state2->parent = state1;
+ state1->children = list_concat(state1->children, state2->children);
+ state1->children = lappend(state1->children, state2);
+ state2->children = NIL;
+}
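
A stand-alone sketch of the parent-pointer merging used here, simplified to walk the parent chain on lookup instead of keeping the children lists flat (hypothetical names): each merged state points at the state that absorbed it, and the live representative is found by following parents.

    #include <stdio.h>

    typedef struct Node
    {
        int         id;
        struct Node *parent;        /* NULL while the node is still live */
    } Node;

    static Node *
    representative(Node *n)
    {
        while (n->parent)
            n = n->parent;
        return n;
    }

    static void
    merge(Node *a, Node *b)
    {
        a = representative(a);
        b = representative(b);
        if (a != b)
            b->parent = a;          /* b (and everything below it) maps to a */
    }

    int
    main(void)
    {
        Node        n[4] = {{0, NULL}, {1, NULL}, {2, NULL}, {3, NULL}};
        int         i;

        merge(&n[0], &n[1]);
        merge(&n[2], &n[3]);
        merge(&n[1], &n[3]);        /* merges the two groups via representatives */

        for (i = 0; i < 4; i++)
            printf("node %d -> representative %d\n",
                   i, representative(&n[i])->id);
        return 0;
    }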
+
+/*
+ * Compare function for sorting of color trigrams by their colors.
+ */
+static int
+colorTrgmInfoCmp(const void *p1, const void *p2)
+{
+ const ColorTrgmInfo *c1 = (const ColorTrgmInfo *) p1;
+ const ColorTrgmInfo *c2 = (const ColorTrgmInfo *) p2;
+
+ return memcmp(&c1->ctrgm, &c2->ctrgm, sizeof(ColorTrgm));
+}
+
+/*
+ * Compare function for sorting color trigrams in descending order of
+ * their penalty fields.
+ */
+static int
+colorTrgmInfoPenaltyCmp(const void *p1, const void *p2)
+{
+ float4 penalty1 = ((const ColorTrgmInfo *) p1)->penalty;
+ float4 penalty2 = ((const ColorTrgmInfo *) p2)->penalty;
+
+ if (penalty1 < penalty2)
+ return 1;
+ else if (penalty1 == penalty2)
+ return 0;
+ else
+ return -1;
+}
+
+
+/*---------------------
+ * Subroutines for packing the graph into final representation (stage 4).
+ *---------------------
+ */
+
+/*
+ * Pack expanded graph into final representation.
+ *
+ * The result data must be allocated in rcontext.
+ */
+static TrgmPackedGraph *
+packGraph(TrgmNFA *trgmNFA, MemoryContext rcontext)
+{
+ int number = 2,
+ arcIndex,
+ arcsCount;
+ HASH_SEQ_STATUS scan_status;
+ TrgmState *state;
+ TrgmPackArcInfo *arcs,
+ *p1,
+ *p2;
+ TrgmPackedArc *packedArcs;
+ TrgmPackedGraph *result;
+ int i,
+ j;
+
+ /* Enumerate surviving states, giving init and fin reserved numbers */
+ hash_seq_init(&scan_status, trgmNFA->states);
+ while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+ {
+ while (state->parent)
+ state = state->parent;
+
+ if (state->number < 0)
+ {
+ if (state->init)
+ state->number = 0;
+ else if (state->fin)
+ state->number = 1;
+ else
+ {
+ state->number = number;
+ number++;
+ }
+ }
+ }
+
+ /* Collect array of all arcs */
+ arcs = (TrgmPackArcInfo *)
+ palloc(sizeof(TrgmPackArcInfo) * trgmNFA->arcsCount);
+ arcIndex = 0;
+ hash_seq_init(&scan_status, trgmNFA->states);
+ while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+ {
+ TrgmState *source = state;
+ ListCell *cell;
+
+ while (source->parent)
+ source = source->parent;
+
+ foreach(cell, state->arcs)
+ {
+ TrgmArc *arc = (TrgmArc *) lfirst(cell);
+ TrgmState *target = arc->target;
+
+ while (target->parent)
+ target = target->parent;
+
+ if (source->number != target->number)
+ {
+ ColorTrgmInfo *ctrgm;
+
+ ctrgm = (ColorTrgmInfo *) bsearch(&arc->ctrgm,
+ trgmNFA->colorTrgms,
+ trgmNFA->colorTrgmsCount,
+ sizeof(ColorTrgmInfo),
+ colorTrgmInfoCmp);
+ Assert(ctrgm != NULL);
+ Assert(ctrgm->expanded);
+
+ arcs[arcIndex].sourceState = source->number;
+ arcs[arcIndex].targetState = target->number;
+ arcs[arcIndex].colorTrgm = ctrgm->number;
+ arcIndex++;
+ }
+ }
+ }
+
+ /* Sort arcs to ease duplicate detection */
+ qsort(arcs, arcIndex, sizeof(TrgmPackArcInfo), packArcInfoCmp);
+
+ /* We could have duplicates because states were merged. Remove them. */
+ /* p1 is probe point, p2 is last known non-duplicate. */
+ p2 = arcs;
+ for (p1 = arcs + 1; p1 < arcs + arcIndex; p1++)
+ {
+ if (packArcInfoCmp(p1, p2) > 0)
+ {
+ p2++;
+ *p2 = *p1;
+ }
+ }
+ arcsCount = (p2 - arcs) + 1;
+
+ /* Create packed representation */
+ result = (TrgmPackedGraph *)
+ MemoryContextAlloc(rcontext, sizeof(TrgmPackedGraph));
+
+ /* Pack color trigrams information */
+ result->colorTrigramsCount = 0;
+ for (i = 0; i < trgmNFA->colorTrgmsCount; i++)
+ {
+ if (trgmNFA->colorTrgms[i].expanded)
+ result->colorTrigramsCount++;
+ }
+ result->colorTrigramGroups = (int *)
+ MemoryContextAlloc(rcontext, sizeof(int) * result->colorTrigramsCount);
+ j = 0;
+ for (i = 0; i < trgmNFA->colorTrgmsCount; i++)
+ {
+ if (trgmNFA->colorTrgms[i].expanded)
+ {
+ result->colorTrigramGroups[j] = trgmNFA->colorTrgms[i].count;
+ j++;
+ }
+ }
+
+ /* Pack states and arcs information */
+ result->statesCount = number;
+ result->states = (TrgmPackedState *)
+ MemoryContextAlloc(rcontext, number * sizeof(TrgmPackedState));
+ packedArcs = (TrgmPackedArc *)
+ MemoryContextAlloc(rcontext, arcsCount * sizeof(TrgmPackedArc));
+ j = 0;
+ for (i = 0; i < number; i++)
+ {
+ int cnt = 0;
+
+ result->states[i].arcs = &packedArcs[j];
+ while (j < arcsCount && arcs[j].sourceState == i)
+ {
+ packedArcs[j].targetState = arcs[j].targetState;
+ packedArcs[j].colorTrgm = arcs[j].colorTrgm;
+ cnt++;
+ j++;
+ }
+ result->states[i].arcsCount = cnt;
+ }
+
+ /* Allocate working memory for trigramsMatchGraph() */
+ result->colorTrigramsActive = (bool *)
+ MemoryContextAlloc(rcontext, sizeof(bool) * result->colorTrigramsCount);
+ result->statesActive = (bool *)
+ MemoryContextAlloc(rcontext, sizeof(bool) * result->statesCount);
+ result->statesQueue = (int *)
+ MemoryContextAlloc(rcontext, sizeof(int) * result->statesCount);
+
+ return result;
+}
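
A stand-alone sketch of the final packing idea (hypothetical names and data): with arcs sorted by source state and de-duplicated, a single pass gives each state a pointer into the shared arc array plus a count, instead of a per-state list.

    #include <stdio.h>

    typedef struct Arc
    {
        int         source;
        int         target;
    } Arc;

    typedef struct PackedState
    {
        const Arc  *arcs;           /* slice of the shared, sorted array */
        int         arcsCount;
    } PackedState;

    int
    main(void)
    {
        /* already sorted by source state, duplicates removed */
        static const Arc arcs[] = {{0, 1}, {0, 2}, {1, 2}, {2, 2}, {2, 0}};
        int         narcs = sizeof(arcs) / sizeof(arcs[0]);
        int         nstates = 3;
        PackedState states[3];
        int         i,
                    j = 0;

        for (i = 0; i < nstates; i++)
        {
            int         cnt = 0;

            states[i].arcs = &arcs[j];
            while (j < narcs && arcs[j].source == i)
            {
                cnt++;
                j++;
            }
            states[i].arcsCount = cnt;
        }

        for (i = 0; i < nstates; i++)
            printf("state %d has %d out-arc(s), first target %d\n",
                   i, states[i].arcsCount,
                   states[i].arcsCount > 0 ? states[i].arcs[0].target : -1);
        return 0;
    }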
+
+/*
+ * Comparison function for sorting TrgmPackArcInfos.
+ *
+ * Compares arcs in the following order: sourceState, colorTrgm, targetState.
+ */
+static int
+packArcInfoCmp(const void *a1, const void *a2)
+{
+ const TrgmPackArcInfo *p1 = (const TrgmPackArcInfo *) a1;
+ const TrgmPackArcInfo *p2 = (const TrgmPackArcInfo *) a2;
+
+ if (p1->sourceState < p2->sourceState)
+ return -1;
+ if (p1->sourceState > p2->sourceState)
+ return 1;
+ if (p1->colorTrgm < p2->colorTrgm)
+ return -1;
+ if (p1->colorTrgm > p2->colorTrgm)
+ return 1;
+ if (p1->targetState < p2->targetState)
+ return -1;
+ if (p1->targetState > p2->targetState)
+ return 1;
+ return 0;
+}
+
+
+/*---------------------
+ * Debugging functions
+ *
+ * These are designed to emit GraphViz files.
+ *---------------------
+ */
+
+#ifdef TRGM_REGEXP_DEBUG
+
+/*
+ * Print initial NFA, in regexp library's representation
+ */
+static void
+printSourceNFA(regex_t *regex, TrgmColorInfo *colors, int ncolors)
+{
+ StringInfoData buf;
+ int nstates = pg_reg_getnumstates(regex);
+ int state;
+ int i;
+
+ initStringInfo(&buf);
+
+ appendStringInfoString(&buf, "\ndigraph sourceNFA {\n");
+
+ for (state = 0; state < nstates; state++)
+ {
+ regex_arc_t *arcs;
+ int i,
+ arcsCount;
+
+ appendStringInfo(&buf, "s%d", state);
+ if (pg_reg_getfinalstate(regex) == state)
+ appendStringInfoString(&buf, " [shape = doublecircle]");
+ appendStringInfoString(&buf, ";\n");
+
+ arcsCount = pg_reg_getnumoutarcs(regex, state);
+ arcs = (regex_arc_t *) palloc(sizeof(regex_arc_t) * arcsCount);
+ pg_reg_getoutarcs(regex, state, arcs, arcsCount);
+
+ for (i = 0; i < arcsCount; i++)
+ {
+ appendStringInfo(&buf, " s%d -> s%d [label = \"%d\"];\n",
+ state, arcs[i].to, arcs[i].co);
+ }
+
+ pfree(arcs);
+ }
+
+ appendStringInfoString(&buf, " node [shape = point ]; initial;\n");
+ appendStringInfo(&buf, " initial -> s%d;\n",
+ pg_reg_getinitialstate(regex));
+
+ /* Print colors */
+ appendStringInfoString(&buf, " { rank = sink;\n");
+ appendStringInfoString(&buf, " Colors [shape = none, margin=0, label=<\n");
+
+ for (i = 0; i < ncolors; i++)
+ {
+ TrgmColorInfo *color = &colors[i];
+ int j;
+
+ appendStringInfo(&buf, "<br/>Color %d: ", i);
+ if (color->expandable)
+ {
+ for (j = 0; j < color->wordCharsCount; j++)
+ {
+ char s[MAX_MULTIBYTE_CHAR_LEN + 1];
+
+ memcpy(s, color->wordChars[j].bytes, MAX_MULTIBYTE_CHAR_LEN);
+ s[MAX_MULTIBYTE_CHAR_LEN] = '\0';
+ appendStringInfoString(&buf, s);
+ }
+ }
+ else
+ appendStringInfoString(&buf, "not expandable");
+ appendStringInfoChar(&buf, '\n');
+ }
+
+ appendStringInfoString(&buf, " >];\n");
+ appendStringInfoString(&buf, " }\n");
+ appendStringInfoString(&buf, "}\n");
+
+ {
+ /* dot -Tpng -o /tmp/source.png < /tmp/source.dot */
+ FILE *fp = fopen("/tmp/source.dot", "w");
+
+ fprintf(fp, "%s", buf.data);
+ fclose(fp);
+ }
+
+ pfree(buf.data);
+}
+
+/*
+ * Print expanded graph.
+ */
+static void
+printTrgmNFA(TrgmNFA *trgmNFA)
+{
+ StringInfoData buf;
+ HASH_SEQ_STATUS scan_status;
+ TrgmState *state;
+ TrgmState *initstate = NULL;
+
+ initStringInfo(&buf);
+
+ appendStringInfoString(&buf, "\ndigraph transformedNFA {\n");
+
+ hash_seq_init(&scan_status, trgmNFA->states);
+ while ((state = (TrgmState *) hash_seq_search(&scan_status)) != NULL)
+ {
+ ListCell *cell;
+
+ appendStringInfo(&buf, "s%p", (void *) state);
+ if (state->fin)
+ appendStringInfoString(&buf, " [shape = doublecircle]");
+ if (state->init)
+ initstate = state;
+ appendStringInfo(&buf, " [label = \"%d\"]", state->stateKey.nstate);
+ appendStringInfoString(&buf, ";\n");
+
+ foreach(cell, state->arcs)
+ {
+ TrgmArc *arc = (TrgmArc *) lfirst(cell);
+
+ appendStringInfo(&buf, " s%p -> s%p [label = \"",
+ (void *) state, (void *) arc->target);
+ printTrgmColor(&buf, arc->ctrgm.colors[0]);
+ appendStringInfoChar(&buf, ' ');
+ printTrgmColor(&buf, arc->ctrgm.colors[1]);
+ appendStringInfoChar(&buf, ' ');
+ printTrgmColor(&buf, arc->ctrgm.colors[2]);
+ appendStringInfoString(&buf, "\"];\n");
+ }
+ }
+
+ if (initstate)
+ {
+ appendStringInfoString(&buf, " node [shape = point ]; initial;\n");
+ appendStringInfo(&buf, " initial -> s%p;\n", (void *) initstate);
+ }
+
+ appendStringInfoString(&buf, "}\n");
+
+ {
+ /* dot -Tpng -o /tmp/transformed.png < /tmp/transformed.dot */
+ FILE *fp = fopen("/tmp/transformed.dot", "w");
+
+ fprintf(fp, "%s", buf.data);
+ fclose(fp);
+ }
+
+ pfree(buf.data);
+}
+
+/*
+ * Print a TrgmColor readably.
+ */
+static void
+printTrgmColor(StringInfo buf, TrgmColor co)
+{
+ if (co == COLOR_UNKNOWN)
+ appendStringInfoChar(buf, 'u');
+ else if (co == COLOR_BLANK)
+ appendStringInfoChar(buf, 'b');
+ else
+ appendStringInfo(buf, "%d", (int) co);
+}
+
+/*
+ * Print final packed representation of trigram-based expanded graph.
+ */
+static void
+printTrgmPackedGraph(TrgmPackedGraph *packedGraph, TRGM *trigrams)
+{
+ StringInfoData buf;
+ trgm *p;
+ int i;
+
+ initStringInfo(&buf);
+
+ appendStringInfoString(&buf, "\ndigraph packedGraph {\n");
+
+ for (i = 0; i < packedGraph->statesCount; i++)
+ {
+ TrgmPackedState *state = &packedGraph->states[i];
+ int j;
+
+ appendStringInfo(&buf, " s%d", i);
+ if (i == 1)
+ appendStringInfoString(&buf, " [shape = doublecircle]");
+
+ appendStringInfo(&buf, " [label = <s%d>];\n", i);
+
+ for (j = 0; j < state->arcsCount; j++)
+ {
+ TrgmPackedArc *arc = &state->arcs[j];
+
+ appendStringInfo(&buf, " s%d -> s%d [label = \"trigram %d\"];\n",
+ i, arc->targetState, arc->colorTrgm);
+ }
+ }
+
+ appendStringInfoString(&buf, " node [shape = point ]; initial;\n");
+ appendStringInfo(&buf, " initial -> s%d;\n", 0);
+
+ /* Print trigrams */
+ appendStringInfoString(&buf, " { rank = sink;\n");
+ appendStringInfoString(&buf, " Trigrams [shape = none, margin=0, label=<\n");
+
+ p = GETARR(trigrams);
+ for (i = 0; i < packedGraph->colorTrigramsCount; i++)
+ {
+ int count = packedGraph->colorTrigramGroups[i];
+ int j;
+
+ appendStringInfo(&buf, "<br/>Trigram %d: ", i);
+
+ for (j = 0; j < count; j++)
+ {
+ if (j > 0)
+ appendStringInfoString(&buf, ", ");
+
+ /*
+ * XXX This representation is nice only for all-ASCII trigrams.
+ */
+ appendStringInfo(&buf, "\"%c%c%c\"", (*p)[0], (*p)[1], (*p)[2]);
+ p++;
+ }
+ }
+
+ appendStringInfoString(&buf, " >];\n");
+ appendStringInfoString(&buf, " }\n");
+ appendStringInfoString(&buf, "}\n");
+
+ {
+ /* dot -Tpng -o /tmp/packed.png < /tmp/packed.dot */
+ FILE *fp = fopen("/tmp/packed.dot", "w");
+
+ fprintf(fp, "%s", buf.data);
+ fclose(fp);
+ }
+
+ pfree(buf.data);
+}
+
+#endif /* TRGM_REGEXP_DEBUG */
diff --git a/contrib/pg_upgrade/Makefile b/contrib/pg_upgrade/Makefile
index 364b20df1b..150a9b4770 100644
--- a/contrib/pg_upgrade/Makefile
+++ b/contrib/pg_upgrade/Makefile
@@ -5,13 +5,15 @@ PGAPPICON = win32
PROGRAM = pg_upgrade
OBJS = check.o controldata.o dump.o exec.o file.o function.o info.o \
- option.o page.o pg_upgrade.o relfilenode.o server.o \
+ option.o page.o parallel.o pg_upgrade.o relfilenode.o server.o \
tablespace.o util.o version.o version_old_8_3.o $(WIN32RES)
PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir)
PG_LIBS = $(libpq_pgport)
-EXTRA_CLEAN = delete_old_cluster.sh log/ tmp_check/
+EXTRA_CLEAN = analyze_new_cluster.sh delete_old_cluster.sh log/ tmp_check/ \
+ pg_upgrade_dump_globals.sql \
+ pg_upgrade_dump_*.custom pg_upgrade_*.log
ifdef USE_PGXS
PG_CONFIG = pg_config
@@ -25,7 +27,7 @@ include $(top_srcdir)/contrib/contrib-global.mk
endif
check: test.sh all
- MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) $(SHELL) $< --install
+ MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) EXTRA_REGRESS_OPTS="$(EXTRA_REGRESS_OPTS)" $(SHELL) $< --install
# disabled because it upsets the build farm
#installcheck: test.sh
diff --git a/contrib/pg_upgrade/TESTING b/contrib/pg_upgrade/TESTING
index 2043b40ae8..359688c664 100644
--- a/contrib/pg_upgrade/TESTING
+++ b/contrib/pg_upgrade/TESTING
@@ -10,7 +10,7 @@ specific to each major version of Postgres.
Here are the steps needed to create a regression database dump file:
1) Create and populate the regression database in the old cluster
- This database can be created by running 'gmake installcheck' from
+ This database can be created by running 'make installcheck' from
src/test/regression.
2) Use pg_dump to dump out the regression database. Use the new
@@ -66,18 +66,18 @@ steps:
The shell script test.sh in this directory performs more or less this
procedure. You can invoke it by running
- gmake check
+ make check
or by running
- gmake installcheck
+ make installcheck
-if "gmake install" (or "gmake install-world") were done beforehand.
+if "make install" (or "make install-world") were done beforehand.
When invoked without arguments, it will run an upgrade from the
version in this source tree to a new instance of the same version. To
test an upgrade from a different version, invoke it like this:
- gmake installcheck oldbindir=...otherversion/bin oldsrc=...somewhere/postgresql
+ make installcheck oldbindir=...otherversion/bin oldsrc=...somewhere/postgresql
In this case, you will have to manually eyeball the resulting dump
diff for version-specific differences, as explained above.
diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c
index 71d8f7514d..edfe7e114b 100644
--- a/contrib/pg_upgrade/check.c
+++ b/contrib/pg_upgrade/check.c
@@ -3,12 +3,13 @@
*
* server checks and output routines
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/check.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
+#include "mb/pg_wchar.h"
#include "pg_upgrade.h"
@@ -16,25 +17,49 @@ static void set_locale_and_encoding(ClusterInfo *cluster);
static void check_new_cluster_is_empty(void);
static void check_locale_and_encoding(ControlData *oldctrl,
ControlData *newctrl);
+static bool equivalent_locale(const char *loca, const char *locb);
+static bool equivalent_encoding(const char *chara, const char *charb);
static void check_is_super_user(ClusterInfo *cluster);
static void check_for_prepared_transactions(ClusterInfo *cluster);
static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster);
static void check_for_reg_data_type_usage(ClusterInfo *cluster);
static void get_bin_version(ClusterInfo *cluster);
+static char *get_canonical_locale_name(int category, const char *locale);
+/*
+ * fix_path_separator
+ * For non-Windows, just return the argument.
+ * For Windows, convert any forward slash to a backslash, as is
+ * suitable for arguments to builtin commands like RMDIR and DEL.
+ */
+static char *
+fix_path_separator(char *path)
+{
+#ifdef WIN32
+
+ char *result;
+ char *c;
+
+ result = pg_strdup(path);
+
+ for (c = result; *c != '\0'; c++)
+ if (*c == '/')
+ *c = '\\';
+
+ return result;
+#else
+
+ return path;
+#endif
+}
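
A stand-alone, portable sketch of the same slash flipping, assuming POSIX strdup() (hypothetical names; it compiles on any platform rather than only under WIN32).

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *
    flip_separators(const char *path)
    {
        char       *result = strdup(path);
        char       *c;

        for (c = result; *c != '\0'; c++)
            if (*c == '/')
                *c = '\\';
        return result;
    }

    int
    main(void)
    {
        char       *s = flip_separators("C:/data/pg_tblspc/16384");

        printf("%s\n", s);      /* C:\data\pg_tblspc\16384 */
        free(s);
        return 0;
    }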
+
void
-output_check_banner(bool *live_check)
+output_check_banner(bool live_check)
{
- if (user_opts.check && is_server_running(old_cluster.pgdata))
+ if (user_opts.check && live_check)
{
- *live_check = true;
- if (old_cluster.port == DEF_PGUPORT)
- pg_log(PG_FATAL, "When checking a live old server, "
- "you must specify the old server's port number.\n");
- if (old_cluster.port == new_cluster.port)
- pg_log(PG_FATAL, "When checking a live server, "
- "the old and new port numbers must be different.\n");
pg_log(PG_REPORT, "Performing Consistency Checks on Old Live Server\n");
pg_log(PG_REPORT, "------------------------------------------------\n");
}
@@ -47,12 +72,12 @@ output_check_banner(bool *live_check)
void
-check_old_cluster(bool live_check, char **sequence_script_file_name)
+check_and_dump_old_cluster(bool live_check, char **sequence_script_file_name)
{
/* -- OLD -- */
if (!live_check)
- start_postmaster(&old_cluster);
+ start_postmaster(&old_cluster, true);
set_locale_and_encoding(&old_cluster);
@@ -97,6 +122,10 @@ check_old_cluster(bool live_check, char **sequence_script_file_name)
old_8_3_create_sequence_script(&old_cluster);
}
+ /* Pre-PG 9.4 had a different 'line' data type internal format */
+ if (GET_MAJOR_VERSION(old_cluster.major_version) <= 903)
+ old_9_3_check_for_line_data_type_usage(&old_cluster);
+
/* Pre-PG 9.0 had no large object permissions */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
new_9_0_populate_pg_largeobject_metadata(&old_cluster, true);
@@ -106,10 +135,7 @@ check_old_cluster(bool live_check, char **sequence_script_file_name)
* the old server is running.
*/
if (!user_opts.check)
- {
generate_old_dump();
- split_old_dump();
- }
if (!live_check)
stop_postmaster(false);
@@ -135,21 +161,20 @@ check_new_cluster(void)
check_is_super_user(&new_cluster);
/*
- * We don't restore our own user, so both clusters must match have
- * matching install-user oids.
+ * We don't restore our own user, so both clusters must have matching
+ * install-user oids.
*/
if (old_cluster.install_role_oid != new_cluster.install_role_oid)
- pg_log(PG_FATAL,
- "Old and new cluster install users have different values for pg_authid.oid.\n");
+ pg_fatal("Old and new cluster install users have different values for pg_authid.oid.\n");
/*
- * We only allow the install user in the new cluster because other
- * defined users might match users defined in the old cluster and
- * generate an error during pg_dump restore.
+ * We only allow the install user in the new cluster because other defined
+ * users might match users defined in the old cluster and generate an
+ * error during pg_dump restore.
*/
if (new_cluster.role_count != 1)
- pg_log(PG_FATAL, "Only the install user can be defined in the new cluster.\n");
-
+ pg_fatal("Only the install user can be defined in the new cluster.\n");
+
check_for_prepared_transactions(&new_cluster);
}
@@ -177,19 +202,16 @@ issue_warnings(char *sequence_script_file_name)
/* old = PG 8.3 warnings? */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 803)
{
- start_postmaster(&new_cluster);
+ start_postmaster(&new_cluster, true);
/* restore proper sequence values using file created from old server */
if (sequence_script_file_name)
{
prep_status("Adjusting sequences");
- exec_prog(true, true, UTILITY_LOG_FILE,
- SYSTEMQUOTE "\"%s/psql\" --echo-queries "
- "--set ON_ERROR_STOP=on "
- "--no-psqlrc --port %d --username \"%s\" "
- "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
- new_cluster.bindir, new_cluster.port, os_info.user,
- sequence_script_file_name, UTILITY_LOG_FILE);
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
+ new_cluster.bindir, cluster_conn_opts(&new_cluster),
+ sequence_script_file_name);
unlink(sequence_script_file_name);
check_ok();
}
@@ -203,7 +225,7 @@ issue_warnings(char *sequence_script_file_name)
/* Create dummy large object permissions for old < PG 9.0? */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
{
- start_postmaster(&new_cluster);
+ start_postmaster(&new_cluster, true);
new_9_0_populate_pg_largeobject_metadata(&new_cluster, false);
stop_postmaster(false);
}
@@ -226,10 +248,17 @@ output_completion_banner(char *analyze_script_file_name,
"by pg_upgrade so, once you start the new server, consider running:\n"
" %s\n\n", analyze_script_file_name);
- pg_log(PG_REPORT,
- "Running this script will delete the old cluster's data files:\n"
- " %s\n",
- deletion_script_file_name);
+
+ if (deletion_script_file_name)
+ pg_log(PG_REPORT,
+ "Running this script will delete the old cluster's data files:\n"
+ " %s\n",
+ deletion_script_file_name);
+ else
+ pg_log(PG_REPORT,
+ "Could not create a script to delete the old cluster's data\n"
+ "files because user-defined tablespaces exist in the old cluster\n"
+ "directory. The old cluster's contents must be deleted manually.\n");
}
@@ -248,12 +277,12 @@ check_cluster_versions(void)
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) < 803)
- pg_log(PG_FATAL, "This utility can only upgrade from PostgreSQL version 8.3 and later.\n");
+ pg_fatal("This utility can only upgrade from PostgreSQL version 8.3 and later.\n");
/* Only current PG version is supported as a target */
if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
- pg_log(PG_FATAL, "This utility can only upgrade to PostgreSQL version %s.\n",
- PG_MAJORVERSION);
+ pg_fatal("This utility can only upgrade to PostgreSQL version %s.\n",
+ PG_MAJORVERSION);
/*
* We can't allow downgrading because we use the target pg_dumpall, and
@@ -261,7 +290,7 @@ check_cluster_versions(void)
* versions.
*/
if (old_cluster.major_version > new_cluster.major_version)
- pg_log(PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
+ pg_fatal("This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
/* get old and new binary versions */
get_bin_version(&old_cluster);
@@ -270,12 +299,10 @@ check_cluster_versions(void)
/* Ensure binaries match the designated data directories */
if (GET_MAJOR_VERSION(old_cluster.major_version) !=
GET_MAJOR_VERSION(old_cluster.bin_version))
- pg_log(PG_FATAL,
- "Old cluster data and binary directories are from different major versions.\n");
+ pg_fatal("Old cluster data and binary directories are from different major versions.\n");
if (GET_MAJOR_VERSION(new_cluster.major_version) !=
GET_MAJOR_VERSION(new_cluster.bin_version))
- pg_log(PG_FATAL,
- "New cluster data and binary directories are from different major versions.\n");
+ pg_fatal("New cluster data and binary directories are from different major versions.\n");
check_ok();
}
@@ -292,8 +319,18 @@ check_cluster_compatibility(bool live_check)
/* Is it 9.0 but without tablespace directories? */
if (GET_MAJOR_VERSION(new_cluster.major_version) == 900 &&
new_cluster.controldata.cat_ver < TABLE_SPACE_SUBDIRS_CAT_VER)
- pg_log(PG_FATAL, "This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n"
- "because of backend API changes made during development.\n");
+ pg_fatal("This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n"
+ "because of backend API changes made during development.\n");
+
+ /* We read the real port number for PG >= 9.1 */
+ if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
+ old_cluster.port == DEF_PGUPORT)
+ pg_fatal("When checking a pre-PG 9.1 live old server, "
+ "you must specify the old server's port number.\n");
+
+ if (live_check && old_cluster.port == new_cluster.port)
+ pg_fatal("When checking a live server, "
+ "the old and new port numbers must be different.\n");
}
@@ -321,22 +358,37 @@ set_locale_and_encoding(ClusterInfo *cluster)
res = executeQueryOrDie(conn,
"SELECT datcollate, datctype "
- "FROM pg_catalog.pg_database "
+ "FROM pg_catalog.pg_database "
"WHERE datname = 'template0' ");
assert(PQntuples(res) == 1);
i_datcollate = PQfnumber(res, "datcollate");
i_datctype = PQfnumber(res, "datctype");
- ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
- ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype));
+ if (GET_MAJOR_VERSION(cluster->major_version) < 902)
+ {
+ /*
+ * Pre-9.2 did not canonicalize the supplied locale names to match
+ * what the system returns, while 9.2+ does, so convert pre-9.2 to
+ * match.
+ */
+ ctrl->lc_collate = get_canonical_locale_name(LC_COLLATE,
+ pg_strdup(PQgetvalue(res, 0, i_datcollate)));
+ ctrl->lc_ctype = get_canonical_locale_name(LC_CTYPE,
+ pg_strdup(PQgetvalue(res, 0, i_datctype)));
+ }
+ else
+ {
+ ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate));
+ ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype));
+ }
PQclear(res);
}
res = executeQueryOrDie(conn,
"SELECT pg_catalog.pg_encoding_to_char(encoding) "
- "FROM pg_catalog.pg_database "
+ "FROM pg_catalog.pg_database "
"WHERE datname = 'template0' ");
assert(PQntuples(res) == 1);
@@ -352,23 +404,86 @@ set_locale_and_encoding(ClusterInfo *cluster)
/*
* check_locale_and_encoding()
*
- * locale is not in pg_controldata in 8.4 and later so
- * we probably had to get via a database query.
+ * Check that old and new locale and encoding match. Even though the backend
+ * tries to canonicalize stored locale names, the platform often doesn't
+ * cooperate, so it's entirely possible that one DB thinks its locale is
+ * "en_US.UTF-8" while the other says "en_US.utf8". Try to be forgiving.
*/
static void
check_locale_and_encoding(ControlData *oldctrl,
ControlData *newctrl)
{
- /* These are often defined with inconsistent case, so use pg_strcasecmp(). */
- if (pg_strcasecmp(oldctrl->lc_collate, newctrl->lc_collate) != 0)
- pg_log(PG_FATAL,
- "old and new cluster lc_collate values do not match\n");
- if (pg_strcasecmp(oldctrl->lc_ctype, newctrl->lc_ctype) != 0)
- pg_log(PG_FATAL,
- "old and new cluster lc_ctype values do not match\n");
- if (pg_strcasecmp(oldctrl->encoding, newctrl->encoding) != 0)
- pg_log(PG_FATAL,
- "old and new cluster encoding values do not match\n");
+ if (!equivalent_locale(oldctrl->lc_collate, newctrl->lc_collate))
+ pg_fatal("lc_collate cluster values do not match: old \"%s\", new \"%s\"\n",
+ oldctrl->lc_collate, newctrl->lc_collate);
+ if (!equivalent_locale(oldctrl->lc_ctype, newctrl->lc_ctype))
+ pg_fatal("lc_ctype cluster values do not match: old \"%s\", new \"%s\"\n",
+ oldctrl->lc_ctype, newctrl->lc_ctype);
+ if (!equivalent_encoding(oldctrl->encoding, newctrl->encoding))
+ pg_fatal("encoding cluster values do not match: old \"%s\", new \"%s\"\n",
+ oldctrl->encoding, newctrl->encoding);
+}
+
+/*
+ * equivalent_locale()
+ *
+ * Best effort locale-name comparison. Return false if we are not 100% sure
+ * the locales are equivalent.
+ */
+static bool
+equivalent_locale(const char *loca, const char *locb)
+{
+ const char *chara = strrchr(loca, '.');
+ const char *charb = strrchr(locb, '.');
+ int lencmp;
+
+ /* If they don't both contain an encoding part, just do strcasecmp(). */
+ if (!chara || !charb)
+ return (pg_strcasecmp(loca, locb) == 0);
+
+ /*
+ * Compare the encoding parts. Windows tends to use code page numbers for
+ * the encoding part, which equivalent_encoding() won't like, so accept if
+ * the strings are case-insensitively equal; otherwise use
+ * equivalent_encoding() to compare.
+ */
+ if (pg_strcasecmp(chara + 1, charb + 1) != 0 &&
+ !equivalent_encoding(chara + 1, charb + 1))
+ return false;
+
+ /*
+ * OK, compare the locale identifiers (e.g. en_US part of en_US.utf8).
+ *
+ * It's tempting to ignore non-alphanumeric chars here, but for now it's
+ * not clear that that's necessary; just do case-insensitive comparison.
+ */
+ lencmp = chara - loca;
+ if (lencmp != charb - locb)
+ return false;
+
+ return (pg_strncasecmp(loca, locb, lencmp) == 0);
+}
+
+/*
+ * equivalent_encoding()
+ *
+ * Best effort encoding-name comparison. Return true only if the encodings
+ * are valid server-side encodings and known equivalent.
+ *
+ * Because the lookup in pg_valid_server_encoding() does case folding and
+ * ignores non-alphanumeric characters, this will recognize many popular
+ * variant spellings as equivalent, eg "utf8" and "UTF-8" will match.
+ */
+static bool
+equivalent_encoding(const char *chara, const char *charb)
+{
+ int enca = pg_valid_server_encoding(chara);
+ int encb = pg_valid_server_encoding(charb);
+
+ if (enca < 0 || encb < 0)
+ return false;
+
+ return (enca == encb);
}
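
A stand-alone sketch of the best-effort locale comparison described above, with pg_valid_server_encoding() replaced by simple name normalization (case folding plus stripping non-alphanumerics) and assuming POSIX strcasecmp()/strncasecmp(); hypothetical names, purely illustrative.

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    static void
    normalize(const char *s, char *out, size_t outlen)
    {
        size_t      j = 0;

        for (; *s && j + 1 < outlen; s++)
            if (isalnum((unsigned char) *s))
                out[j++] = (char) tolower((unsigned char) *s);
        out[j] = '\0';
    }

    static int
    locales_equivalent(const char *a, const char *b)
    {
        const char *dota = strrchr(a, '.');
        const char *dotb = strrchr(b, '.');
        char        enca[64],
                    encb[64];

        /* if they don't both contain an encoding part, compare whole names */
        if (!dota || !dotb)
            return strcasecmp(a, b) == 0;

        /* language/territory part: same length, case-insensitive match */
        if (dota - a != dotb - b || strncasecmp(a, b, dota - a) != 0)
            return 0;

        /* encoding part: strip punctuation, fold case ("UTF-8" ~ "utf8") */
        normalize(dota + 1, enca, sizeof(enca));
        normalize(dotb + 1, encb, sizeof(encb));
        return strcmp(enca, encb) == 0;
    }

    int
    main(void)
    {
        printf("%d\n", locales_equivalent("en_US.UTF-8", "en_US.utf8"));   /* 1 */
        printf("%d\n", locales_equivalent("en_US.UTF-8", "de_DE.UTF-8"));  /* 0 */
        return 0;
    }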
@@ -387,8 +502,8 @@ check_new_cluster_is_empty(void)
{
/* pg_largeobject and its index should be skipped */
if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
- pg_log(PG_FATAL, "New cluster database \"%s\" is not empty\n",
- new_cluster.dbarr.dbs[dbnum].db_name);
+ pg_fatal("New cluster database \"%s\" is not empty\n",
+ new_cluster.dbarr.dbs[dbnum].db_name);
}
}
@@ -404,21 +519,25 @@ void
create_script_for_cluster_analyze(char **analyze_script_file_name)
{
FILE *script = NULL;
-
- *analyze_script_file_name = pg_malloc(MAXPGPATH);
+ char *user_specification = "";
prep_status("Creating script to analyze new cluster");
- snprintf(*analyze_script_file_name, MAXPGPATH, "analyze_new_cluster.%s",
- SCRIPT_EXT);
+ if (os_info.user_specified)
+ user_specification = psprintf("-U \"%s\" ", os_info.user);
+
+ *analyze_script_file_name = psprintf("analyze_new_cluster.%s", SCRIPT_EXT);
if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL)
- pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
- *analyze_script_file_name, getErrorText(errno));
+ pg_fatal("Could not open file \"%s\": %s\n",
+ *analyze_script_file_name, getErrorText(errno));
#ifndef WIN32
/* add shebang header */
fprintf(script, "#!/bin/sh\n\n");
+#else
+ /* suppress command echoing */
+ fprintf(script, "@echo off\n");
#endif
fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n",
@@ -429,7 +548,7 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %shave the default level of optimizer statistics.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "echo\n\n");
+ fprintf(script, "echo%s\n\n", ECHO_BLANK);
fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n",
ECHO_QUOTE, ECHO_QUOTE);
@@ -437,73 +556,27 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "echo\n\n");
+ fprintf(script, "echo%s\n\n", ECHO_BLANK);
fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sthis script and run:%s\n",
ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "echo %s vacuumdb --all %s%s\n", ECHO_QUOTE,
+ fprintf(script, "echo %s \"%s/vacuumdb\" %s--all %s%s\n", ECHO_QUOTE,
+ new_cluster.bindir, user_specification,
/* Did we copy the free space files? */
(GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
"--analyze-only" : "--analyze", ECHO_QUOTE);
- fprintf(script, "echo\n\n");
+ fprintf(script, "echo%s\n\n", ECHO_BLANK);
-#ifndef WIN32
- fprintf(script, "sleep 2\n");
- fprintf(script, "PGOPTIONS='-c default_statistics_target=1 -c vacuum_cost_delay=0'\n");
- /* only need to export once */
- fprintf(script, "export PGOPTIONS\n");
-#else
- fprintf(script, "REM simulate sleep 2\n");
- fprintf(script, "PING 1.1.1.1 -n 1 -w 2000 > nul\n");
- fprintf(script, "SET PGOPTIONS=-c default_statistics_target=1 -c vacuum_cost_delay=0\n");
-#endif
-
- fprintf(script, "echo %sGenerating minimal optimizer statistics (1 target)%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "echo %s--------------------------------------------------%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "vacuumdb --all --analyze-only\n");
- fprintf(script, "echo\n");
- fprintf(script, "echo %sThe server is now available with minimal optimizer statistics.%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "echo %sQuery performance will be optimal once this script completes.%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "echo\n\n");
-
-#ifndef WIN32
- fprintf(script, "sleep 2\n");
- fprintf(script, "PGOPTIONS='-c default_statistics_target=10'\n");
-#else
- fprintf(script, "REM simulate sleep\n");
- fprintf(script, "PING 1.1.1.1 -n 1 -w 2000 > nul\n");
- fprintf(script, "SET PGOPTIONS=-c default_statistics_target=10\n");
-#endif
-
- fprintf(script, "echo %sGenerating medium optimizer statistics (10 targets)%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "echo %s---------------------------------------------------%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "vacuumdb --all --analyze-only\n");
- fprintf(script, "echo\n\n");
-
-#ifndef WIN32
- fprintf(script, "unset PGOPTIONS\n");
-#else
- fprintf(script, "SET PGOPTIONS\n");
-#endif
-
- fprintf(script, "echo %sGenerating default (full) optimizer statistics (100 targets?)%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "echo %s-------------------------------------------------------------%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
- fprintf(script, "vacuumdb --all %s\n",
+ fprintf(script, "\"%s/vacuumdb\" %s--all --analyze-in-stages\n",
+ new_cluster.bindir, user_specification);
/* Did we copy the free space files? */
- (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
- "--analyze-only" : "--analyze");
+ if (GET_MAJOR_VERSION(old_cluster.major_version) < 804)
+ fprintf(script, "\"%s/vacuumdb\" %s--all\n", new_cluster.bindir,
+ user_specification);
- fprintf(script, "echo\n\n");
+ fprintf(script, "echo%s\n\n", ECHO_BLANK);
fprintf(script, "echo %sDone%s\n",
ECHO_QUOTE, ECHO_QUOTE);
@@ -511,10 +584,13 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
#ifndef WIN32
if (chmod(*analyze_script_file_name, S_IRWXU) != 0)
- pg_log(PG_FATAL, "Could not add execute permission to file \"%s\": %s\n",
- *analyze_script_file_name, getErrorText(errno));
+ pg_fatal("Could not add execute permission to file \"%s\": %s\n",
+ *analyze_script_file_name, getErrorText(errno));
#endif
+ if (os_info.user_specified)
+ pg_free(user_specification);
+
check_ok();
}
@@ -529,17 +605,38 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
{
FILE *script = NULL;
int tblnum;
+ char old_cluster_pgdata[MAXPGPATH];
- *deletion_script_file_name = pg_malloc(MAXPGPATH);
+ *deletion_script_file_name = psprintf("delete_old_cluster.%s", SCRIPT_EXT);
- prep_status("Creating script to delete old cluster");
+ /*
+ * Some users (oddly) create tablespaces inside the cluster data
+ * directory. We can't create a proper old cluster delete script in that
+ * case.
+ */
+ strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH);
+ canonicalize_path(old_cluster_pgdata);
+ for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ {
+ char old_tablespace_dir[MAXPGPATH];
- snprintf(*deletion_script_file_name, MAXPGPATH, "delete_old_cluster.%s",
- SCRIPT_EXT);
+ strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH);
+ canonicalize_path(old_tablespace_dir);
+ if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir))
+ {
+ /* Unlink file in case it is left over from a previous run. */
+ unlink(*deletion_script_file_name);
+ pg_free(*deletion_script_file_name);
+ *deletion_script_file_name = NULL;
+ return;
+ }
+ }
+
+ prep_status("Creating script to delete old cluster");
if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL)
- pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
- *deletion_script_file_name, getErrorText(errno));
+ pg_fatal("Could not open file \"%s\": %s\n",
+ *deletion_script_file_name, getErrorText(errno));
#ifndef WIN32
/* add shebang header */
@@ -547,10 +644,10 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
#endif
/* delete old cluster's default tablespace */
- fprintf(script, RMDIR_CMD " %s\n", old_cluster.pgdata);
+ fprintf(script, RMDIR_CMD " %s\n", fix_path_separator(old_cluster.pgdata));
/* delete old cluster's alternate tablespaces */
- for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
{
/*
* Do the old cluster's per-database directories share a directory
@@ -564,32 +661,36 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
fprintf(script, "\n");
/* remove PG_VERSION? */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
- fprintf(script, RM_CMD " %s%s/PG_VERSION\n",
- os_info.tablespaces[tblnum], old_cluster.tablespace_suffix);
+ fprintf(script, RM_CMD " %s%cPG_VERSION\n",
+ fix_path_separator(os_info.old_tablespaces[tblnum]),
+ PATH_SEPARATOR);
for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
- {
- fprintf(script, RMDIR_CMD " %s%s/%d\n",
- os_info.tablespaces[tblnum], old_cluster.tablespace_suffix,
- old_cluster.dbarr.dbs[dbnum].db_oid);
- }
+ fprintf(script, RMDIR_CMD " %s%c%d\n",
+ fix_path_separator(os_info.old_tablespaces[tblnum]),
+ PATH_SEPARATOR, old_cluster.dbarr.dbs[dbnum].db_oid);
}
else
+ {
+ char *suffix_path = pg_strdup(old_cluster.tablespace_suffix);
/*
* Simply delete the tablespace directory, which might be ".old"
* or a version-specific subdirectory.
*/
fprintf(script, RMDIR_CMD " %s%s\n",
- os_info.tablespaces[tblnum], old_cluster.tablespace_suffix);
+ fix_path_separator(os_info.old_tablespaces[tblnum]),
+ fix_path_separator(suffix_path));
+ pfree(suffix_path);
+ }
}
fclose(script);
#ifndef WIN32
if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
- pg_log(PG_FATAL, "Could not add execute permission to file \"%s\": %s\n",
- *deletion_script_file_name, getErrorText(errno));
+ pg_fatal("Could not add execute permission to file \"%s\": %s\n",
+ *deletion_script_file_name, getErrorText(errno));
#endif
check_ok();
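
The nested-tablespace guard added above canonicalizes both paths and then asks whether the old data directory is a prefix of each tablespace path. As a hedged illustration only (not pg_upgrade's code), the standalone sketch below shows the kind of prefix test path_is_prefix_of_path() performs, assuming both inputs are already canonical, '/'-separated paths with no trailing slash:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for path_is_prefix_of_path(); assumes canonical paths. */
static bool
path_is_prefix_sketch(const char *prefix, const char *path)
{
    size_t len = strlen(prefix);

    if (strncmp(prefix, path, len) != 0)
        return false;
    /* match only at a component boundary (or an exact match) */
    return path[len] == '\0' || path[len] == '/';
}

int
main(void)
{
    printf("%d\n", path_is_prefix_sketch("/data/old", "/data/old/ts1"));   /* 1 */
    printf("%d\n", path_is_prefix_sketch("/data/old", "/data/oldest"));    /* 0 */
    return 0;
}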
@@ -616,8 +717,8 @@ check_is_super_user(ClusterInfo *cluster)
"WHERE rolname = current_user");
if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0)
- pg_log(PG_FATAL, "database user \"%s\" is not a superuser\n",
- os_info.user);
+ pg_fatal("database user \"%s\" is not a superuser\n",
+ os_info.user);
cluster->install_role_oid = atooid(PQgetvalue(res, 0, 1));
@@ -628,7 +729,7 @@ check_is_super_user(ClusterInfo *cluster)
"FROM pg_catalog.pg_roles ");
if (PQntuples(res) != 1)
- pg_log(PG_FATAL, "could not determine the number of users\n");
+ pg_fatal("could not determine the number of users\n");
cluster->role_count = atoi(PQgetvalue(res, 0, 0));
@@ -659,8 +760,8 @@ check_for_prepared_transactions(ClusterInfo *cluster)
"FROM pg_catalog.pg_prepared_xacts");
if (PQntuples(res) != 0)
- pg_log(PG_FATAL, "The %s cluster contains prepared transactions\n",
- CLUSTER_NAME(cluster));
+ pg_fatal("The %s cluster contains prepared transactions\n",
+ CLUSTER_NAME(cluster));
PQclear(res);
@@ -724,8 +825,8 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
{
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ pg_fatal("Could not open file \"%s\": %s\n",
+ output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
@@ -747,14 +848,13 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
if (found)
{
pg_log(PG_REPORT, "fatal\n");
- pg_log(PG_FATAL,
- "Your installation contains \"contrib/isn\" functions which rely on the\n"
+ pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
"bigint data type. Your old and new clusters pass bigint values\n"
"differently so this cluster cannot currently be upgraded. You can\n"
- "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
- "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n"
- "the problem functions is in the file:\n"
- " %s\n\n", output_path);
+ "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
+ "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n"
+ "the problem functions is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
@@ -818,8 +918,7 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
" 'pg_catalog.regconfig'::pg_catalog.regtype, "
" 'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
" c.relnamespace = n.oid AND "
- " n.nspname != 'pg_catalog' AND "
- " n.nspname != 'information_schema'");
+ " n.nspname NOT IN ('pg_catalog', 'information_schema')");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
@@ -829,8 +928,8 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
{
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ pg_fatal("Could not open file \"%s\": %s\n",
+ output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
@@ -853,13 +952,12 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
if (found)
{
pg_log(PG_REPORT, "fatal\n");
- pg_log(PG_FATAL,
- "Your installation contains one of the reg* data types in user tables.\n"
+ pg_fatal("Your installation contains one of the reg* data types in user tables.\n"
"These data types reference system OIDs that are not preserved by\n"
"pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
- "remove the problem tables and restart the upgrade. A list of the problem\n"
- "columns is in the file:\n"
- " %s\n\n", output_path);
+ "remove the problem tables and restart the upgrade. A list of the problem\n"
+ "columns is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
@@ -879,8 +977,8 @@ get_bin_version(ClusterInfo *cluster)
if ((output = popen(cmd, "r")) == NULL ||
fgets(cmd_output, sizeof(cmd_output), output) == NULL)
- pg_log(PG_FATAL, "Could not get pg_ctl version data using %s: %s\n",
- cmd, getErrorText(errno));
+ pg_fatal("Could not get pg_ctl version data using %s: %s\n",
+ cmd, getErrorText(errno));
pclose(output);
@@ -889,7 +987,45 @@ get_bin_version(ClusterInfo *cluster)
*strchr(cmd_output, '\n') = '\0';
if (sscanf(cmd_output, "%*s %*s %d.%d", &pre_dot, &post_dot) != 2)
- pg_log(PG_FATAL, "could not get version from %s\n", cmd);
+ pg_fatal("could not get version from %s\n", cmd);
cluster->bin_version = (pre_dot * 100 + post_dot) * 100;
}
+
+
+/*
+ * get_canonical_locale_name
+ *
+ * Send the locale name to the system, and hope we get back a canonical
+ * version. This should match the backend's check_locale() function.
+ */
+static char *
+get_canonical_locale_name(int category, const char *locale)
+{
+ char *save;
+ char *res;
+
+ /* get the current setting, so we can restore it. */
+ save = setlocale(category, NULL);
+ if (!save)
+ pg_fatal("failed to get the current locale\n");
+
+ /* 'save' may be pointing at a modifiable scratch variable, so copy it. */
+ save = pg_strdup(save);
+
+ /* set the locale with setlocale, to see if it accepts it. */
+ res = setlocale(category, locale);
+
+ if (!res)
+ pg_fatal("failed to get system locale name for \"%s\"\n", locale);
+
+ res = pg_strdup(res);
+
+ /* restore old value. */
+ if (!setlocale(category, save))
+ pg_fatal("failed to restore old locale \"%s\"\n", save);
+
+ pg_free(save);
+
+ return res;
+}
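
get_canonical_locale_name() works by briefly switching the process locale and restoring it afterwards. The sketch below is an illustration of that same save/copy/restore pattern using only standard C library calls, not pg_upgrade code; the locale name "en_US.utf8" is just an example and may not exist on every system:

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    /* setlocale(cat, NULL) returns the current setting, possibly in static
     * storage that later calls overwrite, so copy it before changing it. */
    char   *cur = setlocale(LC_COLLATE, NULL);
    char   *save = cur ? strdup(cur) : NULL;

    /* Ask the C library for its canonical spelling of a locale name. */
    const char *canon = setlocale(LC_COLLATE, "en_US.utf8");

    printf("canonical name: %s\n", canon ? canon : "(locale not available)");

    /* Restore the original setting. */
    if (save)
    {
        setlocale(LC_COLLATE, save);
        free(save);
    }
    return 0;
}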
diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c
index 6bffe549e5..2906ccbf8c 100644
--- a/contrib/pg_upgrade/controldata.c
+++ b/contrib/pg_upgrade/controldata.c
@@ -3,11 +3,11 @@
*
* controldata functions
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/controldata.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -27,7 +27,7 @@
* pg_control data. pg_resetxlog cannot be run while the server is running
* so we use pg_controldata; pg_controldata doesn't provide all the fields
* we need to actually perform the upgrade, but it provides enough for
- * check mode. We do not implement pg_resetxlog -n because it is hard to
+ * check mode. We do not implement pg_resetxlog -n because it is hard to
* return valid xid data for a running server.
*/
void
@@ -39,6 +39,10 @@ get_control_data(ClusterInfo *cluster, bool live_check)
char *p;
bool got_xid = false;
bool got_oid = false;
+ bool got_nextxlogfile = false;
+ bool got_multi = false;
+ bool got_mxoff = false;
+ bool got_oldestmulti = false;
bool got_log_id = false;
bool got_log_seg = false;
bool got_tli = false;
@@ -52,6 +56,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
bool got_toast = false;
bool got_date_is_int = false;
bool got_float8_pass_by_value = false;
+ bool got_data_checksum_version = false;
char *lc_collate = NULL;
char *lc_ctype = NULL;
char *lc_monetary = NULL;
@@ -61,6 +66,10 @@ get_control_data(ClusterInfo *cluster, bool live_check)
char *language = NULL;
char *lc_all = NULL;
char *lc_messages = NULL;
+ uint32 logid = 0;
+ uint32 segno = 0;
+ uint32 tli = 0;
+
/*
* Because we test the pg_resetxlog output as strings, it has to be in
@@ -101,7 +110,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
pg_putenv("LC_ALL", NULL);
pg_putenv("LC_MESSAGES", "C");
- snprintf(cmd, sizeof(cmd), SYSTEMQUOTE "\"%s/%s \"%s\"" SYSTEMQUOTE,
+ snprintf(cmd, sizeof(cmd), "\"%s/%s \"%s\"",
cluster->bindir,
live_check ? "pg_controldata\"" : "pg_resetxlog\" -n",
cluster->pgdata);
@@ -109,8 +118,8 @@ get_control_data(ClusterInfo *cluster, bool live_check)
fflush(stderr);
if ((output = popen(cmd, "r")) == NULL)
- pg_log(PG_FATAL, "Could not get control data using %s: %s\n",
- cmd, getErrorText(errno));
+ pg_fatal("Could not get control data using %s: %s\n",
+ cmd, getErrorText(errno));
/* Only pre-8.4 has these so if they are not set below we will check later */
cluster->controldata.lc_collate = NULL;
@@ -123,6 +132,13 @@ get_control_data(ClusterInfo *cluster, bool live_check)
got_float8_pass_by_value = true;
}
+ /* Only in <= 9.2 */
+ if (GET_MAJOR_VERSION(cluster->major_version) <= 902)
+ {
+ cluster->controldata.data_checksum_version = 0;
+ got_data_checksum_version = true;
+ }
+
/* we have the result of cmd in "output". so parse it line by line now */
while (fgets(bufin, sizeof(bufin), output))
{
@@ -139,10 +155,9 @@ get_control_data(ClusterInfo *cluster, bool live_check)
{
for (p = bufin; *p; p++)
if (!isascii(*p))
- pg_log(PG_FATAL,
- "The 8.3 cluster's pg_controldata is incapable of outputting ASCII, even\n"
- "with LANG=C. You must upgrade this cluster to a newer version of PostgreSQL\n"
- "8.3 to fix this bug. PostgreSQL 8.3.7 and later are known to work properly.\n");
+ pg_fatal("The 8.3 cluster's pg_controldata is incapable of outputting ASCII, even\n"
+ "with LANG=C. You must upgrade this cluster to a newer version of PostgreSQL\n"
+ "8.3 to fix this bug. PostgreSQL 8.3.7 and later are known to work properly.\n");
}
#endif
@@ -151,7 +166,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: pg_resetxlog problem\n", __LINE__);
+ pg_fatal("%d: pg_resetxlog problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.ctrl_ver = str2uint(p);
@@ -161,20 +176,37 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.cat_ver = str2uint(p);
}
+ else if ((p = strstr(bufin, "First log segment after reset:")) != NULL)
+ {
+ /* Skip the colon and any whitespace after it */
+ p = strchr(p, ':');
+ if (p == NULL || strlen(p) <= 1)
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
+ p = strpbrk(p, "01234567890ABCDEF");
+ if (p == NULL || strlen(p) <= 1)
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
+
+ /* Make sure it looks like a valid WAL file name */
+ if (strspn(p, "0123456789ABCDEF") != 24)
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
+
+ strlcpy(cluster->controldata.nextxlogfile, p, 25);
+ got_nextxlogfile = true;
+ }
else if ((p = strstr(bufin, "First log file ID after reset:")) != NULL)
{
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
- cluster->controldata.logid = str2uint(p);
+ logid = str2uint(p);
got_log_id = true;
}
else if ((p = strstr(bufin, "First log file segment after reset:")) != NULL)
@@ -182,10 +214,10 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
- cluster->controldata.nxtlogseg = str2uint(p);
+ segno = str2uint(p);
got_log_seg = true;
}
else if ((p = strstr(bufin, "Latest checkpoint's TimeLineID:")) != NULL)
@@ -193,7 +225,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.chkpnt_tli = str2uint(p);
@@ -207,7 +239,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
op = strchr(p, ':');
if (op == NULL || strlen(op) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
op++; /* removing ':' char */
cluster->controldata.chkpnt_nxtxid = str2uint(op);
@@ -218,18 +250,51 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.chkpnt_nxtoid = str2uint(p);
got_oid = true;
}
+ else if ((p = strstr(bufin, "Latest checkpoint's NextMultiXactId:")) != NULL)
+ {
+ p = strchr(p, ':');
+
+ if (p == NULL || strlen(p) <= 1)
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
+
+ p++; /* removing ':' char */
+ cluster->controldata.chkpnt_nxtmulti = str2uint(p);
+ got_multi = true;
+ }
+ else if ((p = strstr(bufin, "Latest checkpoint's oldestMultiXid:")) != NULL)
+ {
+ p = strchr(p, ':');
+
+ if (p == NULL || strlen(p) <= 1)
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
+
+ p++; /* removing ':' char */
+ cluster->controldata.chkpnt_oldstMulti = str2uint(p);
+ got_oldestmulti = true;
+ }
+ else if ((p = strstr(bufin, "Latest checkpoint's NextMultiOffset:")) != NULL)
+ {
+ p = strchr(p, ':');
+
+ if (p == NULL || strlen(p) <= 1)
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
+
+ p++; /* removing ':' char */
+ cluster->controldata.chkpnt_nxtmxoff = str2uint(p);
+ got_mxoff = true;
+ }
else if ((p = strstr(bufin, "Maximum data alignment:")) != NULL)
{
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.align = str2uint(p);
@@ -240,7 +305,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.blocksz = str2uint(p);
@@ -251,7 +316,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.largesz = str2uint(p);
@@ -262,7 +327,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.walsz = str2uint(p);
@@ -273,7 +338,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.walseg = str2uint(p);
@@ -284,7 +349,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.ident = str2uint(p);
@@ -295,7 +360,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.index = str2uint(p);
@@ -306,7 +371,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.toast = str2uint(p);
@@ -317,7 +382,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
cluster->controldata.date_is_int = strstr(p, "64-bit integers") != NULL;
@@ -328,20 +393,32 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
/* used later for contrib check */
cluster->controldata.float8_pass_by_value = strstr(p, "by value") != NULL;
got_float8_pass_by_value = true;
}
+ else if ((p = strstr(bufin, "checksum")) != NULL)
+ {
+ p = strchr(p, ':');
+
+ if (p == NULL || strlen(p) <= 1)
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
+
+ p++; /* removing ':' char */
+ /* used later for contrib check */
+ cluster->controldata.data_checksum_version = str2uint(p);
+ got_data_checksum_version = true;
+ }
/* In pre-8.4 only */
else if ((p = strstr(bufin, "LC_COLLATE:")) != NULL)
{
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
/* skip leading spaces and remove trailing newline */
@@ -356,7 +433,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
p = strchr(p, ':');
if (p == NULL || strlen(p) <= 1)
- pg_log(PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
+ pg_fatal("%d: controldata retrieval problem\n", __LINE__);
p++; /* removing ':' char */
/* skip leading spaces and remove trailing newline */
@@ -393,17 +470,36 @@ get_control_data(ClusterInfo *cluster, bool live_check)
pg_free(lc_all);
pg_free(lc_messages);
+ /*
+ * Before 9.3, pg_resetxlog reported the xlogid and segno of the first log
+ * file after reset as separate lines. Starting with 9.3, it reports the
+ * WAL file name. If the old cluster is older than 9.3, we construct the
+ * WAL file name from the xlogid and segno.
+ */
+ if (GET_MAJOR_VERSION(cluster->major_version) <= 902)
+ {
+ if (got_log_id && got_log_seg)
+ {
+ snprintf(cluster->controldata.nextxlogfile, 25, "%08X%08X%08X",
+ tli, logid, segno);
+ got_nextxlogfile = true;
+ }
+ }
+
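
Before 9.3, pg_resetxlog printed the timeline, xlogid, and segment number separately, and the block above reassembles them into a 24-character WAL file name. A tiny, self-contained illustration of that "%08X%08X%08X" mapping, using made-up example values:

#include <stdio.h>

int
main(void)
{
    unsigned int tli = 1, logid = 0, segno = 5;    /* example values only */
    char         name[25];

    /* same formula as above: three 8-digit, upper-case hex fields */
    snprintf(name, sizeof(name), "%08X%08X%08X", tli, logid, segno);
    printf("%s\n", name);           /* prints 000000010000000000000005 */
    return 0;
}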
/* verify that we got all the mandatory pg_control data */
if (!got_xid || !got_oid ||
- (!live_check && !got_log_id) ||
- (!live_check && !got_log_seg) ||
+ !got_multi || !got_mxoff ||
+ (!got_oldestmulti &&
+ cluster->controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) ||
+ (!live_check && !got_nextxlogfile) ||
!got_tli ||
!got_align || !got_blocksz || !got_largesz || !got_walsz ||
!got_walseg || !got_ident || !got_index || !got_toast ||
- !got_date_is_int || !got_float8_pass_by_value)
+ !got_date_is_int || !got_float8_pass_by_value || !got_data_checksum_version)
{
pg_log(PG_REPORT,
- "Some required control information is missing; cannot find:\n");
+ "The %s cluster lacks some required control information:\n",
+ CLUSTER_NAME(cluster));
if (!got_xid)
pg_log(PG_REPORT, " checkpoint next XID\n");
@@ -411,11 +507,18 @@ get_control_data(ClusterInfo *cluster, bool live_check)
if (!got_oid)
pg_log(PG_REPORT, " latest checkpoint next OID\n");
- if (!live_check && !got_log_id)
- pg_log(PG_REPORT, " first log file ID after reset\n");
+ if (!got_multi)
+ pg_log(PG_REPORT, " latest checkpoint next MultiXactId\n");
+
+ if (!got_mxoff)
+ pg_log(PG_REPORT, " latest checkpoint next MultiXactOffset\n");
+
+ if (!got_oldestmulti &&
+ cluster->controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
+ pg_log(PG_REPORT, " latest checkpoint oldest MultiXactId\n");
- if (!live_check && !got_log_seg)
- pg_log(PG_REPORT, " first log file segment after reset\n");
+ if (!live_check && !got_nextxlogfile)
+ pg_log(PG_REPORT, " first WAL segment after reset\n");
if (!got_tli)
pg_log(PG_REPORT, " latest checkpoint timeline ID\n");
@@ -451,8 +554,11 @@ get_control_data(ClusterInfo *cluster, bool live_check)
if (!got_float8_pass_by_value)
pg_log(PG_REPORT, " float8 argument passing method\n");
- pg_log(PG_FATAL,
- "Cannot continue without required control information, terminating\n");
+ /* value added in Postgres 9.3 */
+ if (!got_data_checksum_version)
+ pg_log(PG_REPORT, " data checksum version\n");
+
+ pg_fatal("Cannot continue without required control information, terminating\n");
}
}
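
Nearly every branch of get_control_data() repeats the same three steps: find the ':', step past it, and convert the remainder with str2uint(). A hedged sketch of that pattern as a standalone helper; parse_control_uint is a hypothetical name, and strtoul stands in for pg_upgrade's str2uint():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse the unsigned value after the ':' in one pg_controldata output line;
 * on failure set *ok to 0 and return 0. */
static unsigned int
parse_control_uint(const char *line, int *ok)
{
    const char *p = strchr(line, ':');

    if (p == NULL || strlen(p) <= 1)
    {
        *ok = 0;
        return 0;
    }
    *ok = 1;
    return (unsigned int) strtoul(p + 1, NULL, 10);
}

int
main(void)
{
    int          ok;
    unsigned int blocksz =
        parse_control_uint("Database block size:                  8192", &ok);

    printf("ok=%d blocksz=%u\n", ok, blocksz);
    return 0;
}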
@@ -467,36 +573,29 @@ check_control_data(ControlData *oldctrl,
ControlData *newctrl)
{
if (oldctrl->align == 0 || oldctrl->align != newctrl->align)
- pg_log(PG_FATAL,
- "old and new pg_controldata alignments are invalid or do not match\n");
+ pg_fatal("old and new pg_controldata alignments are invalid or do not match\n"
+ "Likely one cluster is a 32-bit install, the other 64-bit\n");
if (oldctrl->blocksz == 0 || oldctrl->blocksz != newctrl->blocksz)
- pg_log(PG_FATAL,
- "old and new pg_controldata block sizes are invalid or do not match\n");
+ pg_fatal("old and new pg_controldata block sizes are invalid or do not match\n");
if (oldctrl->largesz == 0 || oldctrl->largesz != newctrl->largesz)
- pg_log(PG_FATAL,
- "old and new pg_controldata maximum relation segement sizes are invalid or do not match\n");
+ pg_fatal("old and new pg_controldata maximum relation segement sizes are invalid or do not match\n");
if (oldctrl->walsz == 0 || oldctrl->walsz != newctrl->walsz)
- pg_log(PG_FATAL,
- "old and new pg_controldata WAL block sizes are invalid or do not match\n");
+ pg_fatal("old and new pg_controldata WAL block sizes are invalid or do not match\n");
if (oldctrl->walseg == 0 || oldctrl->walseg != newctrl->walseg)
- pg_log(PG_FATAL,
- "old and new pg_controldata WAL segment sizes are invalid or do not match\n");
+ pg_fatal("old and new pg_controldata WAL segment sizes are invalid or do not match\n");
if (oldctrl->ident == 0 || oldctrl->ident != newctrl->ident)
- pg_log(PG_FATAL,
- "old and new pg_controldata maximum identifier lengths are invalid or do not match\n");
+ pg_fatal("old and new pg_controldata maximum identifier lengths are invalid or do not match\n");
if (oldctrl->index == 0 || oldctrl->index != newctrl->index)
- pg_log(PG_FATAL,
- "old and new pg_controldata maximum indexed columns are invalid or do not match\n");
+ pg_fatal("old and new pg_controldata maximum indexed columns are invalid or do not match\n");
if (oldctrl->toast == 0 || oldctrl->toast != newctrl->toast)
- pg_log(PG_FATAL,
- "old and new pg_controldata maximum TOAST chunk sizes are invalid or do not match\n");
+ pg_fatal("old and new pg_controldata maximum TOAST chunk sizes are invalid or do not match\n");
if (oldctrl->date_is_int != newctrl->date_is_int)
{
@@ -506,10 +605,18 @@ check_control_data(ControlData *oldctrl,
/*
* This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
*/
- pg_log(PG_FATAL,
- "You will need to rebuild the new server with configure option\n"
- "--disable-integer-datetimes or get server binaries built with those\n"
- "options.\n");
+ pg_fatal("You will need to rebuild the new server with configure option\n"
+ "--disable-integer-datetimes or get server binaries built with those\n"
+ "options.\n");
+ }
+
+ /*
+ * We might eventually allow upgrades from checksum to no-checksum
+ * clusters.
+ */
+ if (oldctrl->data_checksum_version != newctrl->data_checksum_version)
+ {
+ pg_fatal("old and new pg_controldata checksum versions are invalid or do not match\n");
}
}
@@ -526,7 +633,7 @@ disable_old_cluster(void)
snprintf(old_path, sizeof(old_path), "%s/global/pg_control", old_cluster.pgdata);
snprintf(new_path, sizeof(new_path), "%s/global/pg_control.old", old_cluster.pgdata);
if (pg_mv_file(old_path, new_path) != 0)
- pg_log(PG_FATAL, "Unable to rename %s to %s.\n", old_path, new_path);
+ pg_fatal("Unable to rename %s to %s.\n", old_path, new_path);
check_ok();
pg_log(PG_REPORT, "\n"
diff --git a/contrib/pg_upgrade/dump.c b/contrib/pg_upgrade/dump.c
index 571792b1d4..6c7661049c 100644
--- a/contrib/pg_upgrade/dump.c
+++ b/contrib/pg_upgrade/dump.c
@@ -3,11 +3,11 @@
*
* dump functions
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/dump.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -16,90 +16,54 @@
void
generate_old_dump(void)
{
- /* run new pg_dumpall binary */
- prep_status("Creating catalog dump");
+ int dbnum;
+ mode_t old_umask;
- /*
- * --binary-upgrade records the width of dropped columns in pg_class, and
- * restores the frozenid's for databases and relations.
- */
- exec_prog(true, true, UTILITY_LOG_FILE,
- SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --username \"%s\" "
- "--schema-only --binary-upgrade %s > \"%s\" 2>> \"%s\""
- SYSTEMQUOTE, new_cluster.bindir, old_cluster.port, os_info.user,
+ prep_status("Creating dump of global objects");
+
+ /* run new pg_dumpall binary for globals */
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/pg_dumpall\" %s --schema-only --globals-only "
+ "--quote-all-identifiers --binary-upgrade %s -f %s",
+ new_cluster.bindir, cluster_conn_opts(&old_cluster),
log_opts.verbose ? "--verbose" : "",
- ALL_DUMP_FILE, UTILITY_LOG_FILE);
+ GLOBALS_DUMP_FILE);
check_ok();
-}
-
-
-/*
- * split_old_dump
- *
- * This function splits pg_dumpall output into global values and
- * database creation, and per-db schemas. This allows us to create
- * the support functions between restoring these two parts of the
- * dump. We split on the first "\connect " after a CREATE ROLE
- * username match; this is where the per-db restore starts.
- *
- * We suppress recreation of our own username so we don't generate
- * an error during restore
- */
-void
-split_old_dump(void)
-{
- FILE *all_dump,
- *globals_dump,
- *db_dump;
- FILE *current_output;
- char line[LINE_ALLOC];
- bool start_of_line = true;
- char create_role_str[MAX_STRING];
- char create_role_str_quote[MAX_STRING];
- char filename[MAXPGPATH];
- bool suppressed_username = false;
- snprintf(filename, sizeof(filename), "%s", ALL_DUMP_FILE);
- if ((all_dump = fopen(filename, "r")) == NULL)
- pg_log(PG_FATAL, "Could not open dump file \"%s\": %s\n", filename, getErrorText(errno));
- snprintf(filename, sizeof(filename), "%s", GLOBALS_DUMP_FILE);
- if ((globals_dump = fopen_priv(filename, "w")) == NULL)
- pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n", filename, getErrorText(errno));
- snprintf(filename, sizeof(filename), "%s", DB_DUMP_FILE);
- if ((db_dump = fopen_priv(filename, "w")) == NULL)
- pg_log(PG_FATAL, "Could not write to dump file \"%s\": %s\n", filename, getErrorText(errno));
+ prep_status("Creating dump of database schemas\n");
- current_output = globals_dump;
-
- /* patterns used to prevent our own username from being recreated */
- snprintf(create_role_str, sizeof(create_role_str),
- "CREATE ROLE %s;", os_info.user);
- snprintf(create_role_str_quote, sizeof(create_role_str_quote),
- "CREATE ROLE %s;", quote_identifier(os_info.user));
+ /*
+ * Set umask for this function, all functions it calls, and all
+ * subprocesses/threads it creates. We can't use fopen_priv() as Windows
+ * uses threads and umask is process-global.
+ */
+ old_umask = umask(S_IRWXG | S_IRWXO);
- while (fgets(line, sizeof(line), all_dump) != NULL)
+ /* create per-db dump files */
+ for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
{
- /* switch to db_dump file output? */
- if (current_output == globals_dump && start_of_line &&
- suppressed_username &&
- strncmp(line, "\\connect ", strlen("\\connect ")) == 0)
- current_output = db_dump;
+ char sql_file_name[MAXPGPATH],
+ log_file_name[MAXPGPATH];
+ DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
- /* output unless we are recreating our own username */
- if (current_output != globals_dump || !start_of_line ||
- (strncmp(line, create_role_str, strlen(create_role_str)) != 0 &&
- strncmp(line, create_role_str_quote, strlen(create_role_str_quote)) != 0))
- fputs(line, current_output);
- else
- suppressed_username = true;
+ pg_log(PG_STATUS, "%s", old_db->db_name);
+ snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+ snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
- if (strlen(line) > 0 && line[strlen(line) - 1] == '\n')
- start_of_line = true;
- else
- start_of_line = false;
+ parallel_exec_prog(log_file_name, NULL,
+ "\"%s/pg_dump\" %s --schema-only --quote-all-identifiers "
+ "--binary-upgrade --format=custom %s --file=\"%s\" \"%s\"",
+ new_cluster.bindir, cluster_conn_opts(&old_cluster),
+ log_opts.verbose ? "--verbose" : "",
+ sql_file_name, old_db->db_name);
}
- fclose(all_dump);
- fclose(globals_dump);
- fclose(db_dump);
+ /* reap all children */
+ while (reap_child(true) == true)
+ ;
+
+ umask(old_umask);
+
+ end_progress_output();
+ check_ok();
}
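
generate_old_dump() now dispatches one pg_dump per database via parallel_exec_prog() and drains the workers with reap_child(). The following is only a simplified POSIX fork/wait sketch of that dispatch-then-reap shape, not pg_upgrade's implementation (which also handles Windows threads and a job limit); the database names and the printf stand in for real pg_dump invocations:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
    const char *dbs[] = {"postgres", "template1", "appdb"};   /* example names */
    int         i;

    /* dispatch: one child per database, each standing in for a pg_dump run */
    for (i = 0; i < 3; i++)
    {
        pid_t pid = fork();

        if (pid == 0)
        {
            printf("dumping %s\n", dbs[i]);
            _exit(0);
        }
        if (pid < 0)
        {
            perror("fork");
            exit(1);
        }
    }

    /* reap: wait for every child, analogous to the reap_child() loop */
    while (wait(NULL) > 0)
        ;
    return 0;
}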
diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c
index 9e63bd5856..6c217c902d 100644
--- a/contrib/pg_upgrade/exec.c
+++ b/contrib/pg_upgrade/exec.c
@@ -3,16 +3,15 @@
*
* execution functions
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/exec.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
#include <fcntl.h>
-#include <unistd.h>
#include <sys/types.h>
static void check_data_dir(const char *pg_data);
@@ -26,61 +25,161 @@ static int win32_check_directory_write_permissions(void);
/*
* exec_prog()
+ * Execute an external program with stdout/stderr redirected, and report
+ * errors
*
- * Formats a command from the given argument list and executes that
- * command. If the command executes, exec_prog() returns 1 otherwise
- * exec_prog() logs an error message and returns 0.
+ * Formats a command from the given argument list, logs it to the log file,
+ * and attempts to execute that command. If the command executes
+ * successfully, exec_prog() returns true.
*
- * If throw_error is TRUE, this function will throw a PG_FATAL error
- * instead of returning should an error occur.
+ * If the command fails, an error message is saved to the specified log_file.
+ * If throw_error is true, this raises a PG_FATAL error and pg_upgrade
+ * terminates; otherwise it is just reported as PG_REPORT and exec_prog()
+ * returns false.
+ *
+ * The code requires it be called first from the primary thread on Windows.
*/
-int
-exec_prog(bool throw_error, bool is_priv,
- const char *log_file, const char *fmt,...)
+bool
+exec_prog(const char *log_file, const char *opt_log_file,
+ bool throw_error, const char *fmt,...)
{
- va_list args;
- int result;
- char cmd[MAXPGPATH];
- mode_t old_umask = 0;
+ int result = 0;
+ int written;
+
+#define MAXCMDLEN (2 * MAXPGPATH)
+ char cmd[MAXCMDLEN];
+ FILE *log;
+ va_list ap;
+
+#ifdef WIN32
+ static DWORD mainThreadId = 0;
- if (is_priv)
- old_umask = umask(S_IRWXG | S_IRWXO);
+ /* We assume we are called from the primary thread first */
+ if (mainThreadId == 0)
+ mainThreadId = GetCurrentThreadId();
+#endif
- va_start(args, fmt);
- vsnprintf(cmd, MAXPGPATH, fmt, args);
- va_end(args);
+ written = 0;
+ va_start(ap, fmt);
+ written += vsnprintf(cmd + written, MAXCMDLEN - written, fmt, ap);
+ va_end(ap);
+ if (written >= MAXCMDLEN)
+ pg_fatal("command too long\n");
+ written += snprintf(cmd + written, MAXCMDLEN - written,
+ " >> \"%s\" 2>&1", log_file);
+ if (written >= MAXCMDLEN)
+ pg_fatal("command too long\n");
pg_log(PG_VERBOSE, "%s\n", cmd);
- result = system(cmd);
+#ifdef WIN32
- if (is_priv)
- umask(old_umask);
+ /*
+ * For some reason, Windows issues a file-in-use error if we write data to
+ * the log file from a non-primary thread just before we create a
+ * subprocess that also writes to the same log file. One fix is to sleep
+ * for 100ms. A cleaner fix is to write to the log file _after_ the
+ * subprocess has completed, so we do this only when writing from a
+ * non-primary thread. fflush(), running system() twice, and pre-creating
+ * the file do not see to help.
+ */
+ if (mainThreadId != GetCurrentThreadId())
+ result = system(cmd);
+#endif
+
+ log = fopen(log_file, "a");
+
+#ifdef WIN32
+ {
+ /*
+ * "pg_ctl -w stop" might have reported that the server has stopped
+ * because the postmaster.pid file has been removed, but "pg_ctl -w
+ * start" might still be in the process of closing and might still be
+ * holding its stdout and -l log file descriptors open. Therefore,
+ * try to open the log file a few more times.
+ */
+ int iter;
+
+ for (iter = 0; iter < 4 && log == NULL; iter++)
+ {
+ pg_usleep(1000000); /* 1 sec */
+ log = fopen(log_file, "a");
+ }
+ }
+#endif
+
+ if (log == NULL)
+ pg_fatal("cannot write to log file %s\n", log_file);
+
+#ifdef WIN32
+ /* Are we printing "command:" before its output? */
+ if (mainThreadId == GetCurrentThreadId())
+ fprintf(log, "\n\n");
+#endif
+ fprintf(log, "command: %s\n", cmd);
+#ifdef WIN32
+ /* Are we printing "command:" after its output? */
+ if (mainThreadId != GetCurrentThreadId())
+ fprintf(log, "\n\n");
+#endif
+
+ /*
+ * In Windows, we must close the log file at this point so the file is not
+ * open while the command is running, or we get a share violation.
+ */
+ fclose(log);
+
+#ifdef WIN32
+ /* see comment above */
+ if (mainThreadId == GetCurrentThreadId())
+#endif
+ result = system(cmd);
if (result != 0)
{
- report_status(PG_REPORT, "*failure*");
+ /* we might be in on a progress status line, so go to the next line */
+ report_status(PG_REPORT, "\n*failure*");
fflush(stdout);
+
pg_log(PG_VERBOSE, "There were problems executing \"%s\"\n", cmd);
- pg_log(throw_error ? PG_FATAL : PG_REPORT,
- "Consult the last few lines of \"%s\" for\n"
- "the probable cause of the failure.\n",
- log_file);
- return 1;
+ if (opt_log_file)
+ pg_log(throw_error ? PG_FATAL : PG_REPORT,
+ "Consult the last few lines of \"%s\" or \"%s\" for\n"
+ "the probable cause of the failure.\n",
+ log_file, opt_log_file);
+ else
+ pg_log(throw_error ? PG_FATAL : PG_REPORT,
+ "Consult the last few lines of \"%s\" for\n"
+ "the probable cause of the failure.\n",
+ log_file);
}
- return 0;
+#ifndef WIN32
+
+ /*
+ * We can't do this on Windows because it will keep the "pg_ctl start"
+ * output filename open until the server stops, so we do the \n\n above on
+ * that platform. We use a unique filename for "pg_ctl start" that is
+ * never reused while the server is running, so it works fine. We could
+ * log these commands to a third file, but that just adds complexity.
+ */
+ if ((log = fopen(log_file, "a")) == NULL)
+ pg_fatal("cannot write to log file %s\n", log_file);
+ fprintf(log, "\n\n");
+ fclose(log);
+#endif
+
+ return result == 0;
}
/*
- * is_server_running()
+ * pid_lock_file_exists()
*
- * checks whether postmaster on the given data directory is running or not.
- * The check is performed by looking for the existence of postmaster.pid file.
+ * Checks whether the postmaster.pid file exists.
*/
bool
-is_server_running(const char *datadir)
+pid_lock_file_exists(const char *datadir)
{
char path[MAXPGPATH];
int fd;
@@ -91,8 +190,8 @@ is_server_running(const char *datadir)
{
/* ENOTDIR means we will throw a more useful error later */
if (errno != ENOENT && errno != ENOTDIR)
- pg_log(PG_FATAL, "could not open file \"%s\" for reading: %s\n",
- path, getErrorText(errno));
+ pg_fatal("could not open file \"%s\" for reading: %s\n",
+ path, getErrorText(errno));
return false;
}
@@ -113,22 +212,17 @@ is_server_running(const char *datadir)
void
verify_directories(void)
{
-
- prep_status("Checking current, bin, and data directories");
-
#ifndef WIN32
if (access(".", R_OK | W_OK | X_OK) != 0)
#else
if (win32_check_directory_write_permissions() != 0)
#endif
- pg_log(PG_FATAL,
- "You must have read and write access in the current directory.\n");
+ pg_fatal("You must have read and write access in the current directory.\n");
check_bin_dir(&old_cluster);
check_data_dir(old_cluster.pgdata);
check_bin_dir(&new_cluster);
check_data_dir(new_cluster.pgdata);
- check_ok();
}
@@ -146,7 +240,7 @@ win32_check_directory_write_permissions(void)
int fd;
/*
- * We open a file we would normally create anyway. We do this even in
+ * We open a file we would normally create anyway. We do this even in
* 'check' mode, which isn't ideal, but this is the best we can do.
*/
if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0)
@@ -163,7 +257,7 @@ win32_check_directory_write_permissions(void)
*
* This function validates the given cluster directory - we search for a
* small set of subdirectories that we expect to find in a valid $PGDATA
- * directory. If any of the subdirectories are missing (or secured against
+ * directory. If any of the subdirectories are missing (or secured against
* us) we display an error message and exit()
*
*/
@@ -203,7 +297,7 @@ check_data_dir(const char *pg_data)
* check_bin_dir()
*
* This function searches for the executables that we expect to find
- * in the binaries directory. If we find that a required executable
+ * in the binaries directory. If we find that a required executable
* is missing (or secured against us), we display an error message and
* exit().
*/
@@ -226,7 +320,6 @@ check_bin_dir(ClusterInfo *cluster)
if (cluster == &new_cluster)
{
/* these are only needed in the new cluster */
- validate_exec(cluster->bindir, "pg_config");
validate_exec(cluster->bindir, "psql");
validate_exec(cluster->bindir, "pg_dumpall");
}
@@ -257,11 +350,11 @@ validate_exec(const char *dir, const char *cmdName)
* Ensure that the file exists and is a regular file.
*/
if (stat(path, &buf) < 0)
- pg_log(PG_FATAL, "check for \"%s\" failed: %s\n",
- path, getErrorText(errno));
+ pg_fatal("check for \"%s\" failed: %s\n",
+ path, getErrorText(errno));
else if (!S_ISREG(buf.st_mode))
- pg_log(PG_FATAL, "check for \"%s\" failed: not an executable file\n",
- path);
+ pg_fatal("check for \"%s\" failed: not an executable file\n",
+ path);
/*
* Ensure that the file is both executable and readable (required for
@@ -272,14 +365,14 @@ validate_exec(const char *dir, const char *cmdName)
#else
if ((buf.st_mode & S_IRUSR) == 0)
#endif
- pg_log(PG_FATAL, "check for \"%s\" failed: cannot read file (permission denied)\n",
- path);
+ pg_fatal("check for \"%s\" failed: cannot read file (permission denied)\n",
+ path);
#ifndef WIN32
if (access(path, X_OK) != 0)
#else
if ((buf.st_mode & S_IXUSR) == 0)
#endif
- pg_log(PG_FATAL, "check for \"%s\" failed: cannot execute (permission denied)\n",
- path);
+ pg_fatal("check for \"%s\" failed: cannot execute (permission denied)\n",
+ path);
}
diff --git a/contrib/pg_upgrade/file.c b/contrib/pg_upgrade/file.c
index 1dd3722142..ab9d1edcb6 100644
--- a/contrib/pg_upgrade/file.c
+++ b/contrib/pg_upgrade/file.c
@@ -3,11 +3,11 @@
*
* file system operations
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/file.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -103,10 +103,10 @@ copyAndUpdateFile(pageCnvCtx *pageConverter,
/*
* linkAndUpdateFile()
*
- * Creates a symbolic link between the given relation files. We use
+ * Creates a hard link between the given relation files. We use
* this function to perform a true in-place update. If the on-disk
* format of the new cluster is bit-for-bit compatible with the on-disk
- * format of the old cluster, we can simply symlink each relation
+ * format of the old cluster, we can simply link each relation
* instead of copying the data from the old cluster to the new cluster.
*/
const char *
@@ -127,40 +127,36 @@ linkAndUpdateFile(pageCnvCtx *pageConverter,
static int
copy_file(const char *srcfile, const char *dstfile, bool force)
{
-
#define COPY_BUF_SIZE (50 * BLCKSZ)
int src_fd;
int dest_fd;
char *buffer;
+ int ret = 0;
+ int save_errno = 0;
if ((srcfile == NULL) || (dstfile == NULL))
+ {
+ errno = EINVAL;
return -1;
+ }
if ((src_fd = open(srcfile, O_RDONLY, 0)) < 0)
return -1;
if ((dest_fd = open(dstfile, O_RDWR | O_CREAT | (force ? 0 : O_EXCL), S_IRUSR | S_IWUSR)) < 0)
{
- if (src_fd != 0)
- close(src_fd);
+ save_errno = errno;
- return -1;
- }
-
- buffer = (char *) malloc(COPY_BUF_SIZE);
-
- if (buffer == NULL)
- {
if (src_fd != 0)
close(src_fd);
- if (dest_fd != 0)
- close(dest_fd);
-
+ errno = save_errno;
return -1;
}
+ buffer = (char *) pg_malloc(COPY_BUF_SIZE);
+
/* perform data copying, i.e., read from the source and write to the destination */
while (true)
{
@@ -168,19 +164,9 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
if (nbytes < 0)
{
- int save_errno = errno;
-
- if (buffer != NULL)
- free(buffer);
-
- if (src_fd != 0)
- close(src_fd);
-
- if (dest_fd != 0)
- close(dest_fd);
-
- errno = save_errno;
- return -1;
+ save_errno = errno;
+ ret = -1;
+ break;
}
if (nbytes == 0)
@@ -191,24 +177,15 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
if (write(dest_fd, buffer, nbytes) != nbytes)
{
/* if write didn't set errno, assume problem is no disk space */
- int save_errno = errno ? errno : ENOSPC;
-
- if (buffer != NULL)
- free(buffer);
-
- if (src_fd != 0)
- close(src_fd);
-
- if (dest_fd != 0)
- close(dest_fd);
-
- errno = save_errno;
- return -1;
+ if (errno == 0)
+ errno = ENOSPC;
+ save_errno = errno;
+ ret = -1;
+ break;
}
}
- if (buffer != NULL)
- free(buffer);
+ pg_free(buffer);
if (src_fd != 0)
close(src_fd);
@@ -216,69 +193,12 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
if (dest_fd != 0)
close(dest_fd);
- return 1;
-}
-#endif
+ if (save_errno != 0)
+ errno = save_errno;
-
-/*
- * load_directory()
- *
- * Returns count of files that meet the selection criteria coded in
- * the function pointed to by selector. Creates an array of pointers
- * to dirent structures. Address of array returned in namelist.
- *
- * Note that the number of dirent structures needed is dynamically
- * allocated using realloc. Realloc can be inefficient if invoked a
- * large number of times.
- */
-int
-load_directory(const char *dirname, struct dirent *** namelist)
-{
- DIR *dirdesc;
- struct dirent *direntry;
- int count = 0;
- int name_num = 0;
- size_t entrysize;
-
- if ((dirdesc = opendir(dirname)) == NULL)
- pg_log(PG_FATAL, "could not open directory \"%s\": %s\n", dirname, getErrorText(errno));
-
- *namelist = NULL;
-
- while ((direntry = readdir(dirdesc)) != NULL)
- {
- count++;
-
- *namelist = (struct dirent **) realloc((void *) (*namelist),
- (size_t) ((name_num + 1) * sizeof(struct dirent *)));
-
- if (*namelist == NULL)
- {
- closedir(dirdesc);
- return -1;
- }
-
- entrysize = sizeof(struct dirent) - sizeof(direntry->d_name) +
- strlen(direntry->d_name) + 1;
-
- (*namelist)[name_num] = (struct dirent *) malloc(entrysize);
-
- if ((*namelist)[name_num] == NULL)
- {
- closedir(dirdesc);
- return -1;
- }
-
- memcpy((*namelist)[name_num], direntry, entrysize);
-
- name_num++;
- }
-
- closedir(dirdesc);
-
- return count;
+ return ret;
}
+#endif
void
@@ -293,10 +213,9 @@ check_hard_link(void)
if (pg_link_file(existing_file, new_link_file) == -1)
{
- pg_log(PG_FATAL,
- "Could not create hard link between old and new data directories: %s\n"
- "In link mode the old and new data directories must be on the same file system volume.\n",
- getErrorText(errno));
+ pg_fatal("Could not create hard link between old and new data directories: %s\n"
+ "In link mode the old and new data directories must be on the same file system volume.\n",
+ getErrorText(errno));
}
unlink(new_link_file);
}
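
The rewritten copy_file() above drops the per-error cleanup blocks in favour of remembering errno and breaking to a single exit path. A minimal sketch of that shape, with hypothetical file names and a plain byte-copy loop instead of pg_upgrade's page-conversion-aware copy:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch of the "save errno, break to one cleanup path" shape used by the
 * new copy_file(); not the real function. */
static int
copy_sketch(const char *src, const char *dst)
{
    char    buf[8192];
    ssize_t n;
    int     ret = 0, save_errno = 0;
    int     in, out;

    if ((in = open(src, O_RDONLY)) < 0)
        return -1;
    if ((out = open(dst, O_RDWR | O_CREAT | O_EXCL, 0600)) < 0)
    {
        save_errno = errno;
        close(in);
        errno = save_errno;
        return -1;
    }

    while ((n = read(in, buf, sizeof(buf))) != 0)
    {
        if (n < 0 || write(out, buf, (size_t) n) != n)
        {
            /* if write failed without setting errno, assume out of space */
            save_errno = errno ? errno : ENOSPC;
            ret = -1;
            break;
        }
    }

    close(in);
    close(out);
    if (save_errno != 0)
        errno = save_errno;     /* report the first failure to the caller */
    return ret;
}

int
main(void)
{
    /* hypothetical file names, for illustration only */
    if (copy_sketch("old_relfile", "new_relfile") != 0)
        perror("copy_sketch");
    return 0;
}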
diff --git a/contrib/pg_upgrade/function.c b/contrib/pg_upgrade/function.c
index 77bd3a0359..f2cd4716c7 100644
--- a/contrib/pg_upgrade/function.c
+++ b/contrib/pg_upgrade/function.c
@@ -3,11 +3,11 @@
*
* server-side function support
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/function.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -193,7 +193,7 @@ get_loadable_libraries(void)
"in the \"pg_catalog\" schema. You can confirm this by executing\n"
"in psql:\n"
"\n"
- " \\df *.plpython_call_handler\n"
+ " \\df *.plpython_call_handler\n"
"\n"
"The \"public\" schema version of this function was created by a\n"
"pre-8.1 install of plpython, and must be removed for pg_upgrade\n"
@@ -201,12 +201,12 @@ get_loadable_libraries(void)
"shared object file. You can remove the \"public\" schema version\n"
"of this function by running the following command:\n"
"\n"
- " DROP FUNCTION public.plpython_call_handler()\n"
+ " DROP FUNCTION public.plpython_call_handler()\n"
"\n"
"in each affected database:\n"
"\n");
}
- pg_log(PG_WARNING, " %s\n", active_db->db_name);
+ pg_log(PG_WARNING, " %s\n", active_db->db_name);
found_public_plpython_handler = true;
}
PQclear(res);
@@ -216,8 +216,7 @@ get_loadable_libraries(void)
}
if (found_public_plpython_handler)
- pg_log(PG_FATAL,
- "Remove the problem functions from the old cluster to continue.\n");
+ pg_fatal("Remove the problem functions from the old cluster to continue.\n");
totaltups++; /* reserve for pg_upgrade_support */
@@ -297,7 +296,7 @@ check_loadable_libraries(void)
* plpython2u language was created with library name plpython2.so as a
* symbolic link to plpython.so. In Postgres 9.1, only the
* plpython2.so library was created, and both plpythonu and plpython2u
- * pointing to it. For this reason, any reference to library name
+ * pointing to it. For this reason, any reference to library name
* "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
* the new cluster.
*
@@ -324,12 +323,11 @@ check_loadable_libraries(void)
/* exit and report missing support library with special message */
if (strcmp(lib, PG_UPGRADE_SUPPORT) == 0)
- pg_log(PG_FATAL,
- "The pg_upgrade_support module must be created and installed in the new cluster.\n");
+ pg_fatal("The pg_upgrade_support module must be created and installed in the new cluster.\n");
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ pg_fatal("Could not open file \"%s\": %s\n",
+ output_path, getErrorText(errno));
fprintf(script, "Could not load library \"%s\"\n%s\n",
lib,
PQerrorMessage(conn));
@@ -344,12 +342,11 @@ check_loadable_libraries(void)
{
fclose(script);
pg_log(PG_REPORT, "fatal\n");
- pg_log(PG_FATAL,
- "Your installation references loadable libraries that are missing from the\n"
- "new installation. You can add these libraries to the new installation,\n"
- "or remove the functions using them from the old installation. A list of\n"
- "problem libraries is in the file:\n"
- " %s\n\n", output_path);
+ pg_fatal("Your installation references loadable libraries that are missing from the\n"
+ "new installation. You can add these libraries to the new installation,\n"
+ "or remove the functions using them from the old installation. A list of\n"
+ "problem libraries is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
diff --git a/contrib/pg_upgrade/info.c b/contrib/pg_upgrade/info.c
index 74b13e782d..d2968b479a 100644
--- a/contrib/pg_upgrade/info.c
+++ b/contrib/pg_upgrade/info.c
@@ -3,11 +3,11 @@
*
* information support functions
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/info.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -18,11 +18,12 @@ static void create_rel_filename_map(const char *old_data, const char *new_data,
const DbInfo *old_db, const DbInfo *new_db,
const RelInfo *old_rel, const RelInfo *new_rel,
FileNameMap *map);
+static void free_db_and_rel_infos(DbInfoArr *db_arr);
static void get_db_infos(ClusterInfo *cluster);
static void get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo);
static void free_rel_infos(RelInfoArr *rel_arr);
static void print_db_infos(DbInfoArr *dbinfo);
-static void print_rel_infos(RelInfoArr *arr);
+static void print_rel_infos(RelInfoArr *rel_arr);
/*
@@ -40,21 +41,18 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
int relnum;
int num_maps = 0;
- if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
- pg_log(PG_FATAL, "old and new databases \"%s\" have a different number of relations\n",
- old_db->db_name);
-
maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) *
old_db->rel_arr.nrels);
- for (relnum = 0; relnum < old_db->rel_arr.nrels; relnum++)
+ for (relnum = 0; relnum < Min(old_db->rel_arr.nrels, new_db->rel_arr.nrels);
+ relnum++)
{
RelInfo *old_rel = &old_db->rel_arr.rels[relnum];
RelInfo *new_rel = &new_db->rel_arr.rels[relnum];
if (old_rel->reloid != new_rel->reloid)
- pg_log(PG_FATAL, "Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n",
- old_db->db_name, old_rel->reloid, new_rel->reloid);
+ pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n",
+ old_db->db_name, old_rel->reloid, new_rel->reloid);
/*
* TOAST table names initially match the heap pg_class oid. In
@@ -62,22 +60,32 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
* table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >=
* 9.0, TOAST relation names always use heap table oids, hence we
* cannot check relation names when upgrading from pre-9.0. Clusters
- * upgraded to 9.0 will get matching TOAST names.
+ * upgraded to 9.0 will get matching TOAST names. If index names don't
+ * match primary key constraint names, this will fail because pg_dump
+ * dumps constraint names and pg_upgrade checks index names.
*/
if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
strcmp(old_rel->nspname, "pg_toast") != 0) &&
strcmp(old_rel->relname, new_rel->relname) != 0))
- pg_log(PG_FATAL, "Mismatch of relation names in database \"%s\": "
- "old name \"%s.%s\", new name \"%s.%s\"\n",
- old_db->db_name, old_rel->nspname, old_rel->relname,
- new_rel->nspname, new_rel->relname);
+ pg_fatal("Mismatch of relation names in database \"%s\": "
+ "old name \"%s.%s\", new name \"%s.%s\"\n",
+ old_db->db_name, old_rel->nspname, old_rel->relname,
+ new_rel->nspname, new_rel->relname);
create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db,
old_rel, new_rel, maps + num_maps);
num_maps++;
}
+ /*
+ * Do this check after the loop so hopefully we will produce a clearer
+ * error above
+ */
+ if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
+ pg_fatal("old and new databases \"%s\" have a different number of relations\n",
+ old_db->db_name);
+
*nmaps = num_maps;
return maps;
}
@@ -100,20 +108,23 @@ create_rel_filename_map(const char *old_data, const char *new_data,
* relation belongs to the default tablespace, hence relfiles should
* exist in the data directories.
*/
- snprintf(map->old_dir, sizeof(map->old_dir), "%s/base/%u", old_data,
- old_db->db_oid);
- snprintf(map->new_dir, sizeof(map->new_dir), "%s/base/%u", new_data,
- new_db->db_oid);
+ map->old_tablespace = old_data;
+ map->new_tablespace = new_data;
+ map->old_tablespace_suffix = "/base";
+ map->new_tablespace_suffix = "/base";
}
else
{
/* relation belongs to a tablespace, so use the tablespace location */
- snprintf(map->old_dir, sizeof(map->old_dir), "%s%s/%u", old_rel->tablespace,
- old_cluster.tablespace_suffix, old_db->db_oid);
- snprintf(map->new_dir, sizeof(map->new_dir), "%s%s/%u", new_rel->tablespace,
- new_cluster.tablespace_suffix, new_db->db_oid);
+ map->old_tablespace = old_rel->tablespace;
+ map->new_tablespace = new_rel->tablespace;
+ map->old_tablespace_suffix = old_cluster.tablespace_suffix;
+ map->new_tablespace_suffix = new_cluster.tablespace_suffix;
}
+ map->old_db_oid = old_db->db_oid;
+ map->new_db_oid = new_db->db_oid;
+
/*
* old_relfilenode might differ from pg_class.oid (and hence
* new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL.
@@ -124,8 +135,8 @@ create_rel_filename_map(const char *old_data, const char *new_data,
map->new_relfilenode = new_rel->relfilenode;
/* used only for logging and error reporting, old/new are identical */
- snprintf(map->nspname, sizeof(map->nspname), "%s", old_rel->nspname);
- snprintf(map->relname, sizeof(map->relname), "%s", old_rel->relname);
+ map->nspname = old_rel->nspname;
+ map->relname = old_rel->relname;
}
@@ -217,9 +228,8 @@ get_db_infos(ClusterInfo *cluster)
for (tupnum = 0; tupnum < ntups; tupnum++)
{
dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid));
- snprintf(dbinfos[tupnum].db_name, sizeof(dbinfos[tupnum].db_name), "%s",
- PQgetvalue(res, tupnum, i_datname));
- snprintf(dbinfos[tupnum].db_tblspace, sizeof(dbinfos[tupnum].db_tblspace), "%s",
+ dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname));
+ snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s",
PQgetvalue(res, tupnum, i_spclocation));
}
PQclear(res);
@@ -252,6 +262,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
int num_rels = 0;
char *nspname = NULL;
char *relname = NULL;
+ char *tablespace = NULL;
int i_spclocation,
i_nspname,
i_relname,
@@ -259,6 +270,8 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
i_relfilenode,
i_reltablespace;
char query[QUERY_ALLOC];
+ char *last_namespace = NULL,
+ *last_tablespace = NULL;
/*
* pg_largeobject contains user data that does not appear in pg_dumpall
@@ -269,34 +282,75 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
*/
snprintf(query, sizeof(query),
- "SELECT c.oid, n.nspname, c.relname, "
- " c.relfilenode, c.reltablespace, %s "
+ "CREATE TEMPORARY TABLE info_rels (reloid) AS SELECT c.oid "
"FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n "
" ON c.relnamespace = n.oid "
- " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
- " ON c.reltablespace = t.oid "
- "WHERE relkind IN ('r','t', 'i'%s) AND "
+ "LEFT OUTER JOIN pg_catalog.pg_index i "
+ " ON c.oid = i.indexrelid "
+ "WHERE relkind IN ('r', 'm', 'i'%s) AND "
+
+ /*
+ * pg_dump only dumps valid indexes; testing indisready is necessary in
+ * 9.2, and harmless in earlier/later versions.
+ */
+ " i.indisvalid IS DISTINCT FROM false AND "
+ " i.indisready IS DISTINCT FROM false AND "
/* exclude possible orphaned temp tables */
" ((n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
- " n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND "
+	/* skip pg_toast because toast indexes have relkind == 'i', not 't' */
+ " n.nspname NOT IN ('pg_catalog', 'information_schema', "
+ " 'binary_upgrade', 'pg_toast') AND "
" c.oid >= %u) "
" OR (n.nspname = 'pg_catalog' AND "
- " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) "
- /* we preserve pg_class.oid so we sort by it to match old/new */
- "ORDER BY 1;",
- /* 9.2 removed the spclocation column */
- (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
- "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation",
+ " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ));",
/* see the comment at the top of old_8_3_create_sequence_script() */
(GET_MAJOR_VERSION(old_cluster.major_version) <= 803) ?
"" : ", 'S'",
- /* this oid allows us to skip system toast tables */
FirstNormalObjectId,
/* does pg_largeobject_metadata need to be migrated? */
(GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
"" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'");
+ PQclear(executeQueryOrDie(conn, "%s", query));
+
+ /*
+ * Get TOAST tables and indexes; we have to gather the TOAST tables in
+ * later steps because we can't schema-qualify TOAST tables.
+ */
+ PQclear(executeQueryOrDie(conn,
+ "INSERT INTO info_rels "
+ "SELECT reltoastrelid "
+ "FROM info_rels i JOIN pg_catalog.pg_class c "
+ " ON i.reloid = c.oid "
+ " AND c.reltoastrelid != %u", InvalidOid));
+ PQclear(executeQueryOrDie(conn,
+ "INSERT INTO info_rels "
+ "SELECT indexrelid "
+ "FROM pg_index "
+ "WHERE indisvalid "
+ " AND indrelid IN (SELECT reltoastrelid "
+ " FROM info_rels i "
+ " JOIN pg_catalog.pg_class c "
+ " ON i.reloid = c.oid "
+ " AND c.reltoastrelid != %u)",
+ InvalidOid));
+
+ snprintf(query, sizeof(query),
+ "SELECT c.oid, n.nspname, c.relname, "
+ " c.relfilenode, c.reltablespace, %s "
+ "FROM info_rels i JOIN pg_catalog.pg_class c "
+ " ON i.reloid = c.oid "
+ " JOIN pg_catalog.pg_namespace n "
+ " ON c.relnamespace = n.oid "
+ " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
+ " ON c.reltablespace = t.oid "
+ /* we preserve pg_class.oid so we sort by it to match old/new */
+ "ORDER BY 1;",
+ /* 9.2 removed the spclocation column */
+ (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+ "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
+
res = executeQueryOrDie(conn, "%s", query);
ntups = PQntuples(res);
@@ -313,26 +367,53 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
for (relnum = 0; relnum < ntups; relnum++)
{
RelInfo *curr = &relinfos[num_rels++];
- const char *tblspace;
curr->reloid = atooid(PQgetvalue(res, relnum, i_oid));
nspname = PQgetvalue(res, relnum, i_nspname);
- strlcpy(curr->nspname, nspname, sizeof(curr->nspname));
+ curr->nsp_alloc = false;
+
+ /*
+ * Many of the namespace and tablespace strings are identical, so we
+ * try to reuse the allocated string pointers where possible to reduce
+ * memory consumption.
+ */
+ /* Can we reuse the previous string allocation? */
+ if (last_namespace && strcmp(nspname, last_namespace) == 0)
+ curr->nspname = last_namespace;
+ else
+ {
+ last_namespace = curr->nspname = pg_strdup(nspname);
+ curr->nsp_alloc = true;
+ }
relname = PQgetvalue(res, relnum, i_relname);
- strlcpy(curr->relname, relname, sizeof(curr->relname));
+ curr->relname = pg_strdup(relname);
curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
+ curr->tblsp_alloc = false;
+ /* Is the tablespace oid non-zero? */
if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
- /* Might be "", meaning the cluster default location. */
- tblspace = PQgetvalue(res, relnum, i_spclocation);
+ {
+ /*
+ * The tablespace location might be "", meaning the cluster
+ * default location, i.e. pg_default or pg_global.
+ */
+ tablespace = PQgetvalue(res, relnum, i_spclocation);
+
+ /* Can we reuse the previous string allocation? */
+ if (last_tablespace && strcmp(tablespace, last_tablespace) == 0)
+ curr->tablespace = last_tablespace;
+ else
+ {
+ last_tablespace = curr->tablespace = pg_strdup(tablespace);
+ curr->tblsp_alloc = true;
+ }
+ }
else
- /* A zero reltablespace indicates the database tablespace. */
- tblspace = dbinfo->db_tblspace;
-
- strlcpy(curr->tablespace, tblspace, sizeof(curr->tablespace));
+ /* A zero reltablespace oid indicates the database tablespace. */
+ curr->tablespace = dbinfo->db_tablespace;
}
PQclear(res);
@@ -343,13 +424,16 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
}
-void
+static void
free_db_and_rel_infos(DbInfoArr *db_arr)
{
int dbnum;
for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++)
+ {
free_rel_infos(&db_arr->dbs[dbnum].rel_arr);
+ pg_free(db_arr->dbs[dbnum].db_name);
+ }
pg_free(db_arr->dbs);
db_arr->dbs = NULL;
db_arr->ndbs = 0;
@@ -359,6 +443,16 @@ free_db_and_rel_infos(DbInfoArr *db_arr)
static void
free_rel_infos(RelInfoArr *rel_arr)
{
+ int relnum;
+
+ for (relnum = 0; relnum < rel_arr->nrels; relnum++)
+ {
+ if (rel_arr->rels[relnum].nsp_alloc)
+ pg_free(rel_arr->rels[relnum].nspname);
+ pg_free(rel_arr->rels[relnum].relname);
+ if (rel_arr->rels[relnum].tblsp_alloc)
+ pg_free(rel_arr->rels[relnum].tablespace);
+ }
pg_free(rel_arr->rels);
rel_arr->nrels = 0;
}
@@ -379,12 +473,14 @@ print_db_infos(DbInfoArr *db_arr)
static void
-print_rel_infos(RelInfoArr *arr)
+print_rel_infos(RelInfoArr *rel_arr)
{
int relnum;
- for (relnum = 0; relnum < arr->nrels; relnum++)
+ for (relnum = 0; relnum < rel_arr->nrels; relnum++)
pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n",
- arr->rels[relnum].nspname, arr->rels[relnum].relname,
- arr->rels[relnum].reloid, arr->rels[relnum].tablespace);
+ rel_arr->rels[relnum].nspname,
+ rel_arr->rels[relnum].relname,
+ rel_arr->rels[relnum].reloid,
+ rel_arr->rels[relnum].tablespace);
}
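
A minimal standalone sketch of the consecutive-duplicate string reuse that
get_rel_infos() performs above; intern_consecutive() and the sample data are
illustrative only, not pg_upgrade code (previous allocations are simply
leaked here, whereas the real code keeps them in RelInfo and frees them via
the nsp_alloc/tblsp_alloc flags):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reuse the previous allocation when consecutive values are identical. */
static const char *
intern_consecutive(const char *s, char **last, bool *alloced)
{
    if (*last && strcmp(s, *last) == 0)
    {
        *alloced = false;       /* caller must not free this pointer */
        return *last;
    }
    *last = strdup(s);
    *alloced = true;            /* caller owns this allocation */
    return *last;
}

int
main(void)
{
    const char *nspnames[] = {"public", "public", "public", "pg_toast"};
    char       *last = NULL;
    int         i;

    for (i = 0; i < 4; i++)
    {
        bool        alloced;
        const char *nsp = intern_consecutive(nspnames[i], &last, &alloced);

        printf("%s %s\n", nsp, alloced ? "(new allocation)" : "(reused)");
    }
    return 0;
}
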
diff --git a/contrib/pg_upgrade/option.c b/contrib/pg_upgrade/option.c
index ccf00434d3..b81010a813 100644
--- a/contrib/pg_upgrade/option.c
+++ b/contrib/pg_upgrade/option.c
@@ -3,26 +3,28 @@
*
* options functions
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/option.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
+
+#include "miscadmin.h"
+#include "getopt_long.h"
#include "pg_upgrade.h"
-#include <getopt_long.h>
#include <time.h>
#include <sys/types.h>
-#include <sys/stat.h>
#ifdef WIN32
#include <io.h>
#endif
static void usage(void);
-static void check_required_directory(char **dirpath,
+static void check_required_directory(char **dirpath, char **configpath,
char *envVarName, char *cmdLineOption, char *description);
+#define FIX_DEFAULT_READ_ONLY "-c default_transaction_read_only=false"
UserOpts user_opts;
@@ -46,10 +48,11 @@ parseCommandLine(int argc, char *argv[])
{"old-port", required_argument, NULL, 'p'},
{"new-port", required_argument, NULL, 'P'},
- {"user", required_argument, NULL, 'u'},
+ {"username", required_argument, NULL, 'U'},
{"check", no_argument, NULL, 'c'},
{"link", no_argument, NULL, 'k'},
{"retain", no_argument, NULL, 'r'},
+ {"jobs", required_argument, NULL, 'j'},
{"verbose", no_argument, NULL, 'v'},
{NULL, 0, NULL, 0}
};
@@ -79,8 +82,7 @@ parseCommandLine(int argc, char *argv[])
if (argc > 1)
{
- if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0 ||
- strcmp(argv[1], "-?") == 0)
+ if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
usage();
exit(0);
@@ -94,12 +96,12 @@ parseCommandLine(int argc, char *argv[])
/* Allow help and version to be run as root, so do the test here. */
if (os_user_effective_id == 0)
- pg_log(PG_FATAL, "%s: cannot be run as root\n", os_info.progname);
+ pg_fatal("%s: cannot be run as root\n", os_info.progname);
if ((log_opts.internal = fopen_priv(INTERNAL_LOG_FILE, "a")) == NULL)
- pg_log(PG_FATAL, "cannot write to log file %s\n", INTERNAL_LOG_FILE);
+ pg_fatal("cannot write to log file %s\n", INTERNAL_LOG_FILE);
- while ((option = getopt_long(argc, argv, "d:D:b:B:cko:O:p:P:ru:v",
+ while ((option = getopt_long(argc, argv, "d:D:b:B:cj:ko:O:p:P:rU:v",
long_options, &optindex)) != -1)
{
switch (option)
@@ -126,6 +128,10 @@ parseCommandLine(int argc, char *argv[])
new_cluster.pgconfig = pg_strdup(optarg);
break;
+ case 'j':
+ user_opts.jobs = atoi(optarg);
+ break;
+
case 'k':
user_opts.transfer_mode = TRANSFER_MODE_LINK;
break;
@@ -146,7 +152,7 @@ parseCommandLine(int argc, char *argv[])
case 'p':
if ((old_cluster.port = atoi(optarg)) <= 0)
{
- pg_log(PG_FATAL, "invalid old port number\n");
+ pg_fatal("invalid old port number\n");
exit(1);
}
break;
@@ -154,7 +160,7 @@ parseCommandLine(int argc, char *argv[])
case 'P':
if ((new_cluster.port = atoi(optarg)) <= 0)
{
- pg_log(PG_FATAL, "invalid new port number\n");
+ pg_fatal("invalid new port number\n");
exit(1);
}
break;
@@ -163,9 +169,10 @@ parseCommandLine(int argc, char *argv[])
log_opts.retain = true;
break;
- case 'u':
+ case 'U':
pg_free(os_info.user);
os_info.user = pg_strdup(optarg);
+ os_info.user_specified = true;
/*
* Push the user name into the environment so pre-9.1
@@ -180,9 +187,8 @@ parseCommandLine(int argc, char *argv[])
break;
default:
- pg_log(PG_FATAL,
- "Try \"%s --help\" for more information.\n",
- os_info.progname);
+ pg_fatal("Try \"%s --help\" for more information.\n",
+ os_info.progname);
break;
}
}
@@ -191,7 +197,7 @@ parseCommandLine(int argc, char *argv[])
for (filename = output_files; *filename != NULL; filename++)
{
if ((fp = fopen_priv(*filename, "a")) == NULL)
- pg_log(PG_FATAL, "cannot write to log file %s\n", *filename);
+ pg_fatal("cannot write to log file %s\n", *filename);
/* Start with newline because we might be appending to a file. */
fprintf(fp, "\n"
@@ -202,15 +208,27 @@ parseCommandLine(int argc, char *argv[])
fclose(fp);
}
+ /* Turn off read-only mode; add prefix to PGOPTIONS? */
+ if (getenv("PGOPTIONS"))
+ {
+ char *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY,
+ getenv("PGOPTIONS"));
+
+ pg_putenv("PGOPTIONS", pgoptions);
+ pfree(pgoptions);
+ }
+ else
+ pg_putenv("PGOPTIONS", FIX_DEFAULT_READ_ONLY);
+
/* Get values from env if not already set */
- check_required_directory(&old_cluster.bindir, "PGBINOLD", "-b",
+ check_required_directory(&old_cluster.bindir, NULL, "PGBINOLD", "-b",
"old cluster binaries reside");
- check_required_directory(&new_cluster.bindir, "PGBINNEW", "-B",
+ check_required_directory(&new_cluster.bindir, NULL, "PGBINNEW", "-B",
"new cluster binaries reside");
- check_required_directory(&old_cluster.pgdata, "PGDATAOLD", "-d",
- "old cluster data resides");
- check_required_directory(&new_cluster.pgdata, "PGDATANEW", "-D",
- "new cluster data resides");
+ check_required_directory(&old_cluster.pgdata, &old_cluster.pgconfig,
+ "PGDATAOLD", "-d", "old cluster data resides");
+ check_required_directory(&new_cluster.pgdata, &new_cluster.pgconfig,
+ "PGDATANEW", "-D", "new cluster data resides");
}
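
The PGOPTIONS handling just above can be exercised on its own; this sketch
substitutes plain snprintf()/setenv() for the psprintf()/pg_putenv() helpers
used in the patch, so it is illustrative rather than pg_upgrade code:

#include <stdio.h>
#include <stdlib.h>

#define FIX_DEFAULT_READ_ONLY "-c default_transaction_read_only=false"

int
main(void)
{
    const char *existing = getenv("PGOPTIONS");
    char        pgoptions[1024];

    /* prepend the override so sessions are never read-only */
    if (existing)
        snprintf(pgoptions, sizeof(pgoptions), "%s %s",
                 FIX_DEFAULT_READ_ONLY, existing);
    else
        snprintf(pgoptions, sizeof(pgoptions), "%s", FIX_DEFAULT_READ_ONLY);

    setenv("PGOPTIONS", pgoptions, 1);
    printf("PGOPTIONS=%s\n", getenv("PGOPTIONS"));
    return 0;
}
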
@@ -222,21 +240,22 @@ usage(void)
pg_upgrade [OPTION]...\n\
\n\
Options:\n\
- -b, --old-bindir=OLDBINDIR old cluster executable directory\n\
- -B, --new-bindir=NEWBINDIR new cluster executable directory\n\
+ -b, --old-bindir=BINDIR old cluster executable directory\n\
+ -B, --new-bindir=BINDIR new cluster executable directory\n\
-c, --check check clusters only, don't change any data\n\
- -d, --old-datadir=OLDDATADIR old cluster data directory\n\
- -D, --new-datadir=NEWDATADIR new cluster data directory\n\
+ -d, --old-datadir=DATADIR old cluster data directory\n\
+ -D, --new-datadir=DATADIR new cluster data directory\n\
+ -j, --jobs number of simultaneous processes or threads to use\n\
-k, --link link instead of copying files to new cluster\n\
-o, --old-options=OPTIONS old cluster options to pass to the server\n\
-O, --new-options=OPTIONS new cluster options to pass to the server\n\
- -p, --old-port=OLDPORT old cluster port number (default %d)\n\
- -P, --new-port=NEWPORT new cluster port number (default %d)\n\
+ -p, --old-port=PORT old cluster port number (default %d)\n\
+ -P, --new-port=PORT new cluster port number (default %d)\n\
-r, --retain retain SQL and log files after success\n\
- -u, --user=NAME cluster superuser (default \"%s\")\n\
+ -U, --username=NAME cluster superuser (default \"%s\")\n\
-v, --verbose enable verbose internal logging\n\
-V, --version display version information, then exit\n\
- -h, --help show this help, then exit\n\
+ -?, --help show this help, then exit\n\
\n\
Before running pg_upgrade you must:\n\
create a new database cluster (using the new version of initdb)\n\
@@ -244,10 +263,10 @@ Before running pg_upgrade you must:\n\
shutdown the postmaster servicing the new cluster\n\
\n\
When you run pg_upgrade, you must provide the following information:\n\
- the data directory for the old cluster (-d OLDDATADIR)\n\
- the data directory for the new cluster (-D NEWDATADIR)\n\
- the \"bin\" directory for the old version (-b OLDBINDIR)\n\
- the \"bin\" directory for the new version (-B NEWBINDIR)\n\
+ the data directory for the old cluster (-d DATADIR)\n\
+ the data directory for the new cluster (-D DATADIR)\n\
+ the \"bin\" directory for the old version (-b BINDIR)\n\
+ the \"bin\" directory for the new version (-B BINDIR)\n\
\n\
For example:\n\
pg_upgrade -d oldCluster/data -D newCluster/data -b oldCluster/bin -B newCluster/bin\n\
@@ -276,6 +295,7 @@ or\n"), old_cluster.port, new_cluster.port, os_info.user);
*
* Checks a directory option.
* dirpath - the directory name supplied on the command line
+ * configpath - optional configuration directory
* envVarName - the name of an environment variable to get if dirpath is NULL
* cmdLineOption - the command line option corresponds to this directory (-o, -O, -n, -N)
* description - a description of this directory option
@@ -284,23 +304,29 @@ or\n"), old_cluster.port, new_cluster.port, os_info.user);
* user hasn't provided the required directory name.
*/
static void
-check_required_directory(char **dirpath, char *envVarName,
- char *cmdLineOption, char *description)
+check_required_directory(char **dirpath, char **configpath,
+ char *envVarName, char *cmdLineOption,
+ char *description)
{
if (*dirpath == NULL || strlen(*dirpath) == 0)
{
const char *envVar;
if ((envVar = getenv(envVarName)) && strlen(envVar))
+ {
*dirpath = pg_strdup(envVar);
+ if (configpath)
+ *configpath = pg_strdup(envVar);
+ }
else
- pg_log(PG_FATAL, "You must identify the directory where the %s.\n"
- "Please use the %s command-line option or the %s environment variable.\n",
- description, cmdLineOption, envVarName);
+ pg_fatal("You must identify the directory where the %s.\n"
+ "Please use the %s command-line option or the %s environment variable.\n",
+ description, cmdLineOption, envVarName);
}
/*
- * Trim off any trailing path separators
+ * Trim off any trailing path separators because we construct paths by
+ * appending to this path.
*/
#ifndef WIN32
if ((*dirpath)[strlen(*dirpath) - 1] == '/')
@@ -348,7 +374,7 @@ adjust_data_dir(ClusterInfo *cluster)
/*
* We don't have a data directory yet, so we can't check the PG version,
- * so this might fail --- only works for PG 9.2+. If this fails,
+ * so this might fail --- only works for PG 9.2+. If this fails,
* pg_upgrade will fail anyway because the data files will not be found.
*/
snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
@@ -356,8 +382,8 @@ adjust_data_dir(ClusterInfo *cluster)
if ((output = popen(cmd, "r")) == NULL ||
fgets(cmd_output, sizeof(cmd_output), output) == NULL)
- pg_log(PG_FATAL, "Could not get data directory using %s: %s\n",
- cmd, getErrorText(errno));
+ pg_fatal("Could not get data directory using %s: %s\n",
+ cmd, getErrorText(errno));
pclose(output);
@@ -369,3 +395,86 @@ adjust_data_dir(ClusterInfo *cluster)
check_ok();
}
+
+
+/*
+ * get_sock_dir
+ *
+ * Identify the socket directory to use for this cluster. If we're doing
+ * a live check (old cluster only), we need to find out where the postmaster
+ * is listening. Otherwise, we're going to put the socket into the current
+ * directory.
+ */
+void
+get_sock_dir(ClusterInfo *cluster, bool live_check)
+{
+#ifdef HAVE_UNIX_SOCKETS
+
+ /*
+ * sockdir and port were added to postmaster.pid in PG 9.1. Pre-9.1 cannot
+ * process pg_ctl -w for sockets in non-default locations.
+ */
+ if (GET_MAJOR_VERSION(cluster->major_version) >= 901)
+ {
+ if (!live_check)
+ {
+ /* Use the current directory for the socket */
+ cluster->sockdir = pg_malloc(MAXPGPATH);
+ if (!getcwd(cluster->sockdir, MAXPGPATH))
+ pg_fatal("cannot find current directory\n");
+ }
+ else
+ {
+ /*
+ * If we are doing a live check, we will use the old cluster's
+ * Unix domain socket directory so we can connect to the live
+ * server.
+ */
+ unsigned short orig_port = cluster->port;
+ char filename[MAXPGPATH],
+ line[MAXPGPATH];
+ FILE *fp;
+ int lineno;
+
+ snprintf(filename, sizeof(filename), "%s/postmaster.pid",
+ cluster->pgdata);
+ if ((fp = fopen(filename, "r")) == NULL)
+ pg_fatal("Cannot open file %s: %m\n", filename);
+
+ for (lineno = 1;
+ lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR);
+ lineno++)
+ {
+ if (fgets(line, sizeof(line), fp) == NULL)
+ pg_fatal("Cannot read line %d from %s: %m\n", lineno, filename);
+
+ /* potentially overwrite user-supplied value */
+ if (lineno == LOCK_FILE_LINE_PORT)
+ sscanf(line, "%hu", &old_cluster.port);
+ if (lineno == LOCK_FILE_LINE_SOCKET_DIR)
+ {
+ cluster->sockdir = pg_strdup(line);
+ /* strip off newline */
+ if (strchr(cluster->sockdir, '\n') != NULL)
+ *strchr(cluster->sockdir, '\n') = '\0';
+ }
+ }
+ fclose(fp);
+
+ /* warn of port number correction */
+ if (orig_port != DEF_PGUPORT && old_cluster.port != orig_port)
+ pg_log(PG_WARNING, "User-supplied old port number %hu corrected to %hu\n",
+ orig_port, cluster->port);
+ }
+ }
+ else
+
+ /*
+ * Can't get the sockdir, and pg_ctl -w can't use a non-default
+ * socket directory, so use the default.
+ */
+ cluster->sockdir = NULL;
+#else /* !HAVE_UNIX_SOCKETS */
+ cluster->sockdir = NULL;
+#endif
+}
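
A standalone sketch of the postmaster.pid parsing that get_sock_dir() does
for a live check.  The line positions used here (4 = port, 5 = socket
directory) stand in for the LOCK_FILE_LINE_PORT/LOCK_FILE_LINE_SOCKET_DIR
constants assumed above, and the file name is hard-coded for illustration:

#include <stdio.h>
#include <string.h>

int
main(void)
{
    FILE       *fp = fopen("postmaster.pid", "r");
    char        line[1024];
    char        sockdir[1024] = "";
    unsigned short port = 0;
    int         lineno;

    if (fp == NULL)
    {
        perror("postmaster.pid");
        return 1;
    }
    for (lineno = 1; lineno <= 5 && fgets(line, sizeof(line), fp); lineno++)
    {
        if (lineno == 4)
            sscanf(line, "%hu", &port);
        if (lineno == 5)
        {
            strncpy(sockdir, line, sizeof(sockdir) - 1);
            sockdir[strcspn(sockdir, "\n")] = '\0';     /* strip newline */
        }
    }
    fclose(fp);
    printf("port=%hu sockdir=%s\n", port, sockdir);
    return 0;
}
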
diff --git a/contrib/pg_upgrade/page.c b/contrib/pg_upgrade/page.c
index a790f4efc5..6354cec2b0 100644
--- a/contrib/pg_upgrade/page.c
+++ b/contrib/pg_upgrade/page.c
@@ -3,11 +3,11 @@
*
* per-page conversion operations
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/page.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -17,7 +17,7 @@
#ifdef PAGE_CONVERSION
-static const char *getPageVersion(
+static void getPageVersion(
uint16 *version, const char *pathName);
static pageCnvCtx *loadConverterPlugin(
uint16 newPageVersion, uint16 oldPageVersion);
@@ -30,16 +30,12 @@ static pageCnvCtx *loadConverterPlugin(
* the PageLayoutVersion of the new cluster. If the versions differ, this
* function loads a converter plugin and returns a pointer to a pageCnvCtx
* object (in *result) that knows how to convert pages from the old format
- * to the new format. If the versions are identical, this function just
+ * to the new format. If the versions are identical, this function just
* returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion
* is not required.
- *
- * If successful this function sets *result and returns NULL. If an error
- * occurs, this function returns an error message in the form of an null-terminated
- * string.
*/
-const char *
-setupPageConverter(pageCnvCtx **result)
+pageCnvCtx *
+setupPageConverter(void)
{
uint16 oldPageVersion;
uint16 newPageVersion;
@@ -53,35 +49,28 @@ setupPageConverter(pageCnvCtx **result)
snprintf(srcName, sizeof(srcName), "%s/global/%u", old_cluster.pgdata,
old_cluster.pg_database_oid);
- if ((msg = getPageVersion(&oldPageVersion, srcName)) != NULL)
- return msg;
-
- if ((msg = getPageVersion(&newPageVersion, dstName)) != NULL)
- return msg;
+ getPageVersion(&oldPageVersion, srcName);
+ getPageVersion(&newPageVersion, dstName);
/*
* If the old cluster and new cluster use the same page layouts, then we
* don't need a page converter.
*/
- if (newPageVersion == oldPageVersion)
+ if (newPageVersion != oldPageVersion)
{
- *result = NULL;
- return NULL;
- }
+ /*
+ * The clusters use differing page layouts, see if we can find a
+ * plugin that knows how to convert from the old page layout to the
+ * new page layout.
+ */
- /*
- * The clusters use differing page layouts, see if we can find a plugin
- * that knows how to convert from the old page layout to the new page
- * layout.
- */
+ if ((converter = loadConverterPlugin(newPageVersion, oldPageVersion)) == NULL)
+ pg_fatal("could not find plugin to convert from old page layout to new page layout\n");
- if ((converter = loadConverterPlugin(newPageVersion, oldPageVersion)) == NULL)
- return "could not find plugin to convert from old page layout to new page layout";
+ return converter;
+ }
else
- {
- *result = converter;
return NULL;
- }
}
@@ -94,7 +83,7 @@ setupPageConverter(pageCnvCtx **result)
* if an error occurs, this function returns an error message (in the form
* of a null-terminated string).
*/
-static const char *
+static void
getPageVersion(uint16 *version, const char *pathName)
{
int relfd;
@@ -102,19 +91,16 @@ getPageVersion(uint16 *version, const char *pathName)
ssize_t bytesRead;
if ((relfd = open(pathName, O_RDONLY, 0)) < 0)
- return "could not open relation";
+ pg_fatal("could not open relation %s\n", pathName);
if ((bytesRead = read(relfd, &page, sizeof(page))) != sizeof(page))
- {
- close(relfd);
- return "could not read page header";
- }
+ pg_fatal("could not read page header of %s\n", pathName);
*version = PageGetPageLayoutVersion(&page);
close(relfd);
- return NULL;
+ return;
}
@@ -124,7 +110,7 @@ getPageVersion(uint16 *version, const char *pathName)
* This function loads a page-converter plugin library and grabs a
* pointer to each of the (interesting) functions provided by that
* plugin. The name of the plugin library is derived from the given
- * newPageVersion and oldPageVersion. If a plugin is found, this
+ * newPageVersion and oldPageVersion. If a plugin is found, this
* function returns a pointer to a pageCnvCtx object (which will contain
* a collection of plugin function pointers). If the required plugin
* is not found, this function returns NULL.
@@ -175,6 +161,4 @@ loadConverterPlugin(uint16 newPageVersion, uint16 oldPageVersion)
}
}
-
-
#endif
diff --git a/contrib/pg_upgrade/parallel.c b/contrib/pg_upgrade/parallel.c
new file mode 100644
index 0000000000..5d2565d441
--- /dev/null
+++ b/contrib/pg_upgrade/parallel.c
@@ -0,0 +1,357 @@
+/*
+ * parallel.c
+ *
+ * multi-process support
+ *
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
+ * contrib/pg_upgrade/parallel.c
+ */
+
+#include "postgres_fe.h"
+
+#include "pg_upgrade.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#ifdef WIN32
+#include <io.h>
+#endif
+
+static int parallel_jobs;
+
+#ifdef WIN32
+/*
+ * Array holding all active threads.  There can't be any gaps/zeros in it
+ * because the thread_handles array is passed directly to
+ * WaitForMultipleObjects(); the per-thread argument structs therefore live
+ * in a separate array.
+ */
+HANDLE *thread_handles;
+
+typedef struct
+{
+ char *log_file;
+ char *opt_log_file;
+ char *cmd;
+} exec_thread_arg;
+
+typedef struct
+{
+ DbInfoArr *old_db_arr;
+ DbInfoArr *new_db_arr;
+ char *old_pgdata;
+ char *new_pgdata;
+ char *old_tablespace;
+} transfer_thread_arg;
+
+exec_thread_arg **exec_thread_args;
+transfer_thread_arg **transfer_thread_args;
+
+/* track current thread_args struct so reap_child() can be used for all cases */
+void **cur_thread_args;
+
+DWORD win32_exec_prog(exec_thread_arg *args);
+DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args);
+#endif
+
+/*
+ * parallel_exec_prog
+ *
+ * This has the same API as exec_prog, except it does parallel execution,
+ * and therefore must throw errors rather than return an error status.
+ */
+void
+parallel_exec_prog(const char *log_file, const char *opt_log_file,
+ const char *fmt,...)
+{
+ va_list args;
+ char cmd[MAX_STRING];
+
+#ifndef WIN32
+ pid_t child;
+#else
+ HANDLE child;
+ exec_thread_arg *new_arg;
+#endif
+
+ va_start(args, fmt);
+ vsnprintf(cmd, sizeof(cmd), fmt, args);
+ va_end(args);
+
+ if (user_opts.jobs <= 1)
+ /* throw_error must be true to allow jobs */
+ exec_prog(log_file, opt_log_file, true, "%s", cmd);
+ else
+ {
+ /* parallel */
+#ifdef WIN32
+ if (thread_handles == NULL)
+ thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
+
+ if (exec_thread_args == NULL)
+ {
+ int i;
+
+ exec_thread_args = pg_malloc(user_opts.jobs * sizeof(exec_thread_arg *));
+
+ /*
+ * For safety and performance, we keep the args allocated during
+ * the entire life of the process, and we don't free the args in a
+ * thread different from the one that allocated it.
+ */
+ for (i = 0; i < user_opts.jobs; i++)
+ exec_thread_args[i] = pg_malloc0(sizeof(exec_thread_arg));
+ }
+
+ cur_thread_args = (void **) exec_thread_args;
+#endif
+ /* harvest any dead children */
+ while (reap_child(false) == true)
+ ;
+
+ /* must we wait for a dead child? */
+ if (parallel_jobs >= user_opts.jobs)
+ reap_child(true);
+
+ /* set this before we start the job */
+ parallel_jobs++;
+
+ /* Ensure stdio state is quiesced before forking */
+ fflush(NULL);
+
+#ifndef WIN32
+ child = fork();
+ if (child == 0)
+ /* use _exit to skip atexit() functions */
+ _exit(!exec_prog(log_file, opt_log_file, true, "%s", cmd));
+ else if (child < 0)
+ /* fork failed */
+ pg_fatal("could not create worker process: %s\n", strerror(errno));
+#else
+		/* empty array elements are always at the end */
+ new_arg = exec_thread_args[parallel_jobs - 1];
+
+ /* Can only pass one pointer into the function, so use a struct */
+ if (new_arg->log_file)
+ pg_free(new_arg->log_file);
+ new_arg->log_file = pg_strdup(log_file);
+ if (new_arg->opt_log_file)
+ pg_free(new_arg->opt_log_file);
+ new_arg->opt_log_file = opt_log_file ? pg_strdup(opt_log_file) : NULL;
+ if (new_arg->cmd)
+ pg_free(new_arg->cmd);
+ new_arg->cmd = pg_strdup(cmd);
+
+ child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog,
+ new_arg, 0, NULL);
+ if (child == 0)
+ pg_fatal("could not create worker thread: %s\n", strerror(errno));
+
+ thread_handles[parallel_jobs - 1] = child;
+#endif
+ }
+
+ return;
+}
+
+
+#ifdef WIN32
+DWORD
+win32_exec_prog(exec_thread_arg *args)
+{
+ int ret;
+
+ ret = !exec_prog(args->log_file, args->opt_log_file, true, "%s", args->cmd);
+
+ /* terminates thread */
+ return ret;
+}
+#endif
+
+
+/*
+ * parallel_transfer_all_new_dbs
+ *
+ * This has the same API as transfer_all_new_dbs, except that it transfers
+ * multiple tablespaces in parallel.
+ */
+void
+parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+ char *old_pgdata, char *new_pgdata,
+ char *old_tablespace)
+{
+#ifndef WIN32
+ pid_t child;
+#else
+ HANDLE child;
+ transfer_thread_arg *new_arg;
+#endif
+
+ if (user_opts.jobs <= 1)
+ /* throw_error must be true to allow jobs */
+ transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL);
+ else
+ {
+ /* parallel */
+#ifdef WIN32
+ if (thread_handles == NULL)
+ thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
+
+ if (transfer_thread_args == NULL)
+ {
+ int i;
+
+ transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *));
+
+ /*
+ * For safety and performance, we keep the args allocated during
+ * the entire life of the process, and we don't free the args in a
+ * thread different from the one that allocated it.
+ */
+ for (i = 0; i < user_opts.jobs; i++)
+ transfer_thread_args[i] = pg_malloc0(sizeof(transfer_thread_arg));
+ }
+
+ cur_thread_args = (void **) transfer_thread_args;
+#endif
+ /* harvest any dead children */
+ while (reap_child(false) == true)
+ ;
+
+ /* must we wait for a dead child? */
+ if (parallel_jobs >= user_opts.jobs)
+ reap_child(true);
+
+ /* set this before we start the job */
+ parallel_jobs++;
+
+ /* Ensure stdio state is quiesced before forking */
+ fflush(NULL);
+
+#ifndef WIN32
+ child = fork();
+ if (child == 0)
+ {
+ transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata,
+ old_tablespace);
+ /* if we take another exit path, it will be non-zero */
+ /* use _exit to skip atexit() functions */
+ _exit(0);
+ }
+ else if (child < 0)
+ /* fork failed */
+ pg_fatal("could not create worker process: %s\n", strerror(errno));
+#else
+		/* empty array elements are always at the end */
+ new_arg = transfer_thread_args[parallel_jobs - 1];
+
+ /* Can only pass one pointer into the function, so use a struct */
+ new_arg->old_db_arr = old_db_arr;
+ new_arg->new_db_arr = new_db_arr;
+ if (new_arg->old_pgdata)
+ pg_free(new_arg->old_pgdata);
+ new_arg->old_pgdata = pg_strdup(old_pgdata);
+ if (new_arg->new_pgdata)
+ pg_free(new_arg->new_pgdata);
+ new_arg->new_pgdata = pg_strdup(new_pgdata);
+ if (new_arg->old_tablespace)
+ pg_free(new_arg->old_tablespace);
+ new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL;
+
+ child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs,
+ new_arg, 0, NULL);
+ if (child == 0)
+ pg_fatal("could not create worker thread: %s\n", strerror(errno));
+
+ thread_handles[parallel_jobs - 1] = child;
+#endif
+ }
+
+ return;
+}
+
+
+#ifdef WIN32
+DWORD
+win32_transfer_all_new_dbs(transfer_thread_arg *args)
+{
+ transfer_all_new_dbs(args->old_db_arr, args->new_db_arr, args->old_pgdata,
+ args->new_pgdata, args->old_tablespace);
+
+ /* terminates thread */
+ return 0;
+}
+#endif
+
+
+/*
+ * collect status from a completed worker child
+ */
+bool
+reap_child(bool wait_for_child)
+{
+#ifndef WIN32
+ int work_status;
+ int ret;
+#else
+ int thread_num;
+ DWORD res;
+#endif
+
+ if (user_opts.jobs <= 1 || parallel_jobs == 0)
+ return false;
+
+#ifndef WIN32
+ ret = waitpid(-1, &work_status, wait_for_child ? 0 : WNOHANG);
+
+ /* no children or, for WNOHANG, no dead children */
+ if (ret <= 0 || !WIFEXITED(work_status))
+ return false;
+
+ if (WEXITSTATUS(work_status) != 0)
+ pg_fatal("child worker exited abnormally: %s\n", strerror(errno));
+#else
+ /* wait for one to finish */
+ thread_num = WaitForMultipleObjects(parallel_jobs, thread_handles,
+ false, wait_for_child ? INFINITE : 0);
+
+ if (thread_num == WAIT_TIMEOUT || thread_num == WAIT_FAILED)
+ return false;
+
+ /* compute thread index in active_threads */
+ thread_num -= WAIT_OBJECT_0;
+
+ /* get the result */
+ GetExitCodeThread(thread_handles[thread_num], &res);
+ if (res != 0)
+ pg_fatal("child worker exited abnormally: %s\n", strerror(errno));
+
+ /* dispose of handle to stop leaks */
+ CloseHandle(thread_handles[thread_num]);
+
+ /* Move last slot into dead child's position */
+ if (thread_num != parallel_jobs - 1)
+ {
+ void *tmp_args;
+
+ thread_handles[thread_num] = thread_handles[parallel_jobs - 1];
+
+ /*
+	 * Move last active thread arg struct into the now-dead slot, and the
+ * now-dead slot to the end for reuse by the next thread. Though the
+ * thread struct is in use by another thread, we can safely swap the
+ * struct pointers within the array.
+ */
+ tmp_args = cur_thread_args[thread_num];
+ cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1];
+ cur_thread_args[parallel_jobs - 1] = tmp_args;
+ }
+#endif
+
+ /* do this after job has been removed */
+ parallel_jobs--;
+
+ return true;
+}
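
On Unix the job control above reduces to a fork/waitpid slot pattern; the
following self-contained sketch shows it with illustrative stand-ins
(MAX_JOBS, run_one_job()) rather than pg_upgrade symbols:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define MAX_JOBS 4

static int  active_jobs = 0;

/* reap one child; returns 1 if a child was collected, 0 otherwise */
static int
reap_one(int block)
{
    int         status;
    pid_t       pid = waitpid(-1, &status, block ? 0 : WNOHANG);

    if (pid <= 0)
        return 0;
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
    {
        fprintf(stderr, "worker exited abnormally\n");
        exit(1);
    }
    active_jobs--;
    return 1;
}

static void
run_one_job(int jobno)
{
    pid_t       pid;

    /* harvest finished children, then block if all slots are busy */
    while (reap_one(0))
        ;
    if (active_jobs >= MAX_JOBS)
        reap_one(1);

    active_jobs++;
    fflush(NULL);               /* quiesce stdio before forking */
    pid = fork();
    if (pid < 0)
    {
        perror("fork");
        exit(1);
    }
    if (pid == 0)
    {
        /* worker: stand-in for exec_prog()/transfer_all_new_dbs() */
        sleep(1);
        printf("job %d done\n", jobno);
        fflush(stdout);
        _exit(0);               /* skip atexit() handlers, as above */
    }
}

int
main(void)
{
    int         i;

    for (i = 0; i < 10; i++)
        run_one_job(i);
    while (reap_one(1))         /* wait for the remaining workers */
        ;
    return 0;
}
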
diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c
index 27ff8fc85a..773bb07e04 100644
--- a/contrib/pg_upgrade/pg_upgrade.c
+++ b/contrib/pg_upgrade/pg_upgrade.c
@@ -3,7 +3,7 @@
*
* main source file
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/pg_upgrade.c
*/
@@ -15,13 +15,12 @@
* oids are the same between old and new clusters. This is important
* because toast oids are stored as toast pointers in user tables.
*
- * FYI, while pg_class.oid and pg_class.relfilenode are initially the same
- * in a cluster, but they can diverge due to CLUSTER, REINDEX, or VACUUM
- * FULL. The new cluster will have matching pg_class.oid and
- * pg_class.relfilenode values and be based on the old oid value. This can
- * cause the old and new pg_class.relfilenode values to differ. In summary,
- * old and new pg_class.oid and new pg_class.relfilenode will have the
- * same value, and old pg_class.relfilenode might differ.
+ * While pg_class.oid and pg_class.relfilenode are initially the same
+ * in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM
+ * FULL. In the new cluster, pg_class.oid and pg_class.relfilenode will
+ * be the same and will match the old pg_class.oid value. Because of
+ * this, old/new pg_class.relfilenode values will not match if CLUSTER,
+ * REINDEX, or VACUUM FULL have been performed in the old cluster.
*
* We control all assignments of pg_type.oid because these oids are stored
* in user composite type values.
@@ -35,7 +34,7 @@
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -48,7 +47,7 @@ static void prepare_new_databases(void);
static void create_new_objects(void);
static void copy_clog_xlog_xid(void);
static void set_frozenxids(void);
-static void setup(char *argv0, bool live_check);
+static void setup(char *argv0, bool *live_check);
static void cleanup(void);
ClusterInfo old_cluster,
@@ -61,7 +60,6 @@ char *output_files[] = {
/* unique file for pg_ctl start */
SERVER_START_LOG_FILE,
#endif
- RESTORE_LOG_FILE,
UTILITY_LOG_FILE,
INTERNAL_LOG_FILE,
NULL
@@ -81,18 +79,22 @@ main(int argc, char **argv)
adjust_data_dir(&old_cluster);
adjust_data_dir(&new_cluster);
- output_check_banner(&live_check);
+ setup(argv[0], &live_check);
- setup(argv[0], live_check);
+ output_check_banner(live_check);
check_cluster_versions();
+
+ get_sock_dir(&old_cluster, live_check);
+ get_sock_dir(&new_cluster, false);
+
check_cluster_compatibility(live_check);
- check_old_cluster(live_check, &sequence_script_file_name);
+ check_and_dump_old_cluster(live_check, &sequence_script_file_name);
/* -- NEW -- */
- start_postmaster(&new_cluster);
+ start_postmaster(&new_cluster, true);
check_new_cluster();
report_clusters_compatible();
@@ -113,7 +115,7 @@ main(int argc, char **argv)
/* New now using xids of the old system */
/* -- NEW -- */
- start_postmaster(&new_cluster);
+ start_postmaster(&new_cluster, true);
prepare_new_databases();
@@ -123,15 +125,15 @@ main(int argc, char **argv)
/*
* Most failures happen in create_new_objects(), which has completed at
- * this point. We do this here because it is just before linking, which
+ * this point. We do this here because it is just before linking, which
* will link the old and new cluster data files, preventing the old
* cluster from being safely started once the new cluster is started.
*/
if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
disable_old_cluster();
- transfer_all_new_dbs(&old_cluster.dbarr, &new_cluster.dbarr,
- old_cluster.pgdata, new_cluster.pgdata);
+ transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
+ old_cluster.pgdata, new_cluster.pgdata);
/*
* Assuming OIDs are only used in system tables, there is no need to
@@ -140,11 +142,16 @@ main(int argc, char **argv)
* because there is no need to have the schema load use new oids.
*/
prep_status("Setting next OID for new cluster");
- exec_prog(true, true, UTILITY_LOG_FILE,
- SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" >> \"%s\" 2>&1"
- SYSTEMQUOTE,
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/pg_resetxlog\" -o %u \"%s\"",
new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
- new_cluster.pgdata, UTILITY_LOG_FILE);
+ new_cluster.pgdata);
+ check_ok();
+
+ prep_status("Sync data directory to disk");
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir,
+ new_cluster.pgdata);
check_ok();
create_script_for_cluster_analyze(&analyze_script_file_name);
@@ -169,7 +176,7 @@ main(int argc, char **argv)
static void
-setup(char *argv0, bool live_check)
+setup(char *argv0, bool *live_check)
{
char exec_path[MAXPGPATH]; /* full path to my executable */
@@ -181,19 +188,42 @@ setup(char *argv0, bool live_check)
verify_directories();
- /* no postmasters should be running */
- if (!live_check && is_server_running(old_cluster.pgdata))
- pg_log(PG_FATAL, "There seems to be a postmaster servicing the old cluster.\n"
- "Please shutdown that postmaster and try again.\n");
+ /* no postmasters should be running, except for a live check */
+ if (pid_lock_file_exists(old_cluster.pgdata))
+ {
+ /*
+ * If we have a postmaster.pid file, try to start the server. If it
+ * starts, the pid file was stale, so stop the server. If it doesn't
+ * start, assume the server is running. If the pid file is left over
+ * from a server crash, this also allows any committed transactions
+ * stored in the WAL to be replayed so they are not lost, because WAL
+	 * files are not transferred from old to new servers.
+ */
+ if (start_postmaster(&old_cluster, false))
+ stop_postmaster(false);
+ else
+ {
+ if (!user_opts.check)
+ pg_fatal("There seems to be a postmaster servicing the old cluster.\n"
+ "Please shutdown that postmaster and try again.\n");
+ else
+ *live_check = true;
+ }
+ }
/* same goes for the new postmaster */
- if (is_server_running(new_cluster.pgdata))
- pg_log(PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
- "Please shutdown that postmaster and try again.\n");
+ if (pid_lock_file_exists(new_cluster.pgdata))
+ {
+ if (start_postmaster(&new_cluster, false))
+ stop_postmaster(false);
+ else
+ pg_fatal("There seems to be a postmaster servicing the new cluster.\n"
+ "Please shutdown that postmaster and try again.\n");
+ }
/* get path to pg_upgrade executable */
if (find_my_exec(argv0, exec_path) < 0)
- pg_log(PG_FATAL, "Could not get path name to pg_upgrade: %s\n", getErrorText(errno));
+ pg_fatal("Could not get path name to pg_upgrade: %s\n", getErrorText(errno));
/* Trim off program name and keep just path */
*last_dir_separator(exec_path) = '\0';
@@ -211,11 +241,10 @@ prepare_new_cluster(void)
* --analyze so autovacuum doesn't update statistics later
*/
prep_status("Analyzing all rows in the new cluster");
- exec_prog(true, true, UTILITY_LOG_FILE,
- SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
- "--all --analyze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
- new_cluster.bindir, new_cluster.port, os_info.user,
- log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/vacuumdb\" %s --all --analyze %s",
+ new_cluster.bindir, cluster_conn_opts(&new_cluster),
+ log_opts.verbose ? "--verbose" : "");
check_ok();
/*
@@ -225,11 +254,10 @@ prepare_new_cluster(void)
* later.
*/
prep_status("Freezing all rows on the new cluster");
- exec_prog(true, true, UTILITY_LOG_FILE,
- SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
- "--all --freeze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
- new_cluster.bindir, new_cluster.port, os_info.user,
- log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/vacuumdb\" %s --all --freeze %s",
+ new_cluster.bindir, cluster_conn_opts(&new_cluster),
+ log_opts.verbose ? "--verbose" : "");
check_ok();
get_pg_database_relfilenode(&new_cluster);
@@ -247,12 +275,12 @@ prepare_new_databases(void)
set_frozenxids();
- prep_status("Creating databases in the new cluster");
+ prep_status("Restoring global objects in the new cluster");
/*
* Install support functions in the global-object restore database to
- * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template
- * database so objects we add into 'template1' are not propogated. They
+ * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template
+	 * database so objects we add into 'template1' are not propagated. They
* are removed on pg_upgrade exit.
*/
install_support_functions_in_new_db("template1");
@@ -263,14 +291,10 @@ prepare_new_databases(void)
* support functions in template1 but pg_dumpall creates database using
* the template0 template.
*/
- exec_prog(true, true, RESTORE_LOG_FILE,
- SYSTEMQUOTE "\"%s/psql\" --echo-queries "
- "--set ON_ERROR_STOP=on "
- /* --no-psqlrc prevents AUTOCOMMIT=off */
- "--no-psqlrc --port %d --username \"%s\" "
- "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
- new_cluster.bindir, new_cluster.port, os_info.user,
- GLOBALS_DUMP_FILE, RESTORE_LOG_FILE);
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"",
+ new_cluster.bindir, cluster_conn_opts(&new_cluster),
+ GLOBALS_DUMP_FILE);
check_ok();
/* we load this to get a current list of databases */
@@ -285,6 +309,11 @@ create_new_objects(void)
prep_status("Adding support functions to new cluster");
+ /*
+ * Technically, we only need to install these support functions in new
+ * databases that also exist in the old cluster, but for completeness we
+ * process all new databases.
+ */
for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
{
DbInfo *new_db = &new_cluster.dbarr.dbs[dbnum];
@@ -295,14 +324,36 @@ create_new_objects(void)
}
check_ok();
- prep_status("Restoring database schema to new cluster");
- exec_prog(true, true, RESTORE_LOG_FILE,
- SYSTEMQUOTE "\"%s/psql\" --echo-queries "
- "--set ON_ERROR_STOP=on "
- "--no-psqlrc --port %d --username \"%s\" "
- "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
- new_cluster.bindir, new_cluster.port, os_info.user,
- DB_DUMP_FILE, RESTORE_LOG_FILE);
+ prep_status("Restoring database schemas in the new cluster\n");
+
+ for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+ {
+ char sql_file_name[MAXPGPATH],
+ log_file_name[MAXPGPATH];
+ DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
+
+ pg_log(PG_STATUS, "%s", old_db->db_name);
+ snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+ snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
+
+ /*
+ * pg_dump only produces its output at the end, so there is little
+ * parallelism if using the pipe.
+ */
+ parallel_exec_prog(log_file_name,
+ NULL,
+ "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
+ new_cluster.bindir,
+ cluster_conn_opts(&new_cluster),
+ old_db->db_name,
+ sql_file_name);
+ }
+
+ /* reap all children */
+ while (reap_child(true) == true)
+ ;
+
+ end_progress_output();
check_ok();
/* regenerate now that we have objects in the databases */
@@ -311,55 +362,105 @@ create_new_objects(void)
uninstall_support_functions_from_new_cluster();
}
-
+/*
+ * Delete the given subdirectory contents from the new cluster, and copy the
+ * files from the old cluster into it.
+ */
static void
-copy_clog_xlog_xid(void)
+copy_subdir_files(char *subdir)
{
- char old_clog_path[MAXPGPATH];
- char new_clog_path[MAXPGPATH];
+ char old_path[MAXPGPATH];
+ char new_path[MAXPGPATH];
- /* copy old commit logs to new data dir */
- prep_status("Deleting new commit clogs");
+ prep_status("Deleting files from new %s", subdir);
- snprintf(old_clog_path, sizeof(old_clog_path), "%s/pg_clog", old_cluster.pgdata);
- snprintf(new_clog_path, sizeof(new_clog_path), "%s/pg_clog", new_cluster.pgdata);
- if (!rmtree(new_clog_path, true))
- pg_log(PG_FATAL, "could not delete directory \"%s\"\n", new_clog_path);
+ snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir);
+ snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir);
+ if (!rmtree(new_path, true))
+ pg_fatal("could not delete directory \"%s\"\n", new_path);
check_ok();
- prep_status("Copying old commit clogs to new server");
- exec_prog(true, false, UTILITY_LOG_FILE,
+ prep_status("Copying old %s to new server", subdir);
+
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
#ifndef WIN32
- SYSTEMQUOTE "%s \"%s\" \"%s\" >> \"%s\" 2>&1" SYSTEMQUOTE,
- "cp -Rf",
+ "cp -Rf \"%s\" \"%s\"",
#else
/* flags: everything, no confirm, quiet, overwrite read-only */
- SYSTEMQUOTE "%s \"%s\" \"%s\\\" >> \"%s\" 2>&1" SYSTEMQUOTE,
- "xcopy /e /y /q /r",
+ "xcopy /e /y /q /r \"%s\" \"%s\\\"",
#endif
- old_clog_path, new_clog_path, UTILITY_LOG_FILE);
+ old_path, new_path);
+
check_ok();
+}
+
+static void
+copy_clog_xlog_xid(void)
+{
+ /* copy old commit logs to new data dir */
+ copy_subdir_files("pg_clog");
/* set the next transaction id of the new cluster */
prep_status("Setting next transaction ID for new cluster");
- exec_prog(true, true, UTILITY_LOG_FILE,
- SYSTEMQUOTE
- "\"%s/pg_resetxlog\" -f -x %u \"%s\" >> \"%s\" 2>&1"
- SYSTEMQUOTE, new_cluster.bindir,
- old_cluster.controldata.chkpnt_nxtxid,
- new_cluster.pgdata, UTILITY_LOG_FILE);
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/pg_resetxlog\" -f -x %u \"%s\"",
+ new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,
+ new_cluster.pgdata);
check_ok();
+ /*
+ * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change
+ * (see pg_upgrade.h) and the new server is after, then we don't copy
+ * pg_multixact files, but we need to reset pg_control so that the new
+ * server doesn't attempt to read multis older than the cutoff value.
+ */
+ if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER &&
+ new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
+ {
+ copy_subdir_files("pg_multixact/offsets");
+ copy_subdir_files("pg_multixact/members");
+ prep_status("Setting next multixact ID and offset for new cluster");
+
+ /*
+ * we preserve all files and contents, so we must preserve both "next"
+	 * counters here and the oldest multi present on the system.
+ */
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/pg_resetxlog\" -O %u -m %u,%u \"%s\"",
+ new_cluster.bindir,
+ old_cluster.controldata.chkpnt_nxtmxoff,
+ old_cluster.controldata.chkpnt_nxtmulti,
+ old_cluster.controldata.chkpnt_oldstMulti,
+ new_cluster.pgdata);
+ check_ok();
+ }
+ else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER)
+ {
+ prep_status("Setting oldest multixact ID on new cluster");
+
+ /*
+ * We don't preserve files in this case, but it's important that the
+ * oldest multi is set to the latest value used by the old system, so
+ * that multixact.c returns the empty set for multis that might be
+ * present on disk. We set next multi to the value following that; it
+ * might end up wrapped around (i.e. 0) if the old cluster had
+ * next=MaxMultiXactId, but multixact.c can cope with that just fine.
+ */
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/pg_resetxlog\" -m %u,%u \"%s\"",
+ new_cluster.bindir,
+ old_cluster.controldata.chkpnt_nxtmulti + 1,
+ old_cluster.controldata.chkpnt_nxtmulti,
+ new_cluster.pgdata);
+ check_ok();
+ }
+
/* now reset the wal archives in the new cluster */
prep_status("Resetting WAL archives");
- exec_prog(true, true, UTILITY_LOG_FILE,
- SYSTEMQUOTE
- "\"%s/pg_resetxlog\" -l %u,%u,%u \"%s\" >> \"%s\" 2>&1"
- SYSTEMQUOTE, new_cluster.bindir,
- old_cluster.controldata.chkpnt_tli,
- old_cluster.controldata.logid,
- old_cluster.controldata.nxtlogseg,
- new_cluster.pgdata, UTILITY_LOG_FILE);
+ exec_prog(UTILITY_LOG_FILE, NULL, true,
+ "\"%s/pg_resetxlog\" -l %s \"%s\"", new_cluster.bindir,
+ old_cluster.controldata.nextxlogfile,
+ new_cluster.pgdata);
check_ok();
}
@@ -428,8 +529,8 @@ set_frozenxids(void)
PQclear(executeQueryOrDie(conn,
"UPDATE pg_catalog.pg_class "
"SET relfrozenxid = '%u' "
- /* only heap and TOAST are vacuumed */
- "WHERE relkind IN ('r', 't')",
+ /* only heap, materialized view, and TOAST are vacuumed */
+ "WHERE relkind IN ('r', 'm', 't')",
old_cluster.controldata.chkpnt_nxtxid));
PQfinish(conn);
@@ -452,20 +553,32 @@ set_frozenxids(void)
static void
cleanup(void)
{
-
fclose(log_opts.internal);
/* Remove dump and log files? */
if (!log_opts.retain)
{
+ int dbnum;
char **filename;
for (filename = output_files; *filename != NULL; filename++)
unlink(*filename);
- /* remove SQL files */
- unlink(ALL_DUMP_FILE);
+ /* remove dump files */
unlink(GLOBALS_DUMP_FILE);
- unlink(DB_DUMP_FILE);
+
+ if (old_cluster.dbarr.dbs)
+ for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+ {
+ char sql_file_name[MAXPGPATH],
+ log_file_name[MAXPGPATH];
+ DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
+
+ snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
+ unlink(sql_file_name);
+
+ snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
+ unlink(log_file_name);
+ }
}
}
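
For reference, the per-database file names used by create_new_objects() and
cleanup() above come straight from the database OID and the masks defined in
pg_upgrade.h below; the bindir, connection options and database name in this
sketch are made-up examples:

#include <stdio.h>

#define DB_DUMP_FILE_MASK       "pg_upgrade_dump_%u.custom"
#define DB_DUMP_LOG_FILE_MASK   "pg_upgrade_dump_%u.log"

int
main(void)
{
    unsigned int db_oid = 16384;        /* example database OID */
    char        sql_file_name[1024];
    char        log_file_name[1024];
    char        cmd[4096];

    snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, db_oid);
    snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, db_oid);
    snprintf(cmd, sizeof(cmd),
             "\"%s/pg_restore\" %s --exit-on-error --verbose "
             "--dbname \"%s\" \"%s\"",
             "/usr/pgsql/bin",                  /* example bindir */
             "--port 50432 --username postgres",/* example conn opts */
             "mydb", sql_file_name);

    printf("dump: %s\nlog:  %s\ncmd:  %s\n", sql_file_name, log_file_name, cmd);
    return 0;
}
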
diff --git a/contrib/pg_upgrade/pg_upgrade.h b/contrib/pg_upgrade/pg_upgrade.h
index 8b2062181f..0410b02293 100644
--- a/contrib/pg_upgrade/pg_upgrade.h
+++ b/contrib/pg_upgrade/pg_upgrade.h
@@ -1,13 +1,12 @@
/*
* pg_upgrade.h
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/pg_upgrade.h
*/
#include <unistd.h>
#include <assert.h>
-#include <dirent.h>
#include <sys/stat.h>
#include <sys/time.h>
@@ -25,18 +24,16 @@
#define MIGRATOR_API_VERSION 1
-#define MESSAGE_WIDTH "60"
+#define MESSAGE_WIDTH 60
-#define OVERWRITE_MESSAGE " %-" MESSAGE_WIDTH "." MESSAGE_WIDTH "s\r"
#define GET_MAJOR_VERSION(v) ((v) / 100)
-#define ALL_DUMP_FILE "pg_upgrade_dump_all.sql"
/* contains both global db information and CREATE DATABASE commands */
#define GLOBALS_DUMP_FILE "pg_upgrade_dump_globals.sql"
-#define DB_DUMP_FILE "pg_upgrade_dump_db.sql"
+#define DB_DUMP_FILE_MASK "pg_upgrade_dump_%u.custom"
+#define DB_DUMP_LOG_FILE_MASK "pg_upgrade_dump_%u.log"
#define SERVER_LOG_FILE "pg_upgrade_server.log"
-#define RESTORE_LOG_FILE "pg_upgrade_restore.log"
#define UTILITY_LOG_FILE "pg_upgrade_utility.log"
#define INTERNAL_LOG_FILE "pg_upgrade_internal.log"
@@ -63,7 +60,11 @@ extern char *output_files[];
#define SERVER_STOP_LOG_FILE SERVER_LOG_FILE
#else
#define SERVER_START_LOG_FILE "pg_upgrade_server_start.log"
-/* pg_ctl stop doesn't keep the log file open, so reuse UTILITY_LOG_FILE */
+/*
+ * "pg_ctl start" keeps SERVER_START_LOG_FILE and SERVER_LOG_FILE open
+ * while the server is running, so we use UTILITY_LOG_FILE for "pg_ctl
+ * stop".
+ */
#define SERVER_STOP_LOG_FILE UTILITY_LOG_FILE
#endif
@@ -72,20 +73,23 @@ extern char *output_files[];
#define pg_copy_file copy_file
#define pg_mv_file rename
#define pg_link_file link
+#define PATH_SEPARATOR '/'
#define RM_CMD "rm -f"
#define RMDIR_CMD "rm -rf"
#define SCRIPT_EXT "sh"
#define ECHO_QUOTE "'"
+#define ECHO_BLANK ""
#else
#define pg_copy_file CopyFile
#define pg_mv_file pgrename
#define pg_link_file win32_pghardlink
-#define sleep(x) Sleep(x * 1000)
+#define PATH_SEPARATOR '\\'
#define RM_CMD "DEL /q"
#define RMDIR_CMD "RMDIR /s/q"
#define SCRIPT_EXT "bat"
#define EXE_EXT ".exe"
#define ECHO_QUOTE ""
+#define ECHO_BLANK "."
#endif
#define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \
@@ -103,18 +107,28 @@ extern char *output_files[];
*/
#define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031
+/*
+ * pg_multixact format changed in 9.3 commit 0ac5ad5134f2769ccbaefec73844f85,
+ * ("Improve concurrency of foreign key locking") which also updated catalog
+ * version to this value. pg_upgrade behavior depends on whether old and new
+ * server versions are both newer than this, or only the new one is.
+ */
+#define MULTIXACT_FORMATCHANGE_CAT_VER 201301231
/*
* Each relation is represented by a relinfo structure.
*/
typedef struct
{
- char nspname[NAMEDATALEN]; /* namespace name */
- char relname[NAMEDATALEN]; /* relation name */
+ /* Can't use NAMEDATALEN; not guaranteed to fit on client */
+ char *nspname; /* namespace name */
+ char *relname; /* relation name */
Oid reloid; /* relation oid */
Oid relfilenode; /* relation relfile node */
/* relation tablespace path, or "" for the cluster default */
- char tablespace[MAXPGPATH];
+ char *tablespace;
+ bool nsp_alloc;
+ bool tblsp_alloc;
} RelInfo;
typedef struct
@@ -128,8 +142,12 @@ typedef struct
*/
typedef struct
{
- char old_dir[MAXPGPATH];
- char new_dir[MAXPGPATH];
+ const char *old_tablespace;
+ const char *new_tablespace;
+ const char *old_tablespace_suffix;
+ const char *new_tablespace_suffix;
+ Oid old_db_oid;
+ Oid new_db_oid;
/*
* old/new relfilenodes might differ for pg_largeobject(_metadata) indexes
@@ -138,8 +156,8 @@ typedef struct
Oid old_relfilenode;
Oid new_relfilenode;
/* the rest are used only for logging and error reporting */
- char nspname[NAMEDATALEN]; /* namespaces */
- char relname[NAMEDATALEN];
+ char *nspname; /* namespaces */
+ char *relname;
} FileNameMap;
/*
@@ -148,8 +166,9 @@ typedef struct
typedef struct
{
Oid db_oid; /* oid of the database */
- char db_name[NAMEDATALEN]; /* database name */
- char db_tblspace[MAXPGPATH]; /* database default tablespace path */
+ char *db_name; /* database name */
+ char db_tablespace[MAXPGPATH]; /* database default tablespace
+ * path */
RelInfoArr rel_arr; /* array of all user relinfos */
} DbInfo;
@@ -168,11 +187,13 @@ typedef struct
{
uint32 ctrl_ver;
uint32 cat_ver;
- uint32 logid;
- uint32 nxtlogseg;
+ char nextxlogfile[25];
uint32 chkpnt_tli;
uint32 chkpnt_nxtxid;
uint32 chkpnt_nxtoid;
+ uint32 chkpnt_nxtmulti;
+ uint32 chkpnt_nxtmxoff;
+ uint32 chkpnt_oldstMulti;
uint32 align;
uint32 blocksz;
uint32 largesz;
@@ -183,6 +204,7 @@ typedef struct
uint32 toast;
bool date_is_int;
bool float8_pass_by_value;
+ bool data_checksum_version;
char *lc_collate;
char *lc_ctype;
char *encoding;
@@ -203,6 +225,7 @@ typedef enum
typedef enum
{
PG_VERBOSE,
+ PG_STATUS,
PG_REPORT,
PG_WARNING,
PG_FATAL
@@ -227,14 +250,15 @@ typedef struct
char *bindir; /* pathname for cluster's executable directory */
char *pgopts; /* options to pass to the server, like pg_ctl
* -o */
+ char *sockdir; /* directory for Unix Domain socket, if any */
unsigned short port; /* port number where postmaster is waiting */
uint32 major_version; /* PG_VERSION of cluster */
char major_version_str[64]; /* string PG_VERSION of cluster */
uint32 bin_version; /* version returned from pg_ctl */
Oid pg_database_oid; /* OID of pg_database relation */
- Oid install_role_oid; /* OID of connected role */
- Oid role_count; /* number of roles defined in the cluster */
- char *tablespace_suffix; /* directory specification */
+ Oid install_role_oid; /* OID of connected role */
+ Oid role_count; /* number of roles defined in the cluster */
+ const char *tablespace_suffix; /* directory specification */
} ClusterInfo;
@@ -257,6 +281,7 @@ typedef struct
bool check; /* TRUE -> ask user for permission to make
* changes */
transferMode transfer_mode; /* copy files or link them? */
+ int jobs;
} UserOpts;
@@ -268,8 +293,9 @@ typedef struct
const char *progname; /* complete pathname for this program */
char *exec_path; /* full path to my executable */
char *user; /* username for clusters */
- char **tablespaces; /* tablespaces */
- int num_tablespaces;
+ bool user_specified; /* user specified on command-line */
+ char **old_tablespaces; /* tablespaces */
+ int num_old_tablespaces;
char **libraries; /* loadable libraries */
int num_libraries;
ClusterInfo *running_cluster;
@@ -284,14 +310,13 @@ extern UserOpts user_opts;
extern ClusterInfo old_cluster,
new_cluster;
extern OSInfo os_info;
-extern char scandir_file_pattern[];
/* check.c */
-void output_check_banner(bool *live_check);
-void check_old_cluster(bool live_check,
- char **sequence_script_file_name);
+void output_check_banner(bool live_check);
+void check_and_dump_old_cluster(bool live_check,
+ char **sequence_script_file_name);
void check_new_cluster(void);
void report_clusters_compatible(void);
void issue_warnings(char *sequence_script_file_name);
@@ -313,17 +338,17 @@ void disable_old_cluster(void);
/* dump.c */
void generate_old_dump(void);
-void split_old_dump(void);
/* exec.c */
-int
-exec_prog(bool throw_error, bool is_priv,
- const char *log_file, const char *cmd,...)
+#define EXEC_PSQL_ARGS "--echo-queries --set ON_ERROR_STOP=on --no-psqlrc --dbname=template1"
+bool
+exec_prog(const char *log_file, const char *opt_log_file,
+ bool throw_error, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
void verify_directories(void);
-bool is_server_running(const char *datadir);
+bool pid_lock_file_exists(const char *datadir);
/* file.c */
@@ -352,13 +377,12 @@ typedef struct
pluginShutdown shutdown; /* Pointer to plugin's shutdown function */
} pageCnvCtx;
-const char *setupPageConverter(pageCnvCtx **result);
+const pageCnvCtx *setupPageConverter(void);
#else
/* dummy */
typedef void *pageCnvCtx;
#endif
-int load_directory(const char *dirname, struct dirent *** namelist);
const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
const char *dst, bool force);
const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
@@ -380,7 +404,6 @@ FileNameMap *gen_db_file_maps(DbInfo *old_db,
DbInfo *new_db, int *nmaps, const char *old_pgdata,
const char *new_pgdata);
void get_db_and_rel_infos(ClusterInfo *cluster);
-void free_db_and_rel_infos(DbInfoArr *db_arr);
void print_maps(FileNameMap *maps, int n,
const char *db_name);
@@ -388,13 +411,16 @@ void print_maps(FileNameMap *maps, int n,
void parseCommandLine(int argc, char *argv[]);
void adjust_data_dir(ClusterInfo *cluster);
+void get_sock_dir(ClusterInfo *cluster, bool live_check);
/* relfilenode.c */
void get_pg_database_relfilenode(ClusterInfo *cluster);
-const char *transfer_all_new_dbs(DbInfoArr *olddb_arr,
- DbInfoArr *newdb_arr, char *old_pgdata, char *new_pgdata);
-
+void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
+ DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
+void transfer_all_new_dbs(DbInfoArr *old_db_arr,
+ DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata,
+ char *old_tablespace);
/* tablespace.c */
@@ -408,7 +434,9 @@ PGresult *
executeQueryOrDie(PGconn *conn, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
-void start_postmaster(ClusterInfo *cluster);
+char *cluster_conn_opts(ClusterInfo *cluster);
+
+bool start_postmaster(ClusterInfo *cluster, bool throw_error);
void stop_postmaster(bool fast);
uint32 get_major_server_version(ClusterInfo *cluster);
void check_pghost_envvar(void);
@@ -417,21 +445,22 @@ void check_pghost_envvar(void);
/* util.c */
char *quote_identifier(const char *s);
-int get_user_info(char **user_name);
+int get_user_info(char **user_name_p);
void check_ok(void);
void
report_status(eLogType type, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void
-pg_log(eLogType type, char *fmt,...)
+pg_log(eLogType type, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void
+pg_fatal(const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2), noreturn));
+void end_progress_output(void);
+void
prep_status(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
void check_ok(void);
-char *pg_strdup(const char *s);
-void *pg_malloc(int size);
-void pg_free(void *ptr);
const char *getErrorText(int errNum);
unsigned int str2uint(const char *str);
void pg_putenv(const char *var, const char *val);
@@ -441,6 +470,7 @@ void pg_putenv(const char *var, const char *val);
void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster,
bool check_mode);
+void old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
/* version_old_8_3.c */
@@ -452,3 +482,13 @@ void old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode)
void old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
bool check_mode);
char *old_8_3_create_sequence_script(ClusterInfo *cluster);
+
+/* parallel.c */
+void
+parallel_exec_prog(const char *log_file, const char *opt_log_file,
+ const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
+void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+ char *old_pgdata, char *new_pgdata,
+ char *old_tablespace);
+bool reap_child(bool wait_for_child);
diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c
index 3509585de7..aa6aafde5e 100644
--- a/contrib/pg_upgrade/relfilenode.c
+++ b/contrib/pg_upgrade/relfilenode.c
@@ -3,11 +3,11 @@
*
* relfilenode functions
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/relfilenode.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -16,13 +16,59 @@
static void transfer_single_new_db(pageCnvCtx *pageConverter,
- FileNameMap *maps, int size);
-static void transfer_relfile(pageCnvCtx *pageConverter,
- const char *fromfile, const char *tofile,
- const char *nspname, const char *relname);
+ FileNameMap *maps, int size, char *old_tablespace);
+static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
+ const char *suffix);
+
+
+/*
+ * transfer_all_new_tablespaces()
+ *
+ * Responsible for upgrading all databases; invokes routines to generate mappings and then
+ * physically link the databases.
+ */
+void
+transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+ char *old_pgdata, char *new_pgdata)
+{
+ pg_log(PG_REPORT, "%s user relation files\n",
+ user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
+
+ /*
+ * Transferring files by tablespace is tricky because a single database can
+ * use multiple tablespaces. For non-parallel mode, we just pass a NULL
+ * tablespace path, which matches all tablespaces. In parallel mode, we
+ * pass the default tablespace and all user-created tablespaces and let
+ * those operations happen in parallel.
+ */
+ if (user_opts.jobs <= 1)
+ parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
+ new_pgdata, NULL);
+ else
+ {
+ int tblnum;
+
+ /* transfer default tablespace */
+ parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
+ new_pgdata, old_pgdata);
+
+ for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ parallel_transfer_all_new_dbs(old_db_arr,
+ new_db_arr,
+ old_pgdata,
+ new_pgdata,
+ os_info.old_tablespaces[tblnum]);
+ /* reap all children */
+ while (reap_child(true) == true)
+ ;
+ }
+
+ end_progress_output();
+ check_ok();
+
+ return;
+}
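The hunk above dispatches one child per tablespace and then reaps them all. As a rough standalone sketch (not part of this patch, and much simpler than pg_upgrade's parallel.c, which also enforces the --jobs limit and has a Windows thread path), the dispatch-and-reap pattern looks like this; the helper name do_one_tablespace and the paths are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static void
do_one_tablespace(const char *path)
{
	/* stand-in for transfer_all_new_dbs() restricted to one tablespace */
	printf("child %d: transferring %s\n", (int) getpid(), path);
}

int
main(void)
{
	const char *tablespaces[] = {"/data/base", "/data/ts1", "/data/ts2"};
	int			n = 3;

	for (int i = 0; i < n; i++)
	{
		pid_t		pid = fork();

		if (pid == 0)
		{
			/* child: handle one tablespace, then exit */
			do_one_tablespace(tablespaces[i]);
			exit(0);
		}
		else if (pid < 0)
		{
			perror("fork");
			exit(1);
		}
	}

	/* reap all children, like the while (reap_child(true)) loop above */
	while (wait(NULL) > 0)
		;
	return 0;
}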
-/* used by scandir(), must be global */
-char scandir_file_pattern[MAXPGPATH];
/*
* transfer_all_new_dbs()
@@ -30,16 +76,12 @@ char scandir_file_pattern[MAXPGPATH];
* Responsible for upgrading all database. invokes routines to generate mappings and then
* physically link the databases.
*/
-const char *
-transfer_all_new_dbs(DbInfoArr *old_db_arr,
- DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
+void
+transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+ char *old_pgdata, char *new_pgdata, char *old_tablespace)
{
int old_dbnum,
new_dbnum;
- const char *msg = NULL;
-
- prep_status("%s user relation files\n",
- user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
/* Scan the old cluster databases and transfer their files */
for (old_dbnum = new_dbnum = 0;
@@ -65,8 +107,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
}
if (new_dbnum >= new_db_arr->ndbs)
- pg_log(PG_FATAL, "old database \"%s\" not found in the new cluster\n",
- old_db->db_name);
+ pg_fatal("old database \"%s\" not found in the new cluster\n",
+ old_db->db_name);
n_maps = 0;
mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata,
@@ -77,27 +119,23 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
print_maps(mappings, n_maps, new_db->db_name);
#ifdef PAGE_CONVERSION
- msg = setupPageConverter(&pageConverter);
+ pageConverter = setupPageConverter();
#endif
- transfer_single_new_db(pageConverter, mappings, n_maps);
+ transfer_single_new_db(pageConverter, mappings, n_maps,
+ old_tablespace);
pg_free(mappings);
}
}
- prep_status(" "); /* in case nothing printed; pass a space so
- * gcc doesn't complain about empty format
- * string */
- check_ok();
-
- return msg;
+ return;
}
/*
* get_pg_database_relfilenode()
*
- * Retrieves the relfilenode for a few system-catalog tables. We need these
+ * Retrieves the relfilenode for a few system-catalog tables. We need these
* relfilenodes later in the upgrade process.
*/
void
@@ -131,124 +169,39 @@ get_pg_database_relfilenode(ClusterInfo *cluster)
*/
static void
transfer_single_new_db(pageCnvCtx *pageConverter,
- FileNameMap *maps, int size)
+ FileNameMap *maps, int size, char *old_tablespace)
{
- char old_dir[MAXPGPATH];
- struct dirent **namelist = NULL;
- int numFiles = 0;
int mapnum;
- int fileno;
- bool vm_crashsafe_change = false;
+ bool vm_crashsafe_match = true;
- old_dir[0] = '\0';
-
- /* Do not copy non-crashsafe vm files for binaries that assume crashsafety */
+ /*
+ * Do the old and new clusters disagree on the crash-safety of the vm
+ * files? If so, do not copy them.
+ */
if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
- vm_crashsafe_change = true;
+ vm_crashsafe_match = false;
for (mapnum = 0; mapnum < size; mapnum++)
{
- char old_file[MAXPGPATH];
- char new_file[MAXPGPATH];
-
- /* Changed tablespaces? Need a new directory scan? */
- if (strcmp(maps[mapnum].old_dir, old_dir) != 0)
- {
- if (numFiles > 0)
- {
- for (fileno = 0; fileno < numFiles; fileno++)
- pg_free(namelist[fileno]);
- pg_free(namelist);
- }
-
- snprintf(old_dir, sizeof(old_dir), "%s", maps[mapnum].old_dir);
- numFiles = load_directory(old_dir, &namelist);
- }
-
- /* Copying files might take some time, so give feedback. */
-
- snprintf(old_file, sizeof(old_file), "%s/%u", maps[mapnum].old_dir,
- maps[mapnum].old_relfilenode);
- snprintf(new_file, sizeof(new_file), "%s/%u", maps[mapnum].new_dir,
- maps[mapnum].new_relfilenode);
- pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_file);
-
- /*
- * Copy/link the relation file to the new cluster
- */
- unlink(new_file);
- transfer_relfile(pageConverter, old_file, new_file,
- maps[mapnum].nspname, maps[mapnum].relname);
-
- /* fsm/vm files added in PG 8.4 */
- if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804)
+ if (old_tablespace == NULL ||
+ strcmp(maps[mapnum].old_tablespace, old_tablespace) == 0)
{
- /*
- * Copy/link any fsm and vm files, if they exist
- */
- snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u_",
- maps[mapnum].old_relfilenode);
-
- for (fileno = 0; fileno < numFiles; fileno++)
- {
- char *vm_offset = strstr(namelist[fileno]->d_name, "_vm");
- bool is_vm_file = false;
-
- /* Is a visibility map file? (name ends with _vm) */
- if (vm_offset && strlen(vm_offset) == strlen("_vm"))
- is_vm_file = true;
-
- if (strncmp(namelist[fileno]->d_name, scandir_file_pattern,
- strlen(scandir_file_pattern)) == 0 &&
- (!is_vm_file || !vm_crashsafe_change))
- {
- snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir,
- namelist[fileno]->d_name);
- snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_dir,
- maps[mapnum].new_relfilenode, strchr(namelist[fileno]->d_name, '_'));
-
- unlink(new_file);
- transfer_relfile(pageConverter, old_file, new_file,
- maps[mapnum].nspname, maps[mapnum].relname);
- }
- }
- }
+ /* transfer primary file */
+ transfer_relfile(pageConverter, &maps[mapnum], "");
- /*
- * Now copy/link any related segments as well. Remember, PG breaks
- * large files into 1GB segments, the first segment has no extension,
- * subsequent segments are named relfilenode.1, relfilenode.2,
- * relfilenode.3, ... 'fsm' and 'vm' files use underscores so are not
- * copied.
- */
- snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u.",
- maps[mapnum].old_relfilenode);
-
- for (fileno = 0; fileno < numFiles; fileno++)
- {
- if (strncmp(namelist[fileno]->d_name, scandir_file_pattern,
- strlen(scandir_file_pattern)) == 0)
+ /* fsm/vm files added in PG 8.4 */
+ if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804)
{
- snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir,
- namelist[fileno]->d_name);
- snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_dir,
- maps[mapnum].new_relfilenode, strchr(namelist[fileno]->d_name, '.'));
-
- unlink(new_file);
- transfer_relfile(pageConverter, old_file, new_file,
- maps[mapnum].nspname, maps[mapnum].relname);
+ /*
+ * Copy/link any fsm and vm files, if they exist
+ */
+ transfer_relfile(pageConverter, &maps[mapnum], "_fsm");
+ if (vm_crashsafe_match)
+ transfer_relfile(pageConverter, &maps[mapnum], "_vm");
}
}
}
-
-
- if (numFiles > 0)
- {
- for (fileno = 0; fileno < numFiles; fileno++)
- pg_free(namelist[fileno]);
- pg_free(namelist);
- }
}
@@ -258,31 +211,86 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
* Copy or link file from old cluster to new one.
*/
static void
-transfer_relfile(pageCnvCtx *pageConverter, const char *old_file,
- const char *new_file, const char *nspname, const char *relname)
+transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
+ const char *type_suffix)
{
const char *msg;
+ char old_file[MAXPGPATH];
+ char new_file[MAXPGPATH];
+ int fd;
+ int segno;
+ char extent_suffix[65];
+
+ /*
+ * Now copy/link any related segments as well. Remember, PG breaks large
+ * files into 1GB segments, the first segment has no extension, subsequent
+ * segments are named relfilenode.1, relfilenode.2, relfilenode.3, and so on.
+ */
+ for (segno = 0;; segno++)
+ {
+ if (segno == 0)
+ extent_suffix[0] = '\0';
+ else
+ snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno);
+
+ snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s",
+ map->old_tablespace,
+ map->old_tablespace_suffix,
+ map->old_db_oid,
+ map->old_relfilenode,
+ type_suffix,
+ extent_suffix);
+ snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s",
+ map->new_tablespace,
+ map->new_tablespace_suffix,
+ map->new_db_oid,
+ map->new_relfilenode,
+ type_suffix,
+ extent_suffix);
+
+ /* Is it an extent, fsm, or vm file? */
+ if (type_suffix[0] != '\0' || segno != 0)
+ {
+ /* Did file open fail? */
+ if ((fd = open(old_file, O_RDONLY, 0)) == -1)
+ {
+ /* File does not exist? That's OK, just return */
+ if (errno == ENOENT)
+ return;
+ else
+ pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
+ map->nspname, map->relname, old_file, new_file,
+ getErrorText(errno));
+ }
+ close(fd);
+ }
- if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
- pg_log(PG_FATAL, "This upgrade requires page-by-page conversion, "
- "you must use copy mode instead of link mode.\n");
+ unlink(new_file);
- if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
- {
- pg_log(PG_VERBOSE, "copying \"%s\" to \"%s\"\n", old_file, new_file);
+ /* Copying files might take some time, so give feedback. */
+ pg_log(PG_STATUS, "%s", old_file);
- if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL)
- pg_log(PG_FATAL, "error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
- nspname, relname, old_file, new_file, msg);
- }
- else
- {
- pg_log(PG_VERBOSE, "linking \"%s\" to \"%s\"\n", old_file, new_file);
+ if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
+ pg_fatal("This upgrade requires page-by-page conversion, "
+ "you must use copy mode instead of link mode.\n");
+
+ if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
+ {
+ pg_log(PG_VERBOSE, "copying \"%s\" to \"%s\"\n", old_file, new_file);
+
+ if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL)
+ pg_fatal("error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
+ map->nspname, map->relname, old_file, new_file, msg);
+ }
+ else
+ {
+ pg_log(PG_VERBOSE, "linking \"%s\" to \"%s\"\n", old_file, new_file);
- if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
- pg_log(PG_FATAL,
- "error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
- nspname, relname, old_file, new_file, msg);
+ if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
+ pg_fatal("error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
+ map->nspname, map->relname, old_file, new_file, msg);
+ }
}
+
return;
}
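For reference, the snprintf pattern in transfer_relfile() composes each candidate file name from the tablespace path, the version-specific suffix, the database OID, the relfilenode, an optional fork suffix, and an optional segment extension. A minimal standalone sketch (not part of the patch; all values are made up) that prints the names the loop would probe for a user tablespace:

#include <stdio.h>

int
main(void)
{
	/* hypothetical values standing in for one FileNameMap entry */
	const char *tablespace = "/mnt/ts1";
	const char *suffix = "/PG_9.4_201409291";	/* empty for the default tablespace */
	unsigned int db_oid = 16384;
	unsigned int relfilenode = 24576;
	const char *forks[] = {"", "_fsm", "_vm"};
	char		path[1024];

	for (int f = 0; f < 3; f++)
		for (int segno = 0; segno < 2; segno++)
		{
			char		extent[32];

			if (segno == 0)
				extent[0] = '\0';
			else
				snprintf(extent, sizeof(extent), ".%d", segno);

			snprintf(path, sizeof(path), "%s%s/%u/%u%s%s",
					 tablespace, suffix, db_oid, relfilenode,
					 forks[f], extent);
			/* e.g. /mnt/ts1/PG_9.4_201409291/16384/24576_vm.1 */
			printf("%s\n", path);
		}
	return 0;
}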
diff --git a/contrib/pg_upgrade/server.c b/contrib/pg_upgrade/server.c
index f83d6fa866..5f4b5307cb 100644
--- a/contrib/pg_upgrade/server.c
+++ b/contrib/pg_upgrade/server.c
@@ -3,11 +3,11 @@
*
* database server functions
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/server.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -46,22 +46,55 @@ connectToServer(ClusterInfo *cluster, const char *db_name)
/*
* get_db_conn()
*
- * get database connection
+ * get database connection, using named database + standard params for cluster
*/
static PGconn *
get_db_conn(ClusterInfo *cluster, const char *db_name)
{
- char conn_opts[MAXPGPATH];
+ char conn_opts[2 * NAMEDATALEN + MAXPGPATH + 100];
- snprintf(conn_opts, sizeof(conn_opts),
- "dbname = '%s' user = '%s' port = %d", db_name, os_info.user,
- cluster->port);
+ if (cluster->sockdir)
+ snprintf(conn_opts, sizeof(conn_opts),
+ "dbname = '%s' user = '%s' host = '%s' port = %d",
+ db_name, os_info.user, cluster->sockdir, cluster->port);
+ else
+ snprintf(conn_opts, sizeof(conn_opts),
+ "dbname = '%s' user = '%s' port = %d",
+ db_name, os_info.user, cluster->port);
return PQconnectdb(conn_opts);
}
/*
+ * cluster_conn_opts()
+ *
+ * Return standard command-line options for connecting to this cluster when
+ * using psql, pg_dump, etc. Ideally this would match what get_db_conn()
+ * sets, but the utilities we need aren't very consistent about the treatment
+ * of database name options, so we leave that out.
+ *
+ * Note result is in static storage, so use it right away.
+ */
+char *
+cluster_conn_opts(ClusterInfo *cluster)
+{
+ static char conn_opts[MAXPGPATH + NAMEDATALEN + 100];
+
+ if (cluster->sockdir)
+ snprintf(conn_opts, sizeof(conn_opts),
+ "--host \"%s\" --port %d --username \"%s\"",
+ cluster->sockdir, cluster->port, os_info.user);
+ else
+ snprintf(conn_opts, sizeof(conn_opts),
+ "--port %d --username \"%s\"",
+ cluster->port, os_info.user);
+
+ return conn_opts;
+}
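To illustrate the two connection formats produced here (purely a sketch, with a hypothetical socket directory, user, and port): get_db_conn() builds a libpq keyword/value string, while cluster_conn_opts() builds command-line switches for psql and pg_dump.

#include <stdio.h>

int
main(void)
{
	/* hypothetical cluster settings */
	const char *sockdir = "/tmp";
	const char *user = "postgres";
	int			port = 50432;
	char		conn_opts[512];
	char		cli_opts[512];

	/* libpq keyword/value string, as built by get_db_conn() */
	snprintf(conn_opts, sizeof(conn_opts),
			 "dbname = '%s' user = '%s' host = '%s' port = %d",
			 "template1", user, sockdir, port);

	/* command-line form for psql/pg_dump, as built by cluster_conn_opts() */
	snprintf(cli_opts, sizeof(cli_opts),
			 "--host \"%s\" --port %d --username \"%s\"",
			 sockdir, port, user);

	printf("%s\n%s\n", conn_opts, cli_opts);
	return 0;
}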
+
+
+/*
* executeQueryOrDie()
*
* Formats a query string from the given arguments and executes the
@@ -116,12 +149,12 @@ get_major_server_version(ClusterInfo *cluster)
snprintf(ver_filename, sizeof(ver_filename), "%s/PG_VERSION",
cluster->pgdata);
if ((version_fd = fopen(ver_filename, "r")) == NULL)
- return 0;
+ pg_fatal("could not open version file: %s\n", ver_filename);
if (fscanf(version_fd, "%63s", cluster->major_version_str) == 0 ||
sscanf(cluster->major_version_str, "%d.%d", &integer_version,
&fractional_version) != 2)
- pg_log(PG_FATAL, "could not get version from %s\n", cluster->pgdata);
+ pg_fatal("could not get version from %s\n", cluster->pgdata);
fclose(version_fd);
@@ -133,17 +166,17 @@ static void
stop_postmaster_atexit(void)
{
stop_postmaster(true);
-
}
-void
-start_postmaster(ClusterInfo *cluster)
+bool
+start_postmaster(ClusterInfo *cluster, bool throw_error)
{
- char cmd[MAXPGPATH];
+ char cmd[MAXPGPATH * 4 + 1000];
PGconn *conn;
bool exit_hook_registered = false;
- int pg_ctl_return = 0;
+ bool pg_ctl_return = false;
+ char socket_string[MAXPGPATH + 200];
if (!exit_hook_registered)
{
@@ -151,34 +184,83 @@ start_postmaster(ClusterInfo *cluster)
exit_hook_registered = true;
}
+ socket_string[0] = '\0';
+
+#ifdef HAVE_UNIX_SOCKETS
+ /* prevent TCP/IP connections, restrict socket access */
+ strcat(socket_string,
+ " -c listen_addresses='' -c unix_socket_permissions=0700");
+
+ /* Have a sockdir? Tell the postmaster. */
+ if (cluster->sockdir)
+ snprintf(socket_string + strlen(socket_string),
+ sizeof(socket_string) - strlen(socket_string),
+ " -c %s='%s'",
+ (GET_MAJOR_VERSION(cluster->major_version) < 903) ?
+ "unix_socket_directory" : "unix_socket_directories",
+ cluster->sockdir);
+#endif
+
/*
* Using autovacuum=off disables cleanup vacuum and analyze, but freeze
* vacuums can still happen, so we set autovacuum_freeze_max_age to its
* maximum. We assume all datfrozenxid and relfrozen values are less than
* a gap of 2000000000 from the current xid counter, so autovacuum will
* not touch them.
+ *
+ * Turn off durability requirements to improve object creation speed; since
+ * we only modify the new cluster, use these settings only there. If there is a
+ * crash, the new cluster has to be recreated anyway. fsync=off is a big
+ * win on ext4.
*/
snprintf(cmd, sizeof(cmd),
- SYSTEMQUOTE "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" "
- "-o \"-p %d %s %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE,
+ "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start",
cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
(cluster->controldata.cat_ver >=
- BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
- "-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
- cluster->pgopts ? cluster->pgopts : "", SERVER_START_LOG_FILE);
+ BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? " -b" :
+ " -c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
+ (cluster == &new_cluster) ?
+ " -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "",
+ cluster->pgopts ? cluster->pgopts : "", socket_string);
/*
* Don't throw an error right away, let connecting throw the error because
* it might supply a reason for the failure.
*/
- pg_ctl_return = exec_prog(false, true,
- /* pass both file names if the differ */
- (strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ?
- SERVER_LOG_FILE :
- SERVER_LOG_FILE " or " SERVER_START_LOG_FILE,
+ pg_ctl_return = exec_prog(SERVER_START_LOG_FILE,
+ /* pass both file names if they differ */
+ (strcmp(SERVER_LOG_FILE,
+ SERVER_START_LOG_FILE) != 0) ?
+ SERVER_LOG_FILE : NULL,
+ false,
"%s", cmd);
- /* Check to see if we can connect to the server; if not, report it. */
+ /* Did it fail and we are just testing if the server could be started? */
+ if (!pg_ctl_return && !throw_error)
+ return false;
+
+ /*
+ * We set this here to make sure atexit() shuts down the server, but only
+ * if we started the server successfully. We do it before checking for
+ * connectivity in case the server started but there is a connectivity
+ * failure. If pg_ctl did not return success, we will exit below.
+ *
+ * Pre-9.1 servers do not have PQping(), so we could be leaving the server
+ * running if authentication was misconfigured, so someday we might want
+ * to be more aggressive about doing server shutdowns even if pg_ctl
+ * fails, but now (2013-08-14) it seems prudent to be cautious. We don't
+ * want to shut down a server that might have been accidentally started
+ * during the upgrade.
+ */
+ if (pg_ctl_return)
+ os_info.running_cluster = cluster;
+
+ /*
+ * pg_ctl -w might have failed because the server couldn't be started, or
+ * there might have been a connection problem in _checking_ if the server
+ * has started. Therefore, even if pg_ctl failed, we continue and test
+ * for connectivity in case we get a connection reason for the failure.
+ */
if ((conn = get_db_conn(cluster, "template1")) == NULL ||
PQstatus(conn) != CONNECTION_OK)
{
@@ -186,24 +268,28 @@ start_postmaster(ClusterInfo *cluster)
PQerrorMessage(conn));
if (conn)
PQfinish(conn);
- pg_log(PG_FATAL, "could not connect to %s postmaster started with the command: %s\n",
- CLUSTER_NAME(cluster), cmd);
+ pg_fatal("could not connect to %s postmaster started with the command:\n"
+ "%s\n",
+ CLUSTER_NAME(cluster), cmd);
}
PQfinish(conn);
- /* If the connection didn't fail, fail now */
- if (pg_ctl_return != 0)
- pg_log(PG_FATAL, "pg_ctl failed to start the %s server\n",
- CLUSTER_NAME(cluster));
+ /*
+ * If pg_ctl failed, and the connection didn't fail, and throw_error is
+ * enabled, fail now. This could happen if the server was already
+ * running.
+ */
+ if (!pg_ctl_return)
+ pg_fatal("pg_ctl failed to start the %s server, or connection failed\n",
+ CLUSTER_NAME(cluster));
- os_info.running_cluster = cluster;
+ return true;
}
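The sockdir handling above picks the GUC name by server version: pre-9.3 servers only know unix_socket_directory, while 9.3 and later use unix_socket_directories. A small standalone sketch of that selection (not part of the patch), assuming GET_MAJOR_VERSION() is the usual divide-by-100 of the numeric version as defined in pg_upgrade.h:

#include <stdio.h>

/* same idea as pg_upgrade's GET_MAJOR_VERSION: 90204 -> 902, 90400 -> 904 */
#define GET_MAJOR_VERSION(v)	((v) / 100)

int
main(void)
{
	unsigned int versions[] = {90204, 90300, 90400};

	for (int i = 0; i < 3; i++)
	{
		const char *guc = (GET_MAJOR_VERSION(versions[i]) < 903) ?
			"unix_socket_directory" : "unix_socket_directories";

		/* this is what would be appended to socket_string */
		printf("server %u -> -c %s='/some/sockdir'\n", versions[i], guc);
	}
	return 0;
}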
void
stop_postmaster(bool fast)
{
- char cmd[MAXPGPATH];
ClusterInfo *cluster;
if (os_info.running_cluster == &old_cluster)
@@ -213,14 +299,11 @@ stop_postmaster(bool fast)
else
return; /* no cluster running */
- snprintf(cmd, sizeof(cmd),
- SYSTEMQUOTE "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" "
- "%s stop >> \"%s\" 2>&1" SYSTEMQUOTE,
- cluster->bindir, cluster->pgconfig,
- cluster->pgopts ? cluster->pgopts : "",
- fast ? "-m fast" : "", SERVER_STOP_LOG_FILE);
-
- exec_prog(fast ? false : true, true, SERVER_STOP_LOG_FILE, "%s", cmd);
+ exec_prog(SERVER_STOP_LOG_FILE, NULL, !fast,
+ "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" %s stop",
+ cluster->bindir, cluster->pgconfig,
+ cluster->pgopts ? cluster->pgopts : "",
+ fast ? "-m fast" : "");
os_info.running_cluster = NULL;
}
@@ -241,6 +324,9 @@ check_pghost_envvar(void)
start = PQconndefaults();
+ if (!start)
+ pg_fatal("out of memory\n");
+
for (option = start; option->keyword != NULL; option++)
{
if (option->envvar && (strcmp(option->envvar, "PGHOST") == 0 ||
@@ -252,9 +338,8 @@ check_pghost_envvar(void)
/* check for 'local' host values */
(strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 &&
strcmp(value, "::1") != 0 && value[0] != '/'))
- pg_log(PG_FATAL,
- "libpq environment variable %s has a non-local server value: %s\n",
- option->envvar, value);
+ pg_fatal("libpq environment variable %s has a non-local server value: %s\n",
+ option->envvar, value);
}
}
diff --git a/contrib/pg_upgrade/tablespace.c b/contrib/pg_upgrade/tablespace.c
index b783b6251e..68e9cb241c 100644
--- a/contrib/pg_upgrade/tablespace.c
+++ b/contrib/pg_upgrade/tablespace.c
@@ -3,14 +3,16 @@
*
* tablespace functions
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/tablespace.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
+#include <sys/types.h>
+
static void get_tablespace_paths(void);
static void set_tablespace_directory_suffix(ClusterInfo *cluster);
@@ -23,11 +25,10 @@ init_tablespaces(void)
set_tablespace_directory_suffix(&old_cluster);
set_tablespace_directory_suffix(&new_cluster);
- if (os_info.num_tablespaces > 0 &&
+ if (os_info.num_old_tablespaces > 0 &&
strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0)
- pg_log(PG_FATAL,
- "Cannot upgrade to/from the same system catalog version when\n"
- "using tablespaces.\n");
+ pg_fatal("Cannot upgrade to/from the same system catalog version when\n"
+ "using tablespaces.\n");
}
@@ -57,18 +58,47 @@ get_tablespace_paths(void)
res = executeQueryOrDie(conn, "%s", query);
- if ((os_info.num_tablespaces = PQntuples(res)) != 0)
- os_info.tablespaces = (char **) pg_malloc(
- os_info.num_tablespaces * sizeof(char *));
+ if ((os_info.num_old_tablespaces = PQntuples(res)) != 0)
+ os_info.old_tablespaces = (char **) pg_malloc(
+ os_info.num_old_tablespaces * sizeof(char *));
else
- os_info.tablespaces = NULL;
+ os_info.old_tablespaces = NULL;
i_spclocation = PQfnumber(res, "spclocation");
- for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
- os_info.tablespaces[tblnum] = pg_strdup(
+ for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ {
+ struct stat statBuf;
+
+ os_info.old_tablespaces[tblnum] = pg_strdup(
PQgetvalue(res, tblnum, i_spclocation));
+ /*
+ * Check that the tablespace path exists and is a directory.
+ * Effectively, this is checking only for tables/indexes in
+ * non-existent tablespace directories. Databases located in
+ * non-existent tablespaces already throw a backend error.
+ * Non-existent tablespace directories can occur when a data directory
+ * that contains user tablespaces is moved as part of pg_upgrade
+ * preparation and the symbolic links are not updated.
+ */
+ if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0)
+ {
+ if (errno == ENOENT)
+ report_status(PG_FATAL,
+ "tablespace directory \"%s\" does not exist\n",
+ os_info.old_tablespaces[tblnum]);
+ else
+ report_status(PG_FATAL,
+ "cannot stat() tablespace directory \"%s\": %s\n",
+ os_info.old_tablespaces[tblnum], getErrorText(errno));
+ }
+ if (!S_ISDIR(statBuf.st_mode))
+ report_status(PG_FATAL,
+ "tablespace path \"%s\" is not a directory\n",
+ os_info.old_tablespaces[tblnum]);
+ }
+
PQclear(res);
PQfinish(conn);
@@ -85,12 +115,10 @@ set_tablespace_directory_suffix(ClusterInfo *cluster)
else
{
/* This cluster has a version-specific subdirectory */
- cluster->tablespace_suffix = pg_malloc(4 +
- strlen(cluster->major_version_str) +
- 10 /* OIDCHARS */ + 1);
/* The leading slash is needed to start a new directory. */
- sprintf(cluster->tablespace_suffix, "/PG_%s_%d", cluster->major_version_str,
- cluster->controldata.cat_ver);
+ cluster->tablespace_suffix = psprintf("/PG_%s_%d",
+ cluster->major_version_str,
+ cluster->controldata.cat_ver);
}
}
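The stat()/S_ISDIR() check added to get_tablespace_paths() boils down to the following standalone sketch (not part of the patch): the old tablespace path must exist and must be a directory, otherwise the upgrade is aborted before any files are touched.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/stat.h>

/* returns 0 if path is an existing directory, -1 otherwise */
static int
check_tablespace_dir(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0)
	{
		if (errno == ENOENT)
			fprintf(stderr, "tablespace directory \"%s\" does not exist\n", path);
		else
			fprintf(stderr, "cannot stat() tablespace directory \"%s\": %s\n",
					path, strerror(errno));
		return -1;
	}
	if (!S_ISDIR(st.st_mode))
	{
		fprintf(stderr, "tablespace path \"%s\" is not a directory\n", path);
		return -1;
	}
	return 0;
}

int
main(void)
{
	return check_tablespace_dir("/tmp") == 0 ? 0 : 1;
}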
diff --git a/contrib/pg_upgrade/test.sh b/contrib/pg_upgrade/test.sh
index 299b7a5c47..baa7d4748b 100644
--- a/contrib/pg_upgrade/test.sh
+++ b/contrib/pg_upgrade/test.sh
@@ -6,14 +6,26 @@
# runs the regression tests (to put in some data), runs pg_dumpall,
# runs pg_upgrade, runs pg_dumpall again, compares the dumps.
#
-# Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
+# Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
# Portions Copyright (c) 1994, Regents of the University of California
set -e
: ${MAKE=make}
-: ${PGPORT=50432}
-export PGPORT
+
+# Guard against parallel make issues (see comments in pg_regress.c)
+unset MAKEFLAGS
+unset MAKELEVEL
+
+# Set listen_addresses appropriately for the test host
+testhost=`uname -s`
+
+case $testhost in
+ MINGW*) LISTEN_ADDRESSES="localhost" ;;
+ *) LISTEN_ADDRESSES="" ;;
+esac
+
+POSTMASTER_OPTS="-F -c listen_addresses=$LISTEN_ADDRESSES"
temp_root=$PWD/tmp_check
@@ -38,8 +50,9 @@ if [ "$1" = '--install' ]; then
# We need to make it use psql from our temporary installation,
# because otherwise the installcheck run below would try to
# use psql from the proper installation directory, which might
- # be outdated or missing.
- EXTRA_REGRESS_OPTS=--psqldir=$bindir
+ # be outdated or missing. But don't override anything else that's
+ # already in EXTRA_REGRESS_OPTS.
+ EXTRA_REGRESS_OPTS="$EXTRA_REGRESS_OPTS --psqldir=$bindir"
export EXTRA_REGRESS_OPTS
fi
@@ -52,20 +65,59 @@ newsrc=`cd ../.. && pwd`
PATH=$bindir:$PATH
export PATH
-PGDATA=$temp_root/data
+BASE_PGDATA=$temp_root/data
+PGDATA="$BASE_PGDATA.old"
export PGDATA
-rm -rf "$PGDATA" "$PGDATA".old
+rm -rf "$BASE_PGDATA" "$PGDATA"
logdir=$PWD/log
rm -rf "$logdir"
mkdir "$logdir"
+# Clear out any environment vars that might cause libpq to connect to
+# the wrong postmaster (cf pg_regress.c)
+#
+# Some shells, such as NetBSD's, return non-zero from unset if the variable
+# is already unset. Since we are operating under 'set -e', this causes the
+# script to fail. To guard against this, set them all to an empty string first.
+PGDATABASE=""; unset PGDATABASE
+PGUSER=""; unset PGUSER
+PGSERVICE=""; unset PGSERVICE
+PGSSLMODE=""; unset PGSSLMODE
+PGREQUIRESSL=""; unset PGREQUIRESSL
+PGCONNECT_TIMEOUT=""; unset PGCONNECT_TIMEOUT
+PGHOST=""; unset PGHOST
+PGHOSTADDR=""; unset PGHOSTADDR
+
+# Select a non-conflicting port number, similarly to pg_regress.c
+PG_VERSION_NUM=`grep '#define PG_VERSION_NUM' $newsrc/src/include/pg_config.h | awk '{print $3}'`
+PGPORT=`expr $PG_VERSION_NUM % 16384 + 49152`
+export PGPORT
+
+i=0
+while psql -X postgres </dev/null 2>/dev/null
+do
+ i=`expr $i + 1`
+ if [ $i -eq 16 ]
+ then
+ echo port $PGPORT apparently in use
+ exit 1
+ fi
+ PGPORT=`expr $PGPORT + 1`
+ export PGPORT
+done
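The port formula above maps the version number into the dynamic/private port range 49152-65535. As a worked example (values assumed; shown in C only to keep the examples in one language), PG_VERSION_NUM 90400 gives 90400 % 16384 + 49152 = 57632:

#include <stdio.h>

int
main(void)
{
	/* same arithmetic as the shell above, for a hypothetical 9.4.0 build */
	int			pg_version_num = 90400;
	int			port = pg_version_num % 16384 + 49152;

	/* always lands in the dynamic/private port range 49152..65535 */
	printf("PGPORT=%d\n", port);	/* prints PGPORT=57632 */
	return 0;
}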
+
+# buildfarm may try to override port via EXTRA_REGRESS_OPTS ...
+EXTRA_REGRESS_OPTS="$EXTRA_REGRESS_OPTS --port=$PGPORT"
+export EXTRA_REGRESS_OPTS
+
+# enable echo so the user can see what is being executed
set -x
-$oldbindir/initdb
-$oldbindir/pg_ctl start -l "$logdir/postmaster1.log" -w
+$oldbindir/initdb -N
+$oldbindir/pg_ctl start -l "$logdir/postmaster1.log" -o "$POSTMASTER_OPTS" -w
if "$MAKE" -C "$oldsrc" installcheck; then
- pg_dumpall >"$temp_root"/dump1.sql || pg_dumpall1_status=$?
+ pg_dumpall -f "$temp_root"/dump1.sql || pg_dumpall1_status=$?
if [ "$newsrc" != "$oldsrc" ]; then
oldpgversion=`psql -A -t -d regression -c "SHOW server_version_num"`
fix_sql=""
@@ -100,20 +152,36 @@ if [ -n "$pg_dumpall1_status" ]; then
exit 1
fi
-mv "${PGDATA}" "${PGDATA}.old"
+PGDATA=$BASE_PGDATA
+
+initdb -N
-initdb
+pg_upgrade $PG_UPGRADE_OPTS -d "${PGDATA}.old" -D "${PGDATA}" -b "$oldbindir" -B "$bindir" -p "$PGPORT" -P "$PGPORT"
-pg_upgrade -d "${PGDATA}.old" -D "${PGDATA}" -b "$oldbindir" -B "$bindir"
+pg_ctl start -l "$logdir/postmaster2.log" -o "$POSTMASTER_OPTS" -w
-pg_ctl start -l "$logdir/postmaster2.log" -w
-pg_dumpall >"$temp_root"/dump2.sql || pg_dumpall2_status=$?
+case $testhost in
+ MINGW*) cmd /c analyze_new_cluster.bat ;;
+ *) sh ./analyze_new_cluster.sh ;;
+esac
+
+pg_dumpall -f "$temp_root"/dump2.sql || pg_dumpall2_status=$?
pg_ctl -m fast stop
+
+# no need to echo commands anymore
+set +x
+echo
+
if [ -n "$pg_dumpall2_status" ]; then
echo "pg_dumpall of post-upgrade database cluster failed"
exit 1
fi
+case $testhost in
+ MINGW*) cmd /c delete_old_cluster.bat ;;
+ *) sh ./delete_old_cluster.sh ;;
+esac
+
if diff -q "$temp_root"/dump1.sql "$temp_root"/dump2.sql; then
echo PASSED
exit 0
diff --git a/contrib/pg_upgrade/util.c b/contrib/pg_upgrade/util.c
index 6977663b63..3b94057696 100644
--- a/contrib/pg_upgrade/util.c
+++ b/contrib/pg_upgrade/util.c
@@ -3,12 +3,13 @@
*
* utility functions
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/util.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
+#include "common/username.h"
#include "pg_upgrade.h"
#include <signal.h>
@@ -35,6 +36,18 @@ report_status(eLogType type, const char *fmt,...)
}
+/* force blank output for progress display */
+void
+end_progress_output(void)
+{
+ /*
+ * In case nothing printed; pass a space so gcc doesn't complain about
+ * empty format string.
+ */
+ prep_status(" ");
+}
+
+
/*
* prep_status
*
@@ -63,27 +76,30 @@ prep_status(const char *fmt,...)
if (strlen(message) > 0 && message[strlen(message) - 1] == '\n')
pg_log(PG_REPORT, "%s", message);
else
- pg_log(PG_REPORT, "%-" MESSAGE_WIDTH "s", message);
+ /* trim strings that don't end in a newline */
+ pg_log(PG_REPORT, "%-*s", MESSAGE_WIDTH, message);
}
+static
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)))
void
-pg_log(eLogType type, char *fmt,...)
+pg_log_v(eLogType type, const char *fmt, va_list ap)
{
- va_list args;
char message[MAX_STRING];
- va_start(args, fmt);
- vsnprintf(message, sizeof(message), fmt, args);
- va_end(args);
+ vsnprintf(message, sizeof(message), fmt, ap);
- /* PG_VERBOSE is only output in verbose mode */
- if (type != PG_VERBOSE || log_opts.verbose)
+ /* PG_VERBOSE and PG_STATUS are only output in verbose mode */
+ /* fopen() on log_opts.internal might have failed, so check it */
+ if (((type != PG_VERBOSE && type != PG_STATUS) || log_opts.verbose) &&
+ log_opts.internal != NULL)
{
- fwrite(message, strlen(message), 1, log_opts.internal);
- /* if we are using OVERWRITE_MESSAGE, add newline */
- if (strchr(message, '\r') != NULL)
- fwrite("\n", 1, 1, log_opts.internal);
+ if (type == PG_STATUS)
+ /* status messages need two leading spaces and a newline */
+ fprintf(log_opts.internal, " %s\n", message);
+ else
+ fprintf(log_opts.internal, "%s", message);
fflush(log_opts.internal);
}
@@ -94,6 +110,21 @@ pg_log(eLogType type, char *fmt,...)
printf("%s", _(message));
break;
+ case PG_STATUS:
+ /* for output to a display, do leading truncation and append \r */
+ if (isatty(fileno(stdout)))
+ /* -2 because we use a 2-space indent */
+ printf(" %s%-*.*s\r",
+ /* prefix with "..." if we do leading truncation */
+ strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...",
+ MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
+ /* optional leading truncation */
+ strlen(message) <= MESSAGE_WIDTH - 2 ? message :
+ message + strlen(message) - MESSAGE_WIDTH + 3 + 2);
+ else
+ printf(" %s\n", _(message));
+ break;
+
case PG_REPORT:
case PG_WARNING:
printf("%s", _(message));
@@ -113,6 +144,30 @@ pg_log(eLogType type, char *fmt,...)
void
+pg_log(eLogType type, const char *fmt,...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ pg_log_v(type, fmt, args);
+ va_end(args);
+}
+
+
+void
+pg_fatal(const char *fmt,...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ pg_log_v(PG_FATAL, fmt, args);
+ va_end(args);
+ printf("Failure, exiting\n");
+ exit(1);
+}
+
+
+void
check_ok(void)
{
/* all seems well */
@@ -151,66 +206,28 @@ quote_identifier(const char *s)
/*
* get_user_info()
- * (copied from initdb.c) find the current user
*/
int
-get_user_info(char **user_name)
+get_user_info(char **user_name_p)
{
int user_id;
+ const char *user_name;
+ char *errstr;
#ifndef WIN32
- struct passwd *pw = getpwuid(geteuid());
-
user_id = geteuid();
-#else /* the windows code */
- struct passwd_win32
- {
- int pw_uid;
- char pw_name[128];
- } pass_win32;
- struct passwd_win32 *pw = &pass_win32;
- DWORD pwname_size = sizeof(pass_win32.pw_name) - 1;
-
- GetUserName(pw->pw_name, &pwname_size);
-
+#else
user_id = 1;
#endif
- *user_name = pg_strdup(pw->pw_name);
+ user_name = get_user_name(&errstr);
+ if (!user_name)
+ pg_fatal("%s\n", errstr);
- return user_id;
-}
+ /* make a copy */
+ *user_name_p = pg_strdup(user_name);
-
-void *
-pg_malloc(int n)
-{
- void *p = malloc(n);
-
- if (p == NULL)
- pg_log(PG_FATAL, "%s: out of memory\n", os_info.progname);
-
- return p;
-}
-
-
-void
-pg_free(void *p)
-{
- if (p != NULL)
- free(p);
-}
-
-
-char *
-pg_strdup(const char *s)
-{
- char *result = strdup(s);
-
- if (result == NULL)
- pg_log(PG_FATAL, "%s: out of memory\n", os_info.progname);
-
- return result;
+ return user_id;
}
@@ -256,15 +273,14 @@ pg_putenv(const char *var, const char *val)
if (val)
{
#ifndef WIN32
- char *envstr = (char *) pg_malloc(strlen(var) +
- strlen(val) + 2);
+ char *envstr;
- sprintf(envstr, "%s=%s", var, val);
+ envstr = psprintf("%s=%s", var, val);
putenv(envstr);
/*
* Do not free envstr because it becomes part of the environment on
- * some operating systems. See port/unsetenv.c::unsetenv.
+ * some operating systems. See port/unsetenv.c::unsetenv.
*/
#else
SetEnvironmentVariableA(var, val);
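The PG_STATUS branch added to pg_log_v() keeps progress output on a single terminal line: short messages are padded to MESSAGE_WIDTH, long ones are left-truncated with a "..." prefix so the tail of the path stays visible. A standalone sketch of that formatting (not part of the patch; the real code ends each line with \r so successive statuses overwrite each other, the sketch prints newlines so the output is visible):

#include <stdio.h>
#include <string.h>

#define MESSAGE_WIDTH	60

static void
show_status(const char *message)
{
	int			len = (int) strlen(message);

	if (len <= MESSAGE_WIDTH - 2)
		/* short message: 2-space indent, padded to the field width */
		printf("  %-*.*s\n", MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2, message);
	else
		/* long message: keep the tail, prefix "..." to show truncation */
		printf("  ...%-*.*s\n", MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2,
			   message + len - MESSAGE_WIDTH + 3 + 2);
}

int
main(void)
{
	show_status("/short/path/12345");
	show_status("/a/very/long/tablespace/path/PG_9.4_201409291/16384/24576.42");
	return 0;
}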
diff --git a/contrib/pg_upgrade/version.c b/contrib/pg_upgrade/version.c
index 5d790a0803..0f9dc079b2 100644
--- a/contrib/pg_upgrade/version.c
+++ b/contrib/pg_upgrade/version.c
@@ -3,11 +3,11 @@
*
* Postgres-version-specific routines
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/version.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -49,7 +49,7 @@ new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
if (!check_mode)
{
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+ pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
fprintf(script, "\\connect %s\n",
quote_identifier(active_db->db_name));
fprintf(script,
@@ -87,3 +87,92 @@ new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
else
check_ok();
}
+
+
+/*
+ * old_9_3_check_for_line_data_type_usage()
+ * 9.3 -> 9.4
+ * Fully implement the 'line' data type in 9.4, which previously returned
+ * "not enabled" by default and was only functionally enabled with a
+ * compile-time switch; 9.4 "line" has different binary and text
+ * representation formats; checks tables and indexes.
+ */
+void
+old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
+{
+ int dbnum;
+ FILE *script = NULL;
+ bool found = false;
+ char output_path[MAXPGPATH];
+
+ prep_status("Checking for invalid \"line\" user columns");
+
+ snprintf(output_path, sizeof(output_path), "tables_using_line.txt");
+
+ for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
+ {
+ PGresult *res;
+ bool db_used = false;
+ int ntups;
+ int rowno;
+ int i_nspname,
+ i_relname,
+ i_attname;
+ DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
+ PGconn *conn = connectToServer(cluster, active_db->db_name);
+
+ res = executeQueryOrDie(conn,
+ "SELECT n.nspname, c.relname, a.attname "
+ "FROM pg_catalog.pg_class c, "
+ " pg_catalog.pg_namespace n, "
+ " pg_catalog.pg_attribute a "
+ "WHERE c.oid = a.attrelid AND "
+ " NOT a.attisdropped AND "
+ " a.atttypid = 'pg_catalog.line'::pg_catalog.regtype AND "
+ " c.relnamespace = n.oid AND "
+ /* exclude possible orphaned temp tables */
+ " n.nspname !~ '^pg_temp_' AND "
+ " n.nspname !~ '^pg_toast_temp_' AND "
+ " n.nspname NOT IN ('pg_catalog', 'information_schema')");
+
+ ntups = PQntuples(res);
+ i_nspname = PQfnumber(res, "nspname");
+ i_relname = PQfnumber(res, "relname");
+ i_attname = PQfnumber(res, "attname");
+ for (rowno = 0; rowno < ntups; rowno++)
+ {
+ found = true;
+ if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
+ pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+ if (!db_used)
+ {
+ fprintf(script, "Database: %s\n", active_db->db_name);
+ db_used = true;
+ }
+ fprintf(script, " %s.%s.%s\n",
+ PQgetvalue(res, rowno, i_nspname),
+ PQgetvalue(res, rowno, i_relname),
+ PQgetvalue(res, rowno, i_attname));
+ }
+
+ PQclear(res);
+
+ PQfinish(conn);
+ }
+
+ if (script)
+ fclose(script);
+
+ if (found)
+ {
+ pg_log(PG_REPORT, "fatal\n");
+ pg_fatal("Your installation contains the \"line\" data type in user tables. This\n"
+ "data type changed its internal and input/output format between your old\n"
+ "and new clusters so this cluster cannot currently be upgraded. You can\n"
+ "remove the problem tables and restart the upgrade. A list of the problem\n"
+ "columns is in the file:\n"
+ " %s\n\n", output_path);
+ }
+ else
+ check_ok();
+}
diff --git a/contrib/pg_upgrade/version_old_8_3.c b/contrib/pg_upgrade/version_old_8_3.c
index b681c0984e..07e79bd609 100644
--- a/contrib/pg_upgrade/version_old_8_3.c
+++ b/contrib/pg_upgrade/version_old_8_3.c
@@ -3,11 +3,11 @@
*
* Postgres-version-specific routines
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade/version_old_8_3.c
*/
-#include "postgres.h"
+#include "postgres_fe.h"
#include "pg_upgrade.h"
@@ -61,7 +61,7 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
" a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND "
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
- " n.nspname !~ '^pg_temp_' AND "
+ " n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
@@ -73,7 +73,7 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
{
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+ pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
@@ -96,13 +96,12 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
if (found)
{
pg_log(PG_REPORT, "fatal\n");
- pg_log(PG_FATAL,
- "Your installation contains the \"name\" data type in user tables. This\n"
+ pg_fatal("Your installation contains the \"name\" data type in user tables. This\n"
"data type changed its internal alignment between your old and new\n"
- "clusters so this cluster cannot currently be upgraded. You can remove\n"
+ "clusters so this cluster cannot currently be upgraded. You can remove\n"
"the problem tables and restart the upgrade. A list of the problem\n"
- "columns is in the file:\n"
- " %s\n\n", output_path);
+ "columns is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
@@ -145,13 +144,14 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n, "
" pg_catalog.pg_attribute a "
+ /* materialized views didn't exist in 8.3, so no need to check 'm' */
"WHERE c.relkind = 'r' AND "
" c.oid = a.attrelid AND "
" NOT a.attisdropped AND "
" a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND "
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
- " n.nspname !~ '^pg_temp_' AND "
+ " n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
@@ -163,7 +163,7 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
{
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+ pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
@@ -186,13 +186,12 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
if (found)
{
pg_log(PG_REPORT, "fatal\n");
- pg_log(PG_FATAL,
- "Your installation contains the \"tsquery\" data type. This data type\n"
- "added a new internal field between your old and new clusters so this\n"
+ pg_fatal("Your installation contains the \"tsquery\" data type. This data type\n"
+ "added a new internal field between your old and new clusters so this\n"
"cluster cannot currently be upgraded. You can remove the problem\n"
- "columns and restart the upgrade. A list of the problem columns is in the\n"
- "file:\n"
- " %s\n\n", output_path);
+ "columns and restart the upgrade. A list of the problem columns is in the\n"
+ "file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
@@ -242,8 +241,8 @@ old_8_3_check_ltree_usage(ClusterInfo *cluster)
{
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ pg_fatal("Could not open file \"%s\": %s\n",
+ output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
@@ -265,14 +264,13 @@ old_8_3_check_ltree_usage(ClusterInfo *cluster)
if (found)
{
pg_log(PG_REPORT, "fatal\n");
- pg_log(PG_FATAL,
- "Your installation contains the \"ltree\" data type. This data type\n"
- "changed its internal storage format between your old and new clusters so this\n"
- "cluster cannot currently be upgraded. You can manually upgrade databases\n"
- "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
- "cluster and restart the upgrade. A list of the problem functions is in the\n"
- "file:\n"
- " %s\n\n", output_path);
+ pg_fatal("Your installation contains the \"ltree\" data type. This data type\n"
+ "changed its internal storage format between your old and new clusters so this\n"
+ "cluster cannot currently be upgraded. You can manually upgrade databases\n"
+ "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
+ "cluster and restart the upgrade. A list of the problem functions is in the\n"
+ "file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
@@ -323,13 +321,16 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
"FROM pg_catalog.pg_class c, "
" pg_catalog.pg_namespace n, "
" pg_catalog.pg_attribute a "
+ /* materialized views didn't exist in 8.3, so no need to check 'm' */
"WHERE c.relkind = 'r' AND "
" c.oid = a.attrelid AND "
" NOT a.attisdropped AND "
+ /* child attribute changes are processed by the parent */
+ " a.attinhcount = 0 AND "
" a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND "
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
- " n.nspname !~ '^pg_temp_' AND "
+ " n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
@@ -343,9 +344,12 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
"FROM pg_catalog.pg_class c, " \
" pg_catalog.pg_namespace n, " \
" pg_catalog.pg_attribute a " \
+ /* materialized views didn't exist in 8.3, so no need to check 'm' */ \
"WHERE c.relkind = 'r' AND " \
" c.oid = a.attrelid AND " \
" NOT a.attisdropped AND " \
+ /* child attribute changes are processed by the parent */ \
+ " a.attinhcount = 0 AND " \
" a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND " \
" c.relnamespace = n.oid AND " \
" n.nspname !~ '^pg_' AND " \
@@ -361,7 +365,7 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
if (!check_mode)
{
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+ pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "\\connect %s\n\n",
@@ -430,7 +434,7 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
/*
* old_8_3_invalidate_hash_gin_indexes()
* 8.3 -> 8.4
- * Hash, Gin, and GiST index binary format has changes from 8.3->8.4
+ * Hash and GIN index binary format changed from 8.3->8.4
*/
void
old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode)
@@ -477,7 +481,7 @@ old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode)
if (!check_mode)
{
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+ pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "\\connect %s\n",
@@ -596,7 +600,7 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
if (!check_mode)
{
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+ pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "\\connect %s\n",
@@ -675,9 +679,9 @@ old_8_3_create_sequence_script(ClusterInfo *cluster)
int dbnum;
FILE *script = NULL;
bool found = false;
- char *output_path = pg_malloc(MAXPGPATH);
+ char *output_path;
- snprintf(output_path, MAXPGPATH, "adjust_sequences.sql");
+ output_path = pg_strdup("adjust_sequences.sql");
prep_status("Creating script to adjust sequences");
@@ -700,7 +704,7 @@ old_8_3_create_sequence_script(ClusterInfo *cluster)
"WHERE c.relkind = 'S' AND "
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
- " n.nspname !~ '^pg_temp_' AND "
+ " n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
@@ -718,7 +722,7 @@ old_8_3_create_sequence_script(ClusterInfo *cluster)
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
- pg_log(PG_FATAL, "could not open file \"%s\": %s\n", output_path, getErrorText(errno));
+ pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "\\connect %s\n\n",
diff --git a/contrib/pg_upgrade_support/pg_upgrade_support.c b/contrib/pg_upgrade_support/pg_upgrade_support.c
index 472f1525b0..edd41d06ae 100644
--- a/contrib/pg_upgrade_support/pg_upgrade_support.c
+++ b/contrib/pg_upgrade_support/pg_upgrade_support.c
@@ -5,12 +5,13 @@
* to control oid and relfilenode assignment, and do other special
* hacks needed for pg_upgrade.
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
* contrib/pg_upgrade_support/pg_upgrade_support.c
*/
#include "postgres.h"
+#include "catalog/binary_upgrade.h"
#include "catalog/namespace.h"
#include "catalog/pg_type.h"
#include "commands/extension.h"
@@ -24,30 +25,6 @@
PG_MODULE_MAGIC;
#endif
-extern PGDLLIMPORT Oid binary_upgrade_next_pg_type_oid;
-extern PGDLLIMPORT Oid binary_upgrade_next_array_pg_type_oid;
-extern PGDLLIMPORT Oid binary_upgrade_next_toast_pg_type_oid;
-
-extern PGDLLIMPORT Oid binary_upgrade_next_heap_pg_class_oid;
-extern PGDLLIMPORT Oid binary_upgrade_next_index_pg_class_oid;
-extern PGDLLIMPORT Oid binary_upgrade_next_toast_pg_class_oid;
-
-extern PGDLLIMPORT Oid binary_upgrade_next_pg_enum_oid;
-extern PGDLLIMPORT Oid binary_upgrade_next_pg_authid_oid;
-
-Datum set_next_pg_type_oid(PG_FUNCTION_ARGS);
-Datum set_next_array_pg_type_oid(PG_FUNCTION_ARGS);
-Datum set_next_toast_pg_type_oid(PG_FUNCTION_ARGS);
-
-Datum set_next_heap_pg_class_oid(PG_FUNCTION_ARGS);
-Datum set_next_index_pg_class_oid(PG_FUNCTION_ARGS);
-Datum set_next_toast_pg_class_oid(PG_FUNCTION_ARGS);
-
-Datum set_next_pg_enum_oid(PG_FUNCTION_ARGS);
-Datum set_next_pg_authid_oid(PG_FUNCTION_ARGS);
-
-Datum create_empty_extension(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(set_next_pg_type_oid);
PG_FUNCTION_INFO_V1(set_next_array_pg_type_oid);
PG_FUNCTION_INFO_V1(set_next_toast_pg_type_oid);
diff --git a/contrib/pg_xlogdump/.gitignore b/contrib/pg_xlogdump/.gitignore
new file mode 100644
index 0000000000..71f8531c40
--- /dev/null
+++ b/contrib/pg_xlogdump/.gitignore
@@ -0,0 +1,19 @@
+/pg_xlogdump
+# Source files copied from src/backend/access/
+/clogdesc.c
+/dbasedesc.c
+/gindesc.c
+/gistdesc.c
+/hashdesc.c
+/heapdesc.c
+/mxactdesc.c
+/nbtdesc.c
+/relmapdesc.c
+/seqdesc.c
+/smgrdesc.c
+/spgdesc.c
+/standbydesc.c
+/tblspcdesc.c
+/xactdesc.c
+/xlogdesc.c
+/xlogreader.c
diff --git a/contrib/pg_xlogdump/Makefile b/contrib/pg_xlogdump/Makefile
new file mode 100644
index 0000000000..ada261c4dd
--- /dev/null
+++ b/contrib/pg_xlogdump/Makefile
@@ -0,0 +1,31 @@
+# contrib/pg_xlogdump/Makefile
+
+PGFILEDESC = "pg_xlogdump"
+PGAPPICON=win32
+
+PROGRAM = pg_xlogdump
+OBJS = pg_xlogdump.o compat.o xlogreader.o rmgrdesc.o \
+ $(RMGRDESCOBJS) $(WIN32RES)
+
+RMGRDESCSOURCES = $(notdir $(wildcard $(top_srcdir)/src/backend/access/rmgrdesc/*desc.c))
+RMGRDESCOBJS = $(patsubst %.c,%.o,$(RMGRDESCSOURCES))
+
+EXTRA_CLEAN = $(RMGRDESCSOURCES) xlogreader.c
+
+ifdef USE_PGXS
+$(error "pg_xlogdump cannot be built with PGXS")
+endif
+
+subdir = contrib/pg_xlogdump
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+
+
+override CPPFLAGS := -DFRONTEND $(CPPFLAGS)
+
+xlogreader.c: % : $(top_srcdir)/src/backend/access/transam/%
+ rm -f $@ && $(LN_S) $< .
+
+$(RMGRDESCSOURCES): % : $(top_srcdir)/src/backend/access/rmgrdesc/%
+ rm -f $@ && $(LN_S) $< .
diff --git a/contrib/pg_xlogdump/compat.c b/contrib/pg_xlogdump/compat.c
new file mode 100644
index 0000000000..6ca7012fd9
--- /dev/null
+++ b/contrib/pg_xlogdump/compat.c
@@ -0,0 +1,99 @@
+/*-------------------------------------------------------------------------
+ *
+ * compat.c
+ * Reimplementations of various backend functions.
+ *
+ * Portions Copyright (c) 2013-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/pg_xlogdump/compat.c
+ *
+ * This file contains client-side implementations for various backend
+ * functions that the rm_desc functions in *desc.c files rely on.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ugly hack, same as in e.g. pg_controldata */
+#define FRONTEND 1
+#include "postgres.h"
+
+#include <time.h>
+
+#include "utils/datetime.h"
+#include "lib/stringinfo.h"
+
+/* copied from timestamp.c */
+pg_time_t
+timestamptz_to_time_t(TimestampTz t)
+{
+ pg_time_t result;
+
+#ifdef HAVE_INT64_TIMESTAMP
+ result = (pg_time_t) (t / USECS_PER_SEC +
+ ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
+#else
+ result = (pg_time_t) (t +
+ ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
+#endif
+ return result;
+}
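timestamptz_to_time_t() shifts the value from the Postgres epoch (2000-01-01) to the Unix epoch (1970-01-01); the constant offset is (2451545 - 2440588) * 86400 = 946684800 seconds. A self-contained sketch of the integer-timestamp case (the constants are copied from the values those macros normally carry; this is illustration, not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define USECS_PER_SEC			1000000
#define POSTGRES_EPOCH_JDATE	2451545		/* 2000-01-01 */
#define UNIX_EPOCH_JDATE		2440588		/* 1970-01-01 */
#define SECS_PER_DAY			86400

int
main(void)
{
	int64_t		ts = 0;			/* Postgres timestamp 0 = 2000-01-01 00:00:00 UTC */
	time_t		unix_time;

	unix_time = (time_t) (ts / USECS_PER_SEC +
						  ((int64_t) (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));

	/* prints 946684800 followed by the corresponding local time */
	printf("%lld -> %s", (long long) unix_time, ctime(&unix_time));
	return 0;
}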
+
+/*
+ * Stopgap implementation of timestamptz_to_str that doesn't depend on backend
+ * infrastructure. This will work for timestamps that are within the range
+ * of the platform time_t type. (pg_time_t is compatible except for possibly
+ * being wider.)
+ *
+ * XXX the return value points to a static buffer, so beware of using more
+ * than one result value concurrently.
+ *
+ * XXX: The backend timestamp infrastructure should instead be split out and
+ * moved into src/common. That's a large project though.
+ */
+const char *
+timestamptz_to_str(TimestampTz dt)
+{
+ static char buf[MAXDATELEN + 1];
+ char ts[MAXDATELEN + 1];
+ char zone[MAXDATELEN + 1];
+ time_t result = (time_t) timestamptz_to_time_t(dt);
+ struct tm *ltime = localtime(&result);
+
+ strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", ltime);
+ strftime(zone, sizeof(zone), "%Z", ltime);
+
+#ifdef HAVE_INT64_TIMESTAMP
+ sprintf(buf, "%s.%06d %s", ts, (int) (dt % USECS_PER_SEC), zone);
+#else
+ sprintf(buf, "%s.%.6f %s", ts, fabs(dt - floor(dt)), zone);
+#endif
+
+ return buf;
+}
+
+/*
+ * Provide a hacked up compat layer for StringInfos so xlog desc functions can
+ * be linked/called.
+ */
+void
+appendStringInfo(StringInfo str, const char *fmt,...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+}
+
+void
+appendStringInfoString(StringInfo str, const char *string)
+{
+ appendStringInfo(str, "%s", string);
+}
+
+void
+appendStringInfoChar(StringInfo str, char ch)
+{
+ appendStringInfo(str, "%c", ch);
+}
diff --git a/contrib/pg_xlogdump/pg_xlogdump.c b/contrib/pg_xlogdump/pg_xlogdump.c
new file mode 100644
index 0000000000..824b8c393c
--- /dev/null
+++ b/contrib/pg_xlogdump/pg_xlogdump.c
@@ -0,0 +1,735 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_xlogdump.c - decode and display WAL
+ *
+ * Copyright (c) 2013-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/pg_xlogdump/pg_xlogdump.c
+ *-------------------------------------------------------------------------
+ */
+
+#define FRONTEND 1
+#include "postgres.h"
+
+#include <dirent.h>
+#include <unistd.h>
+
+#include "access/xlog.h"
+#include "access/xlogreader.h"
+#include "access/transam.h"
+#include "common/fe_memutils.h"
+#include "getopt_long.h"
+#include "rmgrdesc.h"
+
+
+static const char *progname;
+
+typedef struct XLogDumpPrivate
+{
+ TimeLineID timeline;
+ char *inpath;
+ XLogRecPtr startptr;
+ XLogRecPtr endptr;
+ bool endptr_reached;
+} XLogDumpPrivate;
+
+typedef struct XLogDumpConfig
+{
+ /* display options */
+ bool bkp_details;
+ int stop_after_records;
+ int already_displayed_records;
+ bool follow;
+
+ /* filter options */
+ int filter_by_rmgr;
+ TransactionId filter_by_xid;
+ bool filter_by_xid_enabled;
+} XLogDumpConfig;
+
+static void
+fatal_error(const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+
+/*
+ * Big red button to push when things go horribly wrong.
+ */
+static void
+fatal_error(const char *fmt,...)
+{
+ va_list args;
+
+ fflush(stdout);
+
+ fprintf(stderr, "%s: FATAL: ", progname);
+ va_start(args, fmt);
+ vfprintf(stderr, fmt, args);
+ va_end(args);
+ fputc('\n', stderr);
+
+ exit(EXIT_FAILURE);
+}
+
+static void
+print_rmgr_list(void)
+{
+ int i;
+
+ for (i = 0; i <= RM_MAX_ID; i++)
+ {
+ printf("%s\n", RmgrDescTable[i].rm_name);
+ }
+}
+
+/*
+ * Check whether directory exists and whether we can open it. Keep errno set so
+ * that the caller can report errors somewhat more accurately.
+ */
+static bool
+verify_directory(const char *directory)
+{
+ DIR *dir = opendir(directory);
+
+ if (dir == NULL)
+ return false;
+ closedir(dir);
+ return true;
+}
+
+/*
+ * Split a pathname as dirname(1) and basename(1) would.
+ *
+ * XXX this probably doesn't do very well on Windows. We probably need to
+ * apply canonicalize_path(), at the very least.
+ */
+static void
+split_path(const char *path, char **dir, char **fname)
+{
+ char *sep;
+
+ /* split filepath into directory & filename */
+ sep = strrchr(path, '/');
+
+ /* directory path */
+ if (sep != NULL)
+ {
+ *dir = pg_strdup(path);
+ (*dir)[(sep - path) + 1] = '\0'; /* no strndup */
+ *fname = pg_strdup(sep + 1);
+ }
+ /* local directory */
+ else
+ {
+ *dir = NULL;
+ *fname = pg_strdup(path);
+ }
+}
+
+/*
+ * Try to find the file in several places:
+ * if directory == NULL:
+ * fname
+ * XLOGDIR / fname
+ * $PGDATA / XLOGDIR / fname
+ * else
+ * directory / fname
+ * directory / XLOGDIR / fname
+ *
+ * return a read only fd
+ */
+static int
+fuzzy_open_file(const char *directory, const char *fname)
+{
+ int fd = -1;
+ char fpath[MAXPGPATH];
+
+ if (directory == NULL)
+ {
+ const char *datadir;
+
+ /* fname */
+ fd = open(fname, O_RDONLY | PG_BINARY, 0);
+ if (fd < 0 && errno != ENOENT)
+ return -1;
+ else if (fd >= 0)
+ return fd;
+
+ /* XLOGDIR / fname */
+ snprintf(fpath, MAXPGPATH, "%s/%s",
+ XLOGDIR, fname);
+ fd = open(fpath, O_RDONLY | PG_BINARY, 0);
+ if (fd < 0 && errno != ENOENT)
+ return -1;
+ else if (fd >= 0)
+ return fd;
+
+ datadir = getenv("PGDATA");
+ /* $PGDATA / XLOGDIR / fname */
+ if (datadir != NULL)
+ {
+ snprintf(fpath, MAXPGPATH, "%s/%s/%s",
+ datadir, XLOGDIR, fname);
+ fd = open(fpath, O_RDONLY | PG_BINARY, 0);
+ if (fd < 0 && errno != ENOENT)
+ return -1;
+ else if (fd >= 0)
+ return fd;
+ }
+ }
+ else
+ {
+ /* directory / fname */
+ snprintf(fpath, MAXPGPATH, "%s/%s",
+ directory, fname);
+ fd = open(fpath, O_RDONLY | PG_BINARY, 0);
+ if (fd < 0 && errno != ENOENT)
+ return -1;
+ else if (fd >= 0)
+ return fd;
+
+ /* directory / XLOGDIR / fname */
+ snprintf(fpath, MAXPGPATH, "%s/%s/%s",
+ directory, XLOGDIR, fname);
+ fd = open(fpath, O_RDONLY | PG_BINARY, 0);
+ if (fd < 0 && errno != ENOENT)
+ return -1;
+ else if (fd >= 0)
+ return fd;
+ }
+ return -1;
+}
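/*
 * Illustration only: with directory == NULL, PGDATA set to /var/lib/pgsql/data
 * (a hypothetical data directory) and fname "000000010000000000000003", the
 * candidates tried above are, in order:
 *
 *   000000010000000000000003
 *   pg_xlog/000000010000000000000003
 *   /var/lib/pgsql/data/pg_xlog/000000010000000000000003
 *
 * XLOGDIR expands to "pg_xlog" in this tree.
 */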
+
+/*
+ * Read count bytes from a segment file in the specified directory, for the
+ * given timeline, containing the specified record pointer; store the data in
+ * the passed buffer.
+ */
+static void
+XLogDumpXLogRead(const char *directory, TimeLineID timeline_id,
+ XLogRecPtr startptr, char *buf, Size count)
+{
+ char *p;
+ XLogRecPtr recptr;
+ Size nbytes;
+
+ static int sendFile = -1;
+ static XLogSegNo sendSegNo = 0;
+ static uint32 sendOff = 0;
+
+ p = buf;
+ recptr = startptr;
+ nbytes = count;
+
+ while (nbytes > 0)
+ {
+ uint32 startoff;
+ int segbytes;
+ int readbytes;
+
+ startoff = recptr % XLogSegSize;
+
+ if (sendFile < 0 || !XLByteInSeg(recptr, sendSegNo))
+ {
+ char fname[MAXFNAMELEN];
+
+ /* Switch to another logfile segment */
+ if (sendFile >= 0)
+ close(sendFile);
+
+ XLByteToSeg(recptr, sendSegNo);
+
+ XLogFileName(fname, timeline_id, sendSegNo);
+
+ sendFile = fuzzy_open_file(directory, fname);
+
+ if (sendFile < 0)
+ fatal_error("could not find file \"%s\": %s",
+ fname, strerror(errno));
+ sendOff = 0;
+ }
+
+ /* Need to seek in the file? */
+ if (sendOff != startoff)
+ {
+ if (lseek(sendFile, (off_t) startoff, SEEK_SET) < 0)
+ {
+ int err = errno;
+ char fname[MAXPGPATH];
+
+ XLogFileName(fname, timeline_id, sendSegNo);
+
+ fatal_error("could not seek in log segment %s to offset %u: %s",
+ fname, startoff, strerror(err));
+ }
+ sendOff = startoff;
+ }
+
+ /* How many bytes are within this segment? */
+ if (nbytes > (XLogSegSize - startoff))
+ segbytes = XLogSegSize - startoff;
+ else
+ segbytes = nbytes;
+
+ readbytes = read(sendFile, p, segbytes);
+ if (readbytes <= 0)
+ {
+ int err = errno;
+ char fname[MAXPGPATH];
+
+ XLogFileName(fname, timeline_id, sendSegNo);
+
+ fatal_error("could not read from log segment %s, offset %d, length %d: %s",
+ fname, sendOff, segbytes, strerror(err));
+ }
+
+ /* Update state for read */
+ recptr += readbytes;
+
+ sendOff += readbytes;
+ nbytes -= readbytes;
+ p += readbytes;
+ }
+}
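/*
 * Worked example (illustration only, assuming the default 16 MB XLogSegSize):
 * for LSN 0/3001234 the loop above computes
 *
 *   sendSegNo = 0x3001234 / 0x1000000 = 3        (XLByteToSeg)
 *   startoff  = 0x3001234 % 0x1000000 = 0x1234
 *
 * so on timeline 1 it reads segment file 000000010000000000000003 starting
 * at offset 0x1234.
 */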
+
+/*
+ * XLogReader read_page callback
+ */
+static int
+XLogDumpReadPage(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen,
+ XLogRecPtr targetPtr, char *readBuff, TimeLineID *curFileTLI)
+{
+ XLogDumpPrivate *private = state->private_data;
+ int count = XLOG_BLCKSZ;
+
+ if (private->endptr != InvalidXLogRecPtr)
+ {
+ if (targetPagePtr + XLOG_BLCKSZ <= private->endptr)
+ count = XLOG_BLCKSZ;
+ else if (targetPagePtr + reqLen <= private->endptr)
+ count = private->endptr - targetPagePtr;
+ else
+ {
+ private->endptr_reached = true;
+ return -1;
+ }
+ }
+
+ XLogDumpXLogRead(private->inpath, private->timeline, targetPagePtr,
+ readBuff, count);
+
+ return count;
+}
+
+/*
+ * Print a record to stdout
+ */
+static void
+XLogDumpDisplayRecord(XLogDumpConfig *config, XLogRecPtr ReadRecPtr, XLogRecord *record)
+{
+ const RmgrDescData *desc = &RmgrDescTable[record->xl_rmid];
+
+ if (config->filter_by_rmgr != -1 &&
+ config->filter_by_rmgr != record->xl_rmid)
+ return;
+
+ if (config->filter_by_xid_enabled &&
+ config->filter_by_xid != record->xl_xid)
+ return;
+
+ config->already_displayed_records++;
+
+ printf("rmgr: %-11s len (rec/tot): %6u/%6u, tx: %10u, lsn: %X/%08X, prev %X/%08X, bkp: %u%u%u%u, desc: ",
+ desc->rm_name,
+ record->xl_len, record->xl_tot_len,
+ record->xl_xid,
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
+ (uint32) (record->xl_prev >> 32), (uint32) record->xl_prev,
+ !!(XLR_BKP_BLOCK(0) & record->xl_info),
+ !!(XLR_BKP_BLOCK(1) & record->xl_info),
+ !!(XLR_BKP_BLOCK(2) & record->xl_info),
+ !!(XLR_BKP_BLOCK(3) & record->xl_info));
+
+ /* the desc routine will printf the description directly to stdout */
+ desc->rm_desc(NULL, record->xl_info, XLogRecGetData(record));
+
+ putchar('\n');
+
+ if (config->bkp_details)
+ {
+ int bkpnum;
+ char *blk = (char *) XLogRecGetData(record) + record->xl_len;
+
+ for (bkpnum = 0; bkpnum < XLR_MAX_BKP_BLOCKS; bkpnum++)
+ {
+ BkpBlock bkpb;
+
+ if (!(XLR_BKP_BLOCK(bkpnum) & record->xl_info))
+ continue;
+
+ memcpy(&bkpb, blk, sizeof(BkpBlock));
+ blk += sizeof(BkpBlock);
+ blk += BLCKSZ - bkpb.hole_length;
+
+ printf("\tbackup bkp #%u; rel %u/%u/%u; fork: %s; block: %u; hole: offset: %u, length: %u\n",
+ bkpnum,
+ bkpb.node.spcNode, bkpb.node.dbNode, bkpb.node.relNode,
+ forkNames[bkpb.fork],
+ bkpb.block, bkpb.hole_offset, bkpb.hole_length);
+ }
+ }
+}
+
+static void
+usage(void)
+{
+ printf("%s decodes and displays PostgreSQL transaction logs for debugging.\n\n",
+ progname);
+ printf("Usage:\n");
+ printf(" %s [OPTION]... [STARTSEG [ENDSEG]]\n", progname);
+ printf("\nOptions:\n");
+ printf(" -b, --bkp-details output detailed information about backup blocks\n");
+ printf(" -e, --end=RECPTR stop reading at log position RECPTR\n");
+ printf(" -f, --follow keep retrying after reaching end of WAL\n");
+ printf(" -n, --limit=N number of records to display\n");
+ printf(" -p, --path=PATH directory in which to find log segment files\n");
+ printf(" (default: ./pg_xlog)\n");
+ printf(" -r, --rmgr=RMGR only show records generated by resource manager RMGR\n");
+ printf(" use --rmgr=list to list valid resource manager names\n");
+ printf(" -s, --start=RECPTR start reading at log position RECPTR\n");
+ printf(" -t, --timeline=TLI timeline from which to read log records\n");
+ printf(" (default: 1 or the value used in STARTSEG)\n");
+ printf(" -V, --version output version information, then exit\n");
+ printf(" -x, --xid=XID only show records with TransactionId XID\n");
+ printf(" -?, --help show this help, then exit\n");
+}
+
+int
+main(int argc, char **argv)
+{
+ uint32 xlogid;
+ uint32 xrecoff;
+ XLogReaderState *xlogreader_state;
+ XLogDumpPrivate private;
+ XLogDumpConfig config;
+ XLogRecord *record;
+ XLogRecPtr first_record;
+ char *errormsg;
+
+ static struct option long_options[] = {
+ {"bkp-details", no_argument, NULL, 'b'},
+ {"end", required_argument, NULL, 'e'},
+ {"follow", no_argument, NULL, 'f'},
+ {"help", no_argument, NULL, '?'},
+ {"limit", required_argument, NULL, 'n'},
+ {"path", required_argument, NULL, 'p'},
+ {"rmgr", required_argument, NULL, 'r'},
+ {"start", required_argument, NULL, 's'},
+ {"timeline", required_argument, NULL, 't'},
+ {"xid", required_argument, NULL, 'x'},
+ {"version", no_argument, NULL, 'V'},
+ {NULL, 0, NULL, 0}
+ };
+
+ int option;
+ int optindex = 0;
+
+ progname = get_progname(argv[0]);
+
+ memset(&private, 0, sizeof(XLogDumpPrivate));
+ memset(&config, 0, sizeof(XLogDumpConfig));
+
+ private.timeline = 1;
+ private.startptr = InvalidXLogRecPtr;
+ private.endptr = InvalidXLogRecPtr;
+ private.endptr_reached = false;
+
+ config.bkp_details = false;
+ config.stop_after_records = -1;
+ config.already_displayed_records = 0;
+ config.follow = false;
+ config.filter_by_rmgr = -1;
+ config.filter_by_xid = InvalidTransactionId;
+ config.filter_by_xid_enabled = false;
+
+ if (argc <= 1)
+ {
+ fprintf(stderr, "%s: no arguments specified\n", progname);
+ goto bad_argument;
+ }
+
+ while ((option = getopt_long(argc, argv, "be:?fn:p:r:s:t:Vx:",
+ long_options, &optindex)) != -1)
+ {
+ switch (option)
+ {
+ case 'b':
+ config.bkp_details = true;
+ break;
+ case 'e':
+ if (sscanf(optarg, "%X/%X", &xlogid, &xrecoff) != 2)
+ {
+ fprintf(stderr, "%s: could not parse end log position \"%s\"\n",
+ progname, optarg);
+ goto bad_argument;
+ }
+ private.endptr = (uint64) xlogid << 32 | xrecoff;
+ break;
+ case 'f':
+ config.follow = true;
+ break;
+ case '?':
+ usage();
+ exit(EXIT_SUCCESS);
+ break;
+ case 'n':
+ if (sscanf(optarg, "%d", &config.stop_after_records) != 1)
+ {
+ fprintf(stderr, "%s: could not parse limit \"%s\"\n",
+ progname, optarg);
+ goto bad_argument;
+ }
+ break;
+ case 'p':
+ private.inpath = pg_strdup(optarg);
+ break;
+ case 'r':
+ {
+ int i;
+
+ if (pg_strcasecmp(optarg, "list") == 0)
+ {
+ print_rmgr_list();
+ exit(EXIT_SUCCESS);
+ }
+
+ for (i = 0; i <= RM_MAX_ID; i++)
+ {
+ if (pg_strcasecmp(optarg, RmgrDescTable[i].rm_name) == 0)
+ {
+ config.filter_by_rmgr = i;
+ break;
+ }
+ }
+
+ if (config.filter_by_rmgr == -1)
+ {
+ fprintf(stderr, "%s: resource manager \"%s\" does not exist\n",
+ progname, optarg);
+ goto bad_argument;
+ }
+ }
+ break;
+ case 's':
+ if (sscanf(optarg, "%X/%X", &xlogid, &xrecoff) != 2)
+ {
+ fprintf(stderr, "%s: could not parse start log position \"%s\"\n",
+ progname, optarg);
+ goto bad_argument;
+ }
+ else
+ private.startptr = (uint64) xlogid << 32 | xrecoff;
+ break;
+ case 't':
+ if (sscanf(optarg, "%d", &private.timeline) != 1)
+ {
+ fprintf(stderr, "%s: could not parse timeline \"%s\"\n",
+ progname, optarg);
+ goto bad_argument;
+ }
+ break;
+ case 'V':
+ puts("pg_xlogdump (PostgreSQL) " PG_VERSION);
+ exit(EXIT_SUCCESS);
+ break;
+ case 'x':
+ if (sscanf(optarg, "%u", &config.filter_by_xid) != 1)
+ {
+ fprintf(stderr, "%s: could not parse \"%s\" as a valid xid\n",
+ progname, optarg);
+ goto bad_argument;
+ }
+ config.filter_by_xid_enabled = true;
+ break;
+ default:
+ goto bad_argument;
+ }
+ }
+
+ if ((optind + 2) < argc)
+ {
+ fprintf(stderr,
+ "%s: too many command-line arguments (first is \"%s\")\n",
+ progname, argv[optind + 2]);
+ goto bad_argument;
+ }
+
+ if (private.inpath != NULL)
+ {
+ /* validate path points to directory */
+ if (!verify_directory(private.inpath))
+ {
+ fprintf(stderr,
+ "%s: path \"%s\" cannot be opened: %s\n",
+ progname, private.inpath, strerror(errno));
+ goto bad_argument;
+ }
+ }
+
+ /* parse files as start/end boundaries, extract path if not specified */
+ if (optind < argc)
+ {
+ char *directory = NULL;
+ char *fname = NULL;
+ int fd;
+ XLogSegNo segno;
+
+ split_path(argv[optind], &directory, &fname);
+
+ if (private.inpath == NULL && directory != NULL)
+ {
+ private.inpath = directory;
+
+ if (!verify_directory(private.inpath))
+ fatal_error("cannot open directory \"%s\": %s",
+ private.inpath, strerror(errno));
+ }
+
+ fd = fuzzy_open_file(private.inpath, fname);
+ if (fd < 0)
+ fatal_error("could not open file \"%s\"", fname);
+ close(fd);
+
+ /* parse position from file */
+ XLogFromFileName(fname, &private.timeline, &segno);
+
+ if (XLogRecPtrIsInvalid(private.startptr))
+ XLogSegNoOffsetToRecPtr(segno, 0, private.startptr);
+ else if (!XLByteInSeg(private.startptr, segno))
+ {
+ fprintf(stderr,
+ "%s: start log position %X/%X is not inside file \"%s\"\n",
+ progname,
+ (uint32) (private.startptr >> 32),
+ (uint32) private.startptr,
+ fname);
+ goto bad_argument;
+ }
+
+ /* no second file specified, set end position */
+ if (!(optind + 1 < argc) && XLogRecPtrIsInvalid(private.endptr))
+ XLogSegNoOffsetToRecPtr(segno + 1, 0, private.endptr);
+
+ /* parse ENDSEG if passed */
+ if (optind + 1 < argc)
+ {
+ XLogSegNo endsegno;
+
+ /* ignore directory, already have that */
+ split_path(argv[optind + 1], &directory, &fname);
+
+ fd = fuzzy_open_file(private.inpath, fname);
+ if (fd < 0)
+ fatal_error("could not open file \"%s\"", fname);
+ close(fd);
+
+ /* parse position from file */
+ XLogFromFileName(fname, &private.timeline, &endsegno);
+
+ if (endsegno < segno)
+ fatal_error("ENDSEG %s is before STARTSEG %s",
+ argv[optind + 1], argv[optind]);
+
+ if (XLogRecPtrIsInvalid(private.endptr))
+ XLogSegNoOffsetToRecPtr(endsegno + 1, 0, private.endptr);
+
+ /* set segno to endsegno for check of --end */
+ segno = endsegno;
+ }
+
+
+ if (!XLByteInSeg(private.endptr, segno) &&
+ private.endptr != (segno + 1) * XLogSegSize)
+ {
+ fprintf(stderr,
+ "%s: end log position %X/%X is not inside file \"%s\"\n",
+ progname,
+ (uint32) (private.endptr >> 32),
+ (uint32) private.endptr,
+ argv[argc - 1]);
+ goto bad_argument;
+ }
+ }
+
+ /* we don't know what to print */
+ if (XLogRecPtrIsInvalid(private.startptr))
+ {
+ fprintf(stderr, "%s: no start log position given in range mode.\n", progname);
+ goto bad_argument;
+ }
+
+ /* done with argument parsing, do the actual work */
+
+ /* we have everything we need, start reading */
+ xlogreader_state = XLogReaderAllocate(XLogDumpReadPage, &private);
+ if (!xlogreader_state)
+ fatal_error("out of memory");
+
+ /* first find a valid recptr to start from */
+ first_record = XLogFindNextRecord(xlogreader_state, private.startptr);
+
+ if (first_record == InvalidXLogRecPtr)
+ fatal_error("could not find a valid record after %X/%X",
+ (uint32) (private.startptr >> 32),
+ (uint32) private.startptr);
+
+ /*
+ * Display a message that we're skipping data if `from` wasn't a pointer
+ * to the start of a record and also wasn't a pointer to the beginning of
+ * a segment (e.g. we were used in file mode).
+ */
+ if (first_record != private.startptr && (private.startptr % XLogSegSize) != 0)
+ printf("first record is after %X/%X, at %X/%X, skipping over %u bytes\n",
+ (uint32) (private.startptr >> 32), (uint32) private.startptr,
+ (uint32) (first_record >> 32), (uint32) first_record,
+ (uint32) (first_record - private.startptr));
+
+ for (;;)
+ {
+ /* try to read the next record */
+ record = XLogReadRecord(xlogreader_state, first_record, &errormsg);
+ if (!record)
+ {
+ if (!config.follow || private.endptr_reached)
+ break;
+ else
+ {
+ pg_usleep(1000000L); /* 1 second */
+ continue;
+ }
+ }
+
+ /* after reading the first record, continue at next one */
+ first_record = InvalidXLogRecPtr;
+ XLogDumpDisplayRecord(&config, xlogreader_state->ReadRecPtr, record);
+
+ /* check whether we printed enough */
+ if (config.stop_after_records > 0 &&
+ config.already_displayed_records >= config.stop_after_records)
+ break;
+ }
+
+ if (errormsg)
+ fatal_error("error in WAL record at %X/%X: %s",
+ (uint32) (xlogreader_state->ReadRecPtr >> 32),
+ (uint32) xlogreader_state->ReadRecPtr,
+ errormsg);
+
+ XLogReaderFree(xlogreader_state);
+
+ return EXIT_SUCCESS;
+
+bad_argument:
+ fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);
+ return EXIT_FAILURE;
+}
diff --git a/contrib/pg_xlogdump/rmgrdesc.c b/contrib/pg_xlogdump/rmgrdesc.c
new file mode 100644
index 0000000000..cbcaaa6b0c
--- /dev/null
+++ b/contrib/pg_xlogdump/rmgrdesc.c
@@ -0,0 +1,35 @@
+/*
+ * rmgrdesc.c
+ *
+ * pg_xlogdump resource managers definition
+ *
+ * contrib/pg_xlogdump/rmgrdesc.c
+ */
+#define FRONTEND 1
+#include "postgres.h"
+
+#include "access/clog.h"
+#include "access/gin.h"
+#include "access/gist_private.h"
+#include "access/hash.h"
+#include "access/heapam_xlog.h"
+#include "access/multixact.h"
+#include "access/nbtree.h"
+#include "access/rmgr.h"
+#include "access/spgist.h"
+#include "access/xact.h"
+#include "access/xlog_internal.h"
+#include "catalog/storage_xlog.h"
+#include "commands/dbcommands.h"
+#include "commands/sequence.h"
+#include "commands/tablespace.h"
+#include "rmgrdesc.h"
+#include "storage/standby.h"
+#include "utils/relmapper.h"
+
+#define PG_RMGR(symname,name,redo,desc,startup,cleanup) \
+ { name, desc, },
+
+const RmgrDescData RmgrDescTable[RM_MAX_ID + 1] = {
+#include "access/rmgrlist.h"
+};
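The table above is built with the usual X-macro trick: access/rmgrlist.h expands one PG_RMGR() entry per resource manager, and the local PG_RMGR definition keeps only the name and the desc callback. A self-contained sketch of the same pattern (the list macro below is made up; the real rmgrlist.h is simply #included with PG_RMGR predefined):

#include <stdio.h>

/* stand-in for access/rmgrlist.h: one X(...) line per resource manager */
#define DEMO_RMGR_LIST(X) \
	X(RM_XLOG_ID, "XLOG") \
	X(RM_XACT_ID, "Transaction") \
	X(RM_HEAP_ID, "Heap")

typedef struct
{
	const char *rm_name;
} DemoRmgrDesc;

/* keep only the field(s) this program cares about, as PG_RMGR does above */
#define DEMO_RMGR(symname, name) { name },

static const DemoRmgrDesc DemoRmgrTable[] = {
	DEMO_RMGR_LIST(DEMO_RMGR)
};

int
main(void)
{
	int			i;

	for (i = 0; i < (int) (sizeof(DemoRmgrTable) / sizeof(DemoRmgrTable[0])); i++)
		printf("%s\n", DemoRmgrTable[i].rm_name);
	return 0;
}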
diff --git a/contrib/pg_xlogdump/rmgrdesc.h b/contrib/pg_xlogdump/rmgrdesc.h
new file mode 100644
index 0000000000..edf8257751
--- /dev/null
+++ b/contrib/pg_xlogdump/rmgrdesc.h
@@ -0,0 +1,21 @@
+/*
+ * rmgrdesc.h
+ *
+ * pg_xlogdump resource managers declaration
+ *
+ * contrib/pg_xlogdump/rmgrdesc.h
+ */
+#ifndef RMGRDESC_H
+#define RMGRDESC_H
+
+#include "lib/stringinfo.h"
+
+typedef struct RmgrDescData
+{
+ const char *rm_name;
+ void (*rm_desc) (StringInfo buf, uint8 xl_info, char *rec);
+} RmgrDescData;
+
+extern const RmgrDescData RmgrDescTable[];
+
+#endif /* RMGRDESC_H */
diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c
index 24b0c7e105..534fd72150 100644
--- a/contrib/pgbench/pgbench.c
+++ b/contrib/pgbench/pgbench.c
@@ -5,7 +5,7 @@
* Originally written by Tatsuo Ishii and enhanced by many contributors.
*
* contrib/pgbench/pgbench.c
- * Copyright (c) 2000-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2000-2014, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
* Permission to use, copy, modify, and distribute this software and its
@@ -35,16 +35,12 @@
#include "getopt_long.h"
#include "libpq-fe.h"
-#include "libpq/pqsignal.h"
#include "portability/instr_time.h"
#include <ctype.h>
-
-#ifndef WIN32
+#include <math.h>
+#include <signal.h>
#include <sys/time.h>
-#include <unistd.h>
-#endif /* ! WIN32 */
-
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
@@ -73,7 +69,7 @@ static int pthread_join(pthread_t th, void **thread_return);
#include <pthread.h>
#else
/* Use emulation with fork. Rename pthread identifiers to avoid conflicts */
-
+#define PTHREAD_FORK_EMULATION
#include <sys/wait.h>
#define pthread_t pg_pthread_t
@@ -88,9 +84,6 @@ static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start
static int pthread_join(pthread_t th, void **thread_return);
#endif
-extern char *optarg;
-extern int optind;
-
/********************************************************************
* some configurable parameters */
@@ -102,6 +95,7 @@ extern int optind;
#define MAXCLIENTS 1024
#endif
+#define LOG_STEP_SECONDS 5 /* seconds between log messages */
#define DEFAULT_NXACTS 10 /* default nxacts */
int nxacts = 0; /* number of transactions per client */
@@ -120,11 +114,27 @@ int scale = 1;
int fillfactor = 100;
/*
+ * create foreign key constraints on the tables?
+ */
+int foreign_keys = 0;
+
+/*
* use unlogged tables?
*/
int unlogged_tables = 0;
/*
+ * log sampling rate (1.0 = log everything, 0.0 = option not given)
+ */
+double sample_rate = 0.0;
+
+/*
+ * When threads are throttled to a given rate limit, this is the target delay
+ * to reach that rate in usec. 0 is the default and means no throttling.
+ */
+int64 throttle_delay = 0;
+
+/*
* tablespace selection
*/
char *tablespace = NULL;
@@ -142,17 +152,33 @@ char *index_tablespace = NULL;
#ifdef PGXC
bool use_branch = false; /* use branch id in DDL and DML */
#endif
+/*
+ * The scale factor at/beyond which 32-bit integers are insufficient for
+ * storing TPC-B account IDs.
+ *
+ * Although the actual threshold is 21474, we use 20000 because it is easier to
+ * document and remember, and isn't that far away from the real threshold.
+ */
+#define SCALE_32BIT_THRESHOLD 20000
+
bool use_log; /* log transaction latencies to a file */
+bool use_quiet; /* quiet logging onto stderr */
+int agg_interval; /* log aggregates instead of individual
+ * transactions */
+int progress = 0; /* thread progress report every this many seconds */
+int progress_nclients = 0; /* number of clients for progress
+ * report */
+int progress_nthreads = 0; /* number of threads for progress
+ * report */
bool is_connect; /* establish connection for each transaction */
bool is_latencies; /* report per-command latencies */
int main_pid; /* main process id used in log filename */
char *pghost = "";
char *pgport = "";
-char *pgoptions = NULL;
-char *pgtty = NULL;
char *login = NULL;
char *dbName;
+const char *progname;
volatile bool timer_exceeded = false; /* flag from signal handler */
@@ -180,11 +206,15 @@ typedef struct
int listen; /* 0 indicates that an async query has been
* sent */
int sleeping; /* 1 indicates that the client is napping */
+ bool throttling; /* whether nap is for throttling */
int64 until; /* napping until (usec) */
Variable *variables; /* array of variable definitions */
int nvariables;
instr_time txn_begin; /* used for measuring transaction latencies */
instr_time stmt_begin; /* used for measuring statement latencies */
+ int64 txn_latencies; /* cumulated latencies */
+ int64 txn_sqlats; /* cumulated square latencies */
+ bool is_throttled; /* whether transaction throttling is done */
int use_file; /* index in sql_files for this client */
bool prepared[MAX_FILES];
} CState;
@@ -202,6 +232,9 @@ typedef struct
instr_time *exec_elapsed; /* time spent executing cmds (per Command) */
int *exec_count; /* number of cmd executions (per Command) */
unsigned short random_state[3]; /* separate randomness for each thread */
+ int64 throttle_trigger; /* previous/next throttling (us) */
+ int64 throttle_lag; /* total transaction lag behind throttling */
+ int64 throttle_lag_max; /* max transaction lag */
} TState;
#define INVALID_THREAD ((pthread_t) 0)
@@ -209,7 +242,11 @@ typedef struct
typedef struct
{
instr_time conn_time;
- int xacts;
+ int64 xacts;
+ int64 latencies;
+ int64 sqlats;
+ int64 throttle_lag;
+ int64 throttle_lag_max;
} TResult;
/*
@@ -239,6 +276,19 @@ typedef struct
char *argv[MAX_ARGS]; /* command word list */
} Command;
+typedef struct
+{
+
+ long start_time; /* when does the interval start */
+ int cnt; /* number of transactions */
+ double min_duration; /* min/max durations */
+ double max_duration;
+ double sum; /* sum(duration), sum(duration^2) - for
+ * estimates */
+ double sum2;
+
+} AggVals;
+
static Command **sql_files[MAX_FILES]; /* SQL script files */
static int num_files; /* number of script files */
static int num_commands = 0; /* total number of Command structs */
@@ -326,108 +376,134 @@ static char *select_only = {
static void setalarm(int seconds);
static void *threadRun(void *arg);
-
-/*
- * routines to check mem allocations and fail noisily.
- */
-static void *
-xmalloc(size_t size)
-{
- void *result;
-
- result = malloc(size);
- if (!result)
- {
- fprintf(stderr, "out of memory\n");
- exit(1);
- }
- return result;
-}
-
-static void *
-xrealloc(void *ptr, size_t size)
-{
- void *result;
-
- result = realloc(ptr, size);
- if (!result)
- {
- fprintf(stderr, "out of memory\n");
- exit(1);
- }
- return result;
-}
-
-static char *
-xstrdup(const char *s)
-{
- char *result;
-
- result = strdup(s);
- if (!result)
- {
- fprintf(stderr, "out of memory\n");
- exit(1);
- }
- return result;
-}
-
-
static void
-usage(const char *progname)
+usage(void)
{
printf("%s is a benchmarking tool for PostgreSQL.\n\n"
"Usage:\n"
" %s [OPTION]... [DBNAME]\n"
"\nInitialization options:\n"
- " -i invokes initialization mode\n"
- " -F NUM fill factor\n"
+ " -i, --initialize invokes initialization mode\n"
+ " -F, --fillfactor=NUM set fill factor\n"
#ifdef PGXC
" -k distribute by primary key branch id - bid\n"
#endif
- " -s NUM scaling factor\n"
+ " -n, --no-vacuum do not run VACUUM after initialization\n"
+ " -q, --quiet quiet logging (one message each 5 seconds)\n"
+ " -s, --scale=NUM scaling factor\n"
+ " --foreign-keys create foreign key constraints between tables\n"
" --index-tablespace=TABLESPACE\n"
- " create indexes in the specified tablespace\n"
- " --tablespace=TABLESPACE\n"
- " create tables in the specified tablespace\n"
- " --unlogged-tables\n"
- " create tables as unlogged tables\n"
+ " create indexes in the specified tablespace\n"
+ " --tablespace=TABLESPACE create tables in the specified tablespace\n"
+ " --unlogged-tables create tables as unlogged tables\n"
"\nBenchmarking options:\n"
- " -c NUM number of concurrent database clients (default: 1)\n"
- " -C establish new connection for each transaction\n"
- " -D VARNAME=VALUE\n"
- " define variable for use by custom script\n"
- " -f FILENAME read transaction script from FILENAME\n"
+ " -c, --client=NUM number of concurrent database clients (default: 1)\n"
+ " -C, --connect establish new connection for each transaction\n"
+ " -D, --define=VARNAME=VALUE\n"
+ " define variable for use by custom script\n"
+ " -f, --file=FILENAME read transaction script from FILENAME\n"
#ifdef PGXC
" -k query with default key and additional key branch id (bid)\n"
#endif
- " -j NUM number of threads (default: 1)\n"
- " -l write transaction times to log file\n"
- " -M simple|extended|prepared\n"
- " protocol for submitting queries to server (default: simple)\n"
- " -n do not run VACUUM before tests\n"
- " -N do not update tables \"pgbench_tellers\" and \"pgbench_branches\"\n"
- " -r report average latency per command\n"
- " -s NUM report this scale factor in output\n"
- " -S perform SELECT-only transactions\n"
- " -t NUM number of transactions each client runs (default: 10)\n"
- " -T NUM duration of benchmark test in seconds\n"
- " -v vacuum all four standard tables before tests\n"
+ " -j, --jobs=NUM number of threads (default: 1)\n"
+ " -l, --log write transaction times to log file\n"
+ " -M, --protocol=simple|extended|prepared\n"
+ " protocol for submitting queries (default: simple)\n"
+ " -n, --no-vacuum do not run VACUUM before tests\n"
+ " -N, --skip-some-updates skip updates of pgbench_tellers and pgbench_branches\n"
+ " -P, --progress=NUM show thread progress report every NUM seconds\n"
+ " -r, --report-latencies report average latency per command\n"
+ " -R, --rate=NUM target rate in transactions per second\n"
+ " -s, --scale=NUM report this scale factor in output\n"
+ " -S, --select-only perform SELECT-only transactions\n"
+ " -t, --transactions=NUM number of transactions each client runs (default: 10)\n"
+ " -T, --time=NUM duration of benchmark test in seconds\n"
+ " -v, --vacuum-all vacuum all four standard tables before tests\n"
+ " --aggregate-interval=NUM aggregate data over NUM seconds\n"
+ " --sampling-rate=NUM fraction of transactions to log (e.g. 0.01 for 1%%)\n"
"\nCommon options:\n"
- " -d print debugging output\n"
- " -h HOSTNAME database server host or socket directory\n"
- " -p PORT database server port number\n"
- " -U USERNAME connect as specified database user\n"
- " --help show this help, then exit\n"
- " --version output version information, then exit\n"
+ " -d, --debug print debugging output\n"
+ " -h, --host=HOSTNAME database server host or socket directory\n"
+ " -p, --port=PORT database server port number\n"
+ " -U, --username=USERNAME connect as specified database user\n"
+ " -V, --version output version information, then exit\n"
+ " -?, --help show this help, then exit\n"
"\n"
"Report bugs to <pgsql-bugs@postgresql.org>.\n",
progname, progname);
}
+/*
+ * strtoint64 -- convert a string to 64-bit integer
+ *
+ * This function is a modified version of scanint8() from
+ * src/backend/utils/adt/int8.c.
+ */
+static int64
+strtoint64(const char *str)
+{
+ const char *ptr = str;
+ int64 result = 0;
+ int sign = 1;
+
+ /*
+ * Do our own scan, rather than relying on sscanf which might be broken
+ * for long long.
+ */
+
+ /* skip leading spaces */
+ while (*ptr && isspace((unsigned char) *ptr))
+ ptr++;
+
+ /* handle sign */
+ if (*ptr == '-')
+ {
+ ptr++;
+
+ /*
+ * Do an explicit check for INT64_MIN. Ugly though this is, it's
+ * cleaner than trying to get the loop below to handle it portably.
+ */
+ if (strncmp(ptr, "9223372036854775808", 19) == 0)
+ {
+ result = -INT64CONST(0x7fffffffffffffff) - 1;
+ ptr += 19;
+ goto gotdigits;
+ }
+ sign = -1;
+ }
+ else if (*ptr == '+')
+ ptr++;
+
+ /* require at least one digit */
+ if (!isdigit((unsigned char) *ptr))
+ fprintf(stderr, "invalid input syntax for integer: \"%s\"\n", str);
+
+ /* process digits */
+ while (*ptr && isdigit((unsigned char) *ptr))
+ {
+ int64 tmp = result * 10 + (*ptr++ - '0');
+
+ if ((tmp / 10) != result) /* overflow? */
+ fprintf(stderr, "value \"%s\" is out of range for type bigint\n", str);
+ result = tmp;
+ }
+
+gotdigits:
+
+ /* allow trailing whitespace, but not other trailing chars */
+ while (*ptr != '\0' && isspace((unsigned char) *ptr))
+ ptr++;
+
+ if (*ptr != '\0')
+ fprintf(stderr, "invalid input syntax for integer: \"%s\"\n", str);
+
+ return ((sign < 0) ? -result : result);
+}
+
/* random number generator: uniform distribution from min to max inclusive */
-static int
-getrand(TState *thread, int min, int max)
+static int64
+getrand(TState *thread, int64 min, int64 max)
{
/*
* Odd coding is so that min and max have approximately the same chance of
@@ -438,7 +514,7 @@ getrand(TState *thread, int min, int max)
* protected by a mutex, and therefore a bottleneck on machines with many
* CPUs.
*/
- return min + (int) ((max - min + 1) * pg_erand48(thread->random_state));
+ return min + (int64) ((max - min + 1) * pg_erand48(thread->random_state));
}
/* call PQexec() and exit() on failure */
@@ -470,10 +546,30 @@ doConnect(void)
*/
do
{
+#define PARAMS_ARRAY_SIZE 7
+
+ const char *keywords[PARAMS_ARRAY_SIZE];
+ const char *values[PARAMS_ARRAY_SIZE];
+
+ keywords[0] = "host";
+ values[0] = pghost;
+ keywords[1] = "port";
+ values[1] = pgport;
+ keywords[2] = "user";
+ values[2] = login;
+ keywords[3] = "password";
+ values[3] = password;
+ keywords[4] = "dbname";
+ values[4] = dbName;
+ keywords[5] = "fallback_application_name";
+ values[5] = progname;
+ keywords[6] = NULL;
+ values[6] = NULL;
+
new_pass = false;
- conn = PQsetdbLogin(pghost, pgport, pgoptions, pgtty, dbName,
- login, password);
+ conn = PQconnectdbParams(keywords, values, true);
+
if (!conn)
{
fprintf(stderr, "Connection to database \"%s\" failed\n",
@@ -593,17 +689,17 @@ putVariable(CState *st, const char *context, char *name, char *value)
}
if (st->variables)
- newvars = (Variable *) xrealloc(st->variables,
+ newvars = (Variable *) pg_realloc(st->variables,
(st->nvariables + 1) * sizeof(Variable));
else
- newvars = (Variable *) xmalloc(sizeof(Variable));
+ newvars = (Variable *) pg_malloc(sizeof(Variable));
st->variables = newvars;
var = &newvars[st->nvariables];
- var->name = xstrdup(name);
- var->value = xstrdup(value);
+ var->name = pg_strdup(name);
+ var->value = pg_strdup(value);
st->nvariables++;
@@ -615,7 +711,7 @@ putVariable(CState *st, const char *context, char *name, char *value)
char *val;
/* dup then free, in case value is pointing at this variable */
- val = xstrdup(value);
+ val = pg_strdup(value);
free(var->value);
var->value = val;
@@ -637,7 +733,7 @@ parseVariable(const char *sql, int *eaten)
if (i == 1)
return NULL;
- name = xmalloc(i);
+ name = pg_malloc(i);
memcpy(name, &sql[1], i - 1);
name[i - 1] = '\0';
@@ -654,7 +750,7 @@ replaceVariable(char **sql, char *param, int len, char *value)
{
size_t offset = param - *sql;
- *sql = xrealloc(*sql, strlen(*sql) - len + valueln + 1);
+ *sql = pg_realloc(*sql, strlen(*sql) - len + valueln + 1);
param = *sql + offset;
}
@@ -836,23 +932,90 @@ clientDone(CState *st, bool ok)
return false; /* always false */
}
+static
+void
+agg_vals_init(AggVals *aggs, instr_time start)
+{
+ /* basic counters */
+ aggs->cnt = 0; /* number of transactions */
+ aggs->sum = 0; /* SUM(duration) */
+ aggs->sum2 = 0; /* SUM(duration*duration) */
+
+ /* min and max transaction duration */
+ aggs->min_duration = 0;
+ aggs->max_duration = 0;
+
+ /* start of the current interval */
+ aggs->start_time = INSTR_TIME_GET_DOUBLE(start);
+}
+
/* return false iff client should be disconnected */
static bool
-doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile)
+doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVals *agg)
{
PGresult *res;
Command **commands;
+ bool trans_needs_throttle = false;
top:
commands = sql_files[st->use_file];
+ /*
+ * Handle throttling once per transaction by sleeping. It is simpler to
+ * do this here rather than at the end, because so much complicated logic
+ * happens below when statements finish.
+ */
+ if (throttle_delay && !st->is_throttled)
+ {
+ /*
+ * Use inverse transform sampling to randomly generate a delay, such
+ * that the series of delays will approximate a Poisson distribution
+ * centered on the throttle_delay time.
+ *
+ * 10000 implies a 9.2 (-log(1/10000)) to 0.0 (log 1) delay
+ * multiplier, and results in a 0.055 % target underestimation bias:
+ *
+ * SELECT 1.0/AVG(-LN(i/10000.0)) FROM generate_series(1,10000) AS i;
+ * = 1.000552717032611116335474
+ *
+ * If transactions are too slow or a given wait is shorter than a
+ * transaction, the next transaction will start right away.
+ */
+ int64 wait = (int64) (throttle_delay *
+ 1.00055271703 * -log(getrand(thread, 1, 10000) / 10000.0));
+
+ thread->throttle_trigger += wait;
+
+ st->until = thread->throttle_trigger;
+ st->sleeping = 1;
+ st->throttling = true;
+ st->is_throttled = true;
+ if (debug)
+ fprintf(stderr, "client %d throttling " INT64_FORMAT " us\n",
+ st->id, wait);
+ }
+
if (st->sleeping)
{ /* are we sleeping? */
instr_time now;
+ int64 now_us;
INSTR_TIME_SET_CURRENT(now);
- if (st->until <= INSTR_TIME_GET_MICROSEC(now))
+ now_us = INSTR_TIME_GET_MICROSEC(now);
+ if (st->until <= now_us)
+ {
st->sleeping = 0; /* Done sleeping, go ahead with next command */
+ if (st->throttling)
+ {
+ /* Measure lag of throttled transaction relative to target */
+ int64 lag = now_us - st->until;
+
+ thread->throttle_lag += lag;
+ if (lag > thread->throttle_lag_max)
+ thread->throttle_lag_max = lag;
+ st->throttling = false;
+ }
+ }
else
return true; /* Still sleeping, nothing to do here */
}
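The delay computed in the throttling block above is an exponential random variable obtained by inverse transform sampling, so the sequence of transaction start times approximates a Poisson process with mean spacing throttle_delay. A standalone sketch of the same formula (illustration only: drand48 stands in for pg_erand48, the 100 ms target is arbitrary; build with -lm):

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const long	throttle_delay = 100000;	/* target mean gap in usec, i.e. 10 tps */
	long		total = 0;
	int			i;

	srand48(42);
	for (i = 0; i < 10; i++)
	{
		/* uniform integer in 1..10000, as getrand(thread, 1, 10000) yields */
		long		r = 1 + (long) (10000.0 * drand48());

		/* -log(U) is exponential with mean 1 for U uniform on (0,1] */
		long		wait = (long) (throttle_delay * 1.00055271703 *
								   -log(r / 10000.0));

		total += wait;
		printf("wait %2d: %6ld us\n", i, wait);
	}
	printf("mean of 10 draws: %ld us (target %ld)\n", total / 10, throttle_delay);
	return 0;
}

Over many draws the mean converges on the target; a wait shorter than the transaction itself simply means the next transaction starts right away, as the comment above notes.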
@@ -887,6 +1050,27 @@ top:
thread->exec_count[cnum]++;
}
+ /* transaction finished: record latency under progress or throttling */
+ if ((progress || throttle_delay) && commands[st->state + 1] == NULL)
+ {
+ instr_time diff;
+ int64 latency;
+
+ INSTR_TIME_SET_CURRENT(diff);
+ INSTR_TIME_SUBTRACT(diff, st->txn_begin);
+ latency = INSTR_TIME_GET_MICROSEC(diff);
+ st->txn_latencies += latency;
+
+ /*
+ * XXX In a long benchmark run of high-latency transactions, this
+ * int64 addition eventually overflows. For example, 100 threads
+ * running 10s transactions will overflow it in 2.56 hours. With
+ * a more-typical OLTP workload of .1s transactions, overflow
+ * would take 256 hours.
+ */
+ st->txn_sqlats += latency * latency;
+ }
+
/*
* if transaction finished, record the time it took in the log
*/
@@ -896,21 +1080,105 @@ top:
instr_time diff;
double usec;
- INSTR_TIME_SET_CURRENT(now);
- diff = now;
- INSTR_TIME_SUBTRACT(diff, st->txn_begin);
- usec = (double) INSTR_TIME_GET_MICROSEC(diff);
+ /*
+ * Write the log entry if this row belongs to the random sample, or
+ * if no sampling rate was given, which means log everything.
+ */
+ if (sample_rate == 0.0 ||
+ pg_erand48(thread->random_state) <= sample_rate)
+ {
+ INSTR_TIME_SET_CURRENT(now);
+ diff = now;
+ INSTR_TIME_SUBTRACT(diff, st->txn_begin);
+ usec = (double) INSTR_TIME_GET_MICROSEC(diff);
+
+ /* should we aggregate the results or not? */
+ if (agg_interval > 0)
+ {
+ /*
+ * are we still in the same interval? if yes, accumulate
+ * the values (print them otherwise)
+ */
+ if (agg->start_time + agg_interval >= INSTR_TIME_GET_DOUBLE(now))
+ {
+ agg->cnt += 1;
+ agg->sum += usec;
+ agg->sum2 += usec * usec;
+
+ /* first in this aggregation interval */
+ if ((agg->cnt == 1) || (usec < agg->min_duration))
+ agg->min_duration = usec;
+ if ((agg->cnt == 1) || (usec > agg->max_duration))
+ agg->max_duration = usec;
+ }
+ else
+ {
+ /*
+ * Loop until we reach the interval of the current
+ * transaction (and print all the empty intervals in
+ * between).
+ */
+ while (agg->start_time + agg_interval < INSTR_TIME_GET_DOUBLE(now))
+ {
+ /*
+ * This is a non-Windows branch (thanks to the
+ * ifdef in usage), so we don't need to handle
+ * this in a special way (see below).
+ */
+ fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f\n",
+ agg->start_time,
+ agg->cnt,
+ agg->sum,
+ agg->sum2,
+ agg->min_duration,
+ agg->max_duration);
+
+ /* move to the next interval */
+ agg->start_time = agg->start_time + agg_interval;
+
+ /* reset for "no transaction" intervals */
+ agg->cnt = 0;
+ agg->min_duration = 0;
+ agg->max_duration = 0;
+ agg->sum = 0;
+ agg->sum2 = 0;
+ }
+
+ /*
+ * and now update the reset values (include the
+ * current)
+ */
+ agg->cnt = 1;
+ agg->min_duration = usec;
+ agg->max_duration = usec;
+ agg->sum = usec;
+ agg->sum2 = usec * usec;
+ }
+ }
+ else
+ {
+ /* no, print raw transactions */
#ifndef WIN32
- /* This is more than we really ought to know about instr_time */
- fprintf(logfile, "%d %d %.0f %d %ld %ld\n",
- st->id, st->cnt, usec, st->use_file,
- (long) now.tv_sec, (long) now.tv_usec);
+
+ /*
+ * This is more than we really ought to know about
+ * instr_time
+ */
+ fprintf(logfile, "%d %d %.0f %d %ld %ld\n",
+ st->id, st->cnt, usec, st->use_file,
+ (long) now.tv_sec, (long) now.tv_usec);
#else
- /* On Windows, instr_time doesn't provide a timestamp anyway */
- fprintf(logfile, "%d %d %.0f %d 0 0\n",
- st->id, st->cnt, usec, st->use_file);
+
+ /*
+ * On Windows, instr_time doesn't provide a timestamp
+ * anyway
+ */
+ fprintf(logfile, "%d %d %.0f %d 0 0\n",
+ st->id, st->cnt, usec, st->use_file);
#endif
+ }
+ }
}
if (commands[st->state]->type == SQL_COMMAND)
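Each line written above under --aggregate-interval has the form "start_time cnt sum sum2 min max", with durations in microseconds. Per-interval mean and standard deviation follow from the usual sum/sum-of-squares identity; a minimal sketch, assuming a hand-written sample line rather than a real log (build with -lm):

#include <math.h>
#include <stdio.h>

int
main(void)
{
	/* hypothetical line produced with --aggregate-interval */
	const char *line = "1389083935 423 78390 17126094 81 1468";
	long		start_time;
	int			cnt;
	double		sum,
				sum2,
				min,
				max;
	double		mean,
				stddev;

	if (sscanf(line, "%ld %d %lf %lf %lf %lf",
			   &start_time, &cnt, &sum, &sum2, &min, &max) != 6)
		return 1;

	mean = sum / cnt;
	stddev = sqrt(sum2 / cnt - mean * mean);

	printf("interval starting %ld: %d xacts, avg %.1f us, stddev %.1f us\n",
		   start_time, cnt, mean, stddev);
	return 0;
}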
@@ -953,8 +1221,19 @@ top:
if (commands[st->state] == NULL)
{
st->state = 0;
- st->use_file = getrand(thread, 0, num_files - 1);
+ st->use_file = (int) getrand(thread, 0, num_files - 1);
commands = sql_files[st->use_file];
+ st->is_throttled = false;
+
+ /*
+ * No transaction is underway anymore, which means there is
+ * nothing to listen to right now. When throttling rate limits
+ * are active, a sleep will happen next, as the next transaction
+ * starts. And then in any case the next SQL command will set
+ * listen back to 1.
+ */
+ st->listen = 0;
+ trans_needs_throttle = (throttle_delay > 0);
}
}
@@ -973,8 +1252,19 @@ top:
INSTR_TIME_ACCUM_DIFF(*conn_time, end, start);
}
- /* Record transaction start time if logging is enabled */
- if (logfile && st->state == 0)
+ /*
+ * This ensures that a throttling delay is inserted before proceeding with
+ * sql commands, after the first transaction. The first transaction
+ * throttling is performed when first entering doCustom.
+ */
+ if (trans_needs_throttle)
+ {
+ trans_needs_throttle = false;
+ goto top;
+ }
+
+ /* Record transaction start time under logging, progress or throttling */
+ if ((logfile || progress || throttle_delay) && st->state == 0)
INSTR_TIME_SET_CURRENT(st->txn_begin);
/* Record statement start time if per-command latencies are requested */
@@ -990,7 +1280,7 @@ top:
{
char *sql;
- sql = xstrdup(command->argv[0]);
+ sql = pg_strdup(command->argv[0]);
sql = assignVariables(st, sql);
if (debug)
@@ -1073,7 +1363,7 @@ top:
if (pg_strcasecmp(argv[0], "setrandom") == 0)
{
char *var;
- int min,
+ int64 min,
max;
char res[64];
@@ -1085,10 +1375,10 @@ top:
st->ecnt++;
return true;
}
- min = atoi(var);
+ min = strtoint64(var);
}
else
- min = atoi(argv[2]);
+ min = strtoint64(argv[2]);
#ifdef NOT_USED
if (min < 0)
@@ -1107,10 +1397,10 @@ top:
st->ecnt++;
return true;
}
- max = atoi(var);
+ max = strtoint64(var);
}
else
- max = atoi(argv[3]);
+ max = strtoint64(argv[3]);
if (max < min)
{
@@ -1120,11 +1410,11 @@ top:
}
/*
- * getrand() neeeds to be able to subtract max from min and add
- * one the result without overflowing. Since we know max > min,
- * we can detect overflow just by checking for a negative result.
- * But we must check both that the subtraction doesn't overflow,
- * and that adding one to the result doesn't overflow either.
+ * getrand() needs to be able to subtract max from min and add one
+ * to the result without overflowing. Since we know max > min, we
+ * can detect overflow just by checking for a negative result. But
+ * we must check both that the subtraction doesn't overflow, and
+ * that adding one to the result doesn't overflow either.
*/
if (max - min < 0 || (max - min) + 1 < 0)
{
@@ -1134,9 +1424,9 @@ top:
}
#ifdef DEBUG
- printf("min: %d max: %d random: %d\n", min, max, getrand(thread, min, max));
+ printf("min: " INT64_FORMAT " max: " INT64_FORMAT " random: " INT64_FORMAT "\n", min, max, getrand(thread, min, max));
#endif
- snprintf(res, sizeof(res), "%d", getrand(thread, min, max));
+ snprintf(res, sizeof(res), INT64_FORMAT, getrand(thread, min, max));
if (!putVariable(st, argv[0], argv[1], res))
{
@@ -1149,7 +1439,7 @@ top:
else if (pg_strcasecmp(argv[0], "set") == 0)
{
char *var;
- int ope1,
+ int64 ope1,
ope2;
char res[64];
@@ -1161,13 +1451,13 @@ top:
st->ecnt++;
return true;
}
- ope1 = atoi(var);
+ ope1 = strtoint64(var);
}
else
- ope1 = atoi(argv[2]);
+ ope1 = strtoint64(argv[2]);
if (argc < 5)
- snprintf(res, sizeof(res), "%d", ope1);
+ snprintf(res, sizeof(res), INT64_FORMAT, ope1);
else
{
if (*argv[4] == ':')
@@ -1178,17 +1468,17 @@ top:
st->ecnt++;
return true;
}
- ope2 = atoi(var);
+ ope2 = strtoint64(var);
}
else
- ope2 = atoi(argv[4]);
+ ope2 = strtoint64(argv[4]);
if (strcmp(argv[3], "+") == 0)
- snprintf(res, sizeof(res), "%d", ope1 + ope2);
+ snprintf(res, sizeof(res), INT64_FORMAT, ope1 + ope2);
else if (strcmp(argv[3], "-") == 0)
- snprintf(res, sizeof(res), "%d", ope1 - ope2);
+ snprintf(res, sizeof(res), INT64_FORMAT, ope1 - ope2);
else if (strcmp(argv[3], "*") == 0)
- snprintf(res, sizeof(res), "%d", ope1 * ope2);
+ snprintf(res, sizeof(res), INT64_FORMAT, ope1 * ope2);
else if (strcmp(argv[3], "/") == 0)
{
if (ope2 == 0)
@@ -1197,7 +1487,7 @@ top:
st->ecnt++;
return true;
}
- snprintf(res, sizeof(res), "%d", ope1 / ope2);
+ snprintf(res, sizeof(res), INT64_FORMAT, ope1 / ope2);
}
else
{
@@ -1302,31 +1592,44 @@ disconnect_all(CState *state, int length)
/* create tables and setup data */
static void
-init(void)
+init(bool is_no_vacuum)
{
+/*
+ * The scale factor at/beyond which 32-bit integers are insufficient for
+ * storing TPC-B account IDs.
+ *
+ * Although the actual threshold is 21474, we use 20000 because it is easier to
+ * document and remember, and isn't that far away from the real threshold.
+ */
+#define SCALE_32BIT_THRESHOLD 20000
+
/*
* Note: TPC-B requires at least 100 bytes per row, and the "filler"
* fields in these table declarations were intended to comply with that.
- * But because they default to NULLs, they don't actually take any space.
- * We could fix that by giving them non-null default values. However, that
+ * The pgbench_accounts table complies with that because the "filler"
+ * column is set to blank-padded empty string. But for all other tables
+ * the columns default to NULL and so don't actually take any space. We
+ * could fix that by giving them non-null default values. However, that
* would completely break comparability of pgbench results with prior
- * versions. Since pgbench has never pretended to be fully TPC-B
- * compliant anyway, we stick with the historical behavior.
+ * versions. Since pgbench has never pretended to be fully TPC-B compliant
+ * anyway, we stick with the historical behavior.
*/
struct ddlinfo
{
- char *table;
- char *cols;
+ const char *table; /* table name */
+ const char *smcols; /* column decls if accountIDs are 32 bits */
+ const char *bigcols; /* column decls if accountIDs are 64 bits */
int declare_fillfactor;
#ifdef PGXC
char *distribute_by;
#endif
};
- struct ddlinfo DDLs[] = {
+ static const struct ddlinfo DDLs[] = {
{
- "pgbench_branches",
- "bid int not null,bbalance int,filler char(88)",
- 1
+ "pgbench_history",
+ "tid int,bid int,aid int,delta int,mtime timestamp,filler char(22)",
+ "tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22)",
+ 0
#ifdef PGXC
, "distribute by hash (bid)"
#endif
@@ -1334,6 +1637,7 @@ init(void)
{
"pgbench_tellers",
"tid int not null,bid int,tbalance int,filler char(84)",
+ "tid int not null,bid int,tbalance int,filler char(84)",
1
#ifdef PGXC
, "distribute by hash (bid)"
@@ -1341,27 +1645,35 @@ init(void)
},
{
"pgbench_accounts",
- "aid int not null,bid int,abalance int,filler char(84)",
+ "aid int not null,bid int,abalance int,filler char(84)",
+ "aid bigint not null,bid int,abalance int,filler char(84)",
1
#ifdef PGXC
, "distribute by hash (bid)"
#endif
},
{
- "pgbench_history",
- "tid int,bid int,aid int,delta int,mtime timestamp,filler char(22)",
- 0
+ "pgbench_branches",
+ "bid int not null,bbalance int,filler char(88)",
+ "bid int not null,bbalance int,filler char(88)",
+ 1
#ifdef PGXC
, "distribute by hash (bid)"
#endif
}
};
-
- static char *DDLAFTERs[] = {
+ static const char *const DDLINDEXes[] = {
"alter table pgbench_branches add primary key (bid)",
"alter table pgbench_tellers add primary key (tid)",
"alter table pgbench_accounts add primary key (aid)"
};
+ static const char *const DDLKEYs[] = {
+ "alter table pgbench_tellers add foreign key (bid) references pgbench_branches",
+ "alter table pgbench_accounts add foreign key (bid) references pgbench_branches",
+ "alter table pgbench_history add foreign key (bid) references pgbench_branches",
+ "alter table pgbench_history add foreign key (tid) references pgbench_tellers",
+ "alter table pgbench_history add foreign key (aid) references pgbench_accounts"
+ };
#ifdef PGXC
static char *DDLAFTERs_bid[] = {
@@ -1375,6 +1687,14 @@ init(void)
PGresult *res;
char sql[256];
int i;
+ int64 k;
+
+ /* used to track elapsed time and estimate of the remaining time */
+ instr_time start,
+ diff;
+ double elapsed_sec,
+ remaining_sec;
+ int log_interval = 1;
if ((con = doConnect()) == NULL)
exit(1);
@@ -1383,16 +1703,17 @@ init(void)
{
char opts[256];
char buffer[256];
- struct ddlinfo *ddl = &DDLs[i];
+ const struct ddlinfo *ddl = &DDLs[i];
+ const char *cols;
/* Remove old table, if it exists. */
- snprintf(buffer, 256, "drop table if exists %s", ddl->table);
+ snprintf(buffer, sizeof(buffer), "drop table if exists %s", ddl->table);
executeStatement(con, buffer);
/* Construct new create table statement. */
opts[0] = '\0';
if (ddl->declare_fillfactor)
- snprintf(opts + strlen(opts), 256 - strlen(opts),
+ snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts),
" with (fillfactor=%d)", fillfactor);
if (tablespace != NULL)
{
@@ -1400,21 +1721,24 @@ init(void)
escape_tablespace = PQescapeIdentifier(con, tablespace,
strlen(tablespace));
- snprintf(opts + strlen(opts), 256 - strlen(opts),
+ snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts),
" tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace);
}
+
+ cols = (scale >= SCALE_32BIT_THRESHOLD) ? ddl->bigcols : ddl->smcols;
+
#ifdef PGXC
/* Add distribution columns if necessary */
if (use_branch)
- snprintf(buffer, 256, "create%s table %s(%s)%s %s",
+ snprintf(buffer, sizeof(buffer), "create%s table %s(%s)%s %s",
unlogged_tables ? " unlogged" : "",
ddl->table, ddl->cols, opts, ddl->distribute_by);
else
#endif
- snprintf(buffer, 256, "create%s table %s(%s)%s",
+ snprintf(buffer, sizeof(buffer), "create%s table %s(%s)%s",
unlogged_tables ? " unlogged" : "",
- ddl->table, ddl->cols, opts);
+ ddl->table, cols, opts);
executeStatement(con, buffer);
}
@@ -1423,13 +1747,18 @@ init(void)
for (i = 0; i < nbranches * scale; i++)
{
- snprintf(sql, 256, "insert into pgbench_branches(bid,bbalance) values(%d,0)", i + 1);
+ /* "filler" column defaults to NULL */
+ snprintf(sql, sizeof(sql),
+ "insert into pgbench_branches(bid,bbalance) values(%d,0)",
+ i + 1);
executeStatement(con, sql);
}
for (i = 0; i < ntellers * scale; i++)
{
- snprintf(sql, 256, "insert into pgbench_tellers(tid,bid,tbalance) values (%d,%d,0)",
+ /* "filler" column defaults to NULL */
+ snprintf(sql, sizeof(sql),
+ "insert into pgbench_tellers(tid,bid,tbalance) values (%d,%d,0)",
i + 1, i / ntellers + 1);
executeStatement(con, sql);
}
@@ -1452,19 +1781,60 @@ init(void)
}
PQclear(res);
- for (i = 0; i < naccounts * scale; i++)
+ INSTR_TIME_SET_CURRENT(start);
+
+ for (k = 0; k < (int64) naccounts * scale; k++)
{
- int j = i + 1;
+ int64 j = k + 1;
- snprintf(sql, 256, "%d\t%d\t%d\t\n", j, i / naccounts + 1, 0);
+ /* "filler" column defaults to blank padded empty string */
+ snprintf(sql, sizeof(sql),
+ INT64_FORMAT "\t" INT64_FORMAT "\t%d\t\n",
+ j, k / naccounts + 1, 0);
if (PQputline(con, sql))
{
fprintf(stderr, "PQputline failed\n");
exit(1);
}
- if (j % 10000 == 0)
- fprintf(stderr, "%d tuples done.\n", j);
+ /*
+ * If we want to stick with the original logging, print a message every
+ * 100k inserted rows.
+ */
+ if ((!use_quiet) && (j % 100000 == 0))
+ {
+ INSTR_TIME_SET_CURRENT(diff);
+ INSTR_TIME_SUBTRACT(diff, start);
+
+ elapsed_sec = INSTR_TIME_GET_DOUBLE(diff);
+ remaining_sec = ((double) scale * naccounts - j) * elapsed_sec / j;
+
+ fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n",
+ j, (int64) naccounts * scale,
+ (int) (((int64) j * 100) / (naccounts * (int64) scale)),
+ elapsed_sec, remaining_sec);
+ }
+ /* let's not check the timing for each row, but only every 100 rows */
+ else if (use_quiet && (j % 100 == 0))
+ {
+ INSTR_TIME_SET_CURRENT(diff);
+ INSTR_TIME_SUBTRACT(diff, start);
+
+ elapsed_sec = INSTR_TIME_GET_DOUBLE(diff);
+ remaining_sec = ((double) scale * naccounts - j) * elapsed_sec / j;
+
+ /* have we reached the next interval (or end)? */
+ if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS))
+ {
+ fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n",
+ j, (int64) naccounts * scale,
+ (int) (((int64) j * 100) / (naccounts * (int64) scale)), elapsed_sec, remaining_sec);
+
+ /* skip to the next interval */
+ log_interval = (int) ceil(elapsed_sec / LOG_STEP_SECONDS);
+ }
+ }
+
}
if (PQputline(con, "\\.\n"))
{
@@ -1478,6 +1848,16 @@ init(void)
}
executeStatement(con, "commit");
+ /* vacuum */
+ if (!is_no_vacuum)
+ {
+ fprintf(stderr, "vacuum...\n");
+ executeStatement(con, "vacuum analyze pgbench_branches");
+ executeStatement(con, "vacuum analyze pgbench_tellers");
+ executeStatement(con, "vacuum analyze pgbench_accounts");
+ executeStatement(con, "vacuum analyze pgbench_history");
+ }
+
/*
* create indexes
*/
@@ -1511,11 +1891,11 @@ init(void)
}
else
#endif
- for (i = 0; i < lengthof(DDLAFTERs); i++)
+ for (i = 0; i < lengthof(DDLINDEXes); i++)
{
char buffer[256];
- strncpy(buffer, DDLAFTERs[i], 256);
+ strlcpy(buffer, DDLINDEXes[i], sizeof(buffer));
if (index_tablespace != NULL)
{
@@ -1523,7 +1903,7 @@ init(void)
escape_tablespace = PQescapeIdentifier(con, index_tablespace,
strlen(index_tablespace));
- snprintf(buffer + strlen(buffer), 256 - strlen(buffer),
+ snprintf(buffer + strlen(buffer), sizeof(buffer) - strlen(buffer),
" using index tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace);
}
@@ -1531,12 +1911,17 @@ init(void)
executeStatement(con, buffer);
}
- /* vacuum */
- fprintf(stderr, "vacuum...");
- executeStatement(con, "vacuum analyze pgbench_branches");
- executeStatement(con, "vacuum analyze pgbench_tellers");
- executeStatement(con, "vacuum analyze pgbench_accounts");
- executeStatement(con, "vacuum analyze pgbench_history");
+ /*
+ * create foreign keys
+ */
+ if (foreign_keys)
+ {
+ fprintf(stderr, "set foreign keys...\n");
+ for (i = 0; i < lengthof(DDLKEYs); i++)
+ {
+ executeStatement(con, DDLKEYs[i]);
+ }
+ }
fprintf(stderr, "done.\n");
PQfinish(con);
@@ -1551,7 +1936,7 @@ parseQuery(Command *cmd, const char *raw_sql)
char *sql,
*p;
- sql = xstrdup(raw_sql);
+ sql = pg_strdup(raw_sql);
cmd->argc = 1;
p = sql;
@@ -1613,8 +1998,8 @@ process_commands(char *buf)
return NULL;
/* Allocate and initialize Command structure */
- my_commands = (Command *) xmalloc(sizeof(Command));
- my_commands->line = xstrdup(buf);
+ my_commands = (Command *) pg_malloc(sizeof(Command));
+ my_commands->line = pg_strdup(buf);
my_commands->command_num = num_commands++;
my_commands->type = 0; /* until set */
my_commands->argc = 0;
@@ -1628,7 +2013,7 @@ process_commands(char *buf)
while (tok != NULL)
{
- my_commands->argv[j++] = xstrdup(tok);
+ my_commands->argv[j++] = pg_strdup(tok);
my_commands->argc++;
tok = strtok(NULL, delim);
}
@@ -1730,7 +2115,7 @@ process_commands(char *buf)
switch (querymode)
{
case QUERY_SIMPLE:
- my_commands->argv[0] = xstrdup(p);
+ my_commands->argv[0] = pg_strdup(p);
my_commands->argc++;
break;
case QUERY_EXTENDED:
@@ -1746,6 +2131,49 @@ process_commands(char *buf)
return my_commands;
}
+/*
+ * Read a line from fd, and return it in a malloc'd buffer.
+ * Return NULL at EOF.
+ *
+ * The buffer will typically be larger than necessary, but we don't care
+ * in this program, because we'll free it as soon as we've parsed the line.
+ */
+static char *
+read_line_from_file(FILE *fd)
+{
+ char tmpbuf[BUFSIZ];
+ char *buf;
+ size_t buflen = BUFSIZ;
+ size_t used = 0;
+
+ buf = (char *) palloc(buflen);
+ buf[0] = '\0';
+
+ while (fgets(tmpbuf, BUFSIZ, fd) != NULL)
+ {
+ size_t thislen = strlen(tmpbuf);
+
+ /* Append tmpbuf to whatever we had already */
+ memcpy(buf + used, tmpbuf, thislen + 1);
+ used += thislen;
+
+ /* Done if we collected a newline */
+ if (thislen > 0 && tmpbuf[thislen - 1] == '\n')
+ break;
+
+ /* Else, enlarge buf to ensure we can append next bufferload */
+ buflen += BUFSIZ;
+ buf = (char *) pg_realloc(buf, buflen);
+ }
+
+ if (used > 0)
+ return buf;
+
+ /* Reached EOF */
+ free(buf);
+ return NULL;
+}
+
static int
process_file(char *filename)
{
@@ -1754,7 +2182,7 @@ process_file(char *filename)
Command **my_commands;
FILE *fd;
int lineno;
- char buf[BUFSIZ];
+ char *buf;
int alloc_num;
if (num_files >= MAX_FILES)
@@ -1764,7 +2192,7 @@ process_file(char *filename)
}
alloc_num = COMMANDS_ALLOC_NUM;
- my_commands = (Command **) xmalloc(sizeof(Command *) * alloc_num);
+ my_commands = (Command **) pg_malloc(sizeof(Command *) * alloc_num);
if (strcmp(filename, "-") == 0)
fd = stdin;
@@ -1776,11 +2204,14 @@ process_file(char *filename)
lineno = 0;
- while (fgets(buf, sizeof(buf), fd) != NULL)
+ while ((buf = read_line_from_file(fd)) != NULL)
{
Command *command;
command = process_commands(buf);
+
+ free(buf);
+
if (command == NULL)
continue;
@@ -1790,7 +2221,7 @@ process_file(char *filename)
if (lineno >= alloc_num)
{
alloc_num += COMMANDS_ALLOC_NUM;
- my_commands = xrealloc(my_commands, sizeof(Command *) * alloc_num);
+ my_commands = pg_realloc(my_commands, sizeof(Command *) * alloc_num);
}
}
fclose(fd);
@@ -1813,7 +2244,7 @@ process_builtin(char *tb)
int alloc_num;
alloc_num = COMMANDS_ALLOC_NUM;
- my_commands = (Command **) xmalloc(sizeof(Command *) * alloc_num);
+ my_commands = (Command **) pg_malloc(sizeof(Command *) * alloc_num);
lineno = 0;
@@ -1844,7 +2275,7 @@ process_builtin(char *tb)
if (lineno >= alloc_num)
{
alloc_num += COMMANDS_ALLOC_NUM;
- my_commands = xrealloc(my_commands, sizeof(Command *) * alloc_num);
+ my_commands = pg_realloc(my_commands, sizeof(Command *) * alloc_num);
}
}
@@ -1855,9 +2286,11 @@ process_builtin(char *tb)
/* print out results */
static void
-printResults(int ttype, int normal_xacts, int nclients,
+printResults(int ttype, int64 normal_xacts, int nclients,
TState *threads, int nthreads,
- instr_time total_time, instr_time conn_total_time)
+ instr_time total_time, instr_time conn_total_time,
+ int64 total_latencies, int64 total_sqlats,
+ int64 throttle_lag, int64 throttle_lag_max)
{
double time_include,
tps_include,
@@ -1886,15 +2319,45 @@ printResults(int ttype, int normal_xacts, int nclients,
if (duration <= 0)
{
printf("number of transactions per client: %d\n", nxacts);
- printf("number of transactions actually processed: %d/%d\n",
- normal_xacts, nxacts * nclients);
+ printf("number of transactions actually processed: " INT64_FORMAT "/" INT64_FORMAT "\n",
+ normal_xacts, (int64) nxacts * nclients);
}
else
{
printf("duration: %d s\n", duration);
- printf("number of transactions actually processed: %d\n",
+ printf("number of transactions actually processed: " INT64_FORMAT "\n",
normal_xacts);
}
+
+ if (throttle_delay || progress)
+ {
+ /* compute and show latency average and standard deviation */
+ double latency = 0.001 * total_latencies / normal_xacts;
+ double sqlat = (double) total_sqlats / normal_xacts;
+
+ printf("latency average: %.3f ms\n"
+ "latency stddev: %.3f ms\n",
+ latency, 0.001 * sqrt(sqlat - 1000000.0 * latency * latency));
+ }
+ else
+ {
+ /* only an average latency computed from the duration is available */
+ printf("latency average: %.3f ms\n",
+ 1000.0 * duration * nclients / normal_xacts);
+ }
+
+ if (throttle_delay)
+ {
+ /*
+ * Report average transaction lag under rate limit throttling. This
+ * is the delay between scheduled and actual start times for the
+ * transaction. The measured lag may be caused by thread/client load,
+ * the database load, or the Poisson throttling process.
+ */
+ printf("rate limit schedule lag: avg %.3f (max %.3f) ms\n",
+ 0.001 * throttle_lag / normal_xacts, 0.001 * throttle_lag_max);
+ }
+
printf("tps = %f (including connections establishing)\n", tps_include);
printf("tps = %f (excluding connections establishing)\n", tps_exclude);
@@ -1948,6 +2411,42 @@ printResults(int ttype, int normal_xacts, int nclients,
int
main(int argc, char **argv)
{
+ static struct option long_options[] = {
+ /* systematic long/short named options */
+ {"client", required_argument, NULL, 'c'},
+ {"connect", no_argument, NULL, 'C'},
+ {"debug", no_argument, NULL, 'd'},
+ {"define", required_argument, NULL, 'D'},
+ {"file", required_argument, NULL, 'f'},
+ {"fillfactor", required_argument, NULL, 'F'},
+ {"host", required_argument, NULL, 'h'},
+ {"initialize", no_argument, NULL, 'i'},
+ {"jobs", required_argument, NULL, 'j'},
+ {"log", no_argument, NULL, 'l'},
+ {"no-vacuum", no_argument, NULL, 'n'},
+ {"port", required_argument, NULL, 'p'},
+ {"progress", required_argument, NULL, 'P'},
+ {"protocol", required_argument, NULL, 'M'},
+ {"quiet", no_argument, NULL, 'q'},
+ {"report-latencies", no_argument, NULL, 'r'},
+ {"scale", required_argument, NULL, 's'},
+ {"select-only", no_argument, NULL, 'S'},
+ {"skip-some-updates", no_argument, NULL, 'N'},
+ {"time", required_argument, NULL, 'T'},
+ {"transactions", required_argument, NULL, 't'},
+ {"username", required_argument, NULL, 'U'},
+ {"vacuum-all", no_argument, NULL, 'v'},
+ /* long-named only options */
+ {"foreign-keys", no_argument, &foreign_keys, 1},
+ {"index-tablespace", required_argument, NULL, 3},
+ {"tablespace", required_argument, NULL, 2},
+ {"unlogged-tables", no_argument, &unlogged_tables, 1},
+ {"sampling-rate", required_argument, NULL, 4},
+ {"aggregate-interval", required_argument, NULL, 5},
+ {"rate", required_argument, NULL, 'R'},
+ {NULL, 0, NULL, 0}
+ };
+
int c;
int nclients = 1; /* default number of simulated clients */
int nthreads = 1; /* default number of threads */
@@ -1966,17 +2465,14 @@ main(int argc, char **argv)
instr_time start_time; /* start up time */
instr_time total_time;
instr_time conn_total_time;
- int total_xacts;
+ int64 total_xacts = 0;
+ int64 total_latencies = 0;
+ int64 total_sqlats = 0;
+ int64 throttle_lag = 0;
+ int64 throttle_lag_max = 0;
int i;
- static struct option long_options[] = {
- {"index-tablespace", required_argument, NULL, 3},
- {"tablespace", required_argument, NULL, 2},
- {"unlogged-tables", no_argument, &unlogged_tables, 1},
- {NULL, 0, NULL, 0}
- };
-
#ifdef HAVE_GETRLIMIT
struct rlimit rlim;
#endif
@@ -1987,15 +2483,13 @@ main(int argc, char **argv)
char val[64];
- const char *progname;
-
progname = get_progname(argv[0]);
if (argc > 1)
{
if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
- usage(progname);
+ usage();
exit(0);
}
if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
@@ -2017,13 +2511,13 @@ main(int argc, char **argv)
else if ((env = getenv("PGUSER")) != NULL && *env != '\0')
login = env;
- state = (CState *) xmalloc(sizeof(CState));
+ state = (CState *) pg_malloc(sizeof(CState));
memset(state, 0, sizeof(CState));
#ifdef PGXC
- while ((c = getopt_long(argc, argv, "ih:knvp:dSNc:j:Crs:t:T:U:lf:D:F:M:", long_options, &optindex)) != -1)
+ while ((c = getopt_long(argc, argv, "ih:knvp:dqSNc:j:Crs:t:T:U:lf:D:F:M:P:R:", long_options, &optindex)) != -1)
#else
- while ((c = getopt_long(argc, argv, "ih:nvp:dSNc:j:Crs:t:T:U:lf:D:F:M:", long_options, &optindex)) != -1)
+ while ((c = getopt_long(argc, argv, "ih:nvp:dqSNc:j:Crs:t:T:U:lf:D:F:M:P:R:", long_options, &optindex)) != -1)
#endif
{
switch (c)
@@ -2037,7 +2531,7 @@ main(int argc, char **argv)
break;
#endif
case 'h':
- pghost = optarg;
+ pghost = pg_strdup(optarg);
break;
case 'n':
is_no_vacuum++;
@@ -2046,7 +2540,7 @@ main(int argc, char **argv)
do_vacuum_accounts++;
break;
case 'p':
- pgport = optarg;
+ pgport = pg_strdup(optarg);
break;
case 'd':
debug++;
@@ -2132,14 +2626,17 @@ main(int argc, char **argv)
}
break;
case 'U':
- login = optarg;
+ login = pg_strdup(optarg);
break;
case 'l':
use_log = true;
break;
+ case 'q':
+ use_quiet = true;
+ break;
case 'f':
ttype = 3;
- filename = optarg;
+ filename = pg_strdup(optarg);
if (process_file(filename) == false || *sql_files[num_files - 1] == NULL)
exit(1);
break;
@@ -2181,14 +2678,59 @@ main(int argc, char **argv)
exit(1);
}
break;
+ case 'P':
+ progress = atoi(optarg);
+ if (progress <= 0)
+ {
+ fprintf(stderr,
+ "thread progress delay (-P) must be positive (%s)\n",
+ optarg);
+ exit(1);
+ }
+ break;
+ case 'R':
+ {
+ /* get a double from the beginning of option value */
+ double throttle_value = atof(optarg);
+
+ if (throttle_value <= 0.0)
+ {
+ fprintf(stderr, "invalid rate limit: %s\n", optarg);
+ exit(1);
+ }
+ /* Invert rate limit into a time offset */
+ throttle_delay = (int64) (1000000.0 / throttle_value);
+ }
+ break;
case 0:
/* This covers long options which take no argument. */
break;
case 2: /* tablespace */
- tablespace = optarg;
+ tablespace = pg_strdup(optarg);
break;
case 3: /* index-tablespace */
- index_tablespace = optarg;
+ index_tablespace = pg_strdup(optarg);
+ break;
+ case 4:
+ sample_rate = atof(optarg);
+ if (sample_rate <= 0.0 || sample_rate > 1.0)
+ {
+ fprintf(stderr, "invalid sampling rate: %f\n", sample_rate);
+ exit(1);
+ }
+ break;
+ case 5:
+#ifdef WIN32
+ fprintf(stderr, "--aggregate-interval is not currently supported on Windows");
+ exit(1);
+#else
+ agg_interval = atoi(optarg);
+ if (agg_interval <= 0)
+ {
+ fprintf(stderr, "invalid number of seconds for aggregation: %d\n", agg_interval);
+ exit(1);
+ }
+#endif
break;
default:
fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
@@ -2197,6 +2739,9 @@ main(int argc, char **argv)
}
}
+ /* compute a per thread delay */
+ throttle_delay *= nthreads;
+
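
Tying the two -R pieces together: the requested rate in transactions per second is inverted into an average gap in microseconds, and that gap is then multiplied by the thread count because each thread throttles its own subset of clients independently (the Poisson-distributed scheduling itself lives elsewhere in the patch). A worked sketch with hypothetical option values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    double  throttle_value = 200.0;   /* hypothetical: pgbench -R 200 */
    int     nthreads = 4;             /* hypothetical: pgbench -j 4 */
    int64_t throttle_delay;

    /* invert the target rate into an average gap between transactions */
    throttle_delay = (int64_t) (1000000.0 / throttle_value);   /* 5000 us */

    /* each thread throttles on its own, so stretch its schedule accordingly */
    throttle_delay *= nthreads;                                 /* 20000 us */

    printf("per-thread schedule gap: %lld us\n", (long long) throttle_delay);
    return 0;
}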
if (argc > optind)
dbName = argv[optind];
else
@@ -2211,7 +2756,7 @@ main(int argc, char **argv)
if (is_init_mode)
{
- init();
+ init(is_no_vacuum);
exit(0);
}
@@ -2225,6 +2770,45 @@ main(int argc, char **argv)
exit(1);
}
+ /* --sampling-rate may be used only with -l */
+ if (sample_rate > 0.0 && !use_log)
+ {
+ fprintf(stderr, "log sampling rate is allowed only when logging transactions (-l) \n");
+ exit(1);
+ }
+
+ /* -q may be used only with -i */
+ if (use_quiet && !is_init_mode)
+ {
+ fprintf(stderr, "quiet-logging is allowed only in initialization mode (-i)\n");
+ exit(1);
+ }
+
+ /* --sampling-rate must not be used with --aggregate-interval */
+ if (sample_rate > 0.0 && agg_interval > 0)
+ {
+ fprintf(stderr, "log sampling (--sampling-rate) and aggregation (--aggregate-interval) can't be used at the same time\n");
+ exit(1);
+ }
+
+ if (agg_interval > 0 && (!use_log))
+ {
+ fprintf(stderr, "log aggregation is allowed only when actually logging transactions\n");
+ exit(1);
+ }
+
+ if ((duration > 0) && (agg_interval > duration))
+ {
+ fprintf(stderr, "number of seconds for aggregation (%d) must not be higher that test duration (%d)\n", agg_interval, duration);
+ exit(1);
+ }
+
+ if ((duration > 0) && (agg_interval > 0) && (duration % agg_interval != 0))
+ {
+ fprintf(stderr, "duration (%d) must be a multiple of aggregation interval (%d)\n", duration, agg_interval);
+ exit(1);
+ }
+
/*
* is_latencies only works with multiple threads in thread-based
* implementations, not fork-based ones, because it supposes that the
@@ -2246,10 +2830,12 @@ main(int argc, char **argv)
* changed after fork.
*/
main_pid = (int) getpid();
+ progress_nclients = nclients;
+ progress_nthreads = nthreads;
if (nclients > 1)
{
- state = (CState *) xrealloc(state, sizeof(CState) * nclients);
+ state = (CState *) pg_realloc(state, sizeof(CState) * nclients);
memset(state + 1, 0, sizeof(CState) * (nclients - 1));
/* copy any -D switch values to all clients */
@@ -2329,6 +2915,20 @@ main(int argc, char **argv)
}
}
+ /*
+ * Define a :client_id variable that is unique per connection. But don't
+ * override an explicit -D switch.
+ */
+ if (getVariable(&state[0], "client_id") == NULL)
+ {
+ for (i = 0; i < nclients; i++)
+ {
+ snprintf(val, sizeof(val), "%d", i);
+ if (!putVariable(&state[i], "startup", "client_id", val))
+ exit(1);
+ }
+ }
+
if (!is_no_vacuum)
{
fprintf(stderr, "starting vacuum...");
@@ -2383,7 +2983,7 @@ main(int argc, char **argv)
}
/* set up thread data structures */
- threads = (TState *) xmalloc(sizeof(TState) * nthreads);
+ threads = (TState *) pg_malloc(sizeof(TState) * nthreads);
for (i = 0; i < nthreads; i++)
{
TState *thread = &threads[i];
@@ -2401,9 +3001,9 @@ main(int argc, char **argv)
int t;
thread->exec_elapsed = (instr_time *)
- xmalloc(sizeof(instr_time) * num_commands);
+ pg_malloc(sizeof(instr_time) * num_commands);
thread->exec_count = (int *)
- xmalloc(sizeof(int) * num_commands);
+ pg_malloc(sizeof(int) * num_commands);
for (t = 0; t < num_commands; t++)
{
@@ -2450,7 +3050,6 @@ main(int argc, char **argv)
}
/* wait for threads and accumulate results */
- total_xacts = 0;
INSTR_TIME_SET_ZERO(conn_total_time);
for (i = 0; i < nthreads; i++)
{
@@ -2466,17 +3065,32 @@ main(int argc, char **argv)
TResult *r = (TResult *) ret;
total_xacts += r->xacts;
+ total_latencies += r->latencies;
+ total_sqlats += r->sqlats;
+ throttle_lag += r->throttle_lag;
+ if (r->throttle_lag_max > throttle_lag_max)
+ throttle_lag_max = r->throttle_lag_max;
INSTR_TIME_ADD(conn_total_time, r->conn_time);
free(ret);
}
}
disconnect_all(state, nclients);
- /* get end time */
+ /*
+ * XXX We compute results as though every client of every thread started
+ * and finished at the same time. That model can diverge noticeably from
+ * reality for a short benchmark run involving relatively many threads.
+ * The first thread may process notably many transactions before the last
+ * thread begins. Improving the model alone would bring limited benefit,
+ * because performance during those periods of partial thread count can
+ * easily exceed steady state performance. This is one of the many ways
+ * short runs convey deceptive performance figures.
+ */
INSTR_TIME_SET_CURRENT(total_time);
INSTR_TIME_SUBTRACT(total_time, start_time);
printResults(ttype, total_xacts, nclients, threads, nthreads,
- total_time, conn_total_time);
+ total_time, conn_total_time, total_latencies, total_sqlats,
+ throttle_lag, throttle_lag_max);
return 0;
}
@@ -2494,7 +3108,30 @@ threadRun(void *arg)
int remains = nstate; /* number of remaining clients */
int i;
- result = xmalloc(sizeof(TResult));
+ /* for reporting progress: */
+ int64 thread_start = INSTR_TIME_GET_MICROSEC(thread->start_time);
+ int64 last_report = thread_start;
+ int64 next_report = last_report + (int64) progress * 1000000;
+ int64 last_count = 0,
+ last_lats = 0,
+ last_sqlats = 0,
+ last_lags = 0;
+
+ AggVals aggs;
+
+ /*
+ * Initialize throttling rate target for all of the thread's clients. It
+ * might be a little more accurate to reset thread->start_time here too.
+ * The possible drift seems too small relative to typical throttle delay
+ * times to worry about it.
+ */
+ INSTR_TIME_SET_CURRENT(start);
+ thread->throttle_trigger = INSTR_TIME_GET_MICROSEC(start);
+ thread->throttle_lag = 0;
+ thread->throttle_lag_max = 0;
+
+ result = pg_malloc(sizeof(TResult));
+
INSTR_TIME_SET_ZERO(result->conn_time);
/* open log file if requested */
@@ -2529,6 +3166,8 @@ threadRun(void *arg)
INSTR_TIME_SET_CURRENT(result->conn_time);
INSTR_TIME_SUBTRACT(result->conn_time, thread->start_time);
+ agg_vals_init(&aggs, thread->start_time);
+
/* send start up queries in async manner */
for (i = 0; i < nstate; i++)
{
@@ -2537,7 +3176,7 @@ threadRun(void *arg)
int prev_ecnt = st->ecnt;
st->use_file = getrand(thread, 0, num_files - 1);
- if (!doCustom(thread, st, &result->conn_time, logfile))
+ if (!doCustom(thread, st, &result->conn_time, logfile, &aggs))
remains--; /* I've aborted */
if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND)
@@ -2566,25 +3205,38 @@ threadRun(void *arg)
Command **commands = sql_files[st->use_file];
int sock;
- if (st->sleeping)
+ if (st->con == NULL)
{
- int this_usec;
-
- if (min_usec == INT64_MAX)
+ continue;
+ }
+ else if (st->sleeping)
+ {
+ if (st->throttling && timer_exceeded)
{
- instr_time now;
-
- INSTR_TIME_SET_CURRENT(now);
- now_usec = INSTR_TIME_GET_MICROSEC(now);
+ /* interrupt client which has not started a transaction */
+ remains--;
+ st->sleeping = 0;
+ st->throttling = false;
+ PQfinish(st->con);
+ st->con = NULL;
+ continue;
}
+ else /* just a nap from the script */
+ {
+ int this_usec;
- this_usec = st->until - now_usec;
- if (min_usec > this_usec)
- min_usec = this_usec;
- }
- else if (st->con == NULL)
- {
- continue;
+ if (min_usec == INT64_MAX)
+ {
+ instr_time now;
+
+ INSTR_TIME_SET_CURRENT(now);
+ now_usec = INSTR_TIME_GET_MICROSEC(now);
+ }
+
+ this_usec = st->until - now_usec;
+ if (min_usec > this_usec)
+ min_usec = this_usec;
+ }
}
else if (commands[st->state]->type == META_COMMAND)
{
@@ -2639,7 +3291,7 @@ threadRun(void *arg)
if (st->con && (FD_ISSET(PQsocket(st->con), &input_mask)
|| commands[st->state]->type == META_COMMAND))
{
- if (!doCustom(thread, st, &result->conn_time, logfile))
+ if (!doCustom(thread, st, &result->conn_time, logfile, &aggs))
remains--; /* I've aborted */
}
@@ -2651,14 +3303,141 @@ threadRun(void *arg)
st->con = NULL;
}
}
+
+#ifdef PTHREAD_FORK_EMULATION
+ /* each process reports its own progress */
+ if (progress)
+ {
+ instr_time now_time;
+ int64 now;
+
+ INSTR_TIME_SET_CURRENT(now_time);
+ now = INSTR_TIME_GET_MICROSEC(now_time);
+ if (now >= next_report)
+ {
+ /* generate and show report */
+ int64 count = 0,
+ lats = 0,
+ sqlats = 0;
+ int64 lags = thread->throttle_lag;
+ int64 run = now - last_report;
+ double tps,
+ total_run,
+ latency,
+ sqlat,
+ stdev,
+ lag;
+
+ for (i = 0; i < nstate; i++)
+ {
+ count += state[i].cnt;
+ lats += state[i].txn_latencies;
+ sqlats += state[i].txn_sqlats;
+ }
+
+ total_run = (now - thread_start) / 1000000.0;
+ tps = 1000000.0 * (count - last_count) / run;
+ latency = 0.001 * (lats - last_lats) / (count - last_count);
+ sqlat = 1.0 * (sqlats - last_sqlats) / (count - last_count);
+ stdev = 0.001 * sqrt(sqlat - 1000000.0 * latency * latency);
+ lag = 0.001 * (lags - last_lags) / (count - last_count);
+
+ if (throttle_delay)
+ fprintf(stderr,
+ "progress %d: %.1f s, %.1f tps, "
+ "lat %.3f ms stddev %.3f, lag %.3f ms\n",
+ thread->tid, total_run, tps, latency, stdev, lag);
+ else
+ fprintf(stderr,
+ "progress %d: %.1f s, %.1f tps, "
+ "lat %.3f ms stddev %.3f\n",
+ thread->tid, total_run, tps, latency, stdev);
+
+ last_count = count;
+ last_lats = lats;
+ last_sqlats = sqlats;
+ last_lags = lags;
+ last_report = now;
+ next_report += (int64) progress *1000000;
+ }
+ }
+#else
+ /* progress report by thread 0 for all threads */
+ if (progress && thread->tid == 0)
+ {
+ instr_time now_time;
+ int64 now;
+
+ INSTR_TIME_SET_CURRENT(now_time);
+ now = INSTR_TIME_GET_MICROSEC(now_time);
+ if (now >= next_report)
+ {
+ /* generate and show report */
+ int64 count = 0,
+ lats = 0,
+ sqlats = 0,
+ lags = 0;
+ int64 run = now - last_report;
+ double tps,
+ total_run,
+ latency,
+ sqlat,
+ lag,
+ stdev;
+
+ for (i = 0; i < progress_nclients; i++)
+ {
+ count += state[i].cnt;
+ lats += state[i].txn_latencies;
+ sqlats += state[i].txn_sqlats;
+ }
+
+ for (i = 0; i < progress_nthreads; i++)
+ lags += thread[i].throttle_lag;
+
+ total_run = (now - thread_start) / 1000000.0;
+ tps = 1000000.0 * (count - last_count) / run;
+ latency = 0.001 * (lats - last_lats) / (count - last_count);
+ sqlat = 1.0 * (sqlats - last_sqlats) / (count - last_count);
+ stdev = 0.001 * sqrt(sqlat - 1000000.0 * latency * latency);
+ lag = 0.001 * (lags - last_lags) / (count - last_count);
+
+ if (throttle_delay)
+ fprintf(stderr,
+ "progress: %.1f s, %.1f tps, "
+ "lat %.3f ms stddev %.3f, lag %.3f ms\n",
+ total_run, tps, latency, stdev, lag);
+ else
+ fprintf(stderr,
+ "progress: %.1f s, %.1f tps, "
+ "lat %.3f ms stddev %.3f\n",
+ total_run, tps, latency, stdev);
+
+ last_count = count;
+ last_lats = lats;
+ last_sqlats = sqlats;
+ last_lags = lags;
+ last_report = now;
+ next_report += (int64) progress *1000000;
+ }
+ }
+#endif /* PTHREAD_FORK_EMULATION */
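
Both progress-report branches above follow the same pattern: client counters are cumulative, so each report subtracts the values remembered at the previous report and divides by the interval that has elapsed since then. A short sketch of that delta arithmetic on hypothetical counters:

#include <stdio.h>

int
main(void)
{
    /* hypothetical cumulative counters sampled at two report times */
    long long last_count = 12000, count = 15000;
    long long last_report = 5000000, now = 10000000;   /* microseconds */
    long long run = now - last_report;

    double tps = 1000000.0 * (count - last_count) / run;

    printf("%.1f tps over the last %.1f s\n", tps, run / 1000000.0);  /* 600.0 tps, 5.0 s */
    return 0;
}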
}
done:
INSTR_TIME_SET_CURRENT(start);
disconnect_all(state, nstate);
result->xacts = 0;
+ result->latencies = 0;
+ result->sqlats = 0;
for (i = 0; i < nstate; i++)
+ {
result->xacts += state[i].cnt;
+ result->latencies += state[i].txn_latencies;
+ result->sqlats += state[i].txn_sqlats;
+ }
+ result->throttle_lag = thread->throttle_lag;
+ result->throttle_lag_max = thread->throttle_lag_max;
INSTR_TIME_SET_CURRENT(end);
INSTR_TIME_ACCUM_DIFF(result->conn_time, end, start);
if (logfile)
@@ -2666,7 +3445,6 @@ done:
return result;
}
-
/*
* Support for duration option: set timer_exceeded after so many seconds.
*/
@@ -2706,8 +3484,9 @@ pthread_create(pthread_t *thread,
{
fork_pthread *th;
void *ret;
+ int rc;
- th = (fork_pthread *) xmalloc(sizeof(fork_pthread));
+ th = (fork_pthread *) pg_malloc(sizeof(fork_pthread));
if (pipe(th->pipes) < 0)
{
free(th);
@@ -2735,7 +3514,8 @@ pthread_create(pthread_t *thread,
setalarm(duration);
ret = start_routine(arg);
- write(th->pipes[1], ret, sizeof(TResult));
+ rc = write(th->pipes[1], ret, sizeof(TResult));
+ (void) rc;
close(th->pipes[1]);
free(th);
exit(0);
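
On platforms where pgbench emulates threads with fork(), the child above writes its TResult into a pipe and exits; the matching pthread_join below recovers it with a blocking read, and the new (void) rc cast merely silences the unused-result warning on write(). A reduced sketch of that handoff (the struct and numbers are hypothetical, not pgbench's real TResult):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

typedef struct
{
    long xacts;                 /* stand-in for pgbench's TResult */
} Result;

int
main(void)
{
    int    pipes[2];
    Result r;

    if (pipe(pipes) < 0)
        return 1;

    if (fork() == 0)
    {
        /* child: do the work, then ship the result back through the pipe */
        Result  mine = {42};
        ssize_t rc = write(pipes[1], &mine, sizeof(mine));

        (void) rc;              /* ignored, as in the patch above */
        close(pipes[1]);
        exit(0);
    }

    /* parent: the "join" is simply a blocking read of the child's result */
    if (read(pipes[0], &r, sizeof(r)) != sizeof(r))
        return 1;
    wait(NULL);
    printf("child processed %ld xacts\n", r.xacts);
    return 0;
}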
@@ -2755,7 +3535,7 @@ pthread_join(pthread_t th, void **thread_return)
if (thread_return != NULL)
{
/* assume result is TResult */
- *thread_return = xmalloc(sizeof(TResult));
+ *thread_return = pg_malloc(sizeof(TResult));
if (read(th->pipes[0], *thread_return, sizeof(TResult)) != sizeof(TResult))
{
free(*thread_return);
@@ -2823,7 +3603,7 @@ pthread_create(pthread_t *thread,
int save_errno;
win32_pthread *th;
- th = (win32_pthread *) xmalloc(sizeof(win32_pthread));
+ th = (win32_pthread *) pg_malloc(sizeof(win32_pthread));
th->routine = start_routine;
th->arg = arg;
th->result = NULL;
diff --git a/contrib/pgcrypto/Makefile b/contrib/pgcrypto/Makefile
index dadec953c2..1c85c982ff 100644
--- a/contrib/pgcrypto/Makefile
+++ b/contrib/pgcrypto/Makefile
@@ -26,7 +26,7 @@ MODULE_big = pgcrypto
OBJS = $(SRCS:.c=.o)
EXTENSION = pgcrypto
-DATA = pgcrypto--1.0.sql pgcrypto--unpackaged--1.0.sql
+DATA = pgcrypto--1.1.sql pgcrypto--1.0--1.1.sql pgcrypto--unpackaged--1.0.sql
REGRESS = init md5 sha1 hmac-md5 hmac-sha1 blowfish rijndael \
$(CF_TESTS) \
diff --git a/contrib/pgcrypto/crypt-blowfish.c b/contrib/pgcrypto/crypt-blowfish.c
index b49747d926..fbaa3d776a 100644
--- a/contrib/pgcrypto/crypt-blowfish.c
+++ b/contrib/pgcrypto/crypt-blowfish.c
@@ -35,6 +35,7 @@
#include "postgres.h"
#include "px-crypt.h"
+#include "px.h"
#ifdef __i386__
#define BF_ASM 0 /* 1 */
@@ -616,7 +617,7 @@ _crypt_blowfish_rn(const char *key, const char *setting,
count = (BF_word) 1 << ((setting[4] - '0') * 10 + (setting[5] - '0'));
if (count < 16 || BF_decode(data.binary.salt, &setting[7], 16))
{
- memset(data.binary.salt, 0, sizeof(data.binary.salt));
+ px_memset(data.binary.salt, 0, sizeof(data.binary.salt));
return NULL;
}
BF_swap(data.binary.salt, 4);
@@ -729,7 +730,7 @@ _crypt_blowfish_rn(const char *key, const char *setting,
/* Overwrite the most obvious sensitive data we have on the stack. Note
* that this does not guarantee there's no sensitive data left on the
* stack and/or in registers; I'm not aware of portable code that does. */
- memset(&data, 0, sizeof(data));
+ px_memset(&data, 0, sizeof(data));
return output;
}
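
This and the following pgcrypto hunks route the wiping of sensitive buffers through px_memset(), a helper defined in px.c (not shown in this diff); the intent is to keep the compiler from discarding a memset whose target is about to go out of scope. Purely as an illustration, and not pgcrypto's actual definition, one common way to obtain a store the optimizer must keep is a volatile-qualified loop:

#include <stddef.h>
#include <stdio.h>

/* Illustrative sketch of a non-elidable memset; px.c may do this differently. */
static void *
wipe_memory(void *ptr, int c, size_t len)
{
    volatile unsigned char *p = (volatile unsigned char *) ptr;

    while (len--)
        *p++ = (unsigned char) c;
    return ptr;
}

int
main(void)
{
    char secret[16] = "hunter2";    /* hypothetical sensitive data */

    wipe_memory(secret, 0, sizeof(secret));
    printf("first byte after wipe: %d\n", secret[0]);   /* prints 0 */
    return 0;
}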
diff --git a/contrib/pgcrypto/crypt-des.c b/contrib/pgcrypto/crypt-des.c
index cc2e76a71f..4ed44beeff 100644
--- a/contrib/pgcrypto/crypt-des.c
+++ b/contrib/pgcrypto/crypt-des.c
@@ -29,7 +29,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/crypt-gensalt.c b/contrib/pgcrypto/crypt-gensalt.c
index ec2e0fa025..6dc7cbdb3a 100644
--- a/contrib/pgcrypto/crypt-gensalt.c
+++ b/contrib/pgcrypto/crypt-gensalt.c
@@ -9,7 +9,7 @@
* entirely in crypt_blowfish.c.
*
* Put bcrypt generator also here as crypt-blowfish.c
- * may not be compiled always. -- marko
+ * may not be compiled always. -- marko
*/
#include "postgres.h"
diff --git a/contrib/pgcrypto/crypt-md5.c b/contrib/pgcrypto/crypt-md5.c
index 2a5cd70208..6a09d76989 100644
--- a/contrib/pgcrypto/crypt-md5.c
+++ b/contrib/pgcrypto/crypt-md5.c
@@ -89,7 +89,7 @@ px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen)
px_md_update(ctx, final, pl > MD5_SIZE ? MD5_SIZE : pl);
/* Don't leave anything around in vm they could use. */
- memset(final, 0, sizeof final);
+ px_memset(final, 0, sizeof final);
/* Then something really weird... */
for (i = strlen(pw); i; i >>= 1)
@@ -154,7 +154,7 @@ px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen)
*p = '\0';
/* Don't leave anything around in vm they could use. */
- memset(final, 0, sizeof final);
+ px_memset(final, 0, sizeof final);
px_md_free(ctx1);
px_md_free(ctx);
diff --git a/contrib/pgcrypto/expected/pgp-encrypt.out b/contrib/pgcrypto/expected/pgp-encrypt.out
index 8ef3875fd6..b35de79afa 100644
--- a/contrib/pgcrypto/expected/pgp-encrypt.out
+++ b/contrib/pgcrypto/expected/pgp-encrypt.out
@@ -11,7 +11,7 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), 'key');
-- check whether the defaults are ok
select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
- 'key', 'expect-cipher-algo=aes128,
+ 'key', 'expect-cipher-algo=aes128,
expect-disable-mdc=0,
expect-sess-key=0,
expect-s2k-mode=3,
@@ -25,7 +25,7 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
-- maybe the expect- stuff simply does not work
select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
- 'key', 'expect-cipher-algo=bf,
+ 'key', 'expect-cipher-algo=bf,
expect-disable-mdc=1,
expect-sess-key=1,
expect-s2k-mode=0,
@@ -56,7 +56,7 @@ select pgp_sym_decrypt_bytea(pgp_sym_encrypt('Text', 'baz'), 'baz');
-- algorithm change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=bf'),
- 'key', 'expect-cipher-algo=bf');
+ 'key', 'expect-cipher-algo=bf');
pgp_sym_decrypt
-----------------
Secret.
@@ -64,7 +64,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes'),
- 'key', 'expect-cipher-algo=aes128');
+ 'key', 'expect-cipher-algo=aes128');
pgp_sym_decrypt
-----------------
Secret.
@@ -72,7 +72,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes192'),
- 'key', 'expect-cipher-algo=aes192');
+ 'key', 'expect-cipher-algo=aes192');
pgp_sym_decrypt
-----------------
Secret.
@@ -81,7 +81,7 @@ select pgp_sym_decrypt(
-- s2k change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=0'),
- 'key', 'expect-s2k-mode=0');
+ 'key', 'expect-s2k-mode=0');
pgp_sym_decrypt
-----------------
Secret.
@@ -89,7 +89,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=1'),
- 'key', 'expect-s2k-mode=1');
+ 'key', 'expect-s2k-mode=1');
pgp_sym_decrypt
-----------------
Secret.
@@ -97,7 +97,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=3'),
- 'key', 'expect-s2k-mode=3');
+ 'key', 'expect-s2k-mode=3');
pgp_sym_decrypt
-----------------
Secret.
@@ -106,7 +106,7 @@ select pgp_sym_decrypt(
-- s2k digest change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=md5'),
- 'key', 'expect-s2k-digest-algo=md5');
+ 'key', 'expect-s2k-digest-algo=md5');
pgp_sym_decrypt
-----------------
Secret.
@@ -114,7 +114,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=sha1'),
- 'key', 'expect-s2k-digest-algo=sha1');
+ 'key', 'expect-s2k-digest-algo=sha1');
pgp_sym_decrypt
-----------------
Secret.
@@ -123,7 +123,7 @@ select pgp_sym_decrypt(
-- sess key
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=0'),
- 'key', 'expect-sess-key=0');
+ 'key', 'expect-sess-key=0');
pgp_sym_decrypt
-----------------
Secret.
@@ -131,7 +131,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1'),
- 'key', 'expect-sess-key=1');
+ 'key', 'expect-sess-key=1');
pgp_sym_decrypt
-----------------
Secret.
@@ -139,7 +139,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=bf'),
- 'key', 'expect-sess-key=1, expect-cipher-algo=bf');
+ 'key', 'expect-sess-key=1, expect-cipher-algo=bf');
pgp_sym_decrypt
-----------------
Secret.
@@ -147,7 +147,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes192'),
- 'key', 'expect-sess-key=1, expect-cipher-algo=aes192');
+ 'key', 'expect-sess-key=1, expect-cipher-algo=aes192');
pgp_sym_decrypt
-----------------
Secret.
@@ -155,7 +155,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes256'),
- 'key', 'expect-sess-key=1, expect-cipher-algo=aes256');
+ 'key', 'expect-sess-key=1, expect-cipher-algo=aes256');
pgp_sym_decrypt
-----------------
Secret.
@@ -164,7 +164,7 @@ select pgp_sym_decrypt(
-- no mdc
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'disable-mdc=1'),
- 'key', 'expect-disable-mdc=1');
+ 'key', 'expect-disable-mdc=1');
pgp_sym_decrypt
-----------------
Secret.
@@ -173,7 +173,7 @@ select pgp_sym_decrypt(
-- crlf
select encode(pgp_sym_decrypt_bytea(
pgp_sym_encrypt(E'1\n2\n3\r\n', 'key', 'convert-crlf=1'),
- 'key'), 'hex');
+ 'key'), 'hex');
encode
----------------------
310d0a320d0a330d0d0a
@@ -182,7 +182,7 @@ select encode(pgp_sym_decrypt_bytea(
-- conversion should be lossless
select encode(digest(pgp_sym_decrypt(
pgp_sym_encrypt(E'\r\n0\n1\r\r\n\n2\r', 'key', 'convert-crlf=1'),
- 'key', 'convert-crlf=1'), 'sha1'), 'hex') as result,
+ 'key', 'convert-crlf=1'), 'sha1'), 'hex') as result,
encode(digest(E'\r\n0\n1\r\r\n\n2\r', 'sha1'), 'hex') as expect;
result | expect
------------------------------------------+------------------------------------------
diff --git a/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out b/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out
index 7fbbd839d9..61e09b9a86 100644
--- a/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out
+++ b/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out
@@ -417,6 +417,70 @@ hbt6LhKhCLUNdz/udIt0JAC6c/HdPLSW3HnmM3+iNj+Kug==
=UKh3
-----END PGP PRIVATE KEY BLOCK-----
');
+insert into keytbl (id, name, pubkey, seckey)
+values (7, 'rsaenc2048-psw', '
+same key with password
+', '
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+lQPEBELr2m0BCADOrnknlnXI0EzRExf/TgoHvK7Xx/E0keWqV3KrOyC3/tY2KOrj
+UVxaAX5pkFX9wdQObGPIJm06u6D16CH6CildX/vxG7YgvvKzK8JGAbwrXAfk7OIW
+czO2zRaZGDynoK3mAxHRBReyTKtNv8rDQhuZs6AOozJNARdbyUO/yqUnqNNygWuT
+4htFDEuLPIJwAbMSD0BvFW6YQaPdxzaAZm3EWVNbwDzjgbBUdBiUUwRdZIFUhsjJ
+dirFdy5+uuZru6y6CNC1OERkJ7P8EyoFiZckAIE5gshVZzNuyLOZjc5DhWBvLbX4
+NZElAnfiv+4nA6y8wQLSIbmHA3nqJaBklj85AAYp/gcDCNnoEKwFo86JYCE1J92R
+HRQ7DoyAZpW1O0dTXL8Epk0sKsKDrCJOrIkDymsjfyBexADIeqOkioy/50wD2Mku
+CVHKWO2duAiJN5t/FoRgpR1/Q11K6QdfqOG0HxwfIXLcPv7eSIso8kWorj+I01BP
+Fn/atGEbIjdWaz/q2XHbu0Q3x6Et2gIsbLRVMhiYz1UG9uzGJ0TYCdBa2SFhs184
+52akMpD+XVdM0Sq9/Cx40Seo8hzERB96+GXnQ48q2OhlvcEXiFyD6M6wYCWbEV+6
+XQVMymbl22FPP/bD9ReQX2kjrkQlFAtmhr+0y8reMCbcxwLuQfA3173lSPo7jrbH
+oLrGhkRpqd2bYCelqdy/XMmRFso0+7uytHfTFrUNfDWfmHVrygoVrNnarCbxMMI0
+I8Q+tKHMThWgf0rIOSh0+w38kOXFCEqEWF8YkAqCrMZIlJIed78rOCFgG4aHajZR
+D8rpXdUOIr/WeUddK25Tu8IuNJb0kFf12IMgNh0nS+mzlqWiofS5kA0TeB8wBV6t
+RotaeyDNSsMoowfN8cf1yHMTxli+K1Tasg003WVUoWgUc+EsJ5+KTNwaX5uGv0Cs
+j6dg6/FVeVRL9UsyF+2kt7euX3mABuUtcVGx/ZKTq/MNGEh6/r3B5U37qt+FDRbw
+ppKPc2AP+yBUWsQskyrxFgv4eSpcLEg+lgdz/zLyG4qW4lrFUoO790Cm/J6C7/WQ
+Z+E8kcS8aINJkg1skahH31d59ZkbW9PVeJMFGzNb0Z2LowngNP/BMrJ0LT2CQyLs
+UxbT16S/gwAyUpJnbhWYr3nDdlwtC0rVopVTPD7khPRppcsq1f8D70rdIxI4Ouuw
+vbjNZ1EWRJ9f2Ywb++k/xgSXwJkGodUlrUr+3i8cv8mPx+fWvif9q7Y5Ex1wCRa8
+8FAj/o+hEbQlUlNBIDIwNDggRW5jIDxyc2EyMDQ4ZW5jQGV4YW1wbGUub3JnPokB
+NAQTAQIAHgUCQuvabQIbAwYLCQgHAwIDFQIDAxYCAQIeAQIXgAAKCRDImeqTRBlV
+WRzJCACbRhx2fYjPGKta69M5dS+kr5UD/CQmsR2t9cB9zyqhratjPnKW9q13+4AG
+P3aByT14IH1c5Mha8rJkNYD2wxmC8jrrcPiJIYoRG+W1sUATY/t8wBbNWF+r9h11
+m0lEpsmNVff/jU7SpNN6JQ3P7MHd5V85LlDoXIH6QYCLd0PjKU+jNvjiBe5VX0m9
+a1nacE3xoWc1vbM0DnqEuID78Qgkcrmm0ESeg1h+tRfHxSAyYNc/gPzm8eH6l+hj
+gOvUc4Gd6LpBQSF8TcFfT2TZwJh7WVWDvNIP6FWAW7rzmHnX3wwXkGq4REWeVtk5
+yBPp6mOtWDiwaqLJYsoHWU11C8zYnQPEBELr2roBCADrgiWXZMzkQOntZa/NS56+
+CczLFQRQPl/8iJAW1eql/wOJ1UiwGSjT189WCKzE7vtazCIstdCFmwOs4DE6cz4S
+UX4HjzjYHZwmMiuSrIefwuZ7cysMBsMXypQFyMSbqwh102xGvmLz3Z++rydx7Fzl
+1RC/ny2+FN5dzYPO2DNtNi4dR2tjHktsxBWXAKCmxagAIwyxGouuEqDhYdFtwrA9
+Qy+M5n6fmGa1Dx07WWnbIud4uCilv8LPVKx5aJamDYWM3v7kS8n51MfTzeK/xoRM
+2rsgzFdLJqPdbgd2nsD37fngqZnlp7tDxSVSuMckZoSKtq1QsNemtaQSYq7xjPst
+AAYp/gcDCNnoEKwFo86JYAsxoD+wQ0zBi5RBM5EphXTpM1qKxmigsKOvBSaMmr0y
+VjHtGY3poyV3t6VboOGCsFcaKm0tIdDL7vrxxwyYESETpF29b7QrYcoaLKMG7fsy
+t9SUI3UV2H9uUquHgqHtsqz0jYOgm9tYnpesgQ/kOAWI/tej1ZJXUIWEmZMH/W6d
+ATNvZ3ivwApfC0qF5G3oPgBSoIuQ/8I+pN/kmuyNAnJWNgagFhA/2VFBvh5XgztV
+NW7G//KpR1scsn140SO/wpGBM3Kr4m8ztl9w9U6a7NlQZ2ub3/pIUTpSzyLBxJZ/
+RfuZI7ROdgDMKmEgCYrN2kfp0LIxnYL6ZJu3FDcS4V098lyf5rHvB3PAEdL6Zyhd
+qYp3Sx68r0F4vzk5iAIWf6pG2YdfoP2Z48Pmq9xW8qD9iwFcoz9oAzDEMENn6dfq
+6MzfoaXEoYp8cR/o+aeEaGUtYBHiaxQcJYx35B9IhsXXA49yRORK8qdwhSHxB3NQ
+H3pUWkfw368f/A207hQVs9yYXlEvMZikxl58gldCd3BAPqHm/XzgknRRNQZBPPKJ
+BMZebZ22Dm0qDuIqW4GXLB4sLf0+UXydVINIUOlzg+S4jrwx7eZqb6UkRXTIWVo5
+psTsD14wzWBRdUQHZOZD33+M8ugmewvLY/0Uix+2RorkmB7/jqoZvx/MehDwmCZd
+VH8sb2wpZ55sj7gCXxvrfieQD/VeH54OwjjbtK56iYq56RVD0h1az8xDY2GZXeT7
+J0c3BGpuoca5xOFWr1SylAr/miEPxOBfnfk8oZQJvZrjSBGjsTbALep2vDJk8ROD
+sdQCJuU1RHDrwKHlbUL0NbGRO2juJGsatdWnuVKsFbaFW2pHHkezKuwOcaAJv7Xt
+8LRF17czAJ1uaLKwV8Paqx6UIv+089GbWZi7HIkBHwQYAQIACQUCQuvaugIbDAAK
+CRDImeqTRBlVWS7XCACDVstKM+SHD6V0bkfO6ampHzj4krKjN0lonN5+7b7WKpgT
+QHRYvPY8lUiIrjXGISQqEG9M5Bi5ea1aoBZem0P3U/lKheg0lYtA7dM3BqsA2EfG
+RaDD9M5TFCqhy2VFR6Pk0MP7h5bkb2VxLUUQa4oNa1fT3q7zS875NvImO/HZ5UzW
+T5d2Z5iwY6I2AOKYKt4kZhzXgbt5j2O3biDDXSfWwwAojWqbqVygepn047KVr7Al
+2ug9hkY7tHz7U71HbZasroFgNPmP/UnAxmps4RKM28MRVPTI4cKUIdE3gIKFu3ou
+EqEItQ13P+50i3QkALpz8d08tJbceeYzf6I2P4q6
+=QFm5
+-----END PGP PRIVATE KEY BLOCK-----
+');
-- elg1024 / aes128
insert into encdata (id, data) values (1, '
-----BEGIN PGP MESSAGE-----
@@ -537,6 +601,18 @@ ERROR: Wrong key
select pgp_pub_decrypt(dearmor(data), dearmor(seckey))
from keytbl, encdata where keytbl.id=4 and encdata.id=1;
ERROR: No encryption key found
+-- rsa: password-protected secret key, wrong password
+select pgp_pub_decrypt(dearmor(data), dearmor(seckey), '123')
+from keytbl, encdata where keytbl.id=7 and encdata.id=4;
+ERROR: Corrupt data
+-- rsa: password-protected secret key, right password
+select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 'parool')
+from keytbl, encdata where keytbl.id=7 and encdata.id=4;
+ pgp_pub_decrypt
+-----------------
+ Secret message.
+(1 row)
+
-- password-protected secret key, no password
select pgp_pub_decrypt(dearmor(data), dearmor(seckey))
from keytbl, encdata where keytbl.id=5 and encdata.id=1;
diff --git a/contrib/pgcrypto/fortuna.c b/contrib/pgcrypto/fortuna.c
index 1228fb4ad0..7ab888fb98 100644
--- a/contrib/pgcrypto/fortuna.c
+++ b/contrib/pgcrypto/fortuna.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -34,6 +34,7 @@
#include <sys/time.h>
#include <time.h>
+#include "px.h"
#include "rijndael.h"
#include "sha2.h"
#include "fortuna.h"
@@ -52,7 +53,7 @@
/*
* There is some confusion about whether and how to carry forward
- * the state of the pools. Seems like original Fortuna does not
+ * the state of the pools. Seems like original Fortuna does not
* do it, resetting hash after each request. I guess expecting
* feeding to happen more often that requesting. This is absolutely
* unsuitable for pgcrypto, as nothing asynchronous happens here.
@@ -76,7 +77,7 @@
* How many pools.
*
* Original Fortuna uses 32 pools, that means 32'th pool is
- * used not earlier than in 13th year. This is a waste in
+ * used not earlier than in 13th year. This is a waste in
* pgcrypto, as we have very low-frequancy seeding. Here
* is preferable to have all entropy usable in reasonable time.
*
@@ -169,7 +170,7 @@ md_result(MD_CTX * ctx, uint8 *dst)
memcpy(&tmp, ctx, sizeof(*ctx));
SHA256_Final(dst, &tmp);
- memset(&tmp, 0, sizeof(tmp));
+ px_memset(&tmp, 0, sizeof(tmp));
}
/*
@@ -243,7 +244,7 @@ enough_time_passed(FState *st)
if (ok)
memcpy(last, &tv, sizeof(tv));
- memset(&tv, 0, sizeof(tv));
+ px_memset(&tv, 0, sizeof(tv));
return ok;
}
@@ -290,12 +291,12 @@ reseed(FState *st)
/* use new key */
ciph_init(&st->ciph, st->key, BLOCK);
- memset(&key_md, 0, sizeof(key_md));
- memset(buf, 0, BLOCK);
+ px_memset(&key_md, 0, sizeof(key_md));
+ px_memset(buf, 0, BLOCK);
}
/*
- * Pick a random pool. This uses key bytes as random source.
+ * Pick a random pool. This uses key bytes as random source.
*/
static unsigned
get_rand_pool(FState *st)
@@ -341,8 +342,8 @@ add_entropy(FState *st, const uint8 *data, unsigned len)
if (pos == 0)
st->pool0_bytes += len;
- memset(hash, 0, BLOCK);
- memset(&md, 0, sizeof(md));
+ px_memset(hash, 0, BLOCK);
+ px_memset(&md, 0, sizeof(md));
}
/*
@@ -378,7 +379,7 @@ startup_tricks(FState *st)
encrypt_counter(st, buf + CIPH_BLOCK);
md_update(&st->pool[i], buf, BLOCK);
}
- memset(buf, 0, BLOCK);
+ px_memset(buf, 0, BLOCK);
/* Hide the key. */
rekey(st);
diff --git a/contrib/pgcrypto/fortuna.h b/contrib/pgcrypto/fortuna.h
index 2e49f8aab8..bf9f4768d1 100644
--- a/contrib/pgcrypto/fortuna.h
+++ b/contrib/pgcrypto/fortuna.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/imath.c b/contrib/pgcrypto/imath.c
index 3286cd9d80..5c6ebebfe2 100644
--- a/contrib/pgcrypto/imath.c
+++ b/contrib/pgcrypto/imath.c
@@ -21,7 +21,7 @@
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
@@ -211,7 +211,7 @@ static int s_vcmp(mp_int a, int v);
static mp_digit s_uadd(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
-/* Unsigned magnitude subtraction. Assumes dc is big enough. */
+/* Unsigned magnitude subtraction. Assumes dc is big enough. */
static void s_usub(mp_digit *da, mp_digit *db, mp_digit *dc,
mp_size size_a, mp_size size_b);
@@ -2275,7 +2275,7 @@ mp_error_string(mp_result res)
/* }}} */
/*------------------------------------------------------------------------*/
-/* Private functions for internal use. These make assumptions. */
+/* Private functions for internal use. These make assumptions. */
/* {{{ s_alloc(num) */
diff --git a/contrib/pgcrypto/imath.h b/contrib/pgcrypto/imath.h
index f2b02d0cd7..0a4f0f713f 100644
--- a/contrib/pgcrypto/imath.h
+++ b/contrib/pgcrypto/imath.h
@@ -20,7 +20,7 @@
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
@@ -117,14 +117,11 @@ mp_result mp_int_mul_value(mp_int a, int value, mp_int c);
mp_result mp_int_mul_pow2(mp_int a, int p2, mp_int c);
mp_result mp_int_sqr(mp_int a, mp_int c); /* c = a * a */
-mp_result
-mp_int_div(mp_int a, mp_int b, /* q = a / b */
+mp_result mp_int_div(mp_int a, mp_int b, /* q = a / b */
mp_int q, mp_int r); /* r = a % b */
-mp_result
-mp_int_div_value(mp_int a, int value, /* q = a / value */
+mp_result mp_int_div_value(mp_int a, int value, /* q = a / value */
mp_int q, int *r); /* r = a % value */
-mp_result
-mp_int_div_pow2(mp_int a, int p2, /* q = a / 2^p2 */
+mp_result mp_int_div_pow2(mp_int a, int p2, /* q = a / 2^p2 */
mp_int q, mp_int r); /* r = q % 2^p2 */
mp_result mp_int_mod(mp_int a, mp_int m, mp_int c); /* c = a % m */
@@ -143,17 +140,13 @@ int mp_int_divisible_value(mp_int a, int v);
/* Returns k >= 0 such that z = 2^k, if one exists; otherwise < 0 */
int mp_int_is_pow2(mp_int z);
-mp_result
-mp_int_exptmod(mp_int a, mp_int b, mp_int m,
+mp_result mp_int_exptmod(mp_int a, mp_int b, mp_int m,
mp_int c); /* c = a^b (mod m) */
-mp_result
-mp_int_exptmod_evalue(mp_int a, int value,
+mp_result mp_int_exptmod_evalue(mp_int a, int value,
mp_int m, mp_int c); /* c = a^v (mod m) */
-mp_result
-mp_int_exptmod_bvalue(int value, mp_int b,
+mp_result mp_int_exptmod_bvalue(int value, mp_int b,
mp_int m, mp_int c); /* c = v^b (mod m) */
-mp_result
-mp_int_exptmod_known(mp_int a, mp_int b,
+mp_result mp_int_exptmod_known(mp_int a, mp_int b,
mp_int m, mp_int mu,
mp_int c); /* c = a^b (mod m) */
mp_result mp_int_redux_const(mp_int m, mp_int c);
@@ -162,8 +155,7 @@ mp_result mp_int_invmod(mp_int a, mp_int m, mp_int c); /* c = 1/a (mod m) */
mp_result mp_int_gcd(mp_int a, mp_int b, mp_int c); /* c = gcd(a, b) */
-mp_result
-mp_int_egcd(mp_int a, mp_int b, mp_int c, /* c = gcd(a, b) */
+mp_result mp_int_egcd(mp_int a, mp_int b, mp_int c, /* c = gcd(a, b) */
mp_int x, mp_int y); /* c = ax + by */
mp_result mp_int_sqrt(mp_int a, mp_int c); /* c = floor(sqrt(q)) */
diff --git a/contrib/pgcrypto/internal-sha2.c b/contrib/pgcrypto/internal-sha2.c
index f86b47816b..55ec7e16bd 100644
--- a/contrib/pgcrypto/internal-sha2.c
+++ b/contrib/pgcrypto/internal-sha2.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -84,7 +84,7 @@ int_sha224_free(PX_MD *h)
{
SHA224_CTX *ctx = (SHA224_CTX *) h->p.ptr;
- memset(ctx, 0, sizeof(*ctx));
+ px_memset(ctx, 0, sizeof(*ctx));
px_free(ctx);
px_free(h);
}
@@ -132,7 +132,7 @@ int_sha256_free(PX_MD *h)
{
SHA256_CTX *ctx = (SHA256_CTX *) h->p.ptr;
- memset(ctx, 0, sizeof(*ctx));
+ px_memset(ctx, 0, sizeof(*ctx));
px_free(ctx);
px_free(h);
}
@@ -180,7 +180,7 @@ int_sha384_free(PX_MD *h)
{
SHA384_CTX *ctx = (SHA384_CTX *) h->p.ptr;
- memset(ctx, 0, sizeof(*ctx));
+ px_memset(ctx, 0, sizeof(*ctx));
px_free(ctx);
px_free(h);
}
@@ -228,7 +228,7 @@ int_sha512_free(PX_MD *h)
{
SHA512_CTX *ctx = (SHA512_CTX *) h->p.ptr;
- memset(ctx, 0, sizeof(*ctx));
+ px_memset(ctx, 0, sizeof(*ctx));
px_free(ctx);
px_free(h);
}
diff --git a/contrib/pgcrypto/internal.c b/contrib/pgcrypto/internal.c
index a02c943e04..cb8ba2633d 100644
--- a/contrib/pgcrypto/internal.c
+++ b/contrib/pgcrypto/internal.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -142,7 +142,7 @@ int_md5_free(PX_MD *h)
{
MD5_CTX *ctx = (MD5_CTX *) h->p.ptr;
- memset(ctx, 0, sizeof(*ctx));
+ px_memset(ctx, 0, sizeof(*ctx));
px_free(ctx);
px_free(h);
}
@@ -190,7 +190,7 @@ int_sha1_free(PX_MD *h)
{
SHA1_CTX *ctx = (SHA1_CTX *) h->p.ptr;
- memset(ctx, 0, sizeof(*ctx));
+ px_memset(ctx, 0, sizeof(*ctx));
px_free(ctx);
px_free(h);
}
@@ -265,7 +265,7 @@ intctx_free(PX_Cipher *c)
if (cx)
{
- memset(cx, 0, sizeof *cx);
+ px_memset(cx, 0, sizeof *cx);
px_free(cx);
}
px_free(c);
@@ -658,7 +658,7 @@ system_reseed(void)
skip = buf[0] >= SYSTEM_RESEED_CHANCE;
}
/* clear 1 byte */
- memset(buf, 0, sizeof(buf));
+ px_memset(buf, 0, sizeof(buf));
if (skip)
return;
@@ -668,7 +668,7 @@ system_reseed(void)
fortuna_add_entropy(buf, n);
seed_time = t;
- memset(buf, 0, sizeof(buf));
+ px_memset(buf, 0, sizeof(buf));
}
int
diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c
index a2c5293efb..6124e4513c 100644
--- a/contrib/pgcrypto/mbuf.c
+++ b/contrib/pgcrypto/mbuf.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -69,7 +69,7 @@ mbuf_free(MBuf *mbuf)
{
if (mbuf->own_data)
{
- memset(mbuf->data, 0, mbuf->buf_end - mbuf->data);
+ px_memset(mbuf->data, 0, mbuf->buf_end - mbuf->data);
px_free(mbuf->data);
}
px_free(mbuf);
@@ -249,11 +249,11 @@ pullf_free(PullFilter *pf)
if (pf->buf)
{
- memset(pf->buf, 0, pf->buflen);
+ px_memset(pf->buf, 0, pf->buflen);
px_free(pf->buf);
}
- memset(pf, 0, sizeof(*pf));
+ px_memset(pf, 0, sizeof(*pf));
px_free(pf);
}
@@ -298,7 +298,7 @@ pullf_read_max(PullFilter *pf, int len, uint8 **data_p, uint8 *tmpbuf)
if (res < 0)
{
/* so the caller must clear only on success */
- memset(tmpbuf, 0, total);
+ px_memset(tmpbuf, 0, total);
return res;
}
if (res == 0)
@@ -415,11 +415,11 @@ pushf_free(PushFilter *mp)
if (mp->buf)
{
- memset(mp->buf, 0, mp->block_size);
+ px_memset(mp->buf, 0, mp->block_size);
px_free(mp->buf);
}
- memset(mp, 0, sizeof(*mp));
+ px_memset(mp, 0, sizeof(*mp));
px_free(mp);
}
diff --git a/contrib/pgcrypto/mbuf.h b/contrib/pgcrypto/mbuf.h
index da016c0a53..988293a729 100644
--- a/contrib/pgcrypto/mbuf.h
+++ b/contrib/pgcrypto/mbuf.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/md5.c b/contrib/pgcrypto/md5.c
index 08227a809c..cac4e408ab 100644
--- a/contrib/pgcrypto/md5.c
+++ b/contrib/pgcrypto/md5.c
@@ -19,7 +19,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/md5.h b/contrib/pgcrypto/md5.h
index 03b9ab58ba..07d08c134d 100644
--- a/contrib/pgcrypto/md5.h
+++ b/contrib/pgcrypto/md5.h
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/openssl.c b/contrib/pgcrypto/openssl.c
index ad7fb9ee0e..976af70591 100644
--- a/contrib/pgcrypto/openssl.c
+++ b/contrib/pgcrypto/openssl.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -142,7 +142,7 @@ EVP_MD_CTX_init(EVP_MD_CTX *ctx)
static int
EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx)
{
- memset(ctx, 0, sizeof(*ctx));
+ px_memset(ctx, 0, sizeof(*ctx));
return 1;
}
@@ -381,7 +381,7 @@ gen_ossl_free(PX_Cipher *c)
{
ossldata *od = (ossldata *) c->ptr;
- memset(od, 0, sizeof(*od));
+ px_memset(od, 0, sizeof(*od));
px_free(od);
px_free(c);
}
@@ -429,8 +429,8 @@ bf_init(PX_Cipher *c, const uint8 *key, unsigned klen, const uint8 *iv)
/*
* Test if key len is supported. BF_set_key silently cut large keys and it
- * could be be a problem when user transfer crypted data from one server
- * to another.
+ * could be a problem when user transfer crypted data from one server to
+ * another.
*/
if (bf_is_strong == -1)
diff --git a/contrib/pgcrypto/pgcrypto--1.0--1.1.sql b/contrib/pgcrypto/pgcrypto--1.0--1.1.sql
new file mode 100644
index 0000000000..42e0c7fffc
--- /dev/null
+++ b/contrib/pgcrypto/pgcrypto--1.0--1.1.sql
@@ -0,0 +1,9 @@
+/* contrib/pgcrypto/pgcrypto--1.0--1.1.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pgcrypto UPDATE TO '1.1'" to load this file. \quit
+
+CREATE FUNCTION gen_random_uuid()
+RETURNS uuid
+AS 'MODULE_PATHNAME', 'pg_random_uuid'
+LANGUAGE C VOLATILE;
diff --git a/contrib/pgcrypto/pgcrypto--1.0.sql b/contrib/pgcrypto/pgcrypto--1.1.sql
index 347825ea07..a260857d30 100644
--- a/contrib/pgcrypto/pgcrypto--1.0.sql
+++ b/contrib/pgcrypto/pgcrypto--1.1.sql
@@ -1,4 +1,4 @@
-/* contrib/pgcrypto/pgcrypto--1.0.sql */
+/* contrib/pgcrypto/pgcrypto--1.1.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION pgcrypto" to load this file. \quit
@@ -63,6 +63,11 @@ RETURNS bytea
AS 'MODULE_PATHNAME', 'pg_random_bytes'
LANGUAGE C VOLATILE STRICT;
+CREATE FUNCTION gen_random_uuid()
+RETURNS uuid
+AS 'MODULE_PATHNAME', 'pg_random_uuid'
+LANGUAGE C VOLATILE;
+
--
-- pgp_sym_encrypt(data, key)
--
diff --git a/contrib/pgcrypto/pgcrypto.c b/contrib/pgcrypto/pgcrypto.c
index a441ca77f1..2d446d8cc9 100644
--- a/contrib/pgcrypto/pgcrypto.c
+++ b/contrib/pgcrypto/pgcrypto.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -35,6 +35,7 @@
#include "parser/scansup.h"
#include "utils/builtins.h"
+#include "utils/uuid.h"
#include "px.h"
#include "px-crypt.h"
@@ -443,6 +444,32 @@ pg_random_bytes(PG_FUNCTION_ARGS)
PG_RETURN_BYTEA_P(res);
}
+/* SQL function: gen_random_uuid() returns uuid */
+PG_FUNCTION_INFO_V1(pg_random_uuid);
+
+Datum
+pg_random_uuid(PG_FUNCTION_ARGS)
+{
+ uint8 *buf = (uint8 *) palloc(UUID_LEN);
+ int err;
+
+ /* generate random bits */
+ err = px_get_pseudo_random_bytes(buf, UUID_LEN);
+ if (err < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION),
+ errmsg("Random generator error: %s", px_strerror(err))));
+
+ /*
+ * Set magic numbers for a "version 4" (pseudorandom) UUID, see
+ * http://tools.ietf.org/html/rfc4122#section-4.4
+ */
+ buf[6] = (buf[6] & 0x0f) | 0x40; /* "version" field */
+ buf[8] = (buf[8] & 0x3f) | 0x80; /* "variant" field */
+
+ PG_RETURN_UUID_P((pg_uuid_t *) buf);
+}
+
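
The two masked assignments above are the only non-random bits in the generated UUID: byte 6 keeps its low nibble and gets 0x4 in the high nibble (the RFC 4122 version field), while byte 8 keeps its low six bits and gets binary 10 in its top two (the variant field). A tiny worked example on made-up bytes:

#include <stdio.h>

int
main(void)
{
    unsigned char buf6 = 0xab;  /* hypothetical random byte at offset 6 */
    unsigned char buf8 = 0xcd;  /* hypothetical random byte at offset 8 */

    buf6 = (buf6 & 0x0f) | 0x40;    /* high nibble forced to 4: the "version" field */
    buf8 = (buf8 & 0x3f) | 0x80;    /* top two bits forced to 10: the "variant" field */

    printf("0x%02x 0x%02x\n", buf6, buf8);  /* prints 0x4b 0x8d */
    return 0;
}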
static void *
find_provider(text *name,
PFN provider_lookup,
diff --git a/contrib/pgcrypto/pgcrypto.control b/contrib/pgcrypto/pgcrypto.control
index 8375cf9e7b..7f79d044ab 100644
--- a/contrib/pgcrypto/pgcrypto.control
+++ b/contrib/pgcrypto/pgcrypto.control
@@ -1,5 +1,5 @@
# pgcrypto extension
comment = 'cryptographic functions'
-default_version = '1.0'
+default_version = '1.1'
module_pathname = '$libdir/pgcrypto'
relocatable = true
diff --git a/contrib/pgcrypto/pgcrypto.h b/contrib/pgcrypto/pgcrypto.h
index 6284ba2406..dfc7a10590 100644
--- a/contrib/pgcrypto/pgcrypto.h
+++ b/contrib/pgcrypto/pgcrypto.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -45,5 +45,6 @@ Datum pg_decrypt(PG_FUNCTION_ARGS);
Datum pg_encrypt_iv(PG_FUNCTION_ARGS);
Datum pg_decrypt_iv(PG_FUNCTION_ARGS);
Datum pg_random_bytes(PG_FUNCTION_ARGS);
+Datum pg_random_uuid(PG_FUNCTION_ARGS);
#endif
diff --git a/contrib/pgcrypto/pgp-armor.c b/contrib/pgcrypto/pgp-armor.c
index 87adf91125..40f20550ea 100644
--- a/contrib/pgcrypto/pgp-armor.c
+++ b/contrib/pgcrypto/pgp-armor.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -32,7 +32,6 @@
#include "postgres.h"
#include "px.h"
-#include "mbuf.h"
#include "pgp.h"
/*
diff --git a/contrib/pgcrypto/pgp-cfb.c b/contrib/pgcrypto/pgp-cfb.c
index 7cf9bf0b8c..1d99915f9d 100644
--- a/contrib/pgcrypto/pgp-cfb.c
+++ b/contrib/pgcrypto/pgp-cfb.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -31,7 +31,6 @@
#include "postgres.h"
-#include "mbuf.h"
#include "px.h"
#include "pgp.h"
@@ -85,12 +84,12 @@ void
pgp_cfb_free(PGP_CFB *ctx)
{
px_cipher_free(ctx->ciph);
- memset(ctx, 0, sizeof(*ctx));
+ px_memset(ctx, 0, sizeof(*ctx));
px_free(ctx);
}
/*
- * Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC)
+ * Data processing for normal CFB. (PGP_PKT_SYMENCRYPTED_DATA_MDC)
*/
static int
mix_encrypt_normal(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst)
diff --git a/contrib/pgcrypto/pgp-compress.c b/contrib/pgcrypto/pgp-compress.c
index c592453402..57efe73338 100644
--- a/contrib/pgcrypto/pgp-compress.c
+++ b/contrib/pgcrypto/pgp-compress.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -31,7 +31,6 @@
#include "postgres.h"
-#include "mbuf.h"
#include "px.h"
#include "pgp.h"
@@ -175,7 +174,7 @@ compress_free(void *priv)
struct ZipStat *st = priv;
deflateEnd(&st->stream);
- memset(st, 0, sizeof(*st));
+ px_memset(st, 0, sizeof(*st));
px_free(st);
}
@@ -298,7 +297,7 @@ decompress_free(void *priv)
struct DecomprData *dec = priv;
inflateEnd(&dec->stream);
- memset(dec, 0, sizeof(*dec));
+ px_memset(dec, 0, sizeof(*dec));
px_free(dec);
}
diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c
index c9aa6cd66a..e03ee7f5f0 100644
--- a/contrib/pgcrypto/pgp-decrypt.c
+++ b/contrib/pgcrypto/pgp-decrypt.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -210,7 +210,7 @@ pktreader_free(void *priv)
{
struct PktData *pkt = priv;
- memset(pkt, 0, sizeof(*pkt));
+ px_memset(pkt, 0, sizeof(*pkt));
px_free(pkt);
}
@@ -257,7 +257,7 @@ prefix_init(void **priv_p, void *arg, PullFilter *src)
if (res != len + 2)
{
px_debug("prefix_init: short read");
- memset(tmpbuf, 0, sizeof(tmpbuf));
+ px_memset(tmpbuf, 0, sizeof(tmpbuf));
return PXE_PGP_CORRUPT_DATA;
}
@@ -280,7 +280,7 @@ prefix_init(void **priv_p, void *arg, PullFilter *src)
*/
ctx->corrupt_prefix = 1;
}
- memset(tmpbuf, 0, sizeof(tmpbuf));
+ px_memset(tmpbuf, 0, sizeof(tmpbuf));
return 0;
}
@@ -395,8 +395,8 @@ mdc_finish(PGP_Context *ctx, PullFilter *src,
*/
px_md_finish(ctx->mdc_ctx, hash);
res = memcmp(hash, *data_p, 20);
- memset(hash, 0, 20);
- memset(tmpbuf, 0, sizeof(tmpbuf));
+ px_memset(hash, 0, 20);
+ px_memset(tmpbuf, 0, sizeof(tmpbuf));
if (res != 0)
{
px_debug("mdc_finish: mdc failed");
@@ -493,7 +493,7 @@ mdcbuf_finish(struct MDCBufData * st)
px_md_update(st->ctx->mdc_ctx, st->mdc_buf, 2);
px_md_finish(st->ctx->mdc_ctx, hash);
res = memcmp(hash, st->mdc_buf + 2, 20);
- memset(hash, 0, 20);
+ px_memset(hash, 0, 20);
if (res)
{
px_debug("mdcbuf_finish: MDC does not match");
@@ -593,7 +593,7 @@ mdcbuf_free(void *priv)
px_md_free(st->ctx->mdc_ctx);
st->ctx->mdc_ctx = NULL;
- memset(st, 0, sizeof(*st));
+ px_memset(st, 0, sizeof(*st));
px_free(st);
}
@@ -703,7 +703,7 @@ parse_symenc_sesskey(PGP_Context *ctx, PullFilter *src)
res = decrypt_key(ctx, p, res);
}
- memset(tmpbuf, 0, sizeof(tmpbuf));
+ px_memset(tmpbuf, 0, sizeof(tmpbuf));
return res;
}
@@ -753,6 +753,7 @@ copy_crlf(MBuf *dst, uint8 *data, int len, int *got_cr)
if (res < 0)
return res;
}
+ px_memset(tmpbuf, 0, sizeof(tmpbuf));
return 0;
}
@@ -792,7 +793,7 @@ parse_literal_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt)
px_debug("parse_literal_data: unexpected eof");
return PXE_PGP_CORRUPT_DATA;
}
- memset(tmpbuf, 0, 4);
+ px_memset(tmpbuf, 0, 4);
/* check if text */
if (ctx->text_mode)
diff --git a/contrib/pgcrypto/pgp-encrypt.c b/contrib/pgcrypto/pgp-encrypt.c
index 3b9b5d20ed..2320c7574b 100644
--- a/contrib/pgcrypto/pgp-encrypt.c
+++ b/contrib/pgcrypto/pgp-encrypt.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -128,7 +128,7 @@ mdc_flush(PushFilter *dst, void *priv)
px_md_finish(md, pkt + 2);
res = pushf_write(dst, pkt, 2 + MDC_DIGEST_LEN);
- memset(pkt, 0, 2 + MDC_DIGEST_LEN);
+ px_memset(pkt, 0, 2 + MDC_DIGEST_LEN);
return res;
}
@@ -217,7 +217,7 @@ encrypt_free(void *priv)
{
struct EncStat *st = priv;
- memset(st, 0, sizeof(*st));
+ px_memset(st, 0, sizeof(*st));
px_free(st);
}
@@ -299,7 +299,7 @@ pkt_stream_free(void *priv)
{
struct PktStreamStat *st = priv;
- memset(st, 0, sizeof(*st));
+ px_memset(st, 0, sizeof(*st));
px_free(st);
}
@@ -490,7 +490,7 @@ write_prefix(PGP_Context *ctx, PushFilter *dst)
prefix[bs + 1] = prefix[bs - 1];
res = pushf_write(dst, prefix, bs + 2);
- memset(prefix, 0, bs + 2);
+ px_memset(prefix, 0, bs + 2);
return res < 0 ? res : 0;
}
@@ -552,7 +552,7 @@ write_symenc_sesskey(PGP_Context *ctx, PushFilter *dst)
if (res >= 0)
res = pushf_write(dst, pkt, pktlen);
- memset(pkt, 0, pktlen);
+ px_memset(pkt, 0, pktlen);
return res;
}
diff --git a/contrib/pgcrypto/pgp-info.c b/contrib/pgcrypto/pgp-info.c
index b75266f18c..9bfbbe6d0c 100644
--- a/contrib/pgcrypto/pgp-info.c
+++ b/contrib/pgcrypto/pgp-info.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/pgp-mpi-internal.c b/contrib/pgcrypto/pgp-mpi-internal.c
index d0e5830fe0..be95f2d092 100644
--- a/contrib/pgcrypto/pgp-mpi-internal.c
+++ b/contrib/pgcrypto/pgp-mpi-internal.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -33,7 +33,6 @@
#include "imath.h"
#include "px.h"
-#include "mbuf.h"
#include "pgp.h"
static mpz_t *
@@ -147,7 +146,7 @@ bn_to_mpi(mpz_t *bn)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
- * above that it uses 'arbitrary high number'. Following
+ * above that it uses 'arbitrary high number'. Following
* algorihm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorihm.
*
diff --git a/contrib/pgcrypto/pgp-mpi-openssl.c b/contrib/pgcrypto/pgp-mpi-openssl.c
index ed41e1151c..24484a6c54 100644
--- a/contrib/pgcrypto/pgp-mpi-openssl.c
+++ b/contrib/pgcrypto/pgp-mpi-openssl.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -33,7 +33,6 @@
#include <openssl/bn.h>
#include "px.h"
-#include "mbuf.h"
#include "pgp.h"
static BIGNUM *
@@ -82,7 +81,7 @@ bn_to_mpi(BIGNUM *bn)
*
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
- * above that it uses 'arbitrary high number'. Following
+ * above that it uses 'arbitrary high number'. Following
* algorihm hovers 10-70 bits above gpg values. And for
* larger p, it uses gpg's algorihm.
*
diff --git a/contrib/pgcrypto/pgp-mpi.c b/contrib/pgcrypto/pgp-mpi.c
index c8765b6d14..1da52acc9a 100644
--- a/contrib/pgcrypto/pgp-mpi.c
+++ b/contrib/pgcrypto/pgp-mpi.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -31,7 +31,6 @@
#include "postgres.h"
#include "px.h"
-#include "mbuf.h"
#include "pgp.h"
int
@@ -72,7 +71,7 @@ pgp_mpi_free(PGP_MPI *mpi)
{
if (mpi == NULL)
return 0;
- memset(mpi, 0, sizeof(*mpi) + mpi->bytes);
+ px_memset(mpi, 0, sizeof(*mpi) + mpi->bytes);
px_free(mpi);
return 0;
}
diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c
index d4eec03cc7..ad1fd08427 100644
--- a/contrib/pgcrypto/pgp-pgsql.c
+++ b/contrib/pgcrypto/pgp-pgsql.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -41,23 +41,6 @@
/*
* public functions
*/
-Datum pgp_sym_encrypt_text(PG_FUNCTION_ARGS);
-Datum pgp_sym_encrypt_bytea(PG_FUNCTION_ARGS);
-Datum pgp_sym_decrypt_text(PG_FUNCTION_ARGS);
-Datum pgp_sym_decrypt_bytea(PG_FUNCTION_ARGS);
-
-Datum pgp_pub_encrypt_text(PG_FUNCTION_ARGS);
-Datum pgp_pub_encrypt_bytea(PG_FUNCTION_ARGS);
-Datum pgp_pub_decrypt_text(PG_FUNCTION_ARGS);
-Datum pgp_pub_decrypt_bytea(PG_FUNCTION_ARGS);
-
-Datum pgp_key_id_w(PG_FUNCTION_ARGS);
-
-Datum pg_armor(PG_FUNCTION_ARGS);
-Datum pg_dearmor(PG_FUNCTION_ARGS);
-
-/* function headers */
-
PG_FUNCTION_INFO_V1(pgp_sym_encrypt_bytea);
PG_FUNCTION_INFO_V1(pgp_sym_encrypt_text);
PG_FUNCTION_INFO_V1(pgp_sym_decrypt_bytea);
@@ -87,11 +70,11 @@ add_block_entropy(PX_MD *md, text *data)
px_add_entropy(sha1, 20);
- memset(sha1, 0, 20);
+ px_memset(sha1, 0, 20);
}
/*
- * Mix user data into RNG. It is for user own interests to have
+ * Mix user data into RNG. It is for user own interests to have
* RNG state shuffled.
*/
static void
@@ -129,7 +112,7 @@ add_entropy(text *data1, text *data2, text *data3)
add_block_entropy(md, data3);
px_md_free(md);
- memset(rnd, 0, sizeof(rnd));
+ px_memset(rnd, 0, sizeof(rnd));
}
/*
@@ -167,7 +150,7 @@ convert_to_utf8(text *src)
static void
clear_and_pfree(text *p)
{
- memset(p, 0, VARSIZE(p));
+ px_memset(p, 0, VARSIZE(p));
pfree(p);
}
@@ -308,7 +291,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
}
/*
- * Find next word. Handle ',' and '=' as words. Skip whitespace.
+ * Find next word. Handle ',' and '=' as words. Skip whitespace.
* Put word info into res_p, res_len.
* Returns ptr to next word.
*/
diff --git a/contrib/pgcrypto/pgp-pubdec.c b/contrib/pgcrypto/pgp-pubdec.c
index fe5fae0c42..b925ff8599 100644
--- a/contrib/pgcrypto/pgp-pubdec.c
+++ b/contrib/pgcrypto/pgp-pubdec.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -31,7 +31,6 @@
#include "postgres.h"
#include "px.h"
-#include "mbuf.h"
#include "pgp.h"
/*
diff --git a/contrib/pgcrypto/pgp-pubenc.c b/contrib/pgcrypto/pgp-pubenc.c
index 943d2e49f5..3b43bb61c0 100644
--- a/contrib/pgcrypto/pgp-pubenc.c
+++ b/contrib/pgcrypto/pgp-pubenc.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -31,7 +31,6 @@
#include "postgres.h"
#include "px.h"
-#include "mbuf.h"
#include "pgp.h"
/*
@@ -73,7 +72,7 @@ pad_eme_pkcs1_v15(uint8 *data, int data_len, int res_len, uint8 **res_p)
if (res < 0)
{
- memset(buf, 0, res_len);
+ px_memset(buf, 0, res_len);
px_free(buf);
return res;
}
@@ -123,10 +122,10 @@ create_secmsg(PGP_Context *ctx, PGP_MPI **msg_p, int full_bytes)
if (padded)
{
- memset(padded, 0, full_bytes);
+ px_memset(padded, 0, full_bytes);
px_free(padded);
}
- memset(secmsg, 0, klen + 3);
+ px_memset(secmsg, 0, klen + 3);
px_free(secmsg);
if (res >= 0)
diff --git a/contrib/pgcrypto/pgp-pubkey.c b/contrib/pgcrypto/pgp-pubkey.c
index 283e0ec17e..f898d72ae9 100644
--- a/contrib/pgcrypto/pgp-pubkey.c
+++ b/contrib/pgcrypto/pgp-pubkey.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -77,7 +77,7 @@ pgp_key_free(PGP_PubKey *pk)
pgp_mpi_free(pk->sec.dsa.x);
break;
}
- memset(pk, 0, sizeof(*pk));
+ px_memset(pk, 0, sizeof(*pk));
px_free(pk);
}
@@ -150,7 +150,7 @@ calc_key_id(PGP_PubKey *pk)
px_md_free(md);
memcpy(pk->key_id, hash + 12, 8);
- memset(hash, 0, 20);
+ px_memset(hash, 0, 20);
return 0;
}
@@ -291,8 +291,8 @@ check_key_sha1(PullFilter *src, PGP_PubKey *pk)
res = PXE_PGP_KEYPKT_CORRUPT;
}
err:
- memset(got_sha1, 0, 20);
- memset(my_sha1, 0, 20);
+ px_memset(got_sha1, 0, 20);
+ px_memset(my_sha1, 0, 20);
return res;
}
@@ -408,16 +408,16 @@ process_secret_key(PullFilter *pkt, PGP_PubKey **pk_p,
case PGP_PUB_RSA_SIGN:
case PGP_PUB_RSA_ENCRYPT:
case PGP_PUB_RSA_ENCRYPT_SIGN:
- res = pgp_mpi_read(pkt, &pk->sec.rsa.d);
+ res = pgp_mpi_read(pf_key, &pk->sec.rsa.d);
if (res < 0)
break;
- res = pgp_mpi_read(pkt, &pk->sec.rsa.p);
+ res = pgp_mpi_read(pf_key, &pk->sec.rsa.p);
if (res < 0)
break;
- res = pgp_mpi_read(pkt, &pk->sec.rsa.q);
+ res = pgp_mpi_read(pf_key, &pk->sec.rsa.q);
if (res < 0)
break;
- res = pgp_mpi_read(pkt, &pk->sec.rsa.u);
+ res = pgp_mpi_read(pf_key, &pk->sec.rsa.u);
if (res < 0)
break;
break;
diff --git a/contrib/pgcrypto/pgp-s2k.c b/contrib/pgcrypto/pgp-s2k.c
index 349234e243..193dd95173 100644
--- a/contrib/pgcrypto/pgp-s2k.c
+++ b/contrib/pgcrypto/pgp-s2k.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -32,7 +32,6 @@
#include "postgres.h"
#include "px.h"
-#include "mbuf.h"
#include "pgp.h"
static int
@@ -75,6 +74,7 @@ calc_s2k_simple(PGP_S2K *s2k, PX_MD *md, const uint8 *key,
remain = 0;
}
}
+ px_memset(buf, 0, sizeof(buf));
return 0;
}
@@ -118,6 +118,7 @@ calc_s2k_salted(PGP_S2K *s2k, PX_MD *md, const uint8 *key, unsigned key_len)
remain = 0;
}
}
+ px_memset(buf, 0, sizeof(buf));
return 0;
}
@@ -189,6 +190,7 @@ calc_s2k_iter_salted(PGP_S2K *s2k, PX_MD *md, const uint8 *key,
remain = 0;
}
}
+ px_memset(buf, 0, sizeof(buf));
return 0;
}
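
Each of the three hunks above adds one px_memset(buf, 0, sizeof(buf)) so the S2K helpers no longer return with key-derived bytes left in their stack buffers. A self-contained sketch of that scrub-on-exit pattern is below; the key mixing is a placeholder rather than the real S2K schedule, and plain memset() stands in for px_memset().

#include <stddef.h>
#include <string.h>

/* Toy key-derivation helper showing the scrub-on-exit pattern (klen > 0). */
void
toy_s2k(const unsigned char *key, size_t klen,
		unsigned char *out, size_t outlen)
{
	unsigned char buf[64];		/* scratch buffer holding key material */
	size_t		i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = key[i % klen] ^ (unsigned char) i;		/* placeholder mixing */

	for (i = 0; i < outlen && i < sizeof(buf); i++)
		out[i] = buf[i];

	memset(buf, 0, sizeof(buf));	/* px_memset() in the patched code */
}
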
diff --git a/contrib/pgcrypto/pgp.c b/contrib/pgcrypto/pgp.c
index b8a6bc49b4..03fe48fb64 100644
--- a/contrib/pgcrypto/pgp.c
+++ b/contrib/pgcrypto/pgp.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -32,7 +32,6 @@
#include "postgres.h"
#include "px.h"
-#include "mbuf.h"
#include "pgp.h"
/*
@@ -225,7 +224,7 @@ pgp_free(PGP_Context *ctx)
{
if (ctx->pub_key)
pgp_key_free(ctx->pub_key);
- memset(ctx, 0, sizeof *ctx);
+ px_memset(ctx, 0, sizeof *ctx);
px_free(ctx);
return 0;
}
diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h
index 7ae01ccc4d..8d4ab9862d 100644
--- a/contrib/pgcrypto/pgp.h
+++ b/contrib/pgcrypto/pgp.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -29,6 +29,9 @@
* contrib/pgcrypto/pgp.h
*/
+#include "mbuf.h"
+#include "px.h"
+
enum PGP_S2K_TYPE
{
PGP_S2K_SIMPLE = 0,
@@ -265,8 +268,7 @@ int pgp_s2k_read(PullFilter *src, PGP_S2K *s2k);
int pgp_s2k_process(PGP_S2K *s2k, int cipher, const uint8 *key, int klen);
typedef struct PGP_CFB PGP_CFB;
-int
-pgp_cfb_create(PGP_CFB **ctx_p, int algo,
+int pgp_cfb_create(PGP_CFB **ctx_p, int algo,
const uint8 *key, int key_len, int recync, uint8 *iv);
void pgp_cfb_free(PGP_CFB *ctx);
int pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
diff --git a/contrib/pgcrypto/px-crypt.c b/contrib/pgcrypto/px-crypt.c
index 63ec038dc5..7b003a76ca 100644
--- a/contrib/pgcrypto/px-crypt.c
+++ b/contrib/pgcrypto/px-crypt.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -158,7 +158,7 @@ px_gen_salt(const char *salt_type, char *buf, int rounds)
return res;
p = g->gen(rounds, rbuf, g->input_len, buf, PX_MAX_SALT_LEN);
- memset(rbuf, 0, sizeof(rbuf));
+ px_memset(rbuf, 0, sizeof(rbuf));
if (p == NULL)
return PXE_BAD_SALT_ROUNDS;
diff --git a/contrib/pgcrypto/px-crypt.h b/contrib/pgcrypto/px-crypt.h
index 7dde9ab77b..24daee743c 100644
--- a/contrib/pgcrypto/px-crypt.h
+++ b/contrib/pgcrypto/px-crypt.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/px-hmac.c b/contrib/pgcrypto/px-hmac.c
index 36efabd4a3..06e5148f1b 100644
--- a/contrib/pgcrypto/px-hmac.c
+++ b/contrib/pgcrypto/px-hmac.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -75,7 +75,7 @@ hmac_init(PX_HMAC *h, const uint8 *key, unsigned klen)
h->p.opad[i] = keybuf[i] ^ HMAC_OPAD;
}
- memset(keybuf, 0, bs);
+ px_memset(keybuf, 0, bs);
px_free(keybuf);
px_md_update(md, h->p.ipad, bs);
@@ -117,7 +117,7 @@ hmac_finish(PX_HMAC *h, uint8 *dst)
px_md_update(md, buf, hlen);
px_md_finish(md, dst);
- memset(buf, 0, hlen);
+ px_memset(buf, 0, hlen);
px_free(buf);
}
@@ -129,8 +129,8 @@ hmac_free(PX_HMAC *h)
bs = px_md_block_size(h->md);
px_md_free(h->md);
- memset(h->p.ipad, 0, bs);
- memset(h->p.opad, 0, bs);
+ px_memset(h->p.ipad, 0, bs);
+ px_memset(h->p.opad, 0, bs);
px_free(h->p.ipad);
px_free(h->p.opad);
px_free(h);
diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c
index f23d4de573..93c436daa0 100644
--- a/contrib/pgcrypto/px.c
+++ b/contrib/pgcrypto/px.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -104,6 +104,12 @@ px_strerror(int err)
return "Bad error code";
}
+/* memset that must not be optimized away */
+void
+px_memset(void *ptr, int c, size_t len)
+{
+ memset(ptr, c, len);
+}
const char *
px_resolve_alias(const PX_Alias *list, const char *name)
@@ -327,7 +333,7 @@ combo_free(PX_Combo *cx)
{
if (cx->cipher)
px_cipher_free(cx->cipher);
- memset(cx, 0, sizeof(*cx));
+ px_memset(cx, 0, sizeof(*cx));
px_free(cx);
}
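
The px_memset() added above is deliberately a plain wrapper: because it lives in its own translation unit, the compiler cannot prove the stores dead and elide them at the many call sites that now scrub buffers just before freeing them. A common alternative, shown here only as a sketch and not what this patch does, forces the writes through a volatile-qualified pointer:

#include <stddef.h>

/*
 * Illustrative alternative to px_memset(): writing through a volatile
 * pointer prevents dead-store elimination even when the buffer is about
 * to be freed.  Not part of the pgcrypto implementation.
 */
void
secure_memset(void *ptr, int c, size_t len)
{
	volatile unsigned char *p = (volatile unsigned char *) ptr;

	while (len--)
		*p++ = (unsigned char) c;
}
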
diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h
index 80e8624460..a01a58e29c 100644
--- a/contrib/pgcrypto/px.h
+++ b/contrib/pgcrypto/px.h
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -203,6 +203,8 @@ const char *px_resolve_alias(const PX_Alias *aliases, const char *name);
void px_set_debug_handler(void (*handler) (const char *));
+void px_memset(void *ptr, int c, size_t len);
+
#ifdef PX_DEBUG
void
px_debug(const char *fmt,...)
diff --git a/contrib/pgcrypto/random.c b/contrib/pgcrypto/random.c
index 393a0be983..3f092ca346 100644
--- a/contrib/pgcrypto/random.c
+++ b/contrib/pgcrypto/random.c
@@ -17,7 +17,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/rijndael.c b/contrib/pgcrypto/rijndael.c
index 5651d03750..4adbcc1f91 100644
--- a/contrib/pgcrypto/rijndael.c
+++ b/contrib/pgcrypto/rijndael.c
@@ -7,12 +7,12 @@
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
-/* programme of the US National Institute of Standards and Technology. */
+/* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
-/* that the originators of the algorithm place on its exploitation. */
+/* that the originators of the algorithm place on its exploitation. */
/* */
/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */
@@ -188,7 +188,7 @@ gen_tabs(void)
/* rijndael specification is in big endian format with */
/* bit 0 as the most significant bit. In the remainder */
/* of the specification the bits are numbered from the */
- /* least significant end of a byte. */
+ /* least significant end of a byte. */
for (i = 0; i < 256; ++i)
{
diff --git a/contrib/pgcrypto/rijndael.h b/contrib/pgcrypto/rijndael.h
index fb30e46c14..e536c61a6f 100644
--- a/contrib/pgcrypto/rijndael.h
+++ b/contrib/pgcrypto/rijndael.h
@@ -8,12 +8,12 @@
/* RIJNDAEL by Joan Daemen and Vincent Rijmen */
/* */
/* which is a candidate algorithm in the Advanced Encryption Standard */
-/* programme of the US National Institute of Standards and Technology. */
+/* programme of the US National Institute of Standards and Technology. */
/* */
/* Copyright in this implementation is held by Dr B R Gladman but I */
/* hereby give permission for its free direct or derivative use subject */
/* to acknowledgment of its origin and compliance with any conditions */
-/* that the originators of the algorithm place on its exploitation. */
+/* that the originators of the algorithm place on its exploitation. */
/* */
/* Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999 */
diff --git a/contrib/pgcrypto/sha1.c b/contrib/pgcrypto/sha1.c
index ac406faf7e..0e753ce63a 100644
--- a/contrib/pgcrypto/sha1.c
+++ b/contrib/pgcrypto/sha1.c
@@ -19,7 +19,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/sha1.h b/contrib/pgcrypto/sha1.h
index 3e0931efbc..5532ca160d 100644
--- a/contrib/pgcrypto/sha1.h
+++ b/contrib/pgcrypto/sha1.h
@@ -20,7 +20,7 @@
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/sha2.c b/contrib/pgcrypto/sha2.c
index 5de94b2fcd..231f9dfbb0 100644
--- a/contrib/pgcrypto/sha2.c
+++ b/contrib/pgcrypto/sha2.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -40,6 +40,7 @@
#include <sys/param.h>
+#include "px.h"
#include "sha2.h"
/*
@@ -570,7 +571,7 @@ SHA256_Final(uint8 digest[], SHA256_CTX *context)
}
/* Clean up state data: */
- memset(context, 0, sizeof(*context));
+ px_memset(context, 0, sizeof(*context));
}
@@ -899,7 +900,7 @@ SHA512_Final(uint8 digest[], SHA512_CTX *context)
}
/* Zero out state data */
- memset(context, 0, sizeof(*context));
+ px_memset(context, 0, sizeof(*context));
}
@@ -944,7 +945,7 @@ SHA384_Final(uint8 digest[], SHA384_CTX *context)
}
/* Zero out state data */
- memset(context, 0, sizeof(*context));
+ px_memset(context, 0, sizeof(*context));
}
/*** SHA-224: *********************************************************/
@@ -987,5 +988,5 @@ SHA224_Final(uint8 digest[], SHA224_CTX *context)
}
/* Clean up state data: */
- memset(context, 0, sizeof(*context));
+ px_memset(context, 0, sizeof(*context));
}
diff --git a/contrib/pgcrypto/sha2.h b/contrib/pgcrypto/sha2.h
index df77a7a659..501f0e0446 100644
--- a/contrib/pgcrypto/sha2.h
+++ b/contrib/pgcrypto/sha2.h
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/contrib/pgcrypto/sql/pgp-encrypt.sql b/contrib/pgcrypto/sql/pgp-encrypt.sql
index cac37c8442..a9ac0b924b 100644
--- a/contrib/pgcrypto/sql/pgp-encrypt.sql
+++ b/contrib/pgcrypto/sql/pgp-encrypt.sql
@@ -8,7 +8,7 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), 'key');
-- check whether the defaults are ok
select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
- 'key', 'expect-cipher-algo=aes128,
+ 'key', 'expect-cipher-algo=aes128,
expect-disable-mdc=0,
expect-sess-key=0,
expect-s2k-mode=3,
@@ -18,7 +18,7 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
-- maybe the expect- stuff simply does not work
select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
- 'key', 'expect-cipher-algo=bf,
+ 'key', 'expect-cipher-algo=bf,
expect-disable-mdc=1,
expect-sess-key=1,
expect-s2k-mode=0,
@@ -36,62 +36,62 @@ select pgp_sym_decrypt_bytea(pgp_sym_encrypt('Text', 'baz'), 'baz');
-- algorithm change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=bf'),
- 'key', 'expect-cipher-algo=bf');
+ 'key', 'expect-cipher-algo=bf');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes'),
- 'key', 'expect-cipher-algo=aes128');
+ 'key', 'expect-cipher-algo=aes128');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes192'),
- 'key', 'expect-cipher-algo=aes192');
+ 'key', 'expect-cipher-algo=aes192');
-- s2k change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=0'),
- 'key', 'expect-s2k-mode=0');
+ 'key', 'expect-s2k-mode=0');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=1'),
- 'key', 'expect-s2k-mode=1');
+ 'key', 'expect-s2k-mode=1');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=3'),
- 'key', 'expect-s2k-mode=3');
+ 'key', 'expect-s2k-mode=3');
-- s2k digest change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=md5'),
- 'key', 'expect-s2k-digest-algo=md5');
+ 'key', 'expect-s2k-digest-algo=md5');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=sha1'),
- 'key', 'expect-s2k-digest-algo=sha1');
+ 'key', 'expect-s2k-digest-algo=sha1');
-- sess key
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=0'),
- 'key', 'expect-sess-key=0');
+ 'key', 'expect-sess-key=0');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1'),
- 'key', 'expect-sess-key=1');
+ 'key', 'expect-sess-key=1');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=bf'),
- 'key', 'expect-sess-key=1, expect-cipher-algo=bf');
+ 'key', 'expect-sess-key=1, expect-cipher-algo=bf');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes192'),
- 'key', 'expect-sess-key=1, expect-cipher-algo=aes192');
+ 'key', 'expect-sess-key=1, expect-cipher-algo=aes192');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes256'),
- 'key', 'expect-sess-key=1, expect-cipher-algo=aes256');
+ 'key', 'expect-sess-key=1, expect-cipher-algo=aes256');
-- no mdc
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'disable-mdc=1'),
- 'key', 'expect-disable-mdc=1');
+ 'key', 'expect-disable-mdc=1');
-- crlf
select encode(pgp_sym_decrypt_bytea(
pgp_sym_encrypt(E'1\n2\n3\r\n', 'key', 'convert-crlf=1'),
- 'key'), 'hex');
+ 'key'), 'hex');
-- conversion should be lossless
select encode(digest(pgp_sym_decrypt(
pgp_sym_encrypt(E'\r\n0\n1\r\r\n\n2\r', 'key', 'convert-crlf=1'),
- 'key', 'convert-crlf=1'), 'sha1'), 'hex') as result,
+ 'key', 'convert-crlf=1'), 'sha1'), 'hex') as result,
encode(digest(E'\r\n0\n1\r\r\n\n2\r', 'sha1'), 'hex') as expect;
diff --git a/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql b/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql
index cc82420084..f8495d1e54 100644
--- a/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql
+++ b/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql
@@ -426,6 +426,71 @@ hbt6LhKhCLUNdz/udIt0JAC6c/HdPLSW3HnmM3+iNj+Kug==
-----END PGP PRIVATE KEY BLOCK-----
');
+insert into keytbl (id, name, pubkey, seckey)
+values (7, 'rsaenc2048-psw', '
+same key with password
+', '
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+lQPEBELr2m0BCADOrnknlnXI0EzRExf/TgoHvK7Xx/E0keWqV3KrOyC3/tY2KOrj
+UVxaAX5pkFX9wdQObGPIJm06u6D16CH6CildX/vxG7YgvvKzK8JGAbwrXAfk7OIW
+czO2zRaZGDynoK3mAxHRBReyTKtNv8rDQhuZs6AOozJNARdbyUO/yqUnqNNygWuT
+4htFDEuLPIJwAbMSD0BvFW6YQaPdxzaAZm3EWVNbwDzjgbBUdBiUUwRdZIFUhsjJ
+dirFdy5+uuZru6y6CNC1OERkJ7P8EyoFiZckAIE5gshVZzNuyLOZjc5DhWBvLbX4
+NZElAnfiv+4nA6y8wQLSIbmHA3nqJaBklj85AAYp/gcDCNnoEKwFo86JYCE1J92R
+HRQ7DoyAZpW1O0dTXL8Epk0sKsKDrCJOrIkDymsjfyBexADIeqOkioy/50wD2Mku
+CVHKWO2duAiJN5t/FoRgpR1/Q11K6QdfqOG0HxwfIXLcPv7eSIso8kWorj+I01BP
+Fn/atGEbIjdWaz/q2XHbu0Q3x6Et2gIsbLRVMhiYz1UG9uzGJ0TYCdBa2SFhs184
+52akMpD+XVdM0Sq9/Cx40Seo8hzERB96+GXnQ48q2OhlvcEXiFyD6M6wYCWbEV+6
+XQVMymbl22FPP/bD9ReQX2kjrkQlFAtmhr+0y8reMCbcxwLuQfA3173lSPo7jrbH
+oLrGhkRpqd2bYCelqdy/XMmRFso0+7uytHfTFrUNfDWfmHVrygoVrNnarCbxMMI0
+I8Q+tKHMThWgf0rIOSh0+w38kOXFCEqEWF8YkAqCrMZIlJIed78rOCFgG4aHajZR
+D8rpXdUOIr/WeUddK25Tu8IuNJb0kFf12IMgNh0nS+mzlqWiofS5kA0TeB8wBV6t
+RotaeyDNSsMoowfN8cf1yHMTxli+K1Tasg003WVUoWgUc+EsJ5+KTNwaX5uGv0Cs
+j6dg6/FVeVRL9UsyF+2kt7euX3mABuUtcVGx/ZKTq/MNGEh6/r3B5U37qt+FDRbw
+ppKPc2AP+yBUWsQskyrxFgv4eSpcLEg+lgdz/zLyG4qW4lrFUoO790Cm/J6C7/WQ
+Z+E8kcS8aINJkg1skahH31d59ZkbW9PVeJMFGzNb0Z2LowngNP/BMrJ0LT2CQyLs
+UxbT16S/gwAyUpJnbhWYr3nDdlwtC0rVopVTPD7khPRppcsq1f8D70rdIxI4Ouuw
+vbjNZ1EWRJ9f2Ywb++k/xgSXwJkGodUlrUr+3i8cv8mPx+fWvif9q7Y5Ex1wCRa8
+8FAj/o+hEbQlUlNBIDIwNDggRW5jIDxyc2EyMDQ4ZW5jQGV4YW1wbGUub3JnPokB
+NAQTAQIAHgUCQuvabQIbAwYLCQgHAwIDFQIDAxYCAQIeAQIXgAAKCRDImeqTRBlV
+WRzJCACbRhx2fYjPGKta69M5dS+kr5UD/CQmsR2t9cB9zyqhratjPnKW9q13+4AG
+P3aByT14IH1c5Mha8rJkNYD2wxmC8jrrcPiJIYoRG+W1sUATY/t8wBbNWF+r9h11
+m0lEpsmNVff/jU7SpNN6JQ3P7MHd5V85LlDoXIH6QYCLd0PjKU+jNvjiBe5VX0m9
+a1nacE3xoWc1vbM0DnqEuID78Qgkcrmm0ESeg1h+tRfHxSAyYNc/gPzm8eH6l+hj
+gOvUc4Gd6LpBQSF8TcFfT2TZwJh7WVWDvNIP6FWAW7rzmHnX3wwXkGq4REWeVtk5
+yBPp6mOtWDiwaqLJYsoHWU11C8zYnQPEBELr2roBCADrgiWXZMzkQOntZa/NS56+
+CczLFQRQPl/8iJAW1eql/wOJ1UiwGSjT189WCKzE7vtazCIstdCFmwOs4DE6cz4S
+UX4HjzjYHZwmMiuSrIefwuZ7cysMBsMXypQFyMSbqwh102xGvmLz3Z++rydx7Fzl
+1RC/ny2+FN5dzYPO2DNtNi4dR2tjHktsxBWXAKCmxagAIwyxGouuEqDhYdFtwrA9
+Qy+M5n6fmGa1Dx07WWnbIud4uCilv8LPVKx5aJamDYWM3v7kS8n51MfTzeK/xoRM
+2rsgzFdLJqPdbgd2nsD37fngqZnlp7tDxSVSuMckZoSKtq1QsNemtaQSYq7xjPst
+AAYp/gcDCNnoEKwFo86JYAsxoD+wQ0zBi5RBM5EphXTpM1qKxmigsKOvBSaMmr0y
+VjHtGY3poyV3t6VboOGCsFcaKm0tIdDL7vrxxwyYESETpF29b7QrYcoaLKMG7fsy
+t9SUI3UV2H9uUquHgqHtsqz0jYOgm9tYnpesgQ/kOAWI/tej1ZJXUIWEmZMH/W6d
+ATNvZ3ivwApfC0qF5G3oPgBSoIuQ/8I+pN/kmuyNAnJWNgagFhA/2VFBvh5XgztV
+NW7G//KpR1scsn140SO/wpGBM3Kr4m8ztl9w9U6a7NlQZ2ub3/pIUTpSzyLBxJZ/
+RfuZI7ROdgDMKmEgCYrN2kfp0LIxnYL6ZJu3FDcS4V098lyf5rHvB3PAEdL6Zyhd
+qYp3Sx68r0F4vzk5iAIWf6pG2YdfoP2Z48Pmq9xW8qD9iwFcoz9oAzDEMENn6dfq
+6MzfoaXEoYp8cR/o+aeEaGUtYBHiaxQcJYx35B9IhsXXA49yRORK8qdwhSHxB3NQ
+H3pUWkfw368f/A207hQVs9yYXlEvMZikxl58gldCd3BAPqHm/XzgknRRNQZBPPKJ
+BMZebZ22Dm0qDuIqW4GXLB4sLf0+UXydVINIUOlzg+S4jrwx7eZqb6UkRXTIWVo5
+psTsD14wzWBRdUQHZOZD33+M8ugmewvLY/0Uix+2RorkmB7/jqoZvx/MehDwmCZd
+VH8sb2wpZ55sj7gCXxvrfieQD/VeH54OwjjbtK56iYq56RVD0h1az8xDY2GZXeT7
+J0c3BGpuoca5xOFWr1SylAr/miEPxOBfnfk8oZQJvZrjSBGjsTbALep2vDJk8ROD
+sdQCJuU1RHDrwKHlbUL0NbGRO2juJGsatdWnuVKsFbaFW2pHHkezKuwOcaAJv7Xt
+8LRF17czAJ1uaLKwV8Paqx6UIv+089GbWZi7HIkBHwQYAQIACQUCQuvaugIbDAAK
+CRDImeqTRBlVWS7XCACDVstKM+SHD6V0bkfO6ampHzj4krKjN0lonN5+7b7WKpgT
+QHRYvPY8lUiIrjXGISQqEG9M5Bi5ea1aoBZem0P3U/lKheg0lYtA7dM3BqsA2EfG
+RaDD9M5TFCqhy2VFR6Pk0MP7h5bkb2VxLUUQa4oNa1fT3q7zS875NvImO/HZ5UzW
+T5d2Z5iwY6I2AOKYKt4kZhzXgbt5j2O3biDDXSfWwwAojWqbqVygepn047KVr7Al
+2ug9hkY7tHz7U71HbZasroFgNPmP/UnAxmps4RKM28MRVPTI4cKUIdE3gIKFu3ou
+EqEItQ13P+50i3QkALpz8d08tJbceeYzf6I2P4q6
+=QFm5
+-----END PGP PRIVATE KEY BLOCK-----
+');
+
-- elg1024 / aes128
insert into encdata (id, data) values (1, '
@@ -535,6 +600,14 @@ from keytbl, encdata where keytbl.id=2 and encdata.id=1;
select pgp_pub_decrypt(dearmor(data), dearmor(seckey))
from keytbl, encdata where keytbl.id=4 and encdata.id=1;
+-- rsa: password-protected secret key, wrong password
+select pgp_pub_decrypt(dearmor(data), dearmor(seckey), '123')
+from keytbl, encdata where keytbl.id=7 and encdata.id=4;
+
+-- rsa: password-protected secret key, right password
+select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 'parool')
+from keytbl, encdata where keytbl.id=7 and encdata.id=4;
+
-- password-protected secret key, no password
select pgp_pub_decrypt(dearmor(data), dearmor(seckey))
from keytbl, encdata where keytbl.id=5 and encdata.id=1;
diff --git a/contrib/pgrowlocks/Makefile b/contrib/pgrowlocks/Makefile
index f56389b0e2..fe8042344f 100644
--- a/contrib/pgrowlocks/Makefile
+++ b/contrib/pgrowlocks/Makefile
@@ -4,7 +4,7 @@ MODULE_big = pgrowlocks
OBJS = pgrowlocks.o
EXTENSION = pgrowlocks
-DATA = pgrowlocks--1.0.sql pgrowlocks--unpackaged--1.0.sql
+DATA = pgrowlocks--1.1.sql pgrowlocks--1.0--1.1.sql pgrowlocks--unpackaged--1.0.sql
ifdef USE_PGXS
PG_CONFIG = pg_config
diff --git a/contrib/pgrowlocks/pgrowlocks--1.0--1.1.sql b/contrib/pgrowlocks/pgrowlocks--1.0--1.1.sql
new file mode 100644
index 0000000000..3d5ca34c08
--- /dev/null
+++ b/contrib/pgrowlocks/pgrowlocks--1.0--1.1.sql
@@ -0,0 +1,17 @@
+/* contrib/pgrowlocks/pgrowlocks--1.0--1.1.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pgrowlocks UPDATE TO '1.1'" to load this file. \quit
+
+ALTER EXTENSION pgrowlocks DROP FUNCTION pgrowlocks(text);
+DROP FUNCTION pgrowlocks(text);
+CREATE FUNCTION pgrowlocks(IN relname text,
+ OUT locked_row TID, -- row TID
+ OUT locker XID, -- locking XID
+ OUT multi bool, -- multi XID?
+ OUT xids xid[], -- multi XIDs
+ OUT modes text[], -- multi XID statuses
+ OUT pids INTEGER[]) -- locker's process id
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'pgrowlocks'
+LANGUAGE C STRICT;
diff --git a/contrib/pgrowlocks/pgrowlocks--1.0.sql b/contrib/pgrowlocks/pgrowlocks--1.1.sql
index a909b7430d..29079f4923 100644
--- a/contrib/pgrowlocks/pgrowlocks--1.0.sql
+++ b/contrib/pgrowlocks/pgrowlocks--1.1.sql
@@ -1,14 +1,14 @@
-/* contrib/pgrowlocks/pgrowlocks--1.0.sql */
+/* contrib/pgrowlocks/pgrowlocks--1.1.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION pgrowlocks" to load this file. \quit
CREATE FUNCTION pgrowlocks(IN relname text,
OUT locked_row TID, -- row TID
- OUT lock_type TEXT, -- lock type
OUT locker XID, -- locking XID
OUT multi bool, -- multi XID?
OUT xids xid[], -- multi XIDs
+ OUT modes text[], -- multi XID statuses
OUT pids INTEGER[]) -- locker's process id
RETURNS SETOF record
AS 'MODULE_PATHNAME', 'pgrowlocks'
diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c
index 20beed2a30..15d9704752 100644
--- a/contrib/pgrowlocks/pgrowlocks.c
+++ b/contrib/pgrowlocks/pgrowlocks.c
@@ -35,6 +35,7 @@
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/rel.h"
+#include "utils/snapmgr.h"
#include "utils/tqual.h"
@@ -42,8 +43,6 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(pgrowlocks);
-extern Datum pgrowlocks(PG_FUNCTION_ARGS);
-
/* ----------
* pgrowlocks:
* returns tids of rows being locked
@@ -59,6 +58,13 @@ typedef struct
int ncolumns;
} MyData;
+#define Atnum_tid 0
+#define Atnum_xmax 1
+#define Atnum_ismulti 2
+#define Atnum_xids 3
+#define Atnum_modes 4
+#define Atnum_pids 5
+
Datum
pgrowlocks(PG_FUNCTION_ARGS)
{
@@ -99,7 +105,7 @@ pgrowlocks(PG_FUNCTION_ARGS)
aclcheck_error(aclresult, ACL_KIND_CLASS,
RelationGetRelationName(rel));
- scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
+ scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
mydata = palloc(sizeof(*mydata));
mydata->rel = rel;
mydata->scan = scan;
@@ -117,79 +123,151 @@ pgrowlocks(PG_FUNCTION_ARGS)
/* scan the relation */
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
+ HTSU_Result htsu;
+ TransactionId xmax;
+ uint16 infomask;
+
/* must hold a buffer lock to call HeapTupleSatisfiesUpdate */
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
- if (HeapTupleSatisfiesUpdate(tuple->t_data,
- GetCurrentCommandId(false),
- scan->rs_cbuf) == HeapTupleBeingUpdated)
+ htsu = HeapTupleSatisfiesUpdate(tuple,
+ GetCurrentCommandId(false),
+ scan->rs_cbuf);
+ xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
+ infomask = tuple->t_data->t_infomask;
+
+ /*
+ * a tuple is locked if HTSU returns BeingUpdated, and if it returns
+ * MayBeUpdated but the Xmax is valid and pointing at us.
+ */
+ if (htsu == HeapTupleBeingUpdated ||
+ (htsu == HeapTupleMayBeUpdated &&
+ !(infomask & HEAP_XMAX_INVALID) &&
+ !(infomask & HEAP_XMAX_IS_MULTI) &&
+ (xmax == GetCurrentTransactionIdIfAny())))
{
-
char **values;
- int i;
values = (char **) palloc(mydata->ncolumns * sizeof(char *));
- i = 0;
- values[i++] = (char *) DirectFunctionCall1(tidout, PointerGetDatum(&tuple->t_self));
+ values[Atnum_tid] = (char *) DirectFunctionCall1(tidout,
+ PointerGetDatum(&tuple->t_self));
- if (tuple->t_data->t_infomask & HEAP_XMAX_SHARED_LOCK)
- values[i++] = pstrdup("Shared");
- else
- values[i++] = pstrdup("Exclusive");
- values[i] = palloc(NCHARS * sizeof(char));
- snprintf(values[i++], NCHARS, "%d", HeapTupleHeaderGetXmax(tuple->t_data));
- if (tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI)
+ values[Atnum_xmax] = palloc(NCHARS * sizeof(char));
+ snprintf(values[Atnum_xmax], NCHARS, "%d", xmax);
+ if (infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId *xids;
- int nxids;
- int j;
- int isValidXid = 0; /* any valid xid ever exists? */
-
- values[i++] = pstrdup("true");
- nxids = GetMultiXactIdMembers(HeapTupleHeaderGetXmax(tuple->t_data), &xids);
- if (nxids == -1)
+ MultiXactMember *members;
+ int nmembers;
+ bool first = true;
+ bool allow_old;
+
+ values[Atnum_ismulti] = pstrdup("true");
+
+ allow_old = !(infomask & HEAP_LOCK_MASK) &&
+ (infomask & HEAP_XMAX_LOCK_ONLY);
+ nmembers = GetMultiXactIdMembers(xmax, &members, allow_old);
+ if (nmembers == -1)
{
- elog(ERROR, "GetMultiXactIdMembers returns error");
+ values[Atnum_xids] = "{0}";
+ values[Atnum_modes] = "{transient upgrade status}";
+ values[Atnum_pids] = "{0}";
}
+ else
+ {
+ int j;
- values[i] = palloc(NCHARS * nxids);
- values[i + 1] = palloc(NCHARS * nxids);
- strcpy(values[i], "{");
- strcpy(values[i + 1], "{");
+ values[Atnum_xids] = palloc(NCHARS * nmembers);
+ values[Atnum_modes] = palloc(NCHARS * nmembers);
+ values[Atnum_pids] = palloc(NCHARS * nmembers);
- for (j = 0; j < nxids; j++)
- {
- char buf[NCHARS];
+ strcpy(values[Atnum_xids], "{");
+ strcpy(values[Atnum_modes], "{");
+ strcpy(values[Atnum_pids], "{");
- if (TransactionIdIsInProgress(xids[j]))
+ for (j = 0; j < nmembers; j++)
{
- if (isValidXid)
+ char buf[NCHARS];
+
+ if (!first)
+ {
+ strcat(values[Atnum_xids], ",");
+ strcat(values[Atnum_modes], ",");
+ strcat(values[Atnum_pids], ",");
+ }
+ snprintf(buf, NCHARS, "%d", members[j].xid);
+ strcat(values[Atnum_xids], buf);
+ switch (members[j].status)
{
- strcat(values[i], ",");
- strcat(values[i + 1], ",");
+ case MultiXactStatusUpdate:
+ snprintf(buf, NCHARS, "Update");
+ break;
+ case MultiXactStatusNoKeyUpdate:
+ snprintf(buf, NCHARS, "No Key Update");
+ break;
+ case MultiXactStatusForUpdate:
+ snprintf(buf, NCHARS, "For Update");
+ break;
+ case MultiXactStatusForNoKeyUpdate:
+ snprintf(buf, NCHARS, "For No Key Update");
+ break;
+ case MultiXactStatusForShare:
+ snprintf(buf, NCHARS, "Share");
+ break;
+ case MultiXactStatusForKeyShare:
+ snprintf(buf, NCHARS, "Key Share");
+ break;
}
- snprintf(buf, NCHARS, "%d", xids[j]);
- strcat(values[i], buf);
- snprintf(buf, NCHARS, "%d", BackendXidGetPid(xids[j]));
- strcat(values[i + 1], buf);
+ strcat(values[Atnum_modes], buf);
+ snprintf(buf, NCHARS, "%d",
+ BackendXidGetPid(members[j].xid));
+ strcat(values[Atnum_pids], buf);
- isValidXid = 1;
+ first = false;
}
- }
- strcat(values[i], "}");
- strcat(values[i + 1], "}");
- i++;
+ strcat(values[Atnum_xids], "}");
+ strcat(values[Atnum_modes], "}");
+ strcat(values[Atnum_pids], "}");
+ }
}
else
{
- values[i++] = pstrdup("false");
- values[i] = palloc(NCHARS * sizeof(char));
- snprintf(values[i++], NCHARS, "{%d}", HeapTupleHeaderGetXmax(tuple->t_data));
+ values[Atnum_ismulti] = pstrdup("false");
+
+ values[Atnum_xids] = palloc(NCHARS * sizeof(char));
+ snprintf(values[Atnum_xids], NCHARS, "{%d}", xmax);
+
+ values[Atnum_modes] = palloc(NCHARS);
+ if (infomask & HEAP_XMAX_LOCK_ONLY)
+ {
+ if (HEAP_XMAX_IS_SHR_LOCKED(infomask))
+ snprintf(values[Atnum_modes], NCHARS, "{For Share}");
+ else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
+ snprintf(values[Atnum_modes], NCHARS, "{For Key Share}");
+ else if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
+ {
+ if (tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED)
+ snprintf(values[Atnum_modes], NCHARS, "{For Update}");
+ else
+ snprintf(values[Atnum_modes], NCHARS, "{For No Key Update}");
+ }
+ else
+ /* neither keyshare nor exclusive bit it set */
+ snprintf(values[Atnum_modes], NCHARS,
+ "{transient upgrade status}");
+ }
+ else
+ {
+ if (tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED)
+ snprintf(values[Atnum_modes], NCHARS, "{Update}");
+ else
+ snprintf(values[Atnum_modes], NCHARS, "{No Key Update}");
+ }
- values[i] = palloc(NCHARS * sizeof(char));
- snprintf(values[i++], NCHARS, "{%d}", BackendXidGetPid(HeapTupleHeaderGetXmax(tuple->t_data)));
+ values[Atnum_pids] = palloc(NCHARS * sizeof(char));
+ snprintf(values[Atnum_pids], NCHARS, "{%d}",
+ BackendXidGetPid(xmax));
}
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
@@ -200,10 +278,10 @@ pgrowlocks(PG_FUNCTION_ARGS)
/* make the tuple into a datum */
result = HeapTupleGetDatum(tuple);
- /* Clean up */
- for (i = 0; i < mydata->ncolumns; i++)
- pfree(values[i]);
- pfree(values);
+ /*
+ * no need to pfree what we allocated; it's on a short-lived
+ * memory context anyway
+ */
SRF_RETURN_NEXT(funcctx, result);
}
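
The comment added in the large hunk above ("a tuple is locked if HTSU returns BeingUpdated, and if it returns MayBeUpdated but the Xmax is valid and pointing at us") compresses the new detection rule into one condition. Spelled out as a standalone predicate it reads as below; the helper name and pulled-out arguments are illustrative only, and the sketch assumes the same server headers pgrowlocks.c already includes.

#include "postgres.h"
#include "access/htup_details.h"
#include "utils/tqual.h"

/* Illustrative restatement of the lock test used above; not in the patch. */
static bool
row_is_locked(HTSU_Result htsu, uint16 infomask,
			  TransactionId xmax, TransactionId current_xid)
{
	if (htsu == HeapTupleBeingUpdated)
		return true;

	/*
	 * MayBeUpdated can still mean "locked by us": xmax must be a valid,
	 * non-multixact xid equal to our own transaction id.
	 */
	return htsu == HeapTupleMayBeUpdated &&
		!(infomask & HEAP_XMAX_INVALID) &&
		!(infomask & HEAP_XMAX_IS_MULTI) &&
		xmax == current_xid;
}
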
diff --git a/contrib/pgrowlocks/pgrowlocks.control b/contrib/pgrowlocks/pgrowlocks.control
index a6ba164515..dfa587d761 100644
--- a/contrib/pgrowlocks/pgrowlocks.control
+++ b/contrib/pgrowlocks/pgrowlocks.control
@@ -1,5 +1,5 @@
# pgrowlocks extension
comment = 'show row-level locking information'
-default_version = '1.0'
+default_version = '1.1'
module_pathname = '$libdir/pgrowlocks'
relocatable = true
diff --git a/contrib/pgstattuple/Makefile b/contrib/pgstattuple/Makefile
index 6ac277598c..d991c3a803 100644
--- a/contrib/pgstattuple/Makefile
+++ b/contrib/pgstattuple/Makefile
@@ -4,7 +4,7 @@ MODULE_big = pgstattuple
OBJS = pgstattuple.o pgstatindex.o
EXTENSION = pgstattuple
-DATA = pgstattuple--1.0.sql pgstattuple--unpackaged--1.0.sql
+DATA = pgstattuple--1.2.sql pgstattuple--1.1--1.2.sql pgstattuple--1.0--1.1.sql pgstattuple--unpackaged--1.0.sql
REGRESS = pgstattuple
diff --git a/contrib/pgstattuple/expected/pgstattuple.out b/contrib/pgstattuple/expected/pgstattuple.out
index 7f28177890..d769f6d494 100644
--- a/contrib/pgstattuple/expected/pgstattuple.out
+++ b/contrib/pgstattuple/expected/pgstattuple.out
@@ -4,26 +4,79 @@ CREATE EXTENSION pgstattuple;
-- the pgstattuple functions, but the results for empty tables and
-- indexes should be that.
--
-create table test (a int primary key);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_pkey" for table "test"
+create table test (a int primary key, b int[]);
+select * from pgstattuple('test');
+ table_len | tuple_count | tuple_len | tuple_percent | dead_tuple_count | dead_tuple_len | dead_tuple_percent | free_space | free_percent
+-----------+-------------+-----------+---------------+------------------+----------------+--------------------+------------+--------------
+ 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
+(1 row)
+
select * from pgstattuple('test'::text);
table_len | tuple_count | tuple_len | tuple_percent | dead_tuple_count | dead_tuple_len | dead_tuple_percent | free_space | free_percent
-----------+-------------+-----------+---------------+------------------+----------------+--------------------+------------+--------------
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
(1 row)
+select * from pgstattuple('test'::name);
+ table_len | tuple_count | tuple_len | tuple_percent | dead_tuple_count | dead_tuple_len | dead_tuple_percent | free_space | free_percent
+-----------+-------------+-----------+---------------+------------------+----------------+--------------------+------------+--------------
+ 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
+(1 row)
+
select * from pgstattuple('test'::regclass);
table_len | tuple_count | tuple_len | tuple_percent | dead_tuple_count | dead_tuple_len | dead_tuple_percent | free_space | free_percent
-----------+-------------+-----------+---------------+------------------+----------------+--------------------+------------+--------------
0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
(1 row)
+select pgstattuple(oid) from pg_class where relname = 'test';
+ pgstattuple
+---------------------
+ (0,0,0,0,0,0,0,0,0)
+(1 row)
+
+select pgstattuple(relname) from pg_class where relname = 'test';
+ pgstattuple
+---------------------
+ (0,0,0,0,0,0,0,0,0)
+(1 row)
+
select * from pgstatindex('test_pkey');
version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation
---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+--------------------
2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
(1 row)
+select * from pgstatindex('test_pkey'::text);
+ version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation
+---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+--------------------
+ 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
+(1 row)
+
+select * from pgstatindex('test_pkey'::name);
+ version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation
+---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+--------------------
+ 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
+(1 row)
+
+select * from pgstatindex('test_pkey'::regclass);
+ version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation
+---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+--------------------
+ 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | NaN | NaN
+(1 row)
+
+select pgstatindex(oid) from pg_class where relname = 'test_pkey';
+ pgstatindex
+---------------------------
+ (2,0,0,0,0,0,0,0,NaN,NaN)
+(1 row)
+
+select pgstatindex(relname) from pg_class where relname = 'test_pkey';
+ pgstatindex
+---------------------------
+ (2,0,0,0,0,0,0,0,NaN,NaN)
+(1 row)
+
select pg_relpages('test');
pg_relpages
-------------
@@ -36,3 +89,40 @@ select pg_relpages('test_pkey');
1
(1 row)
+select pg_relpages('test_pkey'::text);
+ pg_relpages
+-------------
+ 1
+(1 row)
+
+select pg_relpages('test_pkey'::name);
+ pg_relpages
+-------------
+ 1
+(1 row)
+
+select pg_relpages('test_pkey'::regclass);
+ pg_relpages
+-------------
+ 1
+(1 row)
+
+select pg_relpages(oid) from pg_class where relname = 'test_pkey';
+ pg_relpages
+-------------
+ 1
+(1 row)
+
+select pg_relpages(relname) from pg_class where relname = 'test_pkey';
+ pg_relpages
+-------------
+ 1
+(1 row)
+
+create index test_ginidx on test using gin (b);
+select * from pgstatginindex('test_ginidx');
+ version | pending_pages | pending_tuples
+---------+---------------+----------------
+ 2 | 0 | 0
+(1 row)
+
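The zeros in pending_pages and pending_tuples above simply mean the GIN pending list is empty; with fastupdate enabled, newly inserted entries accumulate in that list first, which is what pgstatginindex() is meant to expose. A hedged sketch (table and index names are hypothetical, output not shown):

    CREATE INDEX demo_gin ON demo USING gin (tags) WITH (fastupdate = on);
    -- after inserting rows, pending_pages / pending_tuples become nonzero
    -- until the pending list is flushed (e.g. by VACUUM)
    SELECT * FROM pgstatginindex('demo_gin');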
diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c
index d4fc8a0fd6..a2ea5d709c 100644
--- a/contrib/pgstattuple/pgstatindex.c
+++ b/contrib/pgstattuple/pgstatindex.c
@@ -27,7 +27,9 @@
#include "postgres.h"
+#include "access/gin_private.h"
#include "access/heapam.h"
+#include "access/htup_details.h"
#include "access/nbtree.h"
#include "catalog/namespace.h"
#include "funcapi.h"
@@ -37,14 +39,23 @@
#include "utils/rel.h"
-extern Datum pgstatindex(PG_FUNCTION_ARGS);
-extern Datum pg_relpages(PG_FUNCTION_ARGS);
-
+/*
+ * For backward compatibility, each function is provided in two variants:
+ * one taking a regclass-type input argument and one taking a text-type
+ * input argument.
+ *
+ * The variants taking a text-type argument will be deprecated in a
+ * future release.
+ */
PG_FUNCTION_INFO_V1(pgstatindex);
+PG_FUNCTION_INFO_V1(pgstatindexbyid);
PG_FUNCTION_INFO_V1(pg_relpages);
+PG_FUNCTION_INFO_V1(pg_relpagesbyid);
+PG_FUNCTION_INFO_V1(pgstatginindex);
#define IS_INDEX(r) ((r)->rd_rel->relkind == RELKIND_INDEX)
#define IS_BTREE(r) ((r)->rd_rel->relam == BTREE_AM_OID)
+#define IS_GIN(r) ((r)->rd_rel->relam == GIN_AM_OID)
#define CHECK_PAGE_OFFSET_RANGE(pg, offnum) { \
if ( !(FirstOffsetNumber <= (offnum) && \
@@ -79,6 +90,21 @@ typedef struct BTIndexStat
uint64 fragments;
} BTIndexStat;
+/* ------------------------------------------------
+ * A structure for whole-GIN-index statistics,
+ * used by pgstatginindex().
+ * ------------------------------------------------
+ */
+typedef struct GinIndexStat
+{
+ int32 version;
+
+ BlockNumber pending_pages;
+ int64 pending_tuples;
+} GinIndexStat;
+
+static Datum pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo);
+
/* ------------------------------------------------------
* pgstatindex()
*
@@ -91,11 +117,6 @@ pgstatindex(PG_FUNCTION_ARGS)
text *relname = PG_GETARG_TEXT_P(0);
Relation rel;
RangeVar *relrv;
- Datum result;
- BlockNumber nblocks;
- BlockNumber blkno;
- BTIndexStat indexStat;
- BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
if (!superuser())
ereport(ERROR,
@@ -105,6 +126,34 @@ pgstatindex(PG_FUNCTION_ARGS)
relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = relation_openrv(relrv, AccessShareLock);
+ PG_RETURN_DATUM(pgstatindex_impl(rel, fcinfo));
+}
+
+Datum
+pgstatindexbyid(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ Relation rel;
+
+ if (!superuser())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errmsg("must be superuser to use pgstattuple functions"))));
+
+ rel = relation_open(relid, AccessShareLock);
+
+ PG_RETURN_DATUM(pgstatindex_impl(rel, fcinfo));
+}
+
+static Datum
+pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo)
+{
+ Datum result;
+ BlockNumber nblocks;
+ BlockNumber blkno;
+ BTIndexStat indexStat;
+ BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
+
if (!IS_INDEX(rel) || !IS_BTREE(rel))
elog(ERROR, "relation \"%s\" is not a btree index",
RelationGetRelationName(rel));
@@ -216,39 +265,29 @@ pgstatindex(PG_FUNCTION_ARGS)
elog(ERROR, "return type must be a row type");
j = 0;
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", indexStat.version);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%d", indexStat.level);
- values[j] = palloc(32);
- snprintf(values[j++], 32, INT64_FORMAT,
- (indexStat.root_pages +
- indexStat.leaf_pages +
- indexStat.internal_pages +
- indexStat.deleted_pages +
- indexStat.empty_pages) * BLCKSZ);
- values[j] = palloc(32);
- snprintf(values[j++], 32, "%u", indexStat.root_blkno);
- values[j] = palloc(32);
- snprintf(values[j++], 32, INT64_FORMAT, indexStat.internal_pages);
- values[j] = palloc(32);
- snprintf(values[j++], 32, INT64_FORMAT, indexStat.leaf_pages);
- values[j] = palloc(32);
- snprintf(values[j++], 32, INT64_FORMAT, indexStat.empty_pages);
- values[j] = palloc(32);
- snprintf(values[j++], 32, INT64_FORMAT, indexStat.deleted_pages);
- values[j] = palloc(32);
+ values[j++] = psprintf("%d", indexStat.version);
+ values[j++] = psprintf("%d", indexStat.level);
+ values[j++] = psprintf(INT64_FORMAT,
+ (indexStat.root_pages +
+ indexStat.leaf_pages +
+ indexStat.internal_pages +
+ indexStat.deleted_pages +
+ indexStat.empty_pages) * BLCKSZ);
+ values[j++] = psprintf("%u", indexStat.root_blkno);
+ values[j++] = psprintf(INT64_FORMAT, indexStat.internal_pages);
+ values[j++] = psprintf(INT64_FORMAT, indexStat.leaf_pages);
+ values[j++] = psprintf(INT64_FORMAT, indexStat.empty_pages);
+ values[j++] = psprintf(INT64_FORMAT, indexStat.deleted_pages);
if (indexStat.max_avail > 0)
- snprintf(values[j++], 32, "%.2f",
- 100.0 - (double) indexStat.free_space / (double) indexStat.max_avail * 100.0);
+ values[j++] = psprintf("%.2f",
+ 100.0 - (double) indexStat.free_space / (double) indexStat.max_avail * 100.0);
else
- snprintf(values[j++], 32, "NaN");
- values[j] = palloc(32);
+ values[j++] = pstrdup("NaN");
if (indexStat.leaf_pages > 0)
- snprintf(values[j++], 32, "%.2f",
- (double) indexStat.fragments / (double) indexStat.leaf_pages * 100.0);
+ values[j++] = psprintf("%.2f",
+ (double) indexStat.fragments / (double) indexStat.leaf_pages * 100.0);
else
- snprintf(values[j++], 32, "NaN");
+ values[j++] = pstrdup("NaN");
tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc),
values);
@@ -256,7 +295,7 @@ pgstatindex(PG_FUNCTION_ARGS)
result = HeapTupleGetDatum(tuple);
}
- PG_RETURN_DATUM(result);
+ return result;
}
/* --------------------------------------------------------
@@ -292,3 +331,102 @@ pg_relpages(PG_FUNCTION_ARGS)
PG_RETURN_INT64(relpages);
}
+
+Datum
+pg_relpagesbyid(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ int64 relpages;
+ Relation rel;
+
+ if (!superuser())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errmsg("must be superuser to use pgstattuple functions"))));
+
+ rel = relation_open(relid, AccessShareLock);
+
+ /* note: this will work OK on non-local temp tables */
+
+ relpages = RelationGetNumberOfBlocks(rel);
+
+ relation_close(rel, AccessShareLock);
+
+ PG_RETURN_INT64(relpages);
+}
+
+/* ------------------------------------------------------
+ * pgstatginindex()
+ *
+ * Usage: SELECT * FROM pgstatginindex('ginindex');
+ * ------------------------------------------------------
+ */
+Datum
+pgstatginindex(PG_FUNCTION_ARGS)
+{
+ Oid relid = PG_GETARG_OID(0);
+ Relation rel;
+ Buffer buffer;
+ Page page;
+ GinMetaPageData *metadata;
+ GinIndexStat stats;
+ HeapTuple tuple;
+ TupleDesc tupleDesc;
+ Datum values[3];
+ bool nulls[3] = {false, false, false};
+ Datum result;
+
+ if (!superuser())
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errmsg("must be superuser to use pgstattuple functions"))));
+
+ rel = relation_open(relid, AccessShareLock);
+
+ if (!IS_INDEX(rel) || !IS_GIN(rel))
+ elog(ERROR, "relation \"%s\" is not a GIN index",
+ RelationGetRelationName(rel));
+
+ /*
+ * Reject attempts to read non-local temporary relations; we would be
+ * likely to get wrong data since we have no visibility into the owning
+ * session's local buffers.
+ */
+ if (RELATION_IS_OTHER_TEMP(rel))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot access temporary indexes of other sessions")));
+
+ /*
+ * Read metapage
+ */
+ buffer = ReadBuffer(rel, GIN_METAPAGE_BLKNO);
+ LockBuffer(buffer, GIN_SHARE);
+ page = BufferGetPage(buffer);
+ metadata = GinPageGetMeta(page);
+
+ stats.version = metadata->ginVersion;
+ stats.pending_pages = metadata->nPendingPages;
+ stats.pending_tuples = metadata->nPendingHeapTuples;
+
+ UnlockReleaseBuffer(buffer);
+ relation_close(rel, AccessShareLock);
+
+ /*
+ * Build a tuple descriptor for our result type
+ */
+ if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ values[0] = Int32GetDatum(stats.version);
+ values[1] = UInt32GetDatum(stats.pending_pages);
+ values[2] = Int64GetDatum(stats.pending_tuples);
+
+ /*
+ * Build and return the tuple
+ */
+ tuple = heap_form_tuple(tupleDesc, values, nulls);
+ result = HeapTupleGetDatum(tuple);
+
+ PG_RETURN_DATUM(result);
+}
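As an illustration of the two interfaces described in the comment above (the index name comes from the regression test earlier in this diff, not from a new object), both call forms return the same statistics; the text form is the legacy interface, while the regclass form is routed to pgstatindexbyid():

    SELECT * FROM pgstatindex('test_pkey');             -- text argument, to be deprecated
    SELECT * FROM pgstatindex('test_pkey'::regclass);    -- regclass argument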
diff --git a/contrib/pgstattuple/pgstattuple--1.0--1.1.sql b/contrib/pgstattuple/pgstattuple--1.0--1.1.sql
new file mode 100644
index 0000000000..cf582a0b81
--- /dev/null
+++ b/contrib/pgstattuple/pgstattuple--1.0--1.1.sql
@@ -0,0 +1,11 @@
+/* contrib/pgstattuple/pgstattuple--1.0--1.1.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pgstattuple UPDATE TO '1.1'" to load this file. \quit
+
+CREATE FUNCTION pgstatginindex(IN relname regclass,
+ OUT version INT4,
+ OUT pending_pages INT4,
+ OUT pending_tuples BIGINT)
+AS 'MODULE_PATHNAME', 'pgstatginindex'
+LANGUAGE C STRICT;
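A minimal usage sketch for this upgrade path, reusing the GIN index created in the regression test earlier in this diff (it assumes pgstattuple 1.0 is already installed):

    ALTER EXTENSION pgstattuple UPDATE TO '1.1';
    SELECT * FROM pgstatginindex('test_ginidx');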
diff --git a/contrib/pgstattuple/pgstattuple--1.1--1.2.sql b/contrib/pgstattuple/pgstattuple--1.1--1.2.sql
new file mode 100644
index 0000000000..2783a63fb1
--- /dev/null
+++ b/contrib/pgstattuple/pgstattuple--1.1--1.2.sql
@@ -0,0 +1,39 @@
+/* contrib/pgstattuple/pgstattuple--1.1--1.2.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pgstattuple UPDATE TO '1.2'" to load this file. \quit
+
+ALTER EXTENSION pgstattuple DROP FUNCTION pgstattuple(oid);
+DROP FUNCTION pgstattuple(oid);
+
+CREATE FUNCTION pgstattuple(IN reloid regclass,
+ OUT table_len BIGINT, -- physical table length in bytes
+ OUT tuple_count BIGINT, -- number of live tuples
+ OUT tuple_len BIGINT, -- total tuples length in bytes
+ OUT tuple_percent FLOAT8, -- live tuples in %
+ OUT dead_tuple_count BIGINT, -- number of dead tuples
+ OUT dead_tuple_len BIGINT, -- total dead tuples length in bytes
+ OUT dead_tuple_percent FLOAT8, -- dead tuples in %
+ OUT free_space BIGINT, -- free space in bytes
+ OUT free_percent FLOAT8) -- free space in %
+AS 'MODULE_PATHNAME', 'pgstattuplebyid'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION pgstatindex(IN relname regclass,
+ OUT version INT,
+ OUT tree_level INT,
+ OUT index_size BIGINT,
+ OUT root_block_no BIGINT,
+ OUT internal_pages BIGINT,
+ OUT leaf_pages BIGINT,
+ OUT empty_pages BIGINT,
+ OUT deleted_pages BIGINT,
+ OUT avg_leaf_density FLOAT8,
+ OUT leaf_fragmentation FLOAT8)
+AS 'MODULE_PATHNAME', 'pgstatindexbyid'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION pg_relpages(IN relname regclass)
+RETURNS BIGINT
+AS 'MODULE_PATHNAME', 'pg_relpagesbyid'
+LANGUAGE C STRICT;
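Continuing the sketch, updating to 1.2 drops pgstattuple(oid) in favour of pgstattuple(regclass) and adds regclass variants of pgstatindex and pg_relpages, so an OID or a ::regclass cast can be passed directly, as exercised in the regression test above:

    ALTER EXTENSION pgstattuple UPDATE TO '1.2';
    SELECT pgstattuple(oid) FROM pg_class WHERE relname = 'test';
    SELECT pg_relpages('test_pkey'::regclass);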
diff --git a/contrib/pgstattuple/pgstattuple--1.0.sql b/contrib/pgstattuple/pgstattuple--1.2.sql
index f7e03083ad..e5fa2f58da 100644
--- a/contrib/pgstattuple/pgstattuple--1.0.sql
+++ b/contrib/pgstattuple/pgstattuple--1.2.sql
@@ -1,4 +1,4 @@
-/* contrib/pgstattuple/pgstattuple--1.0.sql */
+/* contrib/pgstattuple/pgstattuple--1.2.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION pgstattuple" to load this file. \quit
@@ -16,7 +16,37 @@ CREATE FUNCTION pgstattuple(IN relname text,
AS 'MODULE_PATHNAME', 'pgstattuple'
LANGUAGE C STRICT;
-CREATE FUNCTION pgstattuple(IN reloid oid,
+CREATE FUNCTION pgstatindex(IN relname text,
+ OUT version INT,
+ OUT tree_level INT,
+ OUT index_size BIGINT,
+ OUT root_block_no BIGINT,
+ OUT internal_pages BIGINT,
+ OUT leaf_pages BIGINT,
+ OUT empty_pages BIGINT,
+ OUT deleted_pages BIGINT,
+ OUT avg_leaf_density FLOAT8,
+ OUT leaf_fragmentation FLOAT8)
+AS 'MODULE_PATHNAME', 'pgstatindex'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION pg_relpages(IN relname text)
+RETURNS BIGINT
+AS 'MODULE_PATHNAME', 'pg_relpages'
+LANGUAGE C STRICT;
+
+/* New stuff in 1.1 begins here */
+
+CREATE FUNCTION pgstatginindex(IN relname regclass,
+ OUT version INT4,
+ OUT pending_pages INT4,
+ OUT pending_tuples BIGINT)
+AS 'MODULE_PATHNAME', 'pgstatginindex'
+LANGUAGE C STRICT;
+
+/* New stuff in 1.2 begins here */
+
+CREATE FUNCTION pgstattuple(IN reloid regclass,
OUT table_len BIGINT, -- physical table length in bytes
OUT tuple_count BIGINT, -- number of live tuples
OUT tuple_len BIGINT, -- total tuples length in bytes
@@ -29,7 +59,7 @@ CREATE FUNCTION pgstattuple(IN reloid oid,
AS 'MODULE_PATHNAME', 'pgstattuplebyid'
LANGUAGE C STRICT;
-CREATE FUNCTION pgstatindex(IN relname text,
+CREATE FUNCTION pgstatindex(IN relname regclass,
OUT version INT,
OUT tree_level INT,
OUT index_size BIGINT,
@@ -40,10 +70,10 @@ CREATE FUNCTION pgstatindex(IN relname text,
OUT deleted_pages BIGINT,
OUT avg_leaf_density FLOAT8,
OUT leaf_fragmentation FLOAT8)
-AS 'MODULE_PATHNAME', 'pgstatindex'
+AS 'MODULE_PATHNAME', 'pgstatindexbyid'
LANGUAGE C STRICT;
-CREATE FUNCTION pg_relpages(IN relname text)
+CREATE FUNCTION pg_relpages(IN relname regclass)
RETURNS BIGINT
-AS 'MODULE_PATHNAME', 'pg_relpages'
+AS 'MODULE_PATHNAME', 'pg_relpagesbyid'
LANGUAGE C STRICT;
diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c
index 2b62b78506..edc603f6a1 100644
--- a/contrib/pgstattuple/pgstattuple.c
+++ b/contrib/pgstattuple/pgstattuple.c
@@ -42,9 +42,6 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(pgstattuple);
PG_FUNCTION_INFO_V1(pgstattuplebyid);
-extern Datum pgstattuple(PG_FUNCTION_ARGS);
-extern Datum pgstattuplebyid(PG_FUNCTION_ARGS);
-
/*
* struct pgstattuple_type
*
@@ -216,8 +213,8 @@ pgstat_relation(Relation rel, FunctionCallInfo fcinfo)
switch (rel->rd_rel->relkind)
{
case RELKIND_RELATION:
+ case RELKIND_MATVIEW:
case RELKIND_TOASTVALUE:
- case RELKIND_UNCATALOGED:
case RELKIND_SEQUENCE:
return pgstat_heap(rel, fcinfo);
case RELKIND_INDEX:
@@ -278,9 +275,11 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
Buffer buffer;
pgstattuple_type stat = {0};
BufferAccessStrategy bstrategy;
+ SnapshotData SnapshotDirty;
/* Disable syncscan because we assume we scan from block zero upwards */
scan = heap_beginscan_strat(rel, SnapshotAny, 0, NULL, true, false);
+ InitDirtySnapshot(SnapshotDirty);
nblocks = scan->rs_nblocks; /* # blocks to be scanned */
@@ -296,7 +295,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
/* must hold a buffer lock to call HeapTupleSatisfiesVisibility */
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
- if (HeapTupleSatisfiesVisibility(tuple, SnapshotNow, scan->rs_cbuf))
+ if (HeapTupleSatisfiesVisibility(tuple, &SnapshotDirty, scan->rs_cbuf))
{
stat.tuple_len += tuple->t_len;
stat.tuple_count++;
@@ -311,7 +310,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
/*
* To avoid physically reading the table twice, try to do the
- * free-space scan in parallel with the heap scan. However,
+ * free-space scan in parallel with the heap scan. However,
* heap_getnext may find no tuples on a given page, so we cannot
* simply examine the pages returned by the heap scan.
*/
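One visible consequence of adding RELKIND_MATVIEW to the switch in pgstat_relation() above is that materialized views can now be inspected like ordinary tables; a small sketch, using a hypothetical materialized view built on the regression test's table:

    CREATE MATERIALIZED VIEW test_mv AS SELECT * FROM test;
    SELECT * FROM pgstattuple('test_mv'::regclass);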
diff --git a/contrib/pgstattuple/pgstattuple.control b/contrib/pgstattuple/pgstattuple.control
index 7b5129b2f2..a7cf47fd92 100644
--- a/contrib/pgstattuple/pgstattuple.control
+++ b/contrib/pgstattuple/pgstattuple.control
@@ -1,5 +1,5 @@
# pgstattuple extension
comment = 'show tuple-level statistics'
-default_version = '1.0'
+default_version = '1.2'
module_pathname = '$libdir/pgstattuple'
relocatable = true
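With default_version bumped to '1.2', a fresh install picks up the regclass-based interfaces immediately; only existing installations need the ALTER EXTENSION ... UPDATE scripts above:

    CREATE EXTENSION pgstattuple;                 -- installs 1.2 by default
    SELECT * FROM pgstattuple('test'::regclass);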
diff --git a/contrib/pgstattuple/sql/pgstattuple.sql b/contrib/pgstattuple/sql/pgstattuple.sql
index 2fd1152e8c..0e0ad0e15d 100644
--- a/contrib/pgstattuple/sql/pgstattuple.sql
+++ b/contrib/pgstattuple/sql/pgstattuple.sql
@@ -6,12 +6,30 @@ CREATE EXTENSION pgstattuple;
-- indexes should be that.
--
-create table test (a int primary key);
+create table test (a int primary key, b int[]);
+select * from pgstattuple('test');
select * from pgstattuple('test'::text);
+select * from pgstattuple('test'::name);
select * from pgstattuple('test'::regclass);
+select pgstattuple(oid) from pg_class where relname = 'test';
+select pgstattuple(relname) from pg_class where relname = 'test';
select * from pgstatindex('test_pkey');
+select * from pgstatindex('test_pkey'::text);
+select * from pgstatindex('test_pkey'::name);
+select * from pgstatindex('test_pkey'::regclass);
+select pgstatindex(oid) from pg_class where relname = 'test_pkey';
+select pgstatindex(relname) from pg_class where relname = 'test_pkey';
select pg_relpages('test');
select pg_relpages('test_pkey');
+select pg_relpages('test_pkey'::text);
+select pg_relpages('test_pkey'::name);
+select pg_relpages('test_pkey'::regclass);
+select pg_relpages(oid) from pg_class where relname = 'test_pkey';
+select pg_relpages(relname) from pg_class where relname = 'test_pkey';
+
+create index test_ginidx on test using gin (b);
+
+select * from pgstatginindex('test_ginidx');
diff --git a/contrib/postgres_fdw/.gitignore b/contrib/postgres_fdw/.gitignore
new file mode 100644
index 0000000000..5dcb3ff972
--- /dev/null
+++ b/contrib/postgres_fdw/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/contrib/postgres_fdw/Makefile b/contrib/postgres_fdw/Makefile
new file mode 100644
index 0000000000..8c497201d0
--- /dev/null
+++ b/contrib/postgres_fdw/Makefile
@@ -0,0 +1,27 @@
+# contrib/postgres_fdw/Makefile
+
+MODULE_big = postgres_fdw
+OBJS = postgres_fdw.o option.o deparse.o connection.o
+
+PG_CPPFLAGS = -I$(libpq_srcdir)
+SHLIB_LINK = $(libpq)
+SHLIB_PREREQS = submake-libpq
+
+EXTENSION = postgres_fdw
+DATA = postgres_fdw--1.0.sql
+
+REGRESS = postgres_fdw
+
+# the db name is hard-coded in the tests
+override USE_MODULE_DB =
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/postgres_fdw
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
new file mode 100644
index 0000000000..116be7ddcb
--- /dev/null
+++ b/contrib/postgres_fdw/connection.c
@@ -0,0 +1,715 @@
+/*-------------------------------------------------------------------------
+ *
+ * connection.c
+ * Connection management functions for postgres_fdw
+ *
+ * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/connection.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "postgres_fdw.h"
+
+#include "access/xact.h"
+#include "mb/pg_wchar.h"
+#include "miscadmin.h"
+#include "utils/hsearch.h"
+#include "utils/memutils.h"
+
+
+/*
+ * Connection cache hash table entry
+ *
+ * The lookup key in this hash table is the foreign server OID plus the user
+ * mapping OID. (We use just one connection per user per foreign server,
+ * so that we can ensure all scans use the same snapshot during a query.)
+ *
+ * The "conn" pointer can be NULL if we don't currently have a live connection.
+ * When we do have a connection, xact_depth tracks the current depth of
+ * transactions and subtransactions open on the remote side. We need to issue
+ * commands at the same nesting depth on the remote as we're executing at
+ * ourselves, so that rolling back a subtransaction will kill the right
+ * queries and not the wrong ones.
+ */
+typedef struct ConnCacheKey
+{
+ Oid serverid; /* OID of foreign server */
+ Oid userid; /* OID of local user whose mapping we use */
+} ConnCacheKey;
+
+typedef struct ConnCacheEntry
+{
+ ConnCacheKey key; /* hash key (must be first) */
+ PGconn *conn; /* connection to foreign server, or NULL */
+ int xact_depth; /* 0 = no xact open, 1 = main xact open, 2 =
+ * one level of subxact open, etc */
+ bool have_prep_stmt; /* have we prepared any stmts in this xact? */
+ bool have_error; /* have any subxacts aborted in this xact? */
+} ConnCacheEntry;
+
+/*
+ * Connection cache (initialized on first use)
+ */
+static HTAB *ConnectionHash = NULL;
+
+/* for assigning cursor numbers and prepared statement numbers */
+static unsigned int cursor_number = 0;
+static unsigned int prep_stmt_number = 0;
+
+/* tracks whether any work is needed in callback functions */
+static bool xact_got_connection = false;
+
+/* prototypes of private functions */
+static PGconn *connect_pg_server(ForeignServer *server, UserMapping *user);
+static void check_conn_params(const char **keywords, const char **values);
+static void configure_remote_session(PGconn *conn);
+static void do_sql_command(PGconn *conn, const char *sql);
+static void begin_remote_xact(ConnCacheEntry *entry);
+static void pgfdw_xact_callback(XactEvent event, void *arg);
+static void pgfdw_subxact_callback(SubXactEvent event,
+ SubTransactionId mySubid,
+ SubTransactionId parentSubid,
+ void *arg);
+
+
+/*
+ * Get a PGconn which can be used to execute queries on the remote PostgreSQL
+ * server with the user's authorization. A new connection is established
+ * if we don't already have a suitable one, and a transaction is opened at
+ * the right subtransaction nesting depth if we didn't do that already.
+ *
+ * will_prep_stmt must be true if caller intends to create any prepared
+ * statements. Since those don't go away automatically at transaction end
+ * (not even on error), we need this flag to cue manual cleanup.
+ *
+ * XXX Note that caching connections theoretically requires a mechanism to
+ * detect change of FDW objects to invalidate already established connections.
+ * We could manage that by watching for invalidation events on the relevant
+ * syscaches. For the moment, though, it's not clear that this would really
+ * be useful and not mere pedantry. We could not flush any active connections
+ * mid-transaction anyway.
+ */
+PGconn *
+GetConnection(ForeignServer *server, UserMapping *user,
+ bool will_prep_stmt)
+{
+ bool found;
+ ConnCacheEntry *entry;
+ ConnCacheKey key;
+
+ /* First time through, initialize connection cache hashtable */
+ if (ConnectionHash == NULL)
+ {
+ HASHCTL ctl;
+
+ MemSet(&ctl, 0, sizeof(ctl));
+ ctl.keysize = sizeof(ConnCacheKey);
+ ctl.entrysize = sizeof(ConnCacheEntry);
+ ctl.hash = tag_hash;
+ /* allocate ConnectionHash in the cache context */
+ ctl.hcxt = CacheMemoryContext;
+ ConnectionHash = hash_create("postgres_fdw connections", 8,
+ &ctl,
+ HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+
+ /*
+ * Register some callback functions that manage connection cleanup.
+ * This should be done just once in each backend.
+ */
+ RegisterXactCallback(pgfdw_xact_callback, NULL);
+ RegisterSubXactCallback(pgfdw_subxact_callback, NULL);
+ }
+
+ /* Set flag that we did GetConnection during the current transaction */
+ xact_got_connection = true;
+
+ /* Create hash key for the entry. Assume no pad bytes in key struct */
+ key.serverid = server->serverid;
+ key.userid = user->userid;
+
+ /*
+ * Find or create cached entry for requested connection.
+ */
+ entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found);
+ if (!found)
+ {
+ /* initialize new hashtable entry (key is already filled in) */
+ entry->conn = NULL;
+ entry->xact_depth = 0;
+ entry->have_prep_stmt = false;
+ entry->have_error = false;
+ }
+
+ /*
+ * We don't check the health of the cached connection here, because that
+ * would add some overhead. A broken connection will be detected when the
+ * connection is actually used.
+ */
+
+ /*
+ * If cache entry doesn't have a connection, we have to establish a new
+ * connection. (If connect_pg_server throws an error, the cache entry
+ * will be left in a valid empty state.)
+ */
+ if (entry->conn == NULL)
+ {
+ entry->xact_depth = 0; /* just to be sure */
+ entry->have_prep_stmt = false;
+ entry->have_error = false;
+ entry->conn = connect_pg_server(server, user);
+ elog(DEBUG3, "new postgres_fdw connection %p for server \"%s\"",
+ entry->conn, server->servername);
+ }
+
+ /*
+ * Start a new transaction or subtransaction if needed.
+ */
+ begin_remote_xact(entry);
+
+ /* Remember if caller will prepare statements */
+ entry->have_prep_stmt |= will_prep_stmt;
+
+ return entry->conn;
+}
+
+/*
+ * Connect to remote server using specified server and user mapping properties.
+ */
+static PGconn *
+connect_pg_server(ForeignServer *server, UserMapping *user)
+{
+ PGconn *volatile conn = NULL;
+
+ /*
+ * Use PG_TRY block to ensure closing connection on error.
+ */
+ PG_TRY();
+ {
+ const char **keywords;
+ const char **values;
+ int n;
+
+ /*
+ * Construct connection params from generic options of ForeignServer
+ * and UserMapping. (Some of them might not be libpq options, in
+ * which case we'll just waste a few array slots.) Add 3 extra slots
+ * for fallback_application_name, client_encoding, end marker.
+ */
+ n = list_length(server->options) + list_length(user->options) + 3;
+ keywords = (const char **) palloc(n * sizeof(char *));
+ values = (const char **) palloc(n * sizeof(char *));
+
+ n = 0;
+ n += ExtractConnectionOptions(server->options,
+ keywords + n, values + n);
+ n += ExtractConnectionOptions(user->options,
+ keywords + n, values + n);
+
+ /* Use "postgres_fdw" as fallback_application_name. */
+ keywords[n] = "fallback_application_name";
+ values[n] = "postgres_fdw";
+ n++;
+
+ /* Set client_encoding so that libpq can convert encoding properly. */
+ keywords[n] = "client_encoding";
+ values[n] = GetDatabaseEncodingName();
+ n++;
+
+ keywords[n] = values[n] = NULL;
+
+ /* verify connection parameters and make connection */
+ check_conn_params(keywords, values);
+
+ conn = PQconnectdbParams(keywords, values, false);
+ if (!conn || PQstatus(conn) != CONNECTION_OK)
+ {
+ char *connmessage;
+ int msglen;
+
+ /* libpq typically appends a newline, strip that */
+ connmessage = pstrdup(PQerrorMessage(conn));
+ msglen = strlen(connmessage);
+ if (msglen > 0 && connmessage[msglen - 1] == '\n')
+ connmessage[msglen - 1] = '\0';
+ ereport(ERROR,
+ (errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
+ errmsg("could not connect to server \"%s\"",
+ server->servername),
+ errdetail_internal("%s", connmessage)));
+ }
+
+ /*
+ * Check that a non-superuser has used a password to establish the
+ * connection; otherwise, the connection would piggyback on the postgres
+ * server's user identity. See also dblink_security_check() in contrib/dblink.
+ */
+ if (!superuser() && !PQconnectionUsedPassword(conn))
+ ereport(ERROR,
+ (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
+ errmsg("password is required"),
+ errdetail("Non-superuser cannot connect if the server does not request a password."),
+ errhint("Target server's authentication method must be changed.")));
+
+ /* Prepare new session for use */
+ configure_remote_session(conn);
+
+ pfree(keywords);
+ pfree(values);
+ }
+ PG_CATCH();
+ {
+ /* Release PGconn data structure if we managed to create one */
+ if (conn)
+ PQfinish(conn);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ return conn;
+}
+
+/*
+ * For non-superusers, insist that the connstr specify a password. This
+ * prevents a password from being picked up from .pgpass, a service file,
+ * the environment, etc. We don't want the postgres user's passwords
+ * to be accessible to non-superusers. (See also dblink_connstr_check in
+ * contrib/dblink.)
+ */
+static void
+check_conn_params(const char **keywords, const char **values)
+{
+ int i;
+
+ /* no check required if superuser */
+ if (superuser())
+ return;
+
+ /* ok if params contain a non-empty password */
+ for (i = 0; keywords[i] != NULL; i++)
+ {
+ if (strcmp(keywords[i], "password") == 0 && values[i][0] != '\0')
+ return;
+ }
+
+ ereport(ERROR,
+ (errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
+ errmsg("password is required"),
+ errdetail("Non-superusers must provide a password in the user mapping.")));
+}
+
+/*
+ * Issue SET commands to make sure remote session is configured properly.
+ *
+ * We do this just once at connection, assuming nothing will change the
+ * values later. Since we'll never send volatile function calls to the
+ * remote, there shouldn't be any way to break this assumption from our end.
+ * It's possible to think of ways to break it at the remote end, eg making
+ * a foreign table point to a view that includes a set_config call ---
+ * but once you admit the possibility of a malicious view definition,
+ * there are any number of ways to break things.
+ */
+static void
+configure_remote_session(PGconn *conn)
+{
+ int remoteversion = PQserverVersion(conn);
+
+ /* Force the search path to contain only pg_catalog (see deparse.c) */
+ do_sql_command(conn, "SET search_path = pg_catalog");
+
+ /*
+ * Set remote timezone; this is basically just cosmetic, since all
+ * transmitted and returned timestamptzs should specify a zone explicitly
+ * anyway. However it makes the regression test outputs more predictable.
+ *
+ * We don't risk setting remote zone equal to ours, since the remote
+ * server might use a different timezone database. Instead, use UTC
+ * (quoted, because very old servers are picky about case).
+ */
+ do_sql_command(conn, "SET timezone = 'UTC'");
+
+ /*
+ * Set values needed to ensure unambiguous data output from remote. (This
+ * logic should match what pg_dump does. See also set_transmission_modes
+ * in postgres_fdw.c.)
+ */
+ do_sql_command(conn, "SET datestyle = ISO");
+ if (remoteversion >= 80400)
+ do_sql_command(conn, "SET intervalstyle = postgres");
+ if (remoteversion >= 90000)
+ do_sql_command(conn, "SET extra_float_digits = 3");
+ else
+ do_sql_command(conn, "SET extra_float_digits = 2");
+}
+
+/*
+ * Convenience subroutine to issue a non-data-returning SQL command to remote
+ */
+static void
+do_sql_command(PGconn *conn, const char *sql)
+{
+ PGresult *res;
+
+ res = PQexec(conn, sql);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, conn, true, sql);
+ PQclear(res);
+}
+
+/*
+ * Start remote transaction or subtransaction, if needed.
+ *
+ * Note that we always use at least REPEATABLE READ in the remote session.
+ * This is so that, if a query initiates multiple scans of the same or
+ * different foreign tables, we will get snapshot-consistent results from
+ * those scans. A disadvantage is that we can't provide sane emulation of
+ * READ COMMITTED behavior --- it would be nice if we had some other way to
+ * control which remote queries share a snapshot.
+ */
+static void
+begin_remote_xact(ConnCacheEntry *entry)
+{
+ int curlevel = GetCurrentTransactionNestLevel();
+
+ /* Start main transaction if we haven't yet */
+ if (entry->xact_depth <= 0)
+ {
+ const char *sql;
+
+ elog(DEBUG3, "starting remote transaction on connection %p",
+ entry->conn);
+
+ if (IsolationIsSerializable())
+ sql = "START TRANSACTION ISOLATION LEVEL SERIALIZABLE";
+ else
+ sql = "START TRANSACTION ISOLATION LEVEL REPEATABLE READ";
+ do_sql_command(entry->conn, sql);
+ entry->xact_depth = 1;
+ }
+
+ /*
+ * If we're in a subtransaction, stack up savepoints to match our level.
+ * This ensures we can rollback just the desired effects when a
+ * subtransaction aborts.
+ */
+ while (entry->xact_depth < curlevel)
+ {
+ char sql[64];
+
+ snprintf(sql, sizeof(sql), "SAVEPOINT s%d", entry->xact_depth + 1);
+ do_sql_command(entry->conn, sql);
+ entry->xact_depth++;
+ }
+}
+
+/*
+ * Release connection reference count created by calling GetConnection.
+ */
+void
+ReleaseConnection(PGconn *conn)
+{
+ /*
+ * Currently, we don't actually track connection references because all
+ * cleanup is managed on a transaction or subtransaction basis instead. So
+ * there's nothing to do here.
+ */
+}
+
+/*
+ * Assign a "unique" number for a cursor.
+ *
+ * These really only need to be unique per connection within a transaction.
+ * For the moment we ignore the per-connection point and assign them across
+ * all connections in the transaction, but we ask for the connection to be
+ * supplied in case we want to refine that.
+ *
+ * Note that even if wraparound happens in a very long transaction, actual
+ * collisions are highly improbable; just be sure to use %u not %d to print.
+ */
+unsigned int
+GetCursorNumber(PGconn *conn)
+{
+ return ++cursor_number;
+}
+
+/*
+ * Assign a "unique" number for a prepared statement.
+ *
+ * This works much like GetCursorNumber, except that we never reset the counter
+ * within a session. That's because we can't be 100% sure we've gotten rid
+ * of all prepared statements on all connections, and it's not really worth
+ * increasing the risk of prepared-statement name collisions by resetting.
+ */
+unsigned int
+GetPrepStmtNumber(PGconn *conn)
+{
+ return ++prep_stmt_number;
+}
+
+/*
+ * Report an error we got from the remote server.
+ *
+ * elevel: error level to use (typically ERROR, but might be less)
+ * res: PGresult containing the error
+ * conn: connection we did the query on
+ * clear: if true, PQclear the result (otherwise caller will handle it)
+ * sql: NULL, or text of remote command we tried to execute
+ *
+ * Note: callers that choose not to throw ERROR for a remote error are
+ * responsible for making sure that the associated ConnCacheEntry gets
+ * marked with have_error = true.
+ */
+void
+pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
+ bool clear, const char *sql)
+{
+ /* If requested, PGresult must be released before leaving this function. */
+ PG_TRY();
+ {
+ char *diag_sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
+ char *message_primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);
+ char *message_detail = PQresultErrorField(res, PG_DIAG_MESSAGE_DETAIL);
+ char *message_hint = PQresultErrorField(res, PG_DIAG_MESSAGE_HINT);
+ char *message_context = PQresultErrorField(res, PG_DIAG_CONTEXT);
+ int sqlstate;
+
+ if (diag_sqlstate)
+ sqlstate = MAKE_SQLSTATE(diag_sqlstate[0],
+ diag_sqlstate[1],
+ diag_sqlstate[2],
+ diag_sqlstate[3],
+ diag_sqlstate[4]);
+ else
+ sqlstate = ERRCODE_CONNECTION_FAILURE;
+
+ /*
+ * If we don't get a message from the PGresult, try the PGconn. This
+ * is needed because for connection-level failures, PQexec may just
+ * return NULL, not a PGresult at all.
+ */
+ if (message_primary == NULL)
+ message_primary = PQerrorMessage(conn);
+
+ ereport(elevel,
+ (errcode(sqlstate),
+ message_primary ? errmsg_internal("%s", message_primary) :
+ errmsg("unknown error"),
+ message_detail ? errdetail_internal("%s", message_detail) : 0,
+ message_hint ? errhint("%s", message_hint) : 0,
+ message_context ? errcontext("%s", message_context) : 0,
+ sql ? errcontext("Remote SQL command: %s", sql) : 0));
+ }
+ PG_CATCH();
+ {
+ if (clear)
+ PQclear(res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+ if (clear)
+ PQclear(res);
+}
+
+/*
+ * pgfdw_xact_callback --- cleanup at main-transaction end.
+ */
+static void
+pgfdw_xact_callback(XactEvent event, void *arg)
+{
+ HASH_SEQ_STATUS scan;
+ ConnCacheEntry *entry;
+
+ /* Quick exit if no connections were touched in this transaction. */
+ if (!xact_got_connection)
+ return;
+
+ /*
+ * Scan all connection cache entries to find open remote transactions, and
+ * close them.
+ */
+ hash_seq_init(&scan, ConnectionHash);
+ while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+ {
+ PGresult *res;
+
+ /* Ignore cache entry if no open connection right now */
+ if (entry->conn == NULL)
+ continue;
+
+ /* If it has an open remote transaction, try to close it */
+ if (entry->xact_depth > 0)
+ {
+ elog(DEBUG3, "closing remote transaction on connection %p",
+ entry->conn);
+
+ switch (event)
+ {
+ case XACT_EVENT_PRE_COMMIT:
+ /* Commit all remote transactions during pre-commit */
+ do_sql_command(entry->conn, "COMMIT TRANSACTION");
+
+ /*
+ * If there were any errors in subtransactions, and we
+ * made prepared statements, do a DEALLOCATE ALL to make
+ * sure we get rid of all prepared statements. This is
+ * annoying and not terribly bulletproof, but it's
+ * probably not worth trying harder.
+ *
+ * DEALLOCATE ALL only exists in 8.3 and later, so this
+ * constrains how old a server postgres_fdw can
+ * communicate with. We intentionally ignore errors in
+ * the DEALLOCATE, so that we can hobble along to some
+ * extent with older servers (leaking prepared statements
+ * as we go; but we don't really support update operations
+ * pre-8.3 anyway).
+ */
+ if (entry->have_prep_stmt && entry->have_error)
+ {
+ res = PQexec(entry->conn, "DEALLOCATE ALL");
+ PQclear(res);
+ }
+ entry->have_prep_stmt = false;
+ entry->have_error = false;
+ break;
+ case XACT_EVENT_PRE_PREPARE:
+
+ /*
+ * We disallow remote transactions that modified anything,
+ * since it's not very reasonable to hold them open until
+ * the prepared transaction is committed. For the moment,
+ * throw error unconditionally; later we might allow
+ * read-only cases. Note that the error will cause us to
+ * come right back here with event == XACT_EVENT_ABORT, so
+ * we'll clean up the connection state at that point.
+ */
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot prepare a transaction that modified remote tables")));
+ break;
+ case XACT_EVENT_COMMIT:
+ case XACT_EVENT_PREPARE:
+ /* Pre-commit should have closed the open transaction */
+ elog(ERROR, "missed cleaning up connection during pre-commit");
+ break;
+ case XACT_EVENT_ABORT:
+ /* Assume we might have lost track of prepared statements */
+ entry->have_error = true;
+ /* If we're aborting, abort all remote transactions too */
+ res = PQexec(entry->conn, "ABORT TRANSACTION");
+ /* Note: can't throw ERROR, it would be infinite loop */
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(WARNING, res, entry->conn, true,
+ "ABORT TRANSACTION");
+ else
+ {
+ PQclear(res);
+ /* As above, make sure to clear any prepared stmts */
+ if (entry->have_prep_stmt && entry->have_error)
+ {
+ res = PQexec(entry->conn, "DEALLOCATE ALL");
+ PQclear(res);
+ }
+ entry->have_prep_stmt = false;
+ entry->have_error = false;
+ }
+ break;
+ }
+ }
+
+ /* Reset state to show we're out of a transaction */
+ entry->xact_depth = 0;
+
+ /*
+ * If the connection isn't in a good idle state, discard it to
+ * recover. Next GetConnection will open a new connection.
+ */
+ if (PQstatus(entry->conn) != CONNECTION_OK ||
+ PQtransactionStatus(entry->conn) != PQTRANS_IDLE)
+ {
+ elog(DEBUG3, "discarding connection %p", entry->conn);
+ PQfinish(entry->conn);
+ entry->conn = NULL;
+ }
+ }
+
+ /*
+ * Regardless of the event type, we can now mark ourselves as out of the
+ * transaction. (Note: if we are here during PRE_COMMIT or PRE_PREPARE,
+ * this saves a useless scan of the hashtable during COMMIT or PREPARE.)
+ */
+ xact_got_connection = false;
+
+ /* Also reset cursor numbering for next transaction */
+ cursor_number = 0;
+}
+
+/*
+ * pgfdw_subxact_callback --- cleanup at subtransaction end.
+ */
+static void
+pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
+ SubTransactionId parentSubid, void *arg)
+{
+ HASH_SEQ_STATUS scan;
+ ConnCacheEntry *entry;
+ int curlevel;
+
+ /* Nothing to do at subxact start, nor after commit. */
+ if (!(event == SUBXACT_EVENT_PRE_COMMIT_SUB ||
+ event == SUBXACT_EVENT_ABORT_SUB))
+ return;
+
+ /* Quick exit if no connections were touched in this transaction. */
+ if (!xact_got_connection)
+ return;
+
+ /*
+ * Scan all connection cache entries to find open remote subtransactions
+ * of the current level, and close them.
+ */
+ curlevel = GetCurrentTransactionNestLevel();
+ hash_seq_init(&scan, ConnectionHash);
+ while ((entry = (ConnCacheEntry *) hash_seq_search(&scan)))
+ {
+ PGresult *res;
+ char sql[100];
+
+ /*
+ * We only care about connections with open remote subtransactions of
+ * the current level.
+ */
+ if (entry->conn == NULL || entry->xact_depth < curlevel)
+ continue;
+
+ if (entry->xact_depth > curlevel)
+ elog(ERROR, "missed cleaning up remote subtransaction at level %d",
+ entry->xact_depth);
+
+ if (event == SUBXACT_EVENT_PRE_COMMIT_SUB)
+ {
+ /* Commit all remote subtransactions during pre-commit */
+ snprintf(sql, sizeof(sql), "RELEASE SAVEPOINT s%d", curlevel);
+ do_sql_command(entry->conn, sql);
+ }
+ else
+ {
+ /* Assume we might have lost track of prepared statements */
+ entry->have_error = true;
+ /* Rollback all remote subtransactions during abort */
+ snprintf(sql, sizeof(sql),
+ "ROLLBACK TO SAVEPOINT s%d; RELEASE SAVEPOINT s%d",
+ curlevel, curlevel);
+ res = PQexec(entry->conn, sql);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(WARNING, res, entry->conn, true, sql);
+ else
+ PQclear(res);
+ }
+
+ /* OK, we're outta that level of subtransaction */
+ entry->xact_depth--;
+ }
+}
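For orientation, here is a sketch of the catalog objects that GetConnection() keys on (foreign server OID plus user-mapping OID) and from which connect_pg_server() assembles its libpq options; the server name, host and credentials are hypothetical, and, as check_conn_params() insists, a non-superuser's mapping must include a password:

    CREATE EXTENSION postgres_fdw;
    CREATE SERVER remote_srv FOREIGN DATA WRAPPER postgres_fdw
        OPTIONS (host 'remote.example.com', port '5432', dbname 'appdb');
    CREATE USER MAPPING FOR app_user SERVER remote_srv
        OPTIONS (user 'app_user', password 'secret');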
diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c
new file mode 100644
index 0000000000..d7d9b9c77d
--- /dev/null
+++ b/contrib/postgres_fdw/deparse.c
@@ -0,0 +1,1842 @@
+/*-------------------------------------------------------------------------
+ *
+ * deparse.c
+ * Query deparser for postgres_fdw
+ *
+ * This file includes functions that examine query WHERE clauses to see
+ * whether they're safe to send to the remote server for execution, as
+ * well as functions to construct the query text to be sent. The latter
+ * functionality is annoyingly duplicative of ruleutils.c, but there are
+ * enough special considerations that it seems best to keep this separate.
+ * One saving grace is that we only need deparse logic for node types that
+ * we consider safe to send.
+ *
+ * We assume that the remote session's search_path is exactly "pg_catalog",
+ * and thus we need to schema-qualify all and only names outside pg_catalog.
+ *
+ * We do not consider that it is ever safe to send COLLATE expressions to
+ * the remote server: it might not have the same collation names we do.
+ * (Later we might consider it safe to send COLLATE "C", but even that would
+ * fail on old remote servers.) An expression is considered safe to send only
+ * if all collations used in it are traceable to Var(s) of the foreign table.
+ * That implies that if the remote server gets a different answer than we do,
+ * the foreign table's columns are not marked with collations that match the
+ * remote table's columns, which we can consider to be user error.
+ *
+ * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/deparse.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "postgres_fdw.h"
+
+#include "access/heapam.h"
+#include "access/htup_details.h"
+#include "access/sysattr.h"
+#include "access/transam.h"
+#include "catalog/pg_collation.h"
+#include "catalog/pg_namespace.h"
+#include "catalog/pg_operator.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "commands/defrem.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/var.h"
+#include "parser/parsetree.h"
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/syscache.h"
+
+
+/*
+ * Global context for foreign_expr_walker's search of an expression tree.
+ */
+typedef struct foreign_glob_cxt
+{
+ PlannerInfo *root; /* global planner state */
+ RelOptInfo *foreignrel; /* the foreign relation we are planning for */
+} foreign_glob_cxt;
+
+/*
+ * Local (per-tree-level) context for foreign_expr_walker's search.
+ * This is concerned with identifying collations used in the expression.
+ */
+typedef enum
+{
+ FDW_COLLATE_NONE, /* expression is of a noncollatable type */
+ FDW_COLLATE_SAFE, /* collation derives from a foreign Var */
+ FDW_COLLATE_UNSAFE /* collation derives from something else */
+} FDWCollateState;
+
+typedef struct foreign_loc_cxt
+{
+ Oid collation; /* OID of current collation, if any */
+ FDWCollateState state; /* state of current collation choice */
+} foreign_loc_cxt;
+
+/*
+ * Context for deparseExpr
+ */
+typedef struct deparse_expr_cxt
+{
+ PlannerInfo *root; /* global planner state */
+ RelOptInfo *foreignrel; /* the foreign relation we are planning for */
+ StringInfo buf; /* output buffer to append to */
+ List **params_list; /* exprs that will become remote Params */
+} deparse_expr_cxt;
+
+/*
+ * Functions to determine whether an expression can be evaluated safely on
+ * remote server.
+ */
+static bool foreign_expr_walker(Node *node,
+ foreign_glob_cxt *glob_cxt,
+ foreign_loc_cxt *outer_cxt);
+static bool is_builtin(Oid procid);
+
+/*
+ * Functions to construct string representation of a node tree.
+ */
+static void deparseTargetList(StringInfo buf,
+ PlannerInfo *root,
+ Index rtindex,
+ Relation rel,
+ Bitmapset *attrs_used,
+ List **retrieved_attrs);
+static void deparseReturningList(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ bool trig_after_row,
+ List *returningList,
+ List **retrieved_attrs);
+static void deparseColumnRef(StringInfo buf, int varno, int varattno,
+ PlannerInfo *root);
+static void deparseRelation(StringInfo buf, Relation rel);
+static void deparseStringLiteral(StringInfo buf, const char *val);
+static void deparseExpr(Expr *expr, deparse_expr_cxt *context);
+static void deparseVar(Var *node, deparse_expr_cxt *context);
+static void deparseConst(Const *node, deparse_expr_cxt *context);
+static void deparseParam(Param *node, deparse_expr_cxt *context);
+static void deparseArrayRef(ArrayRef *node, deparse_expr_cxt *context);
+static void deparseFuncExpr(FuncExpr *node, deparse_expr_cxt *context);
+static void deparseOpExpr(OpExpr *node, deparse_expr_cxt *context);
+static void deparseOperatorName(StringInfo buf, Form_pg_operator opform);
+static void deparseDistinctExpr(DistinctExpr *node, deparse_expr_cxt *context);
+static void deparseScalarArrayOpExpr(ScalarArrayOpExpr *node,
+ deparse_expr_cxt *context);
+static void deparseRelabelType(RelabelType *node, deparse_expr_cxt *context);
+static void deparseBoolExpr(BoolExpr *node, deparse_expr_cxt *context);
+static void deparseNullTest(NullTest *node, deparse_expr_cxt *context);
+static void deparseArrayExpr(ArrayExpr *node, deparse_expr_cxt *context);
+static void printRemoteParam(int paramindex, Oid paramtype, int32 paramtypmod,
+ deparse_expr_cxt *context);
+static void printRemotePlaceholder(Oid paramtype, int32 paramtypmod,
+ deparse_expr_cxt *context);
+
+
+/*
+ * Examine each qual clause in input_conds, and classify them into two groups,
+ * which are returned as two lists:
+ * - remote_conds contains expressions that can be evaluated remotely
+ * - local_conds contains expressions that can't be evaluated remotely
+ */
+void
+classifyConditions(PlannerInfo *root,
+ RelOptInfo *baserel,
+ List *input_conds,
+ List **remote_conds,
+ List **local_conds)
+{
+ ListCell *lc;
+
+ *remote_conds = NIL;
+ *local_conds = NIL;
+
+ foreach(lc, input_conds)
+ {
+ RestrictInfo *ri = (RestrictInfo *) lfirst(lc);
+
+ if (is_foreign_expr(root, baserel, ri->clause))
+ *remote_conds = lappend(*remote_conds, ri);
+ else
+ *local_conds = lappend(*local_conds, ri);
+ }
+}
+
+/*
+ * Returns true if given expr is safe to evaluate on the foreign server.
+ */
+bool
+is_foreign_expr(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Expr *expr)
+{
+ foreign_glob_cxt glob_cxt;
+ foreign_loc_cxt loc_cxt;
+
+ /*
+ * Check that the expression consists of nodes that are safe to execute
+ * remotely.
+ */
+ glob_cxt.root = root;
+ glob_cxt.foreignrel = baserel;
+ loc_cxt.collation = InvalidOid;
+ loc_cxt.state = FDW_COLLATE_NONE;
+ if (!foreign_expr_walker((Node *) expr, &glob_cxt, &loc_cxt))
+ return false;
+
+ /* Expressions examined here should be boolean, ie noncollatable */
+ Assert(loc_cxt.collation == InvalidOid);
+ Assert(loc_cxt.state == FDW_COLLATE_NONE);
+
+ /*
+ * An expression which includes any mutable functions can't be sent over
+ * because its result is not stable. For example, sending now() to the
+ * remote side could cause confusion from clock offsets. Future versions might
+ * be able to make this choice with more granularity. (We check this last
+ * because it requires a lot of expensive catalog lookups.)
+ */
+ if (contain_mutable_functions((Node *) expr))
+ return false;
+
+ /* OK to evaluate on the remote server */
+ return true;
+}
+
+/*
+ * Check if expression is safe to execute remotely, and return true if so.
+ *
+ * In addition, *outer_cxt is updated with collation information.
+ *
+ * We must check that the expression contains only node types we can deparse,
+ * that all types/functions/operators are safe to send (which we approximate
+ * as being built-in), and that all collations used in the expression derive
+ * from Vars of the foreign table. Because of the latter, the logic is
+ * pretty close to assign_collations_walker() in parse_collate.c, though we
+ * can assume here that the given expression is valid.
+ */
+static bool
+foreign_expr_walker(Node *node,
+ foreign_glob_cxt *glob_cxt,
+ foreign_loc_cxt *outer_cxt)
+{
+ bool check_type = true;
+ foreign_loc_cxt inner_cxt;
+ Oid collation;
+ FDWCollateState state;
+
+ /* Need do nothing for empty subexpressions */
+ if (node == NULL)
+ return true;
+
+ /* Set up inner_cxt for possible recursion to child nodes */
+ inner_cxt.collation = InvalidOid;
+ inner_cxt.state = FDW_COLLATE_NONE;
+
+ switch (nodeTag(node))
+ {
+ case T_Var:
+ {
+ Var *var = (Var *) node;
+
+ /*
+ * If the Var is from the foreign table, we consider its
+ * collation (if any) safe to use. If it is from another
+ * table, we treat its collation the same way as we would a
+ * Param's collation, ie it's not safe for it to have a
+ * non-default collation.
+ */
+ if (var->varno == glob_cxt->foreignrel->relid &&
+ var->varlevelsup == 0)
+ {
+ /* Var belongs to foreign table */
+ collation = var->varcollid;
+ state = OidIsValid(collation) ? FDW_COLLATE_SAFE : FDW_COLLATE_NONE;
+ }
+ else
+ {
+ /* Var belongs to some other table */
+ if (var->varcollid != InvalidOid &&
+ var->varcollid != DEFAULT_COLLATION_OID)
+ return false;
+
+ /* We can consider that it doesn't set collation */
+ collation = InvalidOid;
+ state = FDW_COLLATE_NONE;
+ }
+ }
+ break;
+ case T_Const:
+ {
+ Const *c = (Const *) node;
+
+ /*
+ * If the constant has nondefault collation, either it's of a
+ * non-builtin type, or it reflects folding of a CollateExpr;
+ * either way, it's unsafe to send to the remote.
+ */
+ if (c->constcollid != InvalidOid &&
+ c->constcollid != DEFAULT_COLLATION_OID)
+ return false;
+
+ /* Otherwise, we can consider that it doesn't set collation */
+ collation = InvalidOid;
+ state = FDW_COLLATE_NONE;
+ }
+ break;
+ case T_Param:
+ {
+ Param *p = (Param *) node;
+
+ /*
+ * Collation handling is same as for Consts.
+ */
+ if (p->paramcollid != InvalidOid &&
+ p->paramcollid != DEFAULT_COLLATION_OID)
+ return false;
+
+ collation = InvalidOid;
+ state = FDW_COLLATE_NONE;
+ }
+ break;
+ case T_ArrayRef:
+ {
+ ArrayRef *ar = (ArrayRef *) node;
+
+ /* Assignment should not be in restrictions. */
+ if (ar->refassgnexpr != NULL)
+ return false;
+
+ /*
+ * Recurse to remaining subexpressions. Since the array
+ * subscripts must yield (noncollatable) integers, they won't
+ * affect the inner_cxt state.
+ */
+ if (!foreign_expr_walker((Node *) ar->refupperindexpr,
+ glob_cxt, &inner_cxt))
+ return false;
+ if (!foreign_expr_walker((Node *) ar->reflowerindexpr,
+ glob_cxt, &inner_cxt))
+ return false;
+ if (!foreign_expr_walker((Node *) ar->refexpr,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * Array subscripting should yield same collation as input,
+ * but for safety use same logic as for function nodes.
+ */
+ collation = ar->refcollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_FuncExpr:
+ {
+ FuncExpr *fe = (FuncExpr *) node;
+
+ /*
+ * If function used by the expression is not built-in, it
+ * can't be sent to remote because it might have incompatible
+ * semantics on remote side.
+ */
+ if (!is_builtin(fe->funcid))
+ return false;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) fe->args,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * If function's input collation is not derived from a foreign
+ * Var, it can't be sent to remote.
+ */
+ if (fe->inputcollid == InvalidOid)
+ /* OK, inputs are all noncollatable */ ;
+ else if (inner_cxt.state != FDW_COLLATE_SAFE ||
+ fe->inputcollid != inner_cxt.collation)
+ return false;
+
+ /*
+ * Detect whether node is introducing a collation not derived
+ * from a foreign Var. (If so, we just mark it unsafe for now
+ * rather than immediately returning false, since the parent
+ * node might not care.)
+ */
+ collation = fe->funccollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_OpExpr:
+ case T_DistinctExpr: /* struct-equivalent to OpExpr */
+ {
+ OpExpr *oe = (OpExpr *) node;
+
+ /*
+ * Similarly, only built-in operators can be sent to remote.
+ * (If the operator is, surely its underlying function is
+ * too.)
+ */
+ if (!is_builtin(oe->opno))
+ return false;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) oe->args,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * If operator's input collation is not derived from a foreign
+ * Var, it can't be sent to remote.
+ */
+ if (oe->inputcollid == InvalidOid)
+ /* OK, inputs are all noncollatable */ ;
+ else if (inner_cxt.state != FDW_COLLATE_SAFE ||
+ oe->inputcollid != inner_cxt.collation)
+ return false;
+
+ /* Result-collation handling is same as for functions */
+ collation = oe->opcollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_ScalarArrayOpExpr:
+ {
+ ScalarArrayOpExpr *oe = (ScalarArrayOpExpr *) node;
+
+ /*
+ * Again, only built-in operators can be sent to remote.
+ */
+ if (!is_builtin(oe->opno))
+ return false;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) oe->args,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * If operator's input collation is not derived from a foreign
+ * Var, it can't be sent to remote.
+ */
+ if (oe->inputcollid == InvalidOid)
+ /* OK, inputs are all noncollatable */ ;
+ else if (inner_cxt.state != FDW_COLLATE_SAFE ||
+ oe->inputcollid != inner_cxt.collation)
+ return false;
+
+ /* Output is always boolean and so noncollatable. */
+ collation = InvalidOid;
+ state = FDW_COLLATE_NONE;
+ }
+ break;
+ case T_RelabelType:
+ {
+ RelabelType *r = (RelabelType *) node;
+
+ /*
+ * Recurse to input subexpression.
+ */
+ if (!foreign_expr_walker((Node *) r->arg,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * RelabelType must not introduce a collation not derived from
+ * an input foreign Var.
+ */
+ collation = r->resultcollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_BoolExpr:
+ {
+ BoolExpr *b = (BoolExpr *) node;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) b->args,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /* Output is always boolean and so noncollatable. */
+ collation = InvalidOid;
+ state = FDW_COLLATE_NONE;
+ }
+ break;
+ case T_NullTest:
+ {
+ NullTest *nt = (NullTest *) node;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) nt->arg,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /* Output is always boolean and so noncollatable. */
+ collation = InvalidOid;
+ state = FDW_COLLATE_NONE;
+ }
+ break;
+ case T_ArrayExpr:
+ {
+ ArrayExpr *a = (ArrayExpr *) node;
+
+ /*
+ * Recurse to input subexpressions.
+ */
+ if (!foreign_expr_walker((Node *) a->elements,
+ glob_cxt, &inner_cxt))
+ return false;
+
+ /*
+ * ArrayExpr must not introduce a collation not derived from
+ * an input foreign Var.
+ */
+ collation = a->array_collid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
+ case T_List:
+ {
+ List *l = (List *) node;
+ ListCell *lc;
+
+ /*
+ * Recurse to component subexpressions.
+ */
+ foreach(lc, l)
+ {
+ if (!foreign_expr_walker((Node *) lfirst(lc),
+ glob_cxt, &inner_cxt))
+ return false;
+ }
+
+ /*
+ * When processing a list, collation state just bubbles up
+ * from the list elements.
+ */
+ collation = inner_cxt.collation;
+ state = inner_cxt.state;
+
+ /* Don't apply exprType() to the list. */
+ check_type = false;
+ }
+ break;
+ default:
+
+ /*
+ * If it's anything else, assume it's unsafe. This list can be
+ * expanded later, but don't forget to add deparse support below.
+ */
+ return false;
+ }
+
+ /*
+ * If result type of given expression is not built-in, it can't be sent to
+ * remote because it might have incompatible semantics on remote side.
+ */
+ if (check_type && !is_builtin(exprType(node)))
+ return false;
+
+ /*
+ * Now, merge my collation information into my parent's state.
+ */
+ if (state > outer_cxt->state)
+ {
+ /* Override previous parent state */
+ outer_cxt->collation = collation;
+ outer_cxt->state = state;
+ }
+ else if (state == outer_cxt->state)
+ {
+ /* Merge, or detect error if there's a collation conflict */
+ switch (state)
+ {
+ case FDW_COLLATE_NONE:
+ /* Nothing + nothing is still nothing */
+ break;
+ case FDW_COLLATE_SAFE:
+ if (collation != outer_cxt->collation)
+ {
+ /*
+ * Non-default collation always beats default.
+ */
+ if (outer_cxt->collation == DEFAULT_COLLATION_OID)
+ {
+ /* Override previous parent state */
+ outer_cxt->collation = collation;
+ }
+ else if (collation != DEFAULT_COLLATION_OID)
+ {
+ /*
+ * Conflict; show state as indeterminate. We don't
+ * want to "return false" right away, since parent
+ * node might not care about collation.
+ */
+ outer_cxt->state = FDW_COLLATE_UNSAFE;
+ }
+ }
+ break;
+ case FDW_COLLATE_UNSAFE:
+ /* We're still conflicted ... */
+ break;
+ }
+ }
+
+ /* It looks OK */
+ return true;
+}
+
+/*
+ * Return true if given object is one of PostgreSQL's built-in objects.
+ *
+ * We use FirstBootstrapObjectId as the cutoff, so that we only consider
+ * objects with hand-assigned OIDs to be "built in", not for instance any
+ * function or type defined in the information_schema.
+ *
+ * Our constraints for dealing with types are tighter than they are for
+ * functions or operators: we want to accept only types that are in pg_catalog,
+ * else format_type might incorrectly fail to schema-qualify their names.
+ * (This could be fixed with some changes to format_type, but for now there's
+ * no need.) Thus we must exclude information_schema types.
+ *
+ * XXX there is a problem with this, which is that the set of built-in
+ * objects expands over time. Something that is built-in to us might not
+ * be known to the remote server, if it's of an older version. But keeping
+ * track of that would be a huge exercise.
+ */
+static bool
+is_builtin(Oid oid)
+{
+ return (oid < FirstBootstrapObjectId);
+}
+
+
+/*
+ * Construct a simple SELECT statement that retrieves desired columns
+ * of the specified foreign table, and append it to "buf". The output
+ * contains just "SELECT ... FROM tablename".
+ *
+ * We also create an integer List of the columns being retrieved, which is
+ * returned to *retrieved_attrs.
+ */
+void
+deparseSelectSql(StringInfo buf,
+ PlannerInfo *root,
+ RelOptInfo *baserel,
+ Bitmapset *attrs_used,
+ List **retrieved_attrs)
+{
+ RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
+ Relation rel;
+
+ /*
+ * Core code already has some lock on each rel being planned, so we can
+ * use NoLock here.
+ */
+ rel = heap_open(rte->relid, NoLock);
+
+ /*
+ * Construct SELECT list
+ */
+ appendStringInfoString(buf, "SELECT ");
+ deparseTargetList(buf, root, baserel->relid, rel, attrs_used,
+ retrieved_attrs);
+
+ /*
+ * Construct FROM clause
+ */
+ appendStringInfoString(buf, " FROM ");
+ deparseRelation(buf, rel);
+
+ heap_close(rel, NoLock);
+}
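+
+/*
+ * For illustration (not part of the upstream code): with the ft1 setup used
+ * in the regression tests below (schema_name 'S 1', table_name 'T 1', and
+ * column_name 'C 1' for column c1), the generated text is
+ *
+ *     SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+ */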
+
+/*
+ * Emit a target list that retrieves the columns specified in attrs_used.
+ * This is used for both SELECT and RETURNING targetlists.
+ *
+ * The tlist text is appended to buf, and we also create an integer List
+ * of the columns being retrieved, which is returned to *retrieved_attrs.
+ */
+static void
+deparseTargetList(StringInfo buf,
+ PlannerInfo *root,
+ Index rtindex,
+ Relation rel,
+ Bitmapset *attrs_used,
+ List **retrieved_attrs)
+{
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ bool have_wholerow;
+ bool first;
+ int i;
+
+ *retrieved_attrs = NIL;
+
+ /* If there's a whole-row reference, we'll need all the columns. */
+ have_wholerow = bms_is_member(0 - FirstLowInvalidHeapAttributeNumber,
+ attrs_used);
+
+ first = true;
+ for (i = 1; i <= tupdesc->natts; i++)
+ {
+ Form_pg_attribute attr = tupdesc->attrs[i - 1];
+
+ /* Ignore dropped attributes. */
+ if (attr->attisdropped)
+ continue;
+
+ if (have_wholerow ||
+ bms_is_member(i - FirstLowInvalidHeapAttributeNumber,
+ attrs_used))
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ deparseColumnRef(buf, rtindex, i, root);
+
+ *retrieved_attrs = lappend_int(*retrieved_attrs, i);
+ }
+ }
+
+ /*
+ * Add ctid if needed. We currently don't support retrieving any other
+ * system columns.
+ */
+ if (bms_is_member(SelfItemPointerAttributeNumber - FirstLowInvalidHeapAttributeNumber,
+ attrs_used))
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ appendStringInfoString(buf, "ctid");
+
+ *retrieved_attrs = lappend_int(*retrieved_attrs,
+ SelfItemPointerAttributeNumber);
+ }
+
+ /* Don't generate bad syntax if no undropped columns */
+ if (first)
+ appendStringInfoString(buf, "NULL");
+}
+
+/*
+ * Deparse WHERE clauses in given list of RestrictInfos and append them to buf.
+ *
+ * baserel is the foreign table we're planning for.
+ *
+ * If no WHERE clause already exists in the buffer, is_first should be true.
+ *
+ * If params is not NULL, it receives a list of Params and other-relation Vars
+ * used in the clauses; these values must be transmitted to the remote server
+ * as parameter values.
+ *
+ * If params is NULL, we're generating the query for EXPLAIN purposes,
+ * so Params and other-relation Vars should be replaced by dummy values.
+ */
+void
+appendWhereClause(StringInfo buf,
+ PlannerInfo *root,
+ RelOptInfo *baserel,
+ List *exprs,
+ bool is_first,
+ List **params)
+{
+ deparse_expr_cxt context;
+ int nestlevel;
+ ListCell *lc;
+
+ if (params)
+ *params = NIL; /* initialize result list to empty */
+
+ /* Set up context struct for recursion */
+ context.root = root;
+ context.foreignrel = baserel;
+ context.buf = buf;
+ context.params_list = params;
+
+ /* Make sure any constants in the exprs are printed portably */
+ nestlevel = set_transmission_modes();
+
+ foreach(lc, exprs)
+ {
+ RestrictInfo *ri = (RestrictInfo *) lfirst(lc);
+
+ /* Connect expressions with "AND" and parenthesize each condition. */
+ if (is_first)
+ appendStringInfoString(buf, " WHERE ");
+ else
+ appendStringInfoString(buf, " AND ");
+
+ appendStringInfoChar(buf, '(');
+ deparseExpr(ri->clause, &context);
+ appendStringInfoChar(buf, ')');
+
+ is_first = false;
+ }
+
+ reset_transmission_modes(nestlevel);
+}
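+
+/*
+ * Illustrative note: each qual is wrapped in its own parentheses and the
+ * quals are joined with AND, so two shippable conditions from the
+ * regression tests below deparse as
+ *
+ *      WHERE (("C 1" = 100)) AND ((c2 = 0))
+ */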
+
+/*
+ * deparse remote INSERT statement
+ *
+ * The statement text is appended to buf, and we also create an integer List
+ * of the columns being retrieved by RETURNING (if any), which is returned
+ * to *retrieved_attrs.
+ */
+void
+deparseInsertSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ List *targetAttrs, List *returningList,
+ List **retrieved_attrs)
+{
+ AttrNumber pindex;
+ bool first;
+ ListCell *lc;
+
+ appendStringInfoString(buf, "INSERT INTO ");
+ deparseRelation(buf, rel);
+
+ if (targetAttrs)
+ {
+ appendStringInfoChar(buf, '(');
+
+ first = true;
+ foreach(lc, targetAttrs)
+ {
+ int attnum = lfirst_int(lc);
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ deparseColumnRef(buf, rtindex, attnum, root);
+ }
+
+ appendStringInfoString(buf, ") VALUES (");
+
+ pindex = 1;
+ first = true;
+ foreach(lc, targetAttrs)
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ appendStringInfo(buf, "$%d", pindex);
+ pindex++;
+ }
+
+ appendStringInfoChar(buf, ')');
+ }
+ else
+ appendStringInfoString(buf, " DEFAULT VALUES");
+
+ deparseReturningList(buf, root, rtindex, rel,
+ rel->trigdesc && rel->trigdesc->trig_insert_after_row,
+ returningList, retrieved_attrs);
+}
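+
+/*
+ * Sketch of the output shape (derived from the code above, not taken
+ * verbatim from the tests): with target columns c1 ("C 1" remotely) and c2,
+ * the statement built here looks like
+ *
+ *     INSERT INTO "S 1"."T 1"("C 1", c2) VALUES ($1, $2)
+ *
+ * with a RETURNING list appended when deparseReturningList emits one.
+ */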
+
+/*
+ * deparse remote UPDATE statement
+ *
+ * The statement text is appended to buf, and we also create an integer List
+ * of the columns being retrieved by RETURNING (if any), which is returned
+ * to *retrieved_attrs.
+ */
+void
+deparseUpdateSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ List *targetAttrs, List *returningList,
+ List **retrieved_attrs)
+{
+ AttrNumber pindex;
+ bool first;
+ ListCell *lc;
+
+ appendStringInfoString(buf, "UPDATE ");
+ deparseRelation(buf, rel);
+ appendStringInfoString(buf, " SET ");
+
+ pindex = 2; /* ctid is always the first param */
+ first = true;
+ foreach(lc, targetAttrs)
+ {
+ int attnum = lfirst_int(lc);
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ deparseColumnRef(buf, rtindex, attnum, root);
+ appendStringInfo(buf, " = $%d", pindex);
+ pindex++;
+ }
+ appendStringInfoString(buf, " WHERE ctid = $1");
+
+ deparseReturningList(buf, root, rtindex, rel,
+ rel->trigdesc && rel->trigdesc->trig_update_after_row,
+ returningList, retrieved_attrs);
+}
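+
+/*
+ * Sketch of the generated text, assuming columns c2 and c3 are targeted:
+ * since $1 is reserved for ctid, the statement takes the shape
+ *
+ *     UPDATE "S 1"."T 1" SET c2 = $2, c3 = $3 WHERE ctid = $1
+ */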
+
+/*
+ * deparse remote DELETE statement
+ *
+ * The statement text is appended to buf, and we also create an integer List
+ * of the columns being retrieved by RETURNING (if any), which is returned
+ * to *retrieved_attrs.
+ */
+void
+deparseDeleteSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ List *returningList,
+ List **retrieved_attrs)
+{
+ appendStringInfoString(buf, "DELETE FROM ");
+ deparseRelation(buf, rel);
+ appendStringInfoString(buf, " WHERE ctid = $1");
+
+ deparseReturningList(buf, root, rtindex, rel,
+ rel->trigdesc && rel->trigdesc->trig_delete_after_row,
+ returningList, retrieved_attrs);
+}
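+
+/*
+ * Illustrative output: the generated statement is simply
+ *
+ *     DELETE FROM "S 1"."T 1" WHERE ctid = $1
+ *
+ * plus a RETURNING list when one is needed.
+ */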
+
+/*
+ * Add a RETURNING clause, if needed, to an INSERT/UPDATE/DELETE.
+ */
+static void
+deparseReturningList(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ bool trig_after_row,
+ List *returningList,
+ List **retrieved_attrs)
+{
+ Bitmapset *attrs_used = NULL;
+
+ if (trig_after_row)
+ {
+ /* whole-row reference acquires all non-system columns */
+ attrs_used =
+ bms_make_singleton(0 - FirstLowInvalidHeapAttributeNumber);
+ }
+
+ if (returningList != NIL)
+ {
+ /*
+ * We need the attrs, non-system and system, mentioned in the local
+ * query's RETURNING list.
+ */
+ pull_varattnos((Node *) returningList, rtindex,
+ &attrs_used);
+ }
+
+ if (attrs_used != NULL)
+ {
+ appendStringInfoString(buf, " RETURNING ");
+ deparseTargetList(buf, root, rtindex, rel, attrs_used,
+ retrieved_attrs);
+ }
+ else
+ *retrieved_attrs = NIL;
+}
+
+/*
+ * Construct SELECT statement to acquire size in blocks of given relation.
+ *
+ * Note: we use local definition of block size, not remote definition.
+ * This is perhaps debatable.
+ *
+ * Note: pg_relation_size() exists in 8.1 and later.
+ */
+void
+deparseAnalyzeSizeSql(StringInfo buf, Relation rel)
+{
+ StringInfoData relname;
+
+ /* We'll need the remote relation name as a literal. */
+ initStringInfo(&relname);
+ deparseRelation(&relname, rel);
+
+ appendStringInfoString(buf, "SELECT pg_catalog.pg_relation_size(");
+ deparseStringLiteral(buf, relname.data);
+ appendStringInfo(buf, "::pg_catalog.regclass) / %d", BLCKSZ);
+}
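+
+/*
+ * Illustrative output, assuming the default BLCKSZ of 8192: for the example
+ * table the query built here is
+ *
+ *     SELECT pg_catalog.pg_relation_size('"S 1"."T 1"'::pg_catalog.regclass) / 8192
+ */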
+
+/*
+ * Construct SELECT statement to acquire sample rows of given relation.
+ *
+ * SELECT command is appended to buf, and list of columns retrieved
+ * is returned to *retrieved_attrs.
+ */
+void
+deparseAnalyzeSql(StringInfo buf, Relation rel, List **retrieved_attrs)
+{
+ Oid relid = RelationGetRelid(rel);
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ int i;
+ char *colname;
+ List *options;
+ ListCell *lc;
+ bool first = true;
+
+ *retrieved_attrs = NIL;
+
+ appendStringInfoString(buf, "SELECT ");
+ for (i = 0; i < tupdesc->natts; i++)
+ {
+ /* Ignore dropped columns. */
+ if (tupdesc->attrs[i]->attisdropped)
+ continue;
+
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ first = false;
+
+ /* Use attribute name or column_name option. */
+ colname = NameStr(tupdesc->attrs[i]->attname);
+ options = GetForeignColumnOptions(relid, i + 1);
+
+ foreach(lc, options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "column_name") == 0)
+ {
+ colname = defGetString(def);
+ break;
+ }
+ }
+
+ appendStringInfoString(buf, quote_identifier(colname));
+
+ *retrieved_attrs = lappend_int(*retrieved_attrs, i + 1);
+ }
+
+ /* Don't generate bad syntax for zero-column relation. */
+ if (first)
+ appendStringInfoString(buf, "NULL");
+
+ /*
+ * Construct FROM clause
+ */
+ appendStringInfoString(buf, " FROM ");
+ deparseRelation(buf, rel);
+}
+
+/*
+ * Construct name to use for given column, and emit it into buf.
+ * If it has a column_name FDW option, use that instead of attribute name.
+ */
+static void
+deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root)
+{
+ RangeTblEntry *rte;
+ char *colname = NULL;
+ List *options;
+ ListCell *lc;
+
+ /* varno must not be any of OUTER_VAR, INNER_VAR and INDEX_VAR. */
+ Assert(!IS_SPECIAL_VARNO(varno));
+
+ /* Get RangeTblEntry from array in PlannerInfo. */
+ rte = planner_rt_fetch(varno, root);
+
+ /*
+ * If it's a column of a foreign table, and it has the column_name FDW
+ * option, use that value.
+ */
+ options = GetForeignColumnOptions(rte->relid, varattno);
+ foreach(lc, options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "column_name") == 0)
+ {
+ colname = defGetString(def);
+ break;
+ }
+ }
+
+ /*
+ * If it's a column of a regular table or it doesn't have column_name FDW
+ * option, use attribute name.
+ */
+ if (colname == NULL)
+ colname = get_relid_attribute_name(rte->relid, varattno);
+
+ appendStringInfoString(buf, quote_identifier(colname));
+}
+
+/*
+ * Append remote name of specified foreign table to buf.
+ * Use value of table_name FDW option (if any) instead of relation's name.
+ * Similarly, schema_name FDW option overrides schema name.
+ */
+static void
+deparseRelation(StringInfo buf, Relation rel)
+{
+ ForeignTable *table;
+ const char *nspname = NULL;
+ const char *relname = NULL;
+ ListCell *lc;
+
+ /* obtain additional catalog information. */
+ table = GetForeignTable(RelationGetRelid(rel));
+
+ /*
+ * Use value of FDW options if any, instead of the name of object itself.
+ */
+ foreach(lc, table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "schema_name") == 0)
+ nspname = defGetString(def);
+ else if (strcmp(def->defname, "table_name") == 0)
+ relname = defGetString(def);
+ }
+
+ /*
+ * Note: we could skip printing the schema name if it's pg_catalog, but
+ * that doesn't seem worth the trouble.
+ */
+ if (nspname == NULL)
+ nspname = get_namespace_name(RelationGetNamespace(rel));
+ if (relname == NULL)
+ relname = RelationGetRelationName(rel);
+
+ appendStringInfo(buf, "%s.%s",
+ quote_identifier(nspname), quote_identifier(relname));
+}
+
+/*
+ * Append a SQL string literal representing "val" to buf.
+ */
+static void
+deparseStringLiteral(StringInfo buf, const char *val)
+{
+ const char *valptr;
+
+ /*
+ * Rather than making assumptions about the remote server's value of
+ * standard_conforming_strings, always use E'foo' syntax if there are any
+ * backslashes. This will fail on remote servers before 8.1, but those
+ * are long out of support.
+ */
+ if (strchr(val, '\\') != NULL)
+ appendStringInfoChar(buf, ESCAPE_STRING_SYNTAX);
+ appendStringInfoChar(buf, '\'');
+ for (valptr = val; *valptr; valptr++)
+ {
+ char ch = *valptr;
+
+ if (SQL_STR_DOUBLE(ch, true))
+ appendStringInfoChar(buf, ch);
+ appendStringInfoChar(buf, ch);
+ }
+ appendStringInfoChar(buf, '\'');
+}
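+
+/*
+ * Worked example (matching the regression tests below): for the value
+ * foo's\bar (one single quote and one backslash), the backslash forces E''
+ * syntax and both special characters are doubled, giving E'foo''s\\bar'.
+ */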
+
+/*
+ * Deparse given expression into context->buf.
+ *
+ * This function must support all the same node types that foreign_expr_walker
+ * accepts.
+ *
+ * Note: unlike ruleutils.c, we just use a simple hard-wired parenthesization
+ * scheme: anything more complex than a Var, Const, function call or cast
+ * should be self-parenthesized.
+ */
+static void
+deparseExpr(Expr *node, deparse_expr_cxt *context)
+{
+ if (node == NULL)
+ return;
+
+ switch (nodeTag(node))
+ {
+ case T_Var:
+ deparseVar((Var *) node, context);
+ break;
+ case T_Const:
+ deparseConst((Const *) node, context);
+ break;
+ case T_Param:
+ deparseParam((Param *) node, context);
+ break;
+ case T_ArrayRef:
+ deparseArrayRef((ArrayRef *) node, context);
+ break;
+ case T_FuncExpr:
+ deparseFuncExpr((FuncExpr *) node, context);
+ break;
+ case T_OpExpr:
+ deparseOpExpr((OpExpr *) node, context);
+ break;
+ case T_DistinctExpr:
+ deparseDistinctExpr((DistinctExpr *) node, context);
+ break;
+ case T_ScalarArrayOpExpr:
+ deparseScalarArrayOpExpr((ScalarArrayOpExpr *) node, context);
+ break;
+ case T_RelabelType:
+ deparseRelabelType((RelabelType *) node, context);
+ break;
+ case T_BoolExpr:
+ deparseBoolExpr((BoolExpr *) node, context);
+ break;
+ case T_NullTest:
+ deparseNullTest((NullTest *) node, context);
+ break;
+ case T_ArrayExpr:
+ deparseArrayExpr((ArrayExpr *) node, context);
+ break;
+ default:
+ elog(ERROR, "unsupported expression type for deparse: %d",
+ (int) nodeTag(node));
+ break;
+ }
+}
+
+/*
+ * Deparse given Var node into context->buf.
+ *
+ * If the Var belongs to the foreign relation, just print its remote name.
+ * Otherwise, it's effectively a Param (and will in fact be a Param at
+ * run time). Handle it the same way we handle plain Params --- see
+ * deparseParam for comments.
+ */
+static void
+deparseVar(Var *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+
+ if (node->varno == context->foreignrel->relid &&
+ node->varlevelsup == 0)
+ {
+ /* Var belongs to foreign table */
+ deparseColumnRef(buf, node->varno, node->varattno, context->root);
+ }
+ else
+ {
+ /* Treat like a Param */
+ if (context->params_list)
+ {
+ int pindex = 0;
+ ListCell *lc;
+
+ /* find its index in params_list */
+ foreach(lc, *context->params_list)
+ {
+ pindex++;
+ if (equal(node, (Node *) lfirst(lc)))
+ break;
+ }
+ if (lc == NULL)
+ {
+ /* not in list, so add it */
+ pindex++;
+ *context->params_list = lappend(*context->params_list, node);
+ }
+
+ printRemoteParam(pindex, node->vartype, node->vartypmod, context);
+ }
+ else
+ {
+ printRemotePlaceholder(node->vartype, node->vartypmod, context);
+ }
+ }
+}
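+
+/*
+ * Illustrative note: in the regression tests below, a Var of the foreign
+ * table prints as its remote column name, e.g. "C 1", while a Var of
+ * another relation in a parameterized scan prints as a parameter such as
+ * $1::integer.
+ */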
+
+/*
+ * Deparse given constant value into context->buf.
+ *
+ * This function has to be kept in sync with ruleutils.c's get_const_expr.
+ */
+static void
+deparseConst(Const *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ Oid typoutput;
+ bool typIsVarlena;
+ char *extval;
+ bool isfloat = false;
+ bool needlabel;
+
+ if (node->constisnull)
+ {
+ appendStringInfoString(buf, "NULL");
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(node->consttype,
+ node->consttypmod));
+ return;
+ }
+
+ getTypeOutputInfo(node->consttype,
+ &typoutput, &typIsVarlena);
+ extval = OidOutputFunctionCall(typoutput, node->constvalue);
+
+ switch (node->consttype)
+ {
+ case INT2OID:
+ case INT4OID:
+ case INT8OID:
+ case OIDOID:
+ case FLOAT4OID:
+ case FLOAT8OID:
+ case NUMERICOID:
+ {
+ /*
+ * No need to quote unless it's a special value such as 'NaN'.
+ * See comments in get_const_expr().
+ */
+ if (strspn(extval, "0123456789+-eE.") == strlen(extval))
+ {
+ if (extval[0] == '+' || extval[0] == '-')
+ appendStringInfo(buf, "(%s)", extval);
+ else
+ appendStringInfoString(buf, extval);
+ if (strcspn(extval, "eE.") != strlen(extval))
+ isfloat = true; /* it looks like a float */
+ }
+ else
+ appendStringInfo(buf, "'%s'", extval);
+ }
+ break;
+ case BITOID:
+ case VARBITOID:
+ appendStringInfo(buf, "B'%s'", extval);
+ break;
+ case BOOLOID:
+ if (strcmp(extval, "t") == 0)
+ appendStringInfoString(buf, "true");
+ else
+ appendStringInfoString(buf, "false");
+ break;
+ default:
+ deparseStringLiteral(buf, extval);
+ break;
+ }
+
+ /*
+ * Append ::typename unless the constant will be implicitly typed as the
+ * right type when it is read in.
+ *
+ * XXX this code has to be kept in sync with the behavior of the parser,
+ * especially make_const.
+ */
+ switch (node->consttype)
+ {
+ case BOOLOID:
+ case INT4OID:
+ case UNKNOWNOID:
+ needlabel = false;
+ break;
+ case NUMERICOID:
+ needlabel = !isfloat || (node->consttypmod >= 0);
+ break;
+ default:
+ needlabel = true;
+ break;
+ }
+ if (needlabel)
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(node->consttype,
+ node->consttypmod));
+}
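+
+/*
+ * Illustrative note: in the regression tests below an int4 constant prints
+ * bare (e.g. "C 1" = 1 needs no label), whereas a numeric constant is
+ * labeled, e.g. 1::numeric.
+ */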
+
+/*
+ * Deparse given Param node.
+ *
+ * If we're generating the query "for real", add the Param to
+ * context->params_list if it's not already present, and then use its index
+ * in that list as the remote parameter number. During EXPLAIN, there's
+ * no need to identify a parameter number.
+ */
+static void
+deparseParam(Param *node, deparse_expr_cxt *context)
+{
+ if (context->params_list)
+ {
+ int pindex = 0;
+ ListCell *lc;
+
+ /* find its index in params_list */
+ foreach(lc, *context->params_list)
+ {
+ pindex++;
+ if (equal(node, (Node *) lfirst(lc)))
+ break;
+ }
+ if (lc == NULL)
+ {
+ /* not in list, so add it */
+ pindex++;
+ *context->params_list = lappend(*context->params_list, node);
+ }
+
+ printRemoteParam(pindex, node->paramtype, node->paramtypmod, context);
+ }
+ else
+ {
+ printRemotePlaceholder(node->paramtype, node->paramtypmod, context);
+ }
+}
+
+/*
+ * Deparse an array subscript expression.
+ */
+static void
+deparseArrayRef(ArrayRef *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ ListCell *lowlist_item;
+ ListCell *uplist_item;
+
+ /* Always parenthesize the expression. */
+ appendStringInfoChar(buf, '(');
+
+ /*
+ * Deparse referenced array expression first. If that expression includes
+ * a cast, we have to parenthesize to prevent the array subscript from
+ * being taken as typename decoration. We can avoid that in the typical
+ * case of subscripting a Var, but otherwise do it.
+ */
+ if (IsA(node->refexpr, Var))
+ deparseExpr(node->refexpr, context);
+ else
+ {
+ appendStringInfoChar(buf, '(');
+ deparseExpr(node->refexpr, context);
+ appendStringInfoChar(buf, ')');
+ }
+
+ /* Deparse subscript expressions. */
+ lowlist_item = list_head(node->reflowerindexpr); /* could be NULL */
+ foreach(uplist_item, node->refupperindexpr)
+ {
+ appendStringInfoChar(buf, '[');
+ if (lowlist_item)
+ {
+ deparseExpr(lfirst(lowlist_item), context);
+ appendStringInfoChar(buf, ':');
+ lowlist_item = lnext(lowlist_item);
+ }
+ deparseExpr(lfirst(uplist_item), context);
+ appendStringInfoChar(buf, ']');
+ }
+
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Deparse a function call.
+ */
+static void
+deparseFuncExpr(FuncExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ HeapTuple proctup;
+ Form_pg_proc procform;
+ const char *proname;
+ bool use_variadic;
+ bool first;
+ ListCell *arg;
+
+ /*
+ * If the function call came from an implicit coercion, then just show the
+ * first argument.
+ */
+ if (node->funcformat == COERCE_IMPLICIT_CAST)
+ {
+ deparseExpr((Expr *) linitial(node->args), context);
+ return;
+ }
+
+ /*
+ * If the function call came from a cast, then show the first argument
+ * plus an explicit cast operation.
+ */
+ if (node->funcformat == COERCE_EXPLICIT_CAST)
+ {
+ Oid rettype = node->funcresulttype;
+ int32 coercedTypmod;
+
+ /* Get the typmod if this is a length-coercion function */
+ (void) exprIsLengthCoercion((Node *) node, &coercedTypmod);
+
+ deparseExpr((Expr *) linitial(node->args), context);
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(rettype, coercedTypmod));
+ return;
+ }
+
+ /*
+ * Normal function: display as proname(args).
+ */
+ proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(node->funcid));
+ if (!HeapTupleIsValid(proctup))
+ elog(ERROR, "cache lookup failed for function %u", node->funcid);
+ procform = (Form_pg_proc) GETSTRUCT(proctup);
+
+	/* Check whether we need to print VARIADIC (cf. ruleutils.c) */
+ use_variadic = node->funcvariadic;
+
+ /* Print schema name only if it's not pg_catalog */
+ if (procform->pronamespace != PG_CATALOG_NAMESPACE)
+ {
+ const char *schemaname;
+
+ schemaname = get_namespace_name(procform->pronamespace);
+ appendStringInfo(buf, "%s.", quote_identifier(schemaname));
+ }
+
+ /* Deparse the function name ... */
+ proname = NameStr(procform->proname);
+ appendStringInfo(buf, "%s(", quote_identifier(proname));
+ /* ... and all the arguments */
+ first = true;
+ foreach(arg, node->args)
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ if (use_variadic && lnext(arg) == NULL)
+ appendStringInfoString(buf, "VARIADIC ");
+ deparseExpr((Expr *) lfirst(arg), context);
+ first = false;
+ }
+ appendStringInfoChar(buf, ')');
+
+ ReleaseSysCache(proctup);
+}
+
+/*
+ * Deparse given operator expression. To avoid problems with operator
+ * precedence, we always parenthesize the arguments.
+ */
+static void
+deparseOpExpr(OpExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ HeapTuple tuple;
+ Form_pg_operator form;
+ char oprkind;
+ ListCell *arg;
+
+ /* Retrieve information about the operator from system catalog. */
+ tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(node->opno));
+ if (!HeapTupleIsValid(tuple))
+ elog(ERROR, "cache lookup failed for operator %u", node->opno);
+ form = (Form_pg_operator) GETSTRUCT(tuple);
+ oprkind = form->oprkind;
+
+ /* Sanity check. */
+ Assert((oprkind == 'r' && list_length(node->args) == 1) ||
+ (oprkind == 'l' && list_length(node->args) == 1) ||
+ (oprkind == 'b' && list_length(node->args) == 2));
+
+ /* Always parenthesize the expression. */
+ appendStringInfoChar(buf, '(');
+
+ /* Deparse left operand. */
+ if (oprkind == 'r' || oprkind == 'b')
+ {
+ arg = list_head(node->args);
+ deparseExpr(lfirst(arg), context);
+ appendStringInfoChar(buf, ' ');
+ }
+
+ /* Deparse operator name. */
+ deparseOperatorName(buf, form);
+
+ /* Deparse right operand. */
+ if (oprkind == 'l' || oprkind == 'b')
+ {
+ arg = list_tail(node->args);
+ appendStringInfoChar(buf, ' ');
+ deparseExpr(lfirst(arg), context);
+ }
+
+ appendStringInfoChar(buf, ')');
+
+ ReleaseSysCache(tuple);
+}
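+
+/*
+ * Illustrative example from the regression tests below: the unconditional
+ * parenthesization means c1 = -c1 deparses as
+ *
+ *     ("C 1" = (- "C 1"))
+ *
+ * (appendWhereClause adds one more pair of parentheses around the qual).
+ */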
+
+/*
+ * Print the name of an operator.
+ */
+static void
+deparseOperatorName(StringInfo buf, Form_pg_operator opform)
+{
+ char *opname;
+
+ /* opname is not a SQL identifier, so we should not quote it. */
+ opname = NameStr(opform->oprname);
+
+ /* Print schema name only if it's not pg_catalog */
+ if (opform->oprnamespace != PG_CATALOG_NAMESPACE)
+ {
+ const char *opnspname;
+
+ opnspname = get_namespace_name(opform->oprnamespace);
+ /* Print fully qualified operator name. */
+ appendStringInfo(buf, "OPERATOR(%s.%s)",
+ quote_identifier(opnspname), opname);
+ }
+ else
+ {
+ /* Just print operator name. */
+ appendStringInfoString(buf, opname);
+ }
+}
+
+/*
+ * Deparse IS DISTINCT FROM.
+ */
+static void
+deparseDistinctExpr(DistinctExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+
+ Assert(list_length(node->args) == 2);
+
+ appendStringInfoChar(buf, '(');
+ deparseExpr(linitial(node->args), context);
+ appendStringInfoString(buf, " IS DISTINCT FROM ");
+ deparseExpr(lsecond(node->args), context);
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Deparse given ScalarArrayOpExpr expression. To avoid problems with
+ * operator precedence, we always parenthesize the arguments.
+ */
+static void
+deparseScalarArrayOpExpr(ScalarArrayOpExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ HeapTuple tuple;
+ Form_pg_operator form;
+ Expr *arg1;
+ Expr *arg2;
+
+ /* Retrieve information about the operator from system catalog. */
+ tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(node->opno));
+ if (!HeapTupleIsValid(tuple))
+ elog(ERROR, "cache lookup failed for operator %u", node->opno);
+ form = (Form_pg_operator) GETSTRUCT(tuple);
+
+ /* Sanity check. */
+ Assert(list_length(node->args) == 2);
+
+ /* Always parenthesize the expression. */
+ appendStringInfoChar(buf, '(');
+
+ /* Deparse left operand. */
+ arg1 = linitial(node->args);
+ deparseExpr(arg1, context);
+ appendStringInfoChar(buf, ' ');
+
+ /* Deparse operator name plus decoration. */
+ deparseOperatorName(buf, form);
+ appendStringInfo(buf, " %s (", node->useOr ? "ANY" : "ALL");
+
+ /* Deparse right operand. */
+ arg2 = lsecond(node->args);
+ deparseExpr(arg2, context);
+
+ appendStringInfoChar(buf, ')');
+
+ /* Always parenthesize the expression. */
+ appendStringInfoChar(buf, ')');
+
+ ReleaseSysCache(tuple);
+}
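+
+/*
+ * Illustrative example from the regression tests below:
+ * c1 = ANY(ARRAY[c2, 1, c1 + 0]) deparses as
+ *
+ *     ("C 1" = ANY (ARRAY[c2, 1, ("C 1" + 0)]))
+ */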
+
+/*
+ * Deparse a RelabelType (binary-compatible cast) node.
+ */
+static void
+deparseRelabelType(RelabelType *node, deparse_expr_cxt *context)
+{
+ deparseExpr(node->arg, context);
+ if (node->relabelformat != COERCE_IMPLICIT_CAST)
+ appendStringInfo(context->buf, "::%s",
+ format_type_with_typemod(node->resulttype,
+ node->resulttypmod));
+}
+
+/*
+ * Deparse a BoolExpr node.
+ *
+ * Note: by the time we get here, AND and OR expressions have been flattened
+ * into N-argument form, so we'd better be prepared to deal with that.
+ */
+static void
+deparseBoolExpr(BoolExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ const char *op = NULL; /* keep compiler quiet */
+ bool first;
+ ListCell *lc;
+
+ switch (node->boolop)
+ {
+ case AND_EXPR:
+ op = "AND";
+ break;
+ case OR_EXPR:
+ op = "OR";
+ break;
+ case NOT_EXPR:
+ appendStringInfoString(buf, "(NOT ");
+ deparseExpr(linitial(node->args), context);
+ appendStringInfoChar(buf, ')');
+ return;
+ }
+
+ appendStringInfoChar(buf, '(');
+ first = true;
+ foreach(lc, node->args)
+ {
+ if (!first)
+ appendStringInfo(buf, " %s ", op);
+ deparseExpr((Expr *) lfirst(lc), context);
+ first = false;
+ }
+ appendStringInfoChar(buf, ')');
+}
+
+/*
+ * Deparse IS [NOT] NULL expression.
+ */
+static void
+deparseNullTest(NullTest *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+
+ appendStringInfoChar(buf, '(');
+ deparseExpr(node->arg, context);
+ if (node->nulltesttype == IS_NULL)
+ appendStringInfoString(buf, " IS NULL)");
+ else
+ appendStringInfoString(buf, " IS NOT NULL)");
+}
+
+/*
+ * Deparse ARRAY[...] construct.
+ */
+static void
+deparseArrayExpr(ArrayExpr *node, deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ bool first = true;
+ ListCell *lc;
+
+ appendStringInfoString(buf, "ARRAY[");
+ foreach(lc, node->elements)
+ {
+ if (!first)
+ appendStringInfoString(buf, ", ");
+ deparseExpr(lfirst(lc), context);
+ first = false;
+ }
+ appendStringInfoChar(buf, ']');
+
+ /* If the array is empty, we need an explicit cast to the array type. */
+ if (node->elements == NIL)
+ appendStringInfo(buf, "::%s",
+ format_type_with_typemod(node->array_typeid, -1));
+}
+
+/*
+ * Print the representation of a parameter to be sent to the remote side.
+ *
+ * Note: we always label the Param's type explicitly rather than relying on
+ * transmitting a numeric type OID in PQexecParams(). This allows us to
+ * avoid assuming that types have the same OIDs on the remote side as they
+ * do locally --- they need only have the same names.
+ */
+static void
+printRemoteParam(int paramindex, Oid paramtype, int32 paramtypmod,
+ deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ char *ptypename = format_type_with_typemod(paramtype, paramtypmod);
+
+ appendStringInfo(buf, "$%d::%s", paramindex, ptypename);
+}
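+
+/*
+ * Illustrative note: parameter 1 of type integer is printed as $1::integer,
+ * as in the parameterized remote paths shown in the regression tests below.
+ */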
+
+/*
+ * Print the representation of a placeholder for a parameter that will be
+ * sent to the remote side at execution time.
+ *
+ * This is used when we're just trying to EXPLAIN the remote query.
+ * We don't have the actual value of the runtime parameter yet, and we don't
+ * want the remote planner to generate a plan that depends on such a value
+ * anyway. Thus, we can't do something simple like "$1::paramtype".
+ * Instead, we emit "((SELECT null::paramtype)::paramtype)".
+ * In all extant versions of Postgres, the planner will see that as an unknown
+ * constant value, which is what we want. This might need adjustment if we
+ * ever make the planner flatten scalar subqueries. Note: the reason for the
+ * apparently useless outer cast is to ensure that the representation as a
+ * whole will be parsed as an a_expr and not a select_with_parens; the latter
+ * would do the wrong thing in the context "x = ANY(...)".
+ */
+static void
+printRemotePlaceholder(Oid paramtype, int32 paramtypmod,
+ deparse_expr_cxt *context)
+{
+ StringInfo buf = context->buf;
+ char *ptypename = format_type_with_typemod(paramtype, paramtypmod);
+
+ appendStringInfo(buf, "((SELECT null::%s)::%s)", ptypename, ptypename);
+}
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
new file mode 100644
index 0000000000..2e49ee317a
--- /dev/null
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -0,0 +1,2836 @@
+-- ===================================================================
+-- create FDW objects
+-- ===================================================================
+CREATE EXTENSION postgres_fdw;
+CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw;
+CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname 'contrib_regression');
+CREATE USER MAPPING FOR public SERVER testserver1
+ OPTIONS (user 'value', password 'value');
+CREATE USER MAPPING FOR CURRENT_USER SERVER loopback;
+-- ===================================================================
+-- create objects used through FDW loopback server
+-- ===================================================================
+CREATE TYPE user_enum AS ENUM ('foo', 'bar', 'buz');
+CREATE SCHEMA "S 1";
+CREATE TABLE "S 1"."T 1" (
+ "C 1" int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10),
+ c8 user_enum,
+ CONSTRAINT t1_pkey PRIMARY KEY ("C 1")
+);
+CREATE TABLE "S 1"."T 2" (
+ c1 int NOT NULL,
+ c2 text,
+ CONSTRAINT t2_pkey PRIMARY KEY (c1)
+);
+INSERT INTO "S 1"."T 1"
+ SELECT id,
+ id % 10,
+ to_char(id, 'FM00000'),
+ '1970-01-01'::timestamptz + ((id % 100) || ' days')::interval,
+ '1970-01-01'::timestamp + ((id % 100) || ' days')::interval,
+ id % 10,
+ id % 10,
+ 'foo'::user_enum
+ FROM generate_series(1, 1000) id;
+INSERT INTO "S 1"."T 2"
+ SELECT id,
+ 'AAA' || to_char(id, 'FM000')
+ FROM generate_series(1, 100) id;
+ANALYZE "S 1"."T 1";
+ANALYZE "S 1"."T 2";
+-- ===================================================================
+-- create foreign tables
+-- ===================================================================
+CREATE FOREIGN TABLE ft1 (
+ c0 int,
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft1',
+ c8 user_enum
+) SERVER loopback;
+ALTER FOREIGN TABLE ft1 DROP COLUMN c0;
+CREATE FOREIGN TABLE ft2 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ cx int,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft2',
+ c8 user_enum
+) SERVER loopback;
+ALTER FOREIGN TABLE ft2 DROP COLUMN cx;
+-- ===================================================================
+-- tests for validator
+-- ===================================================================
+-- requiressl, krbsrvname and gsslib are omitted because they depend on
+-- configure options
+ALTER SERVER testserver1 OPTIONS (
+ use_remote_estimate 'false',
+ updatable 'true',
+ fdw_startup_cost '123.456',
+ fdw_tuple_cost '0.123',
+ service 'value',
+ connect_timeout 'value',
+ dbname 'value',
+ host 'value',
+ hostaddr 'value',
+ port 'value',
+ --client_encoding 'value',
+ application_name 'value',
+ --fallback_application_name 'value',
+ keepalives 'value',
+ keepalives_idle 'value',
+ keepalives_interval 'value',
+ -- requiressl 'value',
+ sslcompression 'value',
+ sslmode 'value',
+ sslcert 'value',
+ sslkey 'value',
+ sslrootcert 'value',
+ sslcrl 'value'
+ --requirepeer 'value',
+ -- krbsrvname 'value',
+ -- gsslib 'value',
+ --replication 'value'
+);
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (DROP user, DROP password);
+ALTER FOREIGN TABLE ft1 OPTIONS (schema_name 'S 1', table_name 'T 1');
+ALTER FOREIGN TABLE ft2 OPTIONS (schema_name 'S 1', table_name 'T 1');
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
+ALTER FOREIGN TABLE ft2 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
+\det+
+ List of foreign tables
+ Schema | Table | Server | FDW Options | Description
+--------+-------+----------+---------------------------------------+-------------
+ public | ft1 | loopback | (schema_name 'S 1', table_name 'T 1') |
+ public | ft2 | loopback | (schema_name 'S 1', table_name 'T 1') |
+(2 rows)
+
+-- Now we should be able to run ANALYZE.
+-- To exercise multiple code paths, we use local stats on ft1
+-- and remote-estimate mode on ft2.
+ANALYZE ft1;
+ALTER FOREIGN TABLE ft2 OPTIONS (use_remote_estimate 'true');
+-- ===================================================================
+-- simple queries
+-- ===================================================================
+-- single table, with/without alias
+EXPLAIN (COSTS false) SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+---------------------------------
+ Limit
+ -> Sort
+ Sort Key: c3, c1
+ -> Foreign Scan on ft1
+(4 rows)
+
+SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 103 | 3 | 00103 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 104 | 4 | 00104 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+ 105 | 5 | 00105 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
+ 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 107 | 7 | 00107 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 108 | 8 | 00108 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 109 | 9 | 00109 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
+ 110 | 0 | 00110 | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
+(10 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------
+ Limit
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ -> Sort
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Sort Key: t1.c3, t1.c1
+ -> Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(8 rows)
+
+SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 103 | 3 | 00103 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 104 | 4 | 00104 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+ 105 | 5 | 00105 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
+ 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 107 | 7 | 00107 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 108 | 8 | 00108 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 109 | 9 | 00109 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
+ 110 | 0 | 00110 | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
+(10 rows)
+
+-- whole-row reference
+EXPLAIN (VERBOSE, COSTS false) SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ QUERY PLAN
+-------------------------------------------------------------------------------------
+ Limit
+ Output: t1.*, c3, c1
+ -> Sort
+ Output: t1.*, c3, c1
+ Sort Key: t1.c3, t1.c1
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.*, c3, c1
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(8 rows)
+
+SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ t1
+--------------------------------------------------------------------------------------------
+ (101,1,00101,"Fri Jan 02 00:00:00 1970 PST","Fri Jan 02 00:00:00 1970",1,"1 ",foo)
+ (102,2,00102,"Sat Jan 03 00:00:00 1970 PST","Sat Jan 03 00:00:00 1970",2,"2 ",foo)
+ (103,3,00103,"Sun Jan 04 00:00:00 1970 PST","Sun Jan 04 00:00:00 1970",3,"3 ",foo)
+ (104,4,00104,"Mon Jan 05 00:00:00 1970 PST","Mon Jan 05 00:00:00 1970",4,"4 ",foo)
+ (105,5,00105,"Tue Jan 06 00:00:00 1970 PST","Tue Jan 06 00:00:00 1970",5,"5 ",foo)
+ (106,6,00106,"Wed Jan 07 00:00:00 1970 PST","Wed Jan 07 00:00:00 1970",6,"6 ",foo)
+ (107,7,00107,"Thu Jan 08 00:00:00 1970 PST","Thu Jan 08 00:00:00 1970",7,"7 ",foo)
+ (108,8,00108,"Fri Jan 09 00:00:00 1970 PST","Fri Jan 09 00:00:00 1970",8,"8 ",foo)
+ (109,9,00109,"Sat Jan 10 00:00:00 1970 PST","Sat Jan 10 00:00:00 1970",9,"9 ",foo)
+ (110,0,00110,"Sun Jan 11 00:00:00 1970 PST","Sun Jan 11 00:00:00 1970",0,"0 ",foo)
+(10 rows)
+
+-- empty result
+SELECT * FROM ft1 WHERE false;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+----+----+----+----+----+----
+(0 rows)
+
+-- with WHERE clause
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((c7 >= '1'::bpchar)) AND (("C 1" = 101)) AND ((c6 = '1'::text))
+(3 rows)
+
+SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+-- aggregate
+SELECT COUNT(*) FROM ft1 t1;
+ count
+-------
+ 1000
+(1 row)
+
+-- join two tables
+SELECT t1.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+ c1
+-----
+ 101
+ 102
+ 103
+ 104
+ 105
+ 106
+ 107
+ 108
+ 109
+ 110
+(10 rows)
+
+-- subquery
+SELECT * FROM ft1 t1 WHERE t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 <= 10) ORDER BY c1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+ 5 | 5 | 00005 | Tue Jan 06 00:00:00 1970 PST | Tue Jan 06 00:00:00 1970 | 5 | 5 | foo
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 9 | 9 | 00009 | Sat Jan 10 00:00:00 1970 PST | Sat Jan 10 00:00:00 1970 | 9 | 9 | foo
+ 10 | 0 | 00010 | Sun Jan 11 00:00:00 1970 PST | Sun Jan 11 00:00:00 1970 | 0 | 0 | foo
+(10 rows)
+
+-- subquery+MAX
+SELECT * FROM ft1 t1 WHERE t1.c3 = (SELECT MAX(c3) FROM ft2 t2) ORDER BY c1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1000 | 0 | 01000 | Thu Jan 01 00:00:00 1970 PST | Thu Jan 01 00:00:00 1970 | 0 | 0 | foo
+(1 row)
+
+-- used in CTE
+WITH t1 AS (SELECT * FROM ft1 WHERE c1 <= 10) SELECT t2.c1, t2.c2, t2.c3, t2.c4 FROM t1, ft2 t2 WHERE t1.c1 = t2.c1 ORDER BY t1.c1;
+ c1 | c2 | c3 | c4
+----+----+-------+------------------------------
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST
+ 5 | 5 | 00005 | Tue Jan 06 00:00:00 1970 PST
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST
+ 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST
+ 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST
+ 9 | 9 | 00009 | Sat Jan 10 00:00:00 1970 PST
+ 10 | 0 | 00010 | Sun Jan 11 00:00:00 1970 PST
+(10 rows)
+
+-- fixed values
+SELECT 'fixed', NULL FROM ft1 t1 WHERE c1 = 1;
+ ?column? | ?column?
+----------+----------
+ fixed |
+(1 row)
+
+-- user-defined operator/function
+CREATE FUNCTION postgres_fdw_abs(int) RETURNS int AS $$
+BEGIN
+RETURN abs($1);
+END
+$$ LANGUAGE plpgsql IMMUTABLE;
+CREATE OPERATOR === (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===,
+ NEGATOR = !==
+);
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c1 = postgres_fdw_abs(t1.c2))
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2;
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c1 === t1.c2)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = abs(t1.c2);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = abs(c2)))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = c2))
+(3 rows)
+
+-- ===================================================================
+-- WHERE with remotely-executable conditions
+-- ===================================================================
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 1; -- Var, OpExpr(b), Const
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 100 AND t1.c2 = 0; -- BoolExpr
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 100)) AND ((c2 = 0))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- NullTest
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" IS NULL))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" IS NOT NULL))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((round(abs("C 1"), 0) = 1::numeric))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l)
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = (- "C 1")))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r)
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((1::numeric = ("C 1" !)))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((("C 1" IS NOT NULL) IS DISTINCT FROM ("C 1" IS NOT NULL)))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = ANY (ARRAY[c2, 1, ("C 1" + 0)])))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- ArrayRef
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = ((ARRAY["C 1", c2, 3])[1])))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c6 = E'foo''s\\bar'; -- check special chars
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((c6 = E'foo''s\\bar'::text))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c8 = 'foo'; -- can't be sent to remote
+ QUERY PLAN
+-------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
+(4 rows)
+
+-- parameterized remote path
+EXPLAIN (VERBOSE, COSTS false)
+ SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8, b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
+ -> Foreign Scan on public.ft2 a
+ Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 47))
+ -> Foreign Scan on public.ft2 b
+ Output: b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (($1::integer = "C 1"))
+(8 rows)
+
+SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 47 | 7 | 00047 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo | 7 | 7 | 00007 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+(1 row)
+
+-- check both safe and unsafe join conditions
+EXPLAIN (VERBOSE, COSTS false)
+ SELECT * FROM ft2 a, ft2 b
+ WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8, b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
+ -> Foreign Scan on public.ft2 a
+ Output: a.c1, a.c2, a.c3, a.c4, a.c5, a.c6, a.c7, a.c8
+ Filter: (a.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((c2 = 6))
+ -> Foreign Scan on public.ft2 b
+ Output: b.c1, b.c2, b.c3, b.c4, b.c5, b.c6, b.c7, b.c8
+ Filter: (upper((a.c7)::text) = (b.c7)::text)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (($1::integer = "C 1"))
+(10 rows)
+
+SELECT * FROM ft2 a, ft2 b
+WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 126 | 6 | 00126 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 126 | 6 | 00126 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 136 | 6 | 00136 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 136 | 6 | 00136 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 146 | 6 | 00146 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 146 | 6 | 00146 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 156 | 6 | 00156 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 156 | 6 | 00156 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 166 | 6 | 00166 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 166 | 6 | 00166 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 176 | 6 | 00176 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 176 | 6 | 00176 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 186 | 6 | 00186 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 186 | 6 | 00186 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 196 | 6 | 00196 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 196 | 6 | 00196 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 206 | 6 | 00206 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 206 | 6 | 00206 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 216 | 6 | 00216 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 216 | 6 | 00216 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 226 | 6 | 00226 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 226 | 6 | 00226 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 236 | 6 | 00236 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 236 | 6 | 00236 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 246 | 6 | 00246 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 246 | 6 | 00246 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 256 | 6 | 00256 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 256 | 6 | 00256 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 266 | 6 | 00266 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 266 | 6 | 00266 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 276 | 6 | 00276 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 276 | 6 | 00276 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 286 | 6 | 00286 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 286 | 6 | 00286 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 296 | 6 | 00296 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 296 | 6 | 00296 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 306 | 6 | 00306 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 306 | 6 | 00306 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 316 | 6 | 00316 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 316 | 6 | 00316 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 326 | 6 | 00326 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 326 | 6 | 00326 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 336 | 6 | 00336 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 336 | 6 | 00336 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 346 | 6 | 00346 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 346 | 6 | 00346 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 356 | 6 | 00356 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 356 | 6 | 00356 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 366 | 6 | 00366 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 366 | 6 | 00366 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 376 | 6 | 00376 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 376 | 6 | 00376 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 386 | 6 | 00386 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 386 | 6 | 00386 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 396 | 6 | 00396 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 396 | 6 | 00396 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 406 | 6 | 00406 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 406 | 6 | 00406 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 416 | 6 | 00416 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 416 | 6 | 00416 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 426 | 6 | 00426 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 426 | 6 | 00426 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 436 | 6 | 00436 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 436 | 6 | 00436 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 446 | 6 | 00446 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 446 | 6 | 00446 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 456 | 6 | 00456 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 456 | 6 | 00456 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 466 | 6 | 00466 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 466 | 6 | 00466 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 476 | 6 | 00476 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 476 | 6 | 00476 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 486 | 6 | 00486 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 486 | 6 | 00486 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 496 | 6 | 00496 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 496 | 6 | 00496 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 506 | 6 | 00506 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 506 | 6 | 00506 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 516 | 6 | 00516 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 516 | 6 | 00516 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 526 | 6 | 00526 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 526 | 6 | 00526 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 536 | 6 | 00536 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 536 | 6 | 00536 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 546 | 6 | 00546 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 546 | 6 | 00546 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 556 | 6 | 00556 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 556 | 6 | 00556 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 566 | 6 | 00566 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 566 | 6 | 00566 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 576 | 6 | 00576 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 576 | 6 | 00576 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 586 | 6 | 00586 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 586 | 6 | 00586 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 596 | 6 | 00596 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 596 | 6 | 00596 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 606 | 6 | 00606 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 606 | 6 | 00606 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 616 | 6 | 00616 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 616 | 6 | 00616 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 626 | 6 | 00626 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 626 | 6 | 00626 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 636 | 6 | 00636 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 636 | 6 | 00636 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 646 | 6 | 00646 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 646 | 6 | 00646 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 656 | 6 | 00656 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 656 | 6 | 00656 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 666 | 6 | 00666 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 666 | 6 | 00666 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 676 | 6 | 00676 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 676 | 6 | 00676 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 686 | 6 | 00686 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 686 | 6 | 00686 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 696 | 6 | 00696 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 696 | 6 | 00696 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 706 | 6 | 00706 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 706 | 6 | 00706 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 716 | 6 | 00716 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 716 | 6 | 00716 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 726 | 6 | 00726 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 726 | 6 | 00726 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 736 | 6 | 00736 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 736 | 6 | 00736 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 746 | 6 | 00746 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 746 | 6 | 00746 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 756 | 6 | 00756 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 756 | 6 | 00756 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 766 | 6 | 00766 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 766 | 6 | 00766 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 776 | 6 | 00776 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 776 | 6 | 00776 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 786 | 6 | 00786 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 786 | 6 | 00786 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 796 | 6 | 00796 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 796 | 6 | 00796 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 806 | 6 | 00806 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 806 | 6 | 00806 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 816 | 6 | 00816 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 816 | 6 | 00816 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 826 | 6 | 00826 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 826 | 6 | 00826 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 836 | 6 | 00836 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 836 | 6 | 00836 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 846 | 6 | 00846 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 846 | 6 | 00846 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 856 | 6 | 00856 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 856 | 6 | 00856 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 866 | 6 | 00866 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 866 | 6 | 00866 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 876 | 6 | 00876 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 876 | 6 | 00876 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 886 | 6 | 00886 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 886 | 6 | 00886 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 896 | 6 | 00896 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 896 | 6 | 00896 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+ 906 | 6 | 00906 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 906 | 6 | 00906 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo
+ 916 | 6 | 00916 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo | 916 | 6 | 00916 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+ 926 | 6 | 00926 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo | 926 | 6 | 00926 | Tue Jan 27 00:00:00 1970 PST | Tue Jan 27 00:00:00 1970 | 6 | 6 | foo
+ 936 | 6 | 00936 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 936 | 6 | 00936 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo
+ 946 | 6 | 00946 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo | 946 | 6 | 00946 | Mon Feb 16 00:00:00 1970 PST | Mon Feb 16 00:00:00 1970 | 6 | 6 | foo
+ 956 | 6 | 00956 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo | 956 | 6 | 00956 | Thu Feb 26 00:00:00 1970 PST | Thu Feb 26 00:00:00 1970 | 6 | 6 | foo
+ 966 | 6 | 00966 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 966 | 6 | 00966 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo
+ 976 | 6 | 00976 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo | 976 | 6 | 00976 | Wed Mar 18 00:00:00 1970 PST | Wed Mar 18 00:00:00 1970 | 6 | 6 | foo
+ 986 | 6 | 00986 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo | 986 | 6 | 00986 | Sat Mar 28 00:00:00 1970 PST | Sat Mar 28 00:00:00 1970 | 6 | 6 | foo
+ 996 | 6 | 00996 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 996 | 6 | 00996 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo
+(100 rows)
+
+-- bug before 9.3.5 due to sloppy handling of remote-estimate parameters
+SELECT * FROM ft1 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft2 WHERE c1 < 5));
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+(4 rows)
+
+SELECT * FROM ft2 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft1 WHERE c1 < 5));
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST | Mon Jan 05 00:00:00 1970 | 4 | 4 | foo
+(4 rows)
+
+-- ===================================================================
+-- parameterized queries
+-- ===================================================================
+-- simple join
+PREPARE st1(int, int) AS SELECT t1.c3, t2.c3 FROM ft1 t1, ft2 t2 WHERE t1.c1 = $1 AND t2.c1 = $2;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st1(1, 2);
+ QUERY PLAN
+--------------------------------------------------------------------
+ Nested Loop
+ Output: t1.c3, t2.c3
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.c3
+ Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c3
+ Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE (("C 1" = 2))
+(8 rows)
+
+EXECUTE st1(1, 1);
+ c3 | c3
+-------+-------
+ 00001 | 00001
+(1 row)
+
+EXECUTE st1(101, 101);
+ c3 | c3
+-------+-------
+ 00101 | 00101
+(1 row)
+
+-- subquery using stable function (can't be sent to remote)
+PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c4) = '1970-01-17'::date) ORDER BY c1;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st2(10, 20);
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Sort
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Sort Key: t1.c1
+ -> Nested Loop Semi Join
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Join Filter: (t1.c3 = t2.c3)
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 20))
+ -> Materialize
+ Output: t2.c3
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c3
+ Filter: (date(t2.c4) = '01-17-1970'::date)
+ Remote SQL: SELECT c3, c4 FROM "S 1"."T 1" WHERE (("C 1" > 10))
+(15 rows)
+
+EXECUTE st2(10, 20);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+(1 row)
+
+EXECUTE st2(101, 121);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+-----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+(1 row)
+
+-- subquery using immutable function (can be sent to remote)
+PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c5) = '1970-01-17'::date) ORDER BY c1;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st3(10, 20);
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------
+ Sort
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Sort Key: t1.c1
+ -> Nested Loop Semi Join
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Join Filter: (t1.c3 = t2.c3)
+ -> Foreign Scan on public.ft1 t1
+ Output: t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 20))
+ -> Materialize
+ Output: t2.c3
+ -> Foreign Scan on public.ft2 t2
+ Output: t2.c3
+ Remote SQL: SELECT c3 FROM "S 1"."T 1" WHERE (("C 1" > 10)) AND ((date(c5) = '1970-01-17'::date))
+(14 rows)
+
+EXECUTE st3(10, 20);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST | Sat Jan 17 00:00:00 1970 | 6 | 6 | foo
+(1 row)
+
+EXECUTE st3(20, 30);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+----+----+----+----+----+----
+(0 rows)
+
+-- custom plan should be chosen initially
+PREPARE st4(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 = $1;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(3 rows)
+
+-- once we try it enough times, should switch to generic plan
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
+(3 rows)
+
+-- value of $1 should not be sent to remote
+PREPARE st5(user_enum,int) AS SELECT * FROM ft1 t1 WHERE c8 = $1 and c1 = $2;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = 'foo'::user_enum)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 1))
+(4 rows)
+
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Foreign Scan on public.ft1 t1
+ Output: c1, c2, c3, c4, c5, c6, c7, c8
+ Filter: (t1.c8 = $1)
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
+(4 rows)
+
+EXECUTE st5('foo', 1);
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+-- cleanup
+DEALLOCATE st1;
+DEALLOCATE st2;
+DEALLOCATE st3;
+DEALLOCATE st4;
+DEALLOCATE st5;
+-- ===================================================================
+-- used in pl/pgsql function
+-- ===================================================================
+CREATE OR REPLACE FUNCTION f_test(p_c1 int) RETURNS int AS $$
+DECLARE
+ v_c1 int;
+BEGIN
+ SELECT c1 INTO v_c1 FROM ft1 WHERE c1 = p_c1 LIMIT 1;
+ PERFORM c1 FROM ft1 WHERE c1 = p_c1 AND p_c1 = v_c1 LIMIT 1;
+ RETURN v_c1;
+END;
+$$ LANGUAGE plpgsql;
+SELECT f_test(100);
+ f_test
+--------
+ 100
+(1 row)
+
+DROP FUNCTION f_test(int);
+-- ===================================================================
+-- conversion error
+-- ===================================================================
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int;
+SELECT * FROM ft1 WHERE c1 = 1; -- ERROR
+ERROR: invalid input syntax for integer: "foo"
+CONTEXT: column "c8" of foreign table "ft1"
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE user_enum;
+-- ===================================================================
+-- subtransaction
+-- + local/remote error doesn't break cursor
+-- ===================================================================
+BEGIN;
+DECLARE c CURSOR FOR SELECT * FROM ft1 ORDER BY c1;
+FETCH c;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+SAVEPOINT s;
+ERROR OUT; -- ERROR
+ERROR: syntax error at or near "ERROR"
+LINE 1: ERROR OUT;
+ ^
+ROLLBACK TO s;
+FETCH c;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo
+(1 row)
+
+SAVEPOINT s;
+SELECT * FROM ft1 WHERE 1 / (c1 - 1) > 0; -- ERROR
+ERROR: division by zero
+CONTEXT: Remote SQL command: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (((1 / ("C 1" - 1)) > 0))
+ROLLBACK TO s;
+FETCH c;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 3 | 3 | 00003 | Sun Jan 04 00:00:00 1970 PST | Sun Jan 04 00:00:00 1970 | 3 | 3 | foo
+(1 row)
+
+SELECT * FROM ft1 ORDER BY c1 LIMIT 1;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+----+----+-------+------------------------------+--------------------------+----+------------+-----
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo
+(1 row)
+
+COMMIT;
+-- ===================================================================
+-- test handling of collations
+-- ===================================================================
+create table loct3 (f1 text collate "C", f2 text);
+create foreign table ft3 (f1 text collate "C", f2 text)
+ server loopback options (table_name 'loct3');
+-- can be sent to remote
+explain (verbose, costs off) select * from ft3 where f1 = 'foo';
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2
+ Remote SQL: SELECT f1, f2 FROM public.loct3 WHERE ((f1 = 'foo'::text))
+(3 rows)
+
+explain (verbose, costs off) select * from ft3 where f1 COLLATE "C" = 'foo';
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2
+ Remote SQL: SELECT f1, f2 FROM public.loct3 WHERE ((f1 = 'foo'::text))
+(3 rows)
+
+explain (verbose, costs off) select * from ft3 where f2 = 'foo';
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2
+ Remote SQL: SELECT f1, f2 FROM public.loct3 WHERE ((f2 = 'foo'::text))
+(3 rows)
+
+-- can't be sent to remote
+explain (verbose, costs off) select * from ft3 where f1 COLLATE "POSIX" = 'foo';
+ QUERY PLAN
+-----------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2
+ Filter: ((ft3.f1)::text = 'foo'::text)
+ Remote SQL: SELECT f1, f2 FROM public.loct3
+(4 rows)
+
+explain (verbose, costs off) select * from ft3 where f1 = 'foo' COLLATE "C";
+ QUERY PLAN
+-----------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2
+ Filter: (ft3.f1 = 'foo'::text COLLATE "C")
+ Remote SQL: SELECT f1, f2 FROM public.loct3
+(4 rows)
+
+explain (verbose, costs off) select * from ft3 where f2 COLLATE "C" = 'foo';
+ QUERY PLAN
+-----------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2
+ Filter: ((ft3.f2)::text = 'foo'::text)
+ Remote SQL: SELECT f1, f2 FROM public.loct3
+(4 rows)
+
+explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C";
+ QUERY PLAN
+-----------------------------------------------
+ Foreign Scan on public.ft3
+ Output: f1, f2
+ Filter: (ft3.f2 = 'foo'::text COLLATE "C")
+ Remote SQL: SELECT f1, f2 FROM public.loct3
+(4 rows)
+
+-- ===================================================================
+-- test writable foreign table stuff
+-- ===================================================================
+EXPLAIN (verbose, costs off)
+INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Insert on public.ft2
+ Remote SQL: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+ -> Subquery Scan on "*SELECT*"
+ Output: "*SELECT*"."?column?", "*SELECT*"."?column?_1", NULL::integer, "*SELECT*"."?column?_2", NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum
+ -> Limit
+ Output: ((ft2_1.c1 + 1000)), ((ft2_1.c2 + 100)), ((ft2_1.c3 || ft2_1.c3))
+ -> Foreign Scan on public.ft2 ft2_1
+ Output: (ft2_1.c1 + 1000), (ft2_1.c2 + 100), (ft2_1.c3 || ft2_1.c3)
+ Remote SQL: SELECT "C 1", c2, c3 FROM "S 1"."T 1"
+(9 rows)
+
+INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
+INSERT INTO ft2 (c1,c2,c3)
+ VALUES (1101,201,'aaa'), (1102,202,'bbb'), (1103,203,'ccc') RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+-----+----+----+----+------------+----
+ 1101 | 201 | aaa | | | | ft2 |
+ 1102 | 202 | bbb | | | | ft2 |
+ 1103 | 203 | ccc | | | | ft2 |
+(3 rows)
+
+INSERT INTO ft2 (c1,c2,c3) VALUES (1104,204,'ddd'), (1105,205,'eee');
+UPDATE ft2 SET c2 = c2 + 300, c3 = c3 || '_update3' WHERE c1 % 10 = 3;
+UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+--------------------+------------------------------+--------------------------+----+------------+-----
+ 7 | 407 | 00007_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 17 | 407 | 00017_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 27 | 407 | 00027_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 37 | 407 | 00037_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 47 | 407 | 00047_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 57 | 407 | 00057_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 67 | 407 | 00067_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 77 | 407 | 00077_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 87 | 407 | 00087_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 97 | 407 | 00097_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 107 | 407 | 00107_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 117 | 407 | 00117_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 127 | 407 | 00127_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 137 | 407 | 00137_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 147 | 407 | 00147_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 157 | 407 | 00157_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 167 | 407 | 00167_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 177 | 407 | 00177_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 187 | 407 | 00187_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 197 | 407 | 00197_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 207 | 407 | 00207_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 217 | 407 | 00217_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 227 | 407 | 00227_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 237 | 407 | 00237_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 247 | 407 | 00247_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 257 | 407 | 00257_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 267 | 407 | 00267_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 277 | 407 | 00277_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 287 | 407 | 00287_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 297 | 407 | 00297_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 307 | 407 | 00307_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 317 | 407 | 00317_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 327 | 407 | 00327_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 337 | 407 | 00337_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 347 | 407 | 00347_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 357 | 407 | 00357_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 367 | 407 | 00367_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 377 | 407 | 00377_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 387 | 407 | 00387_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 397 | 407 | 00397_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 407 | 407 | 00407_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 417 | 407 | 00417_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 427 | 407 | 00427_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 437 | 407 | 00437_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 447 | 407 | 00447_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 457 | 407 | 00457_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 467 | 407 | 00467_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 477 | 407 | 00477_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 487 | 407 | 00487_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 497 | 407 | 00497_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 507 | 407 | 00507_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 517 | 407 | 00517_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 527 | 407 | 00527_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 537 | 407 | 00537_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 547 | 407 | 00547_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 557 | 407 | 00557_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 567 | 407 | 00567_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 577 | 407 | 00577_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 587 | 407 | 00587_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 597 | 407 | 00597_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 607 | 407 | 00607_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 617 | 407 | 00617_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 627 | 407 | 00627_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 637 | 407 | 00637_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 647 | 407 | 00647_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 657 | 407 | 00657_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 667 | 407 | 00667_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 677 | 407 | 00677_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 687 | 407 | 00687_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 697 | 407 | 00697_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 707 | 407 | 00707_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 717 | 407 | 00717_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 727 | 407 | 00727_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 737 | 407 | 00737_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 747 | 407 | 00747_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 757 | 407 | 00757_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 767 | 407 | 00767_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 777 | 407 | 00777_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 787 | 407 | 00787_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 797 | 407 | 00797_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 807 | 407 | 00807_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 817 | 407 | 00817_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 827 | 407 | 00827_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 837 | 407 | 00837_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 847 | 407 | 00847_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 857 | 407 | 00857_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 867 | 407 | 00867_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 877 | 407 | 00877_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 887 | 407 | 00887_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 897 | 407 | 00897_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 907 | 407 | 00907_update7 | Thu Jan 08 00:00:00 1970 PST | Thu Jan 08 00:00:00 1970 | 7 | 7 | foo
+ 917 | 407 | 00917_update7 | Sun Jan 18 00:00:00 1970 PST | Sun Jan 18 00:00:00 1970 | 7 | 7 | foo
+ 927 | 407 | 00927_update7 | Wed Jan 28 00:00:00 1970 PST | Wed Jan 28 00:00:00 1970 | 7 | 7 | foo
+ 937 | 407 | 00937_update7 | Sat Feb 07 00:00:00 1970 PST | Sat Feb 07 00:00:00 1970 | 7 | 7 | foo
+ 947 | 407 | 00947_update7 | Tue Feb 17 00:00:00 1970 PST | Tue Feb 17 00:00:00 1970 | 7 | 7 | foo
+ 957 | 407 | 00957_update7 | Fri Feb 27 00:00:00 1970 PST | Fri Feb 27 00:00:00 1970 | 7 | 7 | foo
+ 967 | 407 | 00967_update7 | Mon Mar 09 00:00:00 1970 PST | Mon Mar 09 00:00:00 1970 | 7 | 7 | foo
+ 977 | 407 | 00977_update7 | Thu Mar 19 00:00:00 1970 PST | Thu Mar 19 00:00:00 1970 | 7 | 7 | foo
+ 987 | 407 | 00987_update7 | Sun Mar 29 00:00:00 1970 PST | Sun Mar 29 00:00:00 1970 | 7 | 7 | foo
+ 997 | 407 | 00997_update7 | Wed Apr 08 00:00:00 1970 PST | Wed Apr 08 00:00:00 1970 | 7 | 7 | foo
+ 1007 | 507 | 0000700007_update7 | | | | ft2 |
+ 1017 | 507 | 0001700017_update7 | | | | ft2 |
+(102 rows)
+
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
+ FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Update on public.ft2
+ Remote SQL: UPDATE "S 1"."T 1" SET c2 = $2, c3 = $3, c7 = $4 WHERE ctid = $1
+ -> Hash Join
+ Output: ft2.c1, (ft2.c2 + 500), NULL::integer, (ft2.c3 || '_update9'::text), ft2.c4, ft2.c5, ft2.c6, 'ft2 '::character(10), ft2.c8, ft2.ctid, ft1.*
+ Hash Cond: (ft2.c2 = ft1.c1)
+ -> Foreign Scan on public.ft2
+ Output: ft2.c1, ft2.c2, ft2.c3, ft2.c4, ft2.c5, ft2.c6, ft2.c8, ft2.ctid
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c8, ctid FROM "S 1"."T 1" FOR UPDATE
+ -> Hash
+ Output: ft1.*, ft1.c1
+ -> Foreign Scan on public.ft1
+ Output: ft1.*, ft1.c1
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((("C 1" % 10) = 9))
+(13 rows)
+
+UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
+ FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
+EXPLAIN (verbose, costs off)
+ DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
+ QUERY PLAN
+----------------------------------------------------------------------------------------
+ Delete on public.ft2
+ Output: c1, c4
+ Remote SQL: DELETE FROM "S 1"."T 1" WHERE ctid = $1 RETURNING "C 1", c4
+ -> Foreign Scan on public.ft2
+ Output: ctid
+ Remote SQL: SELECT ctid FROM "S 1"."T 1" WHERE ((("C 1" % 10) = 5)) FOR UPDATE
+(6 rows)
+
+DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
+ c1 | c4
+------+------------------------------
+ 5 | Tue Jan 06 00:00:00 1970 PST
+ 15 | Fri Jan 16 00:00:00 1970 PST
+ 25 | Mon Jan 26 00:00:00 1970 PST
+ 35 | Thu Feb 05 00:00:00 1970 PST
+ 45 | Sun Feb 15 00:00:00 1970 PST
+ 55 | Wed Feb 25 00:00:00 1970 PST
+ 65 | Sat Mar 07 00:00:00 1970 PST
+ 75 | Tue Mar 17 00:00:00 1970 PST
+ 85 | Fri Mar 27 00:00:00 1970 PST
+ 95 | Mon Apr 06 00:00:00 1970 PST
+ 105 | Tue Jan 06 00:00:00 1970 PST
+ 115 | Fri Jan 16 00:00:00 1970 PST
+ 125 | Mon Jan 26 00:00:00 1970 PST
+ 135 | Thu Feb 05 00:00:00 1970 PST
+ 145 | Sun Feb 15 00:00:00 1970 PST
+ 155 | Wed Feb 25 00:00:00 1970 PST
+ 165 | Sat Mar 07 00:00:00 1970 PST
+ 175 | Tue Mar 17 00:00:00 1970 PST
+ 185 | Fri Mar 27 00:00:00 1970 PST
+ 195 | Mon Apr 06 00:00:00 1970 PST
+ 205 | Tue Jan 06 00:00:00 1970 PST
+ 215 | Fri Jan 16 00:00:00 1970 PST
+ 225 | Mon Jan 26 00:00:00 1970 PST
+ 235 | Thu Feb 05 00:00:00 1970 PST
+ 245 | Sun Feb 15 00:00:00 1970 PST
+ 255 | Wed Feb 25 00:00:00 1970 PST
+ 265 | Sat Mar 07 00:00:00 1970 PST
+ 275 | Tue Mar 17 00:00:00 1970 PST
+ 285 | Fri Mar 27 00:00:00 1970 PST
+ 295 | Mon Apr 06 00:00:00 1970 PST
+ 305 | Tue Jan 06 00:00:00 1970 PST
+ 315 | Fri Jan 16 00:00:00 1970 PST
+ 325 | Mon Jan 26 00:00:00 1970 PST
+ 335 | Thu Feb 05 00:00:00 1970 PST
+ 345 | Sun Feb 15 00:00:00 1970 PST
+ 355 | Wed Feb 25 00:00:00 1970 PST
+ 365 | Sat Mar 07 00:00:00 1970 PST
+ 375 | Tue Mar 17 00:00:00 1970 PST
+ 385 | Fri Mar 27 00:00:00 1970 PST
+ 395 | Mon Apr 06 00:00:00 1970 PST
+ 405 | Tue Jan 06 00:00:00 1970 PST
+ 415 | Fri Jan 16 00:00:00 1970 PST
+ 425 | Mon Jan 26 00:00:00 1970 PST
+ 435 | Thu Feb 05 00:00:00 1970 PST
+ 445 | Sun Feb 15 00:00:00 1970 PST
+ 455 | Wed Feb 25 00:00:00 1970 PST
+ 465 | Sat Mar 07 00:00:00 1970 PST
+ 475 | Tue Mar 17 00:00:00 1970 PST
+ 485 | Fri Mar 27 00:00:00 1970 PST
+ 495 | Mon Apr 06 00:00:00 1970 PST
+ 505 | Tue Jan 06 00:00:00 1970 PST
+ 515 | Fri Jan 16 00:00:00 1970 PST
+ 525 | Mon Jan 26 00:00:00 1970 PST
+ 535 | Thu Feb 05 00:00:00 1970 PST
+ 545 | Sun Feb 15 00:00:00 1970 PST
+ 555 | Wed Feb 25 00:00:00 1970 PST
+ 565 | Sat Mar 07 00:00:00 1970 PST
+ 575 | Tue Mar 17 00:00:00 1970 PST
+ 585 | Fri Mar 27 00:00:00 1970 PST
+ 595 | Mon Apr 06 00:00:00 1970 PST
+ 605 | Tue Jan 06 00:00:00 1970 PST
+ 615 | Fri Jan 16 00:00:00 1970 PST
+ 625 | Mon Jan 26 00:00:00 1970 PST
+ 635 | Thu Feb 05 00:00:00 1970 PST
+ 645 | Sun Feb 15 00:00:00 1970 PST
+ 655 | Wed Feb 25 00:00:00 1970 PST
+ 665 | Sat Mar 07 00:00:00 1970 PST
+ 675 | Tue Mar 17 00:00:00 1970 PST
+ 685 | Fri Mar 27 00:00:00 1970 PST
+ 695 | Mon Apr 06 00:00:00 1970 PST
+ 705 | Tue Jan 06 00:00:00 1970 PST
+ 715 | Fri Jan 16 00:00:00 1970 PST
+ 725 | Mon Jan 26 00:00:00 1970 PST
+ 735 | Thu Feb 05 00:00:00 1970 PST
+ 745 | Sun Feb 15 00:00:00 1970 PST
+ 755 | Wed Feb 25 00:00:00 1970 PST
+ 765 | Sat Mar 07 00:00:00 1970 PST
+ 775 | Tue Mar 17 00:00:00 1970 PST
+ 785 | Fri Mar 27 00:00:00 1970 PST
+ 795 | Mon Apr 06 00:00:00 1970 PST
+ 805 | Tue Jan 06 00:00:00 1970 PST
+ 815 | Fri Jan 16 00:00:00 1970 PST
+ 825 | Mon Jan 26 00:00:00 1970 PST
+ 835 | Thu Feb 05 00:00:00 1970 PST
+ 845 | Sun Feb 15 00:00:00 1970 PST
+ 855 | Wed Feb 25 00:00:00 1970 PST
+ 865 | Sat Mar 07 00:00:00 1970 PST
+ 875 | Tue Mar 17 00:00:00 1970 PST
+ 885 | Fri Mar 27 00:00:00 1970 PST
+ 895 | Mon Apr 06 00:00:00 1970 PST
+ 905 | Tue Jan 06 00:00:00 1970 PST
+ 915 | Fri Jan 16 00:00:00 1970 PST
+ 925 | Mon Jan 26 00:00:00 1970 PST
+ 935 | Thu Feb 05 00:00:00 1970 PST
+ 945 | Sun Feb 15 00:00:00 1970 PST
+ 955 | Wed Feb 25 00:00:00 1970 PST
+ 965 | Sat Mar 07 00:00:00 1970 PST
+ 975 | Tue Mar 17 00:00:00 1970 PST
+ 985 | Fri Mar 27 00:00:00 1970 PST
+ 995 | Mon Apr 06 00:00:00 1970 PST
+ 1005 |
+ 1015 |
+ 1105 |
+(103 rows)
+
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------------
+ Delete on public.ft2
+ Remote SQL: DELETE FROM "S 1"."T 1" WHERE ctid = $1
+ -> Hash Join
+ Output: ft2.ctid, ft1.*
+ Hash Cond: (ft2.c2 = ft1.c1)
+ -> Foreign Scan on public.ft2
+ Output: ft2.ctid, ft2.c2
+ Remote SQL: SELECT c2, ctid FROM "S 1"."T 1" FOR UPDATE
+ -> Hash
+ Output: ft1.*, ft1.c1
+ -> Foreign Scan on public.ft1
+ Output: ft1.*, ft1.c1
+ Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((("C 1" % 10) = 2))
+(13 rows)
+
+DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
+SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1;
+ c1 | c2 | c3 | c4
+------+-----+--------------------+------------------------------
+ 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST
+ 3 | 303 | 00003_update3 | Sun Jan 04 00:00:00 1970 PST
+ 4 | 4 | 00004 | Mon Jan 05 00:00:00 1970 PST
+ 6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST
+ 7 | 407 | 00007_update7 | Thu Jan 08 00:00:00 1970 PST
+ 8 | 8 | 00008 | Fri Jan 09 00:00:00 1970 PST
+ 9 | 509 | 00009_update9 | Sat Jan 10 00:00:00 1970 PST
+ 10 | 0 | 00010 | Sun Jan 11 00:00:00 1970 PST
+ 11 | 1 | 00011 | Mon Jan 12 00:00:00 1970 PST
+ 13 | 303 | 00013_update3 | Wed Jan 14 00:00:00 1970 PST
+ 14 | 4 | 00014 | Thu Jan 15 00:00:00 1970 PST
+ 16 | 6 | 00016 | Sat Jan 17 00:00:00 1970 PST
+ 17 | 407 | 00017_update7 | Sun Jan 18 00:00:00 1970 PST
+ 18 | 8 | 00018 | Mon Jan 19 00:00:00 1970 PST
+ 19 | 509 | 00019_update9 | Tue Jan 20 00:00:00 1970 PST
+ 20 | 0 | 00020 | Wed Jan 21 00:00:00 1970 PST
+ 21 | 1 | 00021 | Thu Jan 22 00:00:00 1970 PST
+ 23 | 303 | 00023_update3 | Sat Jan 24 00:00:00 1970 PST
+ 24 | 4 | 00024 | Sun Jan 25 00:00:00 1970 PST
+ 26 | 6 | 00026 | Tue Jan 27 00:00:00 1970 PST
+ 27 | 407 | 00027_update7 | Wed Jan 28 00:00:00 1970 PST
+ 28 | 8 | 00028 | Thu Jan 29 00:00:00 1970 PST
+ 29 | 509 | 00029_update9 | Fri Jan 30 00:00:00 1970 PST
+ 30 | 0 | 00030 | Sat Jan 31 00:00:00 1970 PST
+ 31 | 1 | 00031 | Sun Feb 01 00:00:00 1970 PST
+ 33 | 303 | 00033_update3 | Tue Feb 03 00:00:00 1970 PST
+ 34 | 4 | 00034 | Wed Feb 04 00:00:00 1970 PST
+ 36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST
+ 37 | 407 | 00037_update7 | Sat Feb 07 00:00:00 1970 PST
+ 38 | 8 | 00038 | Sun Feb 08 00:00:00 1970 PST
+ 39 | 509 | 00039_update9 | Mon Feb 09 00:00:00 1970 PST
+ 40 | 0 | 00040 | Tue Feb 10 00:00:00 1970 PST
+ 41 | 1 | 00041 | Wed Feb 11 00:00:00 1970 PST
+ 43 | 303 | 00043_update3 | Fri Feb 13 00:00:00 1970 PST
+ 44 | 4 | 00044 | Sat Feb 14 00:00:00 1970 PST
+ 46 | 6 | 00046 | Mon Feb 16 00:00:00 1970 PST
+ 47 | 407 | 00047_update7 | Tue Feb 17 00:00:00 1970 PST
+ 48 | 8 | 00048 | Wed Feb 18 00:00:00 1970 PST
+ 49 | 509 | 00049_update9 | Thu Feb 19 00:00:00 1970 PST
+ 50 | 0 | 00050 | Fri Feb 20 00:00:00 1970 PST
+ 51 | 1 | 00051 | Sat Feb 21 00:00:00 1970 PST
+ 53 | 303 | 00053_update3 | Mon Feb 23 00:00:00 1970 PST
+ 54 | 4 | 00054 | Tue Feb 24 00:00:00 1970 PST
+ 56 | 6 | 00056 | Thu Feb 26 00:00:00 1970 PST
+ 57 | 407 | 00057_update7 | Fri Feb 27 00:00:00 1970 PST
+ 58 | 8 | 00058 | Sat Feb 28 00:00:00 1970 PST
+ 59 | 509 | 00059_update9 | Sun Mar 01 00:00:00 1970 PST
+ 60 | 0 | 00060 | Mon Mar 02 00:00:00 1970 PST
+ 61 | 1 | 00061 | Tue Mar 03 00:00:00 1970 PST
+ 63 | 303 | 00063_update3 | Thu Mar 05 00:00:00 1970 PST
+ 64 | 4 | 00064 | Fri Mar 06 00:00:00 1970 PST
+ 66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST
+ 67 | 407 | 00067_update7 | Mon Mar 09 00:00:00 1970 PST
+ 68 | 8 | 00068 | Tue Mar 10 00:00:00 1970 PST
+ 69 | 509 | 00069_update9 | Wed Mar 11 00:00:00 1970 PST
+ 70 | 0 | 00070 | Thu Mar 12 00:00:00 1970 PST
+ 71 | 1 | 00071 | Fri Mar 13 00:00:00 1970 PST
+ 73 | 303 | 00073_update3 | Sun Mar 15 00:00:00 1970 PST
+ 74 | 4 | 00074 | Mon Mar 16 00:00:00 1970 PST
+ 76 | 6 | 00076 | Wed Mar 18 00:00:00 1970 PST
+ 77 | 407 | 00077_update7 | Thu Mar 19 00:00:00 1970 PST
+ 78 | 8 | 00078 | Fri Mar 20 00:00:00 1970 PST
+ 79 | 509 | 00079_update9 | Sat Mar 21 00:00:00 1970 PST
+ 80 | 0 | 00080 | Sun Mar 22 00:00:00 1970 PST
+ 81 | 1 | 00081 | Mon Mar 23 00:00:00 1970 PST
+ 83 | 303 | 00083_update3 | Wed Mar 25 00:00:00 1970 PST
+ 84 | 4 | 00084 | Thu Mar 26 00:00:00 1970 PST
+ 86 | 6 | 00086 | Sat Mar 28 00:00:00 1970 PST
+ 87 | 407 | 00087_update7 | Sun Mar 29 00:00:00 1970 PST
+ 88 | 8 | 00088 | Mon Mar 30 00:00:00 1970 PST
+ 89 | 509 | 00089_update9 | Tue Mar 31 00:00:00 1970 PST
+ 90 | 0 | 00090 | Wed Apr 01 00:00:00 1970 PST
+ 91 | 1 | 00091 | Thu Apr 02 00:00:00 1970 PST
+ 93 | 303 | 00093_update3 | Sat Apr 04 00:00:00 1970 PST
+ 94 | 4 | 00094 | Sun Apr 05 00:00:00 1970 PST
+ 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST
+ 97 | 407 | 00097_update7 | Wed Apr 08 00:00:00 1970 PST
+ 98 | 8 | 00098 | Thu Apr 09 00:00:00 1970 PST
+ 99 | 509 | 00099_update9 | Fri Apr 10 00:00:00 1970 PST
+ 100 | 0 | 00100 | Thu Jan 01 00:00:00 1970 PST
+ 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST
+ 103 | 303 | 00103_update3 | Sun Jan 04 00:00:00 1970 PST
+ 104 | 4 | 00104 | Mon Jan 05 00:00:00 1970 PST
+ 106 | 6 | 00106 | Wed Jan 07 00:00:00 1970 PST
+ 107 | 407 | 00107_update7 | Thu Jan 08 00:00:00 1970 PST
+ 108 | 8 | 00108 | Fri Jan 09 00:00:00 1970 PST
+ 109 | 509 | 00109_update9 | Sat Jan 10 00:00:00 1970 PST
+ 110 | 0 | 00110 | Sun Jan 11 00:00:00 1970 PST
+ 111 | 1 | 00111 | Mon Jan 12 00:00:00 1970 PST
+ 113 | 303 | 00113_update3 | Wed Jan 14 00:00:00 1970 PST
+ 114 | 4 | 00114 | Thu Jan 15 00:00:00 1970 PST
+ 116 | 6 | 00116 | Sat Jan 17 00:00:00 1970 PST
+ 117 | 407 | 00117_update7 | Sun Jan 18 00:00:00 1970 PST
+ 118 | 8 | 00118 | Mon Jan 19 00:00:00 1970 PST
+ 119 | 509 | 00119_update9 | Tue Jan 20 00:00:00 1970 PST
+ 120 | 0 | 00120 | Wed Jan 21 00:00:00 1970 PST
+ 121 | 1 | 00121 | Thu Jan 22 00:00:00 1970 PST
+ 123 | 303 | 00123_update3 | Sat Jan 24 00:00:00 1970 PST
+ 124 | 4 | 00124 | Sun Jan 25 00:00:00 1970 PST
+ 126 | 6 | 00126 | Tue Jan 27 00:00:00 1970 PST
+ 127 | 407 | 00127_update7 | Wed Jan 28 00:00:00 1970 PST
+ 128 | 8 | 00128 | Thu Jan 29 00:00:00 1970 PST
+ 129 | 509 | 00129_update9 | Fri Jan 30 00:00:00 1970 PST
+ 130 | 0 | 00130 | Sat Jan 31 00:00:00 1970 PST
+ 131 | 1 | 00131 | Sun Feb 01 00:00:00 1970 PST
+ 133 | 303 | 00133_update3 | Tue Feb 03 00:00:00 1970 PST
+ 134 | 4 | 00134 | Wed Feb 04 00:00:00 1970 PST
+ 136 | 6 | 00136 | Fri Feb 06 00:00:00 1970 PST
+ 137 | 407 | 00137_update7 | Sat Feb 07 00:00:00 1970 PST
+ 138 | 8 | 00138 | Sun Feb 08 00:00:00 1970 PST
+ 139 | 509 | 00139_update9 | Mon Feb 09 00:00:00 1970 PST
+ 140 | 0 | 00140 | Tue Feb 10 00:00:00 1970 PST
+ 141 | 1 | 00141 | Wed Feb 11 00:00:00 1970 PST
+ 143 | 303 | 00143_update3 | Fri Feb 13 00:00:00 1970 PST
+ 144 | 4 | 00144 | Sat Feb 14 00:00:00 1970 PST
+ 146 | 6 | 00146 | Mon Feb 16 00:00:00 1970 PST
+ 147 | 407 | 00147_update7 | Tue Feb 17 00:00:00 1970 PST
+ 148 | 8 | 00148 | Wed Feb 18 00:00:00 1970 PST
+ 149 | 509 | 00149_update9 | Thu Feb 19 00:00:00 1970 PST
+ 150 | 0 | 00150 | Fri Feb 20 00:00:00 1970 PST
+ 151 | 1 | 00151 | Sat Feb 21 00:00:00 1970 PST
+ 153 | 303 | 00153_update3 | Mon Feb 23 00:00:00 1970 PST
+ 154 | 4 | 00154 | Tue Feb 24 00:00:00 1970 PST
+ 156 | 6 | 00156 | Thu Feb 26 00:00:00 1970 PST
+ 157 | 407 | 00157_update7 | Fri Feb 27 00:00:00 1970 PST
+ 158 | 8 | 00158 | Sat Feb 28 00:00:00 1970 PST
+ 159 | 509 | 00159_update9 | Sun Mar 01 00:00:00 1970 PST
+ 160 | 0 | 00160 | Mon Mar 02 00:00:00 1970 PST
+ 161 | 1 | 00161 | Tue Mar 03 00:00:00 1970 PST
+ 163 | 303 | 00163_update3 | Thu Mar 05 00:00:00 1970 PST
+ 164 | 4 | 00164 | Fri Mar 06 00:00:00 1970 PST
+ 166 | 6 | 00166 | Sun Mar 08 00:00:00 1970 PST
+ 167 | 407 | 00167_update7 | Mon Mar 09 00:00:00 1970 PST
+ 168 | 8 | 00168 | Tue Mar 10 00:00:00 1970 PST
+ 169 | 509 | 00169_update9 | Wed Mar 11 00:00:00 1970 PST
+ 170 | 0 | 00170 | Thu Mar 12 00:00:00 1970 PST
+ 171 | 1 | 00171 | Fri Mar 13 00:00:00 1970 PST
+ 173 | 303 | 00173_update3 | Sun Mar 15 00:00:00 1970 PST
+ 174 | 4 | 00174 | Mon Mar 16 00:00:00 1970 PST
+ 176 | 6 | 00176 | Wed Mar 18 00:00:00 1970 PST
+ 177 | 407 | 00177_update7 | Thu Mar 19 00:00:00 1970 PST
+ 178 | 8 | 00178 | Fri Mar 20 00:00:00 1970 PST
+ 179 | 509 | 00179_update9 | Sat Mar 21 00:00:00 1970 PST
+ 180 | 0 | 00180 | Sun Mar 22 00:00:00 1970 PST
+ 181 | 1 | 00181 | Mon Mar 23 00:00:00 1970 PST
+ 183 | 303 | 00183_update3 | Wed Mar 25 00:00:00 1970 PST
+ 184 | 4 | 00184 | Thu Mar 26 00:00:00 1970 PST
+ 186 | 6 | 00186 | Sat Mar 28 00:00:00 1970 PST
+ 187 | 407 | 00187_update7 | Sun Mar 29 00:00:00 1970 PST
+ 188 | 8 | 00188 | Mon Mar 30 00:00:00 1970 PST
+ 189 | 509 | 00189_update9 | Tue Mar 31 00:00:00 1970 PST
+ 190 | 0 | 00190 | Wed Apr 01 00:00:00 1970 PST
+ 191 | 1 | 00191 | Thu Apr 02 00:00:00 1970 PST
+ 193 | 303 | 00193_update3 | Sat Apr 04 00:00:00 1970 PST
+ 194 | 4 | 00194 | Sun Apr 05 00:00:00 1970 PST
+ 196 | 6 | 00196 | Tue Apr 07 00:00:00 1970 PST
+ 197 | 407 | 00197_update7 | Wed Apr 08 00:00:00 1970 PST
+ 198 | 8 | 00198 | Thu Apr 09 00:00:00 1970 PST
+ 199 | 509 | 00199_update9 | Fri Apr 10 00:00:00 1970 PST
+ 200 | 0 | 00200 | Thu Jan 01 00:00:00 1970 PST
+ 201 | 1 | 00201 | Fri Jan 02 00:00:00 1970 PST
+ 203 | 303 | 00203_update3 | Sun Jan 04 00:00:00 1970 PST
+ 204 | 4 | 00204 | Mon Jan 05 00:00:00 1970 PST
+ 206 | 6 | 00206 | Wed Jan 07 00:00:00 1970 PST
+ 207 | 407 | 00207_update7 | Thu Jan 08 00:00:00 1970 PST
+ 208 | 8 | 00208 | Fri Jan 09 00:00:00 1970 PST
+ 209 | 509 | 00209_update9 | Sat Jan 10 00:00:00 1970 PST
+ 210 | 0 | 00210 | Sun Jan 11 00:00:00 1970 PST
+ 211 | 1 | 00211 | Mon Jan 12 00:00:00 1970 PST
+ 213 | 303 | 00213_update3 | Wed Jan 14 00:00:00 1970 PST
+ 214 | 4 | 00214 | Thu Jan 15 00:00:00 1970 PST
+ 216 | 6 | 00216 | Sat Jan 17 00:00:00 1970 PST
+ 217 | 407 | 00217_update7 | Sun Jan 18 00:00:00 1970 PST
+ 218 | 8 | 00218 | Mon Jan 19 00:00:00 1970 PST
+ 219 | 509 | 00219_update9 | Tue Jan 20 00:00:00 1970 PST
+ 220 | 0 | 00220 | Wed Jan 21 00:00:00 1970 PST
+ 221 | 1 | 00221 | Thu Jan 22 00:00:00 1970 PST
+ 223 | 303 | 00223_update3 | Sat Jan 24 00:00:00 1970 PST
+ 224 | 4 | 00224 | Sun Jan 25 00:00:00 1970 PST
+ 226 | 6 | 00226 | Tue Jan 27 00:00:00 1970 PST
+ 227 | 407 | 00227_update7 | Wed Jan 28 00:00:00 1970 PST
+ 228 | 8 | 00228 | Thu Jan 29 00:00:00 1970 PST
+ 229 | 509 | 00229_update9 | Fri Jan 30 00:00:00 1970 PST
+ 230 | 0 | 00230 | Sat Jan 31 00:00:00 1970 PST
+ 231 | 1 | 00231 | Sun Feb 01 00:00:00 1970 PST
+ 233 | 303 | 00233_update3 | Tue Feb 03 00:00:00 1970 PST
+ 234 | 4 | 00234 | Wed Feb 04 00:00:00 1970 PST
+ 236 | 6 | 00236 | Fri Feb 06 00:00:00 1970 PST
+ 237 | 407 | 00237_update7 | Sat Feb 07 00:00:00 1970 PST
+ 238 | 8 | 00238 | Sun Feb 08 00:00:00 1970 PST
+ 239 | 509 | 00239_update9 | Mon Feb 09 00:00:00 1970 PST
+ 240 | 0 | 00240 | Tue Feb 10 00:00:00 1970 PST
+ 241 | 1 | 00241 | Wed Feb 11 00:00:00 1970 PST
+ 243 | 303 | 00243_update3 | Fri Feb 13 00:00:00 1970 PST
+ 244 | 4 | 00244 | Sat Feb 14 00:00:00 1970 PST
+ 246 | 6 | 00246 | Mon Feb 16 00:00:00 1970 PST
+ 247 | 407 | 00247_update7 | Tue Feb 17 00:00:00 1970 PST
+ 248 | 8 | 00248 | Wed Feb 18 00:00:00 1970 PST
+ 249 | 509 | 00249_update9 | Thu Feb 19 00:00:00 1970 PST
+ 250 | 0 | 00250 | Fri Feb 20 00:00:00 1970 PST
+ 251 | 1 | 00251 | Sat Feb 21 00:00:00 1970 PST
+ 253 | 303 | 00253_update3 | Mon Feb 23 00:00:00 1970 PST
+ 254 | 4 | 00254 | Tue Feb 24 00:00:00 1970 PST
+ 256 | 6 | 00256 | Thu Feb 26 00:00:00 1970 PST
+ 257 | 407 | 00257_update7 | Fri Feb 27 00:00:00 1970 PST
+ 258 | 8 | 00258 | Sat Feb 28 00:00:00 1970 PST
+ 259 | 509 | 00259_update9 | Sun Mar 01 00:00:00 1970 PST
+ 260 | 0 | 00260 | Mon Mar 02 00:00:00 1970 PST
+ 261 | 1 | 00261 | Tue Mar 03 00:00:00 1970 PST
+ 263 | 303 | 00263_update3 | Thu Mar 05 00:00:00 1970 PST
+ 264 | 4 | 00264 | Fri Mar 06 00:00:00 1970 PST
+ 266 | 6 | 00266 | Sun Mar 08 00:00:00 1970 PST
+ 267 | 407 | 00267_update7 | Mon Mar 09 00:00:00 1970 PST
+ 268 | 8 | 00268 | Tue Mar 10 00:00:00 1970 PST
+ 269 | 509 | 00269_update9 | Wed Mar 11 00:00:00 1970 PST
+ 270 | 0 | 00270 | Thu Mar 12 00:00:00 1970 PST
+ 271 | 1 | 00271 | Fri Mar 13 00:00:00 1970 PST
+ 273 | 303 | 00273_update3 | Sun Mar 15 00:00:00 1970 PST
+ 274 | 4 | 00274 | Mon Mar 16 00:00:00 1970 PST
+ 276 | 6 | 00276 | Wed Mar 18 00:00:00 1970 PST
+ 277 | 407 | 00277_update7 | Thu Mar 19 00:00:00 1970 PST
+ 278 | 8 | 00278 | Fri Mar 20 00:00:00 1970 PST
+ 279 | 509 | 00279_update9 | Sat Mar 21 00:00:00 1970 PST
+ 280 | 0 | 00280 | Sun Mar 22 00:00:00 1970 PST
+ 281 | 1 | 00281 | Mon Mar 23 00:00:00 1970 PST
+ 283 | 303 | 00283_update3 | Wed Mar 25 00:00:00 1970 PST
+ 284 | 4 | 00284 | Thu Mar 26 00:00:00 1970 PST
+ 286 | 6 | 00286 | Sat Mar 28 00:00:00 1970 PST
+ 287 | 407 | 00287_update7 | Sun Mar 29 00:00:00 1970 PST
+ 288 | 8 | 00288 | Mon Mar 30 00:00:00 1970 PST
+ 289 | 509 | 00289_update9 | Tue Mar 31 00:00:00 1970 PST
+ 290 | 0 | 00290 | Wed Apr 01 00:00:00 1970 PST
+ 291 | 1 | 00291 | Thu Apr 02 00:00:00 1970 PST
+ 293 | 303 | 00293_update3 | Sat Apr 04 00:00:00 1970 PST
+ 294 | 4 | 00294 | Sun Apr 05 00:00:00 1970 PST
+ 296 | 6 | 00296 | Tue Apr 07 00:00:00 1970 PST
+ 297 | 407 | 00297_update7 | Wed Apr 08 00:00:00 1970 PST
+ 298 | 8 | 00298 | Thu Apr 09 00:00:00 1970 PST
+ 299 | 509 | 00299_update9 | Fri Apr 10 00:00:00 1970 PST
+ 300 | 0 | 00300 | Thu Jan 01 00:00:00 1970 PST
+ 301 | 1 | 00301 | Fri Jan 02 00:00:00 1970 PST
+ 303 | 303 | 00303_update3 | Sun Jan 04 00:00:00 1970 PST
+ 304 | 4 | 00304 | Mon Jan 05 00:00:00 1970 PST
+ 306 | 6 | 00306 | Wed Jan 07 00:00:00 1970 PST
+ 307 | 407 | 00307_update7 | Thu Jan 08 00:00:00 1970 PST
+ 308 | 8 | 00308 | Fri Jan 09 00:00:00 1970 PST
+ 309 | 509 | 00309_update9 | Sat Jan 10 00:00:00 1970 PST
+ 310 | 0 | 00310 | Sun Jan 11 00:00:00 1970 PST
+ 311 | 1 | 00311 | Mon Jan 12 00:00:00 1970 PST
+ 313 | 303 | 00313_update3 | Wed Jan 14 00:00:00 1970 PST
+ 314 | 4 | 00314 | Thu Jan 15 00:00:00 1970 PST
+ 316 | 6 | 00316 | Sat Jan 17 00:00:00 1970 PST
+ 317 | 407 | 00317_update7 | Sun Jan 18 00:00:00 1970 PST
+ 318 | 8 | 00318 | Mon Jan 19 00:00:00 1970 PST
+ 319 | 509 | 00319_update9 | Tue Jan 20 00:00:00 1970 PST
+ 320 | 0 | 00320 | Wed Jan 21 00:00:00 1970 PST
+ 321 | 1 | 00321 | Thu Jan 22 00:00:00 1970 PST
+ 323 | 303 | 00323_update3 | Sat Jan 24 00:00:00 1970 PST
+ 324 | 4 | 00324 | Sun Jan 25 00:00:00 1970 PST
+ 326 | 6 | 00326 | Tue Jan 27 00:00:00 1970 PST
+ 327 | 407 | 00327_update7 | Wed Jan 28 00:00:00 1970 PST
+ 328 | 8 | 00328 | Thu Jan 29 00:00:00 1970 PST
+ 329 | 509 | 00329_update9 | Fri Jan 30 00:00:00 1970 PST
+ 330 | 0 | 00330 | Sat Jan 31 00:00:00 1970 PST
+ 331 | 1 | 00331 | Sun Feb 01 00:00:00 1970 PST
+ 333 | 303 | 00333_update3 | Tue Feb 03 00:00:00 1970 PST
+ 334 | 4 | 00334 | Wed Feb 04 00:00:00 1970 PST
+ 336 | 6 | 00336 | Fri Feb 06 00:00:00 1970 PST
+ 337 | 407 | 00337_update7 | Sat Feb 07 00:00:00 1970 PST
+ 338 | 8 | 00338 | Sun Feb 08 00:00:00 1970 PST
+ 339 | 509 | 00339_update9 | Mon Feb 09 00:00:00 1970 PST
+ 340 | 0 | 00340 | Tue Feb 10 00:00:00 1970 PST
+ 341 | 1 | 00341 | Wed Feb 11 00:00:00 1970 PST
+ 343 | 303 | 00343_update3 | Fri Feb 13 00:00:00 1970 PST
+ 344 | 4 | 00344 | Sat Feb 14 00:00:00 1970 PST
+ 346 | 6 | 00346 | Mon Feb 16 00:00:00 1970 PST
+ 347 | 407 | 00347_update7 | Tue Feb 17 00:00:00 1970 PST
+ 348 | 8 | 00348 | Wed Feb 18 00:00:00 1970 PST
+ 349 | 509 | 00349_update9 | Thu Feb 19 00:00:00 1970 PST
+ 350 | 0 | 00350 | Fri Feb 20 00:00:00 1970 PST
+ 351 | 1 | 00351 | Sat Feb 21 00:00:00 1970 PST
+ 353 | 303 | 00353_update3 | Mon Feb 23 00:00:00 1970 PST
+ 354 | 4 | 00354 | Tue Feb 24 00:00:00 1970 PST
+ 356 | 6 | 00356 | Thu Feb 26 00:00:00 1970 PST
+ 357 | 407 | 00357_update7 | Fri Feb 27 00:00:00 1970 PST
+ 358 | 8 | 00358 | Sat Feb 28 00:00:00 1970 PST
+ 359 | 509 | 00359_update9 | Sun Mar 01 00:00:00 1970 PST
+ 360 | 0 | 00360 | Mon Mar 02 00:00:00 1970 PST
+ 361 | 1 | 00361 | Tue Mar 03 00:00:00 1970 PST
+ 363 | 303 | 00363_update3 | Thu Mar 05 00:00:00 1970 PST
+ 364 | 4 | 00364 | Fri Mar 06 00:00:00 1970 PST
+ 366 | 6 | 00366 | Sun Mar 08 00:00:00 1970 PST
+ 367 | 407 | 00367_update7 | Mon Mar 09 00:00:00 1970 PST
+ 368 | 8 | 00368 | Tue Mar 10 00:00:00 1970 PST
+ 369 | 509 | 00369_update9 | Wed Mar 11 00:00:00 1970 PST
+ 370 | 0 | 00370 | Thu Mar 12 00:00:00 1970 PST
+ 371 | 1 | 00371 | Fri Mar 13 00:00:00 1970 PST
+ 373 | 303 | 00373_update3 | Sun Mar 15 00:00:00 1970 PST
+ 374 | 4 | 00374 | Mon Mar 16 00:00:00 1970 PST
+ 376 | 6 | 00376 | Wed Mar 18 00:00:00 1970 PST
+ 377 | 407 | 00377_update7 | Thu Mar 19 00:00:00 1970 PST
+ 378 | 8 | 00378 | Fri Mar 20 00:00:00 1970 PST
+ 379 | 509 | 00379_update9 | Sat Mar 21 00:00:00 1970 PST
+ 380 | 0 | 00380 | Sun Mar 22 00:00:00 1970 PST
+ 381 | 1 | 00381 | Mon Mar 23 00:00:00 1970 PST
+ 383 | 303 | 00383_update3 | Wed Mar 25 00:00:00 1970 PST
+ 384 | 4 | 00384 | Thu Mar 26 00:00:00 1970 PST
+ 386 | 6 | 00386 | Sat Mar 28 00:00:00 1970 PST
+ 387 | 407 | 00387_update7 | Sun Mar 29 00:00:00 1970 PST
+ 388 | 8 | 00388 | Mon Mar 30 00:00:00 1970 PST
+ 389 | 509 | 00389_update9 | Tue Mar 31 00:00:00 1970 PST
+ 390 | 0 | 00390 | Wed Apr 01 00:00:00 1970 PST
+ 391 | 1 | 00391 | Thu Apr 02 00:00:00 1970 PST
+ 393 | 303 | 00393_update3 | Sat Apr 04 00:00:00 1970 PST
+ 394 | 4 | 00394 | Sun Apr 05 00:00:00 1970 PST
+ 396 | 6 | 00396 | Tue Apr 07 00:00:00 1970 PST
+ 397 | 407 | 00397_update7 | Wed Apr 08 00:00:00 1970 PST
+ 398 | 8 | 00398 | Thu Apr 09 00:00:00 1970 PST
+ 399 | 509 | 00399_update9 | Fri Apr 10 00:00:00 1970 PST
+ 400 | 0 | 00400 | Thu Jan 01 00:00:00 1970 PST
+ 401 | 1 | 00401 | Fri Jan 02 00:00:00 1970 PST
+ 403 | 303 | 00403_update3 | Sun Jan 04 00:00:00 1970 PST
+ 404 | 4 | 00404 | Mon Jan 05 00:00:00 1970 PST
+ 406 | 6 | 00406 | Wed Jan 07 00:00:00 1970 PST
+ 407 | 407 | 00407_update7 | Thu Jan 08 00:00:00 1970 PST
+ 408 | 8 | 00408 | Fri Jan 09 00:00:00 1970 PST
+ 409 | 509 | 00409_update9 | Sat Jan 10 00:00:00 1970 PST
+ 410 | 0 | 00410 | Sun Jan 11 00:00:00 1970 PST
+ 411 | 1 | 00411 | Mon Jan 12 00:00:00 1970 PST
+ 413 | 303 | 00413_update3 | Wed Jan 14 00:00:00 1970 PST
+ 414 | 4 | 00414 | Thu Jan 15 00:00:00 1970 PST
+ 416 | 6 | 00416 | Sat Jan 17 00:00:00 1970 PST
+ 417 | 407 | 00417_update7 | Sun Jan 18 00:00:00 1970 PST
+ 418 | 8 | 00418 | Mon Jan 19 00:00:00 1970 PST
+ 419 | 509 | 00419_update9 | Tue Jan 20 00:00:00 1970 PST
+ 420 | 0 | 00420 | Wed Jan 21 00:00:00 1970 PST
+ 421 | 1 | 00421 | Thu Jan 22 00:00:00 1970 PST
+ 423 | 303 | 00423_update3 | Sat Jan 24 00:00:00 1970 PST
+ 424 | 4 | 00424 | Sun Jan 25 00:00:00 1970 PST
+ 426 | 6 | 00426 | Tue Jan 27 00:00:00 1970 PST
+ 427 | 407 | 00427_update7 | Wed Jan 28 00:00:00 1970 PST
+ 428 | 8 | 00428 | Thu Jan 29 00:00:00 1970 PST
+ 429 | 509 | 00429_update9 | Fri Jan 30 00:00:00 1970 PST
+ 430 | 0 | 00430 | Sat Jan 31 00:00:00 1970 PST
+ 431 | 1 | 00431 | Sun Feb 01 00:00:00 1970 PST
+ 433 | 303 | 00433_update3 | Tue Feb 03 00:00:00 1970 PST
+ 434 | 4 | 00434 | Wed Feb 04 00:00:00 1970 PST
+ 436 | 6 | 00436 | Fri Feb 06 00:00:00 1970 PST
+ 437 | 407 | 00437_update7 | Sat Feb 07 00:00:00 1970 PST
+ 438 | 8 | 00438 | Sun Feb 08 00:00:00 1970 PST
+ 439 | 509 | 00439_update9 | Mon Feb 09 00:00:00 1970 PST
+ 440 | 0 | 00440 | Tue Feb 10 00:00:00 1970 PST
+ 441 | 1 | 00441 | Wed Feb 11 00:00:00 1970 PST
+ 443 | 303 | 00443_update3 | Fri Feb 13 00:00:00 1970 PST
+ 444 | 4 | 00444 | Sat Feb 14 00:00:00 1970 PST
+ 446 | 6 | 00446 | Mon Feb 16 00:00:00 1970 PST
+ 447 | 407 | 00447_update7 | Tue Feb 17 00:00:00 1970 PST
+ 448 | 8 | 00448 | Wed Feb 18 00:00:00 1970 PST
+ 449 | 509 | 00449_update9 | Thu Feb 19 00:00:00 1970 PST
+ 450 | 0 | 00450 | Fri Feb 20 00:00:00 1970 PST
+ 451 | 1 | 00451 | Sat Feb 21 00:00:00 1970 PST
+ 453 | 303 | 00453_update3 | Mon Feb 23 00:00:00 1970 PST
+ 454 | 4 | 00454 | Tue Feb 24 00:00:00 1970 PST
+ 456 | 6 | 00456 | Thu Feb 26 00:00:00 1970 PST
+ 457 | 407 | 00457_update7 | Fri Feb 27 00:00:00 1970 PST
+ 458 | 8 | 00458 | Sat Feb 28 00:00:00 1970 PST
+ 459 | 509 | 00459_update9 | Sun Mar 01 00:00:00 1970 PST
+ 460 | 0 | 00460 | Mon Mar 02 00:00:00 1970 PST
+ 461 | 1 | 00461 | Tue Mar 03 00:00:00 1970 PST
+ 463 | 303 | 00463_update3 | Thu Mar 05 00:00:00 1970 PST
+ 464 | 4 | 00464 | Fri Mar 06 00:00:00 1970 PST
+ 466 | 6 | 00466 | Sun Mar 08 00:00:00 1970 PST
+ 467 | 407 | 00467_update7 | Mon Mar 09 00:00:00 1970 PST
+ 468 | 8 | 00468 | Tue Mar 10 00:00:00 1970 PST
+ 469 | 509 | 00469_update9 | Wed Mar 11 00:00:00 1970 PST
+ 470 | 0 | 00470 | Thu Mar 12 00:00:00 1970 PST
+ 471 | 1 | 00471 | Fri Mar 13 00:00:00 1970 PST
+ 473 | 303 | 00473_update3 | Sun Mar 15 00:00:00 1970 PST
+ 474 | 4 | 00474 | Mon Mar 16 00:00:00 1970 PST
+ 476 | 6 | 00476 | Wed Mar 18 00:00:00 1970 PST
+ 477 | 407 | 00477_update7 | Thu Mar 19 00:00:00 1970 PST
+ 478 | 8 | 00478 | Fri Mar 20 00:00:00 1970 PST
+ 479 | 509 | 00479_update9 | Sat Mar 21 00:00:00 1970 PST
+ 480 | 0 | 00480 | Sun Mar 22 00:00:00 1970 PST
+ 481 | 1 | 00481 | Mon Mar 23 00:00:00 1970 PST
+ 483 | 303 | 00483_update3 | Wed Mar 25 00:00:00 1970 PST
+ 484 | 4 | 00484 | Thu Mar 26 00:00:00 1970 PST
+ 486 | 6 | 00486 | Sat Mar 28 00:00:00 1970 PST
+ 487 | 407 | 00487_update7 | Sun Mar 29 00:00:00 1970 PST
+ 488 | 8 | 00488 | Mon Mar 30 00:00:00 1970 PST
+ 489 | 509 | 00489_update9 | Tue Mar 31 00:00:00 1970 PST
+ 490 | 0 | 00490 | Wed Apr 01 00:00:00 1970 PST
+ 491 | 1 | 00491 | Thu Apr 02 00:00:00 1970 PST
+ 493 | 303 | 00493_update3 | Sat Apr 04 00:00:00 1970 PST
+ 494 | 4 | 00494 | Sun Apr 05 00:00:00 1970 PST
+ 496 | 6 | 00496 | Tue Apr 07 00:00:00 1970 PST
+ 497 | 407 | 00497_update7 | Wed Apr 08 00:00:00 1970 PST
+ 498 | 8 | 00498 | Thu Apr 09 00:00:00 1970 PST
+ 499 | 509 | 00499_update9 | Fri Apr 10 00:00:00 1970 PST
+ 500 | 0 | 00500 | Thu Jan 01 00:00:00 1970 PST
+ 501 | 1 | 00501 | Fri Jan 02 00:00:00 1970 PST
+ 503 | 303 | 00503_update3 | Sun Jan 04 00:00:00 1970 PST
+ 504 | 4 | 00504 | Mon Jan 05 00:00:00 1970 PST
+ 506 | 6 | 00506 | Wed Jan 07 00:00:00 1970 PST
+ 507 | 407 | 00507_update7 | Thu Jan 08 00:00:00 1970 PST
+ 508 | 8 | 00508 | Fri Jan 09 00:00:00 1970 PST
+ 509 | 509 | 00509_update9 | Sat Jan 10 00:00:00 1970 PST
+ 510 | 0 | 00510 | Sun Jan 11 00:00:00 1970 PST
+ 511 | 1 | 00511 | Mon Jan 12 00:00:00 1970 PST
+ 513 | 303 | 00513_update3 | Wed Jan 14 00:00:00 1970 PST
+ 514 | 4 | 00514 | Thu Jan 15 00:00:00 1970 PST
+ 516 | 6 | 00516 | Sat Jan 17 00:00:00 1970 PST
+ 517 | 407 | 00517_update7 | Sun Jan 18 00:00:00 1970 PST
+ 518 | 8 | 00518 | Mon Jan 19 00:00:00 1970 PST
+ 519 | 509 | 00519_update9 | Tue Jan 20 00:00:00 1970 PST
+ 520 | 0 | 00520 | Wed Jan 21 00:00:00 1970 PST
+ 521 | 1 | 00521 | Thu Jan 22 00:00:00 1970 PST
+ 523 | 303 | 00523_update3 | Sat Jan 24 00:00:00 1970 PST
+ 524 | 4 | 00524 | Sun Jan 25 00:00:00 1970 PST
+ 526 | 6 | 00526 | Tue Jan 27 00:00:00 1970 PST
+ 527 | 407 | 00527_update7 | Wed Jan 28 00:00:00 1970 PST
+ 528 | 8 | 00528 | Thu Jan 29 00:00:00 1970 PST
+ 529 | 509 | 00529_update9 | Fri Jan 30 00:00:00 1970 PST
+ 530 | 0 | 00530 | Sat Jan 31 00:00:00 1970 PST
+ 531 | 1 | 00531 | Sun Feb 01 00:00:00 1970 PST
+ 533 | 303 | 00533_update3 | Tue Feb 03 00:00:00 1970 PST
+ 534 | 4 | 00534 | Wed Feb 04 00:00:00 1970 PST
+ 536 | 6 | 00536 | Fri Feb 06 00:00:00 1970 PST
+ 537 | 407 | 00537_update7 | Sat Feb 07 00:00:00 1970 PST
+ 538 | 8 | 00538 | Sun Feb 08 00:00:00 1970 PST
+ 539 | 509 | 00539_update9 | Mon Feb 09 00:00:00 1970 PST
+ 540 | 0 | 00540 | Tue Feb 10 00:00:00 1970 PST
+ 541 | 1 | 00541 | Wed Feb 11 00:00:00 1970 PST
+ 543 | 303 | 00543_update3 | Fri Feb 13 00:00:00 1970 PST
+ 544 | 4 | 00544 | Sat Feb 14 00:00:00 1970 PST
+ 546 | 6 | 00546 | Mon Feb 16 00:00:00 1970 PST
+ 547 | 407 | 00547_update7 | Tue Feb 17 00:00:00 1970 PST
+ 548 | 8 | 00548 | Wed Feb 18 00:00:00 1970 PST
+ 549 | 509 | 00549_update9 | Thu Feb 19 00:00:00 1970 PST
+ 550 | 0 | 00550 | Fri Feb 20 00:00:00 1970 PST
+ 551 | 1 | 00551 | Sat Feb 21 00:00:00 1970 PST
+ 553 | 303 | 00553_update3 | Mon Feb 23 00:00:00 1970 PST
+ 554 | 4 | 00554 | Tue Feb 24 00:00:00 1970 PST
+ 556 | 6 | 00556 | Thu Feb 26 00:00:00 1970 PST
+ 557 | 407 | 00557_update7 | Fri Feb 27 00:00:00 1970 PST
+ 558 | 8 | 00558 | Sat Feb 28 00:00:00 1970 PST
+ 559 | 509 | 00559_update9 | Sun Mar 01 00:00:00 1970 PST
+ 560 | 0 | 00560 | Mon Mar 02 00:00:00 1970 PST
+ 561 | 1 | 00561 | Tue Mar 03 00:00:00 1970 PST
+ 563 | 303 | 00563_update3 | Thu Mar 05 00:00:00 1970 PST
+ 564 | 4 | 00564 | Fri Mar 06 00:00:00 1970 PST
+ 566 | 6 | 00566 | Sun Mar 08 00:00:00 1970 PST
+ 567 | 407 | 00567_update7 | Mon Mar 09 00:00:00 1970 PST
+ 568 | 8 | 00568 | Tue Mar 10 00:00:00 1970 PST
+ 569 | 509 | 00569_update9 | Wed Mar 11 00:00:00 1970 PST
+ 570 | 0 | 00570 | Thu Mar 12 00:00:00 1970 PST
+ 571 | 1 | 00571 | Fri Mar 13 00:00:00 1970 PST
+ 573 | 303 | 00573_update3 | Sun Mar 15 00:00:00 1970 PST
+ 574 | 4 | 00574 | Mon Mar 16 00:00:00 1970 PST
+ 576 | 6 | 00576 | Wed Mar 18 00:00:00 1970 PST
+ 577 | 407 | 00577_update7 | Thu Mar 19 00:00:00 1970 PST
+ 578 | 8 | 00578 | Fri Mar 20 00:00:00 1970 PST
+ 579 | 509 | 00579_update9 | Sat Mar 21 00:00:00 1970 PST
+ 580 | 0 | 00580 | Sun Mar 22 00:00:00 1970 PST
+ 581 | 1 | 00581 | Mon Mar 23 00:00:00 1970 PST
+ 583 | 303 | 00583_update3 | Wed Mar 25 00:00:00 1970 PST
+ 584 | 4 | 00584 | Thu Mar 26 00:00:00 1970 PST
+ 586 | 6 | 00586 | Sat Mar 28 00:00:00 1970 PST
+ 587 | 407 | 00587_update7 | Sun Mar 29 00:00:00 1970 PST
+ 588 | 8 | 00588 | Mon Mar 30 00:00:00 1970 PST
+ 589 | 509 | 00589_update9 | Tue Mar 31 00:00:00 1970 PST
+ 590 | 0 | 00590 | Wed Apr 01 00:00:00 1970 PST
+ 591 | 1 | 00591 | Thu Apr 02 00:00:00 1970 PST
+ 593 | 303 | 00593_update3 | Sat Apr 04 00:00:00 1970 PST
+ 594 | 4 | 00594 | Sun Apr 05 00:00:00 1970 PST
+ 596 | 6 | 00596 | Tue Apr 07 00:00:00 1970 PST
+ 597 | 407 | 00597_update7 | Wed Apr 08 00:00:00 1970 PST
+ 598 | 8 | 00598 | Thu Apr 09 00:00:00 1970 PST
+ 599 | 509 | 00599_update9 | Fri Apr 10 00:00:00 1970 PST
+ 600 | 0 | 00600 | Thu Jan 01 00:00:00 1970 PST
+ 601 | 1 | 00601 | Fri Jan 02 00:00:00 1970 PST
+ 603 | 303 | 00603_update3 | Sun Jan 04 00:00:00 1970 PST
+ 604 | 4 | 00604 | Mon Jan 05 00:00:00 1970 PST
+ 606 | 6 | 00606 | Wed Jan 07 00:00:00 1970 PST
+ 607 | 407 | 00607_update7 | Thu Jan 08 00:00:00 1970 PST
+ 608 | 8 | 00608 | Fri Jan 09 00:00:00 1970 PST
+ 609 | 509 | 00609_update9 | Sat Jan 10 00:00:00 1970 PST
+ 610 | 0 | 00610 | Sun Jan 11 00:00:00 1970 PST
+ 611 | 1 | 00611 | Mon Jan 12 00:00:00 1970 PST
+ 613 | 303 | 00613_update3 | Wed Jan 14 00:00:00 1970 PST
+ 614 | 4 | 00614 | Thu Jan 15 00:00:00 1970 PST
+ 616 | 6 | 00616 | Sat Jan 17 00:00:00 1970 PST
+ 617 | 407 | 00617_update7 | Sun Jan 18 00:00:00 1970 PST
+ 618 | 8 | 00618 | Mon Jan 19 00:00:00 1970 PST
+ 619 | 509 | 00619_update9 | Tue Jan 20 00:00:00 1970 PST
+ 620 | 0 | 00620 | Wed Jan 21 00:00:00 1970 PST
+ 621 | 1 | 00621 | Thu Jan 22 00:00:00 1970 PST
+ 623 | 303 | 00623_update3 | Sat Jan 24 00:00:00 1970 PST
+ 624 | 4 | 00624 | Sun Jan 25 00:00:00 1970 PST
+ 626 | 6 | 00626 | Tue Jan 27 00:00:00 1970 PST
+ 627 | 407 | 00627_update7 | Wed Jan 28 00:00:00 1970 PST
+ 628 | 8 | 00628 | Thu Jan 29 00:00:00 1970 PST
+ 629 | 509 | 00629_update9 | Fri Jan 30 00:00:00 1970 PST
+ 630 | 0 | 00630 | Sat Jan 31 00:00:00 1970 PST
+ 631 | 1 | 00631 | Sun Feb 01 00:00:00 1970 PST
+ 633 | 303 | 00633_update3 | Tue Feb 03 00:00:00 1970 PST
+ 634 | 4 | 00634 | Wed Feb 04 00:00:00 1970 PST
+ 636 | 6 | 00636 | Fri Feb 06 00:00:00 1970 PST
+ 637 | 407 | 00637_update7 | Sat Feb 07 00:00:00 1970 PST
+ 638 | 8 | 00638 | Sun Feb 08 00:00:00 1970 PST
+ 639 | 509 | 00639_update9 | Mon Feb 09 00:00:00 1970 PST
+ 640 | 0 | 00640 | Tue Feb 10 00:00:00 1970 PST
+ 641 | 1 | 00641 | Wed Feb 11 00:00:00 1970 PST
+ 643 | 303 | 00643_update3 | Fri Feb 13 00:00:00 1970 PST
+ 644 | 4 | 00644 | Sat Feb 14 00:00:00 1970 PST
+ 646 | 6 | 00646 | Mon Feb 16 00:00:00 1970 PST
+ 647 | 407 | 00647_update7 | Tue Feb 17 00:00:00 1970 PST
+ 648 | 8 | 00648 | Wed Feb 18 00:00:00 1970 PST
+ 649 | 509 | 00649_update9 | Thu Feb 19 00:00:00 1970 PST
+ 650 | 0 | 00650 | Fri Feb 20 00:00:00 1970 PST
+ 651 | 1 | 00651 | Sat Feb 21 00:00:00 1970 PST
+ 653 | 303 | 00653_update3 | Mon Feb 23 00:00:00 1970 PST
+ 654 | 4 | 00654 | Tue Feb 24 00:00:00 1970 PST
+ 656 | 6 | 00656 | Thu Feb 26 00:00:00 1970 PST
+ 657 | 407 | 00657_update7 | Fri Feb 27 00:00:00 1970 PST
+ 658 | 8 | 00658 | Sat Feb 28 00:00:00 1970 PST
+ 659 | 509 | 00659_update9 | Sun Mar 01 00:00:00 1970 PST
+ 660 | 0 | 00660 | Mon Mar 02 00:00:00 1970 PST
+ 661 | 1 | 00661 | Tue Mar 03 00:00:00 1970 PST
+ 663 | 303 | 00663_update3 | Thu Mar 05 00:00:00 1970 PST
+ 664 | 4 | 00664 | Fri Mar 06 00:00:00 1970 PST
+ 666 | 6 | 00666 | Sun Mar 08 00:00:00 1970 PST
+ 667 | 407 | 00667_update7 | Mon Mar 09 00:00:00 1970 PST
+ 668 | 8 | 00668 | Tue Mar 10 00:00:00 1970 PST
+ 669 | 509 | 00669_update9 | Wed Mar 11 00:00:00 1970 PST
+ 670 | 0 | 00670 | Thu Mar 12 00:00:00 1970 PST
+ 671 | 1 | 00671 | Fri Mar 13 00:00:00 1970 PST
+ 673 | 303 | 00673_update3 | Sun Mar 15 00:00:00 1970 PST
+ 674 | 4 | 00674 | Mon Mar 16 00:00:00 1970 PST
+ 676 | 6 | 00676 | Wed Mar 18 00:00:00 1970 PST
+ 677 | 407 | 00677_update7 | Thu Mar 19 00:00:00 1970 PST
+ 678 | 8 | 00678 | Fri Mar 20 00:00:00 1970 PST
+ 679 | 509 | 00679_update9 | Sat Mar 21 00:00:00 1970 PST
+ 680 | 0 | 00680 | Sun Mar 22 00:00:00 1970 PST
+ 681 | 1 | 00681 | Mon Mar 23 00:00:00 1970 PST
+ 683 | 303 | 00683_update3 | Wed Mar 25 00:00:00 1970 PST
+ 684 | 4 | 00684 | Thu Mar 26 00:00:00 1970 PST
+ 686 | 6 | 00686 | Sat Mar 28 00:00:00 1970 PST
+ 687 | 407 | 00687_update7 | Sun Mar 29 00:00:00 1970 PST
+ 688 | 8 | 00688 | Mon Mar 30 00:00:00 1970 PST
+ 689 | 509 | 00689_update9 | Tue Mar 31 00:00:00 1970 PST
+ 690 | 0 | 00690 | Wed Apr 01 00:00:00 1970 PST
+ 691 | 1 | 00691 | Thu Apr 02 00:00:00 1970 PST
+ 693 | 303 | 00693_update3 | Sat Apr 04 00:00:00 1970 PST
+ 694 | 4 | 00694 | Sun Apr 05 00:00:00 1970 PST
+ 696 | 6 | 00696 | Tue Apr 07 00:00:00 1970 PST
+ 697 | 407 | 00697_update7 | Wed Apr 08 00:00:00 1970 PST
+ 698 | 8 | 00698 | Thu Apr 09 00:00:00 1970 PST
+ 699 | 509 | 00699_update9 | Fri Apr 10 00:00:00 1970 PST
+ 700 | 0 | 00700 | Thu Jan 01 00:00:00 1970 PST
+ 701 | 1 | 00701 | Fri Jan 02 00:00:00 1970 PST
+ 703 | 303 | 00703_update3 | Sun Jan 04 00:00:00 1970 PST
+ 704 | 4 | 00704 | Mon Jan 05 00:00:00 1970 PST
+ 706 | 6 | 00706 | Wed Jan 07 00:00:00 1970 PST
+ 707 | 407 | 00707_update7 | Thu Jan 08 00:00:00 1970 PST
+ 708 | 8 | 00708 | Fri Jan 09 00:00:00 1970 PST
+ 709 | 509 | 00709_update9 | Sat Jan 10 00:00:00 1970 PST
+ 710 | 0 | 00710 | Sun Jan 11 00:00:00 1970 PST
+ 711 | 1 | 00711 | Mon Jan 12 00:00:00 1970 PST
+ 713 | 303 | 00713_update3 | Wed Jan 14 00:00:00 1970 PST
+ 714 | 4 | 00714 | Thu Jan 15 00:00:00 1970 PST
+ 716 | 6 | 00716 | Sat Jan 17 00:00:00 1970 PST
+ 717 | 407 | 00717_update7 | Sun Jan 18 00:00:00 1970 PST
+ 718 | 8 | 00718 | Mon Jan 19 00:00:00 1970 PST
+ 719 | 509 | 00719_update9 | Tue Jan 20 00:00:00 1970 PST
+ 720 | 0 | 00720 | Wed Jan 21 00:00:00 1970 PST
+ 721 | 1 | 00721 | Thu Jan 22 00:00:00 1970 PST
+ 723 | 303 | 00723_update3 | Sat Jan 24 00:00:00 1970 PST
+ 724 | 4 | 00724 | Sun Jan 25 00:00:00 1970 PST
+ 726 | 6 | 00726 | Tue Jan 27 00:00:00 1970 PST
+ 727 | 407 | 00727_update7 | Wed Jan 28 00:00:00 1970 PST
+ 728 | 8 | 00728 | Thu Jan 29 00:00:00 1970 PST
+ 729 | 509 | 00729_update9 | Fri Jan 30 00:00:00 1970 PST
+ 730 | 0 | 00730 | Sat Jan 31 00:00:00 1970 PST
+ 731 | 1 | 00731 | Sun Feb 01 00:00:00 1970 PST
+ 733 | 303 | 00733_update3 | Tue Feb 03 00:00:00 1970 PST
+ 734 | 4 | 00734 | Wed Feb 04 00:00:00 1970 PST
+ 736 | 6 | 00736 | Fri Feb 06 00:00:00 1970 PST
+ 737 | 407 | 00737_update7 | Sat Feb 07 00:00:00 1970 PST
+ 738 | 8 | 00738 | Sun Feb 08 00:00:00 1970 PST
+ 739 | 509 | 00739_update9 | Mon Feb 09 00:00:00 1970 PST
+ 740 | 0 | 00740 | Tue Feb 10 00:00:00 1970 PST
+ 741 | 1 | 00741 | Wed Feb 11 00:00:00 1970 PST
+ 743 | 303 | 00743_update3 | Fri Feb 13 00:00:00 1970 PST
+ 744 | 4 | 00744 | Sat Feb 14 00:00:00 1970 PST
+ 746 | 6 | 00746 | Mon Feb 16 00:00:00 1970 PST
+ 747 | 407 | 00747_update7 | Tue Feb 17 00:00:00 1970 PST
+ 748 | 8 | 00748 | Wed Feb 18 00:00:00 1970 PST
+ 749 | 509 | 00749_update9 | Thu Feb 19 00:00:00 1970 PST
+ 750 | 0 | 00750 | Fri Feb 20 00:00:00 1970 PST
+ 751 | 1 | 00751 | Sat Feb 21 00:00:00 1970 PST
+ 753 | 303 | 00753_update3 | Mon Feb 23 00:00:00 1970 PST
+ 754 | 4 | 00754 | Tue Feb 24 00:00:00 1970 PST
+ 756 | 6 | 00756 | Thu Feb 26 00:00:00 1970 PST
+ 757 | 407 | 00757_update7 | Fri Feb 27 00:00:00 1970 PST
+ 758 | 8 | 00758 | Sat Feb 28 00:00:00 1970 PST
+ 759 | 509 | 00759_update9 | Sun Mar 01 00:00:00 1970 PST
+ 760 | 0 | 00760 | Mon Mar 02 00:00:00 1970 PST
+ 761 | 1 | 00761 | Tue Mar 03 00:00:00 1970 PST
+ 763 | 303 | 00763_update3 | Thu Mar 05 00:00:00 1970 PST
+ 764 | 4 | 00764 | Fri Mar 06 00:00:00 1970 PST
+ 766 | 6 | 00766 | Sun Mar 08 00:00:00 1970 PST
+ 767 | 407 | 00767_update7 | Mon Mar 09 00:00:00 1970 PST
+ 768 | 8 | 00768 | Tue Mar 10 00:00:00 1970 PST
+ 769 | 509 | 00769_update9 | Wed Mar 11 00:00:00 1970 PST
+ 770 | 0 | 00770 | Thu Mar 12 00:00:00 1970 PST
+ 771 | 1 | 00771 | Fri Mar 13 00:00:00 1970 PST
+ 773 | 303 | 00773_update3 | Sun Mar 15 00:00:00 1970 PST
+ 774 | 4 | 00774 | Mon Mar 16 00:00:00 1970 PST
+ 776 | 6 | 00776 | Wed Mar 18 00:00:00 1970 PST
+ 777 | 407 | 00777_update7 | Thu Mar 19 00:00:00 1970 PST
+ 778 | 8 | 00778 | Fri Mar 20 00:00:00 1970 PST
+ 779 | 509 | 00779_update9 | Sat Mar 21 00:00:00 1970 PST
+ 780 | 0 | 00780 | Sun Mar 22 00:00:00 1970 PST
+ 781 | 1 | 00781 | Mon Mar 23 00:00:00 1970 PST
+ 783 | 303 | 00783_update3 | Wed Mar 25 00:00:00 1970 PST
+ 784 | 4 | 00784 | Thu Mar 26 00:00:00 1970 PST
+ 786 | 6 | 00786 | Sat Mar 28 00:00:00 1970 PST
+ 787 | 407 | 00787_update7 | Sun Mar 29 00:00:00 1970 PST
+ 788 | 8 | 00788 | Mon Mar 30 00:00:00 1970 PST
+ 789 | 509 | 00789_update9 | Tue Mar 31 00:00:00 1970 PST
+ 790 | 0 | 00790 | Wed Apr 01 00:00:00 1970 PST
+ 791 | 1 | 00791 | Thu Apr 02 00:00:00 1970 PST
+ 793 | 303 | 00793_update3 | Sat Apr 04 00:00:00 1970 PST
+ 794 | 4 | 00794 | Sun Apr 05 00:00:00 1970 PST
+ 796 | 6 | 00796 | Tue Apr 07 00:00:00 1970 PST
+ 797 | 407 | 00797_update7 | Wed Apr 08 00:00:00 1970 PST
+ 798 | 8 | 00798 | Thu Apr 09 00:00:00 1970 PST
+ 799 | 509 | 00799_update9 | Fri Apr 10 00:00:00 1970 PST
+ 800 | 0 | 00800 | Thu Jan 01 00:00:00 1970 PST
+ 801 | 1 | 00801 | Fri Jan 02 00:00:00 1970 PST
+ 803 | 303 | 00803_update3 | Sun Jan 04 00:00:00 1970 PST
+ 804 | 4 | 00804 | Mon Jan 05 00:00:00 1970 PST
+ 806 | 6 | 00806 | Wed Jan 07 00:00:00 1970 PST
+ 807 | 407 | 00807_update7 | Thu Jan 08 00:00:00 1970 PST
+ 808 | 8 | 00808 | Fri Jan 09 00:00:00 1970 PST
+ 809 | 509 | 00809_update9 | Sat Jan 10 00:00:00 1970 PST
+ 810 | 0 | 00810 | Sun Jan 11 00:00:00 1970 PST
+ 811 | 1 | 00811 | Mon Jan 12 00:00:00 1970 PST
+ 813 | 303 | 00813_update3 | Wed Jan 14 00:00:00 1970 PST
+ 814 | 4 | 00814 | Thu Jan 15 00:00:00 1970 PST
+ 816 | 6 | 00816 | Sat Jan 17 00:00:00 1970 PST
+ 817 | 407 | 00817_update7 | Sun Jan 18 00:00:00 1970 PST
+ 818 | 8 | 00818 | Mon Jan 19 00:00:00 1970 PST
+ 819 | 509 | 00819_update9 | Tue Jan 20 00:00:00 1970 PST
+ 820 | 0 | 00820 | Wed Jan 21 00:00:00 1970 PST
+ 821 | 1 | 00821 | Thu Jan 22 00:00:00 1970 PST
+ 823 | 303 | 00823_update3 | Sat Jan 24 00:00:00 1970 PST
+ 824 | 4 | 00824 | Sun Jan 25 00:00:00 1970 PST
+ 826 | 6 | 00826 | Tue Jan 27 00:00:00 1970 PST
+ 827 | 407 | 00827_update7 | Wed Jan 28 00:00:00 1970 PST
+ 828 | 8 | 00828 | Thu Jan 29 00:00:00 1970 PST
+ 829 | 509 | 00829_update9 | Fri Jan 30 00:00:00 1970 PST
+ 830 | 0 | 00830 | Sat Jan 31 00:00:00 1970 PST
+ 831 | 1 | 00831 | Sun Feb 01 00:00:00 1970 PST
+ 833 | 303 | 00833_update3 | Tue Feb 03 00:00:00 1970 PST
+ 834 | 4 | 00834 | Wed Feb 04 00:00:00 1970 PST
+ 836 | 6 | 00836 | Fri Feb 06 00:00:00 1970 PST
+ 837 | 407 | 00837_update7 | Sat Feb 07 00:00:00 1970 PST
+ 838 | 8 | 00838 | Sun Feb 08 00:00:00 1970 PST
+ 839 | 509 | 00839_update9 | Mon Feb 09 00:00:00 1970 PST
+ 840 | 0 | 00840 | Tue Feb 10 00:00:00 1970 PST
+ 841 | 1 | 00841 | Wed Feb 11 00:00:00 1970 PST
+ 843 | 303 | 00843_update3 | Fri Feb 13 00:00:00 1970 PST
+ 844 | 4 | 00844 | Sat Feb 14 00:00:00 1970 PST
+ 846 | 6 | 00846 | Mon Feb 16 00:00:00 1970 PST
+ 847 | 407 | 00847_update7 | Tue Feb 17 00:00:00 1970 PST
+ 848 | 8 | 00848 | Wed Feb 18 00:00:00 1970 PST
+ 849 | 509 | 00849_update9 | Thu Feb 19 00:00:00 1970 PST
+ 850 | 0 | 00850 | Fri Feb 20 00:00:00 1970 PST
+ 851 | 1 | 00851 | Sat Feb 21 00:00:00 1970 PST
+ 853 | 303 | 00853_update3 | Mon Feb 23 00:00:00 1970 PST
+ 854 | 4 | 00854 | Tue Feb 24 00:00:00 1970 PST
+ 856 | 6 | 00856 | Thu Feb 26 00:00:00 1970 PST
+ 857 | 407 | 00857_update7 | Fri Feb 27 00:00:00 1970 PST
+ 858 | 8 | 00858 | Sat Feb 28 00:00:00 1970 PST
+ 859 | 509 | 00859_update9 | Sun Mar 01 00:00:00 1970 PST
+ 860 | 0 | 00860 | Mon Mar 02 00:00:00 1970 PST
+ 861 | 1 | 00861 | Tue Mar 03 00:00:00 1970 PST
+ 863 | 303 | 00863_update3 | Thu Mar 05 00:00:00 1970 PST
+ 864 | 4 | 00864 | Fri Mar 06 00:00:00 1970 PST
+ 866 | 6 | 00866 | Sun Mar 08 00:00:00 1970 PST
+ 867 | 407 | 00867_update7 | Mon Mar 09 00:00:00 1970 PST
+ 868 | 8 | 00868 | Tue Mar 10 00:00:00 1970 PST
+ 869 | 509 | 00869_update9 | Wed Mar 11 00:00:00 1970 PST
+ 870 | 0 | 00870 | Thu Mar 12 00:00:00 1970 PST
+ 871 | 1 | 00871 | Fri Mar 13 00:00:00 1970 PST
+ 873 | 303 | 00873_update3 | Sun Mar 15 00:00:00 1970 PST
+ 874 | 4 | 00874 | Mon Mar 16 00:00:00 1970 PST
+ 876 | 6 | 00876 | Wed Mar 18 00:00:00 1970 PST
+ 877 | 407 | 00877_update7 | Thu Mar 19 00:00:00 1970 PST
+ 878 | 8 | 00878 | Fri Mar 20 00:00:00 1970 PST
+ 879 | 509 | 00879_update9 | Sat Mar 21 00:00:00 1970 PST
+ 880 | 0 | 00880 | Sun Mar 22 00:00:00 1970 PST
+ 881 | 1 | 00881 | Mon Mar 23 00:00:00 1970 PST
+ 883 | 303 | 00883_update3 | Wed Mar 25 00:00:00 1970 PST
+ 884 | 4 | 00884 | Thu Mar 26 00:00:00 1970 PST
+ 886 | 6 | 00886 | Sat Mar 28 00:00:00 1970 PST
+ 887 | 407 | 00887_update7 | Sun Mar 29 00:00:00 1970 PST
+ 888 | 8 | 00888 | Mon Mar 30 00:00:00 1970 PST
+ 889 | 509 | 00889_update9 | Tue Mar 31 00:00:00 1970 PST
+ 890 | 0 | 00890 | Wed Apr 01 00:00:00 1970 PST
+ 891 | 1 | 00891 | Thu Apr 02 00:00:00 1970 PST
+ 893 | 303 | 00893_update3 | Sat Apr 04 00:00:00 1970 PST
+ 894 | 4 | 00894 | Sun Apr 05 00:00:00 1970 PST
+ 896 | 6 | 00896 | Tue Apr 07 00:00:00 1970 PST
+ 897 | 407 | 00897_update7 | Wed Apr 08 00:00:00 1970 PST
+ 898 | 8 | 00898 | Thu Apr 09 00:00:00 1970 PST
+ 899 | 509 | 00899_update9 | Fri Apr 10 00:00:00 1970 PST
+ 900 | 0 | 00900 | Thu Jan 01 00:00:00 1970 PST
+ 901 | 1 | 00901 | Fri Jan 02 00:00:00 1970 PST
+ 903 | 303 | 00903_update3 | Sun Jan 04 00:00:00 1970 PST
+ 904 | 4 | 00904 | Mon Jan 05 00:00:00 1970 PST
+ 906 | 6 | 00906 | Wed Jan 07 00:00:00 1970 PST
+ 907 | 407 | 00907_update7 | Thu Jan 08 00:00:00 1970 PST
+ 908 | 8 | 00908 | Fri Jan 09 00:00:00 1970 PST
+ 909 | 509 | 00909_update9 | Sat Jan 10 00:00:00 1970 PST
+ 910 | 0 | 00910 | Sun Jan 11 00:00:00 1970 PST
+ 911 | 1 | 00911 | Mon Jan 12 00:00:00 1970 PST
+ 913 | 303 | 00913_update3 | Wed Jan 14 00:00:00 1970 PST
+ 914 | 4 | 00914 | Thu Jan 15 00:00:00 1970 PST
+ 916 | 6 | 00916 | Sat Jan 17 00:00:00 1970 PST
+ 917 | 407 | 00917_update7 | Sun Jan 18 00:00:00 1970 PST
+ 918 | 8 | 00918 | Mon Jan 19 00:00:00 1970 PST
+ 919 | 509 | 00919_update9 | Tue Jan 20 00:00:00 1970 PST
+ 920 | 0 | 00920 | Wed Jan 21 00:00:00 1970 PST
+ 921 | 1 | 00921 | Thu Jan 22 00:00:00 1970 PST
+ 923 | 303 | 00923_update3 | Sat Jan 24 00:00:00 1970 PST
+ 924 | 4 | 00924 | Sun Jan 25 00:00:00 1970 PST
+ 926 | 6 | 00926 | Tue Jan 27 00:00:00 1970 PST
+ 927 | 407 | 00927_update7 | Wed Jan 28 00:00:00 1970 PST
+ 928 | 8 | 00928 | Thu Jan 29 00:00:00 1970 PST
+ 929 | 509 | 00929_update9 | Fri Jan 30 00:00:00 1970 PST
+ 930 | 0 | 00930 | Sat Jan 31 00:00:00 1970 PST
+ 931 | 1 | 00931 | Sun Feb 01 00:00:00 1970 PST
+ 933 | 303 | 00933_update3 | Tue Feb 03 00:00:00 1970 PST
+ 934 | 4 | 00934 | Wed Feb 04 00:00:00 1970 PST
+ 936 | 6 | 00936 | Fri Feb 06 00:00:00 1970 PST
+ 937 | 407 | 00937_update7 | Sat Feb 07 00:00:00 1970 PST
+ 938 | 8 | 00938 | Sun Feb 08 00:00:00 1970 PST
+ 939 | 509 | 00939_update9 | Mon Feb 09 00:00:00 1970 PST
+ 940 | 0 | 00940 | Tue Feb 10 00:00:00 1970 PST
+ 941 | 1 | 00941 | Wed Feb 11 00:00:00 1970 PST
+ 943 | 303 | 00943_update3 | Fri Feb 13 00:00:00 1970 PST
+ 944 | 4 | 00944 | Sat Feb 14 00:00:00 1970 PST
+ 946 | 6 | 00946 | Mon Feb 16 00:00:00 1970 PST
+ 947 | 407 | 00947_update7 | Tue Feb 17 00:00:00 1970 PST
+ 948 | 8 | 00948 | Wed Feb 18 00:00:00 1970 PST
+ 949 | 509 | 00949_update9 | Thu Feb 19 00:00:00 1970 PST
+ 950 | 0 | 00950 | Fri Feb 20 00:00:00 1970 PST
+ 951 | 1 | 00951 | Sat Feb 21 00:00:00 1970 PST
+ 953 | 303 | 00953_update3 | Mon Feb 23 00:00:00 1970 PST
+ 954 | 4 | 00954 | Tue Feb 24 00:00:00 1970 PST
+ 956 | 6 | 00956 | Thu Feb 26 00:00:00 1970 PST
+ 957 | 407 | 00957_update7 | Fri Feb 27 00:00:00 1970 PST
+ 958 | 8 | 00958 | Sat Feb 28 00:00:00 1970 PST
+ 959 | 509 | 00959_update9 | Sun Mar 01 00:00:00 1970 PST
+ 960 | 0 | 00960 | Mon Mar 02 00:00:00 1970 PST
+ 961 | 1 | 00961 | Tue Mar 03 00:00:00 1970 PST
+ 963 | 303 | 00963_update3 | Thu Mar 05 00:00:00 1970 PST
+ 964 | 4 | 00964 | Fri Mar 06 00:00:00 1970 PST
+ 966 | 6 | 00966 | Sun Mar 08 00:00:00 1970 PST
+ 967 | 407 | 00967_update7 | Mon Mar 09 00:00:00 1970 PST
+ 968 | 8 | 00968 | Tue Mar 10 00:00:00 1970 PST
+ 969 | 509 | 00969_update9 | Wed Mar 11 00:00:00 1970 PST
+ 970 | 0 | 00970 | Thu Mar 12 00:00:00 1970 PST
+ 971 | 1 | 00971 | Fri Mar 13 00:00:00 1970 PST
+ 973 | 303 | 00973_update3 | Sun Mar 15 00:00:00 1970 PST
+ 974 | 4 | 00974 | Mon Mar 16 00:00:00 1970 PST
+ 976 | 6 | 00976 | Wed Mar 18 00:00:00 1970 PST
+ 977 | 407 | 00977_update7 | Thu Mar 19 00:00:00 1970 PST
+ 978 | 8 | 00978 | Fri Mar 20 00:00:00 1970 PST
+ 979 | 509 | 00979_update9 | Sat Mar 21 00:00:00 1970 PST
+ 980 | 0 | 00980 | Sun Mar 22 00:00:00 1970 PST
+ 981 | 1 | 00981 | Mon Mar 23 00:00:00 1970 PST
+ 983 | 303 | 00983_update3 | Wed Mar 25 00:00:00 1970 PST
+ 984 | 4 | 00984 | Thu Mar 26 00:00:00 1970 PST
+ 986 | 6 | 00986 | Sat Mar 28 00:00:00 1970 PST
+ 987 | 407 | 00987_update7 | Sun Mar 29 00:00:00 1970 PST
+ 988 | 8 | 00988 | Mon Mar 30 00:00:00 1970 PST
+ 989 | 509 | 00989_update9 | Tue Mar 31 00:00:00 1970 PST
+ 990 | 0 | 00990 | Wed Apr 01 00:00:00 1970 PST
+ 991 | 1 | 00991 | Thu Apr 02 00:00:00 1970 PST
+ 993 | 303 | 00993_update3 | Sat Apr 04 00:00:00 1970 PST
+ 994 | 4 | 00994 | Sun Apr 05 00:00:00 1970 PST
+ 996 | 6 | 00996 | Tue Apr 07 00:00:00 1970 PST
+ 997 | 407 | 00997_update7 | Wed Apr 08 00:00:00 1970 PST
+ 998 | 8 | 00998 | Thu Apr 09 00:00:00 1970 PST
+ 999 | 509 | 00999_update9 | Fri Apr 10 00:00:00 1970 PST
+ 1000 | 0 | 01000 | Thu Jan 01 00:00:00 1970 PST
+ 1001 | 101 | 0000100001 |
+ 1003 | 403 | 0000300003_update3 |
+ 1004 | 104 | 0000400004 |
+ 1006 | 106 | 0000600006 |
+ 1007 | 507 | 0000700007_update7 |
+ 1008 | 108 | 0000800008 |
+ 1009 | 609 | 0000900009_update9 |
+ 1010 | 100 | 0001000010 |
+ 1011 | 101 | 0001100011 |
+ 1013 | 403 | 0001300013_update3 |
+ 1014 | 104 | 0001400014 |
+ 1016 | 106 | 0001600016 |
+ 1017 | 507 | 0001700017_update7 |
+ 1018 | 108 | 0001800018 |
+ 1019 | 609 | 0001900019_update9 |
+ 1020 | 100 | 0002000020 |
+ 1101 | 201 | aaa |
+ 1103 | 503 | ccc_update3 |
+ 1104 | 204 | ddd |
+(819 rows)
+
+-- Test that trigger on remote table works as expected
+CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$
+BEGIN
+ NEW.c3 = NEW.c3 || '_trig_update';
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER t1_br_insert BEFORE INSERT OR UPDATE
+ ON "S 1"."T 1" FOR EACH ROW EXECUTE PROCEDURE "S 1".F_BRTRIG();
+INSERT INTO ft2 (c1,c2,c3) VALUES (1208, 818, 'fff') RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+-----------------+----+----+----+------------+----
+ 1208 | 818 | fff_trig_update | | | | ft2 |
+(1 row)
+
+INSERT INTO ft2 (c1,c2,c3,c6) VALUES (1218, 818, 'ggg', '(--;') RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+-----------------+----+----+------+------------+----
+ 1218 | 818 | ggg_trig_update | | | (--; | ft2 |
+(1 row)
+
+UPDATE ft2 SET c2 = c2 + 600 WHERE c1 % 10 = 8 AND c1 < 1200 RETURNING *;
+ c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8
+------+-----+------------------------+------------------------------+--------------------------+----+------------+-----
+ 8 | 608 | 00008_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 18 | 608 | 00018_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 28 | 608 | 00028_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 38 | 608 | 00038_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 48 | 608 | 00048_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 58 | 608 | 00058_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 68 | 608 | 00068_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 78 | 608 | 00078_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 88 | 608 | 00088_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 98 | 608 | 00098_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 108 | 608 | 00108_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 118 | 608 | 00118_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 128 | 608 | 00128_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 138 | 608 | 00138_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 148 | 608 | 00148_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 158 | 608 | 00158_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 168 | 608 | 00168_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 178 | 608 | 00178_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 188 | 608 | 00188_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 198 | 608 | 00198_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 208 | 608 | 00208_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 218 | 608 | 00218_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 228 | 608 | 00228_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 238 | 608 | 00238_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 248 | 608 | 00248_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 258 | 608 | 00258_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 268 | 608 | 00268_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 278 | 608 | 00278_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 288 | 608 | 00288_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 298 | 608 | 00298_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 308 | 608 | 00308_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 318 | 608 | 00318_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 328 | 608 | 00328_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 338 | 608 | 00338_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 348 | 608 | 00348_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 358 | 608 | 00358_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 368 | 608 | 00368_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 378 | 608 | 00378_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 388 | 608 | 00388_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 398 | 608 | 00398_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 408 | 608 | 00408_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 418 | 608 | 00418_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 428 | 608 | 00428_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 438 | 608 | 00438_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 448 | 608 | 00448_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 458 | 608 | 00458_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 468 | 608 | 00468_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 478 | 608 | 00478_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 488 | 608 | 00488_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 498 | 608 | 00498_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 508 | 608 | 00508_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 518 | 608 | 00518_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 528 | 608 | 00528_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 538 | 608 | 00538_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 548 | 608 | 00548_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 558 | 608 | 00558_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 568 | 608 | 00568_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 578 | 608 | 00578_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 588 | 608 | 00588_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 598 | 608 | 00598_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 608 | 608 | 00608_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 618 | 608 | 00618_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 628 | 608 | 00628_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 638 | 608 | 00638_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 648 | 608 | 00648_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 658 | 608 | 00658_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 668 | 608 | 00668_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 678 | 608 | 00678_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 688 | 608 | 00688_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 698 | 608 | 00698_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 708 | 608 | 00708_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 718 | 608 | 00718_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 728 | 608 | 00728_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 738 | 608 | 00738_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 748 | 608 | 00748_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 758 | 608 | 00758_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 768 | 608 | 00768_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 778 | 608 | 00778_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 788 | 608 | 00788_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 798 | 608 | 00798_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 808 | 608 | 00808_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 818 | 608 | 00818_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 828 | 608 | 00828_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 838 | 608 | 00838_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 848 | 608 | 00848_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 858 | 608 | 00858_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 868 | 608 | 00868_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 878 | 608 | 00878_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 888 | 608 | 00888_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 898 | 608 | 00898_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 908 | 608 | 00908_trig_update | Fri Jan 09 00:00:00 1970 PST | Fri Jan 09 00:00:00 1970 | 8 | 8 | foo
+ 918 | 608 | 00918_trig_update | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo
+ 928 | 608 | 00928_trig_update | Thu Jan 29 00:00:00 1970 PST | Thu Jan 29 00:00:00 1970 | 8 | 8 | foo
+ 938 | 608 | 00938_trig_update | Sun Feb 08 00:00:00 1970 PST | Sun Feb 08 00:00:00 1970 | 8 | 8 | foo
+ 948 | 608 | 00948_trig_update | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo
+ 958 | 608 | 00958_trig_update | Sat Feb 28 00:00:00 1970 PST | Sat Feb 28 00:00:00 1970 | 8 | 8 | foo
+ 968 | 608 | 00968_trig_update | Tue Mar 10 00:00:00 1970 PST | Tue Mar 10 00:00:00 1970 | 8 | 8 | foo
+ 978 | 608 | 00978_trig_update | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo
+ 988 | 608 | 00988_trig_update | Mon Mar 30 00:00:00 1970 PST | Mon Mar 30 00:00:00 1970 | 8 | 8 | foo
+ 998 | 608 | 00998_trig_update | Thu Apr 09 00:00:00 1970 PST | Thu Apr 09 00:00:00 1970 | 8 | 8 | foo
+ 1008 | 708 | 0000800008_trig_update | | | | ft2 |
+ 1018 | 708 | 0001800018_trig_update | | | | ft2 |
+(102 rows)
+
+-- Test errors thrown on remote side during update
+ALTER TABLE "S 1"."T 1" ADD CONSTRAINT c2positive CHECK (c2 >= 0);
+INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
+ERROR: duplicate key value violates unique constraint "t1_pkey"
+DETAIL: Key ("C 1")=(11) already exists.
+CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
+ERROR: new row for relation "T 1" violates check constraint "c2positive"
+DETAIL: Failing row contains (1111, -2, null, null, null, null, ft1 , null).
+CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
+UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
+ERROR: new row for relation "T 1" violates check constraint "c2positive"
+DETAIL: Failing row contains (1, -1, 00001_trig_update, 1970-01-02 08:00:00+00, 1970-01-02 00:00:00, 1, 1 , foo).
+CONTEXT: Remote SQL command: UPDATE "S 1"."T 1" SET c2 = $2 WHERE ctid = $1
+-- Test savepoint/rollback behavior
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 0 | 100
+ 1 | 100
+ 4 | 100
+ 6 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 0 | 100
+ 1 | 100
+ 4 | 100
+ 6 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+begin;
+update ft2 set c2 = 42 where c2 = 0;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 4 | 100
+ 6 | 100
+ 42 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+savepoint s1;
+update ft2 set c2 = 44 where c2 = 4;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+release savepoint s1;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+savepoint s2;
+update ft2 set c2 = 46 where c2 = 6;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 42 | 100
+ 44 | 100
+ 46 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+rollback to savepoint s2;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+release savepoint s2;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+savepoint s3;
+update ft2 set c2 = -2 where c2 = 42 and c1 = 10; -- fail on remote side
+ERROR: new row for relation "T 1" violates check constraint "c2positive"
+DETAIL: Failing row contains (10, -2, 00010_trig_update_trig_update, 1970-01-11 08:00:00+00, 1970-01-11 00:00:00, 0, 0 , foo).
+CONTEXT: Remote SQL command: UPDATE "S 1"."T 1" SET c2 = $2 WHERE ctid = $1
+rollback to savepoint s3;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+release savepoint s3;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+-- none of the above is committed yet remotely
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 0 | 100
+ 1 | 100
+ 4 | 100
+ 6 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+commit;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+ c2 | count
+-----+-------
+ 1 | 100
+ 6 | 100
+ 42 | 100
+ 44 | 100
+ 100 | 2
+ 101 | 2
+ 104 | 2
+ 106 | 2
+ 201 | 1
+ 204 | 1
+ 303 | 100
+ 403 | 2
+ 407 | 100
+(13 rows)
+
+-- ===================================================================
+-- test serial columns (ie, sequence-based defaults)
+-- ===================================================================
+create table loc1 (f1 serial, f2 text);
+create foreign table rem1 (f1 serial, f2 text)
+ server loopback options(table_name 'loc1');
+select pg_catalog.setval('rem1_f1_seq', 10, false);
+ setval
+--------
+ 10
+(1 row)
+
+insert into loc1(f2) values('hi');
+insert into rem1(f2) values('hi remote');
+insert into loc1(f2) values('bye');
+insert into rem1(f2) values('bye remote');
+select * from loc1;
+ f1 | f2
+----+------------
+ 1 | hi
+ 10 | hi remote
+ 2 | bye
+ 11 | bye remote
+(4 rows)
+
+select * from rem1;
+ f1 | f2
+----+------------
+ 1 | hi
+ 10 | hi remote
+ 2 | bye
+ 11 | bye remote
+(4 rows)
+
+-- ===================================================================
+-- test local triggers
+-- ===================================================================
+-- Trigger functions "borrowed" from triggers regress test.
+CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS $$
+BEGIN
+ RAISE NOTICE 'trigger_func(%) called: action = %, when = %, level = %',
+ TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
+ RETURN NULL;
+END;$$;
+CREATE TRIGGER trig_stmt_before BEFORE DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER trig_stmt_after AFTER DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
+LANGUAGE plpgsql AS $$
+
+declare
+ oldnew text[];
+ relid text;
+ argstr text;
+begin
+
+ relid := TG_relid::regclass;
+ argstr := '';
+ for i in 0 .. TG_nargs - 1 loop
+ if i > 0 then
+ argstr := argstr || ', ';
+ end if;
+ argstr := argstr || TG_argv[i];
+ end loop;
+
+ RAISE NOTICE '%(%) % % % ON %',
+ tg_name, argstr, TG_when, TG_level, TG_OP, relid;
+ oldnew := '{}'::text[];
+ if TG_OP != 'INSERT' then
+ oldnew := array_append(oldnew, format('OLD: %s', OLD));
+ end if;
+
+ if TG_OP != 'DELETE' then
+ oldnew := array_append(oldnew, format('NEW: %s', NEW));
+ end if;
+
+ RAISE NOTICE '%', array_to_string(oldnew, ',');
+
+ if TG_OP = 'DELETE' then
+ return OLD;
+ else
+ return NEW;
+ end if;
+end;
+$$;
+-- Test basic functionality
+CREATE TRIGGER trig_row_before
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+delete from rem1;
+NOTICE: trigger_func(<NULL>) called: action = DELETE, when = BEFORE, level = STATEMENT
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (1,hi)
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (10,"hi remote")
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (2,bye)
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (11,"bye remote")
+NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (1,hi)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (10,"hi remote")
+NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (2,bye)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (11,"bye remote")
+NOTICE: trigger_func(<NULL>) called: action = DELETE, when = AFTER, level = STATEMENT
+insert into rem1 values(1,'insert');
+NOTICE: trigger_func(<NULL>) called: action = INSERT, when = BEFORE, level = STATEMENT
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem1
+NOTICE: NEW: (1,insert)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (1,insert)
+NOTICE: trigger_func(<NULL>) called: action = INSERT, when = AFTER, level = STATEMENT
+update rem1 set f2 = 'update' where f1 = 1;
+NOTICE: trigger_func(<NULL>) called: action = UPDATE, when = BEFORE, level = STATEMENT
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON rem1
+NOTICE: OLD: (1,insert),NEW: (1,update)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (1,insert),NEW: (1,update)
+NOTICE: trigger_func(<NULL>) called: action = UPDATE, when = AFTER, level = STATEMENT
+update rem1 set f2 = f2 || f2;
+NOTICE: trigger_func(<NULL>) called: action = UPDATE, when = BEFORE, level = STATEMENT
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON rem1
+NOTICE: OLD: (1,update),NEW: (1,updateupdate)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (1,update),NEW: (1,updateupdate)
+NOTICE: trigger_func(<NULL>) called: action = UPDATE, when = AFTER, level = STATEMENT
+-- cleanup
+DROP TRIGGER trig_row_before ON rem1;
+DROP TRIGGER trig_row_after ON rem1;
+DROP TRIGGER trig_stmt_before ON rem1;
+DROP TRIGGER trig_stmt_after ON rem1;
+DELETE from rem1;
+-- Test WHEN conditions
+CREATE TRIGGER trig_row_before_insupd
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW
+WHEN (NEW.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after_insupd
+AFTER INSERT OR UPDATE ON rem1
+FOR EACH ROW
+WHEN (NEW.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+-- Insert or update not matching: nothing happens
+INSERT INTO rem1 values(1, 'insert');
+UPDATE rem1 set f2 = 'test';
+-- Insert or update matching: triggers are fired
+INSERT INTO rem1 values(2, 'update');
+NOTICE: trig_row_before_insupd(23, skidoo) BEFORE ROW INSERT ON rem1
+NOTICE: NEW: (2,update)
+NOTICE: trig_row_after_insupd(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (2,update)
+UPDATE rem1 set f2 = 'update update' where f1 = '2';
+NOTICE: trig_row_before_insupd(23, skidoo) BEFORE ROW UPDATE ON rem1
+NOTICE: OLD: (2,update),NEW: (2,"update update")
+NOTICE: trig_row_after_insupd(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (2,update),NEW: (2,"update update")
+CREATE TRIGGER trig_row_before_delete
+BEFORE DELETE ON rem1
+FOR EACH ROW
+WHEN (OLD.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after_delete
+AFTER DELETE ON rem1
+FOR EACH ROW
+WHEN (OLD.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+-- Trigger is fired for f1=2, not for f1=1
+DELETE FROM rem1;
+NOTICE: trig_row_before_delete(23, skidoo) BEFORE ROW DELETE ON rem1
+NOTICE: OLD: (2,"update update")
+NOTICE: trig_row_after_delete(23, skidoo) AFTER ROW DELETE ON rem1
+NOTICE: OLD: (2,"update update")
+-- cleanup
+DROP TRIGGER trig_row_before_insupd ON rem1;
+DROP TRIGGER trig_row_after_insupd ON rem1;
+DROP TRIGGER trig_row_before_delete ON rem1;
+DROP TRIGGER trig_row_after_delete ON rem1;
+-- Test various RETURN statements in BEFORE triggers.
+CREATE FUNCTION trig_row_before_insupdate() RETURNS TRIGGER AS $$
+ BEGIN
+ NEW.f2 := NEW.f2 || ' triggered !';
+ RETURN NEW;
+ END
+$$ language plpgsql;
+CREATE TRIGGER trig_row_before_insupd
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+-- The new values should have 'triggered' appended
+INSERT INTO rem1 values(1, 'insert');
+SELECT * from loc1;
+ f1 | f2
+----+--------------------
+ 1 | insert triggered !
+(1 row)
+
+INSERT INTO rem1 values(2, 'insert') RETURNING f2;
+ f2
+--------------------
+ insert triggered !
+(1 row)
+
+SELECT * from loc1;
+ f1 | f2
+----+--------------------
+ 1 | insert triggered !
+ 2 | insert triggered !
+(2 rows)
+
+UPDATE rem1 set f2 = '';
+SELECT * from loc1;
+ f1 | f2
+----+--------------
+ 1 | triggered !
+ 2 | triggered !
+(2 rows)
+
+UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
+ f2
+--------------------
+ skidoo triggered !
+ skidoo triggered !
+(2 rows)
+
+SELECT * from loc1;
+ f1 | f2
+----+--------------------
+ 1 | skidoo triggered !
+ 2 | skidoo triggered !
+(2 rows)
+
+DELETE FROM rem1;
+-- Add a second trigger, to check that the changes are propagated correctly
+-- from trigger to trigger
+CREATE TRIGGER trig_row_before_insupd2
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+INSERT INTO rem1 values(1, 'insert');
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------------
+ 1 | insert triggered ! triggered !
+(1 row)
+
+INSERT INTO rem1 values(2, 'insert') RETURNING f2;
+ f2
+--------------------------------
+ insert triggered ! triggered !
+(1 row)
+
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------------
+ 1 | insert triggered ! triggered !
+ 2 | insert triggered ! triggered !
+(2 rows)
+
+UPDATE rem1 set f2 = '';
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------
+ 1 | triggered ! triggered !
+ 2 | triggered ! triggered !
+(2 rows)
+
+UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
+ f2
+--------------------------------
+ skidoo triggered ! triggered !
+ skidoo triggered ! triggered !
+(2 rows)
+
+SELECT * from loc1;
+ f1 | f2
+----+--------------------------------
+ 1 | skidoo triggered ! triggered !
+ 2 | skidoo triggered ! triggered !
+(2 rows)
+
+DROP TRIGGER trig_row_before_insupd ON rem1;
+DROP TRIGGER trig_row_before_insupd2 ON rem1;
+DELETE from rem1;
+INSERT INTO rem1 VALUES (1, 'test');
+-- Test with a trigger returning NULL
+CREATE FUNCTION trig_null() RETURNS TRIGGER AS $$
+ BEGIN
+ RETURN NULL;
+ END
+$$ language plpgsql;
+CREATE TRIGGER trig_null
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_null();
+-- Nothing should have changed.
+INSERT INTO rem1 VALUES (2, 'test2');
+SELECT * from loc1;
+ f1 | f2
+----+------
+ 1 | test
+(1 row)
+
+UPDATE rem1 SET f2 = 'test2';
+SELECT * from loc1;
+ f1 | f2
+----+------
+ 1 | test
+(1 row)
+
+DELETE from rem1;
+SELECT * from loc1;
+ f1 | f2
+----+------
+ 1 | test
+(1 row)
+
+DROP TRIGGER trig_null ON rem1;
+DELETE from rem1;
+-- Test a combination of local and remote triggers
+CREATE TRIGGER trig_row_before
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_row_after
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+CREATE TRIGGER trig_local_before BEFORE INSERT OR UPDATE ON loc1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+INSERT INTO rem1(f2) VALUES ('test');
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem1
+NOTICE: NEW: (12,test)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (12,"test triggered !")
+UPDATE rem1 SET f2 = 'testo';
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW UPDATE ON rem1
+NOTICE: OLD: (12,"test triggered !"),NEW: (12,testo)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW UPDATE ON rem1
+NOTICE: OLD: (12,"test triggered !"),NEW: (12,"testo triggered !")
+-- Test returning a system attribute
+INSERT INTO rem1(f2) VALUES ('test') RETURNING ctid;
+NOTICE: trig_row_before(23, skidoo) BEFORE ROW INSERT ON rem1
+NOTICE: NEW: (13,test)
+NOTICE: trig_row_after(23, skidoo) AFTER ROW INSERT ON rem1
+NOTICE: NEW: (13,"test triggered !")
+ ctid
+--------
+ (0,27)
+(1 row)
+
diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c
new file mode 100644
index 0000000000..65e7b8946a
--- /dev/null
+++ b/contrib/postgres_fdw/option.c
@@ -0,0 +1,295 @@
+/*-------------------------------------------------------------------------
+ *
+ * option.c
+ * FDW option handling for postgres_fdw
+ *
+ * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/option.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "postgres_fdw.h"
+
+#include "access/reloptions.h"
+#include "catalog/pg_foreign_server.h"
+#include "catalog/pg_foreign_table.h"
+#include "catalog/pg_user_mapping.h"
+#include "commands/defrem.h"
+
+
+/*
+ * Describes the valid options for objects that this wrapper uses.
+ */
+typedef struct PgFdwOption
+{
+ const char *keyword;
+ Oid optcontext; /* OID of catalog in which option may appear */
+ bool is_libpq_opt; /* true if it's used in libpq */
+} PgFdwOption;
+
+/*
+ * Valid options for postgres_fdw.
+ * Allocated and filled in InitPgFdwOptions.
+ */
+static PgFdwOption *postgres_fdw_options;
+
+/*
+ * Valid options for libpq.
+ * Allocated and filled in InitPgFdwOptions.
+ */
+static PQconninfoOption *libpq_options;
+
+/*
+ * Helper functions
+ */
+static void InitPgFdwOptions(void);
+static bool is_valid_option(const char *keyword, Oid context);
+static bool is_libpq_option(const char *keyword);
+
+
+/*
+ * Validate the generic options given to a FOREIGN DATA WRAPPER, SERVER,
+ * USER MAPPING or FOREIGN TABLE that uses postgres_fdw.
+ *
+ * Raise an ERROR if the option or its value is considered invalid.
+ */
+PG_FUNCTION_INFO_V1(postgres_fdw_validator);
+
+Datum
+postgres_fdw_validator(PG_FUNCTION_ARGS)
+{
+ List *options_list = untransformRelOptions(PG_GETARG_DATUM(0));
+ Oid catalog = PG_GETARG_OID(1);
+ ListCell *cell;
+
+	/* Build our options lists if we haven't done so already. */
+ InitPgFdwOptions();
+
+ /*
+ * Check that only options supported by postgres_fdw, and allowed for the
+ * current object type, are given.
+ */
+ foreach(cell, options_list)
+ {
+ DefElem *def = (DefElem *) lfirst(cell);
+
+ if (!is_valid_option(def->defname, catalog))
+ {
+ /*
+			 * Unknown option specified; complain about it. Provide a hint
+			 * with a list of valid options for the object.
+ */
+ PgFdwOption *opt;
+ StringInfoData buf;
+
+ initStringInfo(&buf);
+ for (opt = postgres_fdw_options; opt->keyword; opt++)
+ {
+ if (catalog == opt->optcontext)
+ appendStringInfo(&buf, "%s%s", (buf.len > 0) ? ", " : "",
+ opt->keyword);
+ }
+
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_INVALID_OPTION_NAME),
+ errmsg("invalid option \"%s\"", def->defname),
+ errhint("Valid options in this context are: %s",
+ buf.data)));
+ }
+
+ /*
+ * Validate option value, when we can do so without any context.
+ */
+ if (strcmp(def->defname, "use_remote_estimate") == 0 ||
+ strcmp(def->defname, "updatable") == 0)
+ {
+ /* these accept only boolean values */
+ (void) defGetBoolean(def);
+ }
+ else if (strcmp(def->defname, "fdw_startup_cost") == 0 ||
+ strcmp(def->defname, "fdw_tuple_cost") == 0)
+ {
+ /* these must have a non-negative numeric value */
+ double val;
+ char *endp;
+
+ val = strtod(defGetString(def), &endp);
+ if (*endp || val < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("%s requires a non-negative numeric value",
+ def->defname)));
+ }
+ }
+
+ PG_RETURN_VOID();
+}
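
As a quick illustration of the checks above (a sketch, not part of the patch; "loopback" is an assumed server name): an unknown keyword is rejected with a hint listing the options valid for that object type, and the cost options must parse as non-negative numbers.

ALTER SERVER loopback OPTIONS (ADD bogus 'value');
-- ERROR:  invalid option "bogus"
-- HINT:  Valid options in this context are: ...

ALTER SERVER loopback OPTIONS (ADD fdw_startup_cost '-1');
-- ERROR:  fdw_startup_cost requires a non-negative numeric value
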
+
+/*
+ * Initialize option lists.
+ */
+static void
+InitPgFdwOptions(void)
+{
+ int num_libpq_opts;
+ PQconninfoOption *lopt;
+ PgFdwOption *popt;
+
+ /* non-libpq FDW-specific FDW options */
+ static const PgFdwOption non_libpq_options[] = {
+ {"schema_name", ForeignTableRelationId, false},
+ {"table_name", ForeignTableRelationId, false},
+ {"column_name", AttributeRelationId, false},
+ /* use_remote_estimate is available on both server and table */
+ {"use_remote_estimate", ForeignServerRelationId, false},
+ {"use_remote_estimate", ForeignTableRelationId, false},
+ /* cost factors */
+ {"fdw_startup_cost", ForeignServerRelationId, false},
+ {"fdw_tuple_cost", ForeignServerRelationId, false},
+ /* updatable is available on both server and table */
+ {"updatable", ForeignServerRelationId, false},
+ {"updatable", ForeignTableRelationId, false},
+ {NULL, InvalidOid, false}
+ };
+
+ /* Prevent redundant initialization. */
+ if (postgres_fdw_options)
+ return;
+
+ /*
+ * Get list of valid libpq options.
+ *
+ * To avoid unnecessary work, we get the list once and use it throughout
+ * the lifetime of this backend process. We don't need to care about
+ * memory context issues, because PQconndefaults allocates with malloc.
+ */
+ libpq_options = PQconndefaults();
+ if (!libpq_options) /* assume reason for failure is OOM */
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_OUT_OF_MEMORY),
+ errmsg("out of memory"),
+ errdetail("could not get libpq's default connection options")));
+
+ /* Count how many libpq options are available. */
+ num_libpq_opts = 0;
+ for (lopt = libpq_options; lopt->keyword; lopt++)
+ num_libpq_opts++;
+
+ /*
+ * Construct an array which consists of all valid options for
+ * postgres_fdw, by appending FDW-specific options to libpq options.
+ *
+ * We use plain malloc here to allocate postgres_fdw_options because it
+ * lives as long as the backend process does. Besides, keeping
+ * libpq_options in memory allows us to avoid copying every keyword
+ * string.
+ */
+ postgres_fdw_options = (PgFdwOption *)
+ malloc(sizeof(PgFdwOption) * num_libpq_opts +
+ sizeof(non_libpq_options));
+ if (postgres_fdw_options == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_FDW_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+
+ popt = postgres_fdw_options;
+ for (lopt = libpq_options; lopt->keyword; lopt++)
+ {
+ /* Hide debug options, as well as settings we override internally. */
+ if (strchr(lopt->dispchar, 'D') ||
+ strcmp(lopt->keyword, "fallback_application_name") == 0 ||
+ strcmp(lopt->keyword, "client_encoding") == 0)
+ continue;
+
+		/* We don't have to copy the keyword string, as described above. */
+ popt->keyword = lopt->keyword;
+
+ /*
+ * "user" and any secret options are allowed only on user mappings.
+ * Everything else is a server option.
+ */
+ if (strcmp(lopt->keyword, "user") == 0 || strchr(lopt->dispchar, '*'))
+ popt->optcontext = UserMappingRelationId;
+ else
+ popt->optcontext = ForeignServerRelationId;
+ popt->is_libpq_opt = true;
+
+ popt++;
+ }
+
+ /* Append FDW-specific options and dummy terminator. */
+ memcpy(popt, non_libpq_options, sizeof(non_libpq_options));
+}
+
+/*
+ * Check whether the given option is one of the valid postgres_fdw options.
+ * context is the Oid of the catalog holding the object the option is for.
+ */
+static bool
+is_valid_option(const char *keyword, Oid context)
+{
+ PgFdwOption *opt;
+
+ Assert(postgres_fdw_options); /* must be initialized already */
+
+ for (opt = postgres_fdw_options; opt->keyword; opt++)
+ {
+ if (context == opt->optcontext && strcmp(opt->keyword, keyword) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Check whether the given option is one of the valid libpq options.
+ */
+static bool
+is_libpq_option(const char *keyword)
+{
+ PgFdwOption *opt;
+
+ Assert(postgres_fdw_options); /* must be initialized already */
+
+ for (opt = postgres_fdw_options; opt->keyword; opt++)
+ {
+ if (opt->is_libpq_opt && strcmp(opt->keyword, keyword) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Generate key-value arrays which include only libpq options from the
+ * given list (which can contain any kind of options). Caller must have
+ * allocated large-enough arrays. Returns number of options found.
+ */
+int
+ExtractConnectionOptions(List *defelems, const char **keywords,
+ const char **values)
+{
+ ListCell *lc;
+ int i;
+
+	/* Build our options lists if we haven't done so already. */
+ InitPgFdwOptions();
+
+ i = 0;
+ foreach(lc, defelems)
+ {
+ DefElem *d = (DefElem *) lfirst(lc);
+
+ if (is_libpq_option(d->defname))
+ {
+ keywords[i] = d->defname;
+ values[i] = defGetString(d);
+ i++;
+ }
+ }
+ return i;
+}
diff --git a/contrib/postgres_fdw/postgres_fdw--1.0.sql b/contrib/postgres_fdw/postgres_fdw--1.0.sql
new file mode 100644
index 0000000000..a0f0fc1bf4
--- /dev/null
+++ b/contrib/postgres_fdw/postgres_fdw--1.0.sql
@@ -0,0 +1,18 @@
+/* contrib/postgres_fdw/postgres_fdw--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION postgres_fdw" to load this file. \quit
+
+CREATE FUNCTION postgres_fdw_handler()
+RETURNS fdw_handler
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FUNCTION postgres_fdw_validator(text[], oid)
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C STRICT;
+
+CREATE FOREIGN DATA WRAPPER postgres_fdw
+ HANDLER postgres_fdw_handler
+ VALIDATOR postgres_fdw_validator;
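
For context, a typical setup on top of this extension script looks like the sketch below (all object names and connection values are hypothetical, not taken from the patch). It follows the option contexts defined in option.c: libpq settings such as host, port and dbname belong to the server, user and password to the user mapping, and schema_name/table_name to the foreign table.

CREATE EXTENSION postgres_fdw;

CREATE SERVER pgserver FOREIGN DATA WRAPPER postgres_fdw
    OPTIONS (host '192.168.0.10', port '5432', dbname 'remote_db');

CREATE USER MAPPING FOR CURRENT_USER SERVER pgserver
    OPTIONS (user 'remote_user', password 'secret');

CREATE FOREIGN TABLE ft (f1 int, f2 text)
    SERVER pgserver
    OPTIONS (schema_name 'public', table_name 'tab');
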
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
new file mode 100644
index 0000000000..7dd43a9937
--- /dev/null
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -0,0 +1,2702 @@
+/*-------------------------------------------------------------------------
+ *
+ * postgres_fdw.c
+ * Foreign-data wrapper for remote PostgreSQL servers
+ *
+ * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/postgres_fdw.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "postgres_fdw.h"
+
+#include "access/htup_details.h"
+#include "access/sysattr.h"
+#include "commands/defrem.h"
+#include "commands/explain.h"
+#include "commands/vacuum.h"
+#include "foreign/fdwapi.h"
+#include "funcapi.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/cost.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/planmain.h"
+#include "optimizer/prep.h"
+#include "optimizer/restrictinfo.h"
+#include "optimizer/var.h"
+#include "parser/parsetree.h"
+#include "utils/builtins.h"
+#include "utils/guc.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+
+
+PG_MODULE_MAGIC;
+
+/* Default CPU cost to start up a foreign query. */
+#define DEFAULT_FDW_STARTUP_COST 100.0
+
+/* Default CPU cost to process 1 row (above and beyond cpu_tuple_cost). */
+#define DEFAULT_FDW_TUPLE_COST 0.01
+
+/*
+ * FDW-specific planner information kept in RelOptInfo.fdw_private for a
+ * foreign table. This information is collected by postgresGetForeignRelSize.
+ */
+typedef struct PgFdwRelationInfo
+{
+ /* baserestrictinfo clauses, broken down into safe and unsafe subsets. */
+ List *remote_conds;
+ List *local_conds;
+
+ /* Bitmap of attr numbers we need to fetch from the remote server. */
+ Bitmapset *attrs_used;
+
+ /* Cost and selectivity of local_conds. */
+ QualCost local_conds_cost;
+ Selectivity local_conds_sel;
+
+ /* Estimated size and cost for a scan with baserestrictinfo quals. */
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+
+ /* Options extracted from catalogs. */
+ bool use_remote_estimate;
+ Cost fdw_startup_cost;
+ Cost fdw_tuple_cost;
+
+ /* Cached catalog information. */
+ ForeignTable *table;
+ ForeignServer *server;
+ UserMapping *user; /* only set in use_remote_estimate mode */
+} PgFdwRelationInfo;
+
+/*
+ * Indexes of FDW-private information stored in fdw_private lists.
+ *
+ * We store various information in ForeignScan.fdw_private to pass it from
+ * planner to executor. Currently we store:
+ *
+ * 1) SELECT statement text to be sent to the remote server
+ * 2) Integer list of attribute numbers retrieved by the SELECT
+ *
+ * These items are indexed with the enum FdwScanPrivateIndex, so an item
+ * can be fetched with list_nth(). For example, to get the SELECT statement:
+ * sql = strVal(list_nth(fdw_private, FdwScanPrivateSelectSql));
+ */
+enum FdwScanPrivateIndex
+{
+ /* SQL statement to execute remotely (as a String node) */
+ FdwScanPrivateSelectSql,
+ /* Integer list of attribute numbers retrieved by the SELECT */
+ FdwScanPrivateRetrievedAttrs
+};
+
+/*
+ * Similarly, this enum describes what's kept in the fdw_private list for
+ * a ModifyTable node referencing a postgres_fdw foreign table. We store:
+ *
+ * 1) INSERT/UPDATE/DELETE statement text to be sent to the remote server
+ * 2) Integer list of target attribute numbers for INSERT/UPDATE
+ * (NIL for a DELETE)
+ * 3) Boolean flag showing if the remote query has a RETURNING clause
+ * 4) Integer list of attribute numbers retrieved by RETURNING, if any
+ */
+enum FdwModifyPrivateIndex
+{
+ /* SQL statement to execute remotely (as a String node) */
+ FdwModifyPrivateUpdateSql,
+ /* Integer list of target attribute numbers for INSERT/UPDATE */
+ FdwModifyPrivateTargetAttnums,
+ /* has-returning flag (as an integer Value node) */
+ FdwModifyPrivateHasReturning,
+ /* Integer list of attribute numbers retrieved by RETURNING */
+ FdwModifyPrivateRetrievedAttrs
+};
+
+/*
+ * Execution state of a foreign scan using postgres_fdw.
+ */
+typedef struct PgFdwScanState
+{
+ Relation rel; /* relcache entry for the foreign table */
+ AttInMetadata *attinmeta; /* attribute datatype conversion metadata */
+
+ /* extracted fdw_private data */
+ char *query; /* text of SELECT command */
+ List *retrieved_attrs; /* list of retrieved attribute numbers */
+
+ /* for remote query execution */
+ PGconn *conn; /* connection for the scan */
+ unsigned int cursor_number; /* quasi-unique ID for my cursor */
+ bool cursor_exists; /* have we created the cursor? */
+ int numParams; /* number of parameters passed to query */
+ FmgrInfo *param_flinfo; /* output conversion functions for them */
+ List *param_exprs; /* executable expressions for param values */
+ const char **param_values; /* textual values of query parameters */
+
+ /* for storing result tuples */
+ HeapTuple *tuples; /* array of currently-retrieved tuples */
+ int num_tuples; /* # of tuples in array */
+ int next_tuple; /* index of next one to return */
+
+ /* batch-level state, for optimizing rewinds and avoiding useless fetch */
+ int fetch_ct_2; /* Min(# of fetches done, 2) */
+ bool eof_reached; /* true if last fetch reached EOF */
+
+ /* working memory contexts */
+ MemoryContext batch_cxt; /* context holding current batch of tuples */
+ MemoryContext temp_cxt; /* context for per-tuple temporary data */
+} PgFdwScanState;
+
+/*
+ * Execution state of a foreign insert/update/delete operation.
+ */
+typedef struct PgFdwModifyState
+{
+ Relation rel; /* relcache entry for the foreign table */
+ AttInMetadata *attinmeta; /* attribute datatype conversion metadata */
+
+ /* for remote query execution */
+ PGconn *conn; /* connection for the scan */
+ char *p_name; /* name of prepared statement, if created */
+
+ /* extracted fdw_private data */
+ char *query; /* text of INSERT/UPDATE/DELETE command */
+ List *target_attrs; /* list of target attribute numbers */
+ bool has_returning; /* is there a RETURNING clause? */
+ List *retrieved_attrs; /* attr numbers retrieved by RETURNING */
+
+ /* info about parameters for prepared statement */
+ AttrNumber ctidAttno; /* attnum of input resjunk ctid column */
+ int p_nums; /* number of parameters to transmit */
+ FmgrInfo *p_flinfo; /* output conversion functions for them */
+
+ /* working memory context */
+ MemoryContext temp_cxt; /* context for per-tuple temporary data */
+} PgFdwModifyState;
+
+/*
+ * Workspace for analyzing a foreign table.
+ */
+typedef struct PgFdwAnalyzeState
+{
+ Relation rel; /* relcache entry for the foreign table */
+ AttInMetadata *attinmeta; /* attribute datatype conversion metadata */
+ List *retrieved_attrs; /* attr numbers retrieved by query */
+
+ /* collected sample rows */
+ HeapTuple *rows; /* array of size targrows */
+ int targrows; /* target # of sample rows */
+ int numrows; /* # of sample rows collected */
+
+ /* for random sampling */
+ double samplerows; /* # of rows fetched */
+ double rowstoskip; /* # of rows to skip before next sample */
+ double rstate; /* random state */
+
+ /* working memory contexts */
+ MemoryContext anl_cxt; /* context for per-analyze lifespan data */
+ MemoryContext temp_cxt; /* context for per-tuple temporary data */
+} PgFdwAnalyzeState;
+
+/*
+ * Identify the attribute where data conversion fails.
+ */
+typedef struct ConversionLocation
+{
+ Relation rel; /* foreign table's relcache entry */
+ AttrNumber cur_attno; /* attribute number being processed, or 0 */
+} ConversionLocation;
+
+/* Callback argument for ec_member_matches_foreign */
+typedef struct
+{
+ Expr *current; /* current expr, or NULL if not yet found */
+ List *already_used; /* expressions already dealt with */
+} ec_member_foreign_arg;
+
+/*
+ * SQL functions
+ */
+PG_FUNCTION_INFO_V1(postgres_fdw_handler);
+
+/*
+ * FDW callback routines
+ */
+static void postgresGetForeignRelSize(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid);
+static void postgresGetForeignPaths(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid);
+static ForeignScan *postgresGetForeignPlan(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid,
+ ForeignPath *best_path,
+ List *tlist,
+ List *scan_clauses);
+static void postgresBeginForeignScan(ForeignScanState *node, int eflags);
+static TupleTableSlot *postgresIterateForeignScan(ForeignScanState *node);
+static void postgresReScanForeignScan(ForeignScanState *node);
+static void postgresEndForeignScan(ForeignScanState *node);
+static void postgresAddForeignUpdateTargets(Query *parsetree,
+ RangeTblEntry *target_rte,
+ Relation target_relation);
+static List *postgresPlanForeignModify(PlannerInfo *root,
+ ModifyTable *plan,
+ Index resultRelation,
+ int subplan_index);
+static void postgresBeginForeignModify(ModifyTableState *mtstate,
+ ResultRelInfo *resultRelInfo,
+ List *fdw_private,
+ int subplan_index,
+ int eflags);
+static TupleTableSlot *postgresExecForeignInsert(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
+static TupleTableSlot *postgresExecForeignUpdate(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
+static TupleTableSlot *postgresExecForeignDelete(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot);
+static void postgresEndForeignModify(EState *estate,
+ ResultRelInfo *resultRelInfo);
+static int postgresIsForeignRelUpdatable(Relation rel);
+static void postgresExplainForeignScan(ForeignScanState *node,
+ ExplainState *es);
+static void postgresExplainForeignModify(ModifyTableState *mtstate,
+ ResultRelInfo *rinfo,
+ List *fdw_private,
+ int subplan_index,
+ ExplainState *es);
+static bool postgresAnalyzeForeignTable(Relation relation,
+ AcquireSampleRowsFunc *func,
+ BlockNumber *totalpages);
+
+/*
+ * Helper functions
+ */
+static void estimate_path_cost_size(PlannerInfo *root,
+ RelOptInfo *baserel,
+ List *join_conds,
+ double *p_rows, int *p_width,
+ Cost *p_startup_cost, Cost *p_total_cost);
+static void get_remote_estimate(const char *sql,
+ PGconn *conn,
+ double *rows,
+ int *width,
+ Cost *startup_cost,
+ Cost *total_cost);
+static bool ec_member_matches_foreign(PlannerInfo *root, RelOptInfo *rel,
+ EquivalenceClass *ec, EquivalenceMember *em,
+ void *arg);
+static void create_cursor(ForeignScanState *node);
+static void fetch_more_data(ForeignScanState *node);
+static void close_cursor(PGconn *conn, unsigned int cursor_number);
+static void prepare_foreign_modify(PgFdwModifyState *fmstate);
+static const char **convert_prep_stmt_params(PgFdwModifyState *fmstate,
+ ItemPointer tupleid,
+ TupleTableSlot *slot);
+static void store_returning_result(PgFdwModifyState *fmstate,
+ TupleTableSlot *slot, PGresult *res);
+static int postgresAcquireSampleRowsFunc(Relation relation, int elevel,
+ HeapTuple *rows, int targrows,
+ double *totalrows,
+ double *totaldeadrows);
+static void analyze_row_processor(PGresult *res, int row,
+ PgFdwAnalyzeState *astate);
+static HeapTuple make_tuple_from_result_row(PGresult *res,
+ int row,
+ Relation rel,
+ AttInMetadata *attinmeta,
+ List *retrieved_attrs,
+ MemoryContext temp_context);
+static void conversion_error_callback(void *arg);
+
+
+/*
+ * Foreign-data wrapper handler function: return a struct with pointers
+ * to my callback routines.
+ */
+Datum
+postgres_fdw_handler(PG_FUNCTION_ARGS)
+{
+ FdwRoutine *routine = makeNode(FdwRoutine);
+
+ /* Functions for scanning foreign tables */
+ routine->GetForeignRelSize = postgresGetForeignRelSize;
+ routine->GetForeignPaths = postgresGetForeignPaths;
+ routine->GetForeignPlan = postgresGetForeignPlan;
+ routine->BeginForeignScan = postgresBeginForeignScan;
+ routine->IterateForeignScan = postgresIterateForeignScan;
+ routine->ReScanForeignScan = postgresReScanForeignScan;
+ routine->EndForeignScan = postgresEndForeignScan;
+
+ /* Functions for updating foreign tables */
+ routine->AddForeignUpdateTargets = postgresAddForeignUpdateTargets;
+ routine->PlanForeignModify = postgresPlanForeignModify;
+ routine->BeginForeignModify = postgresBeginForeignModify;
+ routine->ExecForeignInsert = postgresExecForeignInsert;
+ routine->ExecForeignUpdate = postgresExecForeignUpdate;
+ routine->ExecForeignDelete = postgresExecForeignDelete;
+ routine->EndForeignModify = postgresEndForeignModify;
+ routine->IsForeignRelUpdatable = postgresIsForeignRelUpdatable;
+
+ /* Support functions for EXPLAIN */
+ routine->ExplainForeignScan = postgresExplainForeignScan;
+ routine->ExplainForeignModify = postgresExplainForeignModify;
+
+ /* Support functions for ANALYZE */
+ routine->AnalyzeForeignTable = postgresAnalyzeForeignTable;
+
+ PG_RETURN_POINTER(routine);
+}
+
+/*
+ * postgresGetForeignRelSize
+ * Estimate # of rows and width of the result of the scan
+ *
+ * We should consider the effect of all baserestrictinfo clauses here, but
+ * not any join clauses.
+ */
+static void
+postgresGetForeignRelSize(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid)
+{
+ PgFdwRelationInfo *fpinfo;
+ ListCell *lc;
+
+ /*
+ * We use PgFdwRelationInfo to pass various information to subsequent
+ * functions.
+ */
+ fpinfo = (PgFdwRelationInfo *) palloc0(sizeof(PgFdwRelationInfo));
+ baserel->fdw_private = (void *) fpinfo;
+
+ /* Look up foreign-table catalog info. */
+ fpinfo->table = GetForeignTable(foreigntableid);
+ fpinfo->server = GetForeignServer(fpinfo->table->serverid);
+
+ /*
+ * Extract user-settable option values. Note that per-table setting of
+ * use_remote_estimate overrides per-server setting.
+ */
+ fpinfo->use_remote_estimate = false;
+ fpinfo->fdw_startup_cost = DEFAULT_FDW_STARTUP_COST;
+ fpinfo->fdw_tuple_cost = DEFAULT_FDW_TUPLE_COST;
+
+ foreach(lc, fpinfo->server->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "use_remote_estimate") == 0)
+ fpinfo->use_remote_estimate = defGetBoolean(def);
+ else if (strcmp(def->defname, "fdw_startup_cost") == 0)
+ fpinfo->fdw_startup_cost = strtod(defGetString(def), NULL);
+ else if (strcmp(def->defname, "fdw_tuple_cost") == 0)
+ fpinfo->fdw_tuple_cost = strtod(defGetString(def), NULL);
+ }
+ foreach(lc, fpinfo->table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "use_remote_estimate") == 0)
+ {
+ fpinfo->use_remote_estimate = defGetBoolean(def);
+ break; /* only need the one value */
+ }
+ }
+
+ /*
+ * If the table or the server is configured to use remote estimates,
+ * identify which user to do remote access as during planning. This
+ * should match what ExecCheckRTEPerms() does. If we fail due to lack of
+ * permissions, the query would have failed at runtime anyway.
+ */
+ if (fpinfo->use_remote_estimate)
+ {
+ RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root);
+ Oid userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
+
+ fpinfo->user = GetUserMapping(userid, fpinfo->server->serverid);
+ }
+ else
+ fpinfo->user = NULL;
+
+ /*
+ * Identify which baserestrictinfo clauses can be sent to the remote
+ * server and which can't.
+ */
+ classifyConditions(root, baserel, baserel->baserestrictinfo,
+ &fpinfo->remote_conds, &fpinfo->local_conds);
+
+ /*
+ * Identify which attributes will need to be retrieved from the remote
+ * server. These include all attrs needed for joins or final output, plus
+ * all attrs used in the local_conds. (Note: if we end up using a
+ * parameterized scan, it's possible that some of the join clauses will be
+ * sent to the remote and thus we wouldn't really need to retrieve the
+ * columns used in them. Doesn't seem worth detecting that case though.)
+ */
+ fpinfo->attrs_used = NULL;
+ pull_varattnos((Node *) baserel->reltargetlist, baserel->relid,
+ &fpinfo->attrs_used);
+ foreach(lc, fpinfo->local_conds)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ pull_varattnos((Node *) rinfo->clause, baserel->relid,
+ &fpinfo->attrs_used);
+ }
+
+ /*
+ * Compute the selectivity and cost of the local_conds, so we don't have
+ * to do it over again for each path. The best we can do for these
+ * conditions is to estimate selectivity on the basis of local statistics.
+ */
+ fpinfo->local_conds_sel = clauselist_selectivity(root,
+ fpinfo->local_conds,
+ baserel->relid,
+ JOIN_INNER,
+ NULL);
+
+ cost_qual_eval(&fpinfo->local_conds_cost, fpinfo->local_conds, root);
+
+ /*
+ * If the table or the server is configured to use remote estimates,
+ * connect to the foreign server and execute EXPLAIN to estimate the
+ * number of rows selected by the restriction clauses, as well as the
+ * average row width. Otherwise, estimate using whatever statistics we
+ * have locally, in a way similar to ordinary tables.
+ */
+ if (fpinfo->use_remote_estimate)
+ {
+ /*
+ * Get cost/size estimates with help of remote server. Save the
+ * values in fpinfo so we don't need to do it again to generate the
+ * basic foreign path.
+ */
+ estimate_path_cost_size(root, baserel, NIL,
+ &fpinfo->rows, &fpinfo->width,
+ &fpinfo->startup_cost, &fpinfo->total_cost);
+
+ /* Report estimated baserel size to planner. */
+ baserel->rows = fpinfo->rows;
+ baserel->width = fpinfo->width;
+ }
+ else
+ {
+ /*
+ * If the foreign table has never been ANALYZEd, it will have relpages
+ * and reltuples equal to zero, which most likely has nothing to do
+ * with reality. We can't do a whole lot about that if we're not
+ * allowed to consult the remote server, but we can use a hack similar
+ * to plancat.c's treatment of empty relations: use a minimum size
+ * estimate of 10 pages, and divide by the column-datatype-based width
+ * estimate to get the corresponding number of tuples.
+ */
+ if (baserel->pages == 0 && baserel->tuples == 0)
+ {
+ baserel->pages = 10;
+ baserel->tuples =
+ (10 * BLCKSZ) / (baserel->width + sizeof(HeapTupleHeaderData));
+ }
+
+ /* Estimate baserel size as best we can with local statistics. */
+ set_baserel_size_estimates(root, baserel);
+
+ /* Fill in basically-bogus cost estimates for use later. */
+ estimate_path_cost_size(root, baserel, NIL,
+ &fpinfo->rows, &fpinfo->width,
+ &fpinfo->startup_cost, &fpinfo->total_cost);
+ }
+}
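
To make the option handling above concrete (hypothetical names; a sketch, not part of the patch): use_remote_estimate can be flipped per table, overriding the server-level setting, and the two cost knobs feed straight into fdw_startup_cost and fdw_tuple_cost.

-- Enable remote EXPLAIN-based estimates for one foreign table only.
ALTER FOREIGN TABLE ft OPTIONS (ADD use_remote_estimate 'true');
-- Or tune the per-query and per-row overhead assumed for the whole server.
ALTER SERVER pgserver OPTIONS (ADD fdw_startup_cost '200', ADD fdw_tuple_cost '0.05');
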
+
+/*
+ * postgresGetForeignPaths
+ * Create possible scan paths for a scan on the foreign table
+ */
+static void
+postgresGetForeignPaths(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) baserel->fdw_private;
+ ForeignPath *path;
+ List *ppi_list;
+ ListCell *lc;
+
+ /*
+ * Create simplest ForeignScan path node and add it to baserel. This path
+ * corresponds to SeqScan path of regular tables (though depending on what
+ * baserestrict conditions we were able to send to remote, there might
+ * actually be an indexscan happening there). We already did all the work
+ * to estimate cost and size of this path.
+ */
+ path = create_foreignscan_path(root, baserel,
+ fpinfo->rows,
+ fpinfo->startup_cost,
+ fpinfo->total_cost,
+ NIL, /* no pathkeys */
+ NULL, /* no outer rel either */
+ NIL); /* no fdw_private list */
+ add_path(baserel, (Path *) path);
+
+ /*
+ * If we're not using remote estimates, stop here. We have no way to
+ * estimate whether any join clauses would be worth sending across, so
+ * don't bother building parameterized paths.
+ */
+ if (!fpinfo->use_remote_estimate)
+ return;
+
+ /*
+ * Thumb through all join clauses for the rel to identify which outer
+ * relations could supply one or more safe-to-send-to-remote join clauses.
+ * We'll build a parameterized path for each such outer relation.
+ *
+ * It's convenient to manage this by representing each candidate outer
+ * relation by the ParamPathInfo node for it. We can then use the
+ * ppi_clauses list in the ParamPathInfo node directly as a list of the
+ * interesting join clauses for that rel. This takes care of the
+ * possibility that there are multiple safe join clauses for such a rel,
+ * and also ensures that we account for unsafe join clauses that we'll
+ * still have to enforce locally (since the parameterized-path machinery
+ * insists that we handle all movable clauses).
+ */
+ ppi_list = NIL;
+ foreach(lc, baserel->joininfo)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ Relids required_outer;
+ ParamPathInfo *param_info;
+
+ /* Check if clause can be moved to this rel */
+ if (!join_clause_is_movable_to(rinfo, baserel))
+ continue;
+
+ /* See if it is safe to send to remote */
+ if (!is_foreign_expr(root, baserel, rinfo->clause))
+ continue;
+
+ /* Calculate required outer rels for the resulting path */
+ required_outer = bms_union(rinfo->clause_relids,
+ baserel->lateral_relids);
+ /* We do not want the foreign rel itself listed in required_outer */
+ required_outer = bms_del_member(required_outer, baserel->relid);
+
+ /*
+ * required_outer probably can't be empty here, but if it were, we
+ * couldn't make a parameterized path.
+ */
+ if (bms_is_empty(required_outer))
+ continue;
+
+ /* Get the ParamPathInfo */
+ param_info = get_baserel_parampathinfo(root, baserel,
+ required_outer);
+ Assert(param_info != NULL);
+
+ /*
+ * Add it to list unless we already have it. Testing pointer equality
+ * is OK since get_baserel_parampathinfo won't make duplicates.
+ */
+ ppi_list = list_append_unique_ptr(ppi_list, param_info);
+ }
+
+ /*
+ * The above scan examined only "generic" join clauses, not those that
+ * were absorbed into EquivalenceClauses. See if we can make anything out
+ * of EquivalenceClauses.
+ */
+ if (baserel->has_eclass_joins)
+ {
+ /*
+ * We repeatedly scan the eclass list looking for column references
+ * (or expressions) belonging to the foreign rel. Each time we find
+ * one, we generate a list of equivalence joinclauses for it, and then
+ * see if any are safe to send to the remote. Repeat till there are
+ * no more candidate EC members.
+ */
+ ec_member_foreign_arg arg;
+
+ arg.already_used = NIL;
+ for (;;)
+ {
+ List *clauses;
+
+ /* Make clauses, skipping any that join to lateral_referencers */
+ arg.current = NULL;
+ clauses = generate_implied_equalities_for_column(root,
+ baserel,
+ ec_member_matches_foreign,
+ (void *) &arg,
+ baserel->lateral_referencers);
+
+ /* Done if there are no more expressions in the foreign rel */
+ if (arg.current == NULL)
+ {
+ Assert(clauses == NIL);
+ break;
+ }
+
+ /* Scan the extracted join clauses */
+ foreach(lc, clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ Relids required_outer;
+ ParamPathInfo *param_info;
+
+ /* Check if clause can be moved to this rel */
+ if (!join_clause_is_movable_to(rinfo, baserel))
+ continue;
+
+ /* See if it is safe to send to remote */
+ if (!is_foreign_expr(root, baserel, rinfo->clause))
+ continue;
+
+ /* Calculate required outer rels for the resulting path */
+ required_outer = bms_union(rinfo->clause_relids,
+ baserel->lateral_relids);
+ required_outer = bms_del_member(required_outer, baserel->relid);
+ if (bms_is_empty(required_outer))
+ continue;
+
+ /* Get the ParamPathInfo */
+ param_info = get_baserel_parampathinfo(root, baserel,
+ required_outer);
+ Assert(param_info != NULL);
+
+ /* Add it to list unless we already have it */
+ ppi_list = list_append_unique_ptr(ppi_list, param_info);
+ }
+
+ /* Try again, now ignoring the expression we found this time */
+ arg.already_used = lappend(arg.already_used, arg.current);
+ }
+ }
+
+ /*
+ * Now build a path for each useful outer relation.
+ */
+ foreach(lc, ppi_list)
+ {
+ ParamPathInfo *param_info = (ParamPathInfo *) lfirst(lc);
+ double rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+
+ /* Get a cost estimate from the remote */
+ estimate_path_cost_size(root, baserel,
+ param_info->ppi_clauses,
+ &rows, &width,
+ &startup_cost, &total_cost);
+
+ /*
+ * ppi_rows currently won't get looked at by anything, but still we
+ * may as well ensure that it matches our idea of the rowcount.
+ */
+ param_info->ppi_rows = rows;
+
+ /* Make the path */
+ path = create_foreignscan_path(root, baserel,
+ rows,
+ startup_cost,
+ total_cost,
+ NIL, /* no pathkeys */
+ param_info->ppi_req_outer,
+ NIL); /* no fdw_private list */
+ add_path(baserel, (Path *) path);
+ }
+}
+
+/*
+ * postgresGetForeignPlan
+ * Create ForeignScan plan node which implements selected best path
+ */
+static ForeignScan *
+postgresGetForeignPlan(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Oid foreigntableid,
+ ForeignPath *best_path,
+ List *tlist,
+ List *scan_clauses)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) baserel->fdw_private;
+ Index scan_relid = baserel->relid;
+ List *fdw_private;
+ List *remote_conds = NIL;
+ List *local_exprs = NIL;
+ List *params_list = NIL;
+ List *retrieved_attrs;
+ StringInfoData sql;
+ ListCell *lc;
+
+ /*
+ * Separate the scan_clauses into those that can be executed remotely and
+ * those that can't. baserestrictinfo clauses that were previously
+ * determined to be safe or unsafe by classifyConditions are shown in
+ * fpinfo->remote_conds and fpinfo->local_conds. Anything else in the
+ * scan_clauses list will be a join clause, which we have to check for
+ * remote-safety.
+ *
+ * Note: the join clauses we see here should be the exact same ones
+ * previously examined by postgresGetForeignPaths. Possibly it'd be worth
+ * passing forward the classification work done then, rather than
+ * repeating it here.
+ *
+ * This code must match "extract_actual_clauses(scan_clauses, false)"
+ * except for the additional decision about remote versus local execution.
+ * Note however that we only strip the RestrictInfo nodes from the
+ * local_exprs list, since appendWhereClause expects a list of
+ * RestrictInfos.
+ */
+ foreach(lc, scan_clauses)
+ {
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+
+ Assert(IsA(rinfo, RestrictInfo));
+
+ /* Ignore any pseudoconstants, they're dealt with elsewhere */
+ if (rinfo->pseudoconstant)
+ continue;
+
+ if (list_member_ptr(fpinfo->remote_conds, rinfo))
+ remote_conds = lappend(remote_conds, rinfo);
+ else if (list_member_ptr(fpinfo->local_conds, rinfo))
+ local_exprs = lappend(local_exprs, rinfo->clause);
+ else if (is_foreign_expr(root, baserel, rinfo->clause))
+ remote_conds = lappend(remote_conds, rinfo);
+ else
+ local_exprs = lappend(local_exprs, rinfo->clause);
+ }
+
+ /*
+ * Build the query string to be sent for execution, and identify
+ * expressions to be sent as parameters.
+ */
+ initStringInfo(&sql);
+ deparseSelectSql(&sql, root, baserel, fpinfo->attrs_used,
+ &retrieved_attrs);
+ if (remote_conds)
+ appendWhereClause(&sql, root, baserel, remote_conds,
+ true, &params_list);
+
+ /*
+ * Add FOR UPDATE/SHARE if appropriate. We apply locking during the
+ * initial row fetch, rather than later on as is done for local tables.
+ * The extra roundtrips involved in trying to duplicate the local
+ * semantics exactly don't seem worthwhile (see also comments for
+ * RowMarkType).
+ *
+ * Note: because we actually run the query as a cursor, this assumes that
+ * DECLARE CURSOR ... FOR UPDATE is supported, which it isn't before 8.3.
+ */
+ if (baserel->relid == root->parse->resultRelation &&
+ (root->parse->commandType == CMD_UPDATE ||
+ root->parse->commandType == CMD_DELETE))
+ {
+ /* Relation is UPDATE/DELETE target, so use FOR UPDATE */
+ appendStringInfoString(&sql, " FOR UPDATE");
+ }
+ else
+ {
+ RowMarkClause *rc = get_parse_rowmark(root->parse, baserel->relid);
+
+ if (rc)
+ {
+ /*
+ * Relation is specified as a FOR UPDATE/SHARE target, so handle
+ * that.
+ *
+ * For now, just ignore any [NO] KEY specification, since (a) it's
+ * not clear what that means for a remote table that we don't have
+ * complete information about, and (b) it wouldn't work anyway on
+ * older remote servers. Likewise, we don't worry about NOWAIT.
+ */
+ switch (rc->strength)
+ {
+ case LCS_FORKEYSHARE:
+ case LCS_FORSHARE:
+ appendStringInfoString(&sql, " FOR SHARE");
+ break;
+ case LCS_FORNOKEYUPDATE:
+ case LCS_FORUPDATE:
+ appendStringInfoString(&sql, " FOR UPDATE");
+ break;
+ }
+ }
+ }
+
+ /*
+ * Build the fdw_private list that will be available to the executor.
+ * Items in the list must match enum FdwScanPrivateIndex, above.
+ */
+ fdw_private = list_make2(makeString(sql.data),
+ retrieved_attrs);
+
+ /*
+ * Create the ForeignScan node from target list, local filtering
+ * expressions, remote parameter expressions, and FDW private information.
+ *
+ * Note that the remote parameter expressions are stored in the fdw_exprs
+ * field of the finished plan node; we can't keep them in private state
+ * because then they wouldn't be subject to later planner processing.
+ */
+ return make_foreignscan(tlist,
+ local_exprs,
+ scan_relid,
+ params_list,
+ fdw_private);
+}
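
To make the locking discussion concrete (a sketch with hypothetical names, not exact deparse output): when the foreign table is the target of a local UPDATE or DELETE, the SELECT shipped to the remote server carries FOR UPDATE, so the fetched rows stay locked from the initial scan until the per-row commands run.

-- Roughly what is deparsed for "UPDATE ft SET f2 = ... WHERE f1 > 0":
SELECT f1, f2, ctid FROM public.tab WHERE ((f1 > 0)) FOR UPDATE;
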
+
+/*
+ * postgresBeginForeignScan
+ * Initiate an executor scan of a foreign PostgreSQL table.
+ */
+static void
+postgresBeginForeignScan(ForeignScanState *node, int eflags)
+{
+ ForeignScan *fsplan = (ForeignScan *) node->ss.ps.plan;
+ EState *estate = node->ss.ps.state;
+ PgFdwScanState *fsstate;
+ RangeTblEntry *rte;
+ Oid userid;
+ ForeignTable *table;
+ ForeignServer *server;
+ UserMapping *user;
+ int numParams;
+ int i;
+ ListCell *lc;
+
+ /*
+ * Do nothing in EXPLAIN (no ANALYZE) case. node->fdw_state stays NULL.
+ */
+ if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
+ return;
+
+ /*
+ * We'll save private state in node->fdw_state.
+ */
+ fsstate = (PgFdwScanState *) palloc0(sizeof(PgFdwScanState));
+ node->fdw_state = (void *) fsstate;
+
+ /*
+ * Identify which user to do the remote access as. This should match what
+ * ExecCheckRTEPerms() does.
+ */
+ rte = rt_fetch(fsplan->scan.scanrelid, estate->es_range_table);
+ userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
+
+ /* Get info about foreign table. */
+ fsstate->rel = node->ss.ss_currentRelation;
+ table = GetForeignTable(RelationGetRelid(fsstate->rel));
+ server = GetForeignServer(table->serverid);
+ user = GetUserMapping(userid, server->serverid);
+
+ /*
+ * Get connection to the foreign server. Connection manager will
+ * establish new connection if necessary.
+ */
+ fsstate->conn = GetConnection(server, user, false);
+
+ /* Assign a unique ID for my cursor */
+ fsstate->cursor_number = GetCursorNumber(fsstate->conn);
+ fsstate->cursor_exists = false;
+
+ /* Get private info created by planner functions. */
+ fsstate->query = strVal(list_nth(fsplan->fdw_private,
+ FdwScanPrivateSelectSql));
+ fsstate->retrieved_attrs = (List *) list_nth(fsplan->fdw_private,
+ FdwScanPrivateRetrievedAttrs);
+
+ /* Create contexts for batches of tuples and per-tuple temp workspace. */
+ fsstate->batch_cxt = AllocSetContextCreate(estate->es_query_cxt,
+ "postgres_fdw tuple data",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
+ fsstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
+ "postgres_fdw temporary data",
+ ALLOCSET_SMALL_MINSIZE,
+ ALLOCSET_SMALL_INITSIZE,
+ ALLOCSET_SMALL_MAXSIZE);
+
+ /* Get info we'll need for input data conversion. */
+ fsstate->attinmeta = TupleDescGetAttInMetadata(RelationGetDescr(fsstate->rel));
+
+ /* Prepare for output conversion of parameters used in remote query. */
+ numParams = list_length(fsplan->fdw_exprs);
+ fsstate->numParams = numParams;
+ fsstate->param_flinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * numParams);
+
+ i = 0;
+ foreach(lc, fsplan->fdw_exprs)
+ {
+ Node *param_expr = (Node *) lfirst(lc);
+ Oid typefnoid;
+ bool isvarlena;
+
+ getTypeOutputInfo(exprType(param_expr), &typefnoid, &isvarlena);
+ fmgr_info(typefnoid, &fsstate->param_flinfo[i]);
+ i++;
+ }
+
+ /*
+ * Prepare remote-parameter expressions for evaluation. (Note: in
+ * practice, we expect that all these expressions will be just Params, so
+ * we could possibly do something more efficient than using the full
+ * expression-eval machinery for this. But probably there would be little
+ * benefit, and it'd require postgres_fdw to know more than is desirable
+ * about Param evaluation.)
+ */
+ fsstate->param_exprs = (List *)
+ ExecInitExpr((Expr *) fsplan->fdw_exprs,
+ (PlanState *) node);
+
+ /*
+ * Allocate buffer for text form of query parameters, if any.
+ */
+ if (numParams > 0)
+ fsstate->param_values = (const char **) palloc0(numParams * sizeof(char *));
+ else
+ fsstate->param_values = NULL;
+}
+
+/*
+ * postgresIterateForeignScan
+ * Retrieve next row from the result set, or clear tuple slot to indicate
+ * EOF.
+ */
+static TupleTableSlot *
+postgresIterateForeignScan(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;
+
+ /*
+ * If this is the first call after Begin or ReScan, we need to create the
+ * cursor on the remote side.
+ */
+ if (!fsstate->cursor_exists)
+ create_cursor(node);
+
+ /*
+ * Get some more tuples, if we've run out.
+ */
+ if (fsstate->next_tuple >= fsstate->num_tuples)
+ {
+ /* No point in another fetch if we already detected EOF, though. */
+ if (!fsstate->eof_reached)
+ fetch_more_data(node);
+ /* If we didn't get any tuples, must be end of data. */
+ if (fsstate->next_tuple >= fsstate->num_tuples)
+ return ExecClearTuple(slot);
+ }
+
+ /*
+ * Return the next tuple.
+ */
+ ExecStoreTuple(fsstate->tuples[fsstate->next_tuple++],
+ slot,
+ InvalidBuffer,
+ false);
+
+ return slot;
+}
+
+/*
+ * postgresReScanForeignScan
+ * Restart the scan.
+ */
+static void
+postgresReScanForeignScan(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ char sql[64];
+ PGresult *res;
+
+ /* If we haven't created the cursor yet, nothing to do. */
+ if (!fsstate->cursor_exists)
+ return;
+
+ /*
+ * If any internal parameters affecting this node have changed, we'd
+ * better destroy and recreate the cursor. Otherwise, rewinding it should
+ * be good enough. If we've only fetched zero or one batch, we needn't
+ * even rewind the cursor, just rescan what we have.
+ */
+ if (node->ss.ps.chgParam != NULL)
+ {
+ fsstate->cursor_exists = false;
+ snprintf(sql, sizeof(sql), "CLOSE c%u",
+ fsstate->cursor_number);
+ }
+ else if (fsstate->fetch_ct_2 > 1)
+ {
+ snprintf(sql, sizeof(sql), "MOVE BACKWARD ALL IN c%u",
+ fsstate->cursor_number);
+ }
+ else
+ {
+ /* Easy: just rescan what we already have in memory, if anything */
+ fsstate->next_tuple = 0;
+ return;
+ }
+
+ /*
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = PQexec(fsstate->conn, sql);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, fsstate->conn, true, sql);
+ PQclear(res);
+
+ /* Now force a fresh FETCH. */
+ fsstate->tuples = NULL;
+ fsstate->num_tuples = 0;
+ fsstate->next_tuple = 0;
+ fsstate->fetch_ct_2 = 0;
+ fsstate->eof_reached = false;
+}
+
+/*
+ * postgresEndForeignScan
+ * Finish scanning foreign table and dispose objects used for this scan
+ */
+static void
+postgresEndForeignScan(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+
+ /* if fsstate is NULL, we are in EXPLAIN; nothing to do */
+ if (fsstate == NULL)
+ return;
+
+ /* Close the cursor if open, to prevent accumulation of cursors */
+ if (fsstate->cursor_exists)
+ close_cursor(fsstate->conn, fsstate->cursor_number);
+
+ /* Release remote connection */
+ ReleaseConnection(fsstate->conn);
+ fsstate->conn = NULL;
+
+ /* MemoryContexts will be deleted automatically. */
+}
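
Taken together, the scan-side callbacks above drive a remote cursor whose lifecycle looks roughly like this (the cursor number is assigned per connection; "c1" and the column list are hypothetical):

DECLARE c1 CURSOR FOR SELECT f1, f2 FROM public.tab;  -- created on the first IterateForeignScan call
FETCH 100 FROM c1;                                    -- fetch_more_data pulls one fixed-size batch at a time
MOVE BACKWARD ALL IN c1;                              -- ReScan when no parameters changed and more than one batch was read
CLOSE c1;                                             -- ReScan after a parameter change, or EndForeignScan
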
+
+/*
+ * postgresAddForeignUpdateTargets
+ * Add resjunk column(s) needed for update/delete on a foreign table
+ */
+static void
+postgresAddForeignUpdateTargets(Query *parsetree,
+ RangeTblEntry *target_rte,
+ Relation target_relation)
+{
+ Var *var;
+ const char *attrname;
+ TargetEntry *tle;
+
+ /*
+ * In postgres_fdw, what we need is the ctid, same as for a regular table.
+ */
+
+ /* Make a Var representing the desired value */
+ var = makeVar(parsetree->resultRelation,
+ SelfItemPointerAttributeNumber,
+ TIDOID,
+ -1,
+ InvalidOid,
+ 0);
+
+ /* Wrap it in a resjunk TLE with the right name ... */
+ attrname = "ctid";
+
+ tle = makeTargetEntry((Expr *) var,
+ list_length(parsetree->targetList) + 1,
+ pstrdup(attrname),
+ true);
+
+ /* ... and add it to the query's targetlist */
+ parsetree->targetList = lappend(parsetree->targetList, tle);
+}
+
+/*
+ * postgresPlanForeignModify
+ * Plan an insert/update/delete operation on a foreign table
+ *
+ * Note: currently, the plan tree generated for UPDATE/DELETE will always
+ * include a ForeignScan that retrieves ctids (using SELECT FOR UPDATE)
+ * and then the ModifyTable node will have to execute individual remote
+ * UPDATE/DELETE commands. If there are no local conditions or joins
+ * needed, it'd be better to let the scan node do UPDATE/DELETE RETURNING
+ * and then do nothing at ModifyTable. Room for future optimization ...
+ */
+static List *
+postgresPlanForeignModify(PlannerInfo *root,
+ ModifyTable *plan,
+ Index resultRelation,
+ int subplan_index)
+{
+ CmdType operation = plan->operation;
+ RangeTblEntry *rte = planner_rt_fetch(resultRelation, root);
+ Relation rel;
+ StringInfoData sql;
+ List *targetAttrs = NIL;
+ List *returningList = NIL;
+ List *retrieved_attrs = NIL;
+
+ initStringInfo(&sql);
+
+ /*
+ * Core code already has some lock on each rel being planned, so we can
+ * use NoLock here.
+ */
+ rel = heap_open(rte->relid, NoLock);
+
+ /*
+ * In an INSERT, we transmit all columns that are defined in the foreign
+ * table. In an UPDATE, we transmit only columns that were explicitly
+ * targets of the UPDATE, so as to avoid unnecessary data transmission.
+ * (We can't do that for INSERT since we would miss sending default values
+ * for columns not listed in the source statement.)
+ */
+ if (operation == CMD_INSERT)
+ {
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ int attnum;
+
+ for (attnum = 1; attnum <= tupdesc->natts; attnum++)
+ {
+ Form_pg_attribute attr = tupdesc->attrs[attnum - 1];
+
+ if (!attr->attisdropped)
+ targetAttrs = lappend_int(targetAttrs, attnum);
+ }
+ }
+ else if (operation == CMD_UPDATE)
+ {
+ Bitmapset *tmpset = bms_copy(rte->modifiedCols);
+ AttrNumber col;
+
+ while ((col = bms_first_member(tmpset)) >= 0)
+ {
+ col += FirstLowInvalidHeapAttributeNumber;
+ if (col <= InvalidAttrNumber) /* shouldn't happen */
+ elog(ERROR, "system-column update is not supported");
+ targetAttrs = lappend_int(targetAttrs, col);
+ }
+ }
+
+ /*
+ * Extract the relevant RETURNING list if any.
+ */
+ if (plan->returningLists)
+ returningList = (List *) list_nth(plan->returningLists, subplan_index);
+
+ /*
+ * Construct the SQL command string.
+ */
+ switch (operation)
+ {
+ case CMD_INSERT:
+ deparseInsertSql(&sql, root, resultRelation, rel,
+ targetAttrs, returningList,
+ &retrieved_attrs);
+ break;
+ case CMD_UPDATE:
+ deparseUpdateSql(&sql, root, resultRelation, rel,
+ targetAttrs, returningList,
+ &retrieved_attrs);
+ break;
+ case CMD_DELETE:
+ deparseDeleteSql(&sql, root, resultRelation, rel,
+ returningList,
+ &retrieved_attrs);
+ break;
+ default:
+ elog(ERROR, "unexpected operation: %d", (int) operation);
+ break;
+ }
+
+ heap_close(rel, NoLock);
+
+ /*
+ * Build the fdw_private list that will be available to the executor.
+ * Items in the list must match enum FdwModifyPrivateIndex, above.
+ */
+ return list_make4(makeString(sql.data),
+ targetAttrs,
+ makeInteger((retrieved_attrs != NIL)),
+ retrieved_attrs);
+}
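
For illustration (hypothetical remote table name; a sketch rather than exact deparse output): the statements built here are parameterized, UPDATE and DELETE locate the target row by the ctid obtained from the auxiliary scan, and a RETURNING clause is appended only when retrieved_attrs is non-empty.

INSERT INTO public.tab(f1, f2) VALUES ($1, $2) RETURNING f2;
UPDATE public.tab SET f2 = $2 WHERE ctid = $1;
DELETE FROM public.tab WHERE ctid = $1;
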
+
+/*
+ * postgresBeginForeignModify
+ * Begin an insert/update/delete operation on a foreign table
+ */
+static void
+postgresBeginForeignModify(ModifyTableState *mtstate,
+ ResultRelInfo *resultRelInfo,
+ List *fdw_private,
+ int subplan_index,
+ int eflags)
+{
+ PgFdwModifyState *fmstate;
+ EState *estate = mtstate->ps.state;
+ CmdType operation = mtstate->operation;
+ Relation rel = resultRelInfo->ri_RelationDesc;
+ RangeTblEntry *rte;
+ Oid userid;
+ ForeignTable *table;
+ ForeignServer *server;
+ UserMapping *user;
+ AttrNumber n_params;
+ Oid typefnoid;
+ bool isvarlena;
+ ListCell *lc;
+
+ /*
+ * Do nothing in EXPLAIN (no ANALYZE) case. resultRelInfo->ri_FdwState
+ * stays NULL.
+ */
+ if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
+ return;
+
+ /* Begin constructing PgFdwModifyState. */
+ fmstate = (PgFdwModifyState *) palloc0(sizeof(PgFdwModifyState));
+ fmstate->rel = rel;
+
+ /*
+ * Identify which user to do the remote access as. This should match what
+ * ExecCheckRTEPerms() does.
+ */
+ rte = rt_fetch(resultRelInfo->ri_RangeTableIndex, estate->es_range_table);
+ userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
+
+ /* Get info about foreign table. */
+ table = GetForeignTable(RelationGetRelid(rel));
+ server = GetForeignServer(table->serverid);
+ user = GetUserMapping(userid, server->serverid);
+
+ /* Open connection; report that we'll create a prepared statement. */
+ fmstate->conn = GetConnection(server, user, true);
+ fmstate->p_name = NULL; /* prepared statement not made yet */
+
+ /* Deconstruct fdw_private data. */
+ fmstate->query = strVal(list_nth(fdw_private,
+ FdwModifyPrivateUpdateSql));
+ fmstate->target_attrs = (List *) list_nth(fdw_private,
+ FdwModifyPrivateTargetAttnums);
+ fmstate->has_returning = intVal(list_nth(fdw_private,
+ FdwModifyPrivateHasReturning));
+ fmstate->retrieved_attrs = (List *) list_nth(fdw_private,
+ FdwModifyPrivateRetrievedAttrs);
+
+ /* Create context for per-tuple temp workspace. */
+ fmstate->temp_cxt = AllocSetContextCreate(estate->es_query_cxt,
+ "postgres_fdw temporary data",
+ ALLOCSET_SMALL_MINSIZE,
+ ALLOCSET_SMALL_INITSIZE,
+ ALLOCSET_SMALL_MAXSIZE);
+
+ /* Prepare for input conversion of RETURNING results. */
+ if (fmstate->has_returning)
+ fmstate->attinmeta = TupleDescGetAttInMetadata(RelationGetDescr(rel));
+
+ /* Prepare for output conversion of parameters used in prepared stmt. */
+ n_params = list_length(fmstate->target_attrs) + 1;
+ fmstate->p_flinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * n_params);
+ fmstate->p_nums = 0;
+
+ if (operation == CMD_UPDATE || operation == CMD_DELETE)
+ {
+ /* Find the ctid resjunk column in the subplan's result */
+ Plan *subplan = mtstate->mt_plans[subplan_index]->plan;
+
+ fmstate->ctidAttno = ExecFindJunkAttributeInTlist(subplan->targetlist,
+ "ctid");
+ if (!AttributeNumberIsValid(fmstate->ctidAttno))
+ elog(ERROR, "could not find junk ctid column");
+
+ /* First transmittable parameter will be ctid */
+ getTypeOutputInfo(TIDOID, &typefnoid, &isvarlena);
+ fmgr_info(typefnoid, &fmstate->p_flinfo[fmstate->p_nums]);
+ fmstate->p_nums++;
+ }
+
+ if (operation == CMD_INSERT || operation == CMD_UPDATE)
+ {
+ /* Set up for remaining transmittable parameters */
+ foreach(lc, fmstate->target_attrs)
+ {
+ int attnum = lfirst_int(lc);
+ Form_pg_attribute attr = RelationGetDescr(rel)->attrs[attnum - 1];
+
+ Assert(!attr->attisdropped);
+
+ getTypeOutputInfo(attr->atttypid, &typefnoid, &isvarlena);
+ fmgr_info(typefnoid, &fmstate->p_flinfo[fmstate->p_nums]);
+ fmstate->p_nums++;
+ }
+ }
+
+ Assert(fmstate->p_nums <= n_params);
+
+ resultRelInfo->ri_FdwState = fmstate;
+}
+
+/*
+ * postgresExecForeignInsert
+ * Insert one row into a foreign table
+ */
+static TupleTableSlot *
+postgresExecForeignInsert(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+ const char **p_values;
+ PGresult *res;
+ int n_rows;
+
+ /* Set up the prepared statement on the remote server, if we didn't yet */
+ if (!fmstate->p_name)
+ prepare_foreign_modify(fmstate);
+
+ /* Convert parameters needed by prepared statement to text form */
+ p_values = convert_prep_stmt_params(fmstate, NULL, slot);
+
+ /*
+ * Execute the prepared statement, and check for success.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = PQexecPrepared(fmstate->conn,
+ fmstate->p_name,
+ fmstate->p_nums,
+ p_values,
+ NULL,
+ NULL,
+ 0);
+ if (PQresultStatus(res) !=
+ (fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
+ pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
+
+ /* Check number of rows affected, and fetch RETURNING tuple if any */
+ if (fmstate->has_returning)
+ {
+ n_rows = PQntuples(res);
+ if (n_rows > 0)
+ store_returning_result(fmstate, slot, res);
+ }
+ else
+ n_rows = atoi(PQcmdTuples(res));
+
+ /* And clean up */
+ PQclear(res);
+
+ MemoryContextReset(fmstate->temp_cxt);
+
+ /* Return NULL if nothing was inserted on the remote end */
+ return (n_rows > 0) ? slot : NULL;
+}
+
+/*
+ * postgresExecForeignUpdate
+ * Update one row in a foreign table
+ */
+static TupleTableSlot *
+postgresExecForeignUpdate(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+ Datum datum;
+ bool isNull;
+ const char **p_values;
+ PGresult *res;
+ int n_rows;
+
+ /* Set up the prepared statement on the remote server, if we didn't yet */
+ if (!fmstate->p_name)
+ prepare_foreign_modify(fmstate);
+
+ /* Get the ctid that was passed up as a resjunk column */
+ datum = ExecGetJunkAttribute(planSlot,
+ fmstate->ctidAttno,
+ &isNull);
+ /* shouldn't ever get a null result... */
+ if (isNull)
+ elog(ERROR, "ctid is NULL");
+
+ /* Convert parameters needed by prepared statement to text form */
+ p_values = convert_prep_stmt_params(fmstate,
+ (ItemPointer) DatumGetPointer(datum),
+ slot);
+
+ /*
+ * Execute the prepared statement, and check for success.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = PQexecPrepared(fmstate->conn,
+ fmstate->p_name,
+ fmstate->p_nums,
+ p_values,
+ NULL,
+ NULL,
+ 0);
+ if (PQresultStatus(res) !=
+ (fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
+ pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
+
+ /* Check number of rows affected, and fetch RETURNING tuple if any */
+ if (fmstate->has_returning)
+ {
+ n_rows = PQntuples(res);
+ if (n_rows > 0)
+ store_returning_result(fmstate, slot, res);
+ }
+ else
+ n_rows = atoi(PQcmdTuples(res));
+
+ /* And clean up */
+ PQclear(res);
+
+ MemoryContextReset(fmstate->temp_cxt);
+
+ /* Return NULL if nothing was updated on the remote end */
+ return (n_rows > 0) ? slot : NULL;
+}
+
+/*
+ * postgresExecForeignDelete
+ * Delete one row from a foreign table
+ */
+static TupleTableSlot *
+postgresExecForeignDelete(EState *estate,
+ ResultRelInfo *resultRelInfo,
+ TupleTableSlot *slot,
+ TupleTableSlot *planSlot)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+ Datum datum;
+ bool isNull;
+ const char **p_values;
+ PGresult *res;
+ int n_rows;
+
+ /* Set up the prepared statement on the remote server, if we didn't yet */
+ if (!fmstate->p_name)
+ prepare_foreign_modify(fmstate);
+
+ /* Get the ctid that was passed up as a resjunk column */
+ datum = ExecGetJunkAttribute(planSlot,
+ fmstate->ctidAttno,
+ &isNull);
+ /* shouldn't ever get a null result... */
+ if (isNull)
+ elog(ERROR, "ctid is NULL");
+
+ /* Convert parameters needed by prepared statement to text form */
+ p_values = convert_prep_stmt_params(fmstate,
+ (ItemPointer) DatumGetPointer(datum),
+ NULL);
+
+ /*
+ * Execute the prepared statement, and check for success.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = PQexecPrepared(fmstate->conn,
+ fmstate->p_name,
+ fmstate->p_nums,
+ p_values,
+ NULL,
+ NULL,
+ 0);
+ if (PQresultStatus(res) !=
+ (fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
+ pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
+
+ /* Check number of rows affected, and fetch RETURNING tuple if any */
+ if (fmstate->has_returning)
+ {
+ n_rows = PQntuples(res);
+ if (n_rows > 0)
+ store_returning_result(fmstate, slot, res);
+ }
+ else
+ n_rows = atoi(PQcmdTuples(res));
+
+ /* And clean up */
+ PQclear(res);
+
+ MemoryContextReset(fmstate->temp_cxt);
+
+ /* Return NULL if nothing was deleted on the remote end */
+ return (n_rows > 0) ? slot : NULL;
+}
+
+/*
+ * postgresEndForeignModify
+ * Finish an insert/update/delete operation on a foreign table
+ */
+static void
+postgresEndForeignModify(EState *estate,
+ ResultRelInfo *resultRelInfo)
+{
+ PgFdwModifyState *fmstate = (PgFdwModifyState *) resultRelInfo->ri_FdwState;
+
+ /* If fmstate is NULL, we are in EXPLAIN; nothing to do */
+ if (fmstate == NULL)
+ return;
+
+ /* If we created a prepared statement, destroy it */
+ if (fmstate->p_name)
+ {
+ char sql[64];
+ PGresult *res;
+
+ snprintf(sql, sizeof(sql), "DEALLOCATE %s", fmstate->p_name);
+
+ /*
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = PQexec(fmstate->conn, sql);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, fmstate->conn, true, sql);
+ PQclear(res);
+ fmstate->p_name = NULL;
+ }
+
+ /* Release remote connection */
+ ReleaseConnection(fmstate->conn);
+ fmstate->conn = NULL;
+}
+
+/*
+ * postgresIsForeignRelUpdatable
+ * Determine whether a foreign table supports INSERT, UPDATE and/or
+ * DELETE.
+ */
+static int
+postgresIsForeignRelUpdatable(Relation rel)
+{
+ bool updatable;
+ ForeignTable *table;
+ ForeignServer *server;
+ ListCell *lc;
+
+ /*
+ * By default, all postgres_fdw foreign tables are assumed updatable. This
+ * can be overridden by a per-server setting, which in turn can be
+ * overridden by a per-table setting.
+ */
+ updatable = true;
+
+ table = GetForeignTable(RelationGetRelid(rel));
+ server = GetForeignServer(table->serverid);
+
+ foreach(lc, server->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "updatable") == 0)
+ updatable = defGetBoolean(def);
+ }
+ foreach(lc, table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "updatable") == 0)
+ updatable = defGetBoolean(def);
+ }
+
+ /*
+ * Currently "updatable" means support for INSERT, UPDATE and DELETE.
+ */
+ return updatable ?
+ (1 << CMD_INSERT) | (1 << CMD_UPDATE) | (1 << CMD_DELETE) : 0;
+}
+
+/*
+ * postgresExplainForeignScan
+ * Produce extra output for EXPLAIN of a ForeignScan on a foreign table
+ */
+static void
+postgresExplainForeignScan(ForeignScanState *node, ExplainState *es)
+{
+ List *fdw_private;
+ char *sql;
+
+ if (es->verbose)
+ {
+ fdw_private = ((ForeignScan *) node->ss.ps.plan)->fdw_private;
+ sql = strVal(list_nth(fdw_private, FdwScanPrivateSelectSql));
+ ExplainPropertyText("Remote SQL", sql, es);
+ }
+}
+
+/*
+ * postgresExplainForeignModify
+ * Produce extra output for EXPLAIN of a ModifyTable on a foreign table
+ */
+static void
+postgresExplainForeignModify(ModifyTableState *mtstate,
+ ResultRelInfo *rinfo,
+ List *fdw_private,
+ int subplan_index,
+ ExplainState *es)
+{
+ if (es->verbose)
+ {
+ char *sql = strVal(list_nth(fdw_private,
+ FdwModifyPrivateUpdateSql));
+
+ ExplainPropertyText("Remote SQL", sql, es);
+ }
+}
+
+
+/*
+ * estimate_path_cost_size
+ * Get cost and size estimates for a foreign scan
+ *
+ * We assume that all the baserestrictinfo clauses will be applied, plus
+ * any join clauses listed in join_conds.
+ */
+static void
+estimate_path_cost_size(PlannerInfo *root,
+ RelOptInfo *baserel,
+ List *join_conds,
+ double *p_rows, int *p_width,
+ Cost *p_startup_cost, Cost *p_total_cost)
+{
+ PgFdwRelationInfo *fpinfo = (PgFdwRelationInfo *) baserel->fdw_private;
+ double rows;
+ double retrieved_rows;
+ int width;
+ Cost startup_cost;
+ Cost total_cost;
+ Cost run_cost;
+ Cost cpu_per_tuple;
+
+ /*
+ * If the table or the server is configured to use remote estimates,
+ * connect to the foreign server and execute EXPLAIN to estimate the
+ * number of rows selected by the restriction+join clauses. Otherwise,
+ * estimate rows using whatever statistics we have locally, in a way
+ * similar to ordinary tables.
+ */
+ if (fpinfo->use_remote_estimate)
+ {
+ List *remote_join_conds;
+ List *local_join_conds;
+ StringInfoData sql;
+ List *retrieved_attrs;
+ PGconn *conn;
+ Selectivity local_sel;
+ QualCost local_cost;
+
+ /*
+ * join_conds might contain both clauses that are safe to send across,
+ * and clauses that aren't.
+ */
+ classifyConditions(root, baserel, join_conds,
+ &remote_join_conds, &local_join_conds);
+
+ /*
+ * Construct EXPLAIN query including the desired SELECT, FROM, and
+ * WHERE clauses. Params and other-relation Vars are replaced by
+ * dummy values.
+ */
+ initStringInfo(&sql);
+ appendStringInfoString(&sql, "EXPLAIN ");
+ deparseSelectSql(&sql, root, baserel, fpinfo->attrs_used,
+ &retrieved_attrs);
+ if (fpinfo->remote_conds)
+ appendWhereClause(&sql, root, baserel, fpinfo->remote_conds,
+ true, NULL);
+ if (remote_join_conds)
+ appendWhereClause(&sql, root, baserel, remote_join_conds,
+ (fpinfo->remote_conds == NIL), NULL);
+
+ /* Get the remote estimate */
+ conn = GetConnection(fpinfo->server, fpinfo->user, false);
+ get_remote_estimate(sql.data, conn, &rows, &width,
+ &startup_cost, &total_cost);
+ ReleaseConnection(conn);
+
+ retrieved_rows = rows;
+
+ /* Factor in the selectivity of the locally-checked quals */
+ local_sel = clauselist_selectivity(root,
+ local_join_conds,
+ baserel->relid,
+ JOIN_INNER,
+ NULL);
+ local_sel *= fpinfo->local_conds_sel;
+
+ rows = clamp_row_est(rows * local_sel);
+
+ /* Add in the eval cost of the locally-checked quals */
+ startup_cost += fpinfo->local_conds_cost.startup;
+ total_cost += fpinfo->local_conds_cost.per_tuple * retrieved_rows;
+ cost_qual_eval(&local_cost, local_join_conds, root);
+ startup_cost += local_cost.startup;
+ total_cost += local_cost.per_tuple * retrieved_rows;
+ }
+ else
+ {
+ /*
+ * We don't support join conditions in this mode (hence, no
+ * parameterized paths can be made).
+ */
+ Assert(join_conds == NIL);
+
+ /* Use rows/width estimates made by set_baserel_size_estimates. */
+ rows = baserel->rows;
+ width = baserel->width;
+
+ /*
+ * Back into an estimate of the number of retrieved rows. Just in
+ * case this is nuts, clamp to at most baserel->tuples.
+ */
+ retrieved_rows = clamp_row_est(rows / fpinfo->local_conds_sel);
+ retrieved_rows = Min(retrieved_rows, baserel->tuples);
+
+ /*
+ * Cost as though this were a seqscan, which is pessimistic. We
+ * effectively imagine the local_conds are being evaluated remotely,
+ * too.
+ */
+ startup_cost = 0;
+ run_cost = 0;
+ run_cost += seq_page_cost * baserel->pages;
+
+ startup_cost += baserel->baserestrictcost.startup;
+ cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
+ run_cost += cpu_per_tuple * baserel->tuples;
+
+ total_cost = startup_cost + run_cost;
+ }
+
+ /*
+ * Add some additional cost factors to account for connection overhead
+ * (fdw_startup_cost), transferring data across the network
+ * (fdw_tuple_cost per retrieved row), and local manipulation of the data
+ * (cpu_tuple_cost per retrieved row).
+ */
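+ /*
+ * For illustration, assuming the documented defaults (fdw_startup_cost =
+ * 100, fdw_tuple_cost = 0.01, cpu_tuple_cost = 0.01) and 1000 retrieved
+ * rows: startup_cost grows by 100, while total_cost grows by
+ * 100 + 0.01 * 1000 + 0.01 * 1000 = 120.
+ */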
+ startup_cost += fpinfo->fdw_startup_cost;
+ total_cost += fpinfo->fdw_startup_cost;
+ total_cost += fpinfo->fdw_tuple_cost * retrieved_rows;
+ total_cost += cpu_tuple_cost * retrieved_rows;
+
+ /* Return results. */
+ *p_rows = rows;
+ *p_width = width;
+ *p_startup_cost = startup_cost;
+ *p_total_cost = total_cost;
+}
+
+/*
+ * Estimate costs of executing a SQL statement remotely.
+ * The given "sql" must be an EXPLAIN command.
+ */
+static void
+get_remote_estimate(const char *sql, PGconn *conn,
+ double *rows, int *width,
+ Cost *startup_cost, Cost *total_cost)
+{
+ PGresult *volatile res = NULL;
+
+ /* PGresult must be released before leaving this function. */
+ PG_TRY();
+ {
+ char *line;
+ char *p;
+ int n;
+
+ /*
+ * Execute EXPLAIN remotely.
+ */
+ res = PQexec(conn, sql);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, sql);
+
+ /*
+ * Extract cost numbers for topmost plan node. Note we search for a
+ * left paren from the end of the line to avoid being confused by
+ * other uses of parentheses.
+ */
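+ /*
+ * For example, the first line of EXPLAIN output typically looks like
+ * Seq Scan on t1 (cost=0.00..35.50 rows=2550 width=4)
+ * so the sscanf below would extract 0.00, 35.50, 2550 and 4.
+ */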
+ line = PQgetvalue(res, 0, 0);
+ p = strrchr(line, '(');
+ if (p == NULL)
+ elog(ERROR, "could not interpret EXPLAIN output: \"%s\"", line);
+ n = sscanf(p, "(cost=%lf..%lf rows=%lf width=%d)",
+ startup_cost, total_cost, rows, width);
+ if (n != 4)
+ elog(ERROR, "could not interpret EXPLAIN output: \"%s\"", line);
+
+ PQclear(res);
+ res = NULL;
+ }
+ PG_CATCH();
+ {
+ if (res)
+ PQclear(res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+}
+
+/*
+ * Detect whether we want to process an EquivalenceClass member.
+ *
+ * This is a callback for use by generate_implied_equalities_for_column.
+ */
+static bool
+ec_member_matches_foreign(PlannerInfo *root, RelOptInfo *rel,
+ EquivalenceClass *ec, EquivalenceMember *em,
+ void *arg)
+{
+ ec_member_foreign_arg *state = (ec_member_foreign_arg *) arg;
+ Expr *expr = em->em_expr;
+
+ /*
+ * If we've identified what we're processing in the current scan, we only
+ * want to match that expression.
+ */
+ if (state->current != NULL)
+ return equal(expr, state->current);
+
+ /*
+ * Otherwise, ignore anything we've already processed.
+ */
+ if (list_member(state->already_used, expr))
+ return false;
+
+ /* This is the new target to process. */
+ state->current = expr;
+ return true;
+}
+
+/*
+ * Create cursor for node's query with current parameter values.
+ */
+static void
+create_cursor(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ ExprContext *econtext = node->ss.ps.ps_ExprContext;
+ int numParams = fsstate->numParams;
+ const char **values = fsstate->param_values;
+ PGconn *conn = fsstate->conn;
+ StringInfoData buf;
+ PGresult *res;
+
+ /*
+ * Construct array of query parameter values in text format. We do the
+ * conversions in the short-lived per-tuple context, so as not to cause a
+ * memory leak over repeated scans.
+ */
+ if (numParams > 0)
+ {
+ int nestlevel;
+ MemoryContext oldcontext;
+ int i;
+ ListCell *lc;
+
+ oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
+
+ nestlevel = set_transmission_modes();
+
+ i = 0;
+ foreach(lc, fsstate->param_exprs)
+ {
+ ExprState *expr_state = (ExprState *) lfirst(lc);
+ Datum expr_value;
+ bool isNull;
+
+ /* Evaluate the parameter expression */
+ expr_value = ExecEvalExpr(expr_state, econtext, &isNull, NULL);
+
+ /*
+ * Get string representation of each parameter value by invoking
+ * type-specific output function, unless the value is null.
+ */
+ if (isNull)
+ values[i] = NULL;
+ else
+ values[i] = OutputFunctionCall(&fsstate->param_flinfo[i],
+ expr_value);
+ i++;
+ }
+
+ reset_transmission_modes(nestlevel);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+
+ /* Construct the DECLARE CURSOR command */
+ initStringInfo(&buf);
+ appendStringInfo(&buf, "DECLARE c%u CURSOR FOR\n%s",
+ fsstate->cursor_number, fsstate->query);
+
+ /*
+ * Notice that we pass NULL for paramTypes, thus forcing the remote server
+ * to infer types for all parameters. Since we explicitly cast every
+ * parameter (see deparse.c), the "inference" is trivial and will produce
+ * the desired result. This allows us to avoid assuming that the remote
+ * server has the same OIDs we do for the parameters' types.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
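+ /*
+ * For illustration, the command built above might look like
+ * DECLARE c1 CURSOR FOR
+ * SELECT "C 1", c2 FROM "S 1"."T 1" WHERE (("C 1" = $1::integer))
+ * where the explicit cast on $1 supplies the parameter's type.
+ */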
+ res = PQexecParams(conn, buf.data, numParams, NULL, values,
+ NULL, NULL, 0);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, conn, true, fsstate->query);
+ PQclear(res);
+
+ /* Mark the cursor as created, and show no tuples have been retrieved */
+ fsstate->cursor_exists = true;
+ fsstate->tuples = NULL;
+ fsstate->num_tuples = 0;
+ fsstate->next_tuple = 0;
+ fsstate->fetch_ct_2 = 0;
+ fsstate->eof_reached = false;
+
+ /* Clean up */
+ pfree(buf.data);
+}
+
+/*
+ * Fetch some more rows from the node's cursor.
+ */
+static void
+fetch_more_data(ForeignScanState *node)
+{
+ PgFdwScanState *fsstate = (PgFdwScanState *) node->fdw_state;
+ PGresult *volatile res = NULL;
+ MemoryContext oldcontext;
+
+ /*
+ * We'll store the tuples in the batch_cxt. First, flush the previous
+ * batch.
+ */
+ fsstate->tuples = NULL;
+ MemoryContextReset(fsstate->batch_cxt);
+ oldcontext = MemoryContextSwitchTo(fsstate->batch_cxt);
+
+ /* PGresult must be released before leaving this function. */
+ PG_TRY();
+ {
+ PGconn *conn = fsstate->conn;
+ char sql[64];
+ int fetch_size;
+ int numrows;
+ int i;
+
+ /* The fetch size is arbitrary, but shouldn't be enormous. */
+ fetch_size = 100;
+
+ snprintf(sql, sizeof(sql), "FETCH %d FROM c%u",
+ fetch_size, fsstate->cursor_number);
+
+ res = PQexec(conn, sql);
+ /* On error, report the original query, not the FETCH. */
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, fsstate->query);
+
+ /* Convert the data into HeapTuples */
+ numrows = PQntuples(res);
+ fsstate->tuples = (HeapTuple *) palloc0(numrows * sizeof(HeapTuple));
+ fsstate->num_tuples = numrows;
+ fsstate->next_tuple = 0;
+
+ for (i = 0; i < numrows; i++)
+ {
+ fsstate->tuples[i] =
+ make_tuple_from_result_row(res, i,
+ fsstate->rel,
+ fsstate->attinmeta,
+ fsstate->retrieved_attrs,
+ fsstate->temp_cxt);
+ }
+
+ /* Update fetch_ct_2 */
+ if (fsstate->fetch_ct_2 < 2)
+ fsstate->fetch_ct_2++;
+
+ /* Must be EOF if we didn't get as many tuples as we asked for. */
+ fsstate->eof_reached = (numrows < fetch_size);
+
+ PQclear(res);
+ res = NULL;
+ }
+ PG_CATCH();
+ {
+ if (res)
+ PQclear(res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ MemoryContextSwitchTo(oldcontext);
+}
+
+/*
+ * Force assorted GUC parameters to settings that ensure that we'll output
+ * data values in a form that is unambiguous to the remote server.
+ *
+ * This is rather expensive and annoying to do once per row, but there's
+ * little choice if we want to be sure values are transmitted accurately;
+ * we can't leave the settings in place between rows for fear of affecting
+ * user-visible computations.
+ *
+ * We use the equivalent of a function SET option to allow the settings to
+ * persist only until the caller calls reset_transmission_modes(). If an
+ * error is thrown in between, guc.c will take care of undoing the settings.
+ *
+ * The return value is the nestlevel that must be passed to
+ * reset_transmission_modes() to undo things.
+ */
+int
+set_transmission_modes(void)
+{
+ int nestlevel = NewGUCNestLevel();
+
+ /*
+ * The values set here should match what pg_dump does. See also
+ * configure_remote_session in connection.c.
+ */
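+ /*
+ * For example, with DateStyle set to 'SQL, MDY' a date would be output as
+ * '01/02/2003', which the remote end could read as either January 2 or
+ * February 1 depending on its own DateStyle; the ISO form '2003-01-02' is
+ * unambiguous. Likewise, extra_float_digits = 3 makes float output precise
+ * enough to be read back exactly.
+ */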
+ if (DateStyle != USE_ISO_DATES)
+ (void) set_config_option("datestyle", "ISO",
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0);
+ if (IntervalStyle != INTSTYLE_POSTGRES)
+ (void) set_config_option("intervalstyle", "postgres",
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0);
+ if (extra_float_digits < 3)
+ (void) set_config_option("extra_float_digits", "3",
+ PGC_USERSET, PGC_S_SESSION,
+ GUC_ACTION_SAVE, true, 0);
+
+ return nestlevel;
+}
+
+/*
+ * Undo the effects of set_transmission_modes().
+ */
+void
+reset_transmission_modes(int nestlevel)
+{
+ AtEOXact_GUC(true, nestlevel);
+}
+
+/*
+ * Utility routine to close a cursor.
+ */
+static void
+close_cursor(PGconn *conn, unsigned int cursor_number)
+{
+ char sql[64];
+ PGresult *res;
+
+ snprintf(sql, sizeof(sql), "CLOSE c%u", cursor_number);
+
+ /*
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
+ res = PQexec(conn, sql);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, conn, true, sql);
+ PQclear(res);
+}
+
+/*
+ * prepare_foreign_modify
+ * Establish a prepared statement for execution of INSERT/UPDATE/DELETE
+ */
+static void
+prepare_foreign_modify(PgFdwModifyState *fmstate)
+{
+ char prep_name[NAMEDATALEN];
+ char *p_name;
+ PGresult *res;
+
+ /* Construct name we'll use for the prepared statement. */
+ snprintf(prep_name, sizeof(prep_name), "pgsql_fdw_prep_%u",
+ GetPrepStmtNumber(fmstate->conn));
+ p_name = pstrdup(prep_name);
+
+ /*
+ * We intentionally do not specify parameter types here, but leave the
+ * remote server to derive them by default. This avoids possible problems
+ * with the remote server using different type OIDs than we do. All of
+ * the prepared statements we use in this module are simple enough that
+ * the remote server will make the right choices.
+ *
+ * We don't use a PG_TRY block here, so be careful not to throw error
+ * without releasing the PGresult.
+ */
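+ /*
+ * As a rough illustration (the exact text comes from deparse.c), the query
+ * prepared here looks something like
+ * INSERT INTO "S 1"."T 1"("C 1", c2) VALUES ($1, $2)
+ * for an insert, or
+ * UPDATE "S 1"."T 1" SET c2 = $2 WHERE ctid = $1
+ * for an update, with a RETURNING list appended when one was requested.
+ */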
+ res = PQprepare(fmstate->conn,
+ p_name,
+ fmstate->query,
+ 0,
+ NULL);
+
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, fmstate->conn, true, fmstate->query);
+ PQclear(res);
+
+ /* Recording the name indicates that the prepare has been done. */
+ fmstate->p_name = p_name;
+}
+
+/*
+ * convert_prep_stmt_params
+ * Create array of text strings representing parameter values
+ *
+ * tupleid is ctid to send, or NULL if none
+ * slot is slot to get remaining parameters from, or NULL if none
+ *
+ * Data is constructed in temp_cxt; caller should reset that after use.
+ */
+static const char **
+convert_prep_stmt_params(PgFdwModifyState *fmstate,
+ ItemPointer tupleid,
+ TupleTableSlot *slot)
+{
+ const char **p_values;
+ int pindex = 0;
+ MemoryContext oldcontext;
+
+ oldcontext = MemoryContextSwitchTo(fmstate->temp_cxt);
+
+ p_values = (const char **) palloc(sizeof(char *) * fmstate->p_nums);
+
+ /* 1st parameter should be ctid, if it's in use */
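+ /*
+ * For illustration, the text form produced for a TID below looks like
+ * '(0,1)', i.e. (block number, offset within the block).
+ */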
+ if (tupleid != NULL)
+ {
+ /* don't need set_transmission_modes for TID output */
+ p_values[pindex] = OutputFunctionCall(&fmstate->p_flinfo[pindex],
+ PointerGetDatum(tupleid));
+ pindex++;
+ }
+
+ /* get following parameters from slot */
+ if (slot != NULL && fmstate->target_attrs != NIL)
+ {
+ int nestlevel;
+ ListCell *lc;
+
+ nestlevel = set_transmission_modes();
+
+ foreach(lc, fmstate->target_attrs)
+ {
+ int attnum = lfirst_int(lc);
+ Datum value;
+ bool isnull;
+
+ value = slot_getattr(slot, attnum, &isnull);
+ if (isnull)
+ p_values[pindex] = NULL;
+ else
+ p_values[pindex] = OutputFunctionCall(&fmstate->p_flinfo[pindex],
+ value);
+ pindex++;
+ }
+
+ reset_transmission_modes(nestlevel);
+ }
+
+ Assert(pindex == fmstate->p_nums);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return p_values;
+}
+
+/*
+ * store_returning_result
+ * Store the result of a RETURNING clause
+ *
+ * On error, be sure to release the PGresult on the way out. Callers do not
+ * have PG_TRY blocks to ensure this happens.
+ */
+static void
+store_returning_result(PgFdwModifyState *fmstate,
+ TupleTableSlot *slot, PGresult *res)
+{
+ /* PGresult must be released before leaving this function. */
+ PG_TRY();
+ {
+ HeapTuple newtup;
+
+ newtup = make_tuple_from_result_row(res, 0,
+ fmstate->rel,
+ fmstate->attinmeta,
+ fmstate->retrieved_attrs,
+ fmstate->temp_cxt);
+ /* tuple will be deleted when it is cleared from the slot */
+ ExecStoreTuple(newtup, slot, InvalidBuffer, true);
+ }
+ PG_CATCH();
+ {
+ if (res)
+ PQclear(res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+}
+
+/*
+ * postgresAnalyzeForeignTable
+ * Test whether analyzing this foreign table is supported
+ */
+static bool
+postgresAnalyzeForeignTable(Relation relation,
+ AcquireSampleRowsFunc *func,
+ BlockNumber *totalpages)
+{
+ ForeignTable *table;
+ ForeignServer *server;
+ UserMapping *user;
+ PGconn *conn;
+ StringInfoData sql;
+ PGresult *volatile res = NULL;
+
+ /* Return the row-analysis function pointer */
+ *func = postgresAcquireSampleRowsFunc;
+
+ /*
+ * Now we have to get the number of pages. It's annoying that the ANALYZE
+ * API requires us to return that now, because it forces some duplication
+ * of effort between this routine and postgresAcquireSampleRowsFunc. But
+ * it's probably not worth redefining that API at this point.
+ */
+
+ /*
+ * Get the connection to use. We do the remote access as the table's
+ * owner, even if the ANALYZE was started by some other user.
+ */
+ table = GetForeignTable(RelationGetRelid(relation));
+ server = GetForeignServer(table->serverid);
+ user = GetUserMapping(relation->rd_rel->relowner, server->serverid);
+ conn = GetConnection(server, user, false);
+
+ /*
+ * Construct command to get page count for relation.
+ */
+ initStringInfo(&sql);
+ deparseAnalyzeSizeSql(&sql, relation);
+
+ /* In what follows, do not risk leaking any PGresults. */
+ PG_TRY();
+ {
+ res = PQexec(conn, sql.data);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, sql.data);
+
+ if (PQntuples(res) != 1 || PQnfields(res) != 1)
+ elog(ERROR, "unexpected result from deparseAnalyzeSizeSql query");
+ *totalpages = strtoul(PQgetvalue(res, 0, 0), NULL, 10);
+
+ PQclear(res);
+ res = NULL;
+ }
+ PG_CATCH();
+ {
+ if (res)
+ PQclear(res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ ReleaseConnection(conn);
+
+ return true;
+}
+
+/*
+ * Acquire a random sample of rows from a foreign table managed by postgres_fdw.
+ *
+ * We fetch the whole table from the remote side and pick out some sample rows.
+ *
+ * Selected rows are returned in the caller-allocated array rows[],
+ * which must have at least targrows entries.
+ * The actual number of rows selected is returned as the function result.
+ * We also count the total number of rows in the table and return it into
+ * *totalrows. Note that *totaldeadrows is always set to 0.
+ *
+ * Note that the returned list of rows is not always in order by physical
+ * position in the table. Therefore, correlation estimates derived later
+ * may be meaningless, but it's OK because we don't use the estimates
+ * currently (the planner only pays attention to correlation for indexscans).
+ */
+static int
+postgresAcquireSampleRowsFunc(Relation relation, int elevel,
+ HeapTuple *rows, int targrows,
+ double *totalrows,
+ double *totaldeadrows)
+{
+ PgFdwAnalyzeState astate;
+ ForeignTable *table;
+ ForeignServer *server;
+ UserMapping *user;
+ PGconn *conn;
+ unsigned int cursor_number;
+ StringInfoData sql;
+ PGresult *volatile res = NULL;
+
+ /* Initialize workspace state */
+ astate.rel = relation;
+ astate.attinmeta = TupleDescGetAttInMetadata(RelationGetDescr(relation));
+
+ astate.rows = rows;
+ astate.targrows = targrows;
+ astate.numrows = 0;
+ astate.samplerows = 0;
+ astate.rowstoskip = -1; /* -1 means not set yet */
+ astate.rstate = anl_init_selection_state(targrows);
+
+ /* Remember ANALYZE context, and create a per-tuple temp context */
+ astate.anl_cxt = CurrentMemoryContext;
+ astate.temp_cxt = AllocSetContextCreate(CurrentMemoryContext,
+ "postgres_fdw temporary data",
+ ALLOCSET_SMALL_MINSIZE,
+ ALLOCSET_SMALL_INITSIZE,
+ ALLOCSET_SMALL_MAXSIZE);
+
+ /*
+ * Get the connection to use. We do the remote access as the table's
+ * owner, even if the ANALYZE was started by some other user.
+ */
+ table = GetForeignTable(RelationGetRelid(relation));
+ server = GetForeignServer(table->serverid);
+ user = GetUserMapping(relation->rd_rel->relowner, server->serverid);
+ conn = GetConnection(server, user, false);
+
+ /*
+ * Construct cursor that retrieves whole rows from remote.
+ */
+ cursor_number = GetCursorNumber(conn);
+ initStringInfo(&sql);
+ appendStringInfo(&sql, "DECLARE c%u CURSOR FOR ", cursor_number);
+ deparseAnalyzeSql(&sql, relation, &astate.retrieved_attrs);
+
+ /* In what follows, do not risk leaking any PGresults. */
+ PG_TRY();
+ {
+ res = PQexec(conn, sql.data);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ pgfdw_report_error(ERROR, res, conn, false, sql.data);
+ PQclear(res);
+ res = NULL;
+
+ /* Retrieve and process rows a batch at a time. */
+ for (;;)
+ {
+ char fetch_sql[64];
+ int fetch_size;
+ int numrows;
+ int i;
+
+ /* Allow users to cancel long query */
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * XXX possible future improvement: if rowstoskip is large, we
+ * could issue a MOVE rather than physically fetching the rows,
+ * then just adjust rowstoskip and samplerows appropriately.
+ */
+
+ /* The fetch size is arbitrary, but shouldn't be enormous. */
+ fetch_size = 100;
+
+ /* Fetch some rows */
+ snprintf(fetch_sql, sizeof(fetch_sql), "FETCH %d FROM c%u",
+ fetch_size, cursor_number);
+
+ res = PQexec(conn, fetch_sql);
+ /* On error, report the original query, not the FETCH. */
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ pgfdw_report_error(ERROR, res, conn, false, sql.data);
+
+ /* Process whatever we got. */
+ numrows = PQntuples(res);
+ for (i = 0; i < numrows; i++)
+ analyze_row_processor(res, i, &astate);
+
+ PQclear(res);
+ res = NULL;
+
+ /* Must be EOF if we didn't get all the rows requested. */
+ if (numrows < fetch_size)
+ break;
+ }
+
+ /* Close the cursor, just to be tidy. */
+ close_cursor(conn, cursor_number);
+ }
+ PG_CATCH();
+ {
+ if (res)
+ PQclear(res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ ReleaseConnection(conn);
+
+ /* We assume that we have no dead tuples. */
+ *totaldeadrows = 0.0;
+
+ /* We've retrieved all living tuples from the foreign server. */
+ *totalrows = astate.samplerows;
+
+ /*
+ * Emit some interesting relation info
+ */
+ ereport(elevel,
+ (errmsg("\"%s\": table contains %.0f rows, %d rows in sample",
+ RelationGetRelationName(relation),
+ astate.samplerows, astate.numrows)));
+
+ return astate.numrows;
+}
+
+/*
+ * Collect sample rows from the result of the query.
+ * - Use all tuples in the sample until the target number of samples is collected.
+ * - Subsequently, replace already-sampled tuples randomly.
+ */
+static void
+analyze_row_processor(PGresult *res, int row, PgFdwAnalyzeState *astate)
+{
+ int targrows = astate->targrows;
+ int pos; /* array index to store tuple in */
+ MemoryContext oldcontext;
+
+ /* Always increment sample row counter. */
+ astate->samplerows += 1;
+
+ /*
+ * Determine the slot where this sample row should be stored. Set pos to a
+ * negative value to indicate that the row should be skipped.
+ */
+ if (astate->numrows < targrows)
+ {
+ /* The first targrows rows are always included in the sample */
+ pos = astate->numrows++;
+ }
+ else
+ {
+ /*
+ * Now we start replacing tuples in the sample until we reach the end
+ * of the relation. Same algorithm as in acquire_sample_rows in
+ * analyze.c; see Jeff Vitter's paper.
+ */
+ if (astate->rowstoskip < 0)
+ astate->rowstoskip = anl_get_next_S(astate->samplerows, targrows,
+ &astate->rstate);
+
+ if (astate->rowstoskip <= 0)
+ {
+ /* Choose a random reservoir element to replace. */
+ pos = (int) (targrows * anl_random_fract());
+ Assert(pos >= 0 && pos < targrows);
+ heap_freetuple(astate->rows[pos]);
+ }
+ else
+ {
+ /* Skip this tuple. */
+ pos = -1;
+ }
+
+ astate->rowstoskip -= 1;
+ }
+
+ if (pos >= 0)
+ {
+ /*
+ * Create sample tuple from current result row, and store it in the
+ * position determined above. The tuple has to be created in anl_cxt.
+ */
+ oldcontext = MemoryContextSwitchTo(astate->anl_cxt);
+
+ astate->rows[pos] = make_tuple_from_result_row(res, row,
+ astate->rel,
+ astate->attinmeta,
+ astate->retrieved_attrs,
+ astate->temp_cxt);
+
+ MemoryContextSwitchTo(oldcontext);
+ }
+}
+
+/*
+ * Create a tuple from the specified row of the PGresult.
+ *
+ * rel is the local representation of the foreign table, attinmeta is
+ * conversion data for the rel's tupdesc, and retrieved_attrs is an
+ * integer list of the table column numbers present in the PGresult.
+ * temp_context is a working context that can be reset after each tuple.
+ */
+static HeapTuple
+make_tuple_from_result_row(PGresult *res,
+ int row,
+ Relation rel,
+ AttInMetadata *attinmeta,
+ List *retrieved_attrs,
+ MemoryContext temp_context)
+{
+ HeapTuple tuple;
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ Datum *values;
+ bool *nulls;
+ ItemPointer ctid = NULL;
+ ConversionLocation errpos;
+ ErrorContextCallback errcallback;
+ MemoryContext oldcontext;
+ ListCell *lc;
+ int j;
+
+ Assert(row < PQntuples(res));
+
+ /*
+ * Do the following work in a temp context that we reset after each tuple.
+ * This cleans up not only the data we have direct access to, but any
+ * cruft the I/O functions might leak.
+ */
+ oldcontext = MemoryContextSwitchTo(temp_context);
+
+ values = (Datum *) palloc0(tupdesc->natts * sizeof(Datum));
+ nulls = (bool *) palloc(tupdesc->natts * sizeof(bool));
+ /* Initialize to nulls for any columns not present in result */
+ memset(nulls, true, tupdesc->natts * sizeof(bool));
+
+ /*
+ * Set up and install callback to report where conversion error occurs.
+ */
+ errpos.rel = rel;
+ errpos.cur_attno = 0;
+ errcallback.callback = conversion_error_callback;
+ errcallback.arg = (void *) &errpos;
+ errcallback.previous = error_context_stack;
+ error_context_stack = &errcallback;
+
+ /*
+ * i indexes columns in the relation, j indexes columns in the PGresult.
+ */
+ j = 0;
+ foreach(lc, retrieved_attrs)
+ {
+ int i = lfirst_int(lc);
+ char *valstr;
+
+ /* fetch next column's textual value */
+ if (PQgetisnull(res, row, j))
+ valstr = NULL;
+ else
+ valstr = PQgetvalue(res, row, j);
+
+ /* convert value to internal representation */
+ if (i > 0)
+ {
+ /* ordinary column */
+ Assert(i <= tupdesc->natts);
+ nulls[i - 1] = (valstr == NULL);
+ /* Apply the input function even to nulls, to support domains */
+ errpos.cur_attno = i;
+ values[i - 1] = InputFunctionCall(&attinmeta->attinfuncs[i - 1],
+ valstr,
+ attinmeta->attioparams[i - 1],
+ attinmeta->atttypmods[i - 1]);
+ errpos.cur_attno = 0;
+ }
+ else if (i == SelfItemPointerAttributeNumber)
+ {
+ /* ctid --- note we ignore any other system column in result */
+ if (valstr != NULL)
+ {
+ Datum datum;
+
+ datum = DirectFunctionCall1(tidin, CStringGetDatum(valstr));
+ ctid = (ItemPointer) DatumGetPointer(datum);
+ }
+ }
+
+ j++;
+ }
+
+ /* Uninstall error context callback. */
+ error_context_stack = errcallback.previous;
+
+ /*
+ * Check we got the expected number of columns. Note: j == 0 and
+ * PQnfields == 1 is expected, since deparse emits a NULL if no columns.
+ */
+ if (j > 0 && j != PQnfields(res))
+ elog(ERROR, "remote query result does not match the foreign table");
+
+ /*
+ * Build the result tuple in caller's memory context.
+ */
+ MemoryContextSwitchTo(oldcontext);
+
+ tuple = heap_form_tuple(tupdesc, values, nulls);
+
+ if (ctid)
+ tuple->t_self = *ctid;
+
+ /* Clean up */
+ MemoryContextReset(temp_context);
+
+ return tuple;
+}
+
+/*
+ * Callback function which is called when an error occurs during column value
+ * conversion. Prints the names of the column and relation.
+ */
+static void
+conversion_error_callback(void *arg)
+{
+ ConversionLocation *errpos = (ConversionLocation *) arg;
+ TupleDesc tupdesc = RelationGetDescr(errpos->rel);
+
+ if (errpos->cur_attno > 0 && errpos->cur_attno <= tupdesc->natts)
+ errcontext("column \"%s\" of foreign table \"%s\"",
+ NameStr(tupdesc->attrs[errpos->cur_attno - 1]->attname),
+ RelationGetRelationName(errpos->rel));
+}
diff --git a/contrib/postgres_fdw/postgres_fdw.control b/contrib/postgres_fdw/postgres_fdw.control
new file mode 100644
index 0000000000..f9ed490752
--- /dev/null
+++ b/contrib/postgres_fdw/postgres_fdw.control
@@ -0,0 +1,5 @@
+# postgres_fdw extension
+comment = 'foreign-data wrapper for remote PostgreSQL servers'
+default_version = '1.0'
+module_pathname = '$libdir/postgres_fdw'
+relocatable = true
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
new file mode 100644
index 0000000000..8aa8f1a1b5
--- /dev/null
+++ b/contrib/postgres_fdw/postgres_fdw.h
@@ -0,0 +1,77 @@
+/*-------------------------------------------------------------------------
+ *
+ * postgres_fdw.h
+ * Foreign-data wrapper for remote PostgreSQL servers
+ *
+ * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/postgres_fdw/postgres_fdw.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef POSTGRES_FDW_H
+#define POSTGRES_FDW_H
+
+#include "foreign/foreign.h"
+#include "lib/stringinfo.h"
+#include "nodes/relation.h"
+#include "utils/rel.h"
+
+#include "libpq-fe.h"
+
+/* in postgres_fdw.c */
+extern int set_transmission_modes(void);
+extern void reset_transmission_modes(int nestlevel);
+
+/* in connection.c */
+extern PGconn *GetConnection(ForeignServer *server, UserMapping *user,
+ bool will_prep_stmt);
+extern void ReleaseConnection(PGconn *conn);
+extern unsigned int GetCursorNumber(PGconn *conn);
+extern unsigned int GetPrepStmtNumber(PGconn *conn);
+extern void pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
+ bool clear, const char *sql);
+
+/* in option.c */
+extern int ExtractConnectionOptions(List *defelems,
+ const char **keywords,
+ const char **values);
+
+/* in deparse.c */
+extern void classifyConditions(PlannerInfo *root,
+ RelOptInfo *baserel,
+ List *input_conds,
+ List **remote_conds,
+ List **local_conds);
+extern bool is_foreign_expr(PlannerInfo *root,
+ RelOptInfo *baserel,
+ Expr *expr);
+extern void deparseSelectSql(StringInfo buf,
+ PlannerInfo *root,
+ RelOptInfo *baserel,
+ Bitmapset *attrs_used,
+ List **retrieved_attrs);
+extern void appendWhereClause(StringInfo buf,
+ PlannerInfo *root,
+ RelOptInfo *baserel,
+ List *exprs,
+ bool is_first,
+ List **params);
+extern void deparseInsertSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ List *targetAttrs, List *returningList,
+ List **retrieved_attrs);
+extern void deparseUpdateSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ List *targetAttrs, List *returningList,
+ List **retrieved_attrs);
+extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root,
+ Index rtindex, Relation rel,
+ List *returningList,
+ List **retrieved_attrs);
+extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel);
+extern void deparseAnalyzeSql(StringInfo buf, Relation rel,
+ List **retrieved_attrs);
+
+#endif /* POSTGRES_FDW_H */
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
new file mode 100644
index 0000000000..6187839453
--- /dev/null
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -0,0 +1,611 @@
+-- ===================================================================
+-- create FDW objects
+-- ===================================================================
+
+CREATE EXTENSION postgres_fdw;
+
+CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw;
+CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw
+ OPTIONS (dbname 'contrib_regression');
+
+CREATE USER MAPPING FOR public SERVER testserver1
+ OPTIONS (user 'value', password 'value');
+CREATE USER MAPPING FOR CURRENT_USER SERVER loopback;
+
+-- ===================================================================
+-- create objects used through FDW loopback server
+-- ===================================================================
+CREATE TYPE user_enum AS ENUM ('foo', 'bar', 'buz');
+CREATE SCHEMA "S 1";
+CREATE TABLE "S 1"."T 1" (
+ "C 1" int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10),
+ c8 user_enum,
+ CONSTRAINT t1_pkey PRIMARY KEY ("C 1")
+);
+CREATE TABLE "S 1"."T 2" (
+ c1 int NOT NULL,
+ c2 text,
+ CONSTRAINT t2_pkey PRIMARY KEY (c1)
+);
+
+INSERT INTO "S 1"."T 1"
+ SELECT id,
+ id % 10,
+ to_char(id, 'FM00000'),
+ '1970-01-01'::timestamptz + ((id % 100) || ' days')::interval,
+ '1970-01-01'::timestamp + ((id % 100) || ' days')::interval,
+ id % 10,
+ id % 10,
+ 'foo'::user_enum
+ FROM generate_series(1, 1000) id;
+INSERT INTO "S 1"."T 2"
+ SELECT id,
+ 'AAA' || to_char(id, 'FM000')
+ FROM generate_series(1, 100) id;
+
+ANALYZE "S 1"."T 1";
+ANALYZE "S 1"."T 2";
+
+-- ===================================================================
+-- create foreign tables
+-- ===================================================================
+CREATE FOREIGN TABLE ft1 (
+ c0 int,
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft1',
+ c8 user_enum
+) SERVER loopback;
+ALTER FOREIGN TABLE ft1 DROP COLUMN c0;
+
+CREATE FOREIGN TABLE ft2 (
+ c1 int NOT NULL,
+ c2 int NOT NULL,
+ cx int,
+ c3 text,
+ c4 timestamptz,
+ c5 timestamp,
+ c6 varchar(10),
+ c7 char(10) default 'ft2',
+ c8 user_enum
+) SERVER loopback;
+ALTER FOREIGN TABLE ft2 DROP COLUMN cx;
+
+-- ===================================================================
+-- tests for validator
+-- ===================================================================
+-- requiressl, krbsrvname and gsslib are omitted because they depend on
+-- configure options
+ALTER SERVER testserver1 OPTIONS (
+ use_remote_estimate 'false',
+ updatable 'true',
+ fdw_startup_cost '123.456',
+ fdw_tuple_cost '0.123',
+ service 'value',
+ connect_timeout 'value',
+ dbname 'value',
+ host 'value',
+ hostaddr 'value',
+ port 'value',
+ --client_encoding 'value',
+ application_name 'value',
+ --fallback_application_name 'value',
+ keepalives 'value',
+ keepalives_idle 'value',
+ keepalives_interval 'value',
+ -- requiressl 'value',
+ sslcompression 'value',
+ sslmode 'value',
+ sslcert 'value',
+ sslkey 'value',
+ sslrootcert 'value',
+ sslcrl 'value'
+ --requirepeer 'value',
+ -- krbsrvname 'value',
+ -- gsslib 'value',
+ --replication 'value'
+);
+ALTER USER MAPPING FOR public SERVER testserver1
+ OPTIONS (DROP user, DROP password);
+ALTER FOREIGN TABLE ft1 OPTIONS (schema_name 'S 1', table_name 'T 1');
+ALTER FOREIGN TABLE ft2 OPTIONS (schema_name 'S 1', table_name 'T 1');
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
+ALTER FOREIGN TABLE ft2 ALTER COLUMN c1 OPTIONS (column_name 'C 1');
+\det+
+
+-- Now we should be able to run ANALYZE.
+-- To exercise multiple code paths, we use local stats on ft1
+-- and remote-estimate mode on ft2.
+ANALYZE ft1;
+ALTER FOREIGN TABLE ft2 OPTIONS (use_remote_estimate 'true');
+
+-- ===================================================================
+-- simple queries
+-- ===================================================================
+-- single table, with/without alias
+EXPLAIN (COSTS false) SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
+SELECT * FROM ft1 ORDER BY c3, c1 OFFSET 100 LIMIT 10;
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+SELECT * FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+-- whole-row reference
+EXPLAIN (VERBOSE, COSTS false) SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+SELECT t1 FROM ft1 t1 ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+-- empty result
+SELECT * FROM ft1 WHERE false;
+-- with WHERE clause
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
+SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1';
+-- aggregate
+SELECT COUNT(*) FROM ft1 t1;
+-- join two tables
+SELECT t1.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10;
+-- subquery
+SELECT * FROM ft1 t1 WHERE t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 <= 10) ORDER BY c1;
+-- subquery+MAX
+SELECT * FROM ft1 t1 WHERE t1.c3 = (SELECT MAX(c3) FROM ft2 t2) ORDER BY c1;
+-- used in CTE
+WITH t1 AS (SELECT * FROM ft1 WHERE c1 <= 10) SELECT t2.c1, t2.c2, t2.c3, t2.c4 FROM t1, ft2 t2 WHERE t1.c1 = t2.c1 ORDER BY t1.c1;
+-- fixed values
+SELECT 'fixed', NULL FROM ft1 t1 WHERE c1 = 1;
+-- user-defined operator/function
+CREATE FUNCTION postgres_fdw_abs(int) RETURNS int AS $$
+BEGIN
+RETURN abs($1);
+END
+$$ LANGUAGE plpgsql IMMUTABLE;
+CREATE OPERATOR === (
+ LEFTARG = int,
+ RIGHTARG = int,
+ PROCEDURE = int4eq,
+ COMMUTATOR = ===,
+ NEGATOR = !==
+);
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = postgres_fdw_abs(t1.c2);
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 === t1.c2;
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = abs(t1.c2);
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = t1.c2;
+
+-- ===================================================================
+-- WHERE with remotely-executable conditions
+-- ===================================================================
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 1; -- Var, OpExpr(b), Const
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 100 AND t1.c2 = 0; -- BoolExpr
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NULL; -- NullTest
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 IS NOT NULL; -- NullTest
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE round(abs(c1), 0) = 1; -- FuncExpr
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- OpExpr(l)
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r)
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- ArrayRef
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c6 = E'foo''s\\bar'; -- check special chars
+EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c8 = 'foo'; -- can't be sent to remote
+-- parameterized remote path
+EXPLAIN (VERBOSE, COSTS false)
+ SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
+SELECT * FROM ft2 a, ft2 b WHERE a.c1 = 47 AND b.c1 = a.c2;
+-- check both safe and unsafe join conditions
+EXPLAIN (VERBOSE, COSTS false)
+ SELECT * FROM ft2 a, ft2 b
+ WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
+SELECT * FROM ft2 a, ft2 b
+WHERE a.c2 = 6 AND b.c1 = a.c1 AND a.c8 = 'foo' AND b.c7 = upper(a.c7);
+-- bug before 9.3.5 due to sloppy handling of remote-estimate parameters
+SELECT * FROM ft1 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft2 WHERE c1 < 5));
+SELECT * FROM ft2 WHERE c1 = ANY (ARRAY(SELECT c1 FROM ft1 WHERE c1 < 5));
+
+-- ===================================================================
+-- parameterized queries
+-- ===================================================================
+-- simple join
+PREPARE st1(int, int) AS SELECT t1.c3, t2.c3 FROM ft1 t1, ft2 t2 WHERE t1.c1 = $1 AND t2.c1 = $2;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st1(1, 2);
+EXECUTE st1(1, 1);
+EXECUTE st1(101, 101);
+-- subquery using stable function (can't be sent to remote)
+PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c4) = '1970-01-17'::date) ORDER BY c1;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st2(10, 20);
+EXECUTE st2(10, 20);
+EXECUTE st2(101, 121);
+-- subquery using immutable function (can be sent to remote)
+PREPARE st3(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND date(c5) = '1970-01-17'::date) ORDER BY c1;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st3(10, 20);
+EXECUTE st3(10, 20);
+EXECUTE st3(20, 30);
+-- custom plan should be chosen initially
+PREPARE st4(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 = $1;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+-- once we try it enough times, should switch to generic plan
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st4(1);
+-- value of $1 should not be sent to remote
+PREPARE st5(user_enum,int) AS SELECT * FROM ft1 t1 WHERE c8 = $1 and c1 = $2;
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+EXPLAIN (VERBOSE, COSTS false) EXECUTE st5('foo', 1);
+EXECUTE st5('foo', 1);
+
+-- cleanup
+DEALLOCATE st1;
+DEALLOCATE st2;
+DEALLOCATE st3;
+DEALLOCATE st4;
+DEALLOCATE st5;
+
+-- ===================================================================
+-- used in pl/pgsql function
+-- ===================================================================
+CREATE OR REPLACE FUNCTION f_test(p_c1 int) RETURNS int AS $$
+DECLARE
+ v_c1 int;
+BEGIN
+ SELECT c1 INTO v_c1 FROM ft1 WHERE c1 = p_c1 LIMIT 1;
+ PERFORM c1 FROM ft1 WHERE c1 = p_c1 AND p_c1 = v_c1 LIMIT 1;
+ RETURN v_c1;
+END;
+$$ LANGUAGE plpgsql;
+SELECT f_test(100);
+DROP FUNCTION f_test(int);
+
+-- ===================================================================
+-- conversion error
+-- ===================================================================
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE int;
+SELECT * FROM ft1 WHERE c1 = 1; -- ERROR
+ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE user_enum;
+
+-- ===================================================================
+-- subtransaction
+-- + local/remote error doesn't break cursor
+-- ===================================================================
+BEGIN;
+DECLARE c CURSOR FOR SELECT * FROM ft1 ORDER BY c1;
+FETCH c;
+SAVEPOINT s;
+ERROR OUT; -- ERROR
+ROLLBACK TO s;
+FETCH c;
+SAVEPOINT s;
+SELECT * FROM ft1 WHERE 1 / (c1 - 1) > 0; -- ERROR
+ROLLBACK TO s;
+FETCH c;
+SELECT * FROM ft1 ORDER BY c1 LIMIT 1;
+COMMIT;
+
+-- ===================================================================
+-- test handling of collations
+-- ===================================================================
+create table loct3 (f1 text collate "C", f2 text);
+create foreign table ft3 (f1 text collate "C", f2 text)
+ server loopback options (table_name 'loct3');
+
+-- can be sent to remote
+explain (verbose, costs off) select * from ft3 where f1 = 'foo';
+explain (verbose, costs off) select * from ft3 where f1 COLLATE "C" = 'foo';
+explain (verbose, costs off) select * from ft3 where f2 = 'foo';
+-- can't be sent to remote
+explain (verbose, costs off) select * from ft3 where f1 COLLATE "POSIX" = 'foo';
+explain (verbose, costs off) select * from ft3 where f1 = 'foo' COLLATE "C";
+explain (verbose, costs off) select * from ft3 where f2 COLLATE "C" = 'foo';
+explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C";
+
+-- ===================================================================
+-- test writable foreign table stuff
+-- ===================================================================
+EXPLAIN (verbose, costs off)
+INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
+INSERT INTO ft2 (c1,c2,c3) SELECT c1+1000,c2+100, c3 || c3 FROM ft2 LIMIT 20;
+INSERT INTO ft2 (c1,c2,c3)
+ VALUES (1101,201,'aaa'), (1102,202,'bbb'), (1103,203,'ccc') RETURNING *;
+INSERT INTO ft2 (c1,c2,c3) VALUES (1104,204,'ddd'), (1105,205,'eee');
+UPDATE ft2 SET c2 = c2 + 300, c3 = c3 || '_update3' WHERE c1 % 10 = 3;
+UPDATE ft2 SET c2 = c2 + 400, c3 = c3 || '_update7' WHERE c1 % 10 = 7 RETURNING *;
+EXPLAIN (verbose, costs off)
+UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
+ FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
+UPDATE ft2 SET c2 = ft2.c2 + 500, c3 = ft2.c3 || '_update9', c7 = DEFAULT
+ FROM ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 9;
+EXPLAIN (verbose, costs off)
+ DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
+DELETE FROM ft2 WHERE c1 % 10 = 5 RETURNING c1, c4;
+EXPLAIN (verbose, costs off)
+DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
+DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2;
+SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1;
+
+-- Test that trigger on remote table works as expected
+CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$
+BEGIN
+ NEW.c3 = NEW.c3 || '_trig_update';
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER t1_br_insert BEFORE INSERT OR UPDATE
+ ON "S 1"."T 1" FOR EACH ROW EXECUTE PROCEDURE "S 1".F_BRTRIG();
+
+INSERT INTO ft2 (c1,c2,c3) VALUES (1208, 818, 'fff') RETURNING *;
+INSERT INTO ft2 (c1,c2,c3,c6) VALUES (1218, 818, 'ggg', '(--;') RETURNING *;
+UPDATE ft2 SET c2 = c2 + 600 WHERE c1 % 10 = 8 AND c1 < 1200 RETURNING *;
+
+-- Test errors thrown on remote side during update
+ALTER TABLE "S 1"."T 1" ADD CONSTRAINT c2positive CHECK (c2 >= 0);
+
+INSERT INTO ft1(c1, c2) VALUES(11, 12); -- duplicate key
+INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive
+UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive
+
+-- Test savepoint/rollback behavior
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+begin;
+update ft2 set c2 = 42 where c2 = 0;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+savepoint s1;
+update ft2 set c2 = 44 where c2 = 4;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+release savepoint s1;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+savepoint s2;
+update ft2 set c2 = 46 where c2 = 6;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+rollback to savepoint s2;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+release savepoint s2;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+savepoint s3;
+update ft2 set c2 = -2 where c2 = 42 and c1 = 10; -- fail on remote side
+rollback to savepoint s3;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+release savepoint s3;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+-- none of the above is committed yet remotely
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+commit;
+select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1;
+select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1;
+
+-- ===================================================================
+-- test serial columns (ie, sequence-based defaults)
+-- ===================================================================
+create table loc1 (f1 serial, f2 text);
+create foreign table rem1 (f1 serial, f2 text)
+ server loopback options(table_name 'loc1');
+select pg_catalog.setval('rem1_f1_seq', 10, false);
+insert into loc1(f2) values('hi');
+insert into rem1(f2) values('hi remote');
+insert into loc1(f2) values('bye');
+insert into rem1(f2) values('bye remote');
+select * from loc1;
+select * from rem1;
+
+-- ===================================================================
+-- test local triggers
+-- ===================================================================
+
+-- Trigger functions "borrowed" from triggers regress test.
+CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS $$
+BEGIN
+ RAISE NOTICE 'trigger_func(%) called: action = %, when = %, level = %',
+ TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL;
+ RETURN NULL;
+END;$$;
+
+CREATE TRIGGER trig_stmt_before BEFORE DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER trig_stmt_after AFTER DELETE OR INSERT OR UPDATE ON rem1
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+
+CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger
+LANGUAGE plpgsql AS $$
+
+declare
+ oldnew text[];
+ relid text;
+ argstr text;
+begin
+
+ relid := TG_relid::regclass;
+ argstr := '';
+ for i in 0 .. TG_nargs - 1 loop
+ if i > 0 then
+ argstr := argstr || ', ';
+ end if;
+ argstr := argstr || TG_argv[i];
+ end loop;
+
+ RAISE NOTICE '%(%) % % % ON %',
+ tg_name, argstr, TG_when, TG_level, TG_OP, relid;
+ oldnew := '{}'::text[];
+ if TG_OP != 'INSERT' then
+ oldnew := array_append(oldnew, format('OLD: %s', OLD));
+ end if;
+
+ if TG_OP != 'DELETE' then
+ oldnew := array_append(oldnew, format('NEW: %s', NEW));
+ end if;
+
+ RAISE NOTICE '%', array_to_string(oldnew, ',');
+
+ if TG_OP = 'DELETE' then
+ return OLD;
+ else
+ return NEW;
+ end if;
+end;
+$$;
+
+-- Test basic functionality
+CREATE TRIGGER trig_row_before
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+delete from rem1;
+insert into rem1 values(1,'insert');
+update rem1 set f2 = 'update' where f1 = 1;
+update rem1 set f2 = f2 || f2;
+
+
+-- cleanup
+DROP TRIGGER trig_row_before ON rem1;
+DROP TRIGGER trig_row_after ON rem1;
+DROP TRIGGER trig_stmt_before ON rem1;
+DROP TRIGGER trig_stmt_after ON rem1;
+
+DELETE from rem1;
+
+
+-- Test WHEN conditions
+
+CREATE TRIGGER trig_row_before_insupd
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW
+WHEN (NEW.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after_insupd
+AFTER INSERT OR UPDATE ON rem1
+FOR EACH ROW
+WHEN (NEW.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+-- Insert or update not matching: nothing happens
+INSERT INTO rem1 values(1, 'insert');
+UPDATE rem1 set f2 = 'test';
+
+-- Insert or update matching: triggers are fired
+INSERT INTO rem1 values(2, 'update');
+UPDATE rem1 set f2 = 'update update' where f1 = '2';
+
+CREATE TRIGGER trig_row_before_delete
+BEFORE DELETE ON rem1
+FOR EACH ROW
+WHEN (OLD.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after_delete
+AFTER DELETE ON rem1
+FOR EACH ROW
+WHEN (OLD.f2 like '%update%')
+EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+-- Trigger is fired for f1=2, not for f1=1
+DELETE FROM rem1;
+
+-- cleanup
+DROP TRIGGER trig_row_before_insupd ON rem1;
+DROP TRIGGER trig_row_after_insupd ON rem1;
+DROP TRIGGER trig_row_before_delete ON rem1;
+DROP TRIGGER trig_row_after_delete ON rem1;
+
+
+-- Test various RETURN statements in BEFORE triggers.
+
+CREATE FUNCTION trig_row_before_insupdate() RETURNS TRIGGER AS $$
+ BEGIN
+ NEW.f2 := NEW.f2 || ' triggered !';
+ RETURN NEW;
+ END
+$$ language plpgsql;
+
+CREATE TRIGGER trig_row_before_insupd
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+
+-- The new values should have 'triggered' appended
+INSERT INTO rem1 values(1, 'insert');
+SELECT * from loc1;
+INSERT INTO rem1 values(2, 'insert') RETURNING f2;
+SELECT * from loc1;
+UPDATE rem1 set f2 = '';
+SELECT * from loc1;
+UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
+SELECT * from loc1;
+
+DELETE FROM rem1;
+
+-- Add a second trigger, to check that the changes are propagated correctly
+-- from trigger to trigger
+CREATE TRIGGER trig_row_before_insupd2
+BEFORE INSERT OR UPDATE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+
+INSERT INTO rem1 values(1, 'insert');
+SELECT * from loc1;
+INSERT INTO rem1 values(2, 'insert') RETURNING f2;
+SELECT * from loc1;
+UPDATE rem1 set f2 = '';
+SELECT * from loc1;
+UPDATE rem1 set f2 = 'skidoo' RETURNING f2;
+SELECT * from loc1;
+
+DROP TRIGGER trig_row_before_insupd ON rem1;
+DROP TRIGGER trig_row_before_insupd2 ON rem1;
+
+DELETE from rem1;
+
+INSERT INTO rem1 VALUES (1, 'test');
+
+-- Test with a trigger returning NULL
+CREATE FUNCTION trig_null() RETURNS TRIGGER AS $$
+ BEGIN
+ RETURN NULL;
+ END
+$$ language plpgsql;
+
+CREATE TRIGGER trig_null
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trig_null();
+
+-- Nothing should have changed.
+INSERT INTO rem1 VALUES (2, 'test2');
+
+SELECT * from loc1;
+
+UPDATE rem1 SET f2 = 'test2';
+
+SELECT * from loc1;
+
+DELETE from rem1;
+
+SELECT * from loc1;
+
+DROP TRIGGER trig_null ON rem1;
+DELETE from rem1;
+
+-- Test a combination of local and remote triggers
+CREATE TRIGGER trig_row_before
+BEFORE INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_row_after
+AFTER INSERT OR UPDATE OR DELETE ON rem1
+FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo');
+
+CREATE TRIGGER trig_local_before BEFORE INSERT OR UPDATE ON loc1
+FOR EACH ROW EXECUTE PROCEDURE trig_row_before_insupdate();
+
+INSERT INTO rem1(f2) VALUES ('test');
+UPDATE rem1 SET f2 = 'testo';
+
+-- Test returning a system attribute
+INSERT INTO rem1(f2) VALUES ('test') RETURNING ctid;
diff --git a/contrib/seg/Makefile b/contrib/seg/Makefile
index d84934c67f..fb9c5765c3 100644
--- a/contrib/seg/Makefile
+++ b/contrib/seg/Makefile
@@ -25,20 +25,6 @@ endif
# segscan is compiled as part of segparse
segparse.o: segscan.c
-segparse.c: segparse.y
-ifdef BISON
- $(BISON) $(BISONFLAGS) -o $@ $<
-else
- @$(missing) bison $< $@
-endif
-
-segscan.c: segscan.l
-ifdef FLEX
- $(FLEX) $(FLEXFLAGS) -o'$@' $<
-else
- @$(missing) flex $< $@
-endif
-
distprep: segparse.c segscan.c
maintainer-clean:
diff --git a/contrib/seg/seg-validate.pl b/contrib/seg/seg-validate.pl
index 9272936aef..cb3fb9a099 100755
--- a/contrib/seg/seg-validate.pl
+++ b/contrib/seg/seg-validate.pl
@@ -2,12 +2,12 @@
$integer = '[+-]?[0-9]+';
$real = '[+-]?[0-9]+\.[0-9]+';
-$RANGE = '(\.\.)(\.)?';
-$PLUMIN = q(\'\+\-\');
-$FLOAT = "(($integer)|($real))([eE]($integer))?";
+$RANGE = '(\.\.)(\.)?';
+$PLUMIN = q(\'\+\-\');
+$FLOAT = "(($integer)|($real))([eE]($integer))?";
$EXTENSION = '<|>|~';
-$boundary = "($EXTENSION)?$FLOAT";
+$boundary = "($EXTENSION)?$FLOAT";
$deviation = $FLOAT;
$rule_1 = $boundary . $PLUMIN . $deviation;
@@ -18,25 +18,33 @@ $rule_5 = $boundary;
print "$rule_5\n";
-while (<>) {
-# s/ +//g;
- if ( /^($rule_1)$/ ) {
- print;
- }
- elsif ( /^($rule_2)$/ ) {
- print;
- }
- elsif ( /^($rule_3)$/ ) {
- print;
- }
- elsif ( /^($rule_4)$/ ) {
- print;
- }
- elsif ( /^($rule_5)$/ ) {
- print;
- }
- else {
- print STDERR "error in $_\n";
- }
+while (<>)
+{
+
+ # s/ +//g;
+ if (/^($rule_1)$/)
+ {
+ print;
+ }
+ elsif (/^($rule_2)$/)
+ {
+ print;
+ }
+ elsif (/^($rule_3)$/)
+ {
+ print;
+ }
+ elsif (/^($rule_4)$/)
+ {
+ print;
+ }
+ elsif (/^($rule_5)$/)
+ {
+ print;
+ }
+ else
+ {
+ print STDERR "error in $_\n";
+ }
}
diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c
index 0cf9853060..0807e238f1 100644
--- a/contrib/seg/seg.c
+++ b/contrib/seg/seg.c
@@ -52,13 +52,6 @@ PG_FUNCTION_INFO_V1(seg_lower);
PG_FUNCTION_INFO_V1(seg_upper);
PG_FUNCTION_INFO_V1(seg_center);
-Datum seg_in(PG_FUNCTION_ARGS);
-Datum seg_out(PG_FUNCTION_ARGS);
-Datum seg_size(PG_FUNCTION_ARGS);
-Datum seg_lower(PG_FUNCTION_ARGS);
-Datum seg_upper(PG_FUNCTION_ARGS);
-Datum seg_center(PG_FUNCTION_ARGS);
-
/*
** GiST support methods
*/
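
Editor's note: the seg.c hunk above only drops the hand-written "Datum seg_in(PG_FUNCTION_ARGS);" style prototypes; PG_FUNCTION_INFO_V1() itself declares the function as of the 9.4-era fmgr changes, which is presumably why these lines became redundant. A minimal sketch of the resulting pattern follows; my_seg_func is a hypothetical name, not part of the seg module.

    #include "postgres.h"
    #include "fmgr.h"

    PG_MODULE_MAGIC;

    /* The V1 macro already declares my_seg_func(), so no separate prototype is needed. */
    PG_FUNCTION_INFO_V1(my_seg_func);

    Datum
    my_seg_func(PG_FUNCTION_ARGS)
    {
        int32   arg = PG_GETARG_INT32(0);

        PG_RETURN_INT32(arg + 1);   /* placeholder body for the sketch */
    }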
diff --git a/contrib/seg/segscan.l b/contrib/seg/segscan.l
index 2d71cc66a0..a3e685488a 100644
--- a/contrib/seg/segscan.l
+++ b/contrib/seg/segscan.l
@@ -10,7 +10,13 @@
/* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */
#undef fprintf
-#define fprintf(file, fmt, msg) ereport(ERROR, (errmsg_internal("%s", msg)))
+#define fprintf(file, fmt, msg) fprintf_to_ereport(fmt, msg)
+
+static void
+fprintf_to_ereport(const char *fmt, const char *msg)
+{
+ ereport(ERROR, (errmsg_internal("%s", msg)));
+}
/* Handles to the buffer that the lexer uses internally */
static YY_BUFFER_STATE scanbufhandle;
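
Editor's note: the segscan.l hunk stops expanding ereport() directly inside the fprintf() macro and instead routes flex's fatal-error call, fprintf(stderr, "%s\n", msg), through a small wrapper function. Below is a standalone sketch of the same macro-redirection trick; report_error() is only a stand-in for the backend's ereport(ERROR, ...), which never returns to the caller.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for ereport(ERROR, ...): print the message and bail out. */
    static void
    report_error(const char *fmt, const char *msg)
    {
        (void) fmt;                 /* the format is ignored, as in the real wrapper */
        fprintf(stderr, "ERROR:  %s\n", msg);
        exit(1);
    }

    /*
     * Redirect the three-argument fprintf() calls that flex's yy_fatal_error
     * emits into report_error().  The macro is defined after report_error(),
     * so the call inside that function still uses the real fprintf().
     */
    #define fprintf(file, fmt, msg) report_error(fmt, msg)

    int
    main(void)
    {
        fprintf(stderr, "%s\n", "scanner blew up");     /* goes to report_error() */
        return 0;                                       /* not reached */
    }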
diff --git a/contrib/seg/sort-segments.pl b/contrib/seg/sort-segments.pl
index 62cdfb1ffd..a465468d5b 100755
--- a/contrib/seg/sort-segments.pl
+++ b/contrib/seg/sort-segments.pl
@@ -2,19 +2,22 @@
# this script will sort any table with the segment data type in its last column
-while (<>) {
- chomp;
- push @rows, $_;
+while (<>)
+{
+ chomp;
+ push @rows, $_;
}
-foreach ( sort {
- @ar = split("\t", $a);
- $valA = pop @ar;
- $valA =~ s/[~<> ]+//g;
- @ar = split("\t", $b);
- $valB = pop @ar;
- $valB =~ s/[~<> ]+//g;
- $valA <=> $valB
-} @rows ) {
- print "$_\n";;
+foreach (
+ sort {
+ @ar = split("\t", $a);
+ $valA = pop @ar;
+ $valA =~ s/[~<> ]+//g;
+ @ar = split("\t", $b);
+ $valB = pop @ar;
+ $valB =~ s/[~<> ]+//g;
+ $valA <=> $valB
+ } @rows)
+{
+ print "$_\n";
}
diff --git a/contrib/sepgsql/database.c b/contrib/sepgsql/database.c
index 5a4246752a..cc8b31eb74 100644
--- a/contrib/sepgsql/database.c
+++ b/contrib/sepgsql/database.c
@@ -4,7 +4,7 @@
*
* Routines corresponding to database objects
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -12,12 +12,14 @@
#include "access/genam.h"
#include "access/heapam.h"
+#include "access/htup_details.h"
#include "access/sysattr.h"
#include "catalog/dependency.h"
#include "catalog/pg_database.h"
#include "catalog/indexing.h"
#include "commands/dbcommands.h"
#include "commands/seclabel.h"
+#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/tqual.h"
#include "sepgsql.h"
@@ -37,9 +39,9 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
HeapTuple tuple;
char *tcontext;
char *ncontext;
- char audit_name[NAMEDATALEN + 20];
ObjectAddress object;
Form_pg_database datForm;
+ StringInfoData audit_name;
/*
* Oid of the source database is not saved in pg_database catalog, so we
@@ -60,11 +62,12 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
/*
* check db_database:{getattr} permission
*/
- snprintf(audit_name, sizeof(audit_name), "database %s", dtemplate);
+ initStringInfo(&audit_name);
+ appendStringInfo(&audit_name, "%s", quote_identifier(dtemplate));
sepgsql_avc_check_perms_label(tcontext,
SEPG_CLASS_DB_DATABASE,
SEPG_DB_DATABASE__GETATTR,
- audit_name,
+ audit_name.data,
true);
/*
@@ -91,17 +94,19 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext,
- SEPG_CLASS_DB_DATABASE);
+ SEPG_CLASS_DB_DATABASE,
+ NameStr(datForm->datname));
/*
* check db_database:{create} permission
*/
- snprintf(audit_name, sizeof(audit_name),
- "database %s", NameStr(datForm->datname));
+ resetStringInfo(&audit_name);
+ appendStringInfo(&audit_name, "%s",
+ quote_identifier(NameStr(datForm->datname)));
sepgsql_avc_check_perms_label(ncontext,
SEPG_CLASS_DB_DATABASE,
SEPG_DB_DATABASE__CREATE,
- audit_name,
+ audit_name.data,
true);
systable_endscan(sscan);
@@ -137,7 +142,7 @@ sepgsql_database_drop(Oid databaseId)
object.classId = DatabaseRelationId;
object.objectId = databaseId;
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_DATABASE,
@@ -148,6 +153,33 @@ sepgsql_database_drop(Oid databaseId)
}
/*
+ * sepgsql_database_post_alter
+ *
+ * It checks privileges to alter the supplied database
+ */
+void
+sepgsql_database_setattr(Oid databaseId)
+{
+ ObjectAddress object;
+ char *audit_name;
+
+ /*
+ * check db_database:{setattr} permission
+ */
+ object.classId = DatabaseRelationId;
+ object.objectId = databaseId;
+ object.objectSubId = 0;
+ audit_name = getObjectIdentity(&object);
+
+ sepgsql_avc_check_perms(&object,
+ SEPG_CLASS_DB_DATABASE,
+ SEPG_DB_DATABASE__SETATTR,
+ audit_name,
+ true);
+ pfree(audit_name);
+}
+
+/*
* sepgsql_database_relabel
*
* It checks privileges to relabel the supplied database with the `seclabel'
@@ -161,7 +193,7 @@ sepgsql_database_relabel(Oid databaseId, const char *seclabel)
object.classId = DatabaseRelationId;
object.objectId = databaseId;
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
/*
* check db_database:{setattr relabelfrom} permission
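
Editor's note: the database.c changes replace the fixed NAMEDATALEN + 20 buffer built with snprintf() by a StringInfo, quote the name with quote_identifier(), and take the audit name from getObjectIdentity() rather than getObjectDescription(). A minimal sketch of the StringInfo pattern as used in the hunk; build_audit_name() is a hypothetical helper, and this is backend code that assumes the server headers rather than a standalone program.

    #include "postgres.h"
    #include "lib/stringinfo.h"
    #include "utils/builtins.h"

    /*
     * Build an audit name for the given database name.  The StringInfo grows
     * as needed, so quoted or unusually long identifiers are never truncated
     * the way a fixed-size char array could be.
     */
    static char *
    build_audit_name(const char *datname)
    {
        StringInfoData buf;

        initStringInfo(&buf);
        appendStringInfo(&buf, "%s", quote_identifier(datname));
        return buf.data;            /* palloc'd in the current memory context */
    }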
diff --git a/contrib/sepgsql/dml.c b/contrib/sepgsql/dml.c
index 47a1087417..bb82c0d6d2 100644
--- a/contrib/sepgsql/dml.c
+++ b/contrib/sepgsql/dml.c
@@ -4,12 +4,13 @@
*
* Routines to handle DML permission checks
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
+#include "access/htup_details.h"
#include "access/sysattr.h"
#include "access/tupdesc.h"
#include "catalog/catalog.h"
@@ -148,7 +149,7 @@ check_relation_privileges(Oid relOid,
Bitmapset *selected,
Bitmapset *modified,
uint32 required,
- bool abort)
+ bool abort_on_violation)
{
ObjectAddress object;
char *audit_name;
@@ -186,7 +187,7 @@ check_relation_privileges(Oid relOid,
object.classId = RelationRelationId;
object.objectId = relOid;
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
switch (relkind)
{
case RELKIND_RELATION:
@@ -194,7 +195,7 @@ check_relation_privileges(Oid relOid,
SEPG_CLASS_DB_TABLE,
required,
audit_name,
- abort);
+ abort_on_violation);
break;
case RELKIND_SEQUENCE:
@@ -205,7 +206,7 @@ check_relation_privileges(Oid relOid,
SEPG_CLASS_DB_SEQUENCE,
SEPG_DB_SEQUENCE__GET_VALUE,
audit_name,
- abort);
+ abort_on_violation);
break;
case RELKIND_VIEW:
@@ -213,7 +214,7 @@ check_relation_privileges(Oid relOid,
SEPG_CLASS_DB_VIEW,
SEPG_DB_VIEW__EXPAND,
audit_name,
- abort);
+ abort_on_violation);
break;
default:
@@ -264,7 +265,7 @@ check_relation_privileges(Oid relOid,
SEPG_CLASS_DB_COLUMN,
column_perms,
audit_name,
- abort);
+ abort_on_violation);
pfree(audit_name);
if (!result)
@@ -279,7 +280,7 @@ check_relation_privileges(Oid relOid,
* Entrypoint of the DML permission checks
*/
bool
-sepgsql_dml_privileges(List *rangeTabls, bool abort)
+sepgsql_dml_privileges(List *rangeTabls, bool abort_on_violation)
{
ListCell *lr;
@@ -351,7 +352,7 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort)
if (!check_relation_privileges(tableOid,
selectedCols,
modifiedCols,
- required, abort))
+ required, abort_on_violation))
return false;
}
list_free(tableIds);
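
Editor's note: the dml.c hunk renames the bool parameter from abort to abort_on_violation, presumably because the old name shadowed the C library's abort(3) inside these functions and read ambiguously. A tiny standalone illustration of the renamed flag, with hypothetical names:

    #include <stdbool.h>
    #include <stdlib.h>

    /*
     * Spelled "abort", the parameter would shadow abort(3) throughout the body;
     * spelled "abort_on_violation", the real abort() stays visible and the
     * flag's meaning is clearer.
     */
    static bool
    check_something(bool abort_on_violation)
    {
        bool    ok = false;         /* pretend the permission check failed */

        if (!ok && abort_on_violation)
            abort();                /* unambiguous call to the library function */
        return ok;
    }

    int
    main(void)
    {
        /* pass false: report the violation instead of aborting the process */
        return check_something(false) ? 0 : 1;
    }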
diff --git a/contrib/sepgsql/expected/alter.out b/contrib/sepgsql/expected/alter.out
new file mode 100644
index 0000000000..124f862cec
--- /dev/null
+++ b/contrib/sepgsql/expected/alter.out
@@ -0,0 +1,223 @@
+--
+-- Test for various ALTER statements
+--
+-- clean-up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+DROP DATABASE IF EXISTS regtest_sepgsql_test_database_1;
+DROP DATABASE IF EXISTS regtest_sepgsql_test_database;
+DROP USER IF EXISTS regtest_sepgsql_test_user;
+RESET client_min_messages;
+SELECT sepgsql_getcon(); -- confirm client privilege
+ sepgsql_getcon
+-------------------------------------------
+ unconfined_u:unconfined_r:unconfined_t:s0
+(1 row)
+
+--
+-- CREATE Objects to be altered (with debug_audit being silent)
+--
+CREATE DATABASE regtest_sepgsql_test_database_1;
+CREATE USER regtest_sepgsql_test_user;
+CREATE SCHEMA regtest_schema_1;
+CREATE SCHEMA regtest_schema_2;
+GRANT ALL ON SCHEMA regtest_schema_1 TO public;
+GRANT ALL ON SCHEMA regtest_schema_2 TO public;
+SET search_path = regtest_schema_1, regtest_schema_2, public;
+CREATE TABLE regtest_table_1 (a int, b text);
+CREATE TABLE regtest_table_2 (c text) inherits (regtest_table_1);
+CREATE TABLE regtest_table_3 (x int primary key, y text);
+CREATE SEQUENCE regtest_seq_1;
+CREATE VIEW regtest_view_1 AS SELECT * FROM regtest_table_1 WHERE a > 0;
+CREATE FUNCTION regtest_func_1 (text) RETURNS bool
+ AS 'BEGIN RETURN true; END' LANGUAGE 'plpgsql';
+-- switch on debug_audit
+SET sepgsql.debug_audit = true;
+SET client_min_messages = LOG;
+--
+-- ALTER xxx OWNER TO
+--
+-- XXX: It should take db_xxx:{setattr} permission checks even if
+-- owner is not actually changed.
+--
+ALTER DATABASE regtest_sepgsql_test_database_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database_1"
+ALTER DATABASE regtest_sepgsql_test_database_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database_1"
+ALTER SCHEMA regtest_schema_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1"
+ALTER SCHEMA regtest_schema_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1"
+ALTER TABLE regtest_table_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_1.regtest_table_1"
+ALTER TABLE regtest_table_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_1.regtest_table_1"
+ALTER SEQUENCE regtest_seq_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_1.regtest_seq_1"
+ALTER SEQUENCE regtest_seq_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_1.regtest_seq_1"
+ALTER VIEW regtest_view_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_1.regtest_view_1"
+ALTER VIEW regtest_view_1 OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_1.regtest_view_1"
+ALTER FUNCTION regtest_func_1(text) OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_1.regtest_func_1(pg_catalog.text)"
+ALTER FUNCTION regtest_func_1(text) OWNER TO regtest_sepgsql_test_user;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_1.regtest_func_1(pg_catalog.text)"
+--
+-- ALTER xxx SET SCHEMA
+--
+ALTER TABLE regtest_table_1 SET SCHEMA regtest_schema_2;
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_1.regtest_table_1"
+ALTER SEQUENCE regtest_seq_1 SET SCHEMA regtest_schema_2;
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_1.regtest_seq_1"
+ALTER VIEW regtest_view_1 SET SCHEMA regtest_schema_2;
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_1.regtest_view_1"
+ALTER FUNCTION regtest_func_1(text) SET SCHEMA regtest_schema_2;
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_1.regtest_func_1(pg_catalog.text)"
+--
+-- ALTER xxx RENAME TO
+--
+ALTER DATABASE regtest_sepgsql_test_database_1 RENAME TO regtest_sepgsql_test_database;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database_1"
+ALTER SCHEMA regtest_schema_1 RENAME TO regtest_schema;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1"
+ALTER TABLE regtest_table_1 RENAME TO regtest_table;
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
+LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table_1"
+ALTER SEQUENCE regtest_seq_1 RENAME TO regtest_seq;
+LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_2.regtest_seq_1"
+ALTER VIEW regtest_view_1 RENAME TO regtest_view;
+LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_2.regtest_view_1"
+ALTER FUNCTION regtest_func_1(text) RENAME TO regtest_func;
+LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_2.regtest_func_1(pg_catalog.text)"
+SET search_path = regtest_schema, regtest_schema_2, public;
+--
+-- misc ALTER commands
+--
+ALTER DATABASE regtest_sepgsql_test_database CONNECTION LIMIT 999;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database"
+ALTER DATABASE regtest_sepgsql_test_database SET search_path TO regtest_schema, public; -- not supported yet
+ALTER TABLE regtest_table ADD COLUMN d float;
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.d"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.d"
+ALTER TABLE regtest_table DROP COLUMN d;
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.d"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.d"
+ALTER TABLE regtest_table ALTER b SET DEFAULT 'abcd'; -- not supported yet
+ALTER TABLE regtest_table ALTER b SET DEFAULT 'XYZ'; -- not supported yet
+ALTER TABLE regtest_table ALTER b DROP DEFAULT; -- not supported yet
+ALTER TABLE regtest_table ALTER b SET NOT NULL;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b"
+ALTER TABLE regtest_table ALTER b DROP NOT NULL;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b"
+ALTER TABLE regtest_table ALTER b SET STATISTICS -1;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b"
+ALTER TABLE regtest_table ALTER b SET (n_distinct = 999);
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b"
+ALTER TABLE regtest_table ALTER b SET STORAGE PLAIN;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b"
+ALTER TABLE regtest_table ADD CONSTRAINT test_fk FOREIGN KEY (a) REFERENCES regtest_table_3(x); -- not supported
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2"
+LINE 1: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" f...
+ ^
+QUERY: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LINE 1: ...schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_s...
+ ^
+QUERY: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table"
+CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a"
+CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3"
+CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x"
+CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)"
+ALTER TABLE regtest_table ADD CONSTRAINT test_ck CHECK (b like '%abc%') NOT VALID; -- not supported
+ALTER TABLE regtest_table VALIDATE CONSTRAINT test_ck; -- not supported
+ALTER TABLE regtest_table DROP CONSTRAINT test_ck; -- not supported
+CREATE TRIGGER regtest_test_trig BEFORE UPDATE ON regtest_table
+ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
+ALTER TABLE regtest_table DISABLE TRIGGER regtest_test_trig; -- not supported
+ALTER TABLE regtest_table ENABLE TRIGGER regtest_test_trig; -- not supported
+CREATE RULE regtest_test_rule AS ON INSERT TO regtest_table_3 DO ALSO NOTHING;
+ALTER TABLE regtest_table_3 DISABLE RULE regtest_test_rule; -- not supported
+ALTER TABLE regtest_table_3 ENABLE RULE regtest_test_rule; -- not supported
+ALTER TABLE regtest_table SET WITH OIDS;
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.oid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid"
+ALTER TABLE regtest_table SET WITHOUT OIDS;
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.oid"
+ALTER TABLE regtest_table SET (fillfactor = 75);
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table"
+ALTER TABLE regtest_table RESET (fillfactor);
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table"
+ALTER TABLE regtest_table_2 NO INHERIT regtest_table; -- not supported
+ALTER TABLE regtest_table_2 INHERIT regtest_table; -- not supported
+ALTER TABLE regtest_table SET TABLESPACE pg_default;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table"
+ALTER VIEW regtest_view SET (security_barrier);
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_2.regtest_view"
+ALTER SEQUENCE regtest_seq INCREMENT BY 10 START WITH 1000;
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_2.regtest_seq"
+--
+-- clean-up objects
+--
+RESET sepgsql.debug_audit;
+RESET client_min_messages;
+DROP DATABASE regtest_sepgsql_test_database;
+DROP SCHEMA regtest_schema CASCADE;
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to table regtest_table_2
+drop cascades to table regtest_table_3
+drop cascades to constraint test_fk on table regtest_table
+DROP SCHEMA regtest_schema_2 CASCADE;
+NOTICE: drop cascades to 4 other objects
+DETAIL: drop cascades to table regtest_table
+drop cascades to sequence regtest_seq
+drop cascades to view regtest_view
+drop cascades to function regtest_func(text)
+DROP USER regtest_sepgsql_test_user;
diff --git a/contrib/sepgsql/expected/ddl.out b/contrib/sepgsql/expected/ddl.out
index 1c7bcc5ca9..08cd6d5e01 100644
--- a/contrib/sepgsql/expected/ddl.out
+++ b/contrib/sepgsql/expected/ddl.out
@@ -1,6 +1,11 @@
--
-- Regression Test for DDL of Object Permission Checks
--
+-- clean-up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+DROP DATABASE IF EXISTS regtest_sepgsql_test_database;
+DROP USER IF EXISTS regtest_sepgsql_test_user;
+RESET client_min_messages;
-- confirm required permissions using audit messages
SELECT sepgsql_getcon(); -- confirm client privilege
sepgsql_getcon
@@ -14,151 +19,257 @@ SET client_min_messages = LOG;
-- CREATE Permission checks
--
CREATE DATABASE regtest_sepgsql_test_database;
-LOG: SELinux: allowed { getattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_db_t:s0 tclass=db_database name="database template1"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="database regtest_sepgsql_test_database"
+LOG: SELinux: allowed { getattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_db_t:s0 tclass=db_database name="template1"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database"
CREATE USER regtest_sepgsql_test_user;
CREATE SCHEMA regtest_schema;
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
GRANT ALL ON SCHEMA regtest_schema TO regtest_sepgsql_test_user;
SET search_path = regtest_schema, public;
CREATE TABLE regtest_table (x serial primary key, y text);
-NOTICE: CREATE TABLE will create implicit sequence "regtest_table_x_seq" for serial column "regtest_table.x"
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="sequence regtest_table_x_seq"
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="table regtest_table"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column tableoid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column cmax"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column xmax"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column cmin"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column xmin"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column ctid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column x"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column y"
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "regtest_table_pkey" for table "regtest_table"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.tableoid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmax"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmax"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmin"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmin"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.ctid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.x"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.y"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LINE 1: CREATE TABLE regtest_table (x serial primary key, y text);
+ ^
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq"
ALTER TABLE regtest_table ADD COLUMN z int;
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column z"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.z"
CREATE TABLE regtest_table_2 (a int) WITH OIDS;
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="table regtest_table_2"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column tableoid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column cmax"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column xmax"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column cmin"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column xmin"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column oid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column ctid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column a"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_2"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.tableoid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmax"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmax"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmin"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmin"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.ctid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.a"
-- corresponding toast table should not have label and permission checks
ALTER TABLE regtest_table_2 ADD COLUMN b text;
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column b"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b"
-- VACUUM FULL internally creates a new table and swaps them later.
VACUUM FULL regtest_table;
CREATE VIEW regtest_view AS SELECT * FROM regtest_table WHERE x < 100;
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="view regtest_view"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view"
CREATE SEQUENCE regtest_seq;
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="sequence regtest_seq"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_seq"
CREATE TYPE regtest_comptype AS (a int, b text);
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
CREATE FUNCTION regtest_func(text,int[]) RETURNS bool LANGUAGE plpgsql
AS 'BEGIN RAISE NOTICE ''regtest_func => %'', $1; RETURN true; END';
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="function regtest_func(text,integer[])"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func(pg_catalog.text,integer[])"
CREATE AGGREGATE regtest_agg (
sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond1 = '0'
);
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="function regtest_agg(integer)"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_agg(integer)"
-- CREATE objects owned by others
SET SESSION AUTHORIZATION regtest_sepgsql_test_user;
SET search_path = regtest_schema, public;
CREATE TABLE regtest_table_3 (x int, y serial);
-NOTICE: CREATE TABLE will create implicit sequence "regtest_table_3_y_seq" for serial column "regtest_table_3.y"
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="sequence regtest_table_3_y_seq"
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="table regtest_table_3"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column tableoid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column cmax"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column xmax"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column cmin"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column xmin"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column ctid"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column y"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_3_y_seq"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.tableoid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmax"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmax"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmin"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmin"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.ctid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.x"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.y"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_3_y_seq"
CREATE VIEW regtest_view_2 AS SELECT * FROM regtest_table_3 WHERE x < y;
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="view regtest_view_2"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view_2"
CREATE FUNCTION regtest_func_2(int) RETURNS bool LANGUAGE plpgsql
AS 'BEGIN RETURN $1 * $1 < 100; END';
-LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="function regtest_func_2(integer)"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func_2(integer)"
RESET SESSION AUTHORIZATION;
--
+-- ALTER and CREATE/DROP extra attribute permissions
+--
+CREATE TABLE regtest_table_4 (x int primary key, y int, z int);
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.tableoid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmax"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmax"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmin"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmin"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.ctid"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.x"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.y"
+LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.z"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LINE 1: CREATE TABLE regtest_table_4 (x int primary key, y int, z in...
+ ^
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+CREATE INDEX regtest_index_tbl4_y ON regtest_table_4(y);
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+CREATE INDEX regtest_index_tbl4_z ON regtest_table_4(z);
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+ALTER TABLE regtest_table_4 ALTER COLUMN y TYPE float;
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.y"
+DROP INDEX regtest_index_tbl4_y;
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+ALTER TABLE regtest_table_4
+ ADD CONSTRAINT regtest_tbl4_con EXCLUDE USING btree (z WITH =);
+LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+DROP TABLE regtest_table_4 CASCADE;
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.tableoid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmax"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmax"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmin"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmin"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.ctid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.x"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.y"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.z"
+--
-- DROP Permission checks (with clean-up)
--
DROP FUNCTION regtest_func(text,int[]);
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="function regtest_func(text,integer[])"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func(pg_catalog.text,integer[])"
DROP AGGREGATE regtest_agg(int);
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="function regtest_agg(integer)"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_agg(integer)"
DROP SEQUENCE regtest_seq;
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="sequence regtest_seq"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_seq"
DROP VIEW regtest_view;
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="view regtest_view"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view"
ALTER TABLE regtest_table DROP COLUMN y;
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column y"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.y"
ALTER TABLE regtest_table_2 SET WITHOUT OIDS;
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column oid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid"
DROP TABLE regtest_table;
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="sequence regtest_table_x_seq"
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="table regtest_table"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column tableoid"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column cmax"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column xmax"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column cmin"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column xmin"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column ctid"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column x"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column z"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.tableoid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmax"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmax"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmin"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmin"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.ctid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.x"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.z"
DROP OWNED BY regtest_sepgsql_test_user;
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="function regtest_func_2(integer)"
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="view regtest_view_2"
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="sequence regtest_table_3_y_seq"
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="table regtest_table_3"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column tableoid"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column cmax"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column xmax"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column cmin"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column xmin"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column ctid"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column y"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func_2(integer)"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view_2"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_3_y_seq"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.tableoid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmax"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmax"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmin"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmin"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.ctid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.x"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.y"
DROP DATABASE regtest_sepgsql_test_database;
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="database regtest_sepgsql_test_database"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database"
DROP USER regtest_sepgsql_test_user;
DROP SCHEMA IF EXISTS regtest_schema CASCADE;
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public"
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table regtest_table_2
drop cascades to type regtest_comptype
-LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="table regtest_table_2"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column tableoid"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column cmax"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column xmax"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column cmin"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column xmin"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column ctid"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column a"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_2 column b"
-LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="schema regtest_schema"
+LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_2"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.tableoid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmax"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmax"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmin"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmin"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.ctid"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.a"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b"
+LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema"
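The ddl.out changes above replace the old audit phrasing ("table regtest_table column y") with schema-qualified object names and add explicit db_schema { search } / { add_name } / { remove_name } checks. A minimal way to provoke the same kind of audit lines, assuming a server built with sepgsql loaded and SELinux enforcing (demo_schema and demo_tbl are illustrative names, not part of the committed tests):

SET sepgsql.debug_audit = true;
SET client_min_messages = log;
CREATE SCHEMA demo_schema;
CREATE TABLE demo_schema.demo_tbl (id int);  -- expect db_schema { add_name } plus db_table/db_column { create } entries
DROP TABLE demo_schema.demo_tbl;             -- expect db_schema { remove_name } plus db_table/db_column { drop } entries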
diff --git a/contrib/sepgsql/expected/dml.out b/contrib/sepgsql/expected/dml.out
index 949789f2f1..3b90f89347 100644
--- a/contrib/sepgsql/expected/dml.out
+++ b/contrib/sepgsql/expected/dml.out
@@ -22,7 +22,6 @@ SECURITY LABEL ON COLUMN t5.e IS 'system_u:object_r:sepgsql_table_t:s0';
SECURITY LABEL ON COLUMN t5.f IS 'system_u:object_r:sepgsql_ro_table_t:s0';
SECURITY LABEL ON COLUMN t5.g IS 'system_u:object_r:sepgsql_secret_table_t:s0';
CREATE TABLE customer (cid int primary key, cname text, ccredit text);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "customer_pkey" for table "customer"
SECURITY LABEL ON COLUMN customer.ccredit IS 'system_u:object_r:sepgsql_secret_table_t:s0';
INSERT INTO customer VALUES (1, 'Taro', '1111-2222-3333-4444'),
(2, 'Hanako', '5555-6666-7777-8888');
@@ -48,6 +47,12 @@ ORDER BY objname;
column | t5.g | system_u:object_r:sepgsql_secret_table_t:s0
(8 rows)
+CREATE SCHEMA my_schema_1;
+CREATE TABLE my_schema_1.ts1 (a int, b text);
+CREATE SCHEMA my_schema_2;
+CREATE TABLE my_schema_2.ts2 (x int, y text);
+SECURITY LABEL ON SCHEMA my_schema_2
+ IS 'system_u:object_r:sepgsql_regtest_invisible_schema_t:s0';
-- Hardwired Rules
UPDATE pg_attribute SET attisdropped = true
WHERE attrelid = 't5'::regclass AND attname = 'f'; -- failed
@@ -167,6 +172,23 @@ COPY t5 (e,f) FROM '/dev/null'; -- failed
ERROR: SELinux: security policy violation
COPY t5 (e) FROM '/dev/null'; -- ok
--
+-- Schema search path
+--
+SET search_path = my_schema_1, my_schema_2, public;
+SELECT * FROM ts1; -- ok
+ a | b
+---+---
+(0 rows)
+
+SELECT * FROM ts2; -- failed (relation not found)
+ERROR: relation "ts2" does not exist
+LINE 1: SELECT * FROM ts2;
+ ^
+SELECT * FROM my_schema_2.ts2; -- failed (policy violation)
+ERROR: SELinux: security policy violation
+LINE 1: SELECT * FROM my_schema_2.ts2;
+ ^
+--
-- Clean up
--
SELECT sepgsql_getcon(); -- confirm client privilege
@@ -181,3 +203,7 @@ DROP TABLE IF EXISTS t3 CASCADE;
DROP TABLE IF EXISTS t4 CASCADE;
DROP TABLE IF EXISTS t5 CASCADE;
DROP TABLE IF EXISTS customer CASCADE;
+DROP SCHEMA IF EXISTS my_schema_1 CASCADE;
+NOTICE: drop cascades to table my_schema_1.ts1
+DROP SCHEMA IF EXISTS my_schema_2 CASCADE;
+NOTICE: drop cascades to table my_schema_2.ts2
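The new dml.out section exercises db_schema { search }: a schema labeled with the regression policy's invisible type is skipped during search_path resolution, while a schema-qualified reference to it fails outright with a policy violation. A sketch of the labeling step and of how to inspect the label that the search check consults, assuming the same sepgsql-enabled setup (hidden_schema is an illustrative name):

CREATE SCHEMA hidden_schema;
SECURITY LABEL ON SCHEMA hidden_schema
    IS 'system_u:object_r:sepgsql_regtest_invisible_schema_t:s0';
-- confirm the label that the db_schema { search } check will consult
SELECT objtype, objname, label
  FROM pg_seclabels
 WHERE provider = 'selinux' AND objtype = 'schema' AND objname = 'hidden_schema';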
diff --git a/contrib/sepgsql/expected/label.out b/contrib/sepgsql/expected/label.out
index d4a6f8ae96..9d1f90437a 100644
--- a/contrib/sepgsql/expected/label.out
+++ b/contrib/sepgsql/expected/label.out
@@ -64,10 +64,16 @@ SELECT sepgsql_getcon(); -- confirm client privilege
CREATE TABLE t3 (s int, t text);
INSERT INTO t3 VALUES (1, 'sss'), (2, 'ttt'), (3, 'uuu');
+SELECT sepgsql_getcon(); -- confirm client privilege
+ sepgsql_getcon
+----------------------------------------------------
+ unconfined_u:unconfined_r:sepgsql_regtest_dba_t:s0
+(1 row)
+
+CREATE TABLE t4 (m int, n text);
+INSERT INTO t4 VALUES (1,'mmm'), (2,'nnn'), (3,'ooo');
SELECT objtype, objname, label FROM pg_seclabels
- WHERE provider = 'selinux'
- AND objtype in ('table', 'column')
- AND objname in ('t1', 't2', 't3');
+ WHERE provider = 'selinux' AND objtype = 'table' AND objname in ('t1', 't2', 't3');
objtype | objname | label
---------+---------+-----------------------------------------------
table | t1 | unconfined_u:object_r:sepgsql_table_t:s0
@@ -75,6 +81,28 @@ SELECT objtype, objname, label FROM pg_seclabels
table | t3 | unconfined_u:object_r:user_sepgsql_table_t:s0
(3 rows)
+SELECT objtype, objname, label FROM pg_seclabels
+ WHERE provider = 'selinux' AND objtype = 'column' AND (objname like 't3.%' OR objname like 't4.%');
+ objtype | objname | label
+---------+-------------+-----------------------------------------------
+ column | t3.t | unconfined_u:object_r:user_sepgsql_table_t:s0
+ column | t3.s | unconfined_u:object_r:user_sepgsql_table_t:s0
+ column | t3.ctid | unconfined_u:object_r:user_sepgsql_table_t:s0
+ column | t3.xmin | unconfined_u:object_r:user_sepgsql_table_t:s0
+ column | t3.cmin | unconfined_u:object_r:user_sepgsql_table_t:s0
+ column | t3.xmax | unconfined_u:object_r:user_sepgsql_table_t:s0
+ column | t3.cmax | unconfined_u:object_r:user_sepgsql_table_t:s0
+ column | t3.tableoid | unconfined_u:object_r:user_sepgsql_table_t:s0
+ column | t4.n | unconfined_u:object_r:sepgsql_table_t:s0
+ column | t4.m | unconfined_u:object_r:sepgsql_table_t:s0
+ column | t4.ctid | unconfined_u:object_r:sepgsql_sysobj_t:s0
+ column | t4.xmin | unconfined_u:object_r:sepgsql_sysobj_t:s0
+ column | t4.cmin | unconfined_u:object_r:sepgsql_sysobj_t:s0
+ column | t4.xmax | unconfined_u:object_r:sepgsql_sysobj_t:s0
+ column | t4.cmax | unconfined_u:object_r:sepgsql_sysobj_t:s0
+ column | t4.tableoid | unconfined_u:object_r:sepgsql_sysobj_t:s0
+(16 rows)
+
--
-- Tests for SECURITY LABEL
--
@@ -103,23 +131,40 @@ SELECT sepgsql_getcon(); -- confirm client privilege
unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0
(1 row)
+SET sepgsql.debug_audit = true;
+SET client_min_messages = log;
SELECT f1(); -- normal procedure
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="public.f1()"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.sepgsql_getcon()"
+CONTEXT: SQL function "f1" statement 1
f1
-----------------------------------------------------
unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0
(1 row)
SELECT f2(); -- trusted procedure
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=system_u:object_r:sepgsql_trusted_proc_exec_t:s0 tclass=db_procedure name="public.f2()"
+LOG: SELinux: allowed { entrypoint } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=system_u:object_r:sepgsql_trusted_proc_exec_t:s0 tclass=db_procedure name="function f2()"
+LOG: SELinux: allowed { transition } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=unconfined_u:unconfined_r:sepgsql_trusted_proc_t:s0 tclass=process
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_trusted_proc_t:s0 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.sepgsql_getcon()"
+CONTEXT: SQL function "f2" statement 1
f2
-----------------------------------------------------
unconfined_u:unconfined_r:sepgsql_trusted_proc_t:s0
(1 row)
SELECT f3(); -- trusted procedure that raises an error
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=system_u:object_r:sepgsql_trusted_proc_exec_t:s0 tclass=db_procedure name="public.f3()"
+LOG: SELinux: allowed { entrypoint } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=system_u:object_r:sepgsql_trusted_proc_exec_t:s0 tclass=db_procedure name="function f3()"
+LOG: SELinux: allowed { transition } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=unconfined_u:unconfined_r:sepgsql_trusted_proc_t:s0 tclass=process
ERROR: an exception from f3()
SELECT f4(); -- failed on domain transition
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=system_u:object_r:sepgsql_nosuch_trusted_proc_exec_t:s0 tclass=db_procedure name="public.f4()"
+LOG: SELinux: allowed { entrypoint } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=system_u:object_r:sepgsql_nosuch_trusted_proc_exec_t:s0 tclass=db_procedure name="function f4()"
+LOG: SELinux: denied { transition } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=unconfined_u:unconfined_r:sepgsql_regtest_nosuch_t:s0 tclass=process
ERROR: SELinux: security policy violation
SELECT sepgsql_getcon(); -- client's label must be restored
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.sepgsql_getcon()"
sepgsql_getcon
-----------------------------------------------------
unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0
@@ -456,6 +501,7 @@ SELECT sepgsql_getcon(); -- confirm client privilege
DROP TABLE IF EXISTS t1 CASCADE;
DROP TABLE IF EXISTS t2 CASCADE;
DROP TABLE IF EXISTS t3 CASCADE;
+DROP TABLE IF EXISTS t4 CASCADE;
DROP FUNCTION IF EXISTS f1() CASCADE;
DROP FUNCTION IF EXISTS f2() CASCADE;
DROP FUNCTION IF EXISTS f3() CASCADE;
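The label.out changes above enable sepgsql.debug_audit around the function-call tests, so the expected output now records the db_procedure { execute } check on every call, plus the { entrypoint } and process { transition } pair that a trusted procedure triggers. A minimal trusted-procedure sketch along the same lines, assuming a sepgsql-enabled server and the regression policy's sepgsql_trusted_proc_exec_t type (f_demo is an illustrative name):

CREATE FUNCTION f_demo() RETURNS text LANGUAGE sql
    AS 'SELECT sepgsql_getcon()';
SECURITY LABEL ON FUNCTION f_demo()
    IS 'system_u:object_r:sepgsql_trusted_proc_exec_t:s0';
SET sepgsql.debug_audit = true;
SET client_min_messages = log;
SELECT f_demo();  -- with a client domain allowed to transition, expect { execute },
                  -- { entrypoint } and a process { transition }, as in the audit lines above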
diff --git a/contrib/sepgsql/expected/misc.out b/contrib/sepgsql/expected/misc.out
index 329852c574..5904840163 100644
--- a/contrib/sepgsql/expected/misc.out
+++ b/contrib/sepgsql/expected/misc.out
@@ -3,3 +3,70 @@
--
LOAD '$libdir/sepgsql'; -- failed
ERROR: SELinux: LOAD is not permitted
+--
+-- Permissions to execute functions
+--
+CREATE TABLE t1 (x int, y text);
+INSERT INTO t1 (SELECT x, md5(x::text) FROM generate_series(1,100) x);
+SET sepgsql.debug_audit = on;
+SET client_min_messages = log;
+-- regular function and operators
+SELECT * FROM t1 WHERE x > 50 AND y like '%64%';
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
+ x | y
+-----+----------------------------------
+ 77 | 28dd2c7955ce926456240b2ff0100bde
+ 89 | 7647966b7343c29048673252e490f736
+ 90 | 8613985ec49eb8f757ae6439e879bb2a
+ 91 | 54229abfcfa5649e7003b83dd4755294
+ 99 | ac627ab1ccbdb62ec96e702f07f6425b
+ 100 | f899139df5e1059396431415e770c6dd
+(6 rows)
+
+-- aggregate function
+SELECT MIN(x), AVG(x) FROM t1;
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)"
+ min | avg
+-----+---------------------
+ 1 | 50.5000000000000000
+(1 row)
+
+-- window function
+SELECT row_number() OVER (order by x), * FROM t1 WHERE y like '%86%';
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x"
+LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()"
+LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)"
+ row_number | x | y
+------------+----+----------------------------------
+ 1 | 2 | c81e728d9d4c2f636f067f89cc14862c
+ 2 | 17 | 70efdf2ec9b086079795c442636b55fb
+ 3 | 22 | b6d767d2f8ed5d21a44b0e5886680cb9
+ 4 | 27 | 02e74f10e0327ad868d138f2b4fdd6f0
+ 5 | 33 | 182be0c5cdcd5072bb1864cdee4d3d6e
+ 6 | 43 | 17e62166fc8586dfa4d1bc0e1742c08b
+ 7 | 54 | a684eceee76fc522773286a895bc8436
+ 8 | 73 | d2ddea18f00665ce8623e36bd4e3c7c5
+ 9 | 76 | fbd7939d674997cdb4692d34de8633c4
+ 10 | 89 | 7647966b7343c29048673252e490f736
+ 11 | 90 | 8613985ec49eb8f757ae6439e879bb2a
+ 12 | 94 | f4b9ec30ad9f68f89b29639786cb62ef
+(12 rows)
+
+RESET sepgsql.debug_audit;
+RESET client_min_messages;
+--
+-- Cleanup
+--
+DROP TABLE IF EXISTS t1 CASCADE;
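The new misc.out tests show that ordinary operators, aggregate transition/final functions and window functions all resolve to pg_catalog procedures, each subject to a db_procedure { execute } check; the hooks.c hunk below adds the OAT_FUNCTION_EXECUTE case that performs it. A minimal sketch of the same effect under the debug-audit settings used above, with no sepgsql-specific objects required:

SET sepgsql.debug_audit = on;
SET client_min_messages = log;
SELECT count(*) FROM generate_series(1, 10) g(x) WHERE x % 2 = 0;
-- the functions resolved here (generate_series, count and the int4 modulus/equality
-- support functions) each pass through the { execute } check and, with debug_audit
-- enabled, should produce an "allowed" LOG line like those shown above
RESET sepgsql.debug_audit;
RESET client_min_messages;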
diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c
index fabd04b71d..d5338fa38d 100644
--- a/contrib/sepgsql/hooks.c
+++ b/contrib/sepgsql/hooks.c
@@ -4,7 +4,7 @@
*
* Entrypoints of the hooks in PostgreSQL, and dispatches the callbacks.
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -38,7 +38,6 @@ void _PG_init(void);
static object_access_hook_type next_object_access_hook = NULL;
static ExecutorCheckPerms_hook_type next_exec_check_perms_hook = NULL;
static ProcessUtility_hook_type next_ProcessUtility_hook = NULL;
-static ExecutorStart_hook_type next_ExecutorStart_hook = NULL;
/*
* Contextual information on DDL commands
@@ -97,53 +96,55 @@ sepgsql_object_access(ObjectAccessType access,
switch (access)
{
case OAT_POST_CREATE:
- switch (classId)
{
- case DatabaseRelationId:
- sepgsql_database_post_create(objectId,
+ ObjectAccessPostCreate *pc_arg = arg;
+ bool is_internal;
+
+ is_internal = pc_arg ? pc_arg->is_internal : false;
+
+ switch (classId)
+ {
+ case DatabaseRelationId:
+ Assert(!is_internal);
+ sepgsql_database_post_create(objectId,
sepgsql_context_info.createdb_dtemplate);
- break;
+ break;
- case NamespaceRelationId:
- sepgsql_schema_post_create(objectId);
- break;
+ case NamespaceRelationId:
+ Assert(!is_internal);
+ sepgsql_schema_post_create(objectId);
+ break;
- case RelationRelationId:
- if (subId == 0)
- {
- /*
- * All cases we want to apply permission checks on
- * creation of a new relation are invocation of the
- * heap_create_with_catalog via DefineRelation or
- * OpenIntoRel. Elsewhere, we need neither assignment
- * of security label nor permission checks.
- */
- switch (sepgsql_context_info.cmdtype)
+ case RelationRelationId:
+ if (subId == 0)
{
- case T_CreateStmt:
- case T_ViewStmt:
- case T_CreateSeqStmt:
- case T_CompositeTypeStmt:
- case T_CreateForeignTableStmt:
- case T_SelectStmt:
- sepgsql_relation_post_create(objectId);
- break;
- default:
- /* via make_new_heap() */
+ /*
+ * The cases in which we want to apply permission
+ * checks on creation of a new relation correspond
+ * to direct user invocation. For internal uses,
+ * that is creation of toast tables, index rebuild
+ * or ALTER TABLE commands, we need neither
+ * assignment of security labels nor permission
+ * checks.
+ */
+ if (is_internal)
break;
+
+ sepgsql_relation_post_create(objectId);
}
- }
- else
- sepgsql_attribute_post_create(objectId, subId);
- break;
+ else
+ sepgsql_attribute_post_create(objectId, subId);
+ break;
- case ProcedureRelationId:
- sepgsql_proc_post_create(objectId);
- break;
+ case ProcedureRelationId:
+ Assert(!is_internal);
+ sepgsql_proc_post_create(objectId);
+ break;
- default:
- /* Ignore unsupported object classes */
- break;
+ default:
+ /* Ignore unsupported object classes */
+ break;
+ }
}
break;
@@ -187,6 +188,80 @@ sepgsql_object_access(ObjectAccessType access,
}
break;
+ case OAT_POST_ALTER:
+ {
+ ObjectAccessPostAlter *pa_arg = arg;
+ bool is_internal = pa_arg->is_internal;
+
+ switch (classId)
+ {
+ case DatabaseRelationId:
+ Assert(!is_internal);
+ sepgsql_database_setattr(objectId);
+ break;
+
+ case NamespaceRelationId:
+ Assert(!is_internal);
+ sepgsql_schema_setattr(objectId);
+ break;
+
+ case RelationRelationId:
+ if (subId == 0)
+ {
+ /*
+ * One case where we don't want to apply permission
+ * checks is when the relation is altered internally,
+ * without the user's intention; e.g., there is no need
+ * to check the toast table/index being renamed at the
+ * end of a table rewrite.
+ */
+ if (is_internal)
+ break;
+
+ sepgsql_relation_setattr(objectId);
+ }
+ else
+ sepgsql_attribute_setattr(objectId, subId);
+ break;
+
+ case ProcedureRelationId:
+ Assert(!is_internal);
+ sepgsql_proc_setattr(objectId);
+ break;
+
+ default:
+ /* Ignore unsupported object classes */
+ break;
+ }
+ }
+ break;
+
+ case OAT_NAMESPACE_SEARCH:
+ {
+ ObjectAccessNamespaceSearch *ns_arg = arg;
+
+ /*
+ * If a stacked extension has already decided not to allow users
+ * to search this schema, we just stick with that decision.
+ */
+ if (!ns_arg->result)
+ break;
+
+ Assert(classId == NamespaceRelationId);
+ Assert(ns_arg->result);
+ ns_arg->result
+ = sepgsql_schema_search(objectId,
+ ns_arg->ereport_on_violation);
+ }
+ break;
+
+ case OAT_FUNCTION_EXECUTE:
+ {
+ Assert(classId == ProcedureRelationId);
+ sepgsql_proc_execute(objectId);
+ }
+ break;
+
default:
elog(ERROR, "unexpected object access type: %d", (int) access);
break;
@@ -216,46 +291,6 @@ sepgsql_exec_check_perms(List *rangeTabls, bool abort)
}
/*
- * sepgsql_executor_start
- *
- * It saves contextual information during ExecutorStart to distinguish
- * a case with/without permission checks later.
- */
-static void
-sepgsql_executor_start(QueryDesc *queryDesc, int eflags)
-{
- sepgsql_context_info_t saved_context_info = sepgsql_context_info;
-
- PG_TRY();
- {
- if (queryDesc->operation == CMD_SELECT)
- sepgsql_context_info.cmdtype = T_SelectStmt;
- else if (queryDesc->operation == CMD_INSERT)
- sepgsql_context_info.cmdtype = T_InsertStmt;
- else if (queryDesc->operation == CMD_DELETE)
- sepgsql_context_info.cmdtype = T_DeleteStmt;
- else if (queryDesc->operation == CMD_UPDATE)
- sepgsql_context_info.cmdtype = T_UpdateStmt;
-
- /*
- * XXX - If queryDesc->operation is not above four cases, an error
- * shall be raised on the following executor stage soon.
- */
- if (next_ExecutorStart_hook)
- (*next_ExecutorStart_hook) (queryDesc, eflags);
- else
- standard_ExecutorStart(queryDesc, eflags);
- }
- PG_CATCH();
- {
- sepgsql_context_info = saved_context_info;
- PG_RE_THROW();
- }
- PG_END_TRY();
- sepgsql_context_info = saved_context_info;
-}
-
-/*
* sepgsql_utility_command
*
* It tries to rough-grained control on utility commands; some of them can
@@ -264,8 +299,8 @@ sepgsql_executor_start(QueryDesc *queryDesc, int eflags)
static void
sepgsql_utility_command(Node *parsetree,
const char *queryString,
+ ProcessUtilityContext context,
ParamListInfo params,
- bool isTopLevel,
DestReceiver *dest,
#ifdef PGXC
bool sentToRemote,
@@ -330,15 +365,17 @@ sepgsql_utility_command(Node *parsetree,
}
if (next_ProcessUtility_hook)
- (*next_ProcessUtility_hook) (parsetree, queryString, params,
- isTopLevel, dest,
+ (*next_ProcessUtility_hook) (parsetree, queryString,
+ context, params,
+ dest,
#ifdef PGXC
sentToRemote,
#endif
completionTag);
else
- standard_ProcessUtility(parsetree, queryString, params,
- isTopLevel, dest,
+ standard_ProcessUtility(parsetree, queryString,
+ context, params,
+ dest,
#ifdef PGXC
sentToRemote,
#endif
@@ -436,10 +473,6 @@ _PG_init(void)
next_ProcessUtility_hook = ProcessUtility_hook;
ProcessUtility_hook = sepgsql_utility_command;
- /* ExecutorStart hook */
- next_ExecutorStart_hook = ExecutorStart_hook;
- ExecutorStart_hook = sepgsql_executor_start;
-
/* init contextual info */
memset(&sepgsql_context_info, 0, sizeof(sepgsql_context_info));
}
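
As an aside to the hooks.c hunk above: the patch drops the ExecutorStart hook and instead reads the is_internal flag that the object_access_hook machinery passes through its arg payload. The sketch below shows, under the PostgreSQL 9.3-era hook signatures, how a loadable module might install such a hook and consume that flag; the module and function names are illustrative only and are not part of this patch.

#include "postgres.h"
#include "fmgr.h"
#include "catalog/objectaccess.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

/* previously installed hook, if any, so we can chain to it */
static object_access_hook_type prev_object_access_hook = NULL;

/*
 * Illustrative callback: ignore internally driven creations (toast tables,
 * index rebuilds, ...) in the same way the patched sepgsql code does.
 */
static void
example_object_access(ObjectAccessType access,
					  Oid classId, Oid objectId, int subId, void *arg)
{
	if (prev_object_access_hook)
		(*prev_object_access_hook) (access, classId, objectId, subId, arg);

	if (access == OAT_POST_CREATE)
	{
		ObjectAccessPostCreate *pc_arg = arg;
		bool		is_internal = pc_arg ? pc_arg->is_internal : false;

		if (is_internal)
			return;				/* nothing to check for internal creations */

		elog(DEBUG1, "user-driven creation: class %u object %u subId %d",
			 classId, objectId, subId);
	}
}

void
_PG_init(void)
{
	prev_object_access_hook = object_access_hook;
	object_access_hook = example_object_access;
}
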
diff --git a/contrib/sepgsql/label.c b/contrib/sepgsql/label.c
index 23577b5844..2682b37864 100644
--- a/contrib/sepgsql/label.c
+++ b/contrib/sepgsql/label.c
@@ -4,13 +4,14 @@
*
* Routines to support SELinux labels (security context)
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/heapam.h"
+#include "access/htup_details.h"
#include "access/genam.h"
#include "access/xact.h"
#include "catalog/catalog.h"
@@ -105,7 +106,7 @@ sepgsql_get_client_label(void)
* sepgsql_set_client_label
*
* This routine tries to switch the current security label of the client, and
- * checks related permissions. The supplied new label shall be added to the
+ * checks related permissions. The supplied new label shall be added to the
* client_label_pending list, then saved at transaction-commit time to ensure
* transaction-awareness.
*/
@@ -160,7 +161,7 @@ sepgsql_set_client_label(const char *new_label)
/*
* sepgsql_xact_callback
*
- * A callback routine of transaction commit/abort/prepare. Commmit or abort
+ * A callback routine of transaction commit/abort/prepare. Commit or abort
* changes in the client_label_pending list.
*/
static void
@@ -302,7 +303,8 @@ sepgsql_needs_fmgr_hook(Oid functionId)
object.objectSubId = 0;
if (!sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_PROCEDURE,
- SEPG_DB_PROCEDURE__EXECUTE,
+ SEPG_DB_PROCEDURE__EXECUTE |
+ SEPG_DB_PROCEDURE__ENTRYPOINT,
SEPGSQL_AVC_NOAUDIT, false))
return true;
@@ -346,13 +348,30 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
* process:transition permission between old and new label,
* when user tries to switch security label of the client on
* execution of trusted procedure.
+ *
+ * Also, db_procedure:entrypoint permission should be checked to
+ * determine whether this procedure can act as an entrypoint of a
+ * trusted procedure. Note that db_procedure:execute permission is
+ * checked individually.
*/
if (stack->new_label)
+ {
+ ObjectAddress object;
+
+ object.classId = ProcedureRelationId;
+ object.objectId = flinfo->fn_oid;
+ object.objectSubId = 0;
+ sepgsql_avc_check_perms(&object,
+ SEPG_CLASS_DB_PROCEDURE,
+ SEPG_DB_PROCEDURE__ENTRYPOINT,
+ getObjectDescription(&object),
+ true);
+
sepgsql_avc_check_perms_label(stack->new_label,
SEPG_CLASS_PROCESS,
SEPG_PROCESS__TRANSITION,
NULL, true);
-
+ }
*private = PointerGetDatum(stack);
}
Assert(!stack->old_label);
@@ -708,7 +727,7 @@ exec_object_restorecon(struct selabel_handle * sehnd, Oid catalogId)
rel = heap_open(catalogId, AccessShareLock);
sscan = systable_beginscan(rel, InvalidOid, false,
- SnapshotNow, 0, NULL);
+ NULL, 0, NULL);
while (HeapTupleIsValid(tuple = systable_getnext(sscan)))
{
Form_pg_database datForm;
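
The entrypoint check added above runs inside sepgsql's function-manager hook. For readers unfamiliar with that mechanism, here is a rough, self-contained sketch of how a module wires up needs_fmgr_hook/fmgr_hook from fmgr.h; the names example_needs_fmgr_hook and example_fmgr_hook are invented for illustration and are not part of this patch.

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

static needs_fmgr_hook_type prev_needs_fmgr_hook = NULL;
static fmgr_hook_type prev_fmgr_hook = NULL;

/* Decide whether calls to fn_oid should be intercepted at all. */
static bool
example_needs_fmgr_hook(Oid fn_oid)
{
	if (prev_needs_fmgr_hook && (*prev_needs_fmgr_hook) (fn_oid))
		return true;

	/* a real module would filter on fn_oid; this sketch intercepts everything */
	return true;
}

/* Called at function entry/exit/abort; sepgsql switches labels at FHET_START. */
static void
example_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *private)
{
	switch (event)
	{
		case FHET_START:
			elog(DEBUG1, "entering function %u", flinfo->fn_oid);
			break;
		case FHET_END:
		case FHET_ABORT:
			elog(DEBUG1, "leaving function %u", flinfo->fn_oid);
			break;
	}

	if (prev_fmgr_hook)
		(*prev_fmgr_hook) (event, flinfo, private);
}

void
_PG_init(void)
{
	prev_needs_fmgr_hook = needs_fmgr_hook;
	needs_fmgr_hook = example_needs_fmgr_hook;
	prev_fmgr_hook = fmgr_hook;
	fmgr_hook = example_fmgr_hook;
}
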
diff --git a/contrib/sepgsql/launcher b/contrib/sepgsql/launcher
index 038ef7128f..62a6c2737d 100755
--- a/contrib/sepgsql/launcher
+++ b/contrib/sepgsql/launcher
@@ -2,7 +2,7 @@
#
# A wrapper script to launch psql command in regression test
#
-# Copyright (c) 2010-2012, PostgreSQL Global Development Group
+# Copyright (c) 2010-2014, PostgreSQL Global Development Group
#
# -------------------------------------------------------------------------
diff --git a/contrib/sepgsql/proc.c b/contrib/sepgsql/proc.c
index b68314d878..1880eb43fa 100644
--- a/contrib/sepgsql/proc.c
+++ b/contrib/sepgsql/proc.c
@@ -4,7 +4,7 @@
*
* Routines corresponding to procedure objects
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -12,15 +12,19 @@
#include "access/genam.h"
#include "access/heapam.h"
+#include "access/htup_details.h"
#include "access/sysattr.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
#include "commands/seclabel.h"
+#include "lib/stringinfo.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
+#include "utils/syscache.h"
#include "utils/tqual.h"
#include "sepgsql.h"
@@ -38,9 +42,11 @@ sepgsql_proc_post_create(Oid functionId)
ScanKeyData skey;
SysScanDesc sscan;
HeapTuple tuple;
+ char *nsp_name;
char *scontext;
char *tcontext;
char *ncontext;
+ uint32 required;
int i;
StringInfoData audit_name;
ObjectAddress object;
@@ -75,7 +81,7 @@ sepgsql_proc_post_create(Oid functionId)
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_SCHEMA,
SEPG_DB_SCHEMA__ADD_NAME,
- getObjectDescription(&object),
+ getObjectIdentity(&object),
true);
/*
@@ -91,26 +97,35 @@ sepgsql_proc_post_create(Oid functionId)
tcontext = sepgsql_get_label(NamespaceRelationId,
proForm->pronamespace, 0);
ncontext = sepgsql_compute_create(scontext, tcontext,
- SEPG_CLASS_DB_PROCEDURE);
+ SEPG_CLASS_DB_PROCEDURE,
+ NameStr(proForm->proname));
/*
- * check db_procedure:{create} permission
+ * check db_procedure:{create (install)} permission
*/
initStringInfo(&audit_name);
- appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname));
+ nsp_name = get_namespace_name(proForm->pronamespace);
+ appendStringInfo(&audit_name, "%s(",
+ quote_qualified_identifier(nsp_name, NameStr(proForm->proname)));
for (i = 0; i < proForm->pronargs; i++)
{
- Oid typeoid = proForm->proargtypes.values[i];
-
if (i > 0)
appendStringInfoChar(&audit_name, ',');
- appendStringInfoString(&audit_name, format_type_be(typeoid));
+
+ object.classId = TypeRelationId;
+ object.objectId = proForm->proargtypes.values[i];
+ object.objectSubId = 0;
+ appendStringInfoString(&audit_name, getObjectIdentity(&object));
}
appendStringInfoChar(&audit_name, ')');
+ required = SEPG_DB_PROCEDURE__CREATE;
+ if (proForm->proleakproof)
+ required |= SEPG_DB_PROCEDURE__INSTALL;
+
sepgsql_avc_check_perms_label(ncontext,
SEPG_CLASS_DB_PROCEDURE,
- SEPG_DB_PROCEDURE__CREATE,
+ required,
audit_name.data,
true);
@@ -150,7 +165,7 @@ sepgsql_proc_drop(Oid functionId)
object.classId = NamespaceRelationId;
object.objectId = get_func_namespace(functionId);
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_SCHEMA,
@@ -165,7 +180,7 @@ sepgsql_proc_drop(Oid functionId)
object.classId = ProcedureRelationId;
object.objectId = functionId;
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_PROCEDURE,
@@ -190,7 +205,7 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
object.classId = ProcedureRelationId;
object.objectId = functionId;
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
/*
* check db_procedure:{setattr relabelfrom} permission
@@ -212,3 +227,109 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
true);
pfree(audit_name);
}
+
+/*
+ * sepgsql_proc_setattr
+ *
+ * It checks privileges to alter the supplied function.
+ */
+void
+sepgsql_proc_setattr(Oid functionId)
+{
+ Relation rel;
+ ScanKeyData skey;
+ SysScanDesc sscan;
+ HeapTuple oldtup;
+ HeapTuple newtup;
+ Form_pg_proc oldform;
+ Form_pg_proc newform;
+ uint32 required;
+ ObjectAddress object;
+ char *audit_name;
+
+ /*
+ * Fetch newer catalog
+ */
+ rel = heap_open(ProcedureRelationId, AccessShareLock);
+
+ ScanKeyInit(&skey,
+ ObjectIdAttributeNumber,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(functionId));
+
+ sscan = systable_beginscan(rel, ProcedureOidIndexId, true,
+ SnapshotSelf, 1, &skey);
+ newtup = systable_getnext(sscan);
+ if (!HeapTupleIsValid(newtup))
+ elog(ERROR, "catalog lookup failed for function %u", functionId);
+ newform = (Form_pg_proc) GETSTRUCT(newtup);
+
+ /*
+ * Fetch older catalog
+ */
+ oldtup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionId));
+ if (!HeapTupleIsValid(oldtup))
+ elog(ERROR, "cache lookup failed for function %u", functionId);
+ oldform = (Form_pg_proc) GETSTRUCT(oldtup);
+
+ /*
+ * Does this ALTER command operate on the namespace?
+ */
+ if (newform->pronamespace != oldform->pronamespace)
+ {
+ sepgsql_schema_remove_name(oldform->pronamespace);
+ sepgsql_schema_add_name(oldform->pronamespace);
+ }
+ if (strcmp(NameStr(newform->proname), NameStr(oldform->proname)) != 0)
+ sepgsql_schema_rename(oldform->pronamespace);
+
+ /*
+ * check db_procedure:{setattr (install)} permission
+ */
+ required = SEPG_DB_PROCEDURE__SETATTR;
+ if (!oldform->proleakproof && newform->proleakproof)
+ required |= SEPG_DB_PROCEDURE__INSTALL;
+
+ object.classId = ProcedureRelationId;
+ object.objectId = functionId;
+ object.objectSubId = 0;
+ audit_name = getObjectIdentity(&object);
+
+ sepgsql_avc_check_perms(&object,
+ SEPG_CLASS_DB_PROCEDURE,
+ required,
+ audit_name,
+ true);
+ /* cleanups */
+ pfree(audit_name);
+
+ ReleaseSysCache(oldtup);
+ systable_endscan(sscan);
+ heap_close(rel, AccessShareLock);
+}
+
+/*
+ * sepgsql_proc_execute
+ *
+ * It checks privileges to execute the supplied function.
+ */
+void
+sepgsql_proc_execute(Oid functionId)
+{
+ ObjectAddress object;
+ char *audit_name;
+
+ /*
+ * check db_procedure:{execute} permission
+ */
+ object.classId = ProcedureRelationId;
+ object.objectId = functionId;
+ object.objectSubId = 0;
+ audit_name = getObjectIdentity(&object);
+ sepgsql_avc_check_perms(&object,
+ SEPG_CLASS_DB_PROCEDURE,
+ SEPG_DB_PROCEDURE__EXECUTE,
+ audit_name,
+ true);
+ pfree(audit_name);
+}
diff --git a/contrib/sepgsql/relation.c b/contrib/sepgsql/relation.c
index e759a7d98e..14c877ea32 100644
--- a/contrib/sepgsql/relation.c
+++ b/contrib/sepgsql/relation.c
@@ -1,10 +1,10 @@
/* -------------------------------------------------------------------------
*
- * contrib/sepgsql/label.c
+ * contrib/sepgsql/relation.c
*
* Routines corresponding to relation/attribute objects
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -12,6 +12,7 @@
#include "access/genam.h"
#include "access/heapam.h"
+#include "access/htup_details.h"
#include "access/sysattr.h"
#include "catalog/indexing.h"
#include "catalog/dependency.h"
@@ -19,13 +20,19 @@
#include "catalog/pg_class.h"
#include "catalog/pg_namespace.h"
#include "commands/seclabel.h"
+#include "lib/stringinfo.h"
+#include "utils/builtins.h"
#include "utils/fmgroids.h"
+#include "utils/catcache.h"
#include "utils/lsyscache.h"
+#include "utils/rel.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
#include "sepgsql.h"
+static void sepgsql_index_modify(Oid indexOid);
+
/*
* sepgsql_attribute_post_create
*
@@ -44,9 +51,9 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
char *scontext;
char *tcontext;
char *ncontext;
- char audit_name[2 * NAMEDATALEN + 20];
ObjectAddress object;
Form_pg_attribute attForm;
+ StringInfoData audit_name;
/*
* Only attributes within regular relation have individual security
@@ -83,17 +90,24 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
scontext = sepgsql_get_client_label();
tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
ncontext = sepgsql_compute_create(scontext, tcontext,
- SEPG_CLASS_DB_COLUMN);
+ SEPG_CLASS_DB_COLUMN,
+ NameStr(attForm->attname));
/*
* check db_column:{create} permission
*/
- snprintf(audit_name, sizeof(audit_name), "table %s column %s",
- get_rel_name(relOid), NameStr(attForm->attname));
+ object.classId = RelationRelationId;
+ object.objectId = relOid;
+ object.objectSubId = 0;
+
+ initStringInfo(&audit_name);
+ appendStringInfo(&audit_name, "%s.%s",
+ getObjectIdentity(&object),
+ quote_identifier(NameStr(attForm->attname)));
sepgsql_avc_check_perms_label(ncontext,
SEPG_CLASS_DB_COLUMN,
SEPG_DB_COLUMN__CREATE,
- audit_name,
+ audit_name.data,
true);
/*
@@ -131,7 +145,7 @@ sepgsql_attribute_drop(Oid relOid, AttrNumber attnum)
object.classId = RelationRelationId;
object.objectId = relOid;
object.objectSubId = attnum;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_COLUMN,
@@ -162,7 +176,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
object.classId = RelationRelationId;
object.objectId = relOid;
object.objectSubId = attnum;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
/*
* check db_column:{setattr relabelfrom} permission
@@ -186,6 +200,36 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
}
/*
+ * sepgsql_attribute_setattr
+ *
+ * It checks privileges to alter the supplied column.
+ */
+void
+sepgsql_attribute_setattr(Oid relOid, AttrNumber attnum)
+{
+ ObjectAddress object;
+ char *audit_name;
+
+ if (get_rel_relkind(relOid) != RELKIND_RELATION)
+ return;
+
+ /*
+ * check db_column:{setattr} permission
+ */
+ object.classId = RelationRelationId;
+ object.objectId = relOid;
+ object.objectSubId = attnum;
+ audit_name = getObjectIdentity(&object);
+
+ sepgsql_avc_check_perms(&object,
+ SEPG_CLASS_DB_COLUMN,
+ SEPG_DB_COLUMN__SETATTR,
+ audit_name,
+ true);
+ pfree(audit_name);
+}
+
+/*
* sepgsql_relation_post_create
*
* The post creation hook of relation/attribute
@@ -200,12 +244,12 @@ sepgsql_relation_post_create(Oid relOid)
Form_pg_class classForm;
ObjectAddress object;
uint16 tclass;
- const char *tclass_text;
char *scontext; /* subject */
char *tcontext; /* schema */
char *rcontext; /* relation */
char *ccontext; /* column */
- char audit_name[2 * NAMEDATALEN + 20];
+ char *nsp_name;
+ StringInfoData audit_name;
/*
* Fetch catalog record of the new relation. Because pg_class entry is not
@@ -227,54 +271,65 @@ sepgsql_relation_post_create(Oid relOid)
classForm = (Form_pg_class) GETSTRUCT(tuple);
+ /* ignore indexes on toast tables */
+ if (classForm->relkind == RELKIND_INDEX &&
+ classForm->relnamespace == PG_TOAST_NAMESPACE)
+ goto out;
+
+ /*
+ * check db_schema:{add_name} permission of the namespace
+ */
+ object.classId = NamespaceRelationId;
+ object.objectId = classForm->relnamespace;
+ object.objectSubId = 0;
+ sepgsql_avc_check_perms(&object,
+ SEPG_CLASS_DB_SCHEMA,
+ SEPG_DB_SCHEMA__ADD_NAME,
+ getObjectIdentity(&object),
+ true);
+
switch (classForm->relkind)
{
case RELKIND_RELATION:
tclass = SEPG_CLASS_DB_TABLE;
- tclass_text = "table";
break;
case RELKIND_SEQUENCE:
tclass = SEPG_CLASS_DB_SEQUENCE;
- tclass_text = "sequence";
break;
case RELKIND_VIEW:
tclass = SEPG_CLASS_DB_VIEW;
- tclass_text = "view";
break;
+ case RELKIND_INDEX:
+ /* deal with indexes specially; no need for tclass */
+ sepgsql_index_modify(relOid);
+ goto out;
default:
+ /* ignore other relkinds */
goto out;
}
/*
- * check db_schema:{add_name} permission of the namespace
- */
- object.classId = NamespaceRelationId;
- object.objectId = classForm->relnamespace;
- object.objectSubId = 0;
- sepgsql_avc_check_perms(&object,
- SEPG_CLASS_DB_SCHEMA,
- SEPG_DB_SCHEMA__ADD_NAME,
- getObjectDescription(&object),
- true);
-
- /*
* Compute a default security label when we create a new relation object
* under the specified namespace.
*/
scontext = sepgsql_get_client_label();
tcontext = sepgsql_get_label(NamespaceRelationId,
classForm->relnamespace, 0);
- rcontext = sepgsql_compute_create(scontext, tcontext, tclass);
+ rcontext = sepgsql_compute_create(scontext, tcontext, tclass,
+ NameStr(classForm->relname));
/*
* check db_xxx:{create} permission
*/
- snprintf(audit_name, sizeof(audit_name), "%s %s",
- tclass_text, NameStr(classForm->relname));
+ nsp_name = get_namespace_name(classForm->relnamespace);
+ initStringInfo(&audit_name);
+ appendStringInfo(&audit_name, "%s.%s",
+ quote_identifier(nsp_name),
+ quote_identifier(NameStr(classForm->relname)));
sepgsql_avc_check_perms_label(rcontext,
tclass,
SEPG_DB_DATABASE__CREATE,
- audit_name,
+ audit_name.data,
true);
/*
@@ -311,14 +366,16 @@ sepgsql_relation_post_create(Oid relOid)
{
attForm = (Form_pg_attribute) GETSTRUCT(atup);
- snprintf(audit_name, sizeof(audit_name), "%s %s column %s",
- tclass_text,
- NameStr(classForm->relname),
- NameStr(attForm->attname));
+ resetStringInfo(&audit_name);
+ appendStringInfo(&audit_name, "%s.%s.%s",
+ quote_identifier(nsp_name),
+ quote_identifier(NameStr(classForm->relname)),
+ quote_identifier(NameStr(attForm->attname)));
ccontext = sepgsql_compute_create(scontext,
rcontext,
- SEPG_CLASS_DB_COLUMN);
+ SEPG_CLASS_DB_COLUMN,
+ NameStr(attForm->attname));
/*
* check db_column:{create} permission
@@ -326,7 +383,7 @@ sepgsql_relation_post_create(Oid relOid)
sepgsql_avc_check_perms_label(ccontext,
SEPG_CLASS_DB_COLUMN,
SEPG_DB_COLUMN__CREATE,
- audit_name,
+ audit_name.data,
true);
object.classId = RelationRelationId;
@@ -340,6 +397,7 @@ sepgsql_relation_post_create(Oid relOid)
heap_close(arel, AccessShareLock);
}
pfree(rcontext);
+
out:
systable_endscan(sscan);
heap_close(rel, AccessShareLock);
@@ -355,18 +413,31 @@ sepgsql_relation_drop(Oid relOid)
{
ObjectAddress object;
char *audit_name;
- uint16_t tclass = 0;
+ uint16_t tclass;
char relkind;
relkind = get_rel_relkind(relOid);
- if (relkind == RELKIND_RELATION)
- tclass = SEPG_CLASS_DB_TABLE;
- else if (relkind == RELKIND_SEQUENCE)
- tclass = SEPG_CLASS_DB_SEQUENCE;
- else if (relkind == RELKIND_VIEW)
- tclass = SEPG_CLASS_DB_VIEW;
- else
- return;
+ switch (relkind)
+ {
+ case RELKIND_RELATION:
+ tclass = SEPG_CLASS_DB_TABLE;
+ break;
+ case RELKIND_SEQUENCE:
+ tclass = SEPG_CLASS_DB_SEQUENCE;
+ break;
+ case RELKIND_VIEW:
+ tclass = SEPG_CLASS_DB_VIEW;
+ break;
+ case RELKIND_INDEX:
+ /* ignore indexes on toast tables */
+ if (get_rel_namespace(relOid) == PG_TOAST_NAMESPACE)
+ return;
+ /* other indexes are handled specially below; no need for tclass */
+ break;
+ default:
+ /* ignore other relkinds */
+ return;
+ }
/*
* check db_schema:{remove_name} permission
@@ -374,7 +445,7 @@ sepgsql_relation_drop(Oid relOid)
object.classId = NamespaceRelationId;
object.objectId = get_rel_namespace(relOid);
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_SCHEMA,
@@ -383,13 +454,20 @@ sepgsql_relation_drop(Oid relOid)
true);
pfree(audit_name);
+ /* deal with indexes specially */
+ if (relkind == RELKIND_INDEX)
+ {
+ sepgsql_index_modify(relOid);
+ return;
+ }
+
/*
* check db_table/sequence/view:{drop} permission
*/
object.classId = RelationRelationId;
object.objectId = relOid;
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
sepgsql_avc_check_perms(&object,
tclass,
@@ -420,7 +498,7 @@ sepgsql_relation_drop(Oid relOid)
object.classId = RelationRelationId;
object.objectId = relOid;
object.objectSubId = attForm->attnum;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_COLUMN,
@@ -462,7 +540,7 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel)
object.classId = RelationRelationId;
object.objectId = relOid;
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
/*
* check db_xxx:{setattr relabelfrom} permission
@@ -484,3 +562,168 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel)
true);
pfree(audit_name);
}
+
+/*
+ * sepgsql_relation_setattr
+ *
+ * It checks privileges to set attributes of the supplied relation.
+ */
+void
+sepgsql_relation_setattr(Oid relOid)
+{
+ Relation rel;
+ ScanKeyData skey;
+ SysScanDesc sscan;
+ HeapTuple oldtup;
+ HeapTuple newtup;
+ Form_pg_class oldform;
+ Form_pg_class newform;
+ ObjectAddress object;
+ char *audit_name;
+ uint16_t tclass;
+
+ switch (get_rel_relkind(relOid))
+ {
+ case RELKIND_RELATION:
+ tclass = SEPG_CLASS_DB_TABLE;
+ break;
+ case RELKIND_SEQUENCE:
+ tclass = SEPG_CLASS_DB_SEQUENCE;
+ break;
+ case RELKIND_VIEW:
+ tclass = SEPG_CLASS_DB_VIEW;
+ break;
+ case RELKIND_INDEX:
+ /* deal with indexes specially */
+ sepgsql_index_modify(relOid);
+ return;
+ default:
+ /* other relkinds don't need additional work */
+ return;
+ }
+
+ /*
+ * Fetch newer catalog
+ */
+ rel = heap_open(RelationRelationId, AccessShareLock);
+
+ ScanKeyInit(&skey,
+ ObjectIdAttributeNumber,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(relOid));
+
+ sscan = systable_beginscan(rel, ClassOidIndexId, true,
+ SnapshotSelf, 1, &skey);
+
+ newtup = systable_getnext(sscan);
+ if (!HeapTupleIsValid(newtup))
+ elog(ERROR, "catalog lookup failed for relation %u", relOid);
+ newform = (Form_pg_class) GETSTRUCT(newtup);
+
+ /*
+ * Fetch older catalog
+ */
+ oldtup = SearchSysCache1(RELOID, ObjectIdGetDatum(relOid));
+ if (!HeapTupleIsValid(oldtup))
+ elog(ERROR, "cache lookup failed for relation %u", relOid);
+ oldform = (Form_pg_class) GETSTRUCT(oldtup);
+
+ /*
+ * Does this ALTER command operate on the namespace?
+ */
+ if (newform->relnamespace != oldform->relnamespace)
+ {
+ sepgsql_schema_remove_name(oldform->relnamespace);
+ sepgsql_schema_add_name(newform->relnamespace);
+ }
+ if (strcmp(NameStr(newform->relname), NameStr(oldform->relname)) != 0)
+ sepgsql_schema_rename(oldform->relnamespace);
+
+ /*
+ * XXX - In a future version, db_tuple:{use} on the system catalog entry
+ * shall be checked if the tablespace configuration is changed.
+ */
+
+ /*
+ * check db_xxx:{setattr} permission
+ */
+ object.classId = RelationRelationId;
+ object.objectId = relOid;
+ object.objectSubId = 0;
+ audit_name = getObjectIdentity(&object);
+
+ sepgsql_avc_check_perms(&object,
+ tclass,
+ SEPG_DB_TABLE__SETATTR,
+ audit_name,
+ true);
+ pfree(audit_name);
+
+ ReleaseSysCache(oldtup);
+ systable_endscan(sscan);
+ heap_close(rel, AccessShareLock);
+}
+
+/*
+ * sepgsql_relation_setattr_extra
+ *
+ * It checks permissions on the relation referenced by extra attributes,
+ * such as pg_index entries. Like core PostgreSQL, sepgsql does not treat
+ * such entries as individual "objects"; thus, modification of these
+ * entries is considered as setting an attribute of the underlying
+ * relation.
+ */
+static void
+sepgsql_relation_setattr_extra(Relation catalog,
+ Oid catindex_id,
+ Oid extra_oid,
+ AttrNumber anum_relation_id,
+ AttrNumber anum_extra_id)
+{
+ ScanKeyData skey;
+ SysScanDesc sscan;
+ HeapTuple tuple;
+ Datum datum;
+ bool isnull;
+
+ ScanKeyInit(&skey, anum_extra_id,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(extra_oid));
+
+ sscan = systable_beginscan(catalog, catindex_id, true,
+ SnapshotSelf, 1, &skey);
+ tuple = systable_getnext(sscan);
+ if (!HeapTupleIsValid(tuple))
+ elog(ERROR, "catalog lookup failed for object %u in catalog \"%s\"",
+ extra_oid, RelationGetRelationName(catalog));
+
+ datum = heap_getattr(tuple, anum_relation_id,
+ RelationGetDescr(catalog), &isnull);
+ Assert(!isnull);
+
+ sepgsql_relation_setattr(DatumGetObjectId(datum));
+
+ systable_endscan(sscan);
+}
+
+/*
+ * sepgsql_index_modify
+ * Handle index create, update, drop
+ *
+ * Unlike other relation kinds, indexes do not have their own security labels,
+ * so instead of doing checks directly we treat them as extra attributes of
+ * their owning tables and check 'setattr' permission on the table.
+ */
+static void
+sepgsql_index_modify(Oid indexOid)
+{
+ Relation catalog = heap_open(IndexRelationId, AccessShareLock);
+
+ /* check db_table:{setattr} permission of the table being indexed */
+ sepgsql_relation_setattr_extra(catalog,
+ IndexRelidIndexId,
+ indexOid,
+ Anum_pg_index_indrelid,
+ Anum_pg_index_indexrelid);
+ heap_close(catalog, AccessShareLock);
+}
diff --git a/contrib/sepgsql/schema.c b/contrib/sepgsql/schema.c
index 230449dc4b..fc103b0260 100644
--- a/contrib/sepgsql/schema.c
+++ b/contrib/sepgsql/schema.c
@@ -4,7 +4,7 @@
*
* Routines corresponding to schema objects
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -12,13 +12,16 @@
#include "access/genam.h"
#include "access/heapam.h"
+#include "access/htup_details.h"
#include "access/sysattr.h"
#include "catalog/dependency.h"
#include "catalog/indexing.h"
#include "catalog/pg_database.h"
#include "catalog/pg_namespace.h"
#include "commands/seclabel.h"
+#include "lib/stringinfo.h"
#include "miscadmin.h"
+#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/lsyscache.h"
#include "utils/tqual.h"
@@ -40,9 +43,10 @@ sepgsql_schema_post_create(Oid namespaceId)
HeapTuple tuple;
char *tcontext;
char *ncontext;
- char audit_name[NAMEDATALEN + 20];
+ const char *nsp_name;
ObjectAddress object;
Form_pg_namespace nspForm;
+ StringInfoData audit_name;
/*
* Compute a default security label when we create a new schema object
@@ -66,21 +70,27 @@ sepgsql_schema_post_create(Oid namespaceId)
elog(ERROR, "catalog lookup failed for namespace %u", namespaceId);
nspForm = (Form_pg_namespace) GETSTRUCT(tuple);
+ nsp_name = NameStr(nspForm->nspname);
+ if (strncmp(nsp_name, "pg_temp_", 8) == 0)
+ nsp_name = "pg_temp";
+ else if (strncmp(nsp_name, "pg_toast_temp_", 14) == 0)
+ nsp_name = "pg_toast_temp";
tcontext = sepgsql_get_label(DatabaseRelationId, MyDatabaseId, 0);
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext,
- SEPG_CLASS_DB_SCHEMA);
+ SEPG_CLASS_DB_SCHEMA,
+ nsp_name);
/*
* check db_schema:{create}
*/
- snprintf(audit_name, sizeof(audit_name),
- "schema %s", NameStr(nspForm->nspname));
+ initStringInfo(&audit_name);
+ appendStringInfo(&audit_name, "%s", quote_identifier(nsp_name));
sepgsql_avc_check_perms_label(ncontext,
SEPG_CLASS_DB_SCHEMA,
SEPG_DB_SCHEMA__CREATE,
- audit_name,
+ audit_name.data,
true);
systable_endscan(sscan);
heap_close(rel, AccessShareLock);
@@ -114,7 +124,7 @@ sepgsql_schema_drop(Oid namespaceId)
object.classId = NamespaceRelationId;
object.objectId = namespaceId;
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_SCHEMA,
@@ -139,7 +149,7 @@ sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
object.classId = NamespaceRelationId;
object.objectId = namespaceId;
object.objectSubId = 0;
- audit_name = getObjectDescription(&object);
+ audit_name = getObjectIdentity(&object);
/*
* check db_schema:{setattr relabelfrom} permission
@@ -161,3 +171,67 @@ sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
true);
pfree(audit_name);
}
+
+/*
+ * sepgsql_schema_check_perms
+ *
+ * utility routine to check db_schema:{xxx} permissions
+ */
+static bool
+check_schema_perms(Oid namespaceId, uint32 required, bool abort_on_violation)
+{
+ ObjectAddress object;
+ char *audit_name;
+ bool result;
+
+ object.classId = NamespaceRelationId;
+ object.objectId = namespaceId;
+ object.objectSubId = 0;
+ audit_name = getObjectIdentity(&object);
+
+ result = sepgsql_avc_check_perms(&object,
+ SEPG_CLASS_DB_SCHEMA,
+ required,
+ audit_name,
+ abort_on_violation);
+ pfree(audit_name);
+
+ return result;
+}
+
+/* db_schema:{setattr} permission */
+void
+sepgsql_schema_setattr(Oid namespaceId)
+{
+ check_schema_perms(namespaceId, SEPG_DB_SCHEMA__SETATTR, true);
+}
+
+/* db_schema:{search} permission */
+bool
+sepgsql_schema_search(Oid namespaceId, bool abort_on_violation)
+{
+ return check_schema_perms(namespaceId,
+ SEPG_DB_SCHEMA__SEARCH,
+ abort_on_violation);
+}
+
+void
+sepgsql_schema_add_name(Oid namespaceId)
+{
+ check_schema_perms(namespaceId, SEPG_DB_SCHEMA__ADD_NAME, true);
+}
+
+void
+sepgsql_schema_remove_name(Oid namespaceId)
+{
+ check_schema_perms(namespaceId, SEPG_DB_SCHEMA__REMOVE_NAME, true);
+}
+
+void
+sepgsql_schema_rename(Oid namespaceId)
+{
+ check_schema_perms(namespaceId,
+ SEPG_DB_SCHEMA__ADD_NAME |
+ SEPG_DB_SCHEMA__REMOVE_NAME,
+ true);
+}
diff --git a/contrib/sepgsql/selinux.c b/contrib/sepgsql/selinux.c
index baf92b6f6d..b7a1083ee6 100644
--- a/contrib/sepgsql/selinux.c
+++ b/contrib/sepgsql/selinux.c
@@ -5,7 +5,7 @@
* Interactions between userspace and selinux in kernelspace,
* using libselinux api.
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -836,7 +836,8 @@ sepgsql_compute_avd(const char *scontext,
char *
sepgsql_compute_create(const char *scontext,
const char *tcontext,
- uint16 tclass)
+ uint16 tclass,
+ const char *objname)
{
security_context_t ncontext;
security_class_t tclass_ex;
@@ -853,9 +854,11 @@ sepgsql_compute_create(const char *scontext,
* Ask SELinux what is the default context for the given object class on a
* pair of security contexts
*/
- if (security_compute_create_raw((security_context_t) scontext,
- (security_context_t) tcontext,
- tclass_ex, &ncontext) < 0)
+ if (security_compute_create_name_raw((security_context_t) scontext,
+ (security_context_t) tcontext,
+ tclass_ex,
+ objname,
+ &ncontext) < 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("SELinux could not compute a new context: "
@@ -893,7 +896,7 @@ sepgsql_compute_create(const char *scontext,
* tclass: class code (SEPG_CLASS_*) of the object being referenced
* required: a mask of required permissions (SEPG_<class>__<perm>)
* audit_name: a human readable object name for audit logs, or NULL.
- * abort: true, if caller wants to raise an error on access violation
+ * abort_on_violation: true, if error shall be raised on access violation
*/
bool
sepgsql_check_perms(const char *scontext,
@@ -901,7 +904,7 @@ sepgsql_check_perms(const char *scontext,
uint16 tclass,
uint32 required,
const char *audit_name,
- bool abort)
+ bool abort_on_violation)
{
struct av_decision avd;
uint32 denied;
@@ -937,7 +940,7 @@ sepgsql_check_perms(const char *scontext,
audit_name);
}
- if (!result && abort)
+ if (!result && abort_on_violation)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("SELinux: security policy violation")));
diff --git a/contrib/sepgsql/sepgsql-regtest.te b/contrib/sepgsql/sepgsql-regtest.te
index d872945074..8727523ca5 100644
--- a/contrib/sepgsql/sepgsql-regtest.te
+++ b/contrib/sepgsql/sepgsql-regtest.te
@@ -1,4 +1,4 @@
-policy_module(sepgsql-regtest, 1.04)
+policy_module(sepgsql-regtest, 1.07)
gen_require(`
all_userspace_class_perms
@@ -20,6 +20,9 @@ postgresql_procedure_object(sepgsql_regtest_trusted_proc_exec_t)
type sepgsql_nosuch_trusted_proc_exec_t;
postgresql_procedure_object(sepgsql_nosuch_trusted_proc_exec_t)
+type sepgsql_regtest_invisible_schema_t;
+postgresql_schema_object(sepgsql_regtest_invisible_schema_t);
+
#
# Test domains for database administrators
#
@@ -43,6 +46,21 @@ allow sepgsql_regtest_dba_t sepgsql_regtest_user_t : process { dyntransition };
allow sepgsql_regtest_dba_t sepgsql_regtest_foo_t : process { dyntransition };
allow sepgsql_regtest_dba_t sepgsql_regtest_var_t : process { dyntransition };
+# special rule for system columns
+optional_policy(`
+ gen_require(`
+ attribute sepgsql_table_type;
+ type sepgsql_sysobj_t;
+ ')
+ type_transition sepgsql_regtest_dba_t sepgsql_table_type:db_column sepgsql_sysobj_t "ctid";
+ type_transition sepgsql_regtest_dba_t sepgsql_table_type:db_column sepgsql_sysobj_t "oid";
+ type_transition sepgsql_regtest_dba_t sepgsql_table_type:db_column sepgsql_sysobj_t "xmin";
+ type_transition sepgsql_regtest_dba_t sepgsql_table_type:db_column sepgsql_sysobj_t "xmax";
+ type_transition sepgsql_regtest_dba_t sepgsql_table_type:db_column sepgsql_sysobj_t "cmin";
+ type_transition sepgsql_regtest_dba_t sepgsql_table_type:db_column sepgsql_sysobj_t "cmax";
+ type_transition sepgsql_regtest_dba_t sepgsql_table_type:db_column sepgsql_sysobj_t "tableoid";
+')
+
#
# Dummy domain for unpriv users
#
@@ -154,25 +172,14 @@ optional_policy(`
#
# Rule to execute original trusted procedures
#
-# XXX - sepgsql_client_type contains any valid client types, so we allow
-# them to execute the original trusted procedure at once.
+# These rules intend to allow any valid client type to launch trusted
+# procedures (including ones that cause a domain transition to an invalid
+# domain) labeled as sepgsql_regtest_trusted_proc_exec_t and
+# sepgsql_nosuch_trusted_proc_exec_t.
#
optional_policy(`
gen_require(`
attribute sepgsql_client_type;
')
- allow sepgsql_client_type { sepgsql_regtest_trusted_proc_exec_t sepgsql_nosuch_trusted_proc_exec_t }:db_procedure { getattr execute };
-
- # These rules intends sepgsql_regtest_user_t domain to translate
- # sepgsql_regtest_dba_t on execution of procedures labeled as
- # sepgsql_regtest_trusted_proc_exec_t.
- #
-# allow sepgsql_client_type sepgsql_regtest_trusted_proc_exec_t:db_procedure { getattr execute };
-
- # These rules intends sepgsql_regtest_user_t domain to translate
- # sepgsql_regtest_nosuch_t on execution of procedures labeled as
- # sepgsql_nosuch_trusted_proc_exec_t, without permissions to
- # translate to sepgsql_nosuch_trusted_proc_exec_t.
- #
-# allow sepgsql_client_type sepgsql_nosuch_trusted_proc_exec_t:db_procedure { getattr execute install };
+ allow sepgsql_client_type { sepgsql_regtest_trusted_proc_exec_t sepgsql_nosuch_trusted_proc_exec_t }:db_procedure { getattr execute entrypoint };
')
diff --git a/contrib/sepgsql/sepgsql.h b/contrib/sepgsql/sepgsql.h
index 479b136909..6dadb1dea5 100644
--- a/contrib/sepgsql/sepgsql.h
+++ b/contrib/sepgsql/sepgsql.h
@@ -4,7 +4,7 @@
*
* Definitions corresponding to SE-PostgreSQL
*
- * Copyright (c) 2010-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2010-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -145,7 +145,6 @@
#define SEPG_DB_TABLE__INSERT (1<<8)
#define SEPG_DB_TABLE__DELETE (1<<9)
#define SEPG_DB_TABLE__LOCK (1<<10)
-#define SEPG_DB_TABLE__INDEXON (1<<11)
#define SEPG_DB_SEQUENCE__CREATE (SEPG_DB_DATABASE__CREATE)
#define SEPG_DB_SEQUENCE__DROP (SEPG_DB_DATABASE__DROP)
@@ -240,14 +239,15 @@ extern void sepgsql_compute_avd(const char *scontext,
extern char *sepgsql_compute_create(const char *scontext,
const char *tcontext,
- uint16 tclass);
+ uint16 tclass,
+ const char *objname);
extern bool sepgsql_check_perms(const char *scontext,
const char *tcontext,
uint16 tclass,
uint32 required,
const char *audit_name,
- bool abort);
+ bool abort_on_violation);
/*
* uavc.c
@@ -257,12 +257,12 @@ extern bool sepgsql_avc_check_perms_label(const char *tcontext,
uint16 tclass,
uint32 required,
const char *audit_name,
- bool abort);
+ bool abort_on_violation);
extern bool sepgsql_avc_check_perms(const ObjectAddress *tobject,
uint16 tclass,
uint32 required,
const char *audit_name,
- bool abort);
+ bool abort_on_violation);
extern char *sepgsql_avc_trusted_proc(Oid functionId);
extern void sepgsql_avc_init(void);
@@ -285,7 +285,7 @@ extern Datum sepgsql_restorecon(PG_FUNCTION_ARGS);
/*
* dml.c
*/
-extern bool sepgsql_dml_privileges(List *rangeTabls, bool abort);
+extern bool sepgsql_dml_privileges(List *rangeTabls, bool abort_on_violation);
/*
* database.c
@@ -294,6 +294,7 @@ extern void sepgsql_database_post_create(Oid databaseId,
const char *dtemplate);
extern void sepgsql_database_drop(Oid databaseId);
extern void sepgsql_database_relabel(Oid databaseId, const char *seclabel);
+extern void sepgsql_database_setattr(Oid databaseId);
/*
* schema.c
@@ -301,6 +302,11 @@ extern void sepgsql_database_relabel(Oid databaseId, const char *seclabel);
extern void sepgsql_schema_post_create(Oid namespaceId);
extern void sepgsql_schema_drop(Oid namespaceId);
extern void sepgsql_schema_relabel(Oid namespaceId, const char *seclabel);
+extern void sepgsql_schema_setattr(Oid namespaceId);
+extern bool sepgsql_schema_search(Oid namespaceId, bool abort_on_violation);
+extern void sepgsql_schema_add_name(Oid namespaceId);
+extern void sepgsql_schema_remove_name(Oid namespaceId);
+extern void sepgsql_schema_rename(Oid namespaceId);
/*
* relation.c
@@ -309,9 +315,11 @@ extern void sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum);
extern void sepgsql_attribute_drop(Oid relOid, AttrNumber attnum);
extern void sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
const char *seclabel);
+extern void sepgsql_attribute_setattr(Oid relOid, AttrNumber attnum);
extern void sepgsql_relation_post_create(Oid relOid);
extern void sepgsql_relation_drop(Oid relOid);
extern void sepgsql_relation_relabel(Oid relOid, const char *seclabel);
+extern void sepgsql_relation_setattr(Oid relOid);
/*
* proc.c
@@ -319,5 +327,7 @@ extern void sepgsql_relation_relabel(Oid relOid, const char *seclabel);
extern void sepgsql_proc_post_create(Oid functionId);
extern void sepgsql_proc_drop(Oid functionId);
extern void sepgsql_proc_relabel(Oid functionId, const char *seclabel);
+extern void sepgsql_proc_setattr(Oid functionId);
+extern void sepgsql_proc_execute(Oid functionId);
#endif /* SEPGSQL_H */
diff --git a/contrib/sepgsql/sql/alter.sql b/contrib/sepgsql/sql/alter.sql
new file mode 100644
index 0000000000..4bded7ead5
--- /dev/null
+++ b/contrib/sepgsql/sql/alter.sql
@@ -0,0 +1,136 @@
+--
+-- Test for various ALTER statements
+--
+
+-- clean-up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+DROP DATABASE IF EXISTS regtest_sepgsql_test_database_1;
+DROP DATABASE IF EXISTS regtest_sepgsql_test_database;
+DROP USER IF EXISTS regtest_sepgsql_test_user;
+RESET client_min_messages;
+
+-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:unconfined_t:s0
+
+--
+-- CREATE Objects to be altered (with debug_audit being silent)
+--
+CREATE DATABASE regtest_sepgsql_test_database_1;
+
+CREATE USER regtest_sepgsql_test_user;
+
+CREATE SCHEMA regtest_schema_1;
+CREATE SCHEMA regtest_schema_2;
+
+GRANT ALL ON SCHEMA regtest_schema_1 TO public;
+GRANT ALL ON SCHEMA regtest_schema_2 TO public;
+
+SET search_path = regtest_schema_1, regtest_schema_2, public;
+
+CREATE TABLE regtest_table_1 (a int, b text);
+
+CREATE TABLE regtest_table_2 (c text) inherits (regtest_table_1);
+
+CREATE TABLE regtest_table_3 (x int primary key, y text);
+
+CREATE SEQUENCE regtest_seq_1;
+
+CREATE VIEW regtest_view_1 AS SELECT * FROM regtest_table_1 WHERE a > 0;
+
+CREATE FUNCTION regtest_func_1 (text) RETURNS bool
+ AS 'BEGIN RETURN true; END' LANGUAGE 'plpgsql';
+
+-- switch on debug_audit
+SET sepgsql.debug_audit = true;
+SET client_min_messages = LOG;
+
+--
+-- ALTER xxx OWNER TO
+--
+-- XXX: db_xxx:{setattr} permission checks should be applied even if the
+-- owner is not actually changed.
+--
+ALTER DATABASE regtest_sepgsql_test_database_1 OWNER TO regtest_sepgsql_test_user;
+ALTER DATABASE regtest_sepgsql_test_database_1 OWNER TO regtest_sepgsql_test_user;
+ALTER SCHEMA regtest_schema_1 OWNER TO regtest_sepgsql_test_user;
+ALTER SCHEMA regtest_schema_1 OWNER TO regtest_sepgsql_test_user;
+ALTER TABLE regtest_table_1 OWNER TO regtest_sepgsql_test_user;
+ALTER TABLE regtest_table_1 OWNER TO regtest_sepgsql_test_user;
+ALTER SEQUENCE regtest_seq_1 OWNER TO regtest_sepgsql_test_user;
+ALTER SEQUENCE regtest_seq_1 OWNER TO regtest_sepgsql_test_user;
+ALTER VIEW regtest_view_1 OWNER TO regtest_sepgsql_test_user;
+ALTER VIEW regtest_view_1 OWNER TO regtest_sepgsql_test_user;
+ALTER FUNCTION regtest_func_1(text) OWNER TO regtest_sepgsql_test_user;
+ALTER FUNCTION regtest_func_1(text) OWNER TO regtest_sepgsql_test_user;
+
+--
+-- ALTER xxx SET SCHEMA
+--
+ALTER TABLE regtest_table_1 SET SCHEMA regtest_schema_2;
+ALTER SEQUENCE regtest_seq_1 SET SCHEMA regtest_schema_2;
+ALTER VIEW regtest_view_1 SET SCHEMA regtest_schema_2;
+ALTER FUNCTION regtest_func_1(text) SET SCHEMA regtest_schema_2;
+
+--
+-- ALTER xxx RENAME TO
+--
+ALTER DATABASE regtest_sepgsql_test_database_1 RENAME TO regtest_sepgsql_test_database;
+ALTER SCHEMA regtest_schema_1 RENAME TO regtest_schema;
+ALTER TABLE regtest_table_1 RENAME TO regtest_table;
+ALTER SEQUENCE regtest_seq_1 RENAME TO regtest_seq;
+ALTER VIEW regtest_view_1 RENAME TO regtest_view;
+ALTER FUNCTION regtest_func_1(text) RENAME TO regtest_func;
+
+SET search_path = regtest_schema, regtest_schema_2, public;
+
+--
+-- misc ALTER commands
+--
+ALTER DATABASE regtest_sepgsql_test_database CONNECTION LIMIT 999;
+ALTER DATABASE regtest_sepgsql_test_database SET search_path TO regtest_schema, public; -- not supported yet
+
+ALTER TABLE regtest_table ADD COLUMN d float;
+ALTER TABLE regtest_table DROP COLUMN d;
+ALTER TABLE regtest_table ALTER b SET DEFAULT 'abcd'; -- not supported yet
+ALTER TABLE regtest_table ALTER b SET DEFAULT 'XYZ'; -- not supported yet
+ALTER TABLE regtest_table ALTER b DROP DEFAULT; -- not supported yet
+ALTER TABLE regtest_table ALTER b SET NOT NULL;
+ALTER TABLE regtest_table ALTER b DROP NOT NULL;
+ALTER TABLE regtest_table ALTER b SET STATISTICS -1;
+ALTER TABLE regtest_table ALTER b SET (n_distinct = 999);
+ALTER TABLE regtest_table ALTER b SET STORAGE PLAIN;
+ALTER TABLE regtest_table ADD CONSTRAINT test_fk FOREIGN KEY (a) REFERENCES regtest_table_3(x); -- not supported
+ALTER TABLE regtest_table ADD CONSTRAINT test_ck CHECK (b like '%abc%') NOT VALID; -- not supported
+ALTER TABLE regtest_table VALIDATE CONSTRAINT test_ck; -- not supported
+ALTER TABLE regtest_table DROP CONSTRAINT test_ck; -- not supported
+
+CREATE TRIGGER regtest_test_trig BEFORE UPDATE ON regtest_table
+ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger();
+
+ALTER TABLE regtest_table DISABLE TRIGGER regtest_test_trig; -- not supported
+ALTER TABLE regtest_table ENABLE TRIGGER regtest_test_trig; -- not supported
+
+CREATE RULE regtest_test_rule AS ON INSERT TO regtest_table_3 DO ALSO NOTHING;
+ALTER TABLE regtest_table_3 DISABLE RULE regtest_test_rule; -- not supported
+ALTER TABLE regtest_table_3 ENABLE RULE regtest_test_rule; -- not supported
+
+ALTER TABLE regtest_table SET WITH OIDS;
+ALTER TABLE regtest_table SET WITHOUT OIDS;
+ALTER TABLE regtest_table SET (fillfactor = 75);
+ALTER TABLE regtest_table RESET (fillfactor);
+ALTER TABLE regtest_table_2 NO INHERIT regtest_table; -- not supported
+ALTER TABLE regtest_table_2 INHERIT regtest_table; -- not supported
+ALTER TABLE regtest_table SET TABLESPACE pg_default;
+
+ALTER VIEW regtest_view SET (security_barrier);
+
+ALTER SEQUENCE regtest_seq INCREMENT BY 10 START WITH 1000;
+
+--
+-- clean-up objects
+--
+RESET sepgsql.debug_audit;
+RESET client_min_messages;
+DROP DATABASE regtest_sepgsql_test_database;
+DROP SCHEMA regtest_schema CASCADE;
+DROP SCHEMA regtest_schema_2 CASCADE;
+DROP USER regtest_sepgsql_test_user;
diff --git a/contrib/sepgsql/sql/ddl.sql b/contrib/sepgsql/sql/ddl.sql
index 8dd57e0eaf..c91c4cf572 100644
--- a/contrib/sepgsql/sql/ddl.sql
+++ b/contrib/sepgsql/sql/ddl.sql
@@ -2,6 +2,12 @@
-- Regression Test for DDL of Object Permission Checks
--
+-- clean-up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+DROP DATABASE IF EXISTS regtest_sepgsql_test_database;
+DROP USER IF EXISTS regtest_sepgsql_test_user;
+RESET client_min_messages;
+
-- confirm required permissions using audit messages
-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:unconfined_t:s0
SET sepgsql.debug_audit = true;
@@ -60,6 +66,18 @@ CREATE FUNCTION regtest_func_2(int) RETURNS bool LANGUAGE plpgsql
RESET SESSION AUTHORIZATION;
--
+-- ALTER and CREATE/DROP extra attribute permissions
+--
+CREATE TABLE regtest_table_4 (x int primary key, y int, z int);
+CREATE INDEX regtest_index_tbl4_y ON regtest_table_4(y);
+CREATE INDEX regtest_index_tbl4_z ON regtest_table_4(z);
+ALTER TABLE regtest_table_4 ALTER COLUMN y TYPE float;
+DROP INDEX regtest_index_tbl4_y;
+ALTER TABLE regtest_table_4
+ ADD CONSTRAINT regtest_tbl4_con EXCLUDE USING btree (z WITH =);
+DROP TABLE regtest_table_4 CASCADE;
+
+--
-- DROP Permission checks (with clean-up)
--
diff --git a/contrib/sepgsql/sql/dml.sql b/contrib/sepgsql/sql/dml.sql
index 94bf31a97a..97e01c3e3c 100644
--- a/contrib/sepgsql/sql/dml.sql
+++ b/contrib/sepgsql/sql/dml.sql
@@ -43,6 +43,14 @@ SELECT objtype, objname, label FROM pg_seclabels
AND objname in ('t1', 't2', 't3', 't4', 't5', 't5.e', 't5.f', 't5.g')
ORDER BY objname;
+CREATE SCHEMA my_schema_1;
+CREATE TABLE my_schema_1.ts1 (a int, b text);
+CREATE SCHEMA my_schema_2;
+CREATE TABLE my_schema_2.ts2 (x int, y text);
+
+SECURITY LABEL ON SCHEMA my_schema_2
+ IS 'system_u:object_r:sepgsql_regtest_invisible_schema_t:s0';
+
-- Hardwired Rules
UPDATE pg_attribute SET attisdropped = true
WHERE attrelid = 't5'::regclass AND attname = 'f'; -- failed
@@ -108,6 +116,14 @@ COPY t5 (e,f) FROM '/dev/null'; -- failed
COPY t5 (e) FROM '/dev/null'; -- ok
--
+-- Schema search path
+--
+SET search_path = my_schema_1, my_schema_2, public;
+SELECT * FROM ts1; -- ok
+SELECT * FROM ts2; -- failed (relation not found)
+SELECT * FROM my_schema_2.ts2; -- failed (policy violation)
+
+--
-- Clean up
--
-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c255
@@ -117,3 +133,5 @@ DROP TABLE IF EXISTS t3 CASCADE;
DROP TABLE IF EXISTS t4 CASCADE;
DROP TABLE IF EXISTS t5 CASCADE;
DROP TABLE IF EXISTS customer CASCADE;
+DROP SCHEMA IF EXISTS my_schema_1 CASCADE;
+DROP SCHEMA IF EXISTS my_schema_2 CASCADE;
diff --git a/contrib/sepgsql/sql/label.sql b/contrib/sepgsql/sql/label.sql
index e63b5f691d..7a05c248eb 100644
--- a/contrib/sepgsql/sql/label.sql
+++ b/contrib/sepgsql/sql/label.sql
@@ -71,10 +71,14 @@ SECURITY LABEL ON TABLE var_tbl
CREATE TABLE t3 (s int, t text);
INSERT INTO t3 VALUES (1, 'sss'), (2, 'ttt'), (3, 'uuu');
+-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:sepgsql_regtest_dba_t:s0
+CREATE TABLE t4 (m int, n text);
+INSERT INTO t4 VALUES (1,'mmm'), (2,'nnn'), (3,'ooo');
+
+SELECT objtype, objname, label FROM pg_seclabels
+ WHERE provider = 'selinux' AND objtype = 'table' AND objname in ('t1', 't2', 't3');
SELECT objtype, objname, label FROM pg_seclabels
- WHERE provider = 'selinux'
- AND objtype in ('table', 'column')
- AND objname in ('t1', 't2', 't3');
+ WHERE provider = 'selinux' AND objtype = 'column' AND (objname like 't3.%' OR objname like 't4.%');
--
-- Tests for SECURITY LABEL
@@ -93,6 +97,8 @@ SECURITY LABEL ON COLUMN t2.b
-- Tests for Trusted Procedures
--
-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:sepgsql_regtest_user_t:s0
+SET sepgsql.debug_audit = true;
+SET client_min_messages = log;
SELECT f1(); -- normal procedure
SELECT f2(); -- trusted procedure
SELECT f3(); -- trusted procedure that raises an error
@@ -229,6 +235,7 @@ SELECT sepgsql_getcon();
DROP TABLE IF EXISTS t1 CASCADE;
DROP TABLE IF EXISTS t2 CASCADE;
DROP TABLE IF EXISTS t3 CASCADE;
+DROP TABLE IF EXISTS t4 CASCADE;
DROP FUNCTION IF EXISTS f1() CASCADE;
DROP FUNCTION IF EXISTS f2() CASCADE;
DROP FUNCTION IF EXISTS f3() CASCADE;
diff --git a/contrib/sepgsql/sql/misc.sql b/contrib/sepgsql/sql/misc.sql
index a46d8a6b5c..c277711781 100644
--- a/contrib/sepgsql/sql/misc.sql
+++ b/contrib/sepgsql/sql/misc.sql
@@ -3,3 +3,28 @@
--
LOAD '$libdir/sepgsql'; -- failed
+
+--
+-- Permissions to execute functions
+--
+CREATE TABLE t1 (x int, y text);
+INSERT INTO t1 (SELECT x, md5(x::text) FROM generate_series(1,100) x);
+
+SET sepgsql.debug_audit = on;
+SET client_min_messages = log;
+
+-- regular function and operators
+SELECT * FROM t1 WHERE x > 50 AND y like '%64%';
+
+-- aggregate function
+SELECT MIN(x), AVG(x) FROM t1;
+
+-- window function
+SELECT row_number() OVER (order by x), * FROM t1 WHERE y like '%86%';
+
+RESET sepgsql.debug_audit;
+RESET client_min_messages;
+--
+-- Cleanup
+--
+DROP TABLE IF EXISTS t1 CASCADE;
diff --git a/contrib/sepgsql/test_sepgsql b/contrib/sepgsql/test_sepgsql
index 473004f6d2..12667189f5 100755
--- a/contrib/sepgsql/test_sepgsql
+++ b/contrib/sepgsql/test_sepgsql
@@ -14,6 +14,9 @@
PG_BINDIR=`pg_config --bindir`
+# we must move to contrib/sepgsql directory to run pg_regress correctly
+cd `dirname $0`
+
echo
echo "============== checking selinux environment =============="
@@ -162,6 +165,31 @@ if [ "${POLICY_STATUS}" != on ]; then
echo ""
exit 1
fi
+POLICY_STATUS=`getsebool sepgsql_enable_users_ddl | awk '{print $3}'`
+echo ${POLICY_STATUS:-failed}
+if [ "${POLICY_STATUS}" != on ]; then
+ echo ""
+ echo "The SELinux boolean 'sepgsql_enable_users_ddl' must be"
+ echo "turned on in order to enable the rules necessary to run"
+ echo "the regression tests."
+ echo ""
+ if [ "${POLICY_STATUS}" = "" ]; then
+ echo "We attempted to determine the state of this Boolean using"
+ echo "'getsebool', but that command did not produce the expected"
+ echo "output. Please verify that getsebool is available and in"
+ echo "your PATH."
+ else
+ echo "You can turn on this variable using the following commands:"
+ echo ""
+ echo " \$ sudo setsebool sepgsql_enable_users_ddl on"
+ echo ""
+ echo "For security reasons, it is suggested that you turn off this"
+ echo "variable when regression testing is complete, unless you"
+ echo "want to allow unprivileged users to run DDL commands."
+ fi
+ echo ""
+ exit 1
+fi
# 'psql' command must be executable from test domain
echo -n "checking whether we can run psql ... "
@@ -259,6 +287,6 @@ echo "found ${NUM}"
echo
echo "============== running sepgsql regression tests =============="
-make REGRESS="label dml ddl misc" REGRESS_OPTS="--launcher ./launcher" installcheck
+make REGRESS="label dml ddl alter misc" REGRESS_OPTS="--launcher ./launcher" installcheck
# exit with the exit code provided by "make"
diff --git a/contrib/sepgsql/uavc.c b/contrib/sepgsql/uavc.c
index 9641a17d79..b014b01f36 100644
--- a/contrib/sepgsql/uavc.c
+++ b/contrib/sepgsql/uavc.c
@@ -6,7 +6,7 @@
* access control decisions recently used, and reduce number of kernel
* invocations to avoid unnecessary performance hit.
*
- * Copyright (c) 2011-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2011-2014, PostgreSQL Global Development Group
*
* -------------------------------------------------------------------------
*/
@@ -142,7 +142,7 @@ sepgsql_avc_reclaim(void)
* Access control decisions must be atomic, but multiple system calls may
* be required to make a decision; thus, when referencing the access vector
* cache, we must loop until we complete without an intervening cache flush
- * event. In practice, looping even once should be very rare. Callers should
+ * event. In practice, looping even once should be very rare. Callers should
* do something like this:
*
* sepgsql_avc_check_valid();
@@ -250,10 +250,10 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
{
if (!ucontext)
ncontext = sepgsql_compute_create(scontext, tcontext,
- SEPG_CLASS_PROCESS);
+ SEPG_CLASS_PROCESS, NULL);
else
ncontext = sepgsql_compute_create(scontext, ucontext,
- SEPG_CLASS_PROCESS);
+ SEPG_CLASS_PROCESS, NULL);
if (strcmp(scontext, ncontext) == 0)
{
pfree(ncontext);
@@ -335,7 +335,7 @@ sepgsql_avc_lookup(const char *scontext, const char *tcontext, uint16 tclass)
*
* It returns 'true', if the security policy suggested to allow the required
* permissions. Otherwise, it returns 'false' or raises an error according
- * to the 'abort' argument.
+ * to the 'abort_on_violation' argument.
* The 'tobject' and 'tclass' identify the target object being referenced,
* and 'required' is a bitmask of permissions (SEPG_*__*) defined for each
* object classes.
@@ -345,7 +345,8 @@ sepgsql_avc_lookup(const char *scontext, const char *tcontext, uint16 tclass)
bool
sepgsql_avc_check_perms_label(const char *tcontext,
uint16 tclass, uint32 required,
- const char *audit_name, bool abort)
+ const char *audit_name,
+ bool abort_on_violation)
{
char *scontext = sepgsql_get_client_label();
avc_cache *cache;
@@ -415,7 +416,7 @@ sepgsql_avc_check_perms_label(const char *tcontext,
audit_name);
}
- if (abort && !result)
+ if (abort_on_violation && !result)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("SELinux: security policy violation")));
@@ -426,14 +427,15 @@ sepgsql_avc_check_perms_label(const char *tcontext,
bool
sepgsql_avc_check_perms(const ObjectAddress *tobject,
uint16 tclass, uint32 required,
- const char *audit_name, bool abort)
+ const char *audit_name,
+ bool abort_on_violation)
{
char *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG);
bool rc;
rc = sepgsql_avc_check_perms_label(tcontext,
tclass, required,
- audit_name, abort);
+ audit_name, abort_on_violation);
if (tcontext)
pfree(tcontext);
diff --git a/contrib/spi/autoinc.c b/contrib/spi/autoinc.c
index 54bbc4345c..41eae4fdc4 100644
--- a/contrib/spi/autoinc.c
+++ b/contrib/spi/autoinc.c
@@ -12,8 +12,6 @@
PG_MODULE_MAGIC;
-extern Datum autoinc(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(autoinc);
Datum
diff --git a/contrib/spi/insert_username.c b/contrib/spi/insert_username.c
index 3bc51c7f02..875207881a 100644
--- a/contrib/spi/insert_username.c
+++ b/contrib/spi/insert_username.c
@@ -17,8 +17,6 @@
PG_MODULE_MAGIC;
-extern Datum insert_username(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(insert_username);
Datum
diff --git a/contrib/spi/moddatetime.c b/contrib/spi/moddatetime.c
index 2ec9654036..c6d33b7355 100644
--- a/contrib/spi/moddatetime.c
+++ b/contrib/spi/moddatetime.c
@@ -23,8 +23,6 @@ OH, me, I'm Terry Mackintosh <terry@terrym.com>
PG_MODULE_MAGIC;
-extern Datum moddatetime(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(moddatetime);
Datum
diff --git a/contrib/spi/refint.c b/contrib/spi/refint.c
index 8dc565a190..2602210741 100644
--- a/contrib/spi/refint.c
+++ b/contrib/spi/refint.c
@@ -16,10 +16,6 @@
PG_MODULE_MAGIC;
-extern Datum check_primary_key(PG_FUNCTION_ARGS);
-extern Datum check_foreign_key(PG_FUNCTION_ARGS);
-
-
typedef struct
{
char *ident;
@@ -634,8 +630,7 @@ find_plan(char *ident, EPlan **eplan, int *nplans)
(*nplans) = i = 0;
}
- newp->ident = (char *) malloc(strlen(ident) + 1);
- strcpy(newp->ident, ident);
+ newp->ident = strdup(ident);
newp->nplans = 0;
newp->splan = NULL;
(*nplans)++;
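The allocation change here is behavior-preserving: strdup() performs the same malloc-plus-copy as the removed pair, so the plan cache identifiers are still heap-allocated with malloc, and neither spelling checks for an out-of-memory NULL return. A small side-by-side sketch in C:

    #include <stdlib.h>
    #include <string.h>

    /* Old spelling: explicit allocate-then-copy. */
    static char *
    copy_ident_old(const char *ident)
    {
        char *p = (char *) malloc(strlen(ident) + 1);

        strcpy(p, ident);       /* same missing NULL check as the original */
        return p;
    }

    /* New spelling: strdup() does the malloc and the copy in one call. */
    static char *
    copy_ident_new(const char *ident)
    {
        return strdup(ident);
    }
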
diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c
index 34b4453f87..a37cbee863 100644
--- a/contrib/spi/timetravel.c
+++ b/contrib/spi/timetravel.c
@@ -22,9 +22,6 @@
PG_MODULE_MAGIC;
/* AbsoluteTime currabstime(void); */
-Datum timetravel(PG_FUNCTION_ARGS);
-Datum set_timetravel(PG_FUNCTION_ARGS);
-Datum get_timetravel(PG_FUNCTION_ARGS);
typedef struct
{
@@ -48,17 +45,17 @@ static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans);
/*
* timetravel () --
- * 1. IF an update affects tuple with stop_date eq INFINITY
+ * 1. IF an update affects tuple with stop_date eq INFINITY
* then form (and return) new tuple with start_date eq current date
* and stop_date eq INFINITY [ and update_user eq current user ]
* and all other column values as in new tuple, and insert tuple
* with old data and stop_date eq current date
* ELSE - skip updation of tuple.
- * 2. IF an delete affects tuple with stop_date eq INFINITY
+ * 2. IF an delete affects tuple with stop_date eq INFINITY
* then insert the same tuple with stop_date eq current date
* [ and delete_user eq current user ]
* ELSE - skip deletion of tuple.
- * 3. On INSERT, if start_date is NULL then current date will be
+ * 3. On INSERT, if start_date is NULL then current date will be
* inserted, if stop_date is NULL then INFINITY will be inserted.
* [ and insert_user eq current user, update_user and delete_user
* eq NULL ]
@@ -540,8 +537,7 @@ find_plan(char *ident, EPlan **eplan, int *nplans)
(*nplans) = i = 0;
}
- newp->ident = (char *) malloc(strlen(ident) + 1);
- strcpy(newp->ident, ident);
+ newp->ident = strdup(ident);
newp->splan = NULL;
(*nplans)++;
diff --git a/contrib/sslinfo/sslinfo.c b/contrib/sslinfo/sslinfo.c
index 7a5847004b..db491a4bc8 100644
--- a/contrib/sslinfo/sslinfo.c
+++ b/contrib/sslinfo/sslinfo.c
@@ -22,18 +22,9 @@
PG_MODULE_MAGIC;
-Datum ssl_is_used(PG_FUNCTION_ARGS);
-Datum ssl_version(PG_FUNCTION_ARGS);
-Datum ssl_cipher(PG_FUNCTION_ARGS);
-Datum ssl_client_cert_present(PG_FUNCTION_ARGS);
-Datum ssl_client_serial(PG_FUNCTION_ARGS);
-Datum ssl_client_dn_field(PG_FUNCTION_ARGS);
-Datum ssl_issuer_field(PG_FUNCTION_ARGS);
-Datum ssl_client_dn(PG_FUNCTION_ARGS);
-Datum ssl_issuer_dn(PG_FUNCTION_ARGS);
-Datum X509_NAME_field_to_text(X509_NAME *name, text *fieldName);
-Datum X509_NAME_to_text(X509_NAME *name);
-Datum ASN1_STRING_to_text(ASN1_STRING *str);
+static Datum X509_NAME_field_to_text(X509_NAME *name, text *fieldName);
+static Datum X509_NAME_to_text(X509_NAME *name);
+static Datum ASN1_STRING_to_text(ASN1_STRING *str);
/*
@@ -51,7 +42,7 @@ ssl_is_used(PG_FUNCTION_ARGS)
/*
- * Returns SSL cipher currently in use.
+ * Returns SSL version currently in use.
*/
PG_FUNCTION_INFO_V1(ssl_version);
Datum
@@ -77,7 +68,7 @@ ssl_cipher(PG_FUNCTION_ARGS)
/*
- * Indicates whether current client have provided a certificate
+ * Indicates whether current client provided a certificate
*
* Function has no arguments. Returns bool. True if current session
* is SSL session and client certificate is verified, otherwise false.
@@ -132,13 +123,13 @@ ssl_client_serial(PG_FUNCTION_ARGS)
* current database encoding if possible. Any invalid characters are
* replaced by question marks.
*
- * Parameter: str - OpenSSL ASN1_STRING structure. Memory management
+ * Parameter: str - OpenSSL ASN1_STRING structure. Memory management
* of this structure is responsibility of caller.
*
* Returns Datum, which can be directly returned from a C language SQL
* function.
*/
-Datum
+static Datum
ASN1_STRING_to_text(ASN1_STRING *str)
{
BIO *membuf;
@@ -157,10 +148,7 @@ ASN1_STRING_to_text(ASN1_STRING *str)
nullterm = '\0';
BIO_write(membuf, &nullterm, 1);
size = BIO_get_mem_data(membuf, &sp);
- dp = (char *) pg_do_encoding_conversion((unsigned char *) sp,
- size - 1,
- PG_UTF8,
- GetDatabaseEncoding());
+ dp = pg_any_to_server(sp, size - 1, PG_UTF8);
result = cstring_to_text(dp);
if (dp != sp)
pfree(dp);
@@ -182,7 +170,7 @@ ASN1_STRING_to_text(ASN1_STRING *str)
* Returns result of ASN1_STRING_to_text applied to appropriate
* part of name
*/
-Datum
+static Datum
X509_NAME_field_to_text(X509_NAME *name, text *fieldName)
{
char *string_fieldname;
@@ -287,7 +275,7 @@ ssl_issuer_field(PG_FUNCTION_ARGS)
* Returns: text datum which contains string representation of
* X509_NAME
*/
-Datum
+static Datum
X509_NAME_to_text(X509_NAME *name)
{
BIO *membuf = BIO_new(BIO_s_mem());
@@ -322,10 +310,7 @@ X509_NAME_to_text(X509_NAME *name)
nullterm = '\0';
BIO_write(membuf, &nullterm, 1);
size = BIO_get_mem_data(membuf, &sp);
- dp = (char *) pg_do_encoding_conversion((unsigned char *) sp,
- size - 1,
- PG_UTF8,
- GetDatabaseEncoding());
+ dp = pg_any_to_server(sp, size - 1, PG_UTF8);
result = cstring_to_text(dp);
if (dp != sp)
pfree(dp);
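Both sslinfo.c hunks swap an explicit pg_do_encoding_conversion(..., PG_UTF8, GetDatabaseEncoding()) call for pg_any_to_server(), which performs the same source-to-server-encoding conversion, and they keep the idiom of pfree()ing the result only when a new string was actually allocated. A minimal sketch of that pattern in C, assuming the declarations in mb/pg_wchar.h and utils/builtins.h:

    #include "postgres.h"
    #include "mb/pg_wchar.h"        /* pg_any_to_server(), PG_UTF8 */
    #include "utils/builtins.h"     /* cstring_to_text() */

    /*
     * Convert a UTF-8 buffer of the given length into a text datum in the
     * server encoding, mirroring the sslinfo.c pattern above.
     */
    static Datum
    utf8_buffer_to_text(char *sp, int len)
    {
        char   *dp = pg_any_to_server(sp, len, PG_UTF8);
        Datum   result = PointerGetDatum(cstring_to_text(dp));

        if (dp != sp)           /* conversion may hand back the input as-is */
            pfree(dp);
        return result;
    }
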
diff --git a/contrib/start-scripts/osx/PostgreSQL b/contrib/start-scripts/osx/PostgreSQL
index e4b56dfb3b..22ed9ff45e 100755
--- a/contrib/start-scripts/osx/PostgreSQL
+++ b/contrib/start-scripts/osx/PostgreSQL
@@ -24,11 +24,7 @@
#
# POSTGRESQL=-NO-
#
-# For more information on Darwin/Mac OS X startup bundles, see this article:
-#
-# http://www.opensource.apple.com/projects/documentation/howto/html/SystemStarter_HOWTO.html
-#
-# Created by David Wheeler, 2002.
+# Created by David Wheeler, 2002
# modified by Ray Aspeitia 12-03-2003 :
# added log rotation script to db startup
diff --git a/contrib/tablefunc/expected/tablefunc.out b/contrib/tablefunc/expected/tablefunc.out
index 7ad4336ada..0437ecf90a 100644
--- a/contrib/tablefunc/expected/tablefunc.out
+++ b/contrib/tablefunc/expected/tablefunc.out
@@ -146,7 +146,6 @@ SELECT * FROM crosstab_out('SELECT rowid, attribute, val FROM ct where rowclass
-- hash based crosstab
--
create table cth(id serial, rowid text, rowdt timestamp, attribute text, val text);
-NOTICE: CREATE TABLE will create implicit sequence "cth_id_seq" for serial column "cth.id"
insert into cth values(DEFAULT,'test1','01 March 2003','temperature','42');
insert into cth values(DEFAULT,'test1','01 March 2003','test_result','PASS');
-- the next line is intentionally left commented and is therefore a "missing" attribute
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c
index 65a470114b..10ee8c76db 100644
--- a/contrib/tablefunc/tablefunc.c
+++ b/contrib/tablefunc/tablefunc.c
@@ -10,7 +10,7 @@
* And contributors:
* Nabil Sayegh <postgresql@e-trolley.de>
*
- * Copyright (c) 2002-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2014, PostgreSQL Global Development Group
*
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose, without fee, and without a written agreement
@@ -34,9 +34,11 @@
#include <math.h>
+#include "access/htup_details.h"
#include "catalog/pg_type.h"
-#include "funcapi.h"
#include "executor/spi.h"
+#include "funcapi.h"
+#include "lib/stringinfo.h"
#include "miscadmin.h"
#include "utils/builtins.h"
@@ -1338,7 +1340,7 @@ build_tuplestore_recursively(char *key_fld,
for (i = 0; i < proc; i++)
{
/* initialize branch for this pass */
- appendStringInfo(&branchstr, "%s", branch);
+ appendStringInfoString(&branchstr, branch);
appendStringInfo(&chk_branchstr, "%s%s%s", branch_delim, branch, branch_delim);
/* get the next sql result tuple */
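appendStringInfo(&buf, "%s", s) and appendStringInfoString(&buf, s) append the same bytes; the second form simply skips printf-style format parsing, which is worth having inside a per-tuple loop like the one above. A tiny C sketch, assuming lib/stringinfo.h:

    #include "postgres.h"
    #include "lib/stringinfo.h"

    /* Both calls append the same text; the second avoids format parsing. */
    static void
    append_branch_twice(StringInfo buf, const char *branch)
    {
        appendStringInfo(buf, "%s", branch);        /* old spelling */
        appendStringInfoString(buf, branch);        /* new, cheaper spelling */
    }
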
diff --git a/contrib/tablefunc/tablefunc.h b/contrib/tablefunc/tablefunc.h
index d9670f5401..a983bab2ee 100644
--- a/contrib/tablefunc/tablefunc.h
+++ b/contrib/tablefunc/tablefunc.h
@@ -10,7 +10,7 @@
* And contributors:
* Nabil Sayegh <postgresql@e-trolley.de>
*
- * Copyright (c) 2002-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2014, PostgreSQL Global Development Group
*
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose, without fee, and without a written agreement
diff --git a/contrib/tcn/tcn.c b/contrib/tcn/tcn.c
index 314632dd89..ba34f9b1fa 100644
--- a/contrib/tcn/tcn.c
+++ b/contrib/tcn/tcn.c
@@ -3,7 +3,7 @@
* tcn.c
* triggered change notification support for PostgreSQL
*
- * Portions Copyright (c) 2011-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2011-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -15,6 +15,7 @@
#include "postgres.h"
+#include "access/htup_details.h"
#include "executor/spi.h"
#include "commands/async.h"
#include "commands/trigger.h"
@@ -26,10 +27,6 @@
PG_MODULE_MAGIC;
-/* forward declarations */
-Datum triggered_change_notification(PG_FUNCTION_ARGS);
-
-
/*
* Copy from s (for source) to r (for result), wrapping with q (quote)
* characters and doubling any quote characters found.
@@ -52,7 +49,7 @@ strcpy_quoted(StringInfo r, const char *s, const char q)
* triggered_change_notification
*
* This trigger function will send a notification of data modification with
- * primary key values. The channel will be "tcn" unless the trigger is
+ * primary key values. The channel will be "tcn" unless the trigger is
* created with a parameter, in which case that parameter will be used.
*/
PG_FUNCTION_INFO_V1(triggered_change_notification);
@@ -140,8 +137,8 @@ triggered_change_notification(PG_FUNCTION_ARGS)
if (!HeapTupleIsValid(indexTuple)) /* should not happen */
elog(ERROR, "cache lookup failed for index %u", indexoid);
index = (Form_pg_index) GETSTRUCT(indexTuple);
- /* we're only interested if it is the primary key */
- if (index->indisprimary)
+ /* we're only interested if it is the primary key and valid */
+ if (index->indisprimary && IndexIsValid(index))
{
int numatts = index->indnatts;
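The added IndexIsValid() test makes the trigger ignore a primary-key index that is marked invalid in pg_index, such as one left behind by a failed or in-progress concurrent index operation, instead of building a notification payload from it. A short C sketch of the guard, assuming IndexIsValid() is the indisvalid-based macro from catalog/pg_index.h:

    #include "postgres.h"
    #include "catalog/pg_index.h"   /* Form_pg_index, IndexIsValid() */

    /* Only a valid primary-key index should drive the tcn payload. */
    static bool
    usable_primary_key(Form_pg_index index)
    {
        return index->indisprimary && IndexIsValid(index);
    }
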
diff --git a/contrib/test_decoding/.gitignore b/contrib/test_decoding/.gitignore
new file mode 100644
index 0000000000..1f95503494
--- /dev/null
+++ b/contrib/test_decoding/.gitignore
@@ -0,0 +1,5 @@
+# Generated subdirectories
+/log/
+/isolation_output/
+/regression_output/
+/tmp_check/
diff --git a/contrib/test_decoding/Makefile b/contrib/test_decoding/Makefile
new file mode 100644
index 0000000000..58e0f384cb
--- /dev/null
+++ b/contrib/test_decoding/Makefile
@@ -0,0 +1,73 @@
+# contrib/test_decoding/Makefile
+
+MODULES = test_decoding
+OBJS = test_decoding.o
+
+# Note: because we don't tell the Makefile there are any regression tests,
+# we have to clean those result files explicitly
+EXTRA_CLEAN = $(pg_regress_clean_files) ./regression_output ./isolation_output
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/test_decoding
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
+
+# Disabled because these tests require "wal_level=logical", which
+# typical installcheck users do not have (e.g. buildfarm clients).
+installcheck:;
+
+# But it can nonetheless be very helpful to run the tests against a
+# preexisting installation, so allow that, but only when requested explicitly.
+installcheck-force: regresscheck-install-force isolationcheck-install-force
+
+check: regresscheck isolationcheck
+
+submake-regress:
+ $(MAKE) -C $(top_builddir)/src/test/regress all
+
+submake-isolation:
+ $(MAKE) -C $(top_builddir)/src/test/isolation all
+
+submake-test_decoding:
+ $(MAKE) -C $(top_builddir)/contrib/test_decoding
+
+REGRESSCHECKS=ddl rewrite toast permissions decoding_in_xact binary prepared
+
+regresscheck: all | submake-regress submake-test_decoding
+ $(MKDIR_P) regression_output
+ $(pg_regress_check) \
+ --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf \
+ --temp-install=./tmp_check \
+ --extra-install=contrib/test_decoding \
+ --outputdir=./regression_output \
+ $(REGRESSCHECKS)
+
+regresscheck-install-force: | submake-regress submake-test_decoding
+ $(pg_regress_installcheck) \
+ --extra-install=contrib/test_decoding \
+ $(REGRESSCHECKS)
+
+ISOLATIONCHECKS=mxact delayed_startup concurrent_ddl_dml
+
+isolationcheck: all | submake-isolation submake-test_decoding
+ $(MKDIR_P) isolation_output
+ $(pg_isolation_regress_check) \
+ --temp-config $(top_srcdir)/contrib/test_decoding/logical.conf \
+ --extra-install=contrib/test_decoding \
+ --outputdir=./isolation_output \
+ $(ISOLATIONCHECKS)
+
+isolationcheck-install-force: all | submake-isolation submake-test_decoding
+ $(pg_isolation_regress_installcheck) \
+ --extra-install=contrib/test_decoding \
+ $(ISOLATIONCHECKS)
+
+.PHONY: submake-test_decoding submake-regress check \
+ regresscheck regresscheck-install-force \
+ isolationcheck isolationcheck-install-force
diff --git a/contrib/test_decoding/expected/binary.out b/contrib/test_decoding/expected/binary.out
new file mode 100644
index 0000000000..4164784ab3
--- /dev/null
+++ b/contrib/test_decoding/expected/binary.out
@@ -0,0 +1,35 @@
+-- predictability
+SET synchronous_commit = on;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+-- succeeds, textual plugin, textual consumer
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '0');
+ data
+------
+(0 rows)
+
+-- fails, binary plugin, textual consumer
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '1');
+ERROR: output plugin cannot produce binary output
+-- succeeds, textual plugin, binary consumer
+SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '0');
+ data
+------
+(0 rows)
+
+-- succeeds, binary plugin, binary consumer
+SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '1');
+ data
+------
+(0 rows)
+
+SELECT 'init' FROM pg_drop_replication_slot('regression_slot');
+ ?column?
+----------
+ init
+(1 row)
+
diff --git a/contrib/test_decoding/expected/concurrent_ddl_dml.out b/contrib/test_decoding/expected/concurrent_ddl_dml.out
new file mode 100644
index 0000000000..cc9165655f
--- /dev/null
+++ b/contrib/test_decoding/expected/concurrent_ddl_dml.out
@@ -0,0 +1,733 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl2_float s1_insert_tbl2 s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_float: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE float;
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[double precision]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl1_float s1_insert_tbl2 s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; <waiting ...>
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s2_alter_tbl1_float: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+BEGIN
+table public.pg_temp: INSERT: val1[integer]:1 val2[double precision]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl2_char s1_insert_tbl2 s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_char: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE character varying;
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[character varying]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl1_char s1_insert_tbl2 s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; <waiting ...>
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s2_alter_tbl1_char: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+BEGIN
+table public.pg_temp: INSERT: val1[integer]:1 val2[character varying]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s1_insert_tbl2 s2_alter_tbl1_float s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; <waiting ...>
+step s1_commit: COMMIT;
+step s2_alter_tbl1_float: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+BEGIN
+table public.pg_temp: INSERT: val1[integer]:1 val2[double precision]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s1_insert_tbl2 s2_alter_tbl1_char s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; <waiting ...>
+step s1_commit: COMMIT;
+step s2_alter_tbl1_char: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+BEGIN
+table public.pg_temp: INSERT: val1[integer]:1 val2[character varying]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl2_float s1_insert_tbl2 s2_alter_tbl1_float s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_float: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE float;
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; <waiting ...>
+step s1_commit: COMMIT;
+step s2_alter_tbl1_float: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[double precision]:1
+COMMIT
+BEGIN
+table public.pg_temp: INSERT: val1[integer]:1 val2[double precision]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl2_char s1_insert_tbl2 s2_alter_tbl1_char s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_char: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE character varying;
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; <waiting ...>
+step s1_commit: COMMIT;
+step s2_alter_tbl1_char: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[character varying]:'1'
+COMMIT
+BEGIN
+table public.pg_temp: INSERT: val1[integer]:1 val2[character varying]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_char s1_begin s1_insert_tbl1 s2_alter_tbl2_text s1_insert_tbl2 s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_char: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE character varying;
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_text: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE text;
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[text]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_char s1_begin s1_insert_tbl1 s2_alter_tbl2_text s1_insert_tbl2 s2_alter_tbl1_char s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_char: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE character varying;
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_text: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE text;
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; <waiting ...>
+step s1_commit: COMMIT;
+step s2_alter_tbl1_char: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[text]:'1'
+COMMIT
+BEGIN
+table public.pg_temp: INSERT: val1[integer]:1 val2[character varying]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl2_boolean s1_insert_tbl2 s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_boolean: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE boolean;
+ERROR: column "val2" cannot be cast automatically to type boolean
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl2_boolean s1_insert_tbl2 s2_alter_tbl1_boolean s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_boolean: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE boolean;
+ERROR: column "val2" cannot be cast automatically to type boolean
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl1_boolean: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE boolean; <waiting ...>
+step s1_commit: COMMIT;
+step s2_alter_tbl1_boolean: <... completed>
+error in steps s1_commit s2_alter_tbl1_boolean: ERROR: column "val2" cannot be cast automatically to type boolean
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl2_add_int s1_insert_tbl2_3col s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s1_insert_tbl2 s1_commit s1_begin s2_alter_tbl2_add_int s1_insert_tbl2_3col s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s1_begin: BEGIN;
+step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl2_add_float s1_insert_tbl2_3col s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_add_float: ALTER TABLE tbl2 ADD COLUMN val3 FLOAT;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[double precision]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s1_insert_tbl2 s1_commit s1_begin s2_alter_tbl2_add_float s1_insert_tbl2_3col s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s1_begin: BEGIN;
+step s2_alter_tbl2_add_float: ALTER TABLE tbl2 ADD COLUMN val3 FLOAT;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[double precision]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s2_alter_tbl2_add_char s1_insert_tbl2_3col s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s1_begin s1_insert_tbl1 s1_insert_tbl2 s1_commit s1_begin s2_alter_tbl2_add_char s1_insert_tbl2_3col s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s1_begin: BEGIN;
+step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_add_int s1_begin s1_insert_tbl2_3col s2_alter_tbl2_drop_3rd_col s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER;
+step s1_begin: BEGIN;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; <waiting ...>
+step s1_commit: COMMIT;
+step s2_alter_tbl2_drop_3rd_col: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
+COMMIT
+BEGIN
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_add_int s1_begin s1_insert_tbl2_3col s2_alter_tbl2_drop_3rd_col s1_insert_tbl2 s1_commit s1_insert_tbl2 s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER;
+step s1_begin: BEGIN;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; <waiting ...>
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s2_alter_tbl2_drop_3rd_col: <... completed>
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:null
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_add_int s1_begin s1_insert_tbl2_3col s2_alter_tbl2_drop_3rd_col s1_commit s2_get_changes s2_alter_tbl2_add_text s1_begin s1_insert_tbl2_3col s2_alter_tbl2_3rd_char s1_insert_tbl2_3col s1_commit s2_get_changes s2_alter_tbl2_3rd_int s1_insert_tbl2_3col s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER;
+step s1_begin: BEGIN;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; <waiting ...>
+step s1_commit: COMMIT;
+step s2_alter_tbl2_drop_3rd_col: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
+COMMIT
+BEGIN
+COMMIT
+step s2_alter_tbl2_add_text: ALTER TABLE tbl2 ADD COLUMN val3 TEXT;
+step s1_begin: BEGIN;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s2_alter_tbl2_3rd_char: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE character varying; <waiting ...>
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_alter_tbl2_3rd_char: <... completed>
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
+COMMIT
+BEGIN
+COMMIT
+step s2_alter_tbl2_3rd_int: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE int USING val3::integer;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+table public.pg_temp: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:null
+table public.pg_temp: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
+table public.pg_temp: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_add_char s1_begin s1_insert_tbl1 s1_insert_tbl2_3col s2_alter_tbl2_3rd_text s1_insert_tbl2_3col s1_commit s1_insert_tbl2_3col s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying;
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s2_alter_tbl2_3rd_text: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE text; <waiting ...>
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_alter_tbl2_3rd_text: <... completed>
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_add_text s1_begin s1_insert_tbl1 s1_insert_tbl2_3col s2_alter_tbl2_3rd_char s1_insert_tbl2_3col s1_commit s1_insert_tbl2_3col s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_add_text: ALTER TABLE tbl2 ADD COLUMN val3 TEXT;
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s2_alter_tbl2_3rd_char: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE character varying; <waiting ...>
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_alter_tbl2_3rd_char: <... completed>
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_add_char s1_begin s1_insert_tbl1 s2_alter_tbl2_3rd_text s1_insert_tbl2_3col s1_commit s2_alter_tbl2_drop_3rd_col s1_insert_tbl2 s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying;
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_3rd_text: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE text;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3;
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_add_text s1_begin s1_insert_tbl1 s2_alter_tbl2_3rd_char s1_insert_tbl2_3col s1_commit s2_alter_tbl2_drop_3rd_col s1_insert_tbl2 s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_add_text: ALTER TABLE tbl2 ADD COLUMN val3 TEXT;
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_3rd_char: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE character varying;
+step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
+step s1_commit: COMMIT;
+step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3;
+step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+?column?
+
+stop
+
+starting permutation: s1_init s2_alter_tbl2_add_char s1_begin s1_insert_tbl1 s2_alter_tbl2_drop_3rd_col s1_insert_tbl1 s1_commit s2_get_changes
+step s1_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying;
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+data
+
+BEGIN
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
+COMMIT
+?column?
+
+stop
diff --git a/contrib/test_decoding/expected/ddl.out b/contrib/test_decoding/expected/ddl.out
new file mode 100644
index 0000000000..e13a6c7370
--- /dev/null
+++ b/contrib/test_decoding/expected/ddl.out
@@ -0,0 +1,647 @@
+-- predictability
+SET synchronous_commit = on;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+-- fail because of an already existing slot
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ERROR: replication slot "regression_slot" already exists
+-- fail because of an invalid name
+SELECT 'init' FROM pg_create_logical_replication_slot('Invalid Name', 'test_decoding');
+ERROR: replication slot name "Invalid Name" contains invalid character
+HINT: Replication slot names may only contain letters, numbers and the underscore character.
+-- fail twice because of an invalid parameter values
+SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
+ERROR: could not parse value "frakbar" for parameter "include-xids"
+CONTEXT: slot "regression_slot", output plugin "test_decoding", in the startup callback
+SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'nonexistant-option', 'frakbar');
+ERROR: option "nonexistant-option" = "frakbar" is unknown
+CONTEXT: slot "regression_slot", output plugin "test_decoding", in the startup callback
+SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
+ERROR: could not parse value "frakbar" for parameter "include-xids"
+CONTEXT: slot "regression_slot", output plugin "test_decoding", in the startup callback
+-- succeed once
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+-- fail
+SELECT pg_drop_replication_slot('regression_slot');
+ERROR: replication slot "regression_slot" does not exist
+-- check that we're detecting a streaming rep slot used for logical decoding
+SELECT 'init' FROM pg_create_physical_replication_slot('repl');
+ ?column?
+----------
+ init
+(1 row)
+
+SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0');
+ERROR: cannot use physical replication slot for logical decoding
+SELECT pg_drop_replication_slot('repl');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+/* check whether status function reports us, only reproduceable columns */
+SELECT slot_name, plugin, slot_type, active,
+ NOT catalog_xmin IS NULL AS catalog_xmin_set,
+ xmin IS NULl AS data_xmin_not_set,
+ pg_xlog_location_diff(restart_lsn, '0/01000000') > 0 AS some_wal
+FROM pg_replication_slots;
+ slot_name | plugin | slot_type | active | catalog_xmin_set | data_xmin_not_set | some_wal
+-----------------+---------------+-----------+--------+------------------+-------------------+----------
+ regression_slot | test_decoding | logical | f | t | t | t
+(1 row)
+
+/*
+ * Check that changes are handled correctly when interleaved with ddl
+ */
+CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120));
+BEGIN;
+INSERT INTO replication_example(somedata, text) VALUES (1, 1);
+INSERT INTO replication_example(somedata, text) VALUES (1, 2);
+COMMIT;
+ALTER TABLE replication_example ADD COLUMN bar int;
+INSERT INTO replication_example(somedata, text, bar) VALUES (2, 1, 4);
+BEGIN;
+INSERT INTO replication_example(somedata, text, bar) VALUES (2, 2, 4);
+INSERT INTO replication_example(somedata, text, bar) VALUES (2, 3, 4);
+INSERT INTO replication_example(somedata, text, bar) VALUES (2, 4, NULL);
+COMMIT;
+ALTER TABLE replication_example DROP COLUMN bar;
+INSERT INTO replication_example(somedata, text) VALUES (3, 1);
+BEGIN;
+INSERT INTO replication_example(somedata, text) VALUES (3, 2);
+INSERT INTO replication_example(somedata, text) VALUES (3, 3);
+COMMIT;
+ALTER TABLE replication_example RENAME COLUMN text TO somenum;
+INSERT INTO replication_example(somedata, somenum) VALUES (4, 1);
+-- collect all changes
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+---------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:1 somedata[integer]:1 text[character varying]:'1'
+ table public.replication_example: INSERT: id[integer]:2 somedata[integer]:1 text[character varying]:'2'
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:3 somedata[integer]:2 text[character varying]:'1' bar[integer]:4
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:4 somedata[integer]:2 text[character varying]:'2' bar[integer]:4
+ table public.replication_example: INSERT: id[integer]:5 somedata[integer]:2 text[character varying]:'3' bar[integer]:4
+ table public.replication_example: INSERT: id[integer]:6 somedata[integer]:2 text[character varying]:'4' bar[integer]:null
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:7 somedata[integer]:3 text[character varying]:'1'
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:8 somedata[integer]:3 text[character varying]:'2'
+ table public.replication_example: INSERT: id[integer]:9 somedata[integer]:3 text[character varying]:'3'
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:10 somedata[integer]:4 somenum[character varying]:'1'
+ COMMIT
+(30 rows)
+
+ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4);
+-- throw away changes, they contain oids
+SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ count
+-------
+ 12
+(1 row)
+
+INSERT INTO replication_example(somedata, somenum) VALUES (5, 1);
+BEGIN;
+INSERT INTO replication_example(somedata, somenum) VALUES (6, 1);
+ALTER TABLE replication_example ADD COLUMN zaphod1 int;
+INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 2, 1);
+ALTER TABLE replication_example ADD COLUMN zaphod2 int;
+INSERT INTO replication_example(somedata, somenum, zaphod2) VALUES (6, 3, 1);
+INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 4, 2);
+COMMIT;
+-- show changes
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:11 somedata[integer]:5 somenum[integer]:1
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:12 somedata[integer]:6 somenum[integer]:1
+ table public.replication_example: INSERT: id[integer]:13 somedata[integer]:6 somenum[integer]:2 zaphod1[integer]:1
+ table public.replication_example: INSERT: id[integer]:14 somedata[integer]:6 somenum[integer]:3 zaphod1[integer]:null zaphod2[integer]:1
+ table public.replication_example: INSERT: id[integer]:15 somedata[integer]:6 somenum[integer]:4 zaphod1[integer]:2 zaphod2[integer]:null
+ COMMIT
+(9 rows)
+
+-- hide changes bc of oid visible in full table rewrites
+CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int);
+INSERT INTO tr_unique(data) VALUES(10);
+ALTER TABLE tr_unique RENAME TO tr_pkey;
+ALTER TABLE tr_pkey ADD COLUMN id serial primary key;
+SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ count
+-------
+ 10
+(1 row)
+
+INSERT INTO tr_pkey(data) VALUES(1);
+--show deletion with primary key
+DELETE FROM tr_pkey;
+/* display results */
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+----------------------------------------------------------------------------
+ BEGIN
+ table public.tr_pkey: INSERT: id2[integer]:2 data[integer]:1 id[integer]:2
+ COMMIT
+ BEGIN
+ table public.tr_pkey: DELETE: id[integer]:1
+ table public.tr_pkey: DELETE: id[integer]:2
+ COMMIT
+(7 rows)
+
+/*
+ * check that disk spooling works
+ */
+BEGIN;
+CREATE TABLE tr_etoomuch (id serial primary key, data int);
+INSERT INTO tr_etoomuch(data) SELECT g.i FROM generate_series(1, 10234) g(i);
+DELETE FROM tr_etoomuch WHERE id < 5000;
+UPDATE tr_etoomuch SET data = - data WHERE id > 5000;
+COMMIT;
+/* display results, but hide most of the output */
+SELECT count(*), min(data), max(data)
+FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0')
+GROUP BY substring(data, 1, 24)
+ORDER BY 1,2;
+ count | min | max
+-------+-------------------------------------------------+------------------------------------------------------------------------
+ 1 | BEGIN | BEGIN
+ 1 | COMMIT | COMMIT
+ 20467 | table public.tr_etoomuch: DELETE: id[integer]:1 | table public.tr_etoomuch: UPDATE: id[integer]:9999 data[integer]:-9999
+(3 rows)
+
+/*
+ * check whether we decode subtransactions correctly in relation with each
+ * other
+ */
+CREATE TABLE tr_sub (id serial primary key, path text);
+-- toplevel, subtxn, toplevel, subtxn, subtxn
+BEGIN;
+INSERT INTO tr_sub(path) VALUES ('1-top-#1');
+SAVEPOINT a;
+INSERT INTO tr_sub(path) VALUES ('1-top-1-#1');
+INSERT INTO tr_sub(path) VALUES ('1-top-1-#2');
+RELEASE SAVEPOINT a;
+SAVEPOINT b;
+SAVEPOINT c;
+INSERT INTO tr_sub(path) VALUES ('1-top-2-1-#1');
+INSERT INTO tr_sub(path) VALUES ('1-top-2-1-#2');
+RELEASE SAVEPOINT c;
+INSERT INTO tr_sub(path) VALUES ('1-top-2-#1');
+RELEASE SAVEPOINT b;
+COMMIT;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+----------------------------------------------------------------------
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.tr_sub: INSERT: id[integer]:1 path[text]:'1-top-#1'
+ table public.tr_sub: INSERT: id[integer]:2 path[text]:'1-top-1-#1'
+ table public.tr_sub: INSERT: id[integer]:3 path[text]:'1-top-1-#2'
+ table public.tr_sub: INSERT: id[integer]:4 path[text]:'1-top-2-1-#1'
+ table public.tr_sub: INSERT: id[integer]:5 path[text]:'1-top-2-1-#2'
+ table public.tr_sub: INSERT: id[integer]:6 path[text]:'1-top-2-#1'
+ COMMIT
+(10 rows)
+
+-- check that we handle xlog assignments correctly
+BEGIN;
+-- nest 80 subtxns
+SAVEPOINT subtop;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+-- assign xid by inserting
+INSERT INTO tr_sub(path) VALUES ('2-top-1...--#1');
+INSERT INTO tr_sub(path) VALUES ('2-top-1...--#2');
+INSERT INTO tr_sub(path) VALUES ('2-top-1...--#3');
+RELEASE SAVEPOINT subtop;
+INSERT INTO tr_sub(path) VALUES ('2-top-#1');
+COMMIT;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+------------------------------------------------------------------------
+ BEGIN
+ table public.tr_sub: INSERT: id[integer]:7 path[text]:'2-top-1...--#1'
+ table public.tr_sub: INSERT: id[integer]:8 path[text]:'2-top-1...--#2'
+ table public.tr_sub: INSERT: id[integer]:9 path[text]:'2-top-1...--#3'
+ table public.tr_sub: INSERT: id[integer]:10 path[text]:'2-top-#1'
+ COMMIT
+(6 rows)
+
+-- make sure rollbacked subtransactions aren't decoded
+BEGIN;
+INSERT INTO tr_sub(path) VALUES ('3-top-2-#1');
+SAVEPOINT a;
+INSERT INTO tr_sub(path) VALUES ('3-top-2-1-#1');
+SAVEPOINT b;
+INSERT INTO tr_sub(path) VALUES ('3-top-2-2-#1');
+ROLLBACK TO SAVEPOINT b;
+INSERT INTO tr_sub(path) VALUES ('3-top-2-#2');
+COMMIT;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+-----------------------------------------------------------------------
+ BEGIN
+ table public.tr_sub: INSERT: id[integer]:11 path[text]:'3-top-2-#1'
+ table public.tr_sub: INSERT: id[integer]:12 path[text]:'3-top-2-1-#1'
+ table public.tr_sub: INSERT: id[integer]:14 path[text]:'3-top-2-#2'
+ COMMIT
+(5 rows)
+
+-- test whether a known but not-yet-logged toplevel xact followed by a
+-- subxact commit is handled correctly
+BEGIN;
+SELECT txid_current() != 0; -- so no fixed xid appears in the outfile
+ ?column?
+----------
+ t
+(1 row)
+
+SAVEPOINT a;
+INSERT INTO tr_sub(path) VALUES ('4-top-1-#1');
+RELEASE SAVEPOINT a;
+COMMIT;
+-- test whether a change in a subtransaction of an unknown toplevel
+-- xact is handled correctly.
+BEGIN;
+SAVEPOINT a;
+INSERT INTO tr_sub(path) VALUES ('5-top-1-#1');
+COMMIT;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+---------------------------------------------------------------------
+ BEGIN
+ table public.tr_sub: INSERT: id[integer]:15 path[text]:'4-top-1-#1'
+ COMMIT
+ BEGIN
+ table public.tr_sub: INSERT: id[integer]:16 path[text]:'5-top-1-#1'
+ COMMIT
+(6 rows)
+
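+-- Illustrative note: both cases decode to a plain toplevel transaction;
+-- regardless of when the toplevel xid was assigned, the reorder buffer
+-- attributes the subtransaction's inserts to the committing toplevel xact.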
+/*
+ * Check whether treating a table as a catalog table works somewhat
+ */
+CREATE TABLE replication_metadata (
+ id serial primary key,
+ relation name NOT NULL,
+ options text[]
+)
+WITH (user_catalog_table = true)
+;
+\d+ replication_metadata
+ Table "public.replication_metadata"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+---------+-------------------------------------------------------------------+----------+--------------+-------------
+ id | integer | not null default nextval('replication_metadata_id_seq'::regclass) | plain | |
+ relation | name | not null | plain | |
+ options | text[] | | extended | |
+Indexes:
+ "replication_metadata_pkey" PRIMARY KEY, btree (id)
+Options: user_catalog_table=true
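+-- Illustrative note: user_catalog_table marks the relation as an additional
+-- catalog table for logical decoding, so output plugins may read it with the
+-- historic snapshot, and its old row versions are retained (like real
+-- catalog rows) for as long as a logical slot still needs them.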
+
+INSERT INTO replication_metadata(relation, options)
+VALUES ('foo', ARRAY['a', 'b']);
+ALTER TABLE replication_metadata RESET (user_catalog_table);
+\d+ replication_metadata
+ Table "public.replication_metadata"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+---------+-------------------------------------------------------------------+----------+--------------+-------------
+ id | integer | not null default nextval('replication_metadata_id_seq'::regclass) | plain | |
+ relation | name | not null | plain | |
+ options | text[] | | extended | |
+Indexes:
+ "replication_metadata_pkey" PRIMARY KEY, btree (id)
+
+INSERT INTO replication_metadata(relation, options)
+VALUES ('bar', ARRAY['a', 'b']);
+ALTER TABLE replication_metadata SET (user_catalog_table = true);
+\d+ replication_metadata
+ Table "public.replication_metadata"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------+---------+-------------------------------------------------------------------+----------+--------------+-------------
+ id | integer | not null default nextval('replication_metadata_id_seq'::regclass) | plain | |
+ relation | name | not null | plain | |
+ options | text[] | | extended | |
+Indexes:
+ "replication_metadata_pkey" PRIMARY KEY, btree (id)
+Options: user_catalog_table=true
+
+INSERT INTO replication_metadata(relation, options)
+VALUES ('blub', NULL);
+-- make sure rewrites don't work
+ALTER TABLE replication_metadata ADD COLUMN rewritemeornot int;
+ALTER TABLE replication_metadata ALTER COLUMN rewritemeornot TYPE text;
+ERROR: cannot rewrite table "replication_metadata" used as a catalog table
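+-- Illustrative note: full-table rewrites are rejected for user catalog
+-- tables, presumably because the historic snapshots used during decoding
+-- could not follow the rows into a new relfilenode; adding a column without
+-- a rewrite, as above, is allowed.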
+ALTER TABLE replication_metadata SET (user_catalog_table = false);
+\d+ replication_metadata
+ Table "public.replication_metadata"
+ Column | Type | Modifiers | Storage | Stats target | Description
+----------------+---------+-------------------------------------------------------------------+----------+--------------+-------------
+ id | integer | not null default nextval('replication_metadata_id_seq'::regclass) | plain | |
+ relation | name | not null | plain | |
+ options | text[] | | extended | |
+ rewritemeornot | integer | | plain | |
+Indexes:
+ "replication_metadata_pkey" PRIMARY KEY, btree (id)
+Options: user_catalog_table=false
+
+INSERT INTO replication_metadata(relation, options)
+VALUES ('zaphod', NULL);
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_metadata: INSERT: id[integer]:1 relation[name]:'foo' options[text[]]:'{a,b}'
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_metadata: INSERT: id[integer]:2 relation[name]:'bar' options[text[]]:'{a,b}'
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_metadata: INSERT: id[integer]:3 relation[name]:'blub' options[text[]]:null
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_metadata: INSERT: id[integer]:4 relation[name]:'zaphod' options[text[]]:null rewritemeornot[integer]:null
+ COMMIT
+(22 rows)
+
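+-- Illustrative aside: the empty BEGIN/COMMIT pairs come from the DDL-only
+-- transactions (the ALTERs), which produce no decodable row changes.  At
+-- least in more recent versions, test_decoding accepts a 'skip-empty-xacts'
+-- option that suppresses such pairs:
+--   SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');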
+/*
+ * check whether we handle updates/deletes correctly with & without a pkey
+ */
+/* we should handle the case without a key at all more gracefully */
+CREATE TABLE table_without_key(id serial, data int);
+INSERT INTO table_without_key(data) VALUES(1),(2);
+DELETE FROM table_without_key WHERE data = 1;
+-- won't log old keys
+UPDATE table_without_key SET data = 3 WHERE data = 2;
+UPDATE table_without_key SET id = -id;
+UPDATE table_without_key SET id = -id;
+-- should log the full old row now
+ALTER TABLE table_without_key REPLICA IDENTITY FULL;
+UPDATE table_without_key SET data = 3 WHERE data = 2;
+UPDATE table_without_key SET id = -id;
+UPDATE table_without_key SET id = -id;
+DELETE FROM table_without_key WHERE data = 3;
+CREATE TABLE table_with_pkey(id serial primary key, data int);
+INSERT INTO table_with_pkey(data) VALUES(1), (2);
+DELETE FROM table_with_pkey WHERE data = 1;
+-- should log the old pkey
+UPDATE table_with_pkey SET data = 3 WHERE data = 2;
+UPDATE table_with_pkey SET id = -id;
+UPDATE table_with_pkey SET id = -id;
+-- check that we log nothing despite having a pkey
+ALTER TABLE table_with_pkey REPLICA IDENTITY NOTHING;
+UPDATE table_with_pkey SET id = -id;
+-- check that we log everything despite having a pkey
+ALTER TABLE table_with_pkey REPLICA IDENTITY FULL;
+UPDATE table_with_pkey SET id = -id;
+DELETE FROM table_with_pkey WHERE data = 3;
+CREATE TABLE table_with_unique_not_null(id serial unique, data int);
+ALTER TABLE table_with_unique_not_null ALTER COLUMN id SET NOT NULL; -- already set
+-- won't log anything, replica identity not set up
+INSERT INTO table_with_unique_not_null(data) VALUES(1), (2);
+DELETE FROM table_with_unique_not_null WHERE data = 1;
+UPDATE table_with_unique_not_null SET data = 3 WHERE data = 2;
+UPDATE table_with_unique_not_null SET id = -id;
+UPDATE table_with_unique_not_null SET id = -id;
+DELETE FROM table_with_unique_not_null WHERE data = 3;
+-- should log old key
+ALTER TABLE table_with_unique_not_null REPLICA IDENTITY USING INDEX table_with_unique_not_null_id_key;
+INSERT INTO table_with_unique_not_null(data) VALUES(1), (2);
+DELETE FROM table_with_unique_not_null WHERE data = 1;
+UPDATE table_with_unique_not_null SET data = 3 WHERE data = 2;
+UPDATE table_with_unique_not_null SET id = -id;
+UPDATE table_with_unique_not_null SET id = -id;
+DELETE FROM table_with_unique_not_null WHERE data = 3;
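+-- Illustrative summary: the statements above cycle through the REPLICA
+-- IDENTITY settings, which control how much of the old row is written to
+-- WAL for UPDATE/DELETE (t and t_idx below are placeholder names):
+--   ALTER TABLE t REPLICA IDENTITY DEFAULT;           -- old primary-key columns only
+--   ALTER TABLE t REPLICA IDENTITY USING INDEX t_idx; -- columns of a unique, not-null index
+--   ALTER TABLE t REPLICA IDENTITY FULL;              -- the complete old row
+--   ALTER TABLE t REPLICA IDENTITY NOTHING;           -- no old-row data at all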
+-- check toast support
+BEGIN;
+CREATE SEQUENCE toasttable_rand_seq START 79 INCREMENT 1499; -- portable "random"
+CREATE TABLE toasttable(
+ id serial primary key,
+ toasted_col1 text,
+ rand1 float8 DEFAULT nextval('toasttable_rand_seq'),
+ toasted_col2 text,
+ rand2 float8 DEFAULT nextval('toasttable_rand_seq')
+ );
+COMMIT;
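+-- Illustrative note: values larger than roughly 2kB are compressed and/or
+-- stored out of line in the table's TOAST relation, and the decoder has to
+-- reassemble them; the next two inserts create the uncompressed-external and
+-- compressed cases named in the comments below.  The stored (possibly
+-- compressed) size of a value can be inspected with pg_column_size(), e.g.:
+--   SELECT id, pg_column_size(toasted_col1), octet_length(toasted_col1) FROM toasttable;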
+-- uncompressed external toast data
+INSERT INTO toasttable(toasted_col1) SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i);
+-- compressed external toast data
+INSERT INTO toasttable(toasted_col2) SELECT repeat(string_agg(to_char(g.i, 'FM0000'), ''), 50) FROM generate_series(1, 500) g(i);
+-- update of existing column
+UPDATE toasttable
+ SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i))
+WHERE id = 1;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+----------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.table_without_key: INSERT: id[integer]:1 data[integer]:1
+ table public.table_without_key: INSERT: id[integer]:2 data[integer]:2
+ COMMIT
+ BEGIN
+ table public.table_without_key: DELETE: (no-tuple-data)
+ COMMIT
+ BEGIN
+ table public.table_without_key: UPDATE: id[integer]:2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_without_key: UPDATE: id[integer]:-2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_without_key: UPDATE: id[integer]:2 data[integer]:3
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.table_without_key: UPDATE: old-key: id[integer]:2 data[integer]:3 new-tuple: id[integer]:-2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_without_key: UPDATE: old-key: id[integer]:-2 data[integer]:3 new-tuple: id[integer]:2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_without_key: DELETE: id[integer]:2 data[integer]:3
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.table_with_pkey: INSERT: id[integer]:1 data[integer]:1
+ table public.table_with_pkey: INSERT: id[integer]:2 data[integer]:2
+ COMMIT
+ BEGIN
+ table public.table_with_pkey: DELETE: id[integer]:1
+ COMMIT
+ BEGIN
+ table public.table_with_pkey: UPDATE: id[integer]:2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_pkey: UPDATE: old-key: id[integer]:2 new-tuple: id[integer]:-2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_pkey: UPDATE: old-key: id[integer]:-2 new-tuple: id[integer]:2 data[integer]:3
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.table_with_pkey: UPDATE: old-key: id[integer]:2 new-tuple: id[integer]:-2 data[integer]:3
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.table_with_pkey: UPDATE: old-key: id[integer]:-2 new-tuple: id[integer]:2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_pkey: DELETE: id[integer]:2
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: INSERT: id[integer]:1 data[integer]:1
+ table public.table_with_unique_not_null: INSERT: id[integer]:2 data[integer]:2
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: DELETE: (no-tuple-data)
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: UPDATE: id[integer]:2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: UPDATE: id[integer]:-2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: UPDATE: id[integer]:2 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: DELETE: (no-tuple-data)
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: INSERT: id[integer]:3 data[integer]:1
+ table public.table_with_unique_not_null: INSERT: id[integer]:4 data[integer]:2
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: DELETE: id[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: UPDATE: id[integer]:4 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: UPDATE: old-key: id[integer]:4 new-tuple: id[integer]:-4 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: UPDATE: old-key: id[integer]:-4 new-tuple: id[integer]:4 data[integer]:3
+ COMMIT
+ BEGIN
+ table public.table_with_unique_not_null: DELETE: id[integer]:4
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.toasttable: INSERT: id[integer]:1 toasted_col1[text]:'123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481
14911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:79 toasted_col2[text]:null rand2[double precision]:1578
+ COMMIT
+ BEGIN
+ table public.toasttable: INSERT: id[integer]:2 toasted_col1[text]:null rand1[double precision]:3077 toasted_col2[text]:'0001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580
359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247
024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013
601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400
250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130
414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302
030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019
101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900
800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680
469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357
035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024
602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401
350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230
024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412
041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030
103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901
900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780
079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467
046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035
603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402
450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330
134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022
002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041
104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903
000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880
189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077
007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500000100020003000400050006000700080009001000110012001300140015001600170018001900200021002200230024002500260027002800290030003100320033003400350036003700380039004000410042004300440045004600470048004900500051005200530054005500560057005800590060006100620063006400650066006700680069007000710072007300740075007600770078007900800081008200830084008500860087008800890090009100920093009400950096009700980099010001010102010301040105010601070108010901100111011201130114011501160117011801190120012101220123012401250126012701280129013001310132013301340135013601370138013901400141014201430144014501460147014801490150015101520153015401550156015701580159016001610162016301640165016601670168016901700171017201730174017501760177017801790180018101820183018401850186018701880189019001910192019301940195019601970198019902000201020202030204020502060207020802090210021102120213021402150216021702180219022002210222022302240225022602270228022902300231023202330234023502360237023802390240024102420243024402450246024702480249025002510252025302540255025602570258025902600261026202630264026502660267026802690270027102720273027402750276027702780279028002810282028302840285028602870288028902900291029202930294029502960297029802990300030103020303030403050306030703080309031003110312031303140315031603170318031903200321032203230324032503260327032803290330033103320333033403350336033703380339034003410342034303440345034603470348034903500351035203530354035503560357035803590360036103620363036403650366036703680369037003710372037303740375037603770378037903800381038203830384038503860387038803890390039103920393039403950396039703980399040004010402040304040405040604070408040904100411041204130414041504160417041804190420042104220423042404250426042704280429043004310432043304340435043604370438043904400441044204430444044504460447044804490450045104520453045404550456045704580459046004610462046304640465046
604670468046904700471047204730474047504760477047804790480048104820483048404850486048704880489049004910492049304940495049604970498049905000001000200030004000500060007000800090010001100120013001400150016001700180019002000210022002300240025002600270028002900300031003200330034003500360037003800390040004100420043004400450046004700480049005000510052005300540055005600570058005900600061006200630064006500660067006800690070007100720073007400750076007700780079008000810082008300840085008600870088008900900091009200930094009500960097009800990100010101020103010401050106010701080109011001110112011301140115011601170118011901200121012201230124012501260127012801290130013101320133013401350136013701380139014001410142014301440145014601470148014901500151015201530154015501560157015801590160016101620163016401650166016701680169017001710172017301740175017601770178017901800181018201830184018501860187018801890190019101920193019401950196019701980199020002010202020302040205020602070208020902100211021202130214021502160217021802190220022102220223022402250226022702280229023002310232023302340235023602370238023902400241024202430244024502460247024802490250025102520253025402550256025702580259026002610262026302640265026602670268026902700271027202730274027502760277027802790280028102820283028402850286028702880289029002910292029302940295029602970298029903000301030203030304030503060307030803090310031103120313031403150316031703180319032003210322032303240325032603270328032903300331033203330334033503360337033803390340034103420343034403450346034703480349035003510352035303540355035603570358035903600361036203630364036503660367036803690370037103720373037403750376037703780379038003810382038303840385038603870388038903900391039203930394039503960397039803990400040104020403040404050406040704080409041004110412041304140415041604170418041904200421042204230424042504260427042804290430043104320433043404350436043704380439044004410442044304440445044604470448044904500451045204530454045504560457045804590460046104620463046404650466046704680469047004710472047304740475047604770478047904800481048204830484048504860487048804890490049104920493049404950496049704980499050000010002000300040005000600070008000900100011001200130014001500160017001800190020002100220023002400250026002700280029003000310032003300340035003600370038003900400041004200430044004500460047004800490050005100520053005400550056005700580059006000610062006300640065006600670068006900700071007200730074007500760077007800790080008100820083008400850086008700880089009000910092009300940095009600970098009901000101010201030104010501060107010801090110011101120113011401150116011701180119012001210122012301240125012601270128012901300131013201330134013501360137013801390140014101420143014401450146014701480149015001510152015301540155015601570158015901600161016201630164016501660167016801690170017101720173017401750176017701780179018001810182018301840185018601870188018901900191019201930194019501960197019801990200020102020203020402050206020702080209021002110212021302140215021602170218021902200221022202230224022502260227022802290230023102320233023402350236023702380239024002410242024302440245024602470248024902500251025202530254025502560257025802590260026102620263026402650266026702680269027002710272027302740275027602770278027902800281028202830284028502860287028802890290029102920293029402950296029702980299030003010302030303040305030603070308030903100311031203130314031503160317031803190320032103220323032403250326032703280329033003310332033303340335033603370338033903400341034203430344034503460347034803490350035103520353035403
550356035703580359036003610362036303640365036603670368036903700371037203730374037503760377037803790380038103820383038403850386038703880389039003910392039303940395039603970398039904000401040204030404040504060407040804090410041104120413041404150416041704180419042004210422042304240425042604270428042904300431043204330434043504360437043804390440044104420443044404450446044704480449045004510452045304540455045604570458045904600461046204630464046504660467046804690470047104720473047404750476047704780479048004810482048304840485048604870488048904900491049204930494049504960497049804990500' rand2[double precision]:4576
+ COMMIT
+ BEGIN
+ table public.toasttable: UPDATE: id[integer]:1 toasted_col1[text]:'123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481
14911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:79 toasted_col2[text]:null rand2[double precision]:1578
+ COMMIT
+(113 rows)
+
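The statements that follow exercise logical decoding of TOASTed values: a row with a large out-of-line text value is inserted, a different column is updated while the toasted column is left unchanged, and the table is dropped before the accumulated changes are read from the slot. As a minimal sketch of the surrounding slot lifecycle (assuming the test_decoding output plugin is available; the slot name matches the one this regression test creates earlier in the script):

  -- create a logical replication slot that decodes changes with the test_decoding plugin
  SELECT slot_name FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
  -- ... run DML such as the INSERT/UPDATE below ...
  -- read and consume the decoded changes; the 'include-xids', '0' option hides transaction ids
  SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
  -- release the slot once it is no longer needed
  SELECT pg_drop_replication_slot('regression_slot');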
+INSERT INTO toasttable(toasted_col1) SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i);
+-- update of second column, first column unchanged
+UPDATE toasttable
+ SET toasted_col2 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i))
+WHERE id = 1;
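Because toasted_col1 is not modified by this UPDATE, the decoder does not need to re-emit its full value (test_decoding generally reports such columns as unchanged rather than repeating the data). A hypothetical way to confirm that the large value really is stored out of line rather than inline, using the column and table names from this test:

  -- pg_column_size() reports the stored size (possibly compressed or out-of-line),
  -- which for this highly compressible string is far smaller than its character length
  SELECT id, pg_column_size(toasted_col1) AS stored_bytes, length(toasted_col1) AS char_length
  FROM toasttable
  WHERE id = 1;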
+-- make sure we decode correctly even if the toast table is gone
+DROP TABLE toasttable;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ table public.toasttable: INSERT: id[integer]:3 toasted_col1[text]:'123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481
14911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:6075 toasted_col2[text]:null rand2[double precision]:7574
+ COMMIT
+ BEGIN
+ table public.toasttable: UPDATE: id[integer]:1 toasted_col1[text]:unchanged-toast-datum rand1[double precision]:79 toasted_col2[text]:'1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311
1321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand2[double precision]:1578
+ COMMIT
+ BEGIN
+ COMMIT
+(8 rows)
+
+-- done, free logical replication slot
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+------
+(0 rows)
+
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+/* check that the slot is gone */
+SELECT * FROM pg_replication_slots;
+ slot_name | plugin | slot_type | datoid | database | active | xmin | catalog_xmin | restart_lsn
+-----------+--------+-----------+--------+----------+--------+------+--------------+-------------
+(0 rows)
+
diff --git a/contrib/test_decoding/expected/decoding_in_xact.out b/contrib/test_decoding/expected/decoding_in_xact.out
new file mode 100644
index 0000000000..d15b0b542b
--- /dev/null
+++ b/contrib/test_decoding/expected/decoding_in_xact.out
@@ -0,0 +1,89 @@
+-- predictability
+SET synchronous_commit = on;
+-- fail because we're creating a slot while in an xact with an xid
+BEGIN;
+SELECT txid_current() = 0;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ERROR: cannot create logical replication slot in transaction that has performed writes
+ROLLBACK;
+-- fail because we're creating a slot while in a subxact whose topxact has an xid
+BEGIN;
+SELECT txid_current() = 0;
+ ?column?
+----------
+ f
+(1 row)
+
+SAVEPOINT barf;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ERROR: cannot create logical replication slot in transaction that has performed writes
+ROLLBACK TO SAVEPOINT barf;
+ROLLBACK;
+-- succeed, outside tx.
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+SELECT 'stop' FROM pg_drop_replication_slot('regression_slot');
+ ?column?
+----------
+ stop
+(1 row)
+
+-- succeed, in tx without xid.
+BEGIN;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+COMMIT;
+CREATE TABLE nobarf(id serial primary key, data text);
+INSERT INTO nobarf(data) VALUES('1');
+-- decoding works in transaction with xid
+BEGIN;
+SELECT txid_current() = 0;
+ ?column?
+----------
+ f
+(1 row)
+
+-- don't show yet, haven't committed
+INSERT INTO nobarf(data) VALUES('2');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+-----------------------------------------------------------
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.nobarf: INSERT: id[integer]:1 data[text]:'1'
+ COMMIT
+(5 rows)
+
+COMMIT;
+INSERT INTO nobarf(data) VALUES('3');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+-----------------------------------------------------------
+ BEGIN
+ table public.nobarf: INSERT: id[integer]:2 data[text]:'2'
+ COMMIT
+ BEGIN
+ table public.nobarf: INSERT: id[integer]:3 data[text]:'3'
+ COMMIT
+(6 rows)
+
+SELECT 'stop' FROM pg_drop_replication_slot('regression_slot');
+ ?column?
+----------
+ stop
+(1 row)
+
diff --git a/contrib/test_decoding/expected/delayed_startup.out b/contrib/test_decoding/expected/delayed_startup.out
new file mode 100644
index 0000000000..db8c525ac4
--- /dev/null
+++ b/contrib/test_decoding/expected/delayed_startup.out
@@ -0,0 +1,38 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1b s1w s2init s1c s2start s1b s1w s1c s2start s1b s1w s2start s1c s2start
+step s1b: BEGIN ISOLATION LEVEL SERIALIZABLE;
+step s1w: INSERT INTO do_write DEFAULT VALUES;
+step s2init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); <waiting ...>
+step s1c: COMMIT;
+step s2init: <... completed>
+?column?
+
+init
+step s2start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+step s1b: BEGIN ISOLATION LEVEL SERIALIZABLE;
+step s1w: INSERT INTO do_write DEFAULT VALUES;
+step s1c: COMMIT;
+step s2start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+BEGIN
+table public.do_write: INSERT: id[integer]:2
+COMMIT
+step s1b: BEGIN ISOLATION LEVEL SERIALIZABLE;
+step s1w: INSERT INTO do_write DEFAULT VALUES;
+step s2start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+step s1c: COMMIT;
+step s2start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+BEGIN
+table public.do_write: INSERT: id[integer]:3
+COMMIT
+?column?
+
+stop
diff --git a/contrib/test_decoding/expected/mxact.out b/contrib/test_decoding/expected/mxact.out
new file mode 100644
index 0000000000..f0d96cc67d
--- /dev/null
+++ b/contrib/test_decoding/expected/mxact.out
@@ -0,0 +1,66 @@
+Parsed test spec with 3 sessions
+
+starting permutation: s0init s0start s1begin s1sharepgclass s2begin s2sharepgclass s0w s0start s2commit s1commit
+step s0init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s0start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+step s1begin: BEGIN;
+step s1sharepgclass: SELECT count(*) > 1 FROM (SELECT * FROM pg_class FOR SHARE) s;
+?column?
+
+t
+step s2begin: BEGIN;
+step s2sharepgclass: SELECT count(*) > 1 FROM (SELECT * FROM pg_class FOR SHARE) s;
+?column?
+
+t
+step s0w: INSERT INTO do_write DEFAULT VALUES;
+step s0start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+BEGIN
+table public.do_write: INSERT: id[integer]:1
+COMMIT
+step s2commit: COMMIT;
+step s1commit: COMMIT;
+?column?
+
+stop
+
+starting permutation: s0init s0start s1begin s1keysharepgclass s2begin s2keysharepgclass s0alter s0w s0start s2commit s1commit
+step s0init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+
+init
+step s0start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+step s1begin: BEGIN;
+step s1keysharepgclass: SELECT count(*) > 1 FROM (SELECT * FROM pg_class FOR KEY SHARE) s;
+?column?
+
+t
+step s2begin: BEGIN;
+step s2keysharepgclass: SELECT count(*) > 1 FROM (SELECT * FROM pg_class FOR KEY SHARE) s;
+?column?
+
+t
+step s0alter: ALTER TABLE do_write ADD column ts timestamptz;
+step s0w: INSERT INTO do_write DEFAULT VALUES;
+step s0start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+BEGIN
+COMMIT
+BEGIN
+table public.do_write: INSERT: id[integer]:1 ts[timestamp with time zone]:null
+COMMIT
+step s2commit: COMMIT;
+step s1commit: COMMIT;
+?column?
+
+stop
diff --git a/contrib/test_decoding/expected/permissions.out b/contrib/test_decoding/expected/permissions.out
new file mode 100644
index 0000000000..85b7f5d625
--- /dev/null
+++ b/contrib/test_decoding/expected/permissions.out
@@ -0,0 +1,130 @@
+-- predictability
+SET synchronous_commit = on;
+-- setup
+CREATE ROLE lr_normal;
+CREATE ROLE lr_superuser SUPERUSER;
+CREATE ROLE lr_replication REPLICATION;
+CREATE TABLE lr_test(data text);
+-- superuser can control replication
+SET ROLE lr_superuser;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+INSERT INTO lr_test VALUES('lr_superuser_init');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+--------------------------------------------------------------
+ BEGIN
+ table public.lr_test: INSERT: data[text]:'lr_superuser_init'
+ COMMIT
+(3 rows)
+
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+RESET ROLE;
+-- replication user can control replication
+SET ROLE lr_replication;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+INSERT INTO lr_test VALUES('lr_superuser_init');
+ERROR: permission denied for relation lr_test
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+------
+(0 rows)
+
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+RESET ROLE;
+-- plain user *can't* control replication
+SET ROLE lr_normal;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ERROR: must be superuser or replication role to use replication slots
+INSERT INTO lr_test VALUES('lr_superuser_init');
+ERROR: permission denied for relation lr_test
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ERROR: must be superuser or replication role to use replication slots
+SELECT pg_drop_replication_slot('regression_slot');
+ERROR: must be superuser or replication role to use replication slots
+RESET ROLE;
+-- replication users can drop superuser created slots
+SET ROLE lr_superuser;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+RESET ROLE;
+SET ROLE lr_replication;
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+RESET ROLE;
+-- normal users can't drop existing slots
+SET ROLE lr_superuser;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+RESET ROLE;
+SET ROLE lr_normal;
+SELECT pg_drop_replication_slot('regression_slot');
+ERROR: must be superuser or replication role to use replication slots
+RESET ROLE;
+-- all users can see existing slots
+SET ROLE lr_superuser;
+SELECT slot_name, plugin FROM pg_replication_slots;
+ slot_name | plugin
+-----------------+---------------
+ regression_slot | test_decoding
+(1 row)
+
+RESET ROLE;
+SET ROLE lr_replication;
+SELECT slot_name, plugin FROM pg_replication_slots;
+ slot_name | plugin
+-----------------+---------------
+ regression_slot | test_decoding
+(1 row)
+
+RESET ROLE;
+SET ROLE lr_normal;
+SELECT slot_name, plugin FROM pg_replication_slots;
+ slot_name | plugin
+-----------------+---------------
+ regression_slot | test_decoding
+(1 row)
+
+RESET ROLE;
+-- cleanup
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+DROP ROLE lr_normal;
+DROP ROLE lr_superuser;
+DROP ROLE lr_replication;
+DROP TABLE lr_test;
diff --git a/contrib/test_decoding/expected/prepared.out b/contrib/test_decoding/expected/prepared.out
new file mode 100644
index 0000000000..8313f8b7aa
--- /dev/null
+++ b/contrib/test_decoding/expected/prepared.out
@@ -0,0 +1,82 @@
+-- predictability
+SET synchronous_commit = on;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+CREATE TABLE test_prepared1(id int);
+CREATE TABLE test_prepared2(id int);
+-- test simple successful use of a prepared xact
+BEGIN;
+INSERT INTO test_prepared1 VALUES (1);
+PREPARE TRANSACTION 'test_prepared#1';
+COMMIT PREPARED 'test_prepared#1';
+INSERT INTO test_prepared1 VALUES (2);
+-- test abort of a prepared xact
+BEGIN;
+INSERT INTO test_prepared1 VALUES (3);
+PREPARE TRANSACTION 'test_prepared#2';
+ROLLBACK PREPARED 'test_prepared#2';
+INSERT INTO test_prepared1 VALUES (4);
+-- test prepared xact containing ddl
+BEGIN;
+INSERT INTO test_prepared1 VALUES (5);
+ALTER TABLE test_prepared1 ADD COLUMN data text;
+INSERT INTO test_prepared1 VALUES (6, 'frakbar');
+PREPARE TRANSACTION 'test_prepared#3';
+-- test that we decode correctly while an uncommitted prepared xact
+-- with ddl exists.
+-- separate table because of the lock from the ALTER
+-- this will come before the '5' row above, as this commits before it.
+INSERT INTO test_prepared2 VALUES (7);
+COMMIT PREPARED 'test_prepared#3';
+-- make sure stuff still works
+INSERT INTO test_prepared1 VALUES (8);
+INSERT INTO test_prepared2 VALUES (9);
+-- cleanup
+DROP TABLE test_prepared1;
+DROP TABLE test_prepared2;
+-- show results
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+-------------------------------------------------------------------------
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.test_prepared1: INSERT: id[integer]:1
+ COMMIT
+ BEGIN
+ table public.test_prepared1: INSERT: id[integer]:2
+ COMMIT
+ BEGIN
+ table public.test_prepared1: INSERT: id[integer]:4
+ COMMIT
+ BEGIN
+ table public.test_prepared2: INSERT: id[integer]:7
+ COMMIT
+ BEGIN
+ table public.test_prepared1: INSERT: id[integer]:5
+ table public.test_prepared1: INSERT: id[integer]:6 data[text]:'frakbar'
+ COMMIT
+ BEGIN
+ table public.test_prepared1: INSERT: id[integer]:8 data[text]:null
+ COMMIT
+ BEGIN
+ table public.test_prepared2: INSERT: id[integer]:9
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+(30 rows)
+
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
diff --git a/contrib/test_decoding/expected/rewrite.out b/contrib/test_decoding/expected/rewrite.out
new file mode 100644
index 0000000000..ec23ab9024
--- /dev/null
+++ b/contrib/test_decoding/expected/rewrite.out
@@ -0,0 +1,107 @@
+-- predictability
+SET synchronous_commit = on;
+DROP TABLE IF EXISTS replication_example;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120));
+INSERT INTO replication_example(somedata) VALUES (1);
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+----------------------------------------------------------------------------------------------------------
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:1 somedata[integer]:1 text[character varying]:null
+ COMMIT
+(5 rows)
+
+BEGIN;
+INSERT INTO replication_example(somedata) VALUES (2);
+ALTER TABLE replication_example ADD COLUMN testcolumn1 int;
+INSERT INTO replication_example(somedata, testcolumn1) VALUES (3, 1);
+COMMIT;
+BEGIN;
+INSERT INTO replication_example(somedata) VALUES (3);
+ALTER TABLE replication_example ADD COLUMN testcolumn2 int;
+INSERT INTO replication_example(somedata, testcolumn1, testcolumn2) VALUES (4, 2, 1);
+COMMIT;
+VACUUM FULL pg_am;
+VACUUM FULL pg_amop;
+VACUUM FULL pg_proc;
+VACUUM FULL pg_opclass;
+VACUUM FULL pg_type;
+VACUUM FULL pg_index;
+VACUUM FULL pg_database;
+-- repeated rewrites that fail
+BEGIN;
+CLUSTER pg_class USING pg_class_oid_index;
+CLUSTER pg_class USING pg_class_oid_index;
+ROLLBACK;
+-- repeated rewrites that succeed
+BEGIN;
+CLUSTER pg_class USING pg_class_oid_index;
+CLUSTER pg_class USING pg_class_oid_index;
+CLUSTER pg_class USING pg_class_oid_index;
+COMMIT;
+-- repeated rewrites in different transactions
+VACUUM FULL pg_class;
+VACUUM FULL pg_class;
+INSERT INTO replication_example(somedata, testcolumn1) VALUES (5, 3);
+BEGIN;
+INSERT INTO replication_example(somedata, testcolumn1) VALUES (6, 4);
+ALTER TABLE replication_example ADD COLUMN testcolumn3 int;
+INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (7, 5, 1);
+COMMIT;
+-- make old files go away
+CHECKPOINT;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ data
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:2 somedata[integer]:2 text[character varying]:null
+ table public.replication_example: INSERT: id[integer]:3 somedata[integer]:3 text[character varying]:null testcolumn1[integer]:1
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:4 somedata[integer]:3 text[character varying]:null testcolumn1[integer]:null
+ table public.replication_example: INSERT: id[integer]:5 somedata[integer]:4 text[character varying]:null testcolumn1[integer]:2 testcolumn2[integer]:1
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:6 somedata[integer]:5 text[character varying]:null testcolumn1[integer]:3 testcolumn2[integer]:null
+ COMMIT
+ BEGIN
+ table public.replication_example: INSERT: id[integer]:7 somedata[integer]:6 text[character varying]:null testcolumn1[integer]:4 testcolumn2[integer]:null
+ table public.replication_example: INSERT: id[integer]:8 somedata[integer]:7 text[character varying]:null testcolumn1[integer]:5 testcolumn2[integer]:null testcolumn3[integer]:1
+ COMMIT
+(35 rows)
+
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
+DROP TABLE IF EXISTS replication_example;
diff --git a/contrib/test_decoding/expected/toast.out b/contrib/test_decoding/expected/toast.out
new file mode 100644
index 0000000000..6adef83f02
--- /dev/null
+++ b/contrib/test_decoding/expected/toast.out
@@ -0,0 +1,90 @@
+-- predictability
+SET synchronous_commit = on;
+DROP TABLE IF EXISTS xpto;
+NOTICE: table "xpto" does not exist, skipping
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ ?column?
+----------
+ init
+(1 row)
+
+CREATE SEQUENCE xpto_rand_seq START 79 INCREMENT 1499; -- portable "random"
+CREATE TABLE xpto (
+ id serial primary key,
+ toasted_col1 text,
+ rand1 float8 DEFAULT nextval('xpto_rand_seq'),
+ toasted_col2 text,
+ rand2 float8 DEFAULT nextval('xpto_rand_seq')
+);
+-- uncompressed external toast data
+INSERT INTO xpto (toasted_col1, toasted_col2) SELECT string_agg(g.i::text, ''), string_agg((g.i*2)::text, '') FROM generate_series(1, 2000) g(i);
+-- compressed external toast data
+INSERT INTO xpto (toasted_col2) SELECT repeat(string_agg(to_char(g.i, 'FM0000'), ''), 50) FROM generate_series(1, 500) g(i);
+-- update of existing column
+UPDATE xpto SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i)) WHERE id = 1;
+UPDATE xpto SET rand1 = 123.456 WHERE id = 1;
+DELETE FROM xpto WHERE id = 1;
+DROP TABLE IF EXISTS toasted_key;
+NOTICE: table "toasted_key" does not exist, skipping
+CREATE TABLE toasted_key (
+ id serial,
+ toasted_key text PRIMARY KEY,
+ toasted_col1 text,
+ toasted_col2 text
+);
+ALTER TABLE toasted_key ALTER COLUMN toasted_key SET STORAGE EXTERNAL;
+ALTER TABLE toasted_key ALTER COLUMN toasted_col1 SET STORAGE EXTERNAL;
+INSERT INTO toasted_key(toasted_key, toasted_col1) VALUES(repeat('1234567890', 200), repeat('9876543210', 200));
+-- test update of a toasted key without changing it
+UPDATE toasted_key SET toasted_col2 = toasted_col1;
+-- test update of a toasted key, changing it
+UPDATE toasted_key SET toasted_key = toasted_key || '1';
+DELETE FROM toasted_key;
+SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+ substr
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.xpto: INSERT: id[integer]:1 toasted_col1[text]:'1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374
+ COMMIT
+ BEGIN
+ table public.xpto: INSERT: id[integer]:2 toasted_col1[text]:null rand1[double precision]:3077 toasted_col2[text]:'00010002000300040005000600070008000900100011001200130014001500160017001800190020002100
+ COMMIT
+ BEGIN
+ table public.xpto: UPDATE: id[integer]:1 toasted_col1[text]:'1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374
+ COMMIT
+ BEGIN
+ table public.xpto: UPDATE: id[integer]:1 toasted_col1[text]:unchanged-toast-datum rand1[double precision]:123.456 toasted_col2[text]:unchanged-toast-datum rand2[double precision]:1578
+ COMMIT
+ BEGIN
+ table public.xpto: DELETE: id[integer]:1
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ COMMIT
+ BEGIN
+ table public.toasted_key: INSERT: id[integer]:1 toasted_key[text]:'1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
+ COMMIT
+ BEGIN
+ table public.toasted_key: UPDATE: id[integer]:1 toasted_key[text]:unchanged-toast-datum toasted_col1[text]:unchanged-toast-datum toasted_col2[text]:'987654321098765432109876543210987654321098765432109
+ COMMIT
+ BEGIN
+ table public.toasted_key: UPDATE: old-key: toasted_key[text]:'123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678
+ COMMIT
+ BEGIN
+ table public.toasted_key: DELETE: toasted_key[text]:'123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567
+ COMMIT
+(37 rows)
+
+SELECT pg_drop_replication_slot('regression_slot');
+ pg_drop_replication_slot
+--------------------------
+
+(1 row)
+
diff --git a/contrib/test_decoding/logical.conf b/contrib/test_decoding/logical.conf
new file mode 100644
index 0000000000..367f706651
--- /dev/null
+++ b/contrib/test_decoding/logical.conf
@@ -0,0 +1,2 @@
+wal_level = logical
+max_replication_slots = 4
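These two settings are what the test_decoding tests assume of the server they run against: logical decoding only works with wal_level = logical, and each test creates at least one replication slot, so max_replication_slots has to be greater than zero. As a quick sanity check (a sketch, assuming a psql session on a server started with this file), the active values can be confirmed with:

  SHOW wal_level;              -- should report 'logical'
  SHOW max_replication_slots;  -- should report 4 when this file is in effect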
diff --git a/contrib/test_decoding/specs/concurrent_ddl_dml.spec b/contrib/test_decoding/specs/concurrent_ddl_dml.spec
new file mode 100644
index 0000000000..7c8a7c7977
--- /dev/null
+++ b/contrib/test_decoding/specs/concurrent_ddl_dml.spec
@@ -0,0 +1,94 @@
+setup
+{
+ DROP TABLE IF EXISTS tbl1;
+ DROP TABLE IF EXISTS tbl2;
+ CREATE TABLE tbl1(val1 integer, val2 integer);
+ CREATE TABLE tbl2(val1 integer, val2 integer);
+}
+
+teardown
+{
+ DROP TABLE tbl1;
+ DROP TABLE tbl2;
+ SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot');
+}
+
+session "s1"
+step "s1_init" { SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); }
+step "s1_begin" { BEGIN; }
+step "s1_insert_tbl1" { INSERT INTO tbl1 (val1, val2) VALUES (1, 1); }
+step "s1_insert_tbl1_3col" { INSERT INTO tbl1 (val1, val2, val3) VALUES (1, 1, 1); }
+step "s1_insert_tbl2" { INSERT INTO tbl2 (val1, val2) VALUES (1, 1); }
+step "s1_insert_tbl2_3col" { INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); }
+step "s1_commit" { COMMIT; }
+
+session "s2"
+step "s2_alter_tbl1_float" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; }
+step "s2_alter_tbl1_char" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; }
+step "s2_alter_tbl1_text" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE text; }
+step "s2_alter_tbl1_boolean" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE boolean; }
+
+step "s2_alter_tbl1_add_int" { ALTER TABLE tbl1 ADD COLUMN val3 INTEGER; }
+step "s2_alter_tbl1_add_float" { ALTER TABLE tbl1 ADD COLUMN val3 FLOAT; }
+step "s2_alter_tbl1_add_char" { ALTER TABLE tbl1 ADD COLUMN val3 character varying; }
+step "s2_alter_tbl1_add_boolean" { ALTER TABLE tbl1 ADD COLUMN val3 BOOLEAN; }
+step "s2_alter_tbl1_add_text" { ALTER TABLE tbl1 ADD COLUMN val3 TEXT; }
+
+step "s2_alter_tbl2_float" { ALTER TABLE tbl2 ALTER COLUMN val2 TYPE float; }
+step "s2_alter_tbl2_char" { ALTER TABLE tbl2 ALTER COLUMN val2 TYPE character varying; }
+step "s2_alter_tbl2_text" { ALTER TABLE tbl2 ALTER COLUMN val2 TYPE text; }
+step "s2_alter_tbl2_boolean" { ALTER TABLE tbl2 ALTER COLUMN val2 TYPE boolean; }
+step "s2_alter_tbl2_text" { ALTER TABLE tbl2 ALTER COLUMN val2 TYPE boolean; }
+
+step "s2_alter_tbl2_add_int" { ALTER TABLE tbl2 ADD COLUMN val3 INTEGER; }
+step "s2_alter_tbl2_add_float" { ALTER TABLE tbl2 ADD COLUMN val3 FLOAT; }
+step "s2_alter_tbl2_add_char" { ALTER TABLE tbl2 ADD COLUMN val3 character varying; }
+step "s2_alter_tbl2_add_boolean" { ALTER TABLE tbl2 ADD COLUMN val3 BOOLEAN; }
+step "s2_alter_tbl2_add_text" { ALTER TABLE tbl2 ADD COLUMN val3 TEXT; }
+step "s2_alter_tbl2_drop_3rd_col" { ALTER TABLE tbl2 DROP COLUMN val3; }
+step "s2_alter_tbl2_3rd_char" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE character varying; }
+step "s2_alter_tbl2_3rd_text" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE text; }
+step "s2_alter_tbl2_3rd_int" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE int USING val3::integer; }
+
+step "s2_get_changes" { SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); }
+
+
+
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_float" "s1_insert_tbl2" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl1_float" "s1_insert_tbl2" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_char" "s1_insert_tbl2" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl1_char" "s1_insert_tbl2" "s1_commit" "s2_get_changes"
+
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s1_insert_tbl2" "s2_alter_tbl1_float" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s1_insert_tbl2" "s2_alter_tbl1_char" "s1_commit" "s2_get_changes"
+
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_float" "s1_insert_tbl2" "s2_alter_tbl1_float" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_char" "s1_insert_tbl2" "s2_alter_tbl1_char" "s1_commit" "s2_get_changes"
+
+permutation "s1_init" "s2_alter_tbl2_char" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_text" "s1_insert_tbl2" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s2_alter_tbl2_char" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_text" "s1_insert_tbl2" "s2_alter_tbl1_char" "s1_commit" "s2_get_changes"
+
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_boolean" "s1_insert_tbl2" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_boolean" "s1_insert_tbl2" "s2_alter_tbl1_boolean" "s1_commit" "s2_get_changes"
+
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_add_int" "s1_insert_tbl2_3col" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s1_insert_tbl2" "s1_commit" "s1_begin" "s2_alter_tbl2_add_int" "s1_insert_tbl2_3col" "s1_commit" "s2_get_changes"
+
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_add_float" "s1_insert_tbl2_3col" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s1_insert_tbl2" "s1_commit" "s1_begin" "s2_alter_tbl2_add_float" "s1_insert_tbl2_3col" "s1_commit" "s2_get_changes"
+
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_add_char" "s1_insert_tbl2_3col" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s1_begin" "s1_insert_tbl1" "s1_insert_tbl2" "s1_commit" "s1_begin" "s2_alter_tbl2_add_char" "s1_insert_tbl2_3col" "s1_commit" "s2_get_changes"
+
+permutation "s1_init" "s2_alter_tbl2_add_int" "s1_begin" "s1_insert_tbl2_3col" "s2_alter_tbl2_drop_3rd_col" "s1_commit" "s2_get_changes"
+permutation "s1_init" "s2_alter_tbl2_add_int" "s1_begin" "s1_insert_tbl2_3col" "s2_alter_tbl2_drop_3rd_col" "s1_insert_tbl2" "s1_commit" "s1_insert_tbl2" "s2_get_changes"
+
+permutation "s1_init" "s2_alter_tbl2_add_int" "s1_begin" "s1_insert_tbl2_3col" "s2_alter_tbl2_drop_3rd_col" "s1_commit" "s2_get_changes" "s2_alter_tbl2_add_text" "s1_begin" "s1_insert_tbl2_3col" "s2_alter_tbl2_3rd_char" "s1_insert_tbl2_3col" "s1_commit" "s2_get_changes" "s2_alter_tbl2_3rd_int" "s1_insert_tbl2_3col" "s2_get_changes"
+
+permutation "s1_init" "s2_alter_tbl2_add_char" "s1_begin" "s1_insert_tbl1" "s1_insert_tbl2_3col" "s2_alter_tbl2_3rd_text" "s1_insert_tbl2_3col" "s1_commit" "s1_insert_tbl2_3col" "s2_get_changes"
+permutation "s1_init" "s2_alter_tbl2_add_text" "s1_begin" "s1_insert_tbl1" "s1_insert_tbl2_3col" "s2_alter_tbl2_3rd_char" "s1_insert_tbl2_3col" "s1_commit" "s1_insert_tbl2_3col" "s2_get_changes"
+
+permutation "s1_init" "s2_alter_tbl2_add_char" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_3rd_text" "s1_insert_tbl2_3col" "s1_commit" "s2_alter_tbl2_drop_3rd_col" "s1_insert_tbl2" "s2_get_changes"
+permutation "s1_init" "s2_alter_tbl2_add_text" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_3rd_char" "s1_insert_tbl2_3col" "s1_commit" "s2_alter_tbl2_drop_3rd_col" "s1_insert_tbl2" "s2_get_changes"
+
+permutation "s1_init" "s2_alter_tbl2_add_char" "s1_begin" "s1_insert_tbl1" "s2_alter_tbl2_drop_3rd_col" "s1_insert_tbl1" "s1_commit" "s2_get_changes"
diff --git a/contrib/test_decoding/specs/delayed_startup.spec b/contrib/test_decoding/specs/delayed_startup.spec
new file mode 100644
index 0000000000..b7fe8148ce
--- /dev/null
+++ b/contrib/test_decoding/specs/delayed_startup.spec
@@ -0,0 +1,24 @@
+setup
+{
+ DROP TABLE IF EXISTS do_write;
+ CREATE TABLE do_write(id serial primary key);
+}
+
+teardown
+{
+ DROP TABLE do_write;
+ SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot');
+}
+
+session "s1"
+setup { SET synchronous_commit=on; }
+step "s1b" { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step "s1w" { INSERT INTO do_write DEFAULT VALUES; }
+step "s1c" { COMMIT; }
+session "s2"
+setup { SET synchronous_commit=on; }
+step "s2init" {SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');}
+step "s2start" {SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');}
+
+
+permutation "s1b" "s1w" "s2init" "s1c" "s2start" "s1b" "s1w" "s1c" "s2start" "s1b" "s1w" "s2start" "s1c" "s2start"
diff --git a/contrib/test_decoding/specs/mxact.spec b/contrib/test_decoding/specs/mxact.spec
new file mode 100644
index 0000000000..ea5b1aa2d6
--- /dev/null
+++ b/contrib/test_decoding/specs/mxact.spec
@@ -0,0 +1,38 @@
+setup
+{
+ DROP TABLE IF EXISTS do_write;
+ CREATE TABLE do_write(id serial primary key);
+}
+
+teardown
+{
+ DROP TABLE IF EXISTS do_write;
+ SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot');
+}
+
+session "s0"
+setup { SET synchronous_commit=on; }
+step "s0init" {SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');}
+step "s0start" {SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');}
+step "s0alter" {ALTER TABLE do_write ADD column ts timestamptz; }
+step "s0w" { INSERT INTO do_write DEFAULT VALUES; }
+
+session "s1"
+setup { SET synchronous_commit=on; }
+step "s1begin" {BEGIN;}
+step "s1sharepgclass" { SELECT count(*) > 1 FROM (SELECT * FROM pg_class FOR SHARE) s; }
+step "s1keysharepgclass" { SELECT count(*) > 1 FROM (SELECT * FROM pg_class FOR KEY SHARE) s; }
+step "s1commit" {COMMIT;}
+
+session "s2"
+setup { SET synchronous_commit=on; }
+step "s2begin" {BEGIN;}
+step "s2sharepgclass" { SELECT count(*) > 1 FROM (SELECT * FROM pg_class FOR SHARE) s; }
+step "s2keysharepgclass" { SELECT count(*) > 1 FROM (SELECT * FROM pg_class FOR KEY SHARE) s; }
+step "s2commit" {COMMIT;}
+
+# test that we're handling an update-only mxact xmax correctly
+permutation "s0init" "s0start" "s1begin" "s1sharepgclass" "s2begin" "s2sharepgclass" "s0w" "s0start" "s2commit" "s1commit"
+
+# test that we're handling an update-only mxact xmax correctly
+permutation "s0init" "s0start" "s1begin" "s1keysharepgclass" "s2begin" "s2keysharepgclass" "s0alter" "s0w" "s0start" "s2commit" "s1commit"
diff --git a/contrib/test_decoding/sql/binary.sql b/contrib/test_decoding/sql/binary.sql
new file mode 100644
index 0000000000..619f00b3bc
--- /dev/null
+++ b/contrib/test_decoding/sql/binary.sql
@@ -0,0 +1,14 @@
+-- predictability
+SET synchronous_commit = on;
+
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+-- succeeds, textual plugin, textual consumer
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '0');
+-- fails, binary plugin, textual consumer
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '1');
+-- succeeds, textual plugin, binary consumer
+SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '0');
+-- succeeds, binary plugin, binary consumer
+SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '1');
+
+SELECT 'init' FROM pg_drop_replication_slot('regression_slot');
diff --git a/contrib/test_decoding/sql/ddl.sql b/contrib/test_decoding/sql/ddl.sql
new file mode 100644
index 0000000000..87e74c64f3
--- /dev/null
+++ b/contrib/test_decoding/sql/ddl.sql
@@ -0,0 +1,338 @@
+-- predictability
+SET synchronous_commit = on;
+
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+-- fail because of an already existing slot
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+-- fail because of an invalid name
+SELECT 'init' FROM pg_create_logical_replication_slot('Invalid Name', 'test_decoding');
+
+-- fail twice because of invalid parameter values
+SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
+SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'nonexistant-option', 'frakbar');
+SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
+
+-- succeed once
+SELECT pg_drop_replication_slot('regression_slot');
+-- fail
+SELECT pg_drop_replication_slot('regression_slot');
+
+-- check that we're detecting a streaming rep slot used for logical decoding
+SELECT 'init' FROM pg_create_physical_replication_slot('repl');
+SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0');
+SELECT pg_drop_replication_slot('repl');
+
+
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+
+/* check whether status function reports us, only reproducible columns */
+SELECT slot_name, plugin, slot_type, active,
+ NOT catalog_xmin IS NULL AS catalog_xmin_set,
+ xmin IS NULL AS data_xmin_not_set,
+ pg_xlog_location_diff(restart_lsn, '0/01000000') > 0 AS some_wal
+FROM pg_replication_slots;
+
+/*
+ * Check that changes are handled correctly when interleaved with ddl
+ */
+CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120));
+BEGIN;
+INSERT INTO replication_example(somedata, text) VALUES (1, 1);
+INSERT INTO replication_example(somedata, text) VALUES (1, 2);
+COMMIT;
+
+ALTER TABLE replication_example ADD COLUMN bar int;
+
+INSERT INTO replication_example(somedata, text, bar) VALUES (2, 1, 4);
+
+BEGIN;
+INSERT INTO replication_example(somedata, text, bar) VALUES (2, 2, 4);
+INSERT INTO replication_example(somedata, text, bar) VALUES (2, 3, 4);
+INSERT INTO replication_example(somedata, text, bar) VALUES (2, 4, NULL);
+COMMIT;
+
+ALTER TABLE replication_example DROP COLUMN bar;
+INSERT INTO replication_example(somedata, text) VALUES (3, 1);
+
+BEGIN;
+INSERT INTO replication_example(somedata, text) VALUES (3, 2);
+INSERT INTO replication_example(somedata, text) VALUES (3, 3);
+COMMIT;
+
+ALTER TABLE replication_example RENAME COLUMN text TO somenum;
+
+INSERT INTO replication_example(somedata, somenum) VALUES (4, 1);
+
+-- collect all changes
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4);
+-- throw away changes, they contain oids
+SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+INSERT INTO replication_example(somedata, somenum) VALUES (5, 1);
+
+BEGIN;
+INSERT INTO replication_example(somedata, somenum) VALUES (6, 1);
+ALTER TABLE replication_example ADD COLUMN zaphod1 int;
+INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 2, 1);
+ALTER TABLE replication_example ADD COLUMN zaphod2 int;
+INSERT INTO replication_example(somedata, somenum, zaphod2) VALUES (6, 3, 1);
+INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 4, 2);
+COMMIT;
+
+-- show changes
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+-- hide changes because of oids visible in full table rewrites
+CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int);
+INSERT INTO tr_unique(data) VALUES(10);
+ALTER TABLE tr_unique RENAME TO tr_pkey;
+ALTER TABLE tr_pkey ADD COLUMN id serial primary key;
+SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+INSERT INTO tr_pkey(data) VALUES(1);
+--show deletion with primary key
+DELETE FROM tr_pkey;
+
+/* display results */
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+/*
+ * check that disk spooling works
+ */
+BEGIN;
+CREATE TABLE tr_etoomuch (id serial primary key, data int);
+INSERT INTO tr_etoomuch(data) SELECT g.i FROM generate_series(1, 10234) g(i);
+DELETE FROM tr_etoomuch WHERE id < 5000;
+UPDATE tr_etoomuch SET data = - data WHERE id > 5000;
+COMMIT;
+
+/* display results, but hide most of the output */
+SELECT count(*), min(data), max(data)
+FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0')
+GROUP BY substring(data, 1, 24)
+ORDER BY 1,2;
+
+/*
+ * check whether we decode subtransactions correctly in relation with each
+ * other
+ */
+CREATE TABLE tr_sub (id serial primary key, path text);
+
+-- toplevel, subtxn, toplevel, subtxn, subtxn
+BEGIN;
+INSERT INTO tr_sub(path) VALUES ('1-top-#1');
+
+SAVEPOINT a;
+INSERT INTO tr_sub(path) VALUES ('1-top-1-#1');
+INSERT INTO tr_sub(path) VALUES ('1-top-1-#2');
+RELEASE SAVEPOINT a;
+
+SAVEPOINT b;
+SAVEPOINT c;
+INSERT INTO tr_sub(path) VALUES ('1-top-2-1-#1');
+INSERT INTO tr_sub(path) VALUES ('1-top-2-1-#2');
+RELEASE SAVEPOINT c;
+INSERT INTO tr_sub(path) VALUES ('1-top-2-#1');
+RELEASE SAVEPOINT b;
+COMMIT;
+
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+-- check that we handle xlog assignments correctly
+BEGIN;
+-- nest 80 subtxns
+SAVEPOINT subtop;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;SAVEPOINT a;
+-- assign xid by inserting
+INSERT INTO tr_sub(path) VALUES ('2-top-1...--#1');
+INSERT INTO tr_sub(path) VALUES ('2-top-1...--#2');
+INSERT INTO tr_sub(path) VALUES ('2-top-1...--#3');
+RELEASE SAVEPOINT subtop;
+INSERT INTO tr_sub(path) VALUES ('2-top-#1');
+COMMIT;
+
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+-- make sure rolled-back subtransactions aren't decoded
+BEGIN;
+INSERT INTO tr_sub(path) VALUES ('3-top-2-#1');
+SAVEPOINT a;
+INSERT INTO tr_sub(path) VALUES ('3-top-2-1-#1');
+SAVEPOINT b;
+INSERT INTO tr_sub(path) VALUES ('3-top-2-2-#1');
+ROLLBACK TO SAVEPOINT b;
+INSERT INTO tr_sub(path) VALUES ('3-top-2-#2');
+COMMIT;
+
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+-- test whether a known, but not yet logged toplevel xact, followed by a
+-- subxact commit is handled correctly
+BEGIN;
+SELECT txid_current() != 0; -- so no fixed xid appears in the outfile
+SAVEPOINT a;
+INSERT INTO tr_sub(path) VALUES ('4-top-1-#1');
+RELEASE SAVEPOINT a;
+COMMIT;
+
+-- test whether a change in a subtransaction, in an unknown toplevel
+-- xact is handled correctly.
+BEGIN;
+SAVEPOINT a;
+INSERT INTO tr_sub(path) VALUES ('5-top-1-#1');
+COMMIT;
+
+
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+
+/*
+ * Check whether treating a table as a catalog table works somewhat
+ */
+CREATE TABLE replication_metadata (
+ id serial primary key,
+ relation name NOT NULL,
+ options text[]
+)
+WITH (user_catalog_table = true)
+;
+\d+ replication_metadata
+
+INSERT INTO replication_metadata(relation, options)
+VALUES ('foo', ARRAY['a', 'b']);
+
+ALTER TABLE replication_metadata RESET (user_catalog_table);
+\d+ replication_metadata
+
+INSERT INTO replication_metadata(relation, options)
+VALUES ('bar', ARRAY['a', 'b']);
+
+ALTER TABLE replication_metadata SET (user_catalog_table = true);
+\d+ replication_metadata
+
+INSERT INTO replication_metadata(relation, options)
+VALUES ('blub', NULL);
+
+-- make sure rewrites don't work
+ALTER TABLE replication_metadata ADD COLUMN rewritemeornot int;
+ALTER TABLE replication_metadata ALTER COLUMN rewritemeornot TYPE text;
+
+ALTER TABLE replication_metadata SET (user_catalog_table = false);
+\d+ replication_metadata
+
+INSERT INTO replication_metadata(relation, options)
+VALUES ('zaphod', NULL);
+
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+/*
+ * check whether we handle updates/deletes correctly with & without a pkey
+ */
+
+/* we should handle the case without a key at all more gracefully */
+CREATE TABLE table_without_key(id serial, data int);
+INSERT INTO table_without_key(data) VALUES(1),(2);
+DELETE FROM table_without_key WHERE data = 1;
+-- won't log old keys
+UPDATE table_without_key SET data = 3 WHERE data = 2;
+UPDATE table_without_key SET id = -id;
+UPDATE table_without_key SET id = -id;
+-- should log the full old row now
+ALTER TABLE table_without_key REPLICA IDENTITY FULL;
+UPDATE table_without_key SET data = 3 WHERE data = 2;
+UPDATE table_without_key SET id = -id;
+UPDATE table_without_key SET id = -id;
+DELETE FROM table_without_key WHERE data = 3;
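+-- Illustrative sketch of the decoded form (shown here only as an example of
+-- the plugin's output format, values taken from the rows above): with
+-- REPLICA IDENTITY FULL an UPDATE is decoded with both images, roughly
+--   table public.table_without_key: UPDATE: old-key: id[integer]:2 data[integer]:3 new-tuple: id[integer]:-2 data[integer]:3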
+
+CREATE TABLE table_with_pkey(id serial primary key, data int);
+INSERT INTO table_with_pkey(data) VALUES(1), (2);
+DELETE FROM table_with_pkey WHERE data = 1;
+-- should log the old pkey
+UPDATE table_with_pkey SET data = 3 WHERE data = 2;
+UPDATE table_with_pkey SET id = -id;
+UPDATE table_with_pkey SET id = -id;
+-- check that we log nothing despite having a pkey
+ALTER TABLE table_with_pkey REPLICA IDENTITY NOTHING;
+UPDATE table_with_pkey SET id = -id;
+-- check that we log everything despite having a pkey
+ALTER TABLE table_with_pkey REPLICA IDENTITY FULL;
+UPDATE table_with_pkey SET id = -id;
+DELETE FROM table_with_pkey WHERE data = 3;
+
+CREATE TABLE table_with_unique_not_null(id serial unique, data int);
+ALTER TABLE table_with_unique_not_null ALTER COLUMN id SET NOT NULL; --already set
+-- won't log anything, replica identity not set up
+INSERT INTO table_with_unique_not_null(data) VALUES(1), (2);
+DELETE FROM table_with_unique_not_null WHERE data = 1;
+UPDATE table_with_unique_not_null SET data = 3 WHERE data = 2;
+UPDATE table_with_unique_not_null SET id = -id;
+UPDATE table_with_unique_not_null SET id = -id;
+DELETE FROM table_with_unique_not_null WHERE data = 3;
+-- should log old key
+ALTER TABLE table_with_unique_not_null REPLICA IDENTITY USING INDEX table_with_unique_not_null_id_key;
+INSERT INTO table_with_unique_not_null(data) VALUES(1), (2);
+DELETE FROM table_with_unique_not_null WHERE data = 1;
+UPDATE table_with_unique_not_null SET data = 3 WHERE data = 2;
+UPDATE table_with_unique_not_null SET id = -id;
+UPDATE table_with_unique_not_null SET id = -id;
+DELETE FROM table_with_unique_not_null WHERE data = 3;
+
+-- check toast support
+BEGIN;
+CREATE SEQUENCE toasttable_rand_seq START 79 INCREMENT 1499; -- portable "random"
+CREATE TABLE toasttable(
+ id serial primary key,
+ toasted_col1 text,
+ rand1 float8 DEFAULT nextval('toasttable_rand_seq'),
+ toasted_col2 text,
+ rand2 float8 DEFAULT nextval('toasttable_rand_seq')
+ );
+COMMIT;
+-- uncompressed external toast data
+INSERT INTO toasttable(toasted_col1) SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i);
+
+-- compressed external toast data
+INSERT INTO toasttable(toasted_col2) SELECT repeat(string_agg(to_char(g.i, 'FM0000'), ''), 50) FROM generate_series(1, 500) g(i);
+
+-- update of existing column
+UPDATE toasttable
+ SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i))
+WHERE id = 1;
+
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+INSERT INTO toasttable(toasted_col1) SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i);
+
+-- update of second column, first column unchanged
+UPDATE toasttable
+ SET toasted_col2 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i))
+WHERE id = 1;
+
+-- make sure we decode correctly even if the toast table is gone
+DROP TABLE toasttable;
+
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+-- done, free logical replication slot
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+SELECT pg_drop_replication_slot('regression_slot');
+
+/* check that the slot is gone */
+SELECT * FROM pg_replication_slots;
diff --git a/contrib/test_decoding/sql/decoding_in_xact.sql b/contrib/test_decoding/sql/decoding_in_xact.sql
new file mode 100644
index 0000000000..2771afee7a
--- /dev/null
+++ b/contrib/test_decoding/sql/decoding_in_xact.sql
@@ -0,0 +1,41 @@
+-- predictability
+SET synchronous_commit = on;
+
+-- fail because we're creating a slot while in an xact with an xid
+BEGIN;
+SELECT txid_current() = 0;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ROLLBACK;
+
+-- fail because we're creating a slot while in a subxact whose topxact has an xid
+BEGIN;
+SELECT txid_current() = 0;
+SAVEPOINT barf;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+ROLLBACK TO SAVEPOINT barf;
+ROLLBACK;
+
+-- succeed, outside tx.
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+SELECT 'stop' FROM pg_drop_replication_slot('regression_slot');
+
+-- succeed, in tx without xid.
+BEGIN;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+COMMIT;
+
+CREATE TABLE nobarf(id serial primary key, data text);
+INSERT INTO nobarf(data) VALUES('1');
+
+-- decoding works in transaction with xid
+BEGIN;
+SELECT txid_current() = 0;
+-- don't show yet, haven't committed
+INSERT INTO nobarf(data) VALUES('2');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+COMMIT;
+
+INSERT INTO nobarf(data) VALUES('3');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+SELECT 'stop' FROM pg_drop_replication_slot('regression_slot');
diff --git a/contrib/test_decoding/sql/permissions.sql b/contrib/test_decoding/sql/permissions.sql
new file mode 100644
index 0000000000..39d70b56b0
--- /dev/null
+++ b/contrib/test_decoding/sql/permissions.sql
@@ -0,0 +1,69 @@
+-- predictability
+SET synchronous_commit = on;
+
+-- setup
+CREATE ROLE lr_normal;
+CREATE ROLE lr_superuser SUPERUSER;
+CREATE ROLE lr_replication REPLICATION;
+CREATE TABLE lr_test(data text);
+
+-- superuser can control replication
+SET ROLE lr_superuser;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+INSERT INTO lr_test VALUES('lr_superuser_init');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT pg_drop_replication_slot('regression_slot');
+RESET ROLE;
+
+-- replication user can control replication
+SET ROLE lr_replication;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+INSERT INTO lr_test VALUES('lr_superuser_init');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT pg_drop_replication_slot('regression_slot');
+RESET ROLE;
+
+-- plain user *can't* control replication
+SET ROLE lr_normal;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+INSERT INTO lr_test VALUES('lr_superuser_init');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT pg_drop_replication_slot('regression_slot');
+RESET ROLE;
+
+-- replication users can drop superuser created slots
+SET ROLE lr_superuser;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+RESET ROLE;
+SET ROLE lr_replication;
+SELECT pg_drop_replication_slot('regression_slot');
+RESET ROLE;
+
+-- normal users can't drop existing slots
+SET ROLE lr_superuser;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+RESET ROLE;
+SET ROLE lr_normal;
+SELECT pg_drop_replication_slot('regression_slot');
+RESET ROLE;
+
+-- all users can see existing slots
+SET ROLE lr_superuser;
+SELECT slot_name, plugin FROM pg_replication_slots;
+RESET ROLE;
+
+SET ROLE lr_replication;
+SELECT slot_name, plugin FROM pg_replication_slots;
+RESET ROLE;
+
+SET ROLE lr_normal;
+SELECT slot_name, plugin FROM pg_replication_slots;
+RESET ROLE;
+
+-- cleanup
+SELECT pg_drop_replication_slot('regression_slot');
+
+DROP ROLE lr_normal;
+DROP ROLE lr_superuser;
+DROP ROLE lr_replication;
+DROP TABLE lr_test;
diff --git a/contrib/test_decoding/sql/prepared.sql b/contrib/test_decoding/sql/prepared.sql
new file mode 100644
index 0000000000..652f3d3f44
--- /dev/null
+++ b/contrib/test_decoding/sql/prepared.sql
@@ -0,0 +1,50 @@
+-- predictability
+SET synchronous_commit = on;
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+
+CREATE TABLE test_prepared1(id int);
+CREATE TABLE test_prepared2(id int);
+
+-- test simple successful use of a prepared xact
+BEGIN;
+INSERT INTO test_prepared1 VALUES (1);
+PREPARE TRANSACTION 'test_prepared#1';
+COMMIT PREPARED 'test_prepared#1';
+INSERT INTO test_prepared1 VALUES (2);
+
+-- test abort of a prepared xact
+BEGIN;
+INSERT INTO test_prepared1 VALUES (3);
+PREPARE TRANSACTION 'test_prepared#2';
+ROLLBACK PREPARED 'test_prepared#2';
+
+INSERT INTO test_prepared1 VALUES (4);
+
+-- test prepared xact containing ddl
+BEGIN;
+INSERT INTO test_prepared1 VALUES (5);
+ALTER TABLE test_prepared1 ADD COLUMN data text;
+INSERT INTO test_prepared1 VALUES (6, 'frakbar');
+PREPARE TRANSACTION 'test_prepared#3';
+
+-- test that we decode correctly while an uncommitted prepared xact
+-- with ddl exists.
+
+-- separate table because of the lock from the ALTER
+-- this will come before the '5' row above, as this commits before it.
+INSERT INTO test_prepared2 VALUES (7);
+
+COMMIT PREPARED 'test_prepared#3';
+
+-- make sure stuff still works
+INSERT INTO test_prepared1 VALUES (8);
+INSERT INTO test_prepared2 VALUES (9);
+
+-- cleanup
+DROP TABLE test_prepared1;
+DROP TABLE test_prepared2;
+
+-- show results
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+SELECT pg_drop_replication_slot('regression_slot');
diff --git a/contrib/test_decoding/sql/rewrite.sql b/contrib/test_decoding/sql/rewrite.sql
new file mode 100644
index 0000000000..9a3dcbf857
--- /dev/null
+++ b/contrib/test_decoding/sql/rewrite.sql
@@ -0,0 +1,62 @@
+-- predictability
+SET synchronous_commit = on;
+
+DROP TABLE IF EXISTS replication_example;
+
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120));
+INSERT INTO replication_example(somedata) VALUES (1);
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+
+BEGIN;
+INSERT INTO replication_example(somedata) VALUES (2);
+ALTER TABLE replication_example ADD COLUMN testcolumn1 int;
+INSERT INTO replication_example(somedata, testcolumn1) VALUES (3, 1);
+COMMIT;
+
+BEGIN;
+INSERT INTO replication_example(somedata) VALUES (3);
+ALTER TABLE replication_example ADD COLUMN testcolumn2 int;
+INSERT INTO replication_example(somedata, testcolumn1, testcolumn2) VALUES (4, 2, 1);
+COMMIT;
+
+VACUUM FULL pg_am;
+VACUUM FULL pg_amop;
+VACUUM FULL pg_proc;
+VACUUM FULL pg_opclass;
+VACUUM FULL pg_type;
+VACUUM FULL pg_index;
+VACUUM FULL pg_database;
+
+-- repeated rewrites in a transaction that is rolled back
+BEGIN;
+CLUSTER pg_class USING pg_class_oid_index;
+CLUSTER pg_class USING pg_class_oid_index;
+ROLLBACK;
+
+-- repeated rewrites in a transaction that commits
+BEGIN;
+CLUSTER pg_class USING pg_class_oid_index;
+CLUSTER pg_class USING pg_class_oid_index;
+CLUSTER pg_class USING pg_class_oid_index;
+COMMIT;
+
+-- repeated rewrites in different transactions
+VACUUM FULL pg_class;
+VACUUM FULL pg_class;
+
+INSERT INTO replication_example(somedata, testcolumn1) VALUES (5, 3);
+
+BEGIN;
+INSERT INTO replication_example(somedata, testcolumn1) VALUES (6, 4);
+ALTER TABLE replication_example ADD COLUMN testcolumn3 int;
+INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (7, 5, 1);
+COMMIT;
+
+-- make old files go away
+CHECKPOINT;
+
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT pg_drop_replication_slot('regression_slot');
+
+DROP TABLE IF EXISTS replication_example;
diff --git a/contrib/test_decoding/sql/toast.sql b/contrib/test_decoding/sql/toast.sql
new file mode 100644
index 0000000000..943db9d2ee
--- /dev/null
+++ b/contrib/test_decoding/sql/toast.sql
@@ -0,0 +1,51 @@
+-- predictability
+SET synchronous_commit = on;
+
+DROP TABLE IF EXISTS xpto;
+
+SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding');
+
+CREATE SEQUENCE xpto_rand_seq START 79 INCREMENT 1499; -- portable "random"
+CREATE TABLE xpto (
+ id serial primary key,
+ toasted_col1 text,
+ rand1 float8 DEFAULT nextval('xpto_rand_seq'),
+ toasted_col2 text,
+ rand2 float8 DEFAULT nextval('xpto_rand_seq')
+);
+
+-- uncompressed external toast data
+INSERT INTO xpto (toasted_col1, toasted_col2) SELECT string_agg(g.i::text, ''), string_agg((g.i*2)::text, '') FROM generate_series(1, 2000) g(i);
+
+-- compressed external toast data
+INSERT INTO xpto (toasted_col2) SELECT repeat(string_agg(to_char(g.i, 'FM0000'), ''), 50) FROM generate_series(1, 500) g(i);
+
+-- update of existing column
+UPDATE xpto SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i)) WHERE id = 1;
+
+UPDATE xpto SET rand1 = 123.456 WHERE id = 1;
+
+DELETE FROM xpto WHERE id = 1;
+
+DROP TABLE IF EXISTS toasted_key;
+CREATE TABLE toasted_key (
+ id serial,
+ toasted_key text PRIMARY KEY,
+ toasted_col1 text,
+ toasted_col2 text
+);
+
+ALTER TABLE toasted_key ALTER COLUMN toasted_key SET STORAGE EXTERNAL;
+ALTER TABLE toasted_key ALTER COLUMN toasted_col1 SET STORAGE EXTERNAL;
+
+INSERT INTO toasted_key(toasted_key, toasted_col1) VALUES(repeat('1234567890', 200), repeat('9876543210', 200));
+
+-- test update of a toasted key without changing it
+UPDATE toasted_key SET toasted_col2 = toasted_col1;
+-- test update of a toasted key, changing it
+UPDATE toasted_key SET toasted_key = toasted_key || '1';
+
+DELETE FROM toasted_key;
+
+SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT pg_drop_replication_slot('regression_slot');
diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c
new file mode 100644
index 0000000000..5ce052b5c6
--- /dev/null
+++ b/contrib/test_decoding/test_decoding.c
@@ -0,0 +1,407 @@
+/*-------------------------------------------------------------------------
+ *
+ * test_decoding.c
+ * example logical decoding output plugin
+ *
+ * Copyright (c) 2012-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/test_decoding/test_decoding.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include "access/sysattr.h"
+
+#include "catalog/pg_class.h"
+#include "catalog/pg_type.h"
+
+#include "nodes/parsenodes.h"
+
+#include "replication/output_plugin.h"
+#include "replication/logical.h"
+
+#include "utils/builtins.h"
+#include "utils/lsyscache.h"
+#include "utils/memutils.h"
+#include "utils/rel.h"
+#include "utils/relcache.h"
+#include "utils/syscache.h"
+#include "utils/typcache.h"
+
+
+PG_MODULE_MAGIC;
+
+/* These must be available to pg_dlsym() */
+extern void _PG_init(void);
+extern void _PG_output_plugin_init(OutputPluginCallbacks *cb);
+
+typedef struct
+{
+ MemoryContext context;
+ bool include_xids;
+ bool include_timestamp;
+} TestDecodingData;
+
+static void pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
+ bool is_init);
+static void pg_decode_shutdown(LogicalDecodingContext *ctx);
+static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
+ ReorderBufferTXN *txn);
+static void pg_decode_commit_txn(LogicalDecodingContext *ctx,
+ ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
+static void pg_decode_change(LogicalDecodingContext *ctx,
+ ReorderBufferTXN *txn, Relation rel,
+ ReorderBufferChange *change);
+
+void
+_PG_init(void)
+{
+ /* other plugins can perform things here */
+}
+
+/* specify output plugin callbacks */
+void
+_PG_output_plugin_init(OutputPluginCallbacks *cb)
+{
+ AssertVariableIsOfType(&_PG_output_plugin_init, LogicalOutputPluginInit);
+
+ cb->startup_cb = pg_decode_startup;
+ cb->begin_cb = pg_decode_begin_txn;
+ cb->change_cb = pg_decode_change;
+ cb->commit_cb = pg_decode_commit_txn;
+ cb->shutdown_cb = pg_decode_shutdown;
+}
+
+
+/* initialize this plugin */
+static void
+pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
+ bool is_init)
+{
+ ListCell *option;
+ TestDecodingData *data;
+
+ data = palloc(sizeof(TestDecodingData));
+ data->context = AllocSetContextCreate(ctx->context,
+ "text conversion context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
+ data->include_xids = true;
+ data->include_timestamp = false;
+
+ ctx->output_plugin_private = data;
+
+ opt->output_type = OUTPUT_PLUGIN_TEXTUAL_OUTPUT;
+
+ foreach(option, ctx->output_plugin_options)
+ {
+ DefElem *elem = lfirst(option);
+
+ Assert(elem->arg == NULL || IsA(elem->arg, String));
+
+ if (strcmp(elem->defname, "include-xids") == 0)
+ {
+ /* if option does not provide a value, it means its value is true */
+ if (elem->arg == NULL)
+ data->include_xids = true;
+ else if (!parse_bool(strVal(elem->arg), &data->include_xids))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("could not parse value \"%s\" for parameter \"%s\"",
+ strVal(elem->arg), elem->defname)));
+ }
+ else if (strcmp(elem->defname, "include-timestamp") == 0)
+ {
+ if (elem->arg == NULL)
+ data->include_timestamp = true;
+ else if (!parse_bool(strVal(elem->arg), &data->include_timestamp))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("could not parse value \"%s\" for parameter \"%s\"",
+ strVal(elem->arg), elem->defname)));
+ }
+ else if (strcmp(elem->defname, "force-binary") == 0)
+ {
+ bool force_binary;
+
+ if (elem->arg == NULL)
+ continue;
+ else if (!parse_bool(strVal(elem->arg), &force_binary))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("could not parse value \"%s\" for parameter \"%s\"",
+ strVal(elem->arg), elem->defname)));
+
+ if (force_binary)
+ opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT;
+ }
+ else
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("option \"%s\" = \"%s\" is unknown",
+ elem->defname,
+ elem->arg ? strVal(elem->arg) : "(null)")));
+ }
+ }
+}
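+
+/*
+ * Illustrative usage (an assumption mirroring the regression tests, not
+ * exercised here): the options parsed in pg_decode_startup() arrive as
+ * name/value pairs from the SQL interface, e.g.
+ *
+ *   SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL,
+ *          NULL, 'include-xids', '0', 'include-timestamp', 'on');
+ */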
+
+/* cleanup this plugin's resources */
+static void
+pg_decode_shutdown(LogicalDecodingContext *ctx)
+{
+ TestDecodingData *data = ctx->output_plugin_private;
+
+ /* cleanup our own resources via memory context reset */
+ MemoryContextDelete(data->context);
+}
+
+/* BEGIN callback */
+static void
+pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
+{
+ TestDecodingData *data = ctx->output_plugin_private;
+
+ OutputPluginPrepareWrite(ctx, true);
+ if (data->include_xids)
+ appendStringInfo(ctx->out, "BEGIN %u", txn->xid);
+ else
+ appendStringInfoString(ctx->out, "BEGIN");
+ OutputPluginWrite(ctx, true);
+}
+
+/* COMMIT callback */
+static void
+pg_decode_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
+ XLogRecPtr commit_lsn)
+{
+ TestDecodingData *data = ctx->output_plugin_private;
+
+ OutputPluginPrepareWrite(ctx, true);
+ if (data->include_xids)
+ appendStringInfo(ctx->out, "COMMIT %u", txn->xid);
+ else
+ appendStringInfoString(ctx->out, "COMMIT");
+
+ if (data->include_timestamp)
+ appendStringInfo(ctx->out, " (at %s)",
+ timestamptz_to_str(txn->commit_time));
+
+ OutputPluginWrite(ctx, true);
+}
+
+/*
+ * Print literal `outputstr' already represented as string of type `typid'
+ * into stringbuf `s'.
+ *
+ * Some builtin types aren't quoted; the rest are. Escaping is done as if
+ * standard_conforming_strings were enabled.
+ */
+static void
+print_literal(StringInfo s, Oid typid, char *outputstr)
+{
+ const char *valptr;
+
+ switch (typid)
+ {
+ case INT2OID:
+ case INT4OID:
+ case INT8OID:
+ case OIDOID:
+ case FLOAT4OID:
+ case FLOAT8OID:
+ case NUMERICOID:
+ /* NB: We don't care about Inf, NaN et al. */
+ appendStringInfoString(s, outputstr);
+ break;
+
+ case BITOID:
+ case VARBITOID:
+ appendStringInfo(s, "B'%s'", outputstr);
+ break;
+
+ case BOOLOID:
+ if (strcmp(outputstr, "t") == 0)
+ appendStringInfoString(s, "true");
+ else
+ appendStringInfoString(s, "false");
+ break;
+
+ default:
+ appendStringInfoChar(s, '\'');
+ for (valptr = outputstr; *valptr; valptr++)
+ {
+ char ch = *valptr;
+
+ if (SQL_STR_DOUBLE(ch, false))
+ appendStringInfoChar(s, ch);
+ appendStringInfoChar(s, ch);
+ }
+ appendStringInfoChar(s, '\'');
+ break;
+ }
+}
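+
+/*
+ * Illustrative examples of the quoting rules above (not exercised directly):
+ * an int4 value 42 is printed as 42, a bool as true/false, a bit string as
+ * B'1010', and a text value such as it's as 'it''s' (only single quotes are
+ * doubled, as with standard_conforming_strings).
+ */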
+
+/* print the tuple 'tuple' into the StringInfo s */
+static void
+tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_nulls)
+{
+ int natt;
+ Oid oid;
+
+ /* print oid of tuple, it's not included in the TupleDesc */
+ if ((oid = HeapTupleHeaderGetOid(tuple->t_data)) != InvalidOid)
+ {
+ appendStringInfo(s, " oid[oid]:%u", oid);
+ }
+
+ /* print all columns individually */
+ for (natt = 0; natt < tupdesc->natts; natt++)
+ {
+ Form_pg_attribute attr; /* the attribute itself */
+ Oid typid; /* type of current attribute */
+ Oid typoutput; /* output function */
+ bool typisvarlena;
+ Datum origval; /* possibly toasted Datum */
+ bool isnull; /* column is null? */
+
+ attr = tupdesc->attrs[natt];
+
+ /*
+ * don't print dropped columns, we can't be sure everything is
+ * available for them
+ */
+ if (attr->attisdropped)
+ continue;
+
+ /*
+ * Don't print system columns, oid will already have been printed if
+ * present.
+ */
+ if (attr->attnum < 0)
+ continue;
+
+ typid = attr->atttypid;
+
+ /* get Datum from tuple */
+ origval = fastgetattr(tuple, natt + 1, tupdesc, &isnull);
+
+ if (isnull && skip_nulls)
+ continue;
+
+ /* print attribute name */
+ appendStringInfoChar(s, ' ');
+ appendStringInfoString(s, quote_identifier(NameStr(attr->attname)));
+
+ /* print attribute type */
+ appendStringInfoChar(s, '[');
+ appendStringInfoString(s, format_type_be(typid));
+ appendStringInfoChar(s, ']');
+
+ /* query output function */
+ getTypeOutputInfo(typid,
+ &typoutput, &typisvarlena);
+
+ /* print separator */
+ appendStringInfoChar(s, ':');
+
+ /* print data */
+ if (isnull)
+ appendStringInfoString(s, "null");
+ else if (typisvarlena && VARATT_IS_EXTERNAL_ONDISK(origval))
+ appendStringInfoString(s, "unchanged-toast-datum");
+ else if (!typisvarlena)
+ print_literal(s, typid,
+ OidOutputFunctionCall(typoutput, origval));
+ else
+ {
+ Datum val; /* definitely detoasted Datum */
+
+ val = PointerGetDatum(PG_DETOAST_DATUM(origval));
+ print_literal(s, typid, OidOutputFunctionCall(typoutput, val));
+ }
+ }
+}
+
+/*
+ * callback for individual changed tuples
+ */
+static void
+pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn,
+ Relation relation, ReorderBufferChange *change)
+{
+ TestDecodingData *data;
+ Form_pg_class class_form;
+ TupleDesc tupdesc;
+ MemoryContext old;
+
+ data = ctx->output_plugin_private;
+ class_form = RelationGetForm(relation);
+ tupdesc = RelationGetDescr(relation);
+
+ /* Avoid leaking memory by using and resetting our own context */
+ old = MemoryContextSwitchTo(data->context);
+
+ OutputPluginPrepareWrite(ctx, true);
+
+ appendStringInfoString(ctx->out, "table ");
+ appendStringInfoString(ctx->out,
+ quote_qualified_identifier(
+ get_namespace_name(
+ get_rel_namespace(RelationGetRelid(relation))),
+ NameStr(class_form->relname)));
+ appendStringInfoString(ctx->out, ":");
+
+ switch (change->action)
+ {
+ case REORDER_BUFFER_CHANGE_INSERT:
+ appendStringInfoString(ctx->out, " INSERT:");
+ if (change->data.tp.newtuple == NULL)
+ appendStringInfoString(ctx->out, " (no-tuple-data)");
+ else
+ tuple_to_stringinfo(ctx->out, tupdesc,
+ &change->data.tp.newtuple->tuple,
+ false);
+ break;
+ case REORDER_BUFFER_CHANGE_UPDATE:
+ appendStringInfoString(ctx->out, " UPDATE:");
+ if (change->data.tp.oldtuple != NULL)
+ {
+ appendStringInfoString(ctx->out, " old-key:");
+ tuple_to_stringinfo(ctx->out, tupdesc,
+ &change->data.tp.oldtuple->tuple,
+ true);
+ appendStringInfoString(ctx->out, " new-tuple:");
+ }
+
+ if (change->data.tp.newtuple == NULL)
+ appendStringInfoString(ctx->out, " (no-tuple-data)");
+ else
+ tuple_to_stringinfo(ctx->out, tupdesc,
+ &change->data.tp.newtuple->tuple,
+ false);
+ break;
+ case REORDER_BUFFER_CHANGE_DELETE:
+ appendStringInfoString(ctx->out, " DELETE:");
+
+ /* if there was no PK, we only know that a delete happened */
+ if (change->data.tp.oldtuple == NULL)
+ appendStringInfoString(ctx->out, " (no-tuple-data)");
+ /* In DELETE, only the replica identity is present; display that */
+ else
+ tuple_to_stringinfo(ctx->out, tupdesc,
+ &change->data.tp.oldtuple->tuple,
+ true);
+ break;
+ default:
+ Assert(false);
+ }
+
+ MemoryContextSwitchTo(old);
+ MemoryContextReset(data->context);
+
+ OutputPluginWrite(ctx, true);
+}
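+
+/*
+ * Illustrative output line produced by the callbacks above for an insert
+ * (schema, table and column names here are hypothetical):
+ *
+ *   table public.tab: INSERT: id[integer]:1 data[text]:'x'
+ */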
diff --git a/contrib/test_parser/test_parser.c b/contrib/test_parser/test_parser.c
index da7f04c6e0..c41d1eb201 100644
--- a/contrib/test_parser/test_parser.c
+++ b/contrib/test_parser/test_parser.c
@@ -3,7 +3,7 @@
* test_parser.c
* Simple example of a text search parser
*
- * Copyright (c) 2007-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/test_parser/test_parser.c
@@ -38,23 +38,13 @@ typedef struct
} LexDescr;
/*
- * prototypes
+ * functions
*/
PG_FUNCTION_INFO_V1(testprs_start);
-Datum testprs_start(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(testprs_getlexeme);
-Datum testprs_getlexeme(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(testprs_end);
-Datum testprs_end(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(testprs_lextype);
-Datum testprs_lextype(PG_FUNCTION_ARGS);
-/*
- * functions
- */
Datum
testprs_start(PG_FUNCTION_ARGS)
{
diff --git a/contrib/test_shm_mq/.gitignore b/contrib/test_shm_mq/.gitignore
new file mode 100644
index 0000000000..5dcb3ff972
--- /dev/null
+++ b/contrib/test_shm_mq/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/contrib/test_shm_mq/Makefile b/contrib/test_shm_mq/Makefile
new file mode 100644
index 0000000000..5e5ac1ceb8
--- /dev/null
+++ b/contrib/test_shm_mq/Makefile
@@ -0,0 +1,20 @@
+# contrib/test_shm_mq/Makefile
+
+MODULE_big = test_shm_mq
+OBJS = test.o setup.o worker.o
+
+EXTENSION = test_shm_mq
+DATA = test_shm_mq--1.0.sql
+
+REGRESS = test_shm_mq
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/test_shm_mq
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/test_shm_mq/expected/test_shm_mq.out b/contrib/test_shm_mq/expected/test_shm_mq.out
new file mode 100644
index 0000000000..c4858b0c20
--- /dev/null
+++ b/contrib/test_shm_mq/expected/test_shm_mq.out
@@ -0,0 +1,36 @@
+CREATE EXTENSION test_shm_mq;
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_shm_mq(1024, '', 2000, 1);
+ test_shm_mq
+-------------
+
+(1 row)
+
+SELECT test_shm_mq(1024, 'a', 2001, 1);
+ test_shm_mq
+-------------
+
+(1 row)
+
+SELECT test_shm_mq(32768, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+900*random())::int)), 10000, 1);
+ test_shm_mq
+-------------
+
+(1 row)
+
+SELECT test_shm_mq(100, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+200*random())::int)), 10000, 1);
+ test_shm_mq
+-------------
+
+(1 row)
+
+SELECT test_shm_mq_pipelined(16384, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,270000)), 200, 3);
+ test_shm_mq_pipelined
+-----------------------
+
+(1 row)
+
diff --git a/contrib/test_shm_mq/setup.c b/contrib/test_shm_mq/setup.c
new file mode 100644
index 0000000000..572cf8898f
--- /dev/null
+++ b/contrib/test_shm_mq/setup.c
@@ -0,0 +1,328 @@
+/*--------------------------------------------------------------------------
+ *
+ * setup.c
+ * Code to set up a dynamic shared memory segment and a specified
+ * number of background workers for shared memory message queue
+ * testing.
+ *
+ * Copyright (C) 2013, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/test_shm_mq/setup.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "postmaster/bgworker.h"
+#include "storage/procsignal.h"
+#include "storage/shm_toc.h"
+#include "utils/memutils.h"
+
+#include "test_shm_mq.h"
+
+typedef struct
+{
+ int nworkers;
+ BackgroundWorkerHandle *handle[FLEXIBLE_ARRAY_MEMBER];
+} worker_state;
+
+static void setup_dynamic_shared_memory(int64 queue_size, int nworkers,
+ dsm_segment **segp,
+ test_shm_mq_header **hdrp,
+ shm_mq **outp, shm_mq **inp);
+static worker_state *setup_background_workers(int nworkers,
+ dsm_segment *seg);
+static void cleanup_background_workers(dsm_segment *seg, Datum arg);
+static void wait_for_workers_to_become_ready(worker_state *wstate,
+ volatile test_shm_mq_header *hdr);
+static bool check_worker_status(worker_state *wstate);
+
+/*
+ * Set up a dynamic shared memory segment and zero or more background workers
+ * for a test run.
+ */
+void
+test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp,
+ shm_mq_handle **output, shm_mq_handle **input)
+{
+ dsm_segment *seg;
+ test_shm_mq_header *hdr;
+ shm_mq *outq = NULL; /* placate compiler */
+ shm_mq *inq = NULL; /* placate compiler */
+ worker_state *wstate;
+
+ /* Set up a dynamic shared memory segment. */
+ setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq);
+ *segp = seg;
+
+ /* Register background workers. */
+ wstate = setup_background_workers(nworkers, seg);
+
+ /* Attach the queues. */
+ *output = shm_mq_attach(outq, seg, wstate->handle[0]);
+ *input = shm_mq_attach(inq, seg, wstate->handle[nworkers - 1]);
+
+ /* Wait for workers to become ready. */
+ wait_for_workers_to_become_ready(wstate, hdr);
+
+ /*
+ * Once we reach this point, all workers are ready. We no longer need to
+ * kill them if we die; they'll die on their own as the message queues
+ * shut down.
+ */
+ cancel_on_dsm_detach(seg, cleanup_background_workers,
+ PointerGetDatum(wstate));
+ pfree(wstate);
+}
+
+/*
+ * Set up a dynamic shared memory segment.
+ *
+ * We set up a small control region that contains only a test_shm_mq_header,
+ * plus one region per message queue. There are as many message queues as
+ * the number of workers, plus one.
+ */
+static void
+setup_dynamic_shared_memory(int64 queue_size, int nworkers,
+ dsm_segment **segp, test_shm_mq_header **hdrp,
+ shm_mq **outp, shm_mq **inp)
+{
+ shm_toc_estimator e;
+ int i;
+ Size segsize;
+ dsm_segment *seg;
+ shm_toc *toc;
+ test_shm_mq_header *hdr;
+
+ /* Ensure a valid queue size. */
+ if (queue_size < 0 || ((uint64) queue_size) < shm_mq_minimum_size)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("queue size must be at least %zu bytes",
+ shm_mq_minimum_size)));
+ if (queue_size != ((Size) queue_size))
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("queue size overflows size_t")));
+
+ /*
+ * Estimate how much shared memory we need.
+ *
+ * Because the TOC machinery may choose to insert padding of oddly-sized
+ * requests, we must estimate each chunk separately.
+ *
+ * We need one key to register the location of the header, and we need
+ * nworkers + 1 keys to track the locations of the message queues.
+ */
+ shm_toc_initialize_estimator(&e);
+ shm_toc_estimate_chunk(&e, sizeof(test_shm_mq_header));
+ for (i = 0; i <= nworkers; ++i)
+ shm_toc_estimate_chunk(&e, (Size) queue_size);
+ shm_toc_estimate_keys(&e, 2 + nworkers);
+ segsize = shm_toc_estimate(&e);
+
+ /* Create the shared memory segment and establish a table of contents. */
+ seg = dsm_create(shm_toc_estimate(&e));
+ toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg),
+ segsize);
+
+ /* Set up the header region. */
+ hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header));
+ SpinLockInit(&hdr->mutex);
+ hdr->workers_total = nworkers;
+ hdr->workers_attached = 0;
+ hdr->workers_ready = 0;
+ shm_toc_insert(toc, 0, hdr);
+
+ /* Set up one message queue per worker, plus one. */
+ for (i = 0; i <= nworkers; ++i)
+ {
+ shm_mq *mq;
+
+ mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size),
+ (Size) queue_size);
+ shm_toc_insert(toc, i + 1, mq);
+
+ if (i == 0)
+ {
+ /* We send messages to the first queue. */
+ shm_mq_set_sender(mq, MyProc);
+ *outp = mq;
+ }
+ if (i == nworkers)
+ {
+ /* We receive messages from the last queue. */
+ shm_mq_set_receiver(mq, MyProc);
+ *inp = mq;
+ }
+ }
+
+ /* Return results to caller. */
+ *segp = seg;
+ *hdrp = hdr;
+}
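+
+/*
+ * Illustrative TOC layout for nworkers = 2, following the key assignments
+ * made above:
+ *   key 0 -> test_shm_mq_header
+ *   key 1 -> queue written by the user backend, read by worker 1
+ *   key 2 -> queue written by worker 1, read by worker 2
+ *   key 3 -> queue written by worker 2, read by the user backend
+ */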
+
+/*
+ * Register background workers.
+ */
+static worker_state *
+setup_background_workers(int nworkers, dsm_segment *seg)
+{
+ MemoryContext oldcontext;
+ BackgroundWorker worker;
+ worker_state *wstate;
+ int i;
+
+ /*
+ * We need the worker_state object and the background worker handles to
+ * which it points to be allocated in CurTransactionContext rather than
+ * ExprContext; otherwise, they'll be destroyed before the on_dsm_detach
+ * hooks run.
+ */
+ oldcontext = MemoryContextSwitchTo(CurTransactionContext);
+
+ /* Create worker state object. */
+ wstate = MemoryContextAlloc(TopTransactionContext,
+ offsetof(worker_state, handle) +
+ sizeof(BackgroundWorkerHandle *) * nworkers);
+ wstate->nworkers = 0;
+
+ /*
+ * Arrange to kill all the workers if we abort before all workers are
+ * finished hooking themselves up to the dynamic shared memory segment.
+ *
+ * If we die after all the workers have finished hooking themselves up to
+ * the dynamic shared memory segment, we'll mark the two queues to which
+ * we're directly connected as detached, and the worker(s) connected to
+ * those queues will exit, marking any other queues to which they are
+ * connected as detached. This will cause any as-yet-unaware workers
+ * connected to those queues to exit in their turn, and so on, until
+ * everybody exits.
+ *
+ * But suppose the workers which are supposed to connect to the queues to
+ * which we're directly attached exit due to some error before they
+ * actually attach the queues. The remaining workers will have no way of
+ * knowing this. From their perspective, they're still waiting for those
+ * workers to start, when in fact they've already died.
+ */
+ on_dsm_detach(seg, cleanup_background_workers,
+ PointerGetDatum(wstate));
+
+ /* Configure a worker. */
+ worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
+ worker.bgw_start_time = BgWorkerStart_ConsistentState;
+ worker.bgw_restart_time = BGW_NEVER_RESTART;
+ worker.bgw_main = NULL; /* new worker might not have library loaded */
+ sprintf(worker.bgw_library_name, "test_shm_mq");
+ sprintf(worker.bgw_function_name, "test_shm_mq_main");
+ snprintf(worker.bgw_name, BGW_MAXLEN, "test_shm_mq");
+ worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg));
+ /* set bgw_notify_pid, so we can detect if the worker stops */
+ worker.bgw_notify_pid = MyProcPid;
+
+ /* Register the workers. */
+ for (i = 0; i < nworkers; ++i)
+ {
+ if (!RegisterDynamicBackgroundWorker(&worker, &wstate->handle[i]))
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("could not register background process"),
+ errhint("You may need to increase max_worker_processes.")));
+ ++wstate->nworkers;
+ }
+
+ /* All done. */
+ MemoryContextSwitchTo(oldcontext);
+ return wstate;
+}
+
+static void
+cleanup_background_workers(dsm_segment *seg, Datum arg)
+{
+ worker_state *wstate = (worker_state *) DatumGetPointer(arg);
+
+ while (wstate->nworkers > 0)
+ {
+ --wstate->nworkers;
+ TerminateBackgroundWorker(wstate->handle[wstate->nworkers]);
+ }
+}
+
+static void
+wait_for_workers_to_become_ready(worker_state *wstate,
+ volatile test_shm_mq_header *hdr)
+{
+ bool save_set_latch_on_sigusr1;
+ bool result = false;
+
+ save_set_latch_on_sigusr1 = set_latch_on_sigusr1;
+ set_latch_on_sigusr1 = true;
+
+ PG_TRY();
+ {
+ for (;;)
+ {
+ int workers_ready;
+
+ /* If all the workers are ready, we have succeeded. */
+ SpinLockAcquire(&hdr->mutex);
+ workers_ready = hdr->workers_ready;
+ SpinLockRelease(&hdr->mutex);
+ if (workers_ready >= wstate->nworkers)
+ {
+ result = true;
+ break;
+ }
+
+ /* If any workers (or the postmaster) have died, we have failed. */
+ if (!check_worker_status(wstate))
+ {
+ result = false;
+ break;
+ }
+
+ /* Wait to be signalled. */
+ WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
+
+ /* An interrupt may have occurred while we were waiting. */
+ CHECK_FOR_INTERRUPTS();
+
+ /* Reset the latch so we don't spin. */
+ ResetLatch(&MyProc->procLatch);
+ }
+ }
+ PG_CATCH();
+ {
+ set_latch_on_sigusr1 = save_set_latch_on_sigusr1;
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ if (!result)
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("one or more background workers failed to start")));
+}
+
+static bool
+check_worker_status(worker_state *wstate)
+{
+ int n;
+
+ /* If any workers (or the postmaster) have died, we have failed. */
+ for (n = 0; n < wstate->nworkers; ++n)
+ {
+ BgwHandleStatus status;
+ pid_t pid;
+
+ status = GetBackgroundWorkerPid(wstate->handle[n], &pid);
+ if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED)
+ return false;
+ }
+
+ /* Otherwise, things still look OK. */
+ return true;
+}
diff --git a/contrib/test_shm_mq/sql/test_shm_mq.sql b/contrib/test_shm_mq/sql/test_shm_mq.sql
new file mode 100644
index 0000000000..9de19d304a
--- /dev/null
+++ b/contrib/test_shm_mq/sql/test_shm_mq.sql
@@ -0,0 +1,12 @@
+CREATE EXTENSION test_shm_mq;
+
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_shm_mq(1024, '', 2000, 1);
+SELECT test_shm_mq(1024, 'a', 2001, 1);
+SELECT test_shm_mq(32768, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+900*random())::int)), 10000, 1);
+SELECT test_shm_mq(100, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+200*random())::int)), 10000, 1);
+SELECT test_shm_mq_pipelined(16384, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,270000)), 200, 3);
diff --git a/contrib/test_shm_mq/test.c b/contrib/test_shm_mq/test.c
new file mode 100644
index 0000000000..8750bae8db
--- /dev/null
+++ b/contrib/test_shm_mq/test.c
@@ -0,0 +1,262 @@
+/*--------------------------------------------------------------------------
+ *
+ * test.c
+ * Test harness code for shared memory message queues.
+ *
+ * Copyright (C) 2013, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/test_shm_mq/test.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "fmgr.h"
+#include "miscadmin.h"
+
+#include "test_shm_mq.h"
+
+PG_MODULE_MAGIC;
+
+PG_FUNCTION_INFO_V1(test_shm_mq);
+PG_FUNCTION_INFO_V1(test_shm_mq_pipelined);
+
+void _PG_init(void);
+
+static void verify_message(Size origlen, char *origdata, Size newlen,
+ char *newdata);
+
+/*
+ * Simple test of the shared memory message queue infrastructure.
+ *
+ * We set up a ring of message queues passing through 1 or more background
+ * processes and eventually looping back to ourselves. We then send a message
+ * through the ring a number of times indicated by the loop count. At the end,
+ * we check whether the final message matches the one we started with.
+ */
+Datum
+test_shm_mq(PG_FUNCTION_ARGS)
+{
+ int64 queue_size = PG_GETARG_INT64(0);
+ text *message = PG_GETARG_TEXT_PP(1);
+ char *message_contents = VARDATA_ANY(message);
+ int message_size = VARSIZE_ANY_EXHDR(message);
+ int32 loop_count = PG_GETARG_INT32(2);
+ int32 nworkers = PG_GETARG_INT32(3);
+ dsm_segment *seg;
+ shm_mq_handle *outqh;
+ shm_mq_handle *inqh;
+ shm_mq_result res;
+ Size len;
+ void *data;
+
+ /* A negative loopcount is nonsensical. */
+ if (loop_count < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("repeat count size must be a non-negative integer")));
+
+ /*
+ * Since this test sends data using the blocking interfaces, it cannot
+ * send data to itself. Therefore, a minimum of 1 worker is required. Of
+ * course, a negative worker count is nonsensical.
+ */
+ if (nworkers < 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("number of workers must be a positive integer")));
+
+ /* Set up dynamic shared memory segment and background workers. */
+ test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);
+
+ /* Send the initial message. */
+ res = shm_mq_send(outqh, message_size, message_contents, false);
+ if (res != SHM_MQ_SUCCESS)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not send message")));
+
+ /*
+ * Receive a message and send it back out again. Do this a number of
+ * times equal to the loop count.
+ */
+ for (;;)
+ {
+ /* Receive a message. */
+ res = shm_mq_receive(inqh, &len, &data, false);
+ if (res != SHM_MQ_SUCCESS)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not receive message")));
+
+ /* If this is supposed to be the last iteration, stop here. */
+ if (--loop_count <= 0)
+ break;
+
+ /* Send it back out. */
+ res = shm_mq_send(outqh, len, data, false);
+ if (res != SHM_MQ_SUCCESS)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not send message")));
+ }
+
+ /*
+ * Finally, check that we got back the same message from the last
+ * iteration that we originally sent.
+ */
+ verify_message(message_size, message_contents, len, data);
+
+ /* Clean up. */
+ dsm_detach(seg);
+
+ PG_RETURN_VOID();
+}
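+
+/*
+ * Example invocation, taken from sql/test_shm_mq.sql (arguments are
+ * queue_size, message, repeat_count, num_workers):
+ *
+ *   SELECT test_shm_mq(1024, 'a', 2001, 1);
+ */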
+
+/*
+ * Pipelined test of the shared memory message queue infrastructure.
+ *
+ * As in the basic test, we set up a ring of message queues passing through
+ * 1 or more background processes and eventually looping back to ourselves.
+ * Then, we send N copies of the user-specified message through the ring and
+ * receive them all back. Since this might fill up all message queues in the
+ * ring and then stall, we must be prepared to begin receiving the messages
+ * back before we've finished sending them.
+ */
+Datum
+test_shm_mq_pipelined(PG_FUNCTION_ARGS)
+{
+ int64 queue_size = PG_GETARG_INT64(0);
+ text *message = PG_GETARG_TEXT_PP(1);
+ char *message_contents = VARDATA_ANY(message);
+ int message_size = VARSIZE_ANY_EXHDR(message);
+ int32 loop_count = PG_GETARG_INT32(2);
+ int32 nworkers = PG_GETARG_INT32(3);
+ bool verify = PG_GETARG_BOOL(4);
+ int32 send_count = 0;
+ int32 receive_count = 0;
+ dsm_segment *seg;
+ shm_mq_handle *outqh;
+ shm_mq_handle *inqh;
+ shm_mq_result res;
+ Size len;
+ void *data;
+
+ /* A negative loopcount is nonsensical. */
+ if (loop_count < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("repeat count size must be a non-negative integer")));
+
+ /*
+ * Using the nonblocking interfaces, we can even send data to ourselves,
+ * so the minimum number of workers for this test is zero.
+ */
+ if (nworkers < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("number of workers must be a non-negative integer")));
+
+ /* Set up dynamic shared memory segment and background workers. */
+ test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh);
+
+ /* Main loop. */
+ for (;;)
+ {
+ bool wait = true;
+
+ /*
+ * If we haven't yet sent the message the requisite number of times,
+ * try again to send it now. Note that when shm_mq_send() returns
+ * SHM_MQ_WOULD_BLOCK, the next call to that function must pass the
+ * same message size and contents; that's not an issue here because
+ * we're sending the same message every time.
+ */
+ if (send_count < loop_count)
+ {
+ res = shm_mq_send(outqh, message_size, message_contents, true);
+ if (res == SHM_MQ_SUCCESS)
+ {
+ ++send_count;
+ wait = false;
+ }
+ else if (res == SHM_MQ_DETACHED)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not send message")));
+ }
+
+ /*
+ * If we haven't yet received the message the requisite number of
+ * times, try to receive it again now.
+ */
+ if (receive_count < loop_count)
+ {
+ res = shm_mq_receive(inqh, &len, &data, true);
+ if (res == SHM_MQ_SUCCESS)
+ {
+ ++receive_count;
+ /* Verifying every time is slow, so it's optional. */
+ if (verify)
+ verify_message(message_size, message_contents, len, data);
+ wait = false;
+ }
+ else if (res == SHM_MQ_DETACHED)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not receive message")));
+ }
+ else
+ {
+ /*
+ * Otherwise, we've received the message enough times. This
+ * shouldn't happen unless we've also sent it enough times.
+ */
+ if (send_count != receive_count)
+ ereport(ERROR,
+ (errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("message sent %d times, but received %d times",
+ send_count, receive_count)));
+ break;
+ }
+
+ if (wait)
+ {
+ /*
+ * If we made no progress, wait for one of the other processes to
+ * which we are connected to set our latch, indicating that they
+ * have read or written data and therefore there may now be work
+ * for us to do.
+ */
+ WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0);
+ CHECK_FOR_INTERRUPTS();
+ ResetLatch(&MyProc->procLatch);
+ }
+ }
+
+ /* Clean up. */
+ dsm_detach(seg);
+
+ PG_RETURN_VOID();
+}
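+
+/*
+ * Example invocation, taken from sql/test_shm_mq.sql: push 200 copies of a
+ * large random string through a 16kB ring spanning 3 workers:
+ *
+ *   SELECT test_shm_mq_pipelined(16384, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,270000)), 200, 3);
+ */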
+
+/*
+ * Verify that two messages are the same.
+ */
+static void
+verify_message(Size origlen, char *origdata, Size newlen, char *newdata)
+{
+ Size i;
+
+ if (origlen != newlen)
+ ereport(ERROR,
+ (errmsg("message corrupted"),
+ errdetail("The original message was %zu bytes but the final message is %zu bytes.",
+ origlen, newlen)));
+
+ for (i = 0; i < origlen; ++i)
+ if (origdata[i] != newdata[i])
+ ereport(ERROR,
+ (errmsg("message corrupted"),
+ errdetail("The new and original messages differ at byte %zu of %zu.", i, origlen)));
+}
diff --git a/contrib/test_shm_mq/test_shm_mq--1.0.sql b/contrib/test_shm_mq/test_shm_mq--1.0.sql
new file mode 100644
index 0000000000..54b225e2ae
--- /dev/null
+++ b/contrib/test_shm_mq/test_shm_mq--1.0.sql
@@ -0,0 +1,19 @@
+/* contrib/test_shm_mq/test_shm_mq--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_shm_mq" to load this file. \quit
+
+CREATE FUNCTION test_shm_mq(queue_size pg_catalog.int8,
+ message pg_catalog.text,
+ repeat_count pg_catalog.int4 default 1,
+ num_workers pg_catalog.int4 default 1)
+ RETURNS pg_catalog.void STRICT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
+
+CREATE FUNCTION test_shm_mq_pipelined(queue_size pg_catalog.int8,
+ message pg_catalog.text,
+ repeat_count pg_catalog.int4 default 1,
+ num_workers pg_catalog.int4 default 1,
+ verify pg_catalog.bool default true)
+ RETURNS pg_catalog.void STRICT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/contrib/test_shm_mq/test_shm_mq.control b/contrib/test_shm_mq/test_shm_mq.control
new file mode 100644
index 0000000000..d9a74c7a32
--- /dev/null
+++ b/contrib/test_shm_mq/test_shm_mq.control
@@ -0,0 +1,4 @@
+comment = 'Test code for shared memory message queues'
+default_version = '1.0'
+module_pathname = '$libdir/test_shm_mq'
+relocatable = true
diff --git a/contrib/test_shm_mq/test_shm_mq.h b/contrib/test_shm_mq/test_shm_mq.h
new file mode 100644
index 0000000000..7ebfba902f
--- /dev/null
+++ b/contrib/test_shm_mq/test_shm_mq.h
@@ -0,0 +1,45 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_shm_mq.h
+ * Definitions for shared memory message queues
+ *
+ * Copyright (C) 2013, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/test_shm_mq/test_shm_mq.h
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#ifndef TEST_SHM_MQ_H
+#define TEST_SHM_MQ_H
+
+#include "storage/dsm.h"
+#include "storage/shm_mq.h"
+#include "storage/spin.h"
+
+/* Identifier for shared memory segments used by this extension. */
+#define PG_TEST_SHM_MQ_MAGIC 0x79fb2447
+
+/*
+ * This structure is stored in the dynamic shared memory segment. We use
+ * it to determine whether all workers started up OK and successfully
+ * attached to their respective shared message queues.
+ */
+typedef struct
+{
+ slock_t mutex;
+ int workers_total;
+ int workers_attached;
+ int workers_ready;
+} test_shm_mq_header;
+
+/* Set up dynamic shared memory and background workers for test run. */
+extern void test_shm_mq_setup(int64 queue_size, int32 nworkers,
+ dsm_segment **seg, shm_mq_handle **output,
+ shm_mq_handle **input);
+
+/* Main entrypoint for a worker. */
+extern void test_shm_mq_main(Datum);
+
+#endif
diff --git a/contrib/test_shm_mq/worker.c b/contrib/test_shm_mq/worker.c
new file mode 100644
index 0000000000..0d66c92ddb
--- /dev/null
+++ b/contrib/test_shm_mq/worker.c
@@ -0,0 +1,224 @@
+/*--------------------------------------------------------------------------
+ *
+ * worker.c
+ * Code for a sample worker making use of shared memory message queues.
+ * Our test worker simply reads messages from one message queue and
+ * writes them back out to another message queue. In a real
+ * application, you'd presumably want the worker to do some more
+ * complex calculation rather than simply returning the input,
+ * but it should be possible to use much of the control logic just
+ * as presented here.
+ *
+ * Copyright (C) 2013, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/test_shm_mq/worker.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "miscadmin.h"
+#include "storage/ipc.h"
+#include "storage/procarray.h"
+#include "storage/shm_mq.h"
+#include "storage/shm_toc.h"
+#include "utils/resowner.h"
+
+#include "test_shm_mq.h"
+
+static void handle_sigterm(SIGNAL_ARGS);
+static void attach_to_queues(dsm_segment *seg, shm_toc *toc,
+ int myworkernumber, shm_mq_handle **inqhp,
+ shm_mq_handle **outqhp);
+static void copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh);
+
+/*
+ * Background worker entrypoint.
+ *
+ * This is intended to demonstrate how a background worker can be used to
+ * facilitate a parallel computation. Most of the logic here is fairly
+ * boilerplate stuff, designed to attach to the shared memory segment,
+ * notify the user backend that we're alive, and so on. The
+ * application-specific bits of logic that you'd replace for your own worker
+ * are attach_to_queues() and copy_messages().
+ */
+void
+test_shm_mq_main(Datum main_arg)
+{
+ dsm_segment *seg;
+ shm_toc *toc;
+ shm_mq_handle *inqh;
+ shm_mq_handle *outqh;
+ volatile test_shm_mq_header *hdr;
+ int myworkernumber;
+ PGPROC *registrant;
+
+ /*
+ * Establish signal handlers.
+ *
+ * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just as
+ * it would a normal user backend. To make that happen, we establish a
+ * signal handler that is a stripped-down version of die(). We don't have
+ * any equivalent of the backend's command-read loop, where interrupts can
+ * be processed immediately, so make sure ImmediateInterruptOK is turned
+ * off.
+ */
+ pqsignal(SIGTERM, handle_sigterm);
+ ImmediateInterruptOK = false;
+ BackgroundWorkerUnblockSignals();
+
+ /*
+ * Connect to the dynamic shared memory segment.
+ *
+ * The backend that registered this worker passed us the ID of a shared
+ * memory segment to which we must attach for further instructions. In
+ * order to attach to dynamic shared memory, we need a resource owner.
+ * Once we've mapped the segment in our address space, attach to the table
+ * of contents so we can locate the various data structures we'll need to
+ * find within the segment.
+ */
+ CurrentResourceOwner = ResourceOwnerCreate(NULL, "test_shm_mq worker");
+ seg = dsm_attach(DatumGetInt32(main_arg));
+ if (seg == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("unable to map dynamic shared memory segment")));
+ toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg));
+ if (toc == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("bad magic number in dynamic shared memory segment")));
+
+ /*
+ * Acquire a worker number.
+ *
+ * By convention, the process registering this background worker should
+ * have stored the control structure at key 0. We look up that key to
+ * find it. Our worker number gives our identity: there may be just one
+ * worker involved in this parallel operation, or there may be many.
+ */
+ hdr = shm_toc_lookup(toc, 0);
+ SpinLockAcquire(&hdr->mutex);
+ myworkernumber = ++hdr->workers_attached;
+ SpinLockRelease(&hdr->mutex);
+ if (myworkernumber > hdr->workers_total)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("too many message queue testing workers already")));
+
+ /*
+ * Attach to the appropriate message queues.
+ */
+ attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh);
+
+ /*
+ * Indicate that we're fully initialized and ready to begin the main part
+ * of the parallel operation.
+ *
+ * Once we signal that we're ready, the user backend is entitled to assume
+ * that our on_dsm_detach callbacks will fire before we disconnect from
+ * the shared memory segment and exit. Generally, that means we must have
+ * attached to all relevant dynamic shared memory data structures by now.
+ */
+ SpinLockAcquire(&hdr->mutex);
+ ++hdr->workers_ready;
+ SpinLockRelease(&hdr->mutex);
+ registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid);
+ if (registrant == NULL)
+ {
+ elog(DEBUG1, "registrant backend has exited prematurely");
+ proc_exit(1);
+ }
+ SetLatch(&registrant->procLatch);
+
+ /* Do the work. */
+ copy_messages(inqh, outqh);
+
+ /*
+ * We're done. Explicitly detach the shared memory segment so that we
+ * don't get a resource leak warning at commit time. This will fire any
+ * on_dsm_detach callbacks we've registered, as well. Once that's done,
+ * we can go ahead and exit.
+ */
+ dsm_detach(seg);
+ proc_exit(1);
+}
+
+/*
+ * Attach to shared memory message queues.
+ *
+ * We use our worker number to determine to which queue we should attach.
+ * The queues are registered at keys 1..<number-of-workers + 1>. The user
+ * backend writes to queue #1 and reads from queue #<number-of-workers + 1>;
+ * each worker
+ * reads from the queue whose number is equal to its worker number and writes
+ * to the next higher-numbered queue.
+ */
+static void
+attach_to_queues(dsm_segment *seg, shm_toc *toc, int myworkernumber,
+ shm_mq_handle **inqhp, shm_mq_handle **outqhp)
+{
+ shm_mq *inq;
+ shm_mq *outq;
+
+ inq = shm_toc_lookup(toc, myworkernumber);
+ shm_mq_set_receiver(inq, MyProc);
+ *inqhp = shm_mq_attach(inq, seg, NULL);
+ outq = shm_toc_lookup(toc, myworkernumber + 1);
+ shm_mq_set_sender(outq, MyProc);
+ *outqhp = shm_mq_attach(outq, seg, NULL);
+}
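+
+/*
+ * For example (following the numbering above), with 3 workers, worker 2 reads
+ * from the queue at key 2 and writes to the queue at key 3, while the user
+ * backend writes to key 1 and reads from key 4.
+ */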
+
+/*
+ * Loop, receiving and sending messages, until the connection is broken.
+ *
+ * This is the "real work" performed by this worker process. Everything that
+ * happens before this is initialization of one form or another, and everything
+ * after this point is cleanup.
+ */
+static void
+copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh)
+{
+ Size len;
+ void *data;
+ shm_mq_result res;
+
+ for (;;)
+ {
+ /* Notice any interrupts that have occurred. */
+ CHECK_FOR_INTERRUPTS();
+
+ /* Receive a message. */
+ res = shm_mq_receive(inqh, &len, &data, false);
+ if (res != SHM_MQ_SUCCESS)
+ break;
+
+ /* Send it back out. */
+ res = shm_mq_send(outqh, len, data, false);
+ if (res != SHM_MQ_SUCCESS)
+ break;
+ }
+}
+
+/*
+ * When we receive a SIGTERM, we set InterruptPending and ProcDiePending just
+ * like a normal backend. The next CHECK_FOR_INTERRUPTS() will do the right
+ * thing.
+ */
+static void
+handle_sigterm(SIGNAL_ARGS)
+{
+ int save_errno = errno;
+
+ if (MyProc)
+ SetLatch(&MyProc->procLatch);
+
+ if (!proc_exit_inprogress)
+ {
+ InterruptPending = true;
+ ProcDiePending = true;
+ }
+
+ errno = save_errno;
+}
diff --git a/contrib/tsearch2/tsearch2.c b/contrib/tsearch2/tsearch2.c
index 968bd80a17..bd30d87515 100644
--- a/contrib/tsearch2/tsearch2.c
+++ b/contrib/tsearch2/tsearch2.c
@@ -3,7 +3,7 @@
* tsearch2.c
* Backwards-compatibility package for old contrib/tsearch2 API
*
- * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
@@ -45,7 +45,7 @@ static Oid current_parser_oid = InvalidOid;
CStringGetDatum(text_to_cstring(text))))
#define UNSUPPORTED_FUNCTION(name) \
- Datum name(PG_FUNCTION_ARGS); \
+ PG_FUNCTION_INFO_V1(name); \
Datum \
name(PG_FUNCTION_ARGS) \
{ \
@@ -57,30 +57,11 @@ static Oid current_parser_oid = InvalidOid;
/* keep compiler quiet */ \
PG_RETURN_NULL(); \
} \
- PG_FUNCTION_INFO_V1(name)
+ extern int no_such_variable
static Oid GetCurrentDict(void);
static Oid GetCurrentParser(void);
-Datum tsa_lexize_byname(PG_FUNCTION_ARGS);
-Datum tsa_lexize_bycurrent(PG_FUNCTION_ARGS);
-Datum tsa_set_curdict(PG_FUNCTION_ARGS);
-Datum tsa_set_curdict_byname(PG_FUNCTION_ARGS);
-Datum tsa_token_type_current(PG_FUNCTION_ARGS);
-Datum tsa_set_curprs(PG_FUNCTION_ARGS);
-Datum tsa_set_curprs_byname(PG_FUNCTION_ARGS);
-Datum tsa_parse_current(PG_FUNCTION_ARGS);
-Datum tsa_set_curcfg(PG_FUNCTION_ARGS);
-Datum tsa_set_curcfg_byname(PG_FUNCTION_ARGS);
-Datum tsa_to_tsvector_name(PG_FUNCTION_ARGS);
-Datum tsa_to_tsquery_name(PG_FUNCTION_ARGS);
-Datum tsa_plainto_tsquery_name(PG_FUNCTION_ARGS);
-Datum tsa_headline_byname(PG_FUNCTION_ARGS);
-Datum tsa_ts_stat(PG_FUNCTION_ARGS);
-Datum tsa_tsearch2(PG_FUNCTION_ARGS);
-Datum tsa_rewrite_accum(PG_FUNCTION_ARGS);
-Datum tsa_rewrite_finish(PG_FUNCTION_ARGS);
-
PG_FUNCTION_INFO_V1(tsa_lexize_byname);
PG_FUNCTION_INFO_V1(tsa_lexize_bycurrent);
PG_FUNCTION_INFO_V1(tsa_set_curdict);
diff --git a/contrib/unaccent/unaccent.c b/contrib/unaccent/unaccent.c
index 262d5ec15f..a337df61af 100644
--- a/contrib/unaccent/unaccent.c
+++ b/contrib/unaccent/unaccent.c
@@ -3,7 +3,7 @@
* unaccent.c
* Text search unaccent dictionary
*
- * Copyright (c) 2009-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2009-2014, PostgreSQL Global Development Group
*
* IDENTIFICATION
* contrib/unaccent/unaccent.c
@@ -23,30 +23,29 @@
PG_MODULE_MAGIC;
/*
- * Unaccent dictionary uses uncompressed suffix tree to find a
- * character to replace. Each node of tree is an array of
- * SuffixChar struct with length = 256 (n-th element of array
+ * Unaccent dictionary uses a trie to find a character to replace. Each node of
+ * the trie is an array of 256 TrieChar structs (n-th element of array
* corresponds to byte)
*/
-typedef struct SuffixChar
+typedef struct TrieChar
{
- struct SuffixChar *nextChar;
+ struct TrieChar *nextChar;
char *replaceTo;
int replacelen;
-} SuffixChar;
+} TrieChar;
/*
- * placeChar - put str into tree's structure, byte by byte.
+ * placeChar - put str into trie's structure, byte by byte.
*/
-static SuffixChar *
-placeChar(SuffixChar *node, unsigned char *str, int lenstr, char *replaceTo, int replacelen)
+static TrieChar *
+placeChar(TrieChar *node, unsigned char *str, int lenstr, char *replaceTo, int replacelen)
{
- SuffixChar *curnode;
+ TrieChar *curnode;
if (!node)
{
- node = palloc(sizeof(SuffixChar) * 256);
- memset(node, 0, sizeof(SuffixChar) * 256);
+ node = palloc(sizeof(TrieChar) * 256);
+ memset(node, 0, sizeof(TrieChar) * 256);
}
curnode = node + *str;
@@ -71,13 +70,14 @@ placeChar(SuffixChar *node, unsigned char *str, int lenstr, char *replaceTo, int
}
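
For a concrete sense of how the trie is populated (an editor's sketch under an assumed rules-file entry, not part of the patch): a mapping from the two-byte UTF-8 sequence for "À" (0xC3 0x80) to "A" descends one trie level per byte.

/* Editor's sketch only: a hypothetical helper inserting one mapping. */
static void
add_agrave_rule(TrieChar **rootp)
{
	/* UTF-8 for U+00C0 (À) is the byte pair 0xC3 0x80 */
	*rootp = placeChar(*rootp, (unsigned char *) "\xC3\x80", 2, "A", 1);

	/*
	 * (*rootp)[0xC3].nextChar[0x80] now holds replaceTo = "A" with
	 * replacelen = 1, which is what findReplaceTo() resolves when the
	 * lexizer sees "À".
	 */
}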
/*
- * initSuffixTree - create suffix tree from file. Function converts
- * UTF8-encoded file into current encoding.
+ * initTrie - create trie from file.
+ *
+ * Function converts UTF8-encoded file into current encoding.
*/
-static SuffixChar *
-initSuffixTree(char *filename)
+static TrieChar *
+initTrie(char *filename)
{
- SuffixChar *volatile rootSuffixTree = NULL;
+ TrieChar *volatile rootTrie = NULL;
MemoryContext ccxt = CurrentMemoryContext;
tsearch_readline_state trst;
volatile bool skip;
@@ -161,9 +161,9 @@ initSuffixTree(char *filename)
}
if (state >= 3)
- rootSuffixTree = placeChar(rootSuffixTree,
- (unsigned char *) src, srclen,
- trg, trglen);
+ rootTrie = placeChar(rootTrie,
+ (unsigned char *) src, srclen,
+ trg, trglen);
pfree(line);
}
@@ -192,14 +192,14 @@ initSuffixTree(char *filename)
tsearch_readline_end(&trst);
- return rootSuffixTree;
+ return rootTrie;
}
/*
- * findReplaceTo - find multibyte character in tree
+ * findReplaceTo - find multibyte character in trie
*/
-static SuffixChar *
-findReplaceTo(SuffixChar *node, unsigned char *src, int srclen)
+static TrieChar *
+findReplaceTo(TrieChar *node, unsigned char *src, int srclen)
{
while (node)
{
@@ -216,12 +216,11 @@ findReplaceTo(SuffixChar *node, unsigned char *src, int srclen)
}
PG_FUNCTION_INFO_V1(unaccent_init);
-Datum unaccent_init(PG_FUNCTION_ARGS);
Datum
unaccent_init(PG_FUNCTION_ARGS)
{
List *dictoptions = (List *) PG_GETARG_POINTER(0);
- SuffixChar *rootSuffixTree = NULL;
+ TrieChar *rootTrie = NULL;
bool fileloaded = false;
ListCell *l;
@@ -235,7 +234,7 @@ unaccent_init(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("multiple Rules parameters")));
- rootSuffixTree = initSuffixTree(defGetString(defel));
+ rootTrie = initTrie(defGetString(defel));
fileloaded = true;
}
else
@@ -254,29 +253,28 @@ unaccent_init(PG_FUNCTION_ARGS)
errmsg("missing Rules parameter")));
}
- PG_RETURN_POINTER(rootSuffixTree);
+ PG_RETURN_POINTER(rootTrie);
}
PG_FUNCTION_INFO_V1(unaccent_lexize);
-Datum unaccent_lexize(PG_FUNCTION_ARGS);
Datum
unaccent_lexize(PG_FUNCTION_ARGS)
{
- SuffixChar *rootSuffixTree = (SuffixChar *) PG_GETARG_POINTER(0);
+ TrieChar *rootTrie = (TrieChar *) PG_GETARG_POINTER(0);
char *srcchar = (char *) PG_GETARG_POINTER(1);
int32 len = PG_GETARG_INT32(2);
char *srcstart,
*trgchar = NULL;
int charlen;
TSLexeme *res = NULL;
- SuffixChar *node;
+ TrieChar *node;
srcstart = srcchar;
while (srcchar - srcstart < len)
{
charlen = pg_mblen(srcchar);
- node = findReplaceTo(rootSuffixTree, (unsigned char *) srcchar, charlen);
+ node = findReplaceTo(rootTrie, (unsigned char *) srcchar, charlen);
if (node && node->replaceTo)
{
if (!res)
@@ -313,7 +311,6 @@ unaccent_lexize(PG_FUNCTION_ARGS)
* Function-like wrapper for dictionary
*/
PG_FUNCTION_INFO_V1(unaccent_dict);
-Datum unaccent_dict(PG_FUNCTION_ARGS);
Datum
unaccent_dict(PG_FUNCTION_ARGS)
{
diff --git a/contrib/uuid-ossp/.gitignore b/contrib/uuid-ossp/.gitignore
new file mode 100644
index 0000000000..6c989c7872
--- /dev/null
+++ b/contrib/uuid-ossp/.gitignore
@@ -0,0 +1,6 @@
+/md5.c
+/sha1.c
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/contrib/uuid-ossp/Makefile b/contrib/uuid-ossp/Makefile
index 9b2d2e3ff9..335cc7ef50 100644
--- a/contrib/uuid-ossp/Makefile
+++ b/contrib/uuid-ossp/Makefile
@@ -1,12 +1,21 @@
# contrib/uuid-ossp/Makefile
MODULE_big = uuid-ossp
-OBJS = uuid-ossp.o
+OBJS = uuid-ossp.o $(UUID_EXTRA_OBJS)
EXTENSION = uuid-ossp
DATA = uuid-ossp--1.0.sql uuid-ossp--unpackaged--1.0.sql
-SHLIB_LINK += $(OSSP_UUID_LIBS)
+REGRESS = uuid_ossp
+
+SHLIB_LINK += $(UUID_LIBS)
+
+# We copy some needed files verbatim from pgcrypto
+pgcrypto_src = $(top_srcdir)/contrib/pgcrypto
+
+PG_CPPFLAGS = -I$(pgcrypto_src)
+
+EXTRA_CLEAN = md5.c sha1.c
ifdef USE_PGXS
PG_CONFIG = pg_config
@@ -18,3 +27,6 @@ top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
+
+md5.c sha1.c: % : $(pgcrypto_src)/%
+ rm -f $@ && $(LN_S) $< .
diff --git a/contrib/uuid-ossp/expected/uuid_ossp.out b/contrib/uuid-ossp/expected/uuid_ossp.out
new file mode 100644
index 0000000000..409c885c33
--- /dev/null
+++ b/contrib/uuid-ossp/expected/uuid_ossp.out
@@ -0,0 +1,139 @@
+CREATE EXTENSION "uuid-ossp";
+SELECT uuid_nil();
+ uuid_nil
+--------------------------------------
+ 00000000-0000-0000-0000-000000000000
+(1 row)
+
+SELECT uuid_ns_dns();
+ uuid_ns_dns
+--------------------------------------
+ 6ba7b810-9dad-11d1-80b4-00c04fd430c8
+(1 row)
+
+SELECT uuid_ns_url();
+ uuid_ns_url
+--------------------------------------
+ 6ba7b811-9dad-11d1-80b4-00c04fd430c8
+(1 row)
+
+SELECT uuid_ns_oid();
+ uuid_ns_oid
+--------------------------------------
+ 6ba7b812-9dad-11d1-80b4-00c04fd430c8
+(1 row)
+
+SELECT uuid_ns_x500();
+ uuid_ns_x500
+--------------------------------------
+ 6ba7b814-9dad-11d1-80b4-00c04fd430c8
+(1 row)
+
+-- some quick and dirty field extraction functions
+-- this is actually timestamp concatenated with clock sequence, per RFC 4122
+CREATE FUNCTION uuid_timestamp_bits(uuid) RETURNS varbit AS
+$$ SELECT ('x' || substr($1::text, 15, 4) || substr($1::text, 10, 4) ||
+ substr($1::text, 1, 8) || substr($1::text, 20, 4))::bit(80)
+ & x'0FFFFFFFFFFFFFFF3FFF' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+CREATE FUNCTION uuid_version_bits(uuid) RETURNS varbit AS
+$$ SELECT ('x' || substr($1::text, 15, 2))::bit(8) & '11110000' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+CREATE FUNCTION uuid_reserved_bits(uuid) RETURNS varbit AS
+$$ SELECT ('x' || substr($1::text, 20, 2))::bit(8) & '11000000' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+CREATE FUNCTION uuid_multicast_bit(uuid) RETURNS bool AS
+$$ SELECT (('x' || substr($1::text, 25, 2))::bit(8) & '00000001') != '00000000' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+CREATE FUNCTION uuid_local_admin_bit(uuid) RETURNS bool AS
+$$ SELECT (('x' || substr($1::text, 25, 2))::bit(8) & '00000010') != '00000000' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+CREATE FUNCTION uuid_node(uuid) RETURNS text AS
+$$ SELECT substr($1::text, 25) $$
+LANGUAGE SQL STRICT IMMUTABLE;
+-- Ideally, the multicast bit would never be set in V1 output, but the
+-- UUID library may fall back to MC if it can't get the system MAC address.
+-- Also, the local-admin bit might be set (if so, we're probably inside a VM).
+-- So we can't test either bit here.
+SELECT uuid_version_bits(uuid_generate_v1()),
+ uuid_reserved_bits(uuid_generate_v1());
+ uuid_version_bits | uuid_reserved_bits
+-------------------+--------------------
+ 00010000 | 10000000
+(1 row)
+
+-- Although RFC 4122 only requires the multicast bit to be set in V1MC style
+-- UUIDs, our implementation always sets the local-admin bit as well.
+SELECT uuid_version_bits(uuid_generate_v1mc()),
+ uuid_reserved_bits(uuid_generate_v1mc()),
+ uuid_multicast_bit(uuid_generate_v1mc()),
+ uuid_local_admin_bit(uuid_generate_v1mc());
+ uuid_version_bits | uuid_reserved_bits | uuid_multicast_bit | uuid_local_admin_bit
+-------------------+--------------------+--------------------+----------------------
+ 00010000 | 10000000 | t | t
+(1 row)
+
+-- timestamp+clock sequence should be monotonic increasing in v1
+SELECT uuid_timestamp_bits(uuid_generate_v1()) < uuid_timestamp_bits(uuid_generate_v1());
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT uuid_timestamp_bits(uuid_generate_v1mc()) < uuid_timestamp_bits(uuid_generate_v1mc());
+ ?column?
+----------
+ t
+(1 row)
+
+-- Ideally, the node value is stable in V1 addresses, but OSSP UUID
+-- falls back to V1MC behavior if it can't get the system MAC address.
+SELECT CASE WHEN uuid_multicast_bit(uuid_generate_v1()) AND
+ uuid_local_admin_bit(uuid_generate_v1()) THEN
+ true -- punt, no test
+ ELSE
+ uuid_node(uuid_generate_v1()) = uuid_node(uuid_generate_v1())
+ END;
+ case
+------
+ t
+(1 row)
+
+-- In any case, V1MC node addresses should be random.
+SELECT uuid_node(uuid_generate_v1()) <> uuid_node(uuid_generate_v1mc());
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT uuid_node(uuid_generate_v1mc()) <> uuid_node(uuid_generate_v1mc());
+ ?column?
+----------
+ t
+(1 row)
+
+SELECT uuid_generate_v3(uuid_ns_dns(), 'www.widgets.com');
+ uuid_generate_v3
+--------------------------------------
+ 3d813cbb-47fb-32ba-91df-831e1593ac29
+(1 row)
+
+SELECT uuid_generate_v5(uuid_ns_dns(), 'www.widgets.com');
+ uuid_generate_v5
+--------------------------------------
+ 21f7f8de-8051-5b89-8680-0195ef798b6a
+(1 row)
+
+SELECT uuid_version_bits(uuid_generate_v4()),
+ uuid_reserved_bits(uuid_generate_v4());
+ uuid_version_bits | uuid_reserved_bits
+-------------------+--------------------
+ 01000000 | 10000000
+(1 row)
+
+SELECT uuid_generate_v4() <> uuid_generate_v4();
+ ?column?
+----------
+ t
+(1 row)
+
diff --git a/contrib/uuid-ossp/sql/uuid_ossp.sql b/contrib/uuid-ossp/sql/uuid_ossp.sql
new file mode 100644
index 0000000000..b4237df884
--- /dev/null
+++ b/contrib/uuid-ossp/sql/uuid_ossp.sql
@@ -0,0 +1,75 @@
+CREATE EXTENSION "uuid-ossp";
+
+SELECT uuid_nil();
+SELECT uuid_ns_dns();
+SELECT uuid_ns_url();
+SELECT uuid_ns_oid();
+SELECT uuid_ns_x500();
+
+-- some quick and dirty field extraction functions
+
+-- this is actually timestamp concatenated with clock sequence, per RFC 4122
+CREATE FUNCTION uuid_timestamp_bits(uuid) RETURNS varbit AS
+$$ SELECT ('x' || substr($1::text, 15, 4) || substr($1::text, 10, 4) ||
+ substr($1::text, 1, 8) || substr($1::text, 20, 4))::bit(80)
+ & x'0FFFFFFFFFFFFFFF3FFF' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+
+CREATE FUNCTION uuid_version_bits(uuid) RETURNS varbit AS
+$$ SELECT ('x' || substr($1::text, 15, 2))::bit(8) & '11110000' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+
+CREATE FUNCTION uuid_reserved_bits(uuid) RETURNS varbit AS
+$$ SELECT ('x' || substr($1::text, 20, 2))::bit(8) & '11000000' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+
+CREATE FUNCTION uuid_multicast_bit(uuid) RETURNS bool AS
+$$ SELECT (('x' || substr($1::text, 25, 2))::bit(8) & '00000001') != '00000000' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+
+CREATE FUNCTION uuid_local_admin_bit(uuid) RETURNS bool AS
+$$ SELECT (('x' || substr($1::text, 25, 2))::bit(8) & '00000010') != '00000000' $$
+LANGUAGE SQL STRICT IMMUTABLE;
+
+CREATE FUNCTION uuid_node(uuid) RETURNS text AS
+$$ SELECT substr($1::text, 25) $$
+LANGUAGE SQL STRICT IMMUTABLE;
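
A worked example of the hex-to-bit trick above (an editor's illustration, not part of the regression test): characters 15-16 of '6ba7b810-9dad-11d1-80b4-00c04fd430c8' are '11', ('x' || '11')::bit(8) is 00010001, and masking with 11110000 leaves only the version nibble.

-- Editor's illustration only: the DNS namespace UUID is a version-1 UUID.
SELECT uuid_version_bits(uuid_ns_dns());   -- 00010000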
+
+-- Ideally, the multicast bit would never be set in V1 output, but the
+-- UUID library may fall back to MC if it can't get the system MAC address.
+-- Also, the local-admin bit might be set (if so, we're probably inside a VM).
+-- So we can't test either bit here.
+SELECT uuid_version_bits(uuid_generate_v1()),
+ uuid_reserved_bits(uuid_generate_v1());
+
+-- Although RFC 4122 only requires the multicast bit to be set in V1MC style
+-- UUIDs, our implementation always sets the local-admin bit as well.
+SELECT uuid_version_bits(uuid_generate_v1mc()),
+ uuid_reserved_bits(uuid_generate_v1mc()),
+ uuid_multicast_bit(uuid_generate_v1mc()),
+ uuid_local_admin_bit(uuid_generate_v1mc());
+
+-- timestamp+clock sequence should be monotonic increasing in v1
+SELECT uuid_timestamp_bits(uuid_generate_v1()) < uuid_timestamp_bits(uuid_generate_v1());
+SELECT uuid_timestamp_bits(uuid_generate_v1mc()) < uuid_timestamp_bits(uuid_generate_v1mc());
+
+-- Ideally, the node value is stable in V1 addresses, but OSSP UUID
+-- falls back to V1MC behavior if it can't get the system MAC address.
+SELECT CASE WHEN uuid_multicast_bit(uuid_generate_v1()) AND
+ uuid_local_admin_bit(uuid_generate_v1()) THEN
+ true -- punt, no test
+ ELSE
+ uuid_node(uuid_generate_v1()) = uuid_node(uuid_generate_v1())
+ END;
+
+-- In any case, V1MC node addresses should be random.
+SELECT uuid_node(uuid_generate_v1()) <> uuid_node(uuid_generate_v1mc());
+SELECT uuid_node(uuid_generate_v1mc()) <> uuid_node(uuid_generate_v1mc());
+
+SELECT uuid_generate_v3(uuid_ns_dns(), 'www.widgets.com');
+SELECT uuid_generate_v5(uuid_ns_dns(), 'www.widgets.com');
+
+SELECT uuid_version_bits(uuid_generate_v4()),
+ uuid_reserved_bits(uuid_generate_v4());
+
+SELECT uuid_generate_v4() <> uuid_generate_v4();
diff --git a/contrib/uuid-ossp/uuid-ossp--1.0.sql b/contrib/uuid-ossp/uuid-ossp--1.0.sql
index 45ada1b23b..042732065b 100644
--- a/contrib/uuid-ossp/uuid-ossp--1.0.sql
+++ b/contrib/uuid-ossp/uuid-ossp--1.0.sql
@@ -1,7 +1,7 @@
/* contrib/uuid-ossp/uuid-ossp--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "CREATE EXTENSION uuid-ossp" to load this file. \quit
+\echo Use '''CREATE EXTENSION "uuid-ossp"''' to load this file. \quit
CREATE FUNCTION uuid_nil()
RETURNS uuid
diff --git a/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql b/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql
index 5c0dbfef9c..5776b6f930 100644
--- a/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql
+++ b/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql
@@ -1,7 +1,7 @@
/* contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "CREATE EXTENSION uuid-ossp" to load this file. \quit
+\echo Use '''CREATE EXTENSION "uuid-ossp"''' to load this file. \quit
ALTER EXTENSION "uuid-ossp" ADD function uuid_nil();
ALTER EXTENSION "uuid-ossp" ADD function uuid_ns_dns();
diff --git a/contrib/uuid-ossp/uuid-ossp.c b/contrib/uuid-ossp/uuid-ossp.c
index d4fc62bd4f..9e9905bfde 100644
--- a/contrib/uuid-ossp/uuid-ossp.c
+++ b/contrib/uuid-ossp/uuid-ossp.c
@@ -1,8 +1,10 @@
/*-------------------------------------------------------------------------
*
- * UUID generation functions using the OSSP UUID library
+ * UUID generation functions using the BSD, E2FS or OSSP UUID library
*
- * Copyright (c) 2007-2012, PostgreSQL Global Development Group
+ * Copyright (c) 2007-2014, PostgreSQL Global Development Group
+ *
+ * Portions Copyright (c) 2009 Andrew Gierth
*
* contrib/uuid-ossp/uuid-ossp.c
*
@@ -10,46 +12,104 @@
*/
#include "postgres.h"
+
#include "fmgr.h"
#include "utils/builtins.h"
#include "utils/uuid.h"
/*
- * There's some confusion over the location of the uuid.h header file.
- * On Debian, it's installed as ossp/uuid.h, while on Fedora, or if you
- * install ossp-uuid from a tarball, it's installed as uuid.h. Don't know
- * what other systems do.
+ * It's possible that there's more than one uuid.h header file present.
+ * We expect configure to set the HAVE_ symbol for only the one we want.
+ *
+ * BSD includes a uuid_hash() function that conflicts with the one in
+ * builtins.h; we #define it out of the way.
*/
-#ifdef HAVE_OSSP_UUID_H
-#include <ossp/uuid.h>
-#else
+#define uuid_hash bsd_uuid_hash
+
#ifdef HAVE_UUID_H
#include <uuid.h>
-#else
-#error OSSP uuid.h not found
#endif
+#ifdef HAVE_OSSP_UUID_H
+#include <ossp/uuid.h>
+#endif
+#ifdef HAVE_UUID_UUID_H
+#include <uuid/uuid.h>
+#endif
+
+#undef uuid_hash
+
+/*
+ * Some BSD variants offer md5 and sha1 implementations but Linux does not,
+ * so we use a copy of the ones from pgcrypto. Not needed with OSSP, though.
+ */
+#ifndef HAVE_UUID_OSSP
+#include "md5.h"
+#include "sha1.h"
#endif
-/* better both be 16 */
-#if (UUID_LEN != UUID_LEN_BIN)
+
+/* Check our UUID length against OSSP's; better both be 16 */
+#if defined(HAVE_UUID_OSSP) && (UUID_LEN != UUID_LEN_BIN)
#error UUID length mismatch
#endif
+/* Define some constants like OSSP's, to make the code more readable */
+#ifndef HAVE_UUID_OSSP
+#define UUID_MAKE_MC 0
+#define UUID_MAKE_V1 1
+#define UUID_MAKE_V2 2
+#define UUID_MAKE_V3 3
+#define UUID_MAKE_V4 4
+#define UUID_MAKE_V5 5
+#endif
-PG_MODULE_MAGIC;
+/*
+ * A DCE 1.1 compatible source representation of UUIDs, derived from
+ * the BSD implementation. BSD already has this; OSSP doesn't need it.
+ */
+#ifdef HAVE_UUID_E2FS
+typedef struct
+{
+ uint32_t time_low;
+ uint16_t time_mid;
+ uint16_t time_hi_and_version;
+ uint8_t clock_seq_hi_and_reserved;
+ uint8_t clock_seq_low;
+ uint8_t node[6];
+} dce_uuid_t;
+#else
+#define dce_uuid_t uuid_t
+#endif
+
+/* If not OSSP, we need some endianness-manipulation macros */
+#ifndef HAVE_UUID_OSSP
+#define UUID_TO_NETWORK(uu) \
+do { \
+ uu.time_low = htonl(uu.time_low); \
+ uu.time_mid = htons(uu.time_mid); \
+ uu.time_hi_and_version = htons(uu.time_hi_and_version); \
+} while (0)
-Datum uuid_nil(PG_FUNCTION_ARGS);
-Datum uuid_ns_dns(PG_FUNCTION_ARGS);
-Datum uuid_ns_url(PG_FUNCTION_ARGS);
-Datum uuid_ns_oid(PG_FUNCTION_ARGS);
-Datum uuid_ns_x500(PG_FUNCTION_ARGS);
+#define UUID_TO_LOCAL(uu) \
+do { \
+ uu.time_low = ntohl(uu.time_low); \
+ uu.time_mid = ntohs(uu.time_mid); \
+ uu.time_hi_and_version = ntohs(uu.time_hi_and_version); \
+} while (0)
-Datum uuid_generate_v1(PG_FUNCTION_ARGS);
-Datum uuid_generate_v1mc(PG_FUNCTION_ARGS);
-Datum uuid_generate_v3(PG_FUNCTION_ARGS);
-Datum uuid_generate_v4(PG_FUNCTION_ARGS);
-Datum uuid_generate_v5(PG_FUNCTION_ARGS);
+#define UUID_V3_OR_V5(uu, v) \
+do { \
+ uu.time_hi_and_version &= 0x0FFF; \
+ uu.time_hi_and_version |= (v << 12); \
+ uu.clock_seq_hi_and_reserved &= 0x3F; \
+ uu.clock_seq_hi_and_reserved |= 0x80; \
+} while(0)
+
+#endif /* !HAVE_UUID_OSSP */
+
+
+PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(uuid_nil);
@@ -64,6 +124,8 @@ PG_FUNCTION_INFO_V1(uuid_generate_v3);
PG_FUNCTION_INFO_V1(uuid_generate_v4);
PG_FUNCTION_INFO_V1(uuid_generate_v5);
+#ifdef HAVE_UUID_OSSP
+
static void
pguuid_complain(uuid_rc_t rc)
{
@@ -79,6 +141,42 @@ pguuid_complain(uuid_rc_t rc)
errmsg("OSSP uuid library failure: error code %d", rc)));
}
+/*
+ * We create a uuid_t object just once per session and re-use it for all
+ * operations in this module. OSSP UUID caches the system MAC address and
+ * other state in this object. Reusing the object has a number of benefits:
+ * saving the cycles needed to fetch the system MAC address over and over,
+ * reducing the amount of entropy we draw from /dev/urandom, and providing a
+ * positive guarantee that successive generated V1-style UUIDs don't collide.
+ * (On a machine fast enough to generate multiple UUIDs per microsecond,
+ * or whatever the system's wall-clock resolution is, we'd otherwise risk
+ * collisions whenever random initialization of the uuid_t's clock sequence
+ * value chanced to produce duplicates.)
+ *
+ * However: when we're doing V3 or V5 UUID creation, uuid_make needs two
+ * uuid_t objects, one holding the namespace UUID and one for the result.
+ * It's unspecified whether it's safe to use the same uuid_t for both cases,
+ * so let's cache a second uuid_t for use as the namespace holder object.
+ */
+static uuid_t *
+get_cached_uuid_t(int which)
+{
+ static uuid_t *cached_uuid[2] = {NULL, NULL};
+
+ if (cached_uuid[which] == NULL)
+ {
+ uuid_rc_t rc;
+
+ rc = uuid_create(&cached_uuid[which]);
+ if (rc != UUID_RC_OK)
+ {
+ cached_uuid[which] = NULL;
+ pguuid_complain(rc);
+ }
+ }
+ return cached_uuid[which];
+}
+
static char *
uuid_to_string(const uuid_t *uuid)
{
@@ -109,118 +207,293 @@ string_to_uuid(const char *str, uuid_t *uuid)
static Datum
special_uuid_value(const char *name)
{
- uuid_t *uuid;
+ uuid_t *uuid = get_cached_uuid_t(0);
char *str;
uuid_rc_t rc;
- rc = uuid_create(&uuid);
- if (rc != UUID_RC_OK)
- pguuid_complain(rc);
rc = uuid_load(uuid, name);
if (rc != UUID_RC_OK)
pguuid_complain(rc);
str = uuid_to_string(uuid);
- rc = uuid_destroy(uuid);
+
+ return DirectFunctionCall1(uuid_in, CStringGetDatum(str));
+}
+
+/* len is unused with OSSP, but we want to have the same number of args */
+static Datum
+uuid_generate_internal(int mode, const uuid_t *ns, const char *name, int len)
+{
+ uuid_t *uuid = get_cached_uuid_t(0);
+ char *str;
+ uuid_rc_t rc;
+
+ rc = uuid_make(uuid, mode, ns, name);
if (rc != UUID_RC_OK)
pguuid_complain(rc);
+ str = uuid_to_string(uuid);
return DirectFunctionCall1(uuid_in, CStringGetDatum(str));
}
+static Datum
+uuid_generate_v35_internal(int mode, pg_uuid_t *ns, text *name)
+{
+ uuid_t *ns_uuid = get_cached_uuid_t(1);
+
+ string_to_uuid(DatumGetCString(DirectFunctionCall1(uuid_out,
+ UUIDPGetDatum(ns))),
+ ns_uuid);
+
+ return uuid_generate_internal(mode,
+ ns_uuid,
+ text_to_cstring(name),
+ 0);
+}
+
+#else /* !HAVE_UUID_OSSP */
+
+static Datum
+uuid_generate_internal(int v, unsigned char *ns, char *ptr, int len)
+{
+ char strbuf[40];
+
+ switch (v)
+ {
+ case 0: /* constant-value uuids */
+ strlcpy(strbuf, ptr, 37);
+ break;
+
+ case 1: /* time/node-based uuids */
+ {
+#ifdef HAVE_UUID_E2FS
+ uuid_t uu;
+
+ uuid_generate_time(uu);
+ uuid_unparse(uu, strbuf);
+
+ /*
+ * PTR, if set, replaces the trailing characters of the uuid;
+ * this is to support v1mc, where a random multicast MAC is
+ * used instead of the physical one
+ */
+ if (ptr && len <= 36)
+ strcpy(strbuf + (36 - len), ptr);
+#else /* BSD */
+ uuid_t uu;
+ uint32_t status = uuid_s_ok;
+ char *str = NULL;
+
+ uuid_create(&uu, &status);
+
+ if (status == uuid_s_ok)
+ {
+ uuid_to_string(&uu, &str, &status);
+ if (status == uuid_s_ok)
+ {
+ strlcpy(strbuf, str, 37);
+
+ /*
+ * PTR, if set, replaces the trailing characters of
+ * the uuid; this is to support v1mc, where a random
+ * multicast MAC is used instead of the physical one
+ */
+ if (ptr && len <= 36)
+ strcpy(strbuf + (36 - len), ptr);
+ }
+ if (str)
+ free(str);
+ }
+
+ if (status != uuid_s_ok)
+ ereport(ERROR,
+ (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
+ errmsg("uuid library failure: %d",
+ (int) status)));
+#endif
+ break;
+ }
+
+ case 3: /* namespace-based MD5 uuids */
+ case 5: /* namespace-based SHA1 uuids */
+ {
+ dce_uuid_t uu;
+#ifdef HAVE_UUID_BSD
+ uint32_t status = uuid_s_ok;
+ char *str = NULL;
+#endif
+
+ if (v == 3)
+ {
+ MD5_CTX ctx;
+
+ MD5Init(&ctx);
+ MD5Update(&ctx, ns, sizeof(uu));
+ MD5Update(&ctx, (unsigned char *) ptr, len);
+ /* we assume sizeof MD5 result is 16, same as UUID size */
+ MD5Final((unsigned char *) &uu, &ctx);
+ }
+ else
+ {
+ SHA1_CTX ctx;
+ unsigned char sha1result[SHA1_RESULTLEN];
+
+ SHA1Init(&ctx);
+ SHA1Update(&ctx, ns, sizeof(uu));
+ SHA1Update(&ctx, (unsigned char *) ptr, len);
+ SHA1Final(sha1result, &ctx);
+ memcpy(&uu, sha1result, sizeof(uu));
+ }
+
+ /* the calculated hash is using local order */
+ UUID_TO_NETWORK(uu);
+ UUID_V3_OR_V5(uu, v);
+
+#ifdef HAVE_UUID_E2FS
+ /* uuid_unparse expects local order */
+ UUID_TO_LOCAL(uu);
+ uuid_unparse((unsigned char *) &uu, strbuf);
+#else /* BSD */
+ uuid_to_string(&uu, &str, &status);
+
+ if (status == uuid_s_ok)
+ strlcpy(strbuf, str, 37);
+
+ if (str)
+ free(str);
+
+ if (status != uuid_s_ok)
+ ereport(ERROR,
+ (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
+ errmsg("uuid library failure: %d",
+ (int) status)));
+#endif
+ break;
+ }
+
+ case 4: /* random uuid */
+ default:
+ {
+#ifdef HAVE_UUID_E2FS
+ uuid_t uu;
+
+ uuid_generate_random(uu);
+ uuid_unparse(uu, strbuf);
+#else /* BSD */
+ snprintf(strbuf, sizeof(strbuf),
+ "%08lx-%04x-%04x-%04x-%04x%08lx",
+ (unsigned long) arc4random(),
+ (unsigned) (arc4random() & 0xffff),
+ (unsigned) ((arc4random() & 0xfff) | 0x4000),
+ (unsigned) ((arc4random() & 0x3fff) | 0x8000),
+ (unsigned) (arc4random() & 0xffff),
+ (unsigned long) arc4random());
+#endif
+ break;
+ }
+ }
+
+ return DirectFunctionCall1(uuid_in, CStringGetDatum(strbuf));
+}
+
+#endif /* HAVE_UUID_OSSP */
+
+
Datum
uuid_nil(PG_FUNCTION_ARGS)
{
+#ifdef HAVE_UUID_OSSP
return special_uuid_value("nil");
+#else
+ return uuid_generate_internal(0, NULL,
+ "00000000-0000-0000-0000-000000000000", 36);
+#endif
}
Datum
uuid_ns_dns(PG_FUNCTION_ARGS)
{
+#ifdef HAVE_UUID_OSSP
return special_uuid_value("ns:DNS");
+#else
+ return uuid_generate_internal(0, NULL,
+ "6ba7b810-9dad-11d1-80b4-00c04fd430c8", 36);
+#endif
}
Datum
uuid_ns_url(PG_FUNCTION_ARGS)
{
+#ifdef HAVE_UUID_OSSP
return special_uuid_value("ns:URL");
+#else
+ return uuid_generate_internal(0, NULL,
+ "6ba7b811-9dad-11d1-80b4-00c04fd430c8", 36);
+#endif
}
Datum
uuid_ns_oid(PG_FUNCTION_ARGS)
{
+#ifdef HAVE_UUID_OSSP
return special_uuid_value("ns:OID");
+#else
+ return uuid_generate_internal(0, NULL,
+ "6ba7b812-9dad-11d1-80b4-00c04fd430c8", 36);
+#endif
}
Datum
uuid_ns_x500(PG_FUNCTION_ARGS)
{
+#ifdef HAVE_UUID_OSSP
return special_uuid_value("ns:X500");
-}
-
-
-static Datum
-uuid_generate_internal(int mode, const uuid_t *ns, const char *name)
-{
- uuid_t *uuid;
- char *str;
- uuid_rc_t rc;
-
- rc = uuid_create(&uuid);
- if (rc != UUID_RC_OK)
- pguuid_complain(rc);
- rc = uuid_make(uuid, mode, ns, name);
- if (rc != UUID_RC_OK)
- pguuid_complain(rc);
- str = uuid_to_string(uuid);
- rc = uuid_destroy(uuid);
- if (rc != UUID_RC_OK)
- pguuid_complain(rc);
-
- return DirectFunctionCall1(uuid_in, CStringGetDatum(str));
+#else
+ return uuid_generate_internal(0, NULL,
+ "6ba7b814-9dad-11d1-80b4-00c04fd430c8", 36);
+#endif
}
Datum
uuid_generate_v1(PG_FUNCTION_ARGS)
{
- return uuid_generate_internal(UUID_MAKE_V1, NULL, NULL);
+ return uuid_generate_internal(UUID_MAKE_V1, NULL, NULL, 0);
}
Datum
uuid_generate_v1mc(PG_FUNCTION_ARGS)
{
- return uuid_generate_internal(UUID_MAKE_V1 | UUID_MAKE_MC, NULL, NULL);
-}
-
-
-static Datum
-uuid_generate_v35_internal(int mode, pg_uuid_t *ns, text *name)
-{
- uuid_t *ns_uuid;
- Datum result;
- uuid_rc_t rc;
-
- rc = uuid_create(&ns_uuid);
- if (rc != UUID_RC_OK)
- pguuid_complain(rc);
- string_to_uuid(DatumGetCString(DirectFunctionCall1(uuid_out, UUIDPGetDatum(ns))),
- ns_uuid);
-
- result = uuid_generate_internal(mode,
- ns_uuid,
- text_to_cstring(name));
-
- rc = uuid_destroy(ns_uuid);
- if (rc != UUID_RC_OK)
- pguuid_complain(rc);
+#ifdef HAVE_UUID_OSSP
+ char *buf = NULL;
+#elif defined(HAVE_UUID_E2FS)
+ char strbuf[40];
+ char *buf;
+ uuid_t uu;
+
+ uuid_generate_random(uu);
+
+ /* set IEEE802 multicast and local-admin bits */
+ ((dce_uuid_t *) &uu)->node[0] |= 0x03;
+
+ uuid_unparse(uu, strbuf);
+ buf = strbuf + 24;
+#else /* BSD */
+ char buf[16];
+
+ /* set IEEE802 multicast and local-admin bits */
+ snprintf(buf, sizeof(buf), "-%04x%08lx",
+ (unsigned) ((arc4random() & 0xffff) | 0x0300),
+ (unsigned long) arc4random());
+#endif
- return result;
+ return uuid_generate_internal(UUID_MAKE_V1 | UUID_MAKE_MC, NULL,
+ buf, 13);
}
@@ -230,14 +503,19 @@ uuid_generate_v3(PG_FUNCTION_ARGS)
pg_uuid_t *ns = PG_GETARG_UUID_P(0);
text *name = PG_GETARG_TEXT_P(1);
+#ifdef HAVE_UUID_OSSP
return uuid_generate_v35_internal(UUID_MAKE_V3, ns, name);
+#else
+ return uuid_generate_internal(UUID_MAKE_V3, (unsigned char *) ns,
+ VARDATA(name), VARSIZE(name) - VARHDRSZ);
+#endif
}
Datum
uuid_generate_v4(PG_FUNCTION_ARGS)
{
- return uuid_generate_internal(UUID_MAKE_V4, NULL, NULL);
+ return uuid_generate_internal(UUID_MAKE_V4, NULL, NULL, 0);
}
@@ -247,5 +525,10 @@ uuid_generate_v5(PG_FUNCTION_ARGS)
pg_uuid_t *ns = PG_GETARG_UUID_P(0);
text *name = PG_GETARG_TEXT_P(1);
+#ifdef HAVE_UUID_OSSP
return uuid_generate_v35_internal(UUID_MAKE_V5, ns, name);
+#else
+ return uuid_generate_internal(UUID_MAKE_V5, (unsigned char *) ns,
+ VARDATA(name), VARSIZE(name) - VARHDRSZ);
+#endif
}
diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c
index 958a496b24..c2e5bad438 100644
--- a/contrib/vacuumlo/vacuumlo.c
+++ b/contrib/vacuumlo/vacuumlo.c
@@ -3,7 +3,7 @@
* vacuumlo.c
* This removes orphaned large objects from a database.
*
- * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -22,15 +22,12 @@
#endif
#include "libpq-fe.h"
+#include "pg_getopt.h"
#define atooid(x) ((Oid) strtoul((x), NULL, 10))
#define BUFSIZE 1024
-extern char *optarg;
-extern int optind,
- opterr;
-
enum trivalue
{
TRI_DEFAULT,
@@ -44,6 +41,7 @@ struct _param
enum trivalue pg_prompt;
char *pg_port;
char *pg_host;
+ const char *progname;
int verbose;
int dry_run;
long transaction_limit;
@@ -81,15 +79,28 @@ vacuumlo(const char *database, const struct _param * param)
*/
do
{
- new_pass = false;
+#define PARAMS_ARRAY_SIZE 7
+
+ const char *keywords[PARAMS_ARRAY_SIZE];
+ const char *values[PARAMS_ARRAY_SIZE];
+
+ keywords[0] = "host";
+ values[0] = param->pg_host;
+ keywords[1] = "port";
+ values[1] = param->pg_port;
+ keywords[2] = "user";
+ values[2] = param->pg_user;
+ keywords[3] = "password";
+ values[3] = password;
+ keywords[4] = "dbname";
+ values[4] = database;
+ keywords[5] = "fallback_application_name";
+ values[5] = param->progname;
+ keywords[6] = NULL;
+ values[6] = NULL;
- conn = PQsetdbLogin(param->pg_host,
- param->pg_port,
- NULL,
- NULL,
- database,
- param->pg_user,
- password);
+ new_pass = false;
+ conn = PQconnectdbParams(keywords, values, true);
if (!conn)
{
fprintf(stderr, "Connection to database \"%s\" failed\n",
@@ -195,7 +206,7 @@ vacuumlo(const char *database, const struct _param * param)
strcat(buf, " AND a.atttypid = t.oid ");
strcat(buf, " AND c.relnamespace = s.oid ");
strcat(buf, " AND t.typname in ('oid', 'lo') ");
- strcat(buf, " AND c.relkind = 'r'");
+ strcat(buf, " AND c.relkind in ('r', 'm')");
strcat(buf, " AND s.nspname !~ '^pg_'");
res = PQexec(conn, buf);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
@@ -276,74 +287,101 @@ vacuumlo(const char *database, const struct _param * param)
PQclear(res);
buf[0] = '\0';
- strcat(buf, "SELECT lo FROM vacuum_l");
+ strcat(buf,
+ "DECLARE myportal CURSOR WITH HOLD FOR SELECT lo FROM vacuum_l");
res = PQexec(conn, buf);
- if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
- fprintf(stderr, "Failed to read temp table:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ fprintf(stderr, "DECLARE CURSOR failed: %s", PQerrorMessage(conn));
PQclear(res);
PQfinish(conn);
return -1;
}
+ PQclear(res);
+
+ snprintf(buf, BUFSIZE, "FETCH FORWARD %ld IN myportal",
+ param->transaction_limit > 0 ? param->transaction_limit : 1000L);
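	/*
	 * Editor's sketch (not part of the patch): the statements issued above
	 * and in the loop below amount to
	 *
	 *   DECLARE myportal CURSOR WITH HOLD FOR SELECT lo FROM vacuum_l;
	 *   FETCH FORWARD 1000 IN myportal;   -- repeated until no rows come back
	 *
	 * so orphaned large objects are processed in batches of at most
	 * transaction_limit rows (1000 by default) instead of being read all at
	 * once.
	 */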
- matched = PQntuples(res);
deleted = 0;
- for (i = 0; i < matched; i++)
+
+ while (1)
{
- Oid lo = atooid(PQgetvalue(res, i, 0));
+ res = PQexec(conn, buf);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ fprintf(stderr, "FETCH FORWARD failed: %s", PQerrorMessage(conn));
+ PQclear(res);
+ PQfinish(conn);
+ return -1;
+ }
- if (param->verbose)
+ matched = PQntuples(res);
+ if (matched <= 0)
{
- fprintf(stdout, "\rRemoving lo %6u ", lo);
- fflush(stdout);
+ /* at end of resultset */
+ PQclear(res);
+ break;
}
- if (param->dry_run == 0)
+ for (i = 0; i < matched; i++)
{
- if (lo_unlink(conn, lo) < 0)
+ Oid lo = atooid(PQgetvalue(res, i, 0));
+
+ if (param->verbose)
{
- fprintf(stderr, "\nFailed to remove lo %u: ", lo);
- fprintf(stderr, "%s", PQerrorMessage(conn));
- if (PQtransactionStatus(conn) == PQTRANS_INERROR)
+ fprintf(stdout, "\rRemoving lo %6u ", lo);
+ fflush(stdout);
+ }
+
+ if (param->dry_run == 0)
+ {
+ if (lo_unlink(conn, lo) < 0)
{
- success = false;
- break;
+ fprintf(stderr, "\nFailed to remove lo %u: ", lo);
+ fprintf(stderr, "%s", PQerrorMessage(conn));
+ if (PQtransactionStatus(conn) == PQTRANS_INERROR)
+ {
+ success = false;
+ PQclear(res);
+ break;
+ }
}
+ else
+ deleted++;
}
else
deleted++;
- }
- else
- deleted++;
- if (param->transaction_limit > 0 &&
- (deleted % param->transaction_limit) == 0)
- {
- res2 = PQexec(conn, "commit");
- if (PQresultStatus(res2) != PGRES_COMMAND_OK)
+
+ if (param->transaction_limit > 0 &&
+ (deleted % param->transaction_limit) == 0)
{
- fprintf(stderr, "Failed to commit transaction:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ res2 = PQexec(conn, "commit");
+ if (PQresultStatus(res2) != PGRES_COMMAND_OK)
+ {
+ fprintf(stderr, "Failed to commit transaction:\n");
+ fprintf(stderr, "%s", PQerrorMessage(conn));
+ PQclear(res2);
+ PQclear(res);
+ PQfinish(conn);
+ return -1;
+ }
PQclear(res2);
- PQclear(res);
- PQfinish(conn);
- return -1;
- }
- PQclear(res2);
- res2 = PQexec(conn, "begin");
- if (PQresultStatus(res2) != PGRES_COMMAND_OK)
- {
- fprintf(stderr, "Failed to start transaction:\n");
- fprintf(stderr, "%s", PQerrorMessage(conn));
+ res2 = PQexec(conn, "begin");
+ if (PQresultStatus(res2) != PGRES_COMMAND_OK)
+ {
+ fprintf(stderr, "Failed to start transaction:\n");
+ fprintf(stderr, "%s", PQerrorMessage(conn));
+ PQclear(res2);
+ PQclear(res);
+ PQfinish(conn);
+ return -1;
+ }
PQclear(res2);
- PQclear(res);
- PQfinish(conn);
- return -1;
}
- PQclear(res2);
}
+
+ PQclear(res);
}
- PQclear(res);
/*
* That's all folks!
@@ -384,16 +422,17 @@ usage(const char *progname)
printf("%s removes unreferenced large objects from databases.\n\n", progname);
printf("Usage:\n %s [OPTION]... DBNAME...\n\n", progname);
printf("Options:\n");
- printf(" -h HOSTNAME database server host or socket directory\n");
- printf(" -l LIMIT commit after removing each LIMIT large objects\n");
- printf(" -n don't remove large objects, just show what would be done\n");
- printf(" -p PORT database server port\n");
- printf(" -U USERNAME user name to connect as\n");
- printf(" -w never prompt for password\n");
- printf(" -W force password prompt\n");
- printf(" -v write a lot of progress messages\n");
- printf(" --help show this help, then exit\n");
- printf(" --version output version information, then exit\n");
+ printf(" -l LIMIT commit after removing each LIMIT large objects\n");
+ printf(" -n don't remove large objects, just show what would be done\n");
+ printf(" -v write a lot of progress messages\n");
+ printf(" -V, --version output version information, then exit\n");
+ printf(" -?, --help show this help, then exit\n");
+ printf("\nConnection options:\n");
+ printf(" -h HOSTNAME database server host or socket directory\n");
+ printf(" -p PORT database server port\n");
+ printf(" -U USERNAME user name to connect as\n");
+ printf(" -w never prompt for password\n");
+ printf(" -W force password prompt\n");
printf("\n");
printf("Report bugs to <pgsql-bugs@postgresql.org>.\n");
}
@@ -415,6 +454,7 @@ main(int argc, char **argv)
param.pg_prompt = TRI_DEFAULT;
param.pg_host = NULL;
param.pg_port = NULL;
+ param.progname = progname;
param.verbose = 0;
param.dry_run = 0;
param.transaction_limit = 1000;
diff --git a/contrib/worker_spi/Makefile b/contrib/worker_spi/Makefile
new file mode 100644
index 0000000000..fbb29b4f2f
--- /dev/null
+++ b/contrib/worker_spi/Makefile
@@ -0,0 +1,17 @@
+# contrib/worker_spi/Makefile
+
+MODULES = worker_spi
+
+EXTENSION = worker_spi
+DATA = worker_spi--1.0.sql
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/worker_spi
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/worker_spi/worker_spi--1.0.sql b/contrib/worker_spi/worker_spi--1.0.sql
new file mode 100644
index 0000000000..09b7799f2c
--- /dev/null
+++ b/contrib/worker_spi/worker_spi--1.0.sql
@@ -0,0 +1,9 @@
+/* contrib/worker_spi/worker_spi--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION worker_spi" to load this file. \quit
+
+CREATE FUNCTION worker_spi_launch(pg_catalog.int4)
+RETURNS pg_catalog.int4 STRICT
+AS 'MODULE_PATHNAME'
+LANGUAGE C;
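
A minimal usage sketch (an editor's illustration, not part of the extension script): once the module is installed, an additional worker can be started on demand; worker_spi_launch() waits until the new background worker has started and returns its PID.

CREATE EXTENSION worker_spi;
SELECT worker_spi_launch(3);   -- registers "worker 3" dynamically and returns its PID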
diff --git a/contrib/worker_spi/worker_spi.c b/contrib/worker_spi/worker_spi.c
new file mode 100644
index 0000000000..829de0e6a7
--- /dev/null
+++ b/contrib/worker_spi/worker_spi.c
@@ -0,0 +1,407 @@
+/* -------------------------------------------------------------------------
+ *
+ * worker_spi.c
+ * Sample background worker code that demonstrates various coding
+ * patterns: establishing a database connection; starting and committing
+ * transactions; using GUC variables, and heeding SIGHUP to reread
+ * the configuration file; reporting to pg_stat_activity; using the
+ * process latch to sleep and exit in case of postmaster death.
+ *
+ * This code connects to a database, creates a schema and table, and summarizes
+ * the numbers contained therein. To see it working, insert an initial row
+ * with "total" type and some starting value; then insert some other rows with
+ * "delta" type. Delta rows will be deleted by this worker and their values
+ * aggregated into the total.
+ *
+ * Copyright (C) 2013, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/worker_spi/worker_spi.c
+ *
+ * -------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+/* These are always necessary for a bgworker */
+#include "miscadmin.h"
+#include "postmaster/bgworker.h"
+#include "storage/ipc.h"
+#include "storage/latch.h"
+#include "storage/lwlock.h"
+#include "storage/proc.h"
+#include "storage/shmem.h"
+
+/* these headers are used by this particular worker's code */
+#include "access/xact.h"
+#include "executor/spi.h"
+#include "fmgr.h"
+#include "lib/stringinfo.h"
+#include "pgstat.h"
+#include "utils/builtins.h"
+#include "utils/snapmgr.h"
+#include "tcop/utility.h"
+
+PG_MODULE_MAGIC;
+
+PG_FUNCTION_INFO_V1(worker_spi_launch);
+
+void _PG_init(void);
+void worker_spi_main(Datum);
+
+/* flags set by signal handlers */
+static volatile sig_atomic_t got_sighup = false;
+static volatile sig_atomic_t got_sigterm = false;
+
+/* GUC variables */
+static int worker_spi_naptime = 10;
+static int worker_spi_total_workers = 2;
+
+
+typedef struct worktable
+{
+ const char *schema;
+ const char *name;
+} worktable;
+
+/*
+ * Signal handler for SIGTERM
+ * Set a flag to tell the main loop to terminate, and set our latch to wake
+ * it up.
+ */
+static void
+worker_spi_sigterm(SIGNAL_ARGS)
+{
+ int save_errno = errno;
+
+ got_sigterm = true;
+ if (MyProc)
+ SetLatch(&MyProc->procLatch);
+
+ errno = save_errno;
+}
+
+/*
+ * Signal handler for SIGHUP
+ * Set a flag to tell the main loop to reread the config file, and set
+ * our latch to wake it up.
+ */
+static void
+worker_spi_sighup(SIGNAL_ARGS)
+{
+ int save_errno = errno;
+
+ got_sighup = true;
+ if (MyProc)
+ SetLatch(&MyProc->procLatch);
+
+ errno = save_errno;
+}
+
+/*
+ * Initialize workspace for a worker process: create the schema if it doesn't
+ * already exist.
+ */
+static void
+initialize_worker_spi(worktable *table)
+{
+ int ret;
+ int ntup;
+ bool isnull;
+ StringInfoData buf;
+
+ SetCurrentStatementStartTimestamp();
+ StartTransactionCommand();
+ SPI_connect();
+ PushActiveSnapshot(GetTransactionSnapshot());
+ pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema");
+
+ /* XXX could we use CREATE SCHEMA IF NOT EXISTS? */
+ initStringInfo(&buf);
+ appendStringInfo(&buf, "select count(*) from pg_namespace where nspname = '%s'",
+ table->schema);
+
+ ret = SPI_execute(buf.data, true, 0);
+ if (ret != SPI_OK_SELECT)
+ elog(FATAL, "SPI_execute failed: error code %d", ret);
+
+ if (SPI_processed != 1)
+ elog(FATAL, "not a singleton result");
+
+ ntup = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0],
+ SPI_tuptable->tupdesc,
+ 1, &isnull));
+ if (isnull)
+ elog(FATAL, "null result");
+
+ if (ntup == 0)
+ {
+ resetStringInfo(&buf);
+ appendStringInfo(&buf,
+ "CREATE SCHEMA \"%s\" "
+ "CREATE TABLE \"%s\" ("
+ " type text CHECK (type IN ('total', 'delta')), "
+ " value integer)"
+ "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) "
+ "WHERE type = 'total'",
+ table->schema, table->name, table->name, table->name);
+
+ /* set statement start time */
+ SetCurrentStatementStartTimestamp();
+
+ ret = SPI_execute(buf.data, false, 0);
+
+ if (ret != SPI_OK_UTILITY)
+ elog(FATAL, "failed to create my schema");
+ }
+
+ SPI_finish();
+ PopActiveSnapshot();
+ CommitTransactionCommand();
+ pgstat_report_activity(STATE_IDLE, NULL);
+}
+
+void
+worker_spi_main(Datum main_arg)
+{
+ int index = DatumGetInt32(main_arg);
+ worktable *table;
+ StringInfoData buf;
+ char name[20];
+
+ table = palloc(sizeof(worktable));
+ sprintf(name, "schema%d", index);
+ table->schema = pstrdup(name);
+ table->name = pstrdup("counted");
+
+ /* Establish signal handlers before unblocking signals. */
+ pqsignal(SIGHUP, worker_spi_sighup);
+ pqsignal(SIGTERM, worker_spi_sigterm);
+
+ /* We're now ready to receive signals */
+ BackgroundWorkerUnblockSignals();
+
+ /* Connect to our database */
+ BackgroundWorkerInitializeConnection("postgres", NULL);
+
+ elog(LOG, "%s initialized with %s.%s",
+ MyBgworkerEntry->bgw_name, table->schema, table->name);
+ initialize_worker_spi(table);
+
+ /*
+ * Quote identifiers passed to us. Note that this must be done after
+ * initialize_worker_spi, because that routine assumes the names are not
+ * quoted.
+ *
+ * Note some memory might be leaked here.
+ */
+ table->schema = quote_identifier(table->schema);
+ table->name = quote_identifier(table->name);
+
+ initStringInfo(&buf);
+ appendStringInfo(&buf,
+ "WITH deleted AS (DELETE "
+ "FROM %s.%s "
+ "WHERE type = 'delta' RETURNING value), "
+ "total AS (SELECT coalesce(sum(value), 0) as sum "
+ "FROM deleted) "
+ "UPDATE %s.%s "
+ "SET value = %s.value + total.sum "
+ "FROM total WHERE type = 'total' "
+ "RETURNING %s.value",
+ table->schema, table->name,
+ table->schema, table->name,
+ table->name,
+ table->name);
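	/*
	 * Editor's note (illustrative, not part of the patch): for worker 1 the
	 * query built above expands to roughly
	 *
	 *   WITH deleted AS (DELETE FROM schema1.counted
	 *                    WHERE type = 'delta' RETURNING value),
	 *        total AS (SELECT coalesce(sum(value), 0) as sum FROM deleted)
	 *   UPDATE schema1.counted SET value = counted.value + total.sum
	 *   FROM total WHERE type = 'total'
	 *   RETURNING counted.value
	 *
	 * i.e. every 'delta' row is consumed and folded into the single 'total'
	 * row in one statement.
	 */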
+
+ /*
+ * Main loop: do this until the SIGTERM handler tells us to terminate
+ */
+ while (!got_sigterm)
+ {
+ int ret;
+ int rc;
+
+ /*
+ * Background workers mustn't call usleep() or any direct equivalent:
+ * instead, they may wait on their process latch, which sleeps as
+ * necessary, but is awakened if postmaster dies. That way the
+ * background process goes away immediately in an emergency.
+ */
+ rc = WaitLatch(&MyProc->procLatch,
+ WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+ worker_spi_naptime * 1000L);
+ ResetLatch(&MyProc->procLatch);
+
+ /* emergency bailout if postmaster has died */
+ if (rc & WL_POSTMASTER_DEATH)
+ proc_exit(1);
+
+ /*
+ * In case of a SIGHUP, just reload the configuration.
+ */
+ if (got_sighup)
+ {
+ got_sighup = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ }
+
+ /*
+ * Start a transaction on which we can run queries. Note that each
+ * StartTransactionCommand() call should be preceded by a
+ * SetCurrentStatementStartTimestamp() call, which sets both the time
+		 * for the statement we're about to run, and also the transaction
+ * start time. Also, each other query sent to SPI should probably be
+ * preceded by SetCurrentStatementStartTimestamp(), so that statement
+ * start time is always up to date.
+ *
+ * The SPI_connect() call lets us run queries through the SPI manager,
+ * and the PushActiveSnapshot() call creates an "active" snapshot
+ * which is necessary for queries to have MVCC data to work on.
+ *
+ * The pgstat_report_activity() call makes our activity visible
+ * through the pgstat views.
+ */
+ SetCurrentStatementStartTimestamp();
+ StartTransactionCommand();
+ SPI_connect();
+ PushActiveSnapshot(GetTransactionSnapshot());
+ pgstat_report_activity(STATE_RUNNING, buf.data);
+
+ /* We can now execute queries via SPI */
+ ret = SPI_execute(buf.data, false, 0);
+
+ if (ret != SPI_OK_UPDATE_RETURNING)
+ elog(FATAL, "cannot select from table %s.%s: error code %d",
+ table->schema, table->name, ret);
+
+ if (SPI_processed > 0)
+ {
+ bool isnull;
+ int32 val;
+
+ val = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0],
+ SPI_tuptable->tupdesc,
+ 1, &isnull));
+ if (!isnull)
+ elog(LOG, "%s: count in %s.%s is now %d",
+ MyBgworkerEntry->bgw_name,
+ table->schema, table->name, val);
+ }
+
+ /*
+ * And finish our transaction.
+ */
+ SPI_finish();
+ PopActiveSnapshot();
+ CommitTransactionCommand();
+ pgstat_report_activity(STATE_IDLE, NULL);
+ }
+
+ proc_exit(1);
+}
+
+/*
+ * Entrypoint of this module.
+ *
+ * We register more than one worker process here, to demonstrate how that can
+ * be done.
+ */
+void
+_PG_init(void)
+{
+ BackgroundWorker worker;
+ unsigned int i;
+
+ /* get the configuration */
+ DefineCustomIntVariable("worker_spi.naptime",
+ "Duration between each check (in seconds).",
+ NULL,
+ &worker_spi_naptime,
+ 10,
+ 1,
+ INT_MAX,
+ PGC_SIGHUP,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ if (!process_shared_preload_libraries_in_progress)
+ return;
+
+ DefineCustomIntVariable("worker_spi.total_workers",
+ "Number of workers.",
+ NULL,
+ &worker_spi_total_workers,
+ 2,
+ 1,
+ 100,
+ PGC_POSTMASTER,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ /* set up common data for all our workers */
+ worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
+ BGWORKER_BACKEND_DATABASE_CONNECTION;
+ worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
+ worker.bgw_restart_time = BGW_NEVER_RESTART;
+ worker.bgw_main = worker_spi_main;
+ worker.bgw_notify_pid = 0;
+
+ /*
+ * Now fill in worker-specific data, and do the actual registrations.
+ */
+ for (i = 1; i <= worker_spi_total_workers; i++)
+ {
+ snprintf(worker.bgw_name, BGW_MAXLEN, "worker %d", i);
+ worker.bgw_main_arg = Int32GetDatum(i);
+
+ RegisterBackgroundWorker(&worker);
+ }
+}
+
+/*
+ * Dynamically launch an SPI worker.
+ */
+Datum
+worker_spi_launch(PG_FUNCTION_ARGS)
+{
+ int32 i = PG_GETARG_INT32(0);
+ BackgroundWorker worker;
+ BackgroundWorkerHandle *handle;
+ BgwHandleStatus status;
+ pid_t pid;
+
+ worker.bgw_flags = BGWORKER_SHMEM_ACCESS |
+ BGWORKER_BACKEND_DATABASE_CONNECTION;
+ worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
+ worker.bgw_restart_time = BGW_NEVER_RESTART;
+ worker.bgw_main = NULL; /* new worker might not have library loaded */
+ sprintf(worker.bgw_library_name, "worker_spi");
+ sprintf(worker.bgw_function_name, "worker_spi_main");
+ snprintf(worker.bgw_name, BGW_MAXLEN, "worker %d", i);
+ worker.bgw_main_arg = Int32GetDatum(i);
+ /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */
+ worker.bgw_notify_pid = MyProcPid;
+
+ if (!RegisterDynamicBackgroundWorker(&worker, &handle))
+ PG_RETURN_NULL();
+
+ status = WaitForBackgroundWorkerStartup(handle, &pid);
+
+ if (status == BGWH_STOPPED)
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("could not start background process"),
+ errhint("More details may be available in the server log.")));
+ if (status == BGWH_POSTMASTER_DIED)
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("cannot start background processes without postmaster"),
+ errhint("Kill all remaining database processes and restart the database.")));
+ Assert(status == BGWH_STARTED);
+
+ PG_RETURN_INT32(pid);
+}
diff --git a/contrib/worker_spi/worker_spi.control b/contrib/worker_spi/worker_spi.control
new file mode 100644
index 0000000000..84d6294628
--- /dev/null
+++ b/contrib/worker_spi/worker_spi.control
@@ -0,0 +1,5 @@
+# worker_spi extension
+comment = 'Sample background worker'
+default_version = '1.0'
+module_pathname = '$libdir/worker_spi'
+relocatable = true
diff --git a/contrib/xml2/expected/xml2.out b/contrib/xml2/expected/xml2.out
index 3bf676fb40..eba6ae6036 100644
--- a/contrib/xml2/expected/xml2.out
+++ b/contrib/xml2/expected/xml2.out
@@ -207,3 +207,18 @@ SELECT xslt_process('<employee><name>cim</name><age>30</age><pay>400</pay></empl
(1 row)
+-- possible security exploit
+SELECT xslt_process('<xml><foo>Hello from XML</foo></xml>',
+$$<xsl:stylesheet version="1.0"
+ xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ xmlns:sax="http://icl.com/saxon"
+ extension-element-prefixes="sax">
+
+ <xsl:template match="//foo">
+ <sax:output href="0wn3d.txt" method="text">
+ <xsl:value-of select="'0wn3d via xml2 extension and libxslt'"/>
+ <xsl:apply-templates/>
+ </sax:output>
+ </xsl:template>
+</xsl:stylesheet>$$);
+ERROR: failed to apply stylesheet
diff --git a/contrib/xml2/expected/xml2_1.out b/contrib/xml2/expected/xml2_1.out
index fda626e08c..bac90e5a2a 100644
--- a/contrib/xml2/expected/xml2_1.out
+++ b/contrib/xml2/expected/xml2_1.out
@@ -151,3 +151,18 @@ SELECT xslt_process('<employee><name>cim</name><age>30</age><pay>400</pay></empl
</xsl:template>
</xsl:stylesheet>$$::text, 'n1="v1",n2="v2",n3="v3",n4="v4",n5="v5",n6="v6",n7="v7",n8="v8",n9="v9",n10="v10",n11="v11",n12="v12"'::text);
ERROR: xslt_process() is not available without libxslt
+-- possible security exploit
+SELECT xslt_process('<xml><foo>Hello from XML</foo></xml>',
+$$<xsl:stylesheet version="1.0"
+ xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ xmlns:sax="http://icl.com/saxon"
+ extension-element-prefixes="sax">
+
+ <xsl:template match="//foo">
+ <sax:output href="0wn3d.txt" method="text">
+ <xsl:value-of select="'0wn3d via xml2 extension and libxslt'"/>
+ <xsl:apply-templates/>
+ </sax:output>
+ </xsl:template>
+</xsl:stylesheet>$$);
+ERROR: xslt_process() is not available without libxslt
diff --git a/contrib/xml2/sql/xml2.sql b/contrib/xml2/sql/xml2.sql
index 4a996af716..ac49cfa7c5 100644
--- a/contrib/xml2/sql/xml2.sql
+++ b/contrib/xml2/sql/xml2.sql
@@ -122,3 +122,18 @@ SELECT xslt_process('<employee><name>cim</name><age>30</age><pay>400</pay></empl
</xsl:element>
</xsl:template>
</xsl:stylesheet>$$::text, 'n1="v1",n2="v2",n3="v3",n4="v4",n5="v5",n6="v6",n7="v7",n8="v8",n9="v9",n10="v10",n11="v11",n12="v12"'::text);
+
+-- possible security exploit
+SELECT xslt_process('<xml><foo>Hello from XML</foo></xml>',
+$$<xsl:stylesheet version="1.0"
+ xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ xmlns:sax="http://icl.com/saxon"
+ extension-element-prefixes="sax">
+
+ <xsl:template match="//foo">
+ <sax:output href="0wn3d.txt" method="text">
+ <xsl:value-of select="'0wn3d via xml2 extension and libxslt'"/>
+ <xsl:apply-templates/>
+ </sax:output>
+ </xsl:template>
+</xsl:stylesheet>$$);
diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c
index 660d25c349..a8b159ebff 100644
--- a/contrib/xml2/xpath.c
+++ b/contrib/xml2/xpath.c
@@ -6,6 +6,7 @@
*/
#include "postgres.h"
+#include "access/htup_details.h"
#include "executor/spi.h"
#include "fmgr.h"
#include "funcapi.h"
@@ -25,17 +26,6 @@
PG_MODULE_MAGIC;
-/* externally accessible functions */
-
-Datum xml_is_well_formed(PG_FUNCTION_ARGS);
-Datum xml_encode_special_chars(PG_FUNCTION_ARGS);
-Datum xpath_nodeset(PG_FUNCTION_ARGS);
-Datum xpath_string(PG_FUNCTION_ARGS);
-Datum xpath_number(PG_FUNCTION_ARGS);
-Datum xpath_bool(PG_FUNCTION_ARGS);
-Datum xpath_list(PG_FUNCTION_ARGS);
-Datum xpath_table(PG_FUNCTION_ARGS);
-
/* exported for use by xslt_proc.c */
PgXmlErrorContext *pgxml_parser_init(PgXmlStrictness strictness);
@@ -719,7 +709,7 @@ xpath_table(PG_FUNCTION_ARGS)
/*
* Clear the values array, so that not-well-formed documents
- * return NULL in all columns. Note that this also means that
+ * return NULL in all columns. Note that this also means that
* spare columns will be NULL.
*/
for (j = 0; j < ret_tupdesc->natts; j++)
diff --git a/contrib/xml2/xslt_proc.c b/contrib/xml2/xslt_proc.c
index a93931d261..9f13787332 100644
--- a/contrib/xml2/xslt_proc.c
+++ b/contrib/xml2/xslt_proc.c
@@ -26,15 +26,12 @@
#include <libxslt/xslt.h>
#include <libxslt/xsltInternals.h>
+#include <libxslt/security.h>
#include <libxslt/transform.h>
#include <libxslt/xsltutils.h>
#endif /* USE_LIBXSLT */
-/* externally accessible functions */
-
-Datum xslt_process(PG_FUNCTION_ARGS);
-
#ifdef USE_LIBXSLT
/* declarations to come from xpath.c */
@@ -61,7 +58,8 @@ xslt_process(PG_FUNCTION_ARGS)
volatile xsltStylesheetPtr stylesheet = NULL;
volatile xmlDocPtr doctree = NULL;
volatile xmlDocPtr restree = NULL;
- volatile xmlDocPtr ssdoc = NULL;
+ volatile xsltSecurityPrefsPtr xslt_sec_prefs = NULL;
+ volatile xsltTransformContextPtr xslt_ctxt = NULL;
volatile int resstat = -1;
xmlChar *resstr = NULL;
int reslen = 0;
@@ -83,36 +81,62 @@ xslt_process(PG_FUNCTION_ARGS)
PG_TRY();
{
- /* Check to see if document is a file or a literal */
+ xmlDocPtr ssdoc;
+ bool xslt_sec_prefs_error;
- if (VARDATA(doct)[0] == '<')
- doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ);
- else
- doctree = xmlParseFile(text_to_cstring(doct));
+ /* Parse document */
+ doctree = xmlParseMemory((char *) VARDATA(doct),
+ VARSIZE(doct) - VARHDRSZ);
if (doctree == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"error parsing XML document");
/* Same for stylesheet */
- if (VARDATA(ssheet)[0] == '<')
- {
- ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
- VARSIZE(ssheet) - VARHDRSZ);
- if (ssdoc == NULL)
- xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
- "error parsing stylesheet as XML document");
+ ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
+ VARSIZE(ssheet) - VARHDRSZ);
- stylesheet = xsltParseStylesheetDoc(ssdoc);
- }
- else
- stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet));
+ if (ssdoc == NULL)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
+ "error parsing stylesheet as XML document");
+
+ /* After this call we need not free ssdoc separately */
+ stylesheet = xsltParseStylesheetDoc(ssdoc);
if (stylesheet == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
"failed to parse stylesheet");
- restree = xsltApplyStylesheet(stylesheet, doctree, params);
+ xslt_ctxt = xsltNewTransformContext(stylesheet, doctree);
+
+ xslt_sec_prefs_error = false;
+ if ((xslt_sec_prefs = xsltNewSecurityPrefs()) == NULL)
+ xslt_sec_prefs_error = true;
+
+ if (xsltSetSecurityPrefs(xslt_sec_prefs, XSLT_SECPREF_READ_FILE,
+ xsltSecurityForbid) != 0)
+ xslt_sec_prefs_error = true;
+ if (xsltSetSecurityPrefs(xslt_sec_prefs, XSLT_SECPREF_WRITE_FILE,
+ xsltSecurityForbid) != 0)
+ xslt_sec_prefs_error = true;
+ if (xsltSetSecurityPrefs(xslt_sec_prefs, XSLT_SECPREF_CREATE_DIRECTORY,
+ xsltSecurityForbid) != 0)
+ xslt_sec_prefs_error = true;
+ if (xsltSetSecurityPrefs(xslt_sec_prefs, XSLT_SECPREF_READ_NETWORK,
+ xsltSecurityForbid) != 0)
+ xslt_sec_prefs_error = true;
+ if (xsltSetSecurityPrefs(xslt_sec_prefs, XSLT_SECPREF_WRITE_NETWORK,
+ xsltSecurityForbid) != 0)
+ xslt_sec_prefs_error = true;
+ if (xsltSetCtxtSecurityPrefs(xslt_sec_prefs, xslt_ctxt) != 0)
+ xslt_sec_prefs_error = true;
+
+ if (xslt_sec_prefs_error)
+ ereport(ERROR,
+ (errmsg("could not set libxslt security preferences")));
+
+ restree = xsltApplyStylesheetUser(stylesheet, doctree, params,
+ NULL, NULL, xslt_ctxt);
if (restree == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
@@ -128,6 +152,10 @@ xslt_process(PG_FUNCTION_ARGS)
xmlFreeDoc(restree);
if (doctree != NULL)
xmlFreeDoc(doctree);
+ if (xslt_sec_prefs != NULL)
+ xsltFreeSecurityPrefs(xslt_sec_prefs);
+ if (xslt_ctxt != NULL)
+ xsltFreeTransformContext(xslt_ctxt);
xsltCleanupGlobals();
pg_xml_done(xmlerrcxt, true);
@@ -139,6 +167,8 @@ xslt_process(PG_FUNCTION_ARGS)
xsltFreeStylesheet(stylesheet);
xmlFreeDoc(restree);
xmlFreeDoc(doctree);
+ xsltFreeSecurityPrefs(xslt_sec_prefs);
+ xsltFreeTransformContext(xslt_ctxt);
xsltCleanupGlobals();
pg_xml_done(xmlerrcxt, false);