| author | Pavan Deolasee | 2015-06-04 08:02:02 +0000 |
|---|---|---|
| committer | Pavan Deolasee | 2015-06-04 08:02:02 +0000 |
| commit | 1e86f652fa5e214a284f9862c67bb976317ac730 | |
| tree | 2478baa40ebd47196528ee0a5aa6f6921d8e5bd9 /contrib | |
| parent | 3165b5fde927ff766921270bd56d3236b6c09c21 | |
| parent | 4cb7d671fddc8855c8def2de51fb23df1c8ac0af | |
Merge remote-tracking branch 'remotes/PGSQL/master' into XL_NEW_MASTER
Conflicts:
COPYRIGHT
configure
configure.in
contrib/Makefile
doc/bug.template
src/backend/access/common/heaptuple.c
src/backend/access/common/printtup.c
src/backend/access/transam/Makefile
src/backend/access/transam/clog.c
src/backend/access/transam/twophase.c
src/backend/access/transam/varsup.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/bootstrap/bootstrap.c
src/backend/catalog/Makefile
src/backend/catalog/catalog.c
src/backend/catalog/dependency.c
src/backend/catalog/genbki.pl
src/backend/catalog/namespace.c
src/backend/catalog/pg_aggregate.c
src/backend/catalog/pg_proc.c
src/backend/catalog/storage.c
src/backend/commands/aggregatecmds.c
src/backend/commands/analyze.c
src/backend/commands/comment.c
src/backend/commands/copy.c
src/backend/commands/dbcommands.c
src/backend/commands/event_trigger.c
src/backend/commands/explain.c
src/backend/commands/indexcmds.c
src/backend/commands/portalcmds.c
src/backend/commands/schemacmds.c
src/backend/commands/sequence.c
src/backend/commands/tablecmds.c
src/backend/commands/trigger.c
src/backend/commands/vacuum.c
src/backend/commands/variable.c
src/backend/commands/view.c
src/backend/executor/execAmi.c
src/backend/executor/execCurrent.c
src/backend/executor/execMain.c
src/backend/executor/execProcnode.c
src/backend/executor/execTuples.c
src/backend/executor/execUtils.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeSubplan.c
src/backend/executor/nodeWindowAgg.c
src/backend/libpq/hba.c
src/backend/nodes/copyfuncs.c
src/backend/nodes/equalfuncs.c
src/backend/nodes/outfuncs.c
src/backend/nodes/readfuncs.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/plan/createplan.c
src/backend/optimizer/plan/planagg.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/plan/setrefs.c
src/backend/optimizer/plan/subselect.c
src/backend/optimizer/prep/preptlist.c
src/backend/optimizer/prep/prepunion.c
src/backend/optimizer/util/pathnode.c
src/backend/optimizer/util/plancat.c
src/backend/parser/analyze.c
src/backend/parser/gram.y
src/backend/parser/parse_agg.c
src/backend/parser/parse_relation.c
src/backend/parser/parse_utilcmd.c
src/backend/postmaster/autovacuum.c
src/backend/postmaster/pgstat.c
src/backend/postmaster/postmaster.c
src/backend/replication/logical/decode.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/ipc/ipci.c
src/backend/storage/ipc/procarray.c
src/backend/storage/ipc/procsignal.c
src/backend/storage/lmgr/lock.c
src/backend/storage/lmgr/lwlock.c
src/backend/storage/lmgr/proc.c
src/backend/tcop/dest.c
src/backend/tcop/postgres.c
src/backend/tcop/pquery.c
src/backend/tcop/utility.c
src/backend/utils/adt/arrayfuncs.c
src/backend/utils/adt/date.c
src/backend/utils/adt/dbsize.c
src/backend/utils/adt/pseudotypes.c
src/backend/utils/adt/ri_triggers.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/version.c
src/backend/utils/cache/inval.c
src/backend/utils/cache/lsyscache.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/init/globals.c
src/backend/utils/init/miscinit.c
src/backend/utils/init/postinit.c
src/backend/utils/misc/guc.c
src/backend/utils/mmgr/portalmem.c
src/backend/utils/sort/tuplesort.c
src/backend/utils/sort/tuplestore.c
src/backend/utils/time/combocid.c
src/backend/utils/time/snapmgr.c
src/bin/Makefile
src/bin/initdb/initdb.c
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_dump/pg_dump.c
src/bin/pgbench/pgbench.c
src/bin/psql/tab-complete.c
src/include/access/htup.h
src/include/access/rmgrlist.h
src/include/access/transam.h
src/include/access/xact.h
src/include/catalog/catalog.h
src/include/catalog/namespace.h
src/include/catalog/pg_aggregate.h
src/include/catalog/pg_namespace.h
src/include/catalog/pg_proc.h
src/include/catalog/pg_type.h
src/include/commands/explain.h
src/include/commands/sequence.h
src/include/commands/vacuum.h
src/include/commands/variable.h
src/include/executor/execdesc.h
src/include/executor/executor.h
src/include/executor/tuptable.h
src/include/miscadmin.h
src/include/nodes/execnodes.h
src/include/nodes/nodes.h
src/include/nodes/params.h
src/include/nodes/parsenodes.h
src/include/nodes/plannodes.h
src/include/nodes/primnodes.h
src/include/nodes/relation.h
src/include/optimizer/cost.h
src/include/optimizer/pathnode.h
src/include/optimizer/planmain.h
src/include/parser/analyze.h
src/include/parser/parse_agg.h
src/include/parser/parse_utilcmd.h
src/include/pg_config.h.win32
src/include/pgstat.h
src/include/storage/backendid.h
src/include/storage/barrier.h
src/include/storage/lwlock.h
src/include/storage/proc.h
src/include/storage/procarray.h
src/include/storage/procsignal.h
src/include/storage/smgr.h
src/include/tcop/dest.h
src/include/tcop/pquery.h
src/include/utils/builtins.h
src/include/utils/guc.h
src/include/utils/lsyscache.h
src/include/utils/plancache.h
src/include/utils/portal.h
src/include/utils/rel.h
src/include/utils/tuplesort.h
src/include/utils/tuplestore.h
src/test/regress/expected/aggregates.out
src/test/regress/expected/create_index.out
src/test/regress/expected/foreign_data.out
src/test/regress/expected/join.out
src/test/regress/expected/macaddr.out
src/test/regress/expected/polygon.out
src/test/regress/expected/rangetypes.out
src/test/regress/expected/update.out
src/test/regress/input/constraints.source
src/test/regress/pg_regress.c
src/test/regress/serial_schedule
src/test/regress/sql/rangetypes.sql
Diffstat (limited to 'contrib')
339 files changed, 6330 insertions, 15825 deletions
diff --git a/contrib/Makefile b/contrib/Makefile
index 9b6ac2ec5a..b8230aadf7 100644
--- a/contrib/Makefile
+++ b/contrib/Makefile
@@ -16,7 +16,6 @@ SUBDIRS = \
		dblink \
		dict_int \
		dict_xsyn \
-		dummy_seclabel \
		earthdistance \
		file_fdw \
		fuzzystrmatch \
@@ -29,7 +28,6 @@ SUBDIRS = \
		oid2name \
		pageinspect \
		passwordcheck \
-		pg_archivecleanup \
		pg_buffercache \
		pg_freespacemap \
		pg_prewarm \
@@ -38,9 +36,6 @@ SUBDIRS = \
		pg_test_fsync \
		pg_test_timing \
		pg_trgm \
-		pg_upgrade \
-		pg_upgrade_support \
-		pgbench \
		pgcrypto \
		pgrowlocks \
		pgstattuple \
@@ -53,13 +48,10 @@ SUBDIRS = \
		tablefunc \
		tcn \
		test_decoding \
-		test_parser \
-		test_shm_mq \
		tsearch2 \
		unaccent \
		vacuumlo \
-		stormstats \
-		worker_spi
+		stormstats

ifeq ($(with_openssl),yes)
SUBDIRS += sslinfo

diff --git a/contrib/adminpack/Makefile b/contrib/adminpack/Makefile
index 5cbc8f0c71..f065f84bfb 100644
--- a/contrib/adminpack/Makefile
+++ b/contrib/adminpack/Makefile
@@ -1,11 +1,12 @@
# contrib/adminpack/Makefile

MODULE_big = adminpack
-OBJS = adminpack.o
+OBJS = adminpack.o $(WIN32RES)
PG_CPPFLAGS = -I$(libpq_srcdir)

EXTENSION = adminpack
DATA = adminpack--1.0.sql
+PGFILEDESC = "adminpack - support functions for pgAdmin"

ifdef USE_PGXS
PG_CONFIG = pg_config

diff --git a/contrib/adminpack/adminpack.c b/contrib/adminpack/adminpack.c
index 8193b1f1d0..2937a9caca 100644
--- a/contrib/adminpack/adminpack.c
+++ b/contrib/adminpack/adminpack.c
@@ -3,7 +3,7 @@
 * adminpack.c
 *
 *
- * Copyright (c) 2002-2014, PostgreSQL Global Development Group
+ * Copyright (c) 2002-2015, PostgreSQL Global Development Group
 *
 * Author: Andreas Pflug <pgadmin@pse-consulting.de>
 *

diff --git a/contrib/auth_delay/Makefile b/contrib/auth_delay/Makefile
index 09d2d5418c..4b86ec37f0 100644
--- a/contrib/auth_delay/Makefile
+++ b/contrib/auth_delay/Makefile
@@ -1,6 +1,7 @@
# contrib/auth_delay/Makefile

MODULES = auth_delay
+PGFILEDESC = "auth_delay - delay authentication failure reports"

ifdef USE_PGXS
PG_CONFIG = pg_config

diff --git a/contrib/auto_explain/Makefile b/contrib/auto_explain/Makefile
index 2d1443fe48..fcf36c5ab0 100644
--- a/contrib/auto_explain/Makefile
+++ b/contrib/auto_explain/Makefile
@@ -1,7 +1,8 @@
# contrib/auto_explain/Makefile

MODULE_big = auto_explain
-OBJS = auto_explain.o
+OBJS = auto_explain.o $(WIN32RES)
+PGFILEDESC = "auto_explain - logging facility for execution plans"

ifdef USE_PGXS
PG_CONFIG = pg_config

diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c
index cbbd25753f..2a184ed886 100644
--- a/contrib/auto_explain/auto_explain.c
+++ b/contrib/auto_explain/auto_explain.c
@@ -3,7 +3,7 @@
 * auto_explain.c
 *
 *
- * Copyright (c) 2008-2014, PostgreSQL Global Development Group
+ * Copyright (c) 2008-2015, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *	  contrib/auto_explain/auto_explain.c
@@ -26,7 +26,7 @@
static bool auto_explain_log_analyze = false;
static bool auto_explain_log_verbose = false;
static bool auto_explain_log_buffers = false;
static bool auto_explain_log_triggers = false;
-static bool auto_explain_log_timing = false;
+static bool auto_explain_log_timing = true;
static int auto_explain_log_format = EXPLAIN_FORMAT_TEXT;
static bool auto_explain_log_nested_statements = false;
@@ -200,8 +200,6 @@ explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
			queryDesc->instrument_options |= INSTRUMENT_TIMER;
		else
			queryDesc->instrument_options |= INSTRUMENT_ROWS;
-
-
		if (auto_explain_log_buffers)
			queryDesc->instrument_options |= INSTRUMENT_BUFFERS;
	}
@@ -296,30 +294,31 @@ explain_ExecutorEnd(QueryDesc *queryDesc)
		msec = queryDesc->totaltime->total * 1000.0;
		if (msec >= auto_explain_log_min_duration)
		{
-			ExplainState es;
-
-			ExplainInitState(&es);
-			es.analyze = (queryDesc->instrument_options && auto_explain_log_analyze);
-			es.verbose = auto_explain_log_verbose;
-			es.buffers = (es.analyze && auto_explain_log_buffers);
-			es.format = auto_explain_log_format;
-
-			ExplainBeginOutput(&es);
-			ExplainQueryText(&es, queryDesc);
-			ExplainPrintPlan(&es, queryDesc);
-			if (es.analyze && auto_explain_log_triggers)
-				ExplainPrintTriggers(&es, queryDesc);
-			ExplainEndOutput(&es);
+			ExplainState *es = NewExplainState();
+
+			es->analyze = (queryDesc->instrument_options && auto_explain_log_analyze);
+			es->verbose = auto_explain_log_verbose;
+			es->buffers = (es->analyze && auto_explain_log_buffers);
+			es->timing = (es->analyze && auto_explain_log_timing);
+			es->summary = es->analyze;
+			es->format = auto_explain_log_format;
+
+			ExplainBeginOutput(es);
+			ExplainQueryText(es, queryDesc);
+			ExplainPrintPlan(es, queryDesc);
+			if (es->analyze && auto_explain_log_triggers)
+				ExplainPrintTriggers(es, queryDesc);
+			ExplainEndOutput(es);

			/* Remove last line break */
-			if (es.str->len > 0 && es.str->data[es.str->len - 1] == '\n')
-				es.str->data[--es.str->len] = '\0';
+			if (es->str->len > 0 && es->str->data[es->str->len - 1] == '\n')
+				es->str->data[--es->str->len] = '\0';

			/* Fix JSON to output an object */
			if (auto_explain_log_format == EXPLAIN_FORMAT_JSON)
			{
-				es.str->data[0] = '{';
-				es.str->data[es.str->len - 1] = '}';
+				es->str->data[0] = '{';
+				es->str->data[es->str->len - 1] = '}';
			}

			/*
@@ -330,10 +329,10 @@ explain_ExecutorEnd(QueryDesc *queryDesc)
			 */
			ereport(LOG,
					(errmsg("duration: %.3f ms plan:\n%s",
-							msec, es.str->data),
+							msec, es->str->data),
					 errhidestmt(true)));

-			pfree(es.str->data);
+			pfree(es->str->data);
		}
	}

diff --git a/contrib/btree_gin/Makefile b/contrib/btree_gin/Makefile
index 09fd3e6e11..0492091599 100644
--- a/contrib/btree_gin/Makefile
+++ b/contrib/btree_gin/Makefile
@@ -1,10 +1,11 @@
# contrib/btree_gin/Makefile

MODULE_big = btree_gin
-OBJS = btree_gin.o
+OBJS = btree_gin.o $(WIN32RES)

EXTENSION = btree_gin
DATA = btree_gin--1.0.sql btree_gin--unpackaged--1.0.sql
+PGFILEDESC = "btree_gin - B-tree equivalent GIN operator classes"

REGRESS = install_btree_gin int2 int4 int8 float4 float8 money oid \
	timestamp timestamptz time timetz date interval \

diff --git a/contrib/btree_gin/btree_gin--unpackaged--1.0.sql b/contrib/btree_gin/btree_gin--unpackaged--1.0.sql
index 8dfafc1e8b..3dae2dd38f 100644
--- a/contrib/btree_gin/btree_gin--unpackaged--1.0.sql
+++ b/contrib/btree_gin/btree_gin--unpackaged--1.0.sql
@@ -1,7 +1,7 @@
/* contrib/btree_gin/btree_gin--unpackaged--1.0.sql */

-- complain if script is sourced in psql, rather than via CREATE EXTENSION
-\echo Use "CREATE EXTENSION btree_gin" to load this file. \quit
+\echo Use "CREATE EXTENSION btree_gin FROM unpackaged" to load this file.
\quit ALTER EXTENSION btree_gin ADD function gin_btree_consistent(internal,smallint,anyelement,integer,internal,internal); ALTER EXTENSION btree_gin ADD function gin_extract_value_int2(smallint,internal); diff --git a/contrib/btree_gin/btree_gin.c b/contrib/btree_gin/btree_gin.c index 87d23e0350..1a5bb3cdc6 100644 --- a/contrib/btree_gin/btree_gin.c +++ b/contrib/btree_gin/btree_gin.c @@ -17,34 +17,30 @@ PG_MODULE_MAGIC; -typedef struct TypeInfo -{ - bool is_varlena; - Datum (*leftmostvalue) (void); - Datum (*typecmp) (FunctionCallInfo); -} TypeInfo; - typedef struct QueryInfo { StrategyNumber strategy; Datum datum; + bool is_varlena; + Datum (*typecmp) (FunctionCallInfo); } QueryInfo; -#define GIN_EXTRACT_VALUE(type) \ -PG_FUNCTION_INFO_V1(gin_extract_value_##type); \ -Datum \ -gin_extract_value_##type(PG_FUNCTION_ARGS) \ -{ \ - Datum datum = PG_GETARG_DATUM(0); \ - int32 *nentries = (int32 *) PG_GETARG_POINTER(1); \ - Datum *entries = (Datum *) palloc(sizeof(Datum)); \ - \ - if ( TypeInfo_##type.is_varlena ) \ - datum = PointerGetDatum(PG_DETOAST_DATUM(datum)); \ - entries[0] = datum; \ - *nentries = 1; \ - \ - PG_RETURN_POINTER(entries); \ + +/*** GIN support functions shared by all datatypes ***/ + +static Datum +gin_btree_extract_value(FunctionCallInfo fcinfo, bool is_varlena) +{ + Datum datum = PG_GETARG_DATUM(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + Datum *entries = (Datum *) palloc(sizeof(Datum)); + + if (is_varlena) + datum = PointerGetDatum(PG_DETOAST_DATUM(datum)); + entries[0] = datum; + *nentries = 1; + + PG_RETURN_POINTER(entries); } /* @@ -55,49 +51,51 @@ gin_extract_value_##type(PG_FUNCTION_ARGS) \ * key, and work forward until the supplied query datum (which must be * sent along inside the QueryInfo structure). */ +static Datum +gin_btree_extract_query(FunctionCallInfo fcinfo, + bool is_varlena, + Datum (*leftmostvalue) (void), + Datum (*typecmp) (FunctionCallInfo)) +{ + Datum datum = PG_GETARG_DATUM(0); + int32 *nentries = (int32 *) PG_GETARG_POINTER(1); + StrategyNumber strategy = PG_GETARG_UINT16(2); + bool **partialmatch = (bool **) PG_GETARG_POINTER(3); + Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4); + Datum *entries = (Datum *) palloc(sizeof(Datum)); + QueryInfo *data = (QueryInfo *) palloc(sizeof(QueryInfo)); + bool *ptr_partialmatch; + + *nentries = 1; + ptr_partialmatch = *partialmatch = (bool *) palloc(sizeof(bool)); + *ptr_partialmatch = false; + if (is_varlena) + datum = PointerGetDatum(PG_DETOAST_DATUM(datum)); + data->strategy = strategy; + data->datum = datum; + data->is_varlena = is_varlena; + data->typecmp = typecmp; + *extra_data = (Pointer *) palloc(sizeof(Pointer)); + **extra_data = (Pointer) data; + + switch (strategy) + { + case BTLessStrategyNumber: + case BTLessEqualStrategyNumber: + entries[0] = leftmostvalue(); + *ptr_partialmatch = true; + break; + case BTGreaterEqualStrategyNumber: + case BTGreaterStrategyNumber: + *ptr_partialmatch = true; + case BTEqualStrategyNumber: + entries[0] = datum; + break; + default: + elog(ERROR, "unrecognized strategy number: %d", strategy); + } -#define GIN_EXTRACT_QUERY(type) \ -PG_FUNCTION_INFO_V1(gin_extract_query_##type); \ -Datum \ -gin_extract_query_##type(PG_FUNCTION_ARGS) \ -{ \ - Datum datum = PG_GETARG_DATUM(0); \ - int32 *nentries = (int32 *) PG_GETARG_POINTER(1); \ - StrategyNumber strategy = PG_GETARG_UINT16(2); \ - bool **partialmatch = (bool **) PG_GETARG_POINTER(3); \ - Pointer **extra_data = (Pointer **) PG_GETARG_POINTER(4); \ - Datum *entries = (Datum *) 
palloc(sizeof(Datum)); \ - QueryInfo *data = (QueryInfo *) palloc(sizeof(QueryInfo)); \ - bool *ptr_partialmatch; \ - \ - *nentries = 1; \ - ptr_partialmatch = *partialmatch = (bool *) palloc(sizeof(bool)); \ - *ptr_partialmatch = false; \ - if ( TypeInfo_##type.is_varlena ) \ - datum = PointerGetDatum(PG_DETOAST_DATUM(datum)); \ - data->strategy = strategy; \ - data->datum = datum; \ - *extra_data = (Pointer *) palloc(sizeof(Pointer)); \ - **extra_data = (Pointer) data; \ - \ - switch (strategy) \ - { \ - case BTLessStrategyNumber: \ - case BTLessEqualStrategyNumber: \ - entries[0] = TypeInfo_##type.leftmostvalue(); \ - *ptr_partialmatch = true; \ - break; \ - case BTGreaterEqualStrategyNumber: \ - case BTGreaterStrategyNumber: \ - *ptr_partialmatch = true; \ - case BTEqualStrategyNumber: \ - entries[0] = datum; \ - break; \ - default: \ - elog(ERROR, "unrecognized strategy number: %d", strategy); \ - } \ - \ - PG_RETURN_POINTER(entries); \ + PG_RETURN_POINTER(entries); } /* @@ -105,78 +103,70 @@ gin_extract_query_##type(PG_FUNCTION_ARGS) \ * strategy it is a left-most value. So, use original datum from QueryInfo * to decide to stop scanning or not. Datum b is always from index. */ -#define GIN_COMPARE_PREFIX(type) \ -PG_FUNCTION_INFO_V1(gin_compare_prefix_##type); \ -Datum \ -gin_compare_prefix_##type(PG_FUNCTION_ARGS) \ -{ \ - Datum a = PG_GETARG_DATUM(0); \ - Datum b = PG_GETARG_DATUM(1); \ - QueryInfo *data = (QueryInfo *) PG_GETARG_POINTER(3); \ - int32 res, \ - cmp; \ - \ - cmp = DatumGetInt32(DirectFunctionCall2Coll( \ - TypeInfo_##type.typecmp, \ - PG_GET_COLLATION(), \ - (data->strategy == BTLessStrategyNumber || \ - data->strategy == BTLessEqualStrategyNumber) \ - ? data->datum : a, \ - b)); \ - \ - switch (data->strategy) \ - { \ - case BTLessStrategyNumber: \ - /* If original datum > indexed one then return match */ \ - if (cmp > 0) \ - res = 0; \ - else \ - res = 1; \ - break; \ - case BTLessEqualStrategyNumber: \ - /* The same except equality */ \ - if (cmp >= 0) \ - res = 0; \ - else \ - res = 1; \ - break; \ - case BTEqualStrategyNumber: \ - if (cmp != 0) \ - res = 1; \ - else \ - res = 0; \ - break; \ - case BTGreaterEqualStrategyNumber: \ - /* If original datum <= indexed one then return match */ \ - if (cmp <= 0) \ - res = 0; \ - else \ - res = 1; \ - break; \ - case BTGreaterStrategyNumber: \ - /* If original datum <= indexed one then return match */ \ - /* If original datum == indexed one then continue scan */ \ - if (cmp < 0) \ - res = 0; \ - else if (cmp == 0) \ - res = -1; \ - else \ - res = 1; \ - break; \ - default: \ - elog(ERROR, "unrecognized strategy number: %d", \ - data->strategy); \ - res = 0; \ - } \ - \ - PG_RETURN_INT32(res); \ -} - -#define GIN_SUPPORT(type) \ - GIN_EXTRACT_VALUE(type) \ - GIN_EXTRACT_QUERY(type) \ - GIN_COMPARE_PREFIX(type) +static Datum +gin_btree_compare_prefix(FunctionCallInfo fcinfo) +{ + Datum a = PG_GETARG_DATUM(0); + Datum b = PG_GETARG_DATUM(1); + QueryInfo *data = (QueryInfo *) PG_GETARG_POINTER(3); + int32 res, + cmp; + + cmp = DatumGetInt32(DirectFunctionCall2Coll( + data->typecmp, + PG_GET_COLLATION(), + (data->strategy == BTLessStrategyNumber || + data->strategy == BTLessEqualStrategyNumber) + ? 
data->datum : a, + b)); + + switch (data->strategy) + { + case BTLessStrategyNumber: + /* If original datum > indexed one then return match */ + if (cmp > 0) + res = 0; + else + res = 1; + break; + case BTLessEqualStrategyNumber: + /* The same except equality */ + if (cmp >= 0) + res = 0; + else + res = 1; + break; + case BTEqualStrategyNumber: + if (cmp != 0) + res = 1; + else + res = 0; + break; + case BTGreaterEqualStrategyNumber: + /* If original datum <= indexed one then return match */ + if (cmp <= 0) + res = 0; + else + res = 1; + break; + case BTGreaterStrategyNumber: + /* If original datum <= indexed one then return match */ + /* If original datum == indexed one then continue scan */ + if (cmp < 0) + res = 0; + else if (cmp == 0) + res = -1; + else + res = 1; + break; + default: + elog(ERROR, "unrecognized strategy number: %d", + data->strategy); + res = 0; + } + PG_RETURN_INT32(res); +} PG_FUNCTION_INFO_V1(gin_btree_consistent); Datum @@ -188,23 +178,45 @@ gin_btree_consistent(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); } +/*** GIN_SUPPORT macro defines the datatype specific functions ***/ + +#define GIN_SUPPORT(type, is_varlena, leftmostvalue, typecmp) \ +PG_FUNCTION_INFO_V1(gin_extract_value_##type); \ +Datum \ +gin_extract_value_##type(PG_FUNCTION_ARGS) \ +{ \ + return gin_btree_extract_value(fcinfo, is_varlena); \ +} \ +PG_FUNCTION_INFO_V1(gin_extract_query_##type); \ +Datum \ +gin_extract_query_##type(PG_FUNCTION_ARGS) \ +{ \ + return gin_btree_extract_query(fcinfo, \ + is_varlena, leftmostvalue, typecmp); \ +} \ +PG_FUNCTION_INFO_V1(gin_compare_prefix_##type); \ +Datum \ +gin_compare_prefix_##type(PG_FUNCTION_ARGS) \ +{ \ + return gin_btree_compare_prefix(fcinfo); \ +} + + +/*** Datatype specifications ***/ + static Datum leftmostvalue_int2(void) { return Int16GetDatum(SHRT_MIN); } -static TypeInfo TypeInfo_int2 = {false, leftmostvalue_int2, btint2cmp}; - -GIN_SUPPORT(int2) +GIN_SUPPORT(int2, false, leftmostvalue_int2, btint2cmp) static Datum leftmostvalue_int4(void) { return Int32GetDatum(INT_MIN); } -static TypeInfo TypeInfo_int4 = {false, leftmostvalue_int4, btint4cmp}; - -GIN_SUPPORT(int4) +GIN_SUPPORT(int4, false, leftmostvalue_int4, btint4cmp) static Datum leftmostvalue_int8(void) @@ -214,27 +226,21 @@ leftmostvalue_int8(void) */ return Int64GetDatum(SEQ_MINVALUE); } -static TypeInfo TypeInfo_int8 = {false, leftmostvalue_int8, btint8cmp}; - -GIN_SUPPORT(int8) +GIN_SUPPORT(int8, false, leftmostvalue_int8, btint8cmp) static Datum leftmostvalue_float4(void) { return Float4GetDatum(-get_float4_infinity()); } -static TypeInfo TypeInfo_float4 = {false, leftmostvalue_float4, btfloat4cmp}; - -GIN_SUPPORT(float4) +GIN_SUPPORT(float4, false, leftmostvalue_float4, btfloat4cmp) static Datum leftmostvalue_float8(void) { return Float8GetDatum(-get_float8_infinity()); } -static TypeInfo TypeInfo_float8 = {false, leftmostvalue_float8, btfloat8cmp}; - -GIN_SUPPORT(float8) +GIN_SUPPORT(float8, false, leftmostvalue_float8, btfloat8cmp) static Datum leftmostvalue_money(void) @@ -244,40 +250,30 @@ leftmostvalue_money(void) */ return Int64GetDatum(SEQ_MINVALUE); } -static TypeInfo TypeInfo_money = {false, leftmostvalue_money, cash_cmp}; - -GIN_SUPPORT(money) +GIN_SUPPORT(money, false, leftmostvalue_money, cash_cmp) static Datum leftmostvalue_oid(void) { return ObjectIdGetDatum(0); } -static TypeInfo TypeInfo_oid = {false, leftmostvalue_oid, btoidcmp}; - -GIN_SUPPORT(oid) +GIN_SUPPORT(oid, false, leftmostvalue_oid, btoidcmp) static Datum leftmostvalue_timestamp(void) { return 
TimestampGetDatum(DT_NOBEGIN); } -static TypeInfo TypeInfo_timestamp = {false, leftmostvalue_timestamp, timestamp_cmp}; - -GIN_SUPPORT(timestamp) - -static TypeInfo TypeInfo_timestamptz = {false, leftmostvalue_timestamp, timestamp_cmp}; +GIN_SUPPORT(timestamp, false, leftmostvalue_timestamp, timestamp_cmp) -GIN_SUPPORT(timestamptz) +GIN_SUPPORT(timestamptz, false, leftmostvalue_timestamp, timestamp_cmp) static Datum leftmostvalue_time(void) { return TimeADTGetDatum(0); } -static TypeInfo TypeInfo_time = {false, leftmostvalue_time, time_cmp}; - -GIN_SUPPORT(time) +GIN_SUPPORT(time, false, leftmostvalue_time, time_cmp) static Datum leftmostvalue_timetz(void) @@ -289,18 +285,14 @@ leftmostvalue_timetz(void) return TimeTzADTPGetDatum(v); } -static TypeInfo TypeInfo_timetz = {false, leftmostvalue_timetz, timetz_cmp}; - -GIN_SUPPORT(timetz) +GIN_SUPPORT(timetz, false, leftmostvalue_timetz, timetz_cmp) static Datum leftmostvalue_date(void) { return DateADTGetDatum(DATEVAL_NOBEGIN); } -static TypeInfo TypeInfo_date = {false, leftmostvalue_date, date_cmp}; - -GIN_SUPPORT(date) +GIN_SUPPORT(date, false, leftmostvalue_date, date_cmp) static Datum leftmostvalue_interval(void) @@ -312,9 +304,7 @@ leftmostvalue_interval(void) v->month = 0; return IntervalPGetDatum(v); } -static TypeInfo TypeInfo_interval = {false, leftmostvalue_interval, interval_cmp}; - -GIN_SUPPORT(interval) +GIN_SUPPORT(interval, false, leftmostvalue_interval, interval_cmp) static Datum leftmostvalue_macaddr(void) @@ -323,47 +313,32 @@ leftmostvalue_macaddr(void) return MacaddrPGetDatum(v); } -static TypeInfo TypeInfo_macaddr = {false, leftmostvalue_macaddr, macaddr_cmp}; - -GIN_SUPPORT(macaddr) +GIN_SUPPORT(macaddr, false, leftmostvalue_macaddr, macaddr_cmp) static Datum leftmostvalue_inet(void) { - return DirectFunctionCall3(inet_in, - CStringGetDatum("0.0.0.0/0"), - ObjectIdGetDatum(0), - Int32GetDatum(-1)); + return DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0/0")); } -static TypeInfo TypeInfo_inet = {true, leftmostvalue_inet, network_cmp}; +GIN_SUPPORT(inet, true, leftmostvalue_inet, network_cmp) -GIN_SUPPORT(inet) - -static TypeInfo TypeInfo_cidr = {true, leftmostvalue_inet, network_cmp}; - -GIN_SUPPORT(cidr) +GIN_SUPPORT(cidr, true, leftmostvalue_inet, network_cmp) static Datum leftmostvalue_text(void) { return PointerGetDatum(cstring_to_text_with_len("", 0)); } -static TypeInfo TypeInfo_text = {true, leftmostvalue_text, bttextcmp}; - -GIN_SUPPORT(text) +GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp) static Datum leftmostvalue_char(void) { return CharGetDatum(SCHAR_MIN); } -static TypeInfo TypeInfo_char = {false, leftmostvalue_char, btcharcmp}; - -GIN_SUPPORT(char) - -static TypeInfo TypeInfo_bytea = {true, leftmostvalue_text, byteacmp}; +GIN_SUPPORT(char, false, leftmostvalue_char, btcharcmp) -GIN_SUPPORT(bytea) +GIN_SUPPORT(bytea, true, leftmostvalue_text, byteacmp) static Datum leftmostvalue_bit(void) @@ -373,9 +348,7 @@ leftmostvalue_bit(void) ObjectIdGetDatum(0), Int32GetDatum(-1)); } -static TypeInfo TypeInfo_bit = {true, leftmostvalue_bit, bitcmp}; - -GIN_SUPPORT(bit) +GIN_SUPPORT(bit, true, leftmostvalue_bit, bitcmp) static Datum leftmostvalue_varbit(void) @@ -385,9 +358,7 @@ leftmostvalue_varbit(void) ObjectIdGetDatum(0), Int32GetDatum(-1)); } -static TypeInfo TypeInfo_varbit = {true, leftmostvalue_varbit, bitcmp}; - -GIN_SUPPORT(varbit) +GIN_SUPPORT(varbit, true, leftmostvalue_varbit, bitcmp) /* * Numeric type hasn't a real left-most value, so we use PointerGetDatum(NULL) @@ -431,7 +402,4 @@ 
leftmostvalue_numeric(void) { return PointerGetDatum(NULL); } - -static TypeInfo TypeInfo_numeric = {true, leftmostvalue_numeric, gin_numeric_cmp}; - -GIN_SUPPORT(numeric) +GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp) diff --git a/contrib/btree_gist/Makefile b/contrib/btree_gist/Makefile index ba4af14658..9b7d61dff7 100644 --- a/contrib/btree_gist/Makefile +++ b/contrib/btree_gist/Makefile @@ -6,10 +6,12 @@ OBJS = btree_gist.o btree_utils_num.o btree_utils_var.o btree_int2.o \ btree_int4.o btree_int8.o btree_float4.o btree_float8.o btree_cash.o \ btree_oid.o btree_ts.o btree_time.o btree_date.o btree_interval.o \ btree_macaddr.o btree_inet.o btree_text.o btree_bytea.o btree_bit.o \ - btree_numeric.o + btree_numeric.o $(WIN32RES) EXTENSION = btree_gist -DATA = btree_gist--1.0.sql btree_gist--unpackaged--1.0.sql +DATA = btree_gist--1.1.sql btree_gist--unpackaged--1.0.sql \ + btree_gist--1.0--1.1.sql +PGFILEDESC = "btree_gist - B-tree equivalent GIST operator classes" REGRESS = init int2 int4 int8 float4 float8 cash oid timestamp timestamptz \ time timetz date interval macaddr inet cidr text varchar char bytea \ diff --git a/contrib/btree_gist/btree_bit.c b/contrib/btree_gist/btree_bit.c index 76297515c5..af210439f0 100644 --- a/contrib/btree_gist/btree_bit.c +++ b/contrib/btree_gist/btree_bit.c @@ -99,7 +99,7 @@ gbt_bit_l2n(GBT_VARKEY *leaf) o = gbt_bit_xfrm(r.lower); r.upper = r.lower = o; - out = gbt_var_key_copy(&r, TRUE); + out = gbt_var_key_copy(&r); pfree(o); return out; diff --git a/contrib/btree_gist/btree_cash.c b/contrib/btree_gist/btree_cash.c index 63f86ebeef..aa14735338 100644 --- a/contrib/btree_gist/btree_cash.c +++ b/contrib/btree_gist/btree_cash.c @@ -17,6 +17,7 @@ typedef struct ** Cash ops */ PG_FUNCTION_INFO_V1(gbt_cash_compress); +PG_FUNCTION_INFO_V1(gbt_cash_fetch); PG_FUNCTION_INFO_V1(gbt_cash_union); PG_FUNCTION_INFO_V1(gbt_cash_picksplit); PG_FUNCTION_INFO_V1(gbt_cash_consistent); @@ -119,11 +120,17 @@ Datum gbt_cash_compress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); + PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo)); } +Datum +gbt_cash_fetch(PG_FUNCTION_ARGS) +{ + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + + PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo)); +} Datum gbt_cash_consistent(PG_FUNCTION_ARGS) diff --git a/contrib/btree_gist/btree_date.c b/contrib/btree_gist/btree_date.c index 7a4c6aa600..bb516a9500 100644 --- a/contrib/btree_gist/btree_date.c +++ b/contrib/btree_gist/btree_date.c @@ -17,6 +17,7 @@ typedef struct ** date ops */ PG_FUNCTION_INFO_V1(gbt_date_compress); +PG_FUNCTION_INFO_V1(gbt_date_fetch); PG_FUNCTION_INFO_V1(gbt_date_union); PG_FUNCTION_INFO_V1(gbt_date_picksplit); PG_FUNCTION_INFO_V1(gbt_date_consistent); @@ -130,12 +131,17 @@ Datum gbt_date_compress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); + PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo)); } +Datum +gbt_date_fetch(PG_FUNCTION_ARGS) +{ + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo)); +} Datum gbt_date_consistent(PG_FUNCTION_ARGS) diff --git a/contrib/btree_gist/btree_float4.c b/contrib/btree_gist/btree_float4.c index 778d8dad84..13dc4a5c0f 100644 --- a/contrib/btree_gist/btree_float4.c +++ b/contrib/btree_gist/btree_float4.c @@ -16,6 +16,7 @@ typedef 
struct float4key ** float4 ops */ PG_FUNCTION_INFO_V1(gbt_float4_compress); +PG_FUNCTION_INFO_V1(gbt_float4_fetch); PG_FUNCTION_INFO_V1(gbt_float4_union); PG_FUNCTION_INFO_V1(gbt_float4_picksplit); PG_FUNCTION_INFO_V1(gbt_float4_consistent); @@ -112,11 +113,17 @@ Datum gbt_float4_compress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); + PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo)); } +Datum +gbt_float4_fetch(PG_FUNCTION_ARGS) +{ + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + + PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo)); +} Datum gbt_float4_consistent(PG_FUNCTION_ARGS) diff --git a/contrib/btree_gist/btree_float8.c b/contrib/btree_gist/btree_float8.c index c898bf2d97..c3a2415733 100644 --- a/contrib/btree_gist/btree_float8.c +++ b/contrib/btree_gist/btree_float8.c @@ -16,6 +16,7 @@ typedef struct float8key ** float8 ops */ PG_FUNCTION_INFO_V1(gbt_float8_compress); +PG_FUNCTION_INFO_V1(gbt_float8_fetch); PG_FUNCTION_INFO_V1(gbt_float8_union); PG_FUNCTION_INFO_V1(gbt_float8_picksplit); PG_FUNCTION_INFO_V1(gbt_float8_consistent); @@ -119,11 +120,17 @@ Datum gbt_float8_compress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); + PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo)); } +Datum +gbt_float8_fetch(PG_FUNCTION_ARGS) +{ + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + + PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo)); +} Datum gbt_float8_consistent(PG_FUNCTION_ARGS) diff --git a/contrib/btree_gist/btree_gist--1.0--1.1.sql b/contrib/btree_gist/btree_gist--1.0--1.1.sql new file mode 100644 index 0000000000..2633beabe0 --- /dev/null +++ b/contrib/btree_gist/btree_gist--1.0--1.1.sql @@ -0,0 +1,127 @@ +/* contrib/btree_gist/btree_gist--1.0--1.1.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "ALTER EXTENSION btree_gist UPDATE TO '1.1'" to load this file. \quit + +-- Index-only scan support new in 9.5. 
+CREATE FUNCTION gbt_oid_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_int2_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_int4_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_int8_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_float4_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_float8_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_ts_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_time_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_date_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_intv_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_cash_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_macad_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION gbt_var_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +ALTER OPERATOR FAMILY gist_oid_ops USING gist ADD + FUNCTION 9 (oid, oid) gbt_oid_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_int2_ops USING gist ADD + FUNCTION 9 (int2, int2) gbt_int2_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_int4_ops USING gist ADD + FUNCTION 9 (int4, int4) gbt_int4_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_int8_ops USING gist ADD + FUNCTION 9 (int8, int8) gbt_int8_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_float4_ops USING gist ADD + FUNCTION 9 (float4, float4) gbt_float4_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_float8_ops USING gist ADD + FUNCTION 9 (float8, float8) gbt_float8_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_timestamp_ops USING gist ADD + FUNCTION 9 (timestamp, timestamp) gbt_ts_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_timestamptz_ops USING gist ADD + FUNCTION 9 (timestamptz, timestamptz) gbt_ts_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_time_ops USING gist ADD + FUNCTION 9 (time, time) gbt_time_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_date_ops USING gist ADD + FUNCTION 9 (date, date) gbt_date_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_interval_ops USING gist ADD + FUNCTION 9 (interval, interval) gbt_intv_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_cash_ops USING gist ADD + FUNCTION 9 (money, money) gbt_cash_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_macaddr_ops USING gist ADD + FUNCTION 9 (macaddr, macaddr) gbt_macad_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_text_ops USING gist ADD + FUNCTION 9 (text, text) gbt_var_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_bpchar_ops USING gist ADD + FUNCTION 9 (bpchar, bpchar) gbt_var_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_bytea_ops USING gist ADD + FUNCTION 9 (bytea, bytea) gbt_var_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_numeric_ops USING gist ADD + FUNCTION 9 (numeric, numeric) gbt_var_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_bit_ops USING gist ADD + FUNCTION 9 (bit, bit) gbt_var_fetch (internal) ; + +ALTER OPERATOR FAMILY gist_vbit_ops USING gist 
ADD + FUNCTION 9 (varbit, varbit) gbt_var_fetch (internal) ; diff --git a/contrib/btree_gist/btree_gist--1.0.sql b/contrib/btree_gist/btree_gist--1.1.sql index c5c958753e..cdec964c05 100644 --- a/contrib/btree_gist/btree_gist--1.0.sql +++ b/contrib/btree_gist/btree_gist--1.1.sql @@ -249,6 +249,11 @@ RETURNS float8 AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_oid_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_oid_compress(internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -264,6 +269,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_var_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_oid_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -305,11 +315,12 @@ AS -- Add operators that are new in 9.1. We do it like this, leaving them -- "loose" in the operator family rather than bound into the opclass, because -- that's the only state that can be reproduced during an upgrade from 9.0. - ALTER OPERATOR FAMILY gist_oid_ops USING gist ADD OPERATOR 6 <> (oid, oid) , OPERATOR 15 <-> (oid, oid) FOR ORDER BY pg_catalog.oid_ops , - FUNCTION 8 (oid, oid) gbt_oid_distance (internal, oid, int2, oid) ; + FUNCTION 8 (oid, oid) gbt_oid_distance (internal, oid, int2, oid) , + -- Also add support function for index-only-scans, added in 9.5. + FUNCTION 9 (oid, oid) gbt_oid_fetch (internal) ; -- @@ -335,6 +346,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_int2_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_int2_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -376,8 +392,8 @@ AS ALTER OPERATOR FAMILY gist_int2_ops USING gist ADD OPERATOR 6 <> (int2, int2) , OPERATOR 15 <-> (int2, int2) FOR ORDER BY pg_catalog.integer_ops , - FUNCTION 8 (int2, int2) gbt_int2_distance (internal, int2, int2, oid) ; - + FUNCTION 8 (int2, int2) gbt_int2_distance (internal, int2, int2, oid) , + FUNCTION 9 (int2, int2) gbt_int2_fetch (internal) ; -- -- @@ -402,6 +418,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_int4_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_int4_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -443,7 +464,8 @@ AS ALTER OPERATOR FAMILY gist_int4_ops USING gist ADD OPERATOR 6 <> (int4, int4) , OPERATOR 15 <-> (int4, int4) FOR ORDER BY pg_catalog.integer_ops , - FUNCTION 8 (int4, int4) gbt_int4_distance (internal, int4, int2, oid) ; + FUNCTION 8 (int4, int4) gbt_int4_distance (internal, int4, int2, oid) , + FUNCTION 9 (int4, int4) gbt_int4_fetch (internal) ; -- @@ -469,6 +491,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_int8_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_int8_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -510,8 +537,8 @@ AS ALTER OPERATOR FAMILY gist_int8_ops USING gist ADD OPERATOR 6 <> (int8, int8) , OPERATOR 15 <-> (int8, int8) FOR ORDER BY pg_catalog.integer_ops , - FUNCTION 8 (int8, int8) gbt_int8_distance (internal, int8, int2, oid) ; - + FUNCTION 8 (int8, int8) gbt_int8_distance (internal, int8, int2, oid) , + FUNCTION 9 (int8, int8) gbt_int8_fetch (internal) ; -- -- @@ 
-536,6 +563,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_float4_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_float4_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -577,8 +609,8 @@ AS ALTER OPERATOR FAMILY gist_float4_ops USING gist ADD OPERATOR 6 <> (float4, float4) , OPERATOR 15 <-> (float4, float4) FOR ORDER BY pg_catalog.float_ops , - FUNCTION 8 (float4, float4) gbt_float4_distance (internal, float4, int2, oid) ; - + FUNCTION 8 (float4, float4) gbt_float4_distance (internal, float4, int2, oid) , + FUNCTION 9 (float4, float4) gbt_float4_fetch (internal) ; -- -- @@ -603,6 +635,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_float8_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_float8_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -644,8 +681,8 @@ AS ALTER OPERATOR FAMILY gist_float8_ops USING gist ADD OPERATOR 6 <> (float8, float8) , OPERATOR 15 <-> (float8, float8) FOR ORDER BY pg_catalog.float_ops , - FUNCTION 8 (float8, float8) gbt_float8_distance (internal, float8, int2, oid) ; - + FUNCTION 8 (float8, float8) gbt_float8_distance (internal, float8, int2, oid) , + FUNCTION 9 (float8, float8) gbt_float8_fetch (internal) ; -- -- @@ -685,6 +722,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_ts_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_ts_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -726,8 +768,8 @@ AS ALTER OPERATOR FAMILY gist_timestamp_ops USING gist ADD OPERATOR 6 <> (timestamp, timestamp) , OPERATOR 15 <-> (timestamp, timestamp) FOR ORDER BY pg_catalog.interval_ops , - FUNCTION 8 (timestamp, timestamp) gbt_ts_distance (internal, timestamp, int2, oid) ; - + FUNCTION 8 (timestamp, timestamp) gbt_ts_distance (internal, timestamp, int2, oid) , + FUNCTION 9 (timestamp, timestamp) gbt_ts_fetch (internal) ; -- Create the operator class CREATE OPERATOR CLASS gist_timestamptz_ops @@ -750,8 +792,8 @@ AS ALTER OPERATOR FAMILY gist_timestamptz_ops USING gist ADD OPERATOR 6 <> (timestamptz, timestamptz) , OPERATOR 15 <-> (timestamptz, timestamptz) FOR ORDER BY pg_catalog.interval_ops , - FUNCTION 8 (timestamptz, timestamptz) gbt_tstz_distance (internal, timestamptz, int2, oid) ; - + FUNCTION 8 (timestamptz, timestamptz) gbt_tstz_distance (internal, timestamptz, int2, oid) , + FUNCTION 9 (timestamptz, timestamptz) gbt_ts_fetch (internal) ; -- -- @@ -786,6 +828,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_time_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_time_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -827,7 +874,8 @@ AS ALTER OPERATOR FAMILY gist_time_ops USING gist ADD OPERATOR 6 <> (time, time) , OPERATOR 15 <-> (time, time) FOR ORDER BY pg_catalog.interval_ops , - FUNCTION 8 (time, time) gbt_time_distance (internal, time, int2, oid) ; + FUNCTION 8 (time, time) gbt_time_distance (internal, time, int2, oid) , + FUNCTION 9 (time, time) gbt_time_fetch (internal) ; CREATE OPERATOR CLASS gist_timetz_ops @@ -849,6 +897,7 @@ AS ALTER OPERATOR FAMILY gist_timetz_ops USING gist ADD OPERATOR 6 <> (timetz, timetz) ; + -- no 'fetch' function, as the 
compress function is lossy. -- @@ -874,6 +923,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_date_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_date_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -915,7 +969,8 @@ AS ALTER OPERATOR FAMILY gist_date_ops USING gist ADD OPERATOR 6 <> (date, date) , OPERATOR 15 <-> (date, date) FOR ORDER BY pg_catalog.integer_ops , - FUNCTION 8 (date, date) gbt_date_distance (internal, date, int2, oid) ; + FUNCTION 8 (date, date) gbt_date_distance (internal, date, int2, oid) , + FUNCTION 9 (date, date) gbt_date_fetch (internal) ; -- @@ -946,6 +1001,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_intv_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_intv_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -987,7 +1047,8 @@ AS ALTER OPERATOR FAMILY gist_interval_ops USING gist ADD OPERATOR 6 <> (interval, interval) , OPERATOR 15 <-> (interval, interval) FOR ORDER BY pg_catalog.interval_ops , - FUNCTION 8 (interval, interval) gbt_intv_distance (internal, interval, int2, oid) ; + FUNCTION 8 (interval, interval) gbt_intv_distance (internal, interval, int2, oid) , + FUNCTION 9 (interval, interval) gbt_intv_fetch (internal) ; -- @@ -1013,6 +1074,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_cash_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_cash_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -1054,7 +1120,8 @@ AS ALTER OPERATOR FAMILY gist_cash_ops USING gist ADD OPERATOR 6 <> (money, money) , OPERATOR 15 <-> (money, money) FOR ORDER BY pg_catalog.money_ops , - FUNCTION 8 (money, money) gbt_cash_distance (internal, money, int2, oid) ; + FUNCTION 8 (money, money) gbt_cash_distance (internal, money, int2, oid) , + FUNCTION 9 (money, money) gbt_cash_fetch (internal) ; -- @@ -1075,6 +1142,11 @@ RETURNS internal AS 'MODULE_PATHNAME' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION gbt_macad_fetch(internal) +RETURNS internal +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION gbt_macad_penalty(internal,internal,internal) RETURNS internal AS 'MODULE_PATHNAME' @@ -1114,7 +1186,8 @@ AS STORAGE gbtreekey16; ALTER OPERATOR FAMILY gist_macaddr_ops USING gist ADD - OPERATOR 6 <> (macaddr, macaddr) ; + OPERATOR 6 <> (macaddr, macaddr) , + FUNCTION 9 (macaddr, macaddr) gbt_macad_fetch (internal); -- @@ -1184,7 +1257,8 @@ AS STORAGE gbtreekey_var; ALTER OPERATOR FAMILY gist_text_ops USING gist ADD - OPERATOR 6 <> (text, text) ; + OPERATOR 6 <> (text, text) , + FUNCTION 9 (text, text) gbt_var_fetch (internal) ; ---- Create the operator class @@ -1206,8 +1280,8 @@ AS STORAGE gbtreekey_var; ALTER OPERATOR FAMILY gist_bpchar_ops USING gist ADD - OPERATOR 6 <> (bpchar, bpchar) ; - + OPERATOR 6 <> (bpchar, bpchar) , + FUNCTION 9 (bpchar, bpchar) gbt_var_fetch (internal) ; -- -- @@ -1265,7 +1339,8 @@ AS STORAGE gbtreekey_var; ALTER OPERATOR FAMILY gist_bytea_ops USING gist ADD - OPERATOR 6 <> (bytea, bytea) ; + OPERATOR 6 <> (bytea, bytea) , + FUNCTION 9 (bytea, bytea) gbt_var_fetch (internal) ; -- @@ -1325,7 +1400,8 @@ AS STORAGE gbtreekey_var; ALTER OPERATOR FAMILY gist_numeric_ops USING gist ADD - OPERATOR 6 <> (numeric, numeric) ; + OPERATOR 6 <> (numeric, numeric) , + 
FUNCTION 9 (numeric, numeric) gbt_var_fetch (internal) ; -- @@ -1384,7 +1460,8 @@ AS STORAGE gbtreekey_var; ALTER OPERATOR FAMILY gist_bit_ops USING gist ADD - OPERATOR 6 <> (bit, bit) ; + OPERATOR 6 <> (bit, bit) , + FUNCTION 9 (bit, bit) gbt_var_fetch (internal) ; -- Create the operator class @@ -1406,7 +1483,8 @@ AS STORAGE gbtreekey_var; ALTER OPERATOR FAMILY gist_vbit_ops USING gist ADD - OPERATOR 6 <> (varbit, varbit) ; + OPERATOR 6 <> (varbit, varbit) , + FUNCTION 9 (varbit, varbit) gbt_var_fetch (internal) ; -- @@ -1467,7 +1545,7 @@ AS ALTER OPERATOR FAMILY gist_inet_ops USING gist ADD OPERATOR 6 <> (inet, inet) ; - + -- no fetch support, the compress function is lossy -- Create the operator class CREATE OPERATOR CLASS gist_cidr_ops @@ -1489,3 +1567,4 @@ AS ALTER OPERATOR FAMILY gist_cidr_ops USING gist ADD OPERATOR 6 <> (inet, inet) ; + -- no fetch support, the compress function is lossy diff --git a/contrib/btree_gist/btree_gist--unpackaged--1.0.sql b/contrib/btree_gist/btree_gist--unpackaged--1.0.sql index 838ad7ec1e..e9913ab7f2 100644 --- a/contrib/btree_gist/btree_gist--unpackaged--1.0.sql +++ b/contrib/btree_gist/btree_gist--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/btree_gist/btree_gist--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION btree_gist" to load this file. \quit +\echo Use "CREATE EXTENSION btree_gist FROM unpackaged" to load this file. \quit ALTER EXTENSION btree_gist ADD type gbtreekey4; ALTER EXTENSION btree_gist ADD function gbtreekey4_in(cstring); diff --git a/contrib/btree_gist/btree_gist.control b/contrib/btree_gist/btree_gist.control index 10e2f949c1..c7adfeb358 100644 --- a/contrib/btree_gist/btree_gist.control +++ b/contrib/btree_gist/btree_gist.control @@ -1,5 +1,5 @@ # btree_gist extension comment = 'support for indexing common datatypes in GiST' -default_version = '1.0' +default_version = '1.1' module_pathname = '$libdir/btree_gist' relocatable = true diff --git a/contrib/btree_gist/btree_int2.c b/contrib/btree_gist/btree_int2.c index a88aae6453..54dc1cc518 100644 --- a/contrib/btree_gist/btree_int2.c +++ b/contrib/btree_gist/btree_int2.c @@ -16,6 +16,7 @@ typedef struct int16key ** int16 ops */ PG_FUNCTION_INFO_V1(gbt_int2_compress); +PG_FUNCTION_INFO_V1(gbt_int2_fetch); PG_FUNCTION_INFO_V1(gbt_int2_union); PG_FUNCTION_INFO_V1(gbt_int2_picksplit); PG_FUNCTION_INFO_V1(gbt_int2_consistent); @@ -119,11 +120,17 @@ Datum gbt_int2_compress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); + PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo)); } +Datum +gbt_int2_fetch(PG_FUNCTION_ARGS) +{ + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + + PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo)); +} Datum gbt_int2_consistent(PG_FUNCTION_ARGS) diff --git a/contrib/btree_gist/btree_int4.c b/contrib/btree_gist/btree_int4.c index 889a512078..ddbcf52746 100644 --- a/contrib/btree_gist/btree_int4.c +++ b/contrib/btree_gist/btree_int4.c @@ -16,6 +16,7 @@ typedef struct int32key ** int32 ops */ PG_FUNCTION_INFO_V1(gbt_int4_compress); +PG_FUNCTION_INFO_V1(gbt_int4_fetch); PG_FUNCTION_INFO_V1(gbt_int4_union); PG_FUNCTION_INFO_V1(gbt_int4_picksplit); PG_FUNCTION_INFO_V1(gbt_int4_consistent); @@ -120,11 +121,17 @@ Datum gbt_int4_compress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - 
	PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo));
+	PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo));
 }
+Datum
+gbt_int4_fetch(PG_FUNCTION_ARGS)
+{
+	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+
+	PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo));
+}
 Datum
 gbt_int4_consistent(PG_FUNCTION_ARGS)
diff --git a/contrib/btree_gist/btree_int8.c b/contrib/btree_gist/btree_int8.c
index 8685cee176..44bf69a4fb 100644
--- a/contrib/btree_gist/btree_int8.c
+++ b/contrib/btree_gist/btree_int8.c
@@ -16,6 +16,7 @@ typedef struct int64key
 ** int64 ops
 */
 PG_FUNCTION_INFO_V1(gbt_int8_compress);
+PG_FUNCTION_INFO_V1(gbt_int8_fetch);
 PG_FUNCTION_INFO_V1(gbt_int8_union);
 PG_FUNCTION_INFO_V1(gbt_int8_picksplit);
 PG_FUNCTION_INFO_V1(gbt_int8_consistent);
@@ -120,11 +121,17 @@ Datum
 gbt_int8_compress(PG_FUNCTION_ARGS)
 {
 	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
-	GISTENTRY  *retval = NULL;
-	PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo));
+	PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo));
 }
+Datum
+gbt_int8_fetch(PG_FUNCTION_ARGS)
+{
+	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+
+	PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo));
+}
 Datum
 gbt_int8_consistent(PG_FUNCTION_ARGS)
diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c
index 68d80e8e0a..acccb8b42e 100644
--- a/contrib/btree_gist/btree_interval.c
+++ b/contrib/btree_gist/btree_interval.c
@@ -18,6 +18,7 @@ typedef struct
 ** Interval ops
 */
 PG_FUNCTION_INFO_V1(gbt_intv_compress);
+PG_FUNCTION_INFO_V1(gbt_intv_fetch);
 PG_FUNCTION_INFO_V1(gbt_intv_decompress);
 PG_FUNCTION_INFO_V1(gbt_intv_union);
 PG_FUNCTION_INFO_V1(gbt_intv_picksplit);
@@ -175,6 +176,14 @@ gbt_intv_compress(PG_FUNCTION_ARGS)
 }
 Datum
+gbt_intv_fetch(PG_FUNCTION_ARGS)
+{
+	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+
+	PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo));
+}
+
+Datum
 gbt_intv_decompress(PG_FUNCTION_ARGS)
 {
 	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
diff --git a/contrib/btree_gist/btree_macaddr.c b/contrib/btree_gist/btree_macaddr.c
index ed58a1b742..87d96c00ac 100644
--- a/contrib/btree_gist/btree_macaddr.c
+++ b/contrib/btree_gist/btree_macaddr.c
@@ -19,6 +19,7 @@ typedef struct
 ** OID ops
 */
 PG_FUNCTION_INFO_V1(gbt_macad_compress);
+PG_FUNCTION_INFO_V1(gbt_macad_fetch);
 PG_FUNCTION_INFO_V1(gbt_macad_union);
 PG_FUNCTION_INFO_V1(gbt_macad_picksplit);
 PG_FUNCTION_INFO_V1(gbt_macad_consistent);
@@ -110,11 +111,17 @@ Datum
 gbt_macad_compress(PG_FUNCTION_ARGS)
 {
 	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
-	GISTENTRY  *retval = NULL;
-	PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo));
+	PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo));
 }
+Datum
+gbt_macad_fetch(PG_FUNCTION_ARGS)
+{
+	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+
+	PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo));
+}
 Datum
 gbt_macad_consistent(PG_FUNCTION_ARGS)
diff --git a/contrib/btree_gist/btree_numeric.c b/contrib/btree_gist/btree_numeric.c
index 02ccca8647..47b00209c8 100644
--- a/contrib/btree_gist/btree_numeric.c
+++ b/contrib/btree_gist/btree_numeric.c
@@ -170,7 +170,7 @@ gbt_numeric_penalty(PG_FUNCTION_ARGS)
 				uk;
 	rk = gbt_var_key_readable(org);
-	uni = PointerGetDatum(gbt_var_key_copy(&rk, TRUE));
+	uni = PointerGetDatum(gbt_var_key_copy(&rk));
 	gbt_var_bin_union(&uni, newe, PG_GET_COLLATION(), &tinfo);
 	ok = gbt_var_key_readable(org);
 	uk = gbt_var_key_readable((GBT_VARKEY *) DatumGetPointer(uni));
diff --git a/contrib/btree_gist/btree_oid.c b/contrib/btree_gist/btree_oid.c
index f6b7bfa05b..ac61a76aa0 100644
--- a/contrib/btree_gist/btree_oid.c
+++ b/contrib/btree_gist/btree_oid.c
@@ -16,6 +16,7 @@ typedef struct
 ** OID ops
 */
 PG_FUNCTION_INFO_V1(gbt_oid_compress);
+PG_FUNCTION_INFO_V1(gbt_oid_fetch);
 PG_FUNCTION_INFO_V1(gbt_oid_union);
 PG_FUNCTION_INFO_V1(gbt_oid_picksplit);
 PG_FUNCTION_INFO_V1(gbt_oid_consistent);
@@ -120,11 +121,17 @@ Datum
 gbt_oid_compress(PG_FUNCTION_ARGS)
 {
 	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
-	GISTENTRY  *retval = NULL;
-	PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo));
+	PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo));
 }
+Datum
+gbt_oid_fetch(PG_FUNCTION_ARGS)
+{
+	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+
+	PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo));
+}
 Datum
 gbt_oid_consistent(PG_FUNCTION_ARGS)
diff --git a/contrib/btree_gist/btree_time.c b/contrib/btree_gist/btree_time.c
index cdf81711e7..41d9959214 100644
--- a/contrib/btree_gist/btree_time.c
+++ b/contrib/btree_gist/btree_time.c
@@ -19,6 +19,7 @@ typedef struct
 */
 PG_FUNCTION_INFO_V1(gbt_time_compress);
 PG_FUNCTION_INFO_V1(gbt_timetz_compress);
+PG_FUNCTION_INFO_V1(gbt_time_fetch);
 PG_FUNCTION_INFO_V1(gbt_time_union);
 PG_FUNCTION_INFO_V1(gbt_time_picksplit);
 PG_FUNCTION_INFO_V1(gbt_time_consistent);
@@ -157,9 +158,8 @@ Datum
 gbt_time_compress(PG_FUNCTION_ARGS)
 {
 	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
-	GISTENTRY  *retval = NULL;
-	PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo));
+	PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo));
 }
@@ -193,6 +193,13 @@ gbt_timetz_compress(PG_FUNCTION_ARGS)
 	PG_RETURN_POINTER(retval);
 }
+Datum
+gbt_time_fetch(PG_FUNCTION_ARGS)
+{
+	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+
+	PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo));
+}
 Datum
 gbt_time_consistent(PG_FUNCTION_ARGS)
diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c
index a13dcc8bea..c746c2319c 100644
--- a/contrib/btree_gist/btree_ts.c
+++ b/contrib/btree_gist/btree_ts.c
@@ -19,6 +19,7 @@ typedef struct
 */
 PG_FUNCTION_INFO_V1(gbt_ts_compress);
 PG_FUNCTION_INFO_V1(gbt_tstz_compress);
+PG_FUNCTION_INFO_V1(gbt_ts_fetch);
 PG_FUNCTION_INFO_V1(gbt_ts_union);
 PG_FUNCTION_INFO_V1(gbt_ts_picksplit);
 PG_FUNCTION_INFO_V1(gbt_ts_consistent);
@@ -153,7 +154,7 @@ ts_dist(PG_FUNCTION_ARGS)
 		p->day = INT_MAX;
 		p->month = INT_MAX;
 #ifdef HAVE_INT64_TIMESTAMP
-		p->time = INT64CONST(0x7FFFFFFFFFFFFFFF);
+		p->time = PG_INT64_MAX;
 #else
 		p->time = DBL_MAX;
 #endif
@@ -181,7 +182,7 @@ tstz_dist(PG_FUNCTION_ARGS)
 		p->day = INT_MAX;
 		p->month = INT_MAX;
 #ifdef HAVE_INT64_TIMESTAMP
-		p->time = INT64CONST(0x7FFFFFFFFFFFFFFF);
+		p->time = PG_INT64_MAX;
 #else
 		p->time = DBL_MAX;
 #endif
@@ -200,27 +201,11 @@ tstz_dist(PG_FUNCTION_ARGS)
 **************************************************/
-static Timestamp
+static inline Timestamp
 tstz_to_ts_gmt(TimestampTz ts)
 {
-	Timestamp	gmt;
-	int			val,
-				tz;
-
-	gmt = ts;
-	DecodeSpecial(0, "gmt", &val);
-
-	if (ts < DT_NOEND && ts > DT_NOBEGIN)
-	{
-		tz = val * 60;
-
-#ifdef HAVE_INT64_TIMESTAMP
-		gmt -= (tz * INT64CONST(1000000));
-#else
-		gmt -= tz;
-#endif
-	}
-	return gmt;
+	/* No timezone correction is needed, since GMT is offset 0 by definition */
+	return (Timestamp) ts;
 }
@@ -228,9 +213,8 @@ Datum
 gbt_ts_compress(PG_FUNCTION_ARGS)
 {
 	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
-	GISTENTRY  *retval = NULL;
-	PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo));
+	PG_RETURN_POINTER(gbt_num_compress(entry, &tinfo));
 }
@@ -260,6 +244,13 @@ gbt_tstz_compress(PG_FUNCTION_ARGS)
 	PG_RETURN_POINTER(retval);
 }
+Datum
+gbt_ts_fetch(PG_FUNCTION_ARGS)
+{
+	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+
+	PG_RETURN_POINTER(gbt_num_fetch(entry, &tinfo));
+}
 Datum
 gbt_ts_consistent(PG_FUNCTION_ARGS)
diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c
index 505633c98b..5bfe659f91 100644
--- a/contrib/btree_gist/btree_utils_num.c
+++ b/contrib/btree_gist/btree_utils_num.c
@@ -11,8 +11,10 @@
 GISTENTRY *
-gbt_num_compress(GISTENTRY *retval, GISTENTRY *entry, const gbtree_ninfo *tinfo)
+gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo)
 {
+	GISTENTRY  *retval;
+
 	if (entry->leafkey)
 	{
 		union
@@ -91,6 +93,64 @@ gbt_num_compress(GISTENTRY *retval, GISTENTRY *entry, const gbtree_ninfo *tinfo)
 	return retval;
 }
+/*
+ * Convert a compressed leaf item back to the original type, for index-only
+ * scans.
+ */
+GISTENTRY *
+gbt_num_fetch(GISTENTRY *entry, const gbtree_ninfo *tinfo)
+{
+	GISTENTRY  *retval;
+	Datum		datum;
+
+	Assert(tinfo->indexsize >= 2 * tinfo->size);
+
+	/*
+	 * Get the original Datum from the stored datum. On leaf entries, the
+	 * lower and upper bound are the same. We just grab the lower bound and
+	 * return it.
+	 */
+	switch (tinfo->t)
+	{
+		case gbt_t_int2:
+			datum = Int16GetDatum(*(int16 *) entry->key);
+			break;
+		case gbt_t_int4:
+			datum = Int32GetDatum(*(int32 *) entry->key);
+			break;
+		case gbt_t_int8:
+			datum = Int64GetDatum(*(int64 *) entry->key);
+			break;
+		case gbt_t_oid:
+			datum = ObjectIdGetDatum(*(Oid *) entry->key);
+			break;
+		case gbt_t_float4:
+			datum = Float4GetDatum(*(float4 *) entry->key);
+			break;
+		case gbt_t_float8:
+			datum = Float8GetDatum(*(float8 *) entry->key);
+			break;
+		case gbt_t_date:
+			datum = DateADTGetDatum(*(DateADT *) entry->key);
+			break;
+		case gbt_t_time:
+			datum = TimeADTGetDatum(*(TimeADT *) entry->key);
+			break;
+		case gbt_t_ts:
+			datum = TimestampGetDatum(*(Timestamp *) entry->key);
+			break;
+		case gbt_t_cash:
+			datum = CashGetDatum(*(Cash *) entry->key);
+			break;
+		default:
+			datum = PointerGetDatum(entry->key);
+	}
+
+	retval = palloc(sizeof(GISTENTRY));
+	gistentryinit(*retval, datum, entry->rel, entry->page, entry->offset,
+				  FALSE);
+	return retval;
+}
@@ -147,13 +207,8 @@ gbt_num_same(const GBT_NUMKEY *a, const GBT_NUMKEY *b, const gbtree_ninfo *tinfo
 	b2.lower = &(((GBT_NUMKEY *) b)[0]);
 	b2.upper = &(((GBT_NUMKEY *) b)[tinfo->size]);
-	if (
-		(*tinfo->f_eq) (b1.lower, b2.lower) &&
-		(*tinfo->f_eq) (b1.upper, b2.upper)
-		)
-		return TRUE;
-	return FALSE;
-
+	return ((*tinfo->f_eq) (b1.lower, b2.lower) &&
+			(*tinfo->f_eq) (b1.upper, b2.upper));
 }
diff --git a/contrib/btree_gist/btree_utils_num.h b/contrib/btree_gist/btree_utils_num.h
index 0d79cd2a7f..a33491bc09 100644
--- a/contrib/btree_gist/btree_utils_num.h
+++ b/contrib/btree_gist/btree_utils_num.h
@@ -128,9 +128,9 @@ extern float8 gbt_num_distance(const GBT_NUMKEY_R *key, const void *query,
 extern GIST_SPLITVEC *gbt_num_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v,
 				  const gbtree_ninfo *tinfo);
-extern GISTENTRY *gbt_num_compress(GISTENTRY *retval, GISTENTRY *entry,
-				  const gbtree_ninfo *tinfo);
+extern GISTENTRY *gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo);
+extern GISTENTRY *gbt_num_fetch(GISTENTRY *entry, const gbtree_ninfo *tinfo);
 extern void *gbt_num_union(GBT_NUMKEY *out, const GistEntryVector *entryvec,
 			  const gbtree_ninfo *tinfo);
diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c
index b7dd060a94..78e8662add 100644
--- a/contrib/btree_gist/btree_utils_var.c
+++ b/contrib/btree_gist/btree_utils_var.c
@@ -29,6 +29,7 @@ typedef struct
 PG_FUNCTION_INFO_V1(gbt_var_decompress);
+PG_FUNCTION_INFO_V1(gbt_var_fetch);
 Datum
@@ -66,26 +67,37 @@ gbt_var_key_readable(const GBT_VARKEY *k)
 }
+/*
+ * Create a leaf-entry to store in the index, from a single Datum.
+ */
+static GBT_VARKEY *
+gbt_var_key_from_datum(const struct varlena *u)
+{
+	int32		lowersize = VARSIZE(u);
+	GBT_VARKEY *r;
+
+	r = (GBT_VARKEY *) palloc(lowersize + VARHDRSZ);
+	memcpy(VARDATA(r), u, lowersize);
+	SET_VARSIZE(r, lowersize + VARHDRSZ);
+
+	return r;
+}
+
+/*
+ * Create an entry to store in the index, from lower and upper bound.
+ */
 GBT_VARKEY *
-gbt_var_key_copy(const GBT_VARKEY_R *u, bool force_node)
+gbt_var_key_copy(const GBT_VARKEY_R *u)
 {
-	GBT_VARKEY *r = NULL;
 	int32		lowersize = VARSIZE(u->lower);
 	int32		uppersize = VARSIZE(u->upper);
+	GBT_VARKEY *r;
+
+	r = (GBT_VARKEY *) palloc0(INTALIGN(lowersize) + uppersize + VARHDRSZ);
+	memcpy(VARDATA(r), u->lower, lowersize);
+	memcpy(VARDATA(r) + INTALIGN(lowersize), u->upper, uppersize);
+	SET_VARSIZE(r, INTALIGN(lowersize) + uppersize + VARHDRSZ);
-	if (u->lower == u->upper && !force_node)
-	{							/* leaf key mode */
-		r = (GBT_VARKEY *) palloc(lowersize + VARHDRSZ);
-		memcpy(VARDATA(r), u->lower, lowersize);
-		SET_VARSIZE(r, lowersize + VARHDRSZ);
-	}
-	else
-	{							/* node key mode */
-		r = (GBT_VARKEY *) palloc0(INTALIGN(lowersize) + uppersize + VARHDRSZ);
-		memcpy(VARDATA(r), u->lower, lowersize);
-		memcpy(VARDATA(r) + INTALIGN(lowersize), u->upper, uppersize);
-		SET_VARSIZE(r, INTALIGN(lowersize) + uppersize + VARHDRSZ);
-	}
 	return r;
 }
@@ -255,18 +267,17 @@ gbt_var_bin_union(Datum *u, GBT_VARKEY *e, Oid collation,
 		}
 		if (update)
-			*u = PointerGetDatum(gbt_var_key_copy(&nr, TRUE));
+			*u = PointerGetDatum(gbt_var_key_copy(&nr));
 	}
 	else
 	{
 		nr.lower = eo.lower;
 		nr.upper = eo.upper;
-		*u = PointerGetDatum(gbt_var_key_copy(&nr, TRUE));
+		*u = PointerGetDatum(gbt_var_key_copy(&nr));
 	}
 }
-
 GISTENTRY *
 gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo)
 {
@@ -274,12 +285,10 @@ gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo)
 	if (entry->leafkey)
 	{
-		GBT_VARKEY *r = NULL;
-		bytea	   *leaf = (bytea *) DatumGetPointer(PG_DETOAST_DATUM(entry->key));
-		GBT_VARKEY_R u;
+		struct varlena *leaf = PG_DETOAST_DATUM(entry->key);
+		GBT_VARKEY *r;
-		u.lower = u.upper = leaf;
-		r = gbt_var_key_copy(&u, FALSE);
+		r = gbt_var_key_from_datum(leaf);
 		retval = palloc(sizeof(GISTENTRY));
 		gistentryinit(*retval, PointerGetDatum(r),
@@ -293,6 +302,22 @@ gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo)
 }
+Datum
+gbt_var_fetch(PG_FUNCTION_ARGS)
+{
+	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+	GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(entry->key));
+	GBT_VARKEY_R r = gbt_var_key_readable(key);
+	GISTENTRY  *retval;
+
+	retval = palloc(sizeof(GISTENTRY));
+	gistentryinit(*retval, PointerGetDatum(r.lower),
+				  entry->rel, entry->page,
+				  entry->offset, TRUE);
+
+	PG_RETURN_POINTER(retval);
+}
+
 GBT_VARKEY *
 gbt_var_union(const GistEntryVector *entryvec, int32 *size, Oid collation,
@@ -308,7 +333,7 @@ gbt_var_union(const GistEntryVector *entryvec, int32 *size, Oid collation,
 	cur = (GBT_VARKEY *) DatumGetPointer(entryvec->vector[0].key);
 	rk = gbt_var_key_readable(cur);
-	out = PointerGetDatum(gbt_var_key_copy(&rk, TRUE));
+	out = PointerGetDatum(gbt_var_key_copy(&rk));
 	for (i = 1; i < numranges; i++)
 	{
@@ -337,7 +362,6 @@ bool
gbt_var_same(Datum d1, Datum d2, Oid collation, const gbtree_vinfo *tinfo) { - bool result; GBT_VARKEY *t1 = (GBT_VARKEY *) DatumGetPointer(d1); GBT_VARKEY *t2 = (GBT_VARKEY *) DatumGetPointer(d2); GBT_VARKEY_R r1, @@ -346,13 +370,8 @@ gbt_var_same(Datum d1, Datum d2, Oid collation, r1 = gbt_var_key_readable(t1); r2 = gbt_var_key_readable(t2); - if (t1 && t2) - result = ((*tinfo->f_cmp) (r1.lower, r2.lower, collation) == 0 && - (*tinfo->f_cmp) (r1.upper, r2.upper, collation) == 0); - else - result = (t1 == NULL && t2 == NULL); - - return result; + return ((*tinfo->f_cmp) (r1.lower, r2.lower, collation) == 0 && + (*tinfo->f_cmp) (r1.upper, r2.upper, collation) == 0); } diff --git a/contrib/btree_gist/btree_utils_var.h b/contrib/btree_gist/btree_utils_var.h index 7a3eeec01a..9a7c4d1055 100644 --- a/contrib/btree_gist/btree_utils_var.h +++ b/contrib/btree_gist/btree_utils_var.h @@ -47,7 +47,7 @@ typedef struct extern GBT_VARKEY_R gbt_var_key_readable(const GBT_VARKEY *k); -extern GBT_VARKEY *gbt_var_key_copy(const GBT_VARKEY_R *u, bool force_node); +extern GBT_VARKEY *gbt_var_key_copy(const GBT_VARKEY_R *u); extern GISTENTRY *gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo *tinfo); diff --git a/contrib/btree_gist/expected/bit.out b/contrib/btree_gist/expected/bit.out index ae82304600..8606baf366 100644 --- a/contrib/btree_gist/expected/bit.out +++ b/contrib/btree_gist/expected/bit.out @@ -64,3 +64,13 @@ SELECT count(*) FROM bittmp WHERE a > '011011000100010111011000110000100'; 350 (1 row) +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT a FROM bittmp WHERE a BETWEEN '1000000' and '1000001'; + QUERY PLAN +----------------------------------------------------------------------- + Index Only Scan using bitidx on bittmp + Index Cond: ((a >= B'1000000'::"bit") AND (a <= B'1000001'::"bit")) +(2 rows) + diff --git a/contrib/btree_gist/expected/bytea.out b/contrib/btree_gist/expected/bytea.out index 917fac1812..b9efa73c08 100644 --- a/contrib/btree_gist/expected/bytea.out +++ b/contrib/btree_gist/expected/bytea.out @@ -71,3 +71,20 @@ SELECT count(*) FROM byteatmp WHERE a = '2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbed 1 (1 row) +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT a FROM byteatmp where a > 'ffa'::bytea; + QUERY PLAN +-------------------------------------------- + Index Only Scan using byteaidx on byteatmp + Index Cond: (a > '\x666661'::bytea) +(2 rows) + +SELECT a FROM byteatmp where a > 'ffa'::bytea; + a +-------------------------------- + \x666662656532373363376262 + \x6666626663313331336339633835 +(2 rows) + diff --git a/contrib/btree_gist/expected/cash.out b/contrib/btree_gist/expected/cash.out index a4100d844e..cacbd71854 100644 --- a/contrib/btree_gist/expected/cash.out +++ b/contrib/btree_gist/expected/cash.out @@ -74,10 +74,10 @@ SELECT count(*) FROM moneytmp WHERE a > '22649.64'::money; EXPLAIN (COSTS OFF) SELECT a, a <-> '21472.79' FROM moneytmp ORDER BY a <-> '21472.79' LIMIT 3; - QUERY PLAN ------------------------------------------------ + QUERY PLAN +-------------------------------------------------- Limit - -> Index Scan using moneyidx on moneytmp + -> Index Only Scan using moneyidx on moneytmp Order By: (a <-> '$21,472.79'::money) (3 rows) diff --git a/contrib/btree_gist/expected/char.out b/contrib/btree_gist/expected/char.out index 5260995a75..d715c045cc 100644 --- a/contrib/btree_gist/expected/char.out +++ b/contrib/btree_gist/expected/char.out @@ -64,3 +64,19 @@ SELECT count(*) FROM chartmp WHERE a > 
'31b0'::char(32); 400 (1 row) +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM chartmp WHERE a BETWEEN '31a' AND '31c'; + QUERY PLAN +--------------------------------------------------------------- + Index Only Scan using charidx on chartmp + Index Cond: ((a >= '31a'::bpchar) AND (a <= '31c'::bpchar)) +(2 rows) + +SELECT * FROM chartmp WHERE a BETWEEN '31a' AND '31c'; + a +------ + 31b0 +(1 row) + diff --git a/contrib/btree_gist/expected/char_1.out b/contrib/btree_gist/expected/char_1.out index a1d24876d8..867318002b 100644 --- a/contrib/btree_gist/expected/char_1.out +++ b/contrib/btree_gist/expected/char_1.out @@ -64,3 +64,19 @@ SELECT count(*) FROM chartmp WHERE a > '31b0'::char(32); 214 (1 row) +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM chartmp WHERE a BETWEEN '31a' AND '31c'; + QUERY PLAN +--------------------------------------------------------------- + Index Only Scan using charidx on chartmp + Index Cond: ((a >= '31a'::bpchar) AND (a <= '31c'::bpchar)) +(2 rows) + +SELECT * FROM chartmp WHERE a BETWEEN '31a' AND '31c'; + a +------ + 31b0 +(1 row) + diff --git a/contrib/btree_gist/expected/date.out b/contrib/btree_gist/expected/date.out index 4a360bea6d..5db864bb82 100644 --- a/contrib/btree_gist/expected/date.out +++ b/contrib/btree_gist/expected/date.out @@ -74,10 +74,10 @@ SELECT count(*) FROM datetmp WHERE a > '2001-02-13'::date; EXPLAIN (COSTS OFF) SELECT a, a <-> '2001-02-13' FROM datetmp ORDER BY a <-> '2001-02-13' LIMIT 3; - QUERY PLAN ----------------------------------------------- + QUERY PLAN +------------------------------------------------ Limit - -> Index Scan using dateidx on datetmp + -> Index Only Scan using dateidx on datetmp Order By: (a <-> '02-13-2001'::date) (3 rows) diff --git a/contrib/btree_gist/expected/float4.out b/contrib/btree_gist/expected/float4.out index 1695f7805a..abbd9eef4e 100644 --- a/contrib/btree_gist/expected/float4.out +++ b/contrib/btree_gist/expected/float4.out @@ -74,11 +74,11 @@ SELECT count(*) FROM float4tmp WHERE a > -179.0::float4; EXPLAIN (COSTS OFF) SELECT a, a <-> '-179.0' FROM float4tmp ORDER BY a <-> '-179.0' LIMIT 3; - QUERY PLAN ------------------------------------------------ + QUERY PLAN +---------------------------------------------------- Limit - -> Index Scan using float4idx on float4tmp - Order By: (a <-> (-179)::real) + -> Index Only Scan using float4idx on float4tmp + Order By: (a <-> '-179'::real) (3 rows) SELECT a, a <-> '-179.0' FROM float4tmp ORDER BY a <-> '-179.0' LIMIT 3; diff --git a/contrib/btree_gist/expected/float8.out b/contrib/btree_gist/expected/float8.out index 7d2228b797..5111dbdfae 100644 --- a/contrib/btree_gist/expected/float8.out +++ b/contrib/btree_gist/expected/float8.out @@ -77,8 +77,8 @@ SELECT a, a <-> '-1890.0' FROM float8tmp ORDER BY a <-> '-1890.0' LIMIT 3; QUERY PLAN ----------------------------------------------------- Limit - -> Index Scan using float8idx on float8tmp - Order By: (a <-> (-1890)::double precision) + -> Index Only Scan using float8idx on float8tmp + Order By: (a <-> '-1890'::double precision) (3 rows) SELECT a, a <-> '-1890.0' FROM float8tmp ORDER BY a <-> '-1890.0' LIMIT 3; diff --git a/contrib/btree_gist/expected/int2.out b/contrib/btree_gist/expected/int2.out index b1cc3b14b2..50a332939b 100644 --- a/contrib/btree_gist/expected/int2.out +++ b/contrib/btree_gist/expected/int2.out @@ -74,11 +74,11 @@ SELECT count(*) FROM int2tmp WHERE a > 237::int2; EXPLAIN (COSTS OFF) SELECT a, a <-> 
'237' FROM int2tmp ORDER BY a <-> '237' LIMIT 3; - QUERY PLAN -------------------------------------------- + QUERY PLAN +------------------------------------------------ Limit - -> Index Scan using int2idx on int2tmp - Order By: (a <-> 237::smallint) + -> Index Only Scan using int2idx on int2tmp + Order By: (a <-> '237'::smallint) (3 rows) SELECT a, a <-> '237' FROM int2tmp ORDER BY a <-> '237' LIMIT 3; diff --git a/contrib/btree_gist/expected/int4.out b/contrib/btree_gist/expected/int4.out index 41bed1f6e3..6bbdc7c3f4 100644 --- a/contrib/btree_gist/expected/int4.out +++ b/contrib/btree_gist/expected/int4.out @@ -74,10 +74,10 @@ SELECT count(*) FROM int4tmp WHERE a > 237::int4; EXPLAIN (COSTS OFF) SELECT a, a <-> '237' FROM int4tmp ORDER BY a <-> '237' LIMIT 3; - QUERY PLAN -------------------------------------------- + QUERY PLAN +------------------------------------------------ Limit - -> Index Scan using int4idx on int4tmp + -> Index Only Scan using int4idx on int4tmp Order By: (a <-> 237) (3 rows) diff --git a/contrib/btree_gist/expected/int8.out b/contrib/btree_gist/expected/int8.out index ff0af4a5fb..eff77c26b5 100644 --- a/contrib/btree_gist/expected/int8.out +++ b/contrib/btree_gist/expected/int8.out @@ -74,11 +74,11 @@ SELECT count(*) FROM int8tmp WHERE a > 464571291354841::int8; EXPLAIN (COSTS OFF) SELECT a, a <-> '464571291354841' FROM int8tmp ORDER BY a <-> '464571291354841' LIMIT 3; - QUERY PLAN ---------------------------------------------------- + QUERY PLAN +----------------------------------------------------- Limit - -> Index Scan using int8idx on int8tmp - Order By: (a <-> 464571291354841::bigint) + -> Index Only Scan using int8idx on int8tmp + Order By: (a <-> '464571291354841'::bigint) (3 rows) SELECT a, a <-> '464571291354841' FROM int8tmp ORDER BY a <-> '464571291354841' LIMIT 3; diff --git a/contrib/btree_gist/expected/interval.out b/contrib/btree_gist/expected/interval.out index 6955251a04..875380978e 100644 --- a/contrib/btree_gist/expected/interval.out +++ b/contrib/btree_gist/expected/interval.out @@ -77,7 +77,7 @@ SELECT a, a <-> '199 days 21:21:23' FROM intervaltmp ORDER BY a <-> '199 days 21 QUERY PLAN --------------------------------------------------------------------------- Limit - -> Index Scan using intervalidx on intervaltmp + -> Index Only Scan using intervalidx on intervaltmp Order By: (a <-> '@ 199 days 21 hours 21 mins 23 secs'::interval) (3 rows) diff --git a/contrib/btree_gist/expected/macaddr.out b/contrib/btree_gist/expected/macaddr.out index 3f0271bd25..c0a4c6287f 100644 --- a/contrib/btree_gist/expected/macaddr.out +++ b/contrib/btree_gist/expected/macaddr.out @@ -64,3 +64,26 @@ SELECT count(*) FROM macaddrtmp WHERE a > '22:00:5c:e5:9b:0d'::macaddr; 540 (1 row) +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM macaddrtmp WHERE a < '02:03:04:05:06:07'::macaddr; + QUERY PLAN +-------------------------------------------------- + Index Only Scan using macaddridx on macaddrtmp + Index Cond: (a < '02:03:04:05:06:07'::macaddr) +(2 rows) + +SELECT * FROM macaddrtmp WHERE a < '02:03:04:05:06:07'::macaddr; + a +------------------- + 01:02:37:05:4f:36 + 01:02:37:05:4f:36 + 01:02:37:05:4f:36 + 01:02:37:05:4f:36 + 01:43:b5:79:eb:0f + 01:43:b5:79:eb:0f + 01:43:b5:79:eb:0f + 01:43:b5:79:eb:0f +(8 rows) + diff --git a/contrib/btree_gist/expected/numeric.out b/contrib/btree_gist/expected/numeric.out index 1ab7ae6486..ae839b8ec8 100644 --- a/contrib/btree_gist/expected/numeric.out +++ 
b/contrib/btree_gist/expected/numeric.out @@ -186,3 +186,22 @@ SELECT count(*) FROM numerictmp WHERE a > 0 ; 576 (1 row) +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM numerictmp WHERE a BETWEEN 1 AND 300 ORDER BY a; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: a + -> Index Only Scan using numericidx on numerictmp + Index Cond: ((a >= '1'::numeric) AND (a <= '300'::numeric)) +(4 rows) + +SELECT * FROM numerictmp WHERE a BETWEEN 1 AND 300 ORDER BY a; + a +------------ + 204.035430 + 207.400532 +(2 rows) + diff --git a/contrib/btree_gist/expected/text.out b/contrib/btree_gist/expected/text.out index 4905cb023b..bb4e2e62d1 100644 --- a/contrib/btree_gist/expected/text.out +++ b/contrib/btree_gist/expected/text.out @@ -71,3 +71,19 @@ SELECT count(*) FROM texttmp WHERE a = '2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb 1 (1 row) +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM texttmp WHERE a BETWEEN '31a' AND '31c'; + QUERY PLAN +----------------------------------------------------------- + Index Only Scan using textidx on texttmp + Index Cond: ((a >= '31a'::text) AND (a <= '31c'::text)) +(2 rows) + +SELECT * FROM texttmp WHERE a BETWEEN '31a' AND '31c'; + a +------ + 31b0 +(1 row) + diff --git a/contrib/btree_gist/expected/text_1.out b/contrib/btree_gist/expected/text_1.out index e2b9c1b6b7..8ef1ffb2d1 100644 --- a/contrib/btree_gist/expected/text_1.out +++ b/contrib/btree_gist/expected/text_1.out @@ -71,3 +71,19 @@ SELECT count(*) FROM texttmp WHERE a = '2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb 1 (1 row) +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM texttmp WHERE a BETWEEN '31a' AND '31c'; + QUERY PLAN +----------------------------------------------------------- + Index Only Scan using textidx on texttmp + Index Cond: ((a >= '31a'::text) AND (a <= '31c'::text)) +(2 rows) + +SELECT * FROM texttmp WHERE a BETWEEN '31a' AND '31c'; + a +------ + 31b0 +(1 row) + diff --git a/contrib/btree_gist/expected/time.out b/contrib/btree_gist/expected/time.out index 1b9da4e192..ec95ef77c5 100644 --- a/contrib/btree_gist/expected/time.out +++ b/contrib/btree_gist/expected/time.out @@ -77,7 +77,7 @@ SELECT a, a <-> '10:57:11' FROM timetmp ORDER BY a <-> '10:57:11' LIMIT 3; QUERY PLAN -------------------------------------------------------------- Limit - -> Index Scan using timeidx on timetmp + -> Index Only Scan using timeidx on timetmp Order By: (a <-> '10:57:11'::time without time zone) (3 rows) diff --git a/contrib/btree_gist/expected/timestamp.out b/contrib/btree_gist/expected/timestamp.out index cc3624f084..0d94f2f245 100644 --- a/contrib/btree_gist/expected/timestamp.out +++ b/contrib/btree_gist/expected/timestamp.out @@ -77,7 +77,7 @@ SELECT a, a <-> '2004-10-26 08:55:08' FROM timestamptmp ORDER BY a <-> '2004-10- QUERY PLAN ----------------------------------------------------------------------------------- Limit - -> Index Scan using timestampidx on timestamptmp + -> Index Only Scan using timestampidx on timestamptmp Order By: (a <-> 'Tue Oct 26 08:55:08 2004'::timestamp without time zone) (3 rows) diff --git a/contrib/btree_gist/expected/timestamptz.out b/contrib/btree_gist/expected/timestamptz.out index 88d2404c44..75a15a4256 100644 --- a/contrib/btree_gist/expected/timestamptz.out +++ b/contrib/btree_gist/expected/timestamptz.out @@ -197,7 +197,7 @@ SELECT a, a <-> '2018-12-18 10:59:54 GMT+2' FROM timestamptztmp ORDER BY a 
<-> ' QUERY PLAN ------------------------------------------------------------------------------------ Limit - -> Index Scan using timestamptzidx on timestamptztmp + -> Index Only Scan using timestamptzidx on timestamptztmp Order By: (a <-> 'Tue Dec 18 04:59:54 2018 PST'::timestamp with time zone) (3 rows) diff --git a/contrib/btree_gist/expected/varbit.out b/contrib/btree_gist/expected/varbit.out index e6765f4231..538ace85c9 100644 --- a/contrib/btree_gist/expected/varbit.out +++ b/contrib/btree_gist/expected/varbit.out @@ -64,3 +64,13 @@ SELECT count(*) FROM varbittmp WHERE a > '1110100111010'::varbit; 50 (1 row) +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT a FROM bittmp WHERE a BETWEEN '1000000' and '1000001'; + QUERY PLAN +----------------------------------------------------------------------- + Index Only Scan using bitidx on bittmp + Index Cond: ((a >= B'1000000'::"bit") AND (a <= B'1000001'::"bit")) +(2 rows) + diff --git a/contrib/btree_gist/sql/bit.sql b/contrib/btree_gist/sql/bit.sql index a6b6ab3b3b..a733042023 100644 --- a/contrib/btree_gist/sql/bit.sql +++ b/contrib/btree_gist/sql/bit.sql @@ -29,3 +29,8 @@ SELECT count(*) FROM bittmp WHERE a = '011011000100010111011000110000100'; SELECT count(*) FROM bittmp WHERE a >= '011011000100010111011000110000100'; SELECT count(*) FROM bittmp WHERE a > '011011000100010111011000110000100'; + +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT a FROM bittmp WHERE a BETWEEN '1000000' and '1000001'; diff --git a/contrib/btree_gist/sql/bytea.sql b/contrib/btree_gist/sql/bytea.sql index 37aff339e2..6885f5e56d 100644 --- a/contrib/btree_gist/sql/bytea.sql +++ b/contrib/btree_gist/sql/bytea.sql @@ -32,3 +32,9 @@ SELECT count(*) FROM byteatmp WHERE a >= '31b0'::bytea; SELECT count(*) FROM byteatmp WHERE a > '31b0'::bytea; SELECT count(*) FROM byteatmp WHERE a = '2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 
24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 
76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a 
ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 
90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 
24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 
76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a 
ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 
90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 
24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809'::bytea; + +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT a FROM byteatmp where a > 'ffa'::bytea; +SELECT a FROM byteatmp where a > 'ffa'::bytea; diff --git a/contrib/btree_gist/sql/char.sql b/contrib/btree_gist/sql/char.sql index 9ae0f1fb3f..f6eb52e672 100644 --- a/contrib/btree_gist/sql/char.sql +++ b/contrib/btree_gist/sql/char.sql @@ -29,3 +29,9 @@ SELECT count(*) FROM chartmp WHERE a = '31b0'::char(32); SELECT count(*) FROM chartmp WHERE a >= '31b0'::char(32); SELECT count(*) FROM chartmp WHERE a > '31b0'::char(32); + +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM chartmp WHERE a BETWEEN '31a' AND '31c'; +SELECT * FROM chartmp WHERE a BETWEEN '31a' AND '31c'; diff --git a/contrib/btree_gist/sql/macaddr.sql b/contrib/btree_gist/sql/macaddr.sql index d9c54b2930..85c271f7ce 100644 --- a/contrib/btree_gist/sql/macaddr.sql +++ b/contrib/btree_gist/sql/macaddr.sql @@ -29,3 +29,9 @@ SELECT count(*) FROM macaddrtmp WHERE a = '22:00:5c:e5:9b:0d'::macaddr; SELECT count(*) FROM macaddrtmp WHERE a >= '22:00:5c:e5:9b:0d'::macaddr; SELECT count(*) FROM macaddrtmp 
WHERE a > '22:00:5c:e5:9b:0d'::macaddr; + +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM macaddrtmp WHERE a < '02:03:04:05:06:07'::macaddr; +SELECT * FROM macaddrtmp WHERE a < '02:03:04:05:06:07'::macaddr; diff --git a/contrib/btree_gist/sql/numeric.sql b/contrib/btree_gist/sql/numeric.sql index ae4ef7c689..dbb2f2f183 100644 --- a/contrib/btree_gist/sql/numeric.sql +++ b/contrib/btree_gist/sql/numeric.sql @@ -75,3 +75,9 @@ SELECT count(*) FROM numerictmp WHERE a = 0 ; SELECT count(*) FROM numerictmp WHERE a >= 0 ; SELECT count(*) FROM numerictmp WHERE a > 0 ; + +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM numerictmp WHERE a BETWEEN 1 AND 300 ORDER BY a; +SELECT * FROM numerictmp WHERE a BETWEEN 1 AND 300 ORDER BY a; diff --git a/contrib/btree_gist/sql/text.sql b/contrib/btree_gist/sql/text.sql index 0d411a9a41..46597e731d 100644 --- a/contrib/btree_gist/sql/text.sql +++ b/contrib/btree_gist/sql/text.sql @@ -32,3 +32,9 @@ SELECT count(*) FROM texttmp WHERE a >= '31b0'::text; SELECT count(*) FROM texttmp WHERE a > '31b0'::text; SELECT count(*) FROM texttmp WHERE a = '2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 
2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f 
a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 
4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 
40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f 
b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 
7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 
4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e 
a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809 2eb2c961c1cbf6 cf8d7b68cb9a2f36 7bbedb4ae7 06ec46c55611a466eb7e3edcc009ca6 e 5ed9cd0ea5a4e55d601027c56a 64cacf3a42afc 90e63000c34506993345355640 79bce 173bb7 c5 574ea7c921cb0f25 089d56d16dff24f336e4740 6870470f1f9afcb4f7c56c9f b97e117fc965 7013029 e48f6dd481 7d00e1e227beef84a9 904d4c34241f cb5c0f14 3a8a70 f51a73164e92052fbb53b4cc2f1fed 3c3fecaa0270175 2521ef03594 fa05756812648f450fb 13c2f b39a0729d6182e9 15b5ea204fe73 
d8991afd72d21acd188df1 a29fff57ab897338 de549b3ed5a024534c007125c 2fcf3e5c3e3 7427b6daec5c3f 473 8 a5d9 840410976ac2eeab58e1ca8bf46c2b7 1db9cc85a336f1291ea19922 db808f3548cda91 2e379ce80af12bd7ed56d0338c a ea67a7c847f6620fc894f0ba10044 0e 52e97d975af7201d8 d95e6f08184d8ff 19762476fa 42f278f3534f3f2be0abaed71 f0aba11835e4e1d94 e8534cf677046eafb8f5f761865 ffbee273c7bb 2bb77f6e780 c77e81851c491 e a9f45d765b01a030d5d317 ff7345a22bc360 c87363ba121297b063e83 13ea32e9618d 40304f6c2a7e92c1c66ff4208e a781b4a21419abfdf5eb467e4d48908 8a65656e514b2b3ef8f86310aaf85 4 90b7b2862e3dbc8f0eef3dfc6075bfa eb94a1c a58abb5def4fa43840e6e2716 260e6eaebb 42415d712bf83944dcd1204e 305254fc3b849150b5 5bbd7f8471dcd3621 2ae0548115a250 0c1988e9 76f98bef45639b7 0d5a28f01dc b71 c046576faa4d49eff8 c1e8d01c 10c86c457ea050455a742da4f8 ea7676af85c71c7eeca635 6a07137227404d a4 7186172 8150f31c9a15401c f1bb9057a9938bfa 22b482be08f424ec4 21daea994293589 15bff393f6b17fef24786dd6f9 d5a2d 4b3b5dd9370543e b4a93b2ac4341945d06 d384447812e0 4e3c97e9b8f7 f7d4d644b2a1d373 5102c b9531f725674b28 1aa16e7e34285797c1439 51aa762ea14b40fb8876c887eea6 45a62d3d5d3e946250904697486591 b3f1a8 243524767bf846d 8 95 45a922872 dd2497eb1e3da8d513d2 7821db9e14d4f 24c4f085de60d7c0c6ea3fc6bc e4c9f8c68596d7d afd6c8cb0f2516b87f24bbd8 61d2e457c70949 d2d362cdc657 3605f9d27fd6d72 32de91d66fe5bf537530 859e1a08b65 9b5a55f 4116cda9fddeb843964002 e81f3b2c0ca566ad3dbbc6e234 0d3b1d54 10c440be5c0bca95 7dad841f a61f041967972e805ccfee55c deee9cc16e92ab197 7627554073c1f56b9e 21bebcbfd2e2282f84 7b121a83eeb91db8bda81ba88c634b46394 59885ebc737617addaaf0cb809'::text; + +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT * FROM texttmp WHERE a BETWEEN '31a' AND '31c'; +SELECT * FROM texttmp WHERE a BETWEEN '31a' AND '31c'; diff --git a/contrib/btree_gist/sql/varbit.sql b/contrib/btree_gist/sql/varbit.sql index 73e9273802..e2a33b5a1b 100644 --- a/contrib/btree_gist/sql/varbit.sql +++ b/contrib/btree_gist/sql/varbit.sql @@ -29,3 +29,8 @@ SELECT count(*) FROM varbittmp WHERE a = '1110100111010'::varbit; SELECT count(*) FROM varbittmp WHERE a >= '1110100111010'::varbit; SELECT count(*) FROM varbittmp WHERE a > '1110100111010'::varbit; + +-- Test index-only scans +SET enable_bitmapscan=off; +EXPLAIN (COSTS OFF) +SELECT a FROM bittmp WHERE a BETWEEN '1000000' and '1000001'; diff --git a/contrib/chkpass/Makefile b/contrib/chkpass/Makefile index b775aef17d..a2599ea239 100644 --- a/contrib/chkpass/Makefile +++ b/contrib/chkpass/Makefile @@ -1,10 +1,11 @@ # contrib/chkpass/Makefile MODULE_big = chkpass -OBJS = chkpass.o +OBJS = chkpass.o $(WIN32RES) EXTENSION = chkpass DATA = chkpass--1.0.sql chkpass--unpackaged--1.0.sql +PGFILEDESC = "chkpass - encrypted password data type" SHLIB_LINK = $(filter -lcrypt, $(LIBS)) diff --git a/contrib/chkpass/chkpass--1.0.sql b/contrib/chkpass/chkpass--1.0.sql index d1fbedc446..406a61924c 100644 --- a/contrib/chkpass/chkpass--1.0.sql +++ b/contrib/chkpass/chkpass--1.0.sql @@ -10,12 +10,15 @@ CREATE FUNCTION chkpass_in(cstring) RETURNS chkpass AS 'MODULE_PATHNAME' - LANGUAGE C STRICT; + LANGUAGE C STRICT VOLATILE; +-- Note: chkpass_in actually is volatile, because of its use of random(). +-- In hindsight that was a bad idea, but there's no way to change it without +-- breaking some usage patterns. 
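[Editor's note] The comment added just above records why chkpass_in is now declared VOLATILE (it generates its salt with random()) while chkpass_out, below, can safely be IMMUTABLE. As a minimal sketch, assuming the extension has been installed with CREATE EXTENSION chkpass, the volatility actually recorded for both functions can be inspected in the system catalog:

    -- Hypothetical check, not part of this patch: 'v' = volatile, 'i' = immutable.
    SELECT proname, provolatile
    FROM pg_catalog.pg_proc
    WHERE proname IN ('chkpass_in', 'chkpass_out');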
CREATE FUNCTION chkpass_out(chkpass) RETURNS cstring AS 'MODULE_PATHNAME' - LANGUAGE C STRICT; + LANGUAGE C STRICT IMMUTABLE; CREATE TYPE chkpass ( internallength = 16, diff --git a/contrib/chkpass/chkpass--unpackaged--1.0.sql b/contrib/chkpass/chkpass--unpackaged--1.0.sql index 7bbfb142a6..8bdecddfa5 100644 --- a/contrib/chkpass/chkpass--unpackaged--1.0.sql +++ b/contrib/chkpass/chkpass--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/chkpass/chkpass--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION chkpass" to load this file. \quit +\echo Use "CREATE EXTENSION chkpass FROM unpackaged" to load this file. \quit ALTER EXTENSION chkpass ADD type chkpass; ALTER EXTENSION chkpass ADD function chkpass_in(cstring); diff --git a/contrib/chkpass/chkpass.c b/contrib/chkpass/chkpass.c index 283ad9a538..9425c089b5 100644 --- a/contrib/chkpass/chkpass.c +++ b/contrib/chkpass/chkpass.c @@ -65,7 +65,7 @@ chkpass_in(PG_FUNCTION_ARGS) /* special case to let us enter encrypted passwords */ if (*str == ':') { - result = (chkpass *) palloc(sizeof(chkpass)); + result = (chkpass *) palloc0(sizeof(chkpass)); strlcpy(result->password, str + 1, 13 + 1); PG_RETURN_POINTER(result); } @@ -75,7 +75,7 @@ chkpass_in(PG_FUNCTION_ARGS) (errcode(ERRCODE_DATA_EXCEPTION), errmsg("password \"%s\" is weak", str))); - result = (chkpass *) palloc(sizeof(chkpass)); + result = (chkpass *) palloc0(sizeof(chkpass)); mysalt[0] = salt_chars[random() & 0x3f]; mysalt[1] = salt_chars[random() & 0x3f]; @@ -107,7 +107,7 @@ chkpass_out(PG_FUNCTION_ARGS) result = (char *) palloc(16); result[0] = ':'; - strcpy(result + 1, password->password); + strlcpy(result + 1, password->password, 15); PG_RETURN_CSTRING(result); } diff --git a/contrib/citext/Makefile b/contrib/citext/Makefile index 65942528dd..267854b5de 100644 --- a/contrib/citext/Makefile +++ b/contrib/citext/Makefile @@ -4,6 +4,7 @@ MODULES = citext EXTENSION = citext DATA = citext--1.0.sql citext--unpackaged--1.0.sql +PGFILEDESC = "citext - case-insensitive character string data type" REGRESS = citext diff --git a/contrib/citext/citext--unpackaged--1.0.sql b/contrib/citext/citext--unpackaged--1.0.sql index 102743c528..ef6d6b0639 100644 --- a/contrib/citext/citext--unpackaged--1.0.sql +++ b/contrib/citext/citext--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/citext/citext--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION citext" to load this file. \quit +\echo Use "CREATE EXTENSION citext FROM unpackaged" to load this file. \quit ALTER EXTENSION citext ADD type citext; ALTER EXTENSION citext ADD function citextin(cstring); @@ -105,7 +105,12 @@ UPDATE pg_catalog.pg_attribute SET attcollation = 100 FROM typeoids WHERE atttypid = typeoids.typoid; -UPDATE pg_catalog.pg_index SET indcollation[0] = 100 +-- Updating the index indcollations is particularly tedious, but since we +-- don't currently allow SQL assignment to individual elements of oidvectors, +-- there's little choice. 
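[Editor's note] The comment above explains the approach taken in the rewritten UPDATEs that follow: since SQL cannot assign to individual elements of an oidvector, the patch casts indcollation to text, patches the n-th element with regexp_replace, and casts the result back. A minimal standalone sketch of the same trick, using an invented literal rather than a real pg_index row:

    -- Rewrite the first element of an oidvector-style value to 100,
    -- mirroring what the former "indcollation[0] = 100" assignment intended.
    SELECT pg_catalog.regexp_replace('0 100 0'::pg_catalog.text,
                                     '^0', '100')::pg_catalog.oidvector;
    -- expected result: '100 100 0'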
+ +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, '^0', '100')::pg_catalog.oidvector WHERE indclass[0] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -115,7 +120,8 @@ WHERE indclass[0] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[1] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[1] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -125,7 +131,8 @@ WHERE indclass[1] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[2] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[2] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -135,7 +142,8 @@ WHERE indclass[2] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[3] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[3] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -145,7 +153,8 @@ WHERE indclass[3] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[4] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[4] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -155,7 +164,8 @@ WHERE indclass[4] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[5] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[5] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -165,7 +175,8 @@ WHERE indclass[5] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[6] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+ \\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[6] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -175,7 +186,8 @@ WHERE indclass[6] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[7] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+ \\d+ \\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[7] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION diff --git a/contrib/cube/Makefile b/contrib/cube/Makefile index b5cd5d0f33..67f7867761 100644 --- a/contrib/cube/Makefile +++ b/contrib/cube/Makefile @@ -1,10 +1,11 @@ # contrib/cube/Makefile MODULE_big = cube -OBJS= cube.o cubeparse.o +OBJS= cube.o cubeparse.o $(WIN32RES) EXTENSION = cube DATA = cube--1.0.sql cube--unpackaged--1.0.sql +PGFILEDESC = "cube - multidimensional cube data type" REGRESS = cube diff --git a/contrib/cube/cube--unpackaged--1.0.sql 
b/contrib/cube/cube--unpackaged--1.0.sql index 6859682786..1065512a29 100644 --- a/contrib/cube/cube--unpackaged--1.0.sql +++ b/contrib/cube/cube--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/cube/cube--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION cube" to load this file. \quit +\echo Use "CREATE EXTENSION cube FROM unpackaged" to load this file. \quit ALTER EXTENSION cube ADD type cube; ALTER EXTENSION cube ADD function cube_in(cstring); diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c index b0305ef431..b9ccad994a 100644 --- a/contrib/cube/cube.c +++ b/contrib/cube/cube.c @@ -26,11 +26,6 @@ PG_MODULE_MAGIC; #define ARRPTR(x) ( (double *) ARR_DATA_PTR(x) ) #define ARRNELEMS(x) ArrayGetNItems( ARR_NDIM(x), ARR_DIMS(x)) -extern int cube_yyparse(NDBOX **result); -extern void cube_yyerror(NDBOX **result, const char *message); -extern void cube_scanner_init(const char *str); -extern void cube_scanner_finish(void); - /* ** Input/Output routines */ diff --git a/contrib/cube/cubedata.h b/contrib/cube/cubedata.h index 5d44e11081..59c23ded6a 100644 --- a/contrib/cube/cubedata.h +++ b/contrib/cube/cubedata.h @@ -23,11 +23,10 @@ typedef struct NDBOX unsigned int header; /* - * Variable length array. The lower left coordinates for each dimension - * come first, followed by upper right coordinates unless the point flag - * is set. + * The lower left coordinates for each dimension come first, followed by + * upper right coordinates unless the point flag is set. */ - double x[1]; + double x[FLEXIBLE_ARRAY_MEMBER]; } NDBOX; #define POINT_BIT 0x80000000 @@ -41,9 +40,18 @@ typedef struct NDBOX #define LL_COORD(cube, i) ( (cube)->x[i] ) #define UR_COORD(cube, i) ( IS_POINT(cube) ? 
(cube)->x[i] : (cube)->x[(i) + DIM(cube)] ) -#define POINT_SIZE(_dim) (offsetof(NDBOX, x[0]) + sizeof(double)*(_dim)) -#define CUBE_SIZE(_dim) (offsetof(NDBOX, x[0]) + sizeof(double)*(_dim)*2) +#define POINT_SIZE(_dim) (offsetof(NDBOX, x) + sizeof(double)*(_dim)) +#define CUBE_SIZE(_dim) (offsetof(NDBOX, x) + sizeof(double)*(_dim)*2) -#define DatumGetNDBOX(x) ((NDBOX*)DatumGetPointer(x)) -#define PG_GETARG_NDBOX(x) DatumGetNDBOX( PG_DETOAST_DATUM(PG_GETARG_DATUM(x)) ) +#define DatumGetNDBOX(x) ((NDBOX *) PG_DETOAST_DATUM(x)) +#define PG_GETARG_NDBOX(x) DatumGetNDBOX(PG_GETARG_DATUM(x)) #define PG_RETURN_NDBOX(x) PG_RETURN_POINTER(x) + +/* in cubescan.l */ +extern int cube_yylex(void); +extern void cube_yyerror(NDBOX **result, const char *message) pg_attribute_noreturn(); +extern void cube_scanner_init(const char *str); +extern void cube_scanner_finish(void); + +/* in cubeparse.y */ +extern int cube_yyparse(NDBOX **result); diff --git a/contrib/cube/cubeparse.y b/contrib/cube/cubeparse.y index 0baee8e132..33606c741c 100644 --- a/contrib/cube/cubeparse.y +++ b/contrib/cube/cubeparse.y @@ -22,14 +22,9 @@ #define YYMALLOC palloc #define YYFREE pfree -extern int cube_yylex(void); - static char *scanbuf; static int scanbuflen; -extern int cube_yyparse(NDBOX **result); -extern void cube_yyerror(NDBOX **result, const char *message); - static int delim_count(char *s, char delim); static NDBOX * write_box(unsigned int dim, char *str1, char *str2); static NDBOX * write_point_as_box(char *s, int dim); diff --git a/contrib/cube/cubescan.l b/contrib/cube/cubescan.l index e383b59d3d..4408e28387 100644 --- a/contrib/cube/cubescan.l +++ b/contrib/cube/cubescan.l @@ -4,8 +4,6 @@ * contrib/cube/cubescan.l */ -#include "postgres.h" - /* No reason to constrain amount of data slurped */ #define YY_READ_BUF_SIZE 16777216 @@ -24,12 +22,6 @@ static YY_BUFFER_STATE scanbufhandle; /* this is now declared in cubeparse.y: */ /* static char *scanbuf; */ /* static int scanbuflen; */ - -/* flex 2.5.4 doesn't bother with a decl for this */ -int cube_yylex(void); - -void cube_scanner_init(const char *str); -void cube_scanner_finish(void); %} %option 8bit @@ -60,7 +52,7 @@ float ({integer}|{real})([eE]{integer})? %% -void __attribute__((noreturn)) +void yyerror(NDBOX **result, const char *message) { if (*yytext == YY_END_OF_BUFFER_CHAR) diff --git a/contrib/dblink/Makefile b/contrib/dblink/Makefile index 32314a0abb..b8d515716d 100644 --- a/contrib/dblink/Makefile +++ b/contrib/dblink/Makefile @@ -1,24 +1,24 @@ # contrib/dblink/Makefile MODULE_big = dblink -OBJS = dblink.o +OBJS = dblink.o $(WIN32RES) PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK = $(libpq) -SHLIB_PREREQS = submake-libpq EXTENSION = dblink DATA = dblink--1.1.sql dblink--1.0--1.1.sql dblink--unpackaged--1.0.sql +PGFILEDESC = "dblink - connect to other PostgreSQL databases" -REGRESS = dblink - -# the db name is hard-coded in the tests -override USE_MODULE_DB = +REGRESS = paths dblink +REGRESS_OPTS = --dlpath=$(top_builddir)/src/test/regress +EXTRA_CLEAN = sql/paths.sql expected/paths.out ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else +SHLIB_PREREQS = submake-libpq subdir = contrib/dblink top_builddir = ../.. 
include $(top_builddir)/src/Makefile.global diff --git a/contrib/dblink/dblink--unpackaged--1.0.sql b/contrib/dblink/dblink--unpackaged--1.0.sql index 29f5bed0c1..f3923b5b35 100644 --- a/contrib/dblink/dblink--unpackaged--1.0.sql +++ b/contrib/dblink/dblink--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/dblink/dblink--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION dblink" to load this file. \quit +\echo Use "CREATE EXTENSION dblink FROM unpackaged" to load this file. \quit ALTER EXTENSION dblink ADD function dblink_connect(text); ALTER EXTENSION dblink ADD function dblink_connect(text,text); diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index a81853fa91..c5892d37cb 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -9,7 +9,7 @@ * Shridhar Daithankar <shridhar_daithankar@persistent.co.in> * * contrib/dblink/dblink.c - * Copyright (c) 2001-2014, PostgreSQL Global Development Group + * Copyright (c) 2001-2015, PostgreSQL Global Development Group * ALL RIGHTS RESERVED; * * Permission to use, copy, modify, and distribute this software and its @@ -94,8 +94,8 @@ static void materializeQueryResult(FunctionCallInfo fcinfo, const char *conname, const char *sql, bool fail); -static PGresult *storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql); -static void storeRow(storeInfo *sinfo, PGresult *res, bool first); +static PGresult *storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql); +static void storeRow(volatile storeInfo *sinfo, PGresult *res, bool first); static remoteConn *getConnectionByName(const char *name); static HTAB *createConnHash(void); static void createNewConnection(const char *name, remoteConn *rconn); @@ -966,17 +966,24 @@ materializeQueryResult(FunctionCallInfo fcinfo, { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; PGresult *volatile res = NULL; - storeInfo sinfo; + volatile storeInfo sinfo; /* prepTuplestoreResult must have been called previously */ Assert(rsinfo->returnMode == SFRM_Materialize); /* initialize storeInfo to empty */ - memset(&sinfo, 0, sizeof(sinfo)); + memset((void *) &sinfo, 0, sizeof(sinfo)); sinfo.fcinfo = fcinfo; PG_TRY(); { + /* Create short-lived memory context for data conversions */ + sinfo.tmpcontext = AllocSetContextCreate(CurrentMemoryContext, + "dblink temporary context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + /* execute query, collecting any tuples into the tuplestore */ res = storeQueryResult(&sinfo, conn, sql); @@ -1041,6 +1048,12 @@ materializeQueryResult(FunctionCallInfo fcinfo, PQclear(res); res = NULL; } + + /* clean up data conversion short-lived memory context */ + if (sinfo.tmpcontext != NULL) + MemoryContextDelete(sinfo.tmpcontext); + sinfo.tmpcontext = NULL; + PQclear(sinfo.last_res); sinfo.last_res = NULL; PQclear(sinfo.cur_res); @@ -1064,7 +1077,7 @@ materializeQueryResult(FunctionCallInfo fcinfo, * Execute query, and send any result rows to sinfo->tuplestore. */ static PGresult * -storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql) +storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql) { bool first = true; int nestlevel = -1; @@ -1132,7 +1145,7 @@ storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql) * (in this case the PGresult might contain either zero or one row). 
*/ static void -storeRow(storeInfo *sinfo, PGresult *res, bool first) +storeRow(volatile storeInfo *sinfo, PGresult *res, bool first) { int nfields = PQnfields(res); HeapTuple tuple; @@ -1204,15 +1217,6 @@ storeRow(storeInfo *sinfo, PGresult *res, bool first) if (sinfo->cstrs) pfree(sinfo->cstrs); sinfo->cstrs = (char **) palloc(nfields * sizeof(char *)); - - /* Create short-lived memory context for data conversions */ - if (!sinfo->tmpcontext) - sinfo->tmpcontext = - AllocSetContextCreate(CurrentMemoryContext, - "dblink temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); } /* Should have a single-row result if we get here */ @@ -2976,7 +2980,7 @@ applyRemoteGucs(PGconn *conn) /* Apply the option (this will throw error on failure) */ (void) set_config_option(gucName, remoteVal, PGC_USERSET, PGC_S_SESSION, - GUC_ACTION_SAVE, true, 0); + GUC_ACTION_SAVE, true, 0, false); } return nestlevel; diff --git a/contrib/dblink/dblink.h b/contrib/dblink/dblink.h index 270d619ed6..de6c06df8e 100644 --- a/contrib/dblink/dblink.h +++ b/contrib/dblink/dblink.h @@ -9,7 +9,7 @@ * Shridhar Daithankar <shridhar_daithankar@persistent.co.in> * * contrib/dblink/dblink.h - * Copyright (c) 2001-2014, PostgreSQL Global Development Group + * Copyright (c) 2001-2015, PostgreSQL Global Development Group * ALL RIGHTS RESERVED; * * Permission to use, copy, modify, and distribute this software and its diff --git a/contrib/dblink/expected/.gitignore b/contrib/dblink/expected/.gitignore new file mode 100644 index 0000000000..d9c7942c64 --- /dev/null +++ b/contrib/dblink/expected/.gitignore @@ -0,0 +1 @@ +/paths.out diff --git a/contrib/dblink/expected/dblink.out b/contrib/dblink/expected/dblink.out index f237c43d3d..a49b5629a1 100644 --- a/contrib/dblink/expected/dblink.out +++ b/contrib/dblink/expected/dblink.out @@ -88,9 +88,12 @@ SELECT dblink_build_sql_delete('"MySchema"."Foo"','1 2',2,'{"0", "a"}'); DELETE FROM "MySchema"."Foo" WHERE f1 = '0' AND f2 = 'a' (1 row) +CREATE FUNCTION connection_parameters() RETURNS text LANGUAGE SQL AS $f$ + SELECT $$dbname='$$||current_database()||$$' port=$$||current_setting('port'); +$f$; -- regular old dblink SELECT * -FROM dblink('dbname=contrib_regression','SELECT * FROM foo') AS t(a int, b text, c text[]) +FROM dblink(connection_parameters(),'SELECT * FROM foo') AS t(a int, b text, c text[]) WHERE t.a > 7; a | b | c ---+---+------------ @@ -103,8 +106,35 @@ SELECT * FROM dblink('SELECT * FROM foo') AS t(a int, b text, c text[]) WHERE t.a > 7; ERROR: connection not available +-- The first-level connection's backend will crash on exit given OpenLDAP +-- [2.4.24, 2.4.31]. We won't see evidence of any crash until the victim +-- process terminates and the postmaster responds. If process termination +-- entails writing a core dump, that can take awhile. Wait for the process to +-- vanish. At that point, the postmaster has called waitpid() on the crashed +-- process, and it will accept no new connections until it has reinitialized +-- the cluster. (We can't exploit pg_stat_activity, because the crash happens +-- after the backend updates shared memory to reflect its impending exit.) +DO $pl$ +DECLARE + detail text; +BEGIN + PERFORM wait_pid(crash_pid) + FROM dblink(connection_parameters(), $$ + SELECT pg_backend_pid() FROM dblink( + 'service=test_ldap '||connection_parameters(), + -- This string concatenation is a hack to shoehorn a + -- set_pgservicefile call into the SQL statement. 
+ 'SELECT 1' || set_pgservicefile('pg_service.conf') + ) t(c int) + $$) AS t(crash_pid int); +EXCEPTION WHEN OTHERS THEN + GET STACKED DIAGNOSTICS detail = PG_EXCEPTION_DETAIL; + -- Expected error in a non-LDAP build. + IF NOT detail LIKE 'syntax error in service file%' THEN RAISE; END IF; +END +$pl$; -- create a persistent connection -SELECT dblink_connect('dbname=contrib_regression'); +SELECT dblink_connect(connection_parameters()); dblink_connect ---------------- OK @@ -240,14 +270,14 @@ WHERE t.a > 7; ERROR: connection not available -- put more data into our slave table, first using arbitrary connection syntax -- but truncate the actual return value so we can use diff to check for success -SELECT substr(dblink_exec('dbname=contrib_regression','INSERT INTO foo VALUES(10,''k'',''{"a10","b10","c10"}'')'),1,6); +SELECT substr(dblink_exec(connection_parameters(),'INSERT INTO foo VALUES(10,''k'',''{"a10","b10","c10"}'')'),1,6); substr -------- INSERT (1 row) -- create a persistent connection -SELECT dblink_connect('dbname=contrib_regression'); +SELECT dblink_connect(connection_parameters()); dblink_connect ---------------- OK @@ -347,7 +377,7 @@ ERROR: could not establish connection DETAIL: missing "=" after "myconn" in connection info string -- create a named persistent connection -SELECT dblink_connect('myconn','dbname=contrib_regression'); +SELECT dblink_connect('myconn',connection_parameters()); dblink_connect ---------------- OK @@ -376,10 +406,10 @@ CONTEXT: Error occurred on dblink connection named "myconn": could not execute -- create a second named persistent connection -- should error with "duplicate connection name" -SELECT dblink_connect('myconn','dbname=contrib_regression'); +SELECT dblink_connect('myconn',connection_parameters()); ERROR: duplicate connection name -- create a second named persistent connection with a new name -SELECT dblink_connect('myconn2','dbname=contrib_regression'); +SELECT dblink_connect('myconn2',connection_parameters()); dblink_connect ---------------- OK @@ -574,7 +604,7 @@ ERROR: could not establish connection DETAIL: missing "=" after "myconn" in connection info string -- create a named persistent connection -SELECT dblink_connect('myconn','dbname=contrib_regression'); +SELECT dblink_connect('myconn',connection_parameters()); dblink_connect ---------------- OK @@ -650,7 +680,7 @@ SELECT dblink_disconnect('myconn'); SELECT dblink_disconnect('myconn'); ERROR: connection "myconn" not available -- test asynchronous queries -SELECT dblink_connect('dtest1', 'dbname=contrib_regression'); +SELECT dblink_connect('dtest1', connection_parameters()); dblink_connect ---------------- OK @@ -663,7 +693,7 @@ SELECT * from 1 (1 row) -SELECT dblink_connect('dtest2', 'dbname=contrib_regression'); +SELECT dblink_connect('dtest2', connection_parameters()); dblink_connect ---------------- OK @@ -676,7 +706,7 @@ SELECT * from 1 (1 row) -SELECT dblink_connect('dtest3', 'dbname=contrib_regression'); +SELECT dblink_connect('dtest3', connection_parameters()); dblink_connect ---------------- OK @@ -750,7 +780,7 @@ SELECT * from result; 10 | k | {a10,b10,c10} (11 rows) -SELECT dblink_connect('dtest1', 'dbname=contrib_regression'); +SELECT dblink_connect('dtest1', connection_parameters()); dblink_connect ---------------- OK @@ -782,18 +812,23 @@ SELECT dblink_disconnect('dtest1'); (1 row) -- test foreign data wrapper functionality -CREATE USER dblink_regression_test; -CREATE SERVER fdtest FOREIGN DATA WRAPPER dblink_fdw - OPTIONS (dbname 'contrib_regression'); +CREATE ROLE 
dblink_regression_test; +DO $d$ + BEGIN + EXECUTE $$CREATE SERVER fdtest FOREIGN DATA WRAPPER dblink_fdw + OPTIONS (dbname '$$||current_database()||$$', + port '$$||current_setting('port')||$$' + )$$; + END; +$d$; CREATE USER MAPPING FOR public SERVER fdtest OPTIONS (server 'localhost'); -- fail, can't specify server here ERROR: invalid option "server" HINT: Valid options in this context are: user, password -CREATE USER MAPPING FOR public SERVER fdtest; +CREATE USER MAPPING FOR public SERVER fdtest OPTIONS (user :'USER'); GRANT USAGE ON FOREIGN SERVER fdtest TO dblink_regression_test; GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO dblink_regression_test; -\set ORIGINAL_USER :USER -\c - dblink_regression_test +SET SESSION AUTHORIZATION dblink_regression_test; -- should fail SELECT dblink_connect('myconn', 'fdtest'); ERROR: password is required @@ -821,14 +856,14 @@ SELECT * FROM dblink('myconn','SELECT * FROM foo') AS t(a int, b text, c text[]) 10 | k | {a10,b10,c10} (11 rows) -\c - :ORIGINAL_USER +\c - - REVOKE USAGE ON FOREIGN SERVER fdtest FROM dblink_regression_test; REVOKE EXECUTE ON FUNCTION dblink_connect_u(text, text) FROM dblink_regression_test; DROP USER dblink_regression_test; DROP USER MAPPING FOR public SERVER fdtest; DROP SERVER fdtest; -- test asynchronous notifications -SELECT dblink_connect('dbname=contrib_regression'); +SELECT dblink_connect(connection_parameters()); dblink_connect ---------------- OK @@ -917,7 +952,7 @@ SELECT dblink_build_sql_delete('test_dropped', '1', 1, SET datestyle = ISO, MDY; SET intervalstyle = postgres; SET timezone = UTC; -SELECT dblink_connect('myconn','dbname=contrib_regression'); +SELECT dblink_connect('myconn',connection_parameters()); dblink_connect ---------------- OK diff --git a/contrib/dblink/input/paths.source b/contrib/dblink/input/paths.source new file mode 100644 index 0000000000..aab3a3b2bf --- /dev/null +++ b/contrib/dblink/input/paths.source @@ -0,0 +1,14 @@ +-- Initialization that requires path substitution. + +CREATE FUNCTION putenv(text) + RETURNS void + AS '@libdir@/regress@DLSUFFIX@', 'regress_putenv' + LANGUAGE C STRICT; + +CREATE FUNCTION wait_pid(int) + RETURNS void + AS '@libdir@/regress@DLSUFFIX@' + LANGUAGE C STRICT; + +CREATE FUNCTION set_pgservicefile(text) RETURNS void LANGUAGE SQL + AS $$SELECT putenv('PGSERVICEFILE=@abs_srcdir@/' || $1)$$; diff --git a/contrib/dblink/output/paths.source b/contrib/dblink/output/paths.source new file mode 100644 index 0000000000..e1097f0996 --- /dev/null +++ b/contrib/dblink/output/paths.source @@ -0,0 +1,11 @@ +-- Initialization that requires path substitution. +CREATE FUNCTION putenv(text) + RETURNS void + AS '@libdir@/regress@DLSUFFIX@', 'regress_putenv' + LANGUAGE C STRICT; +CREATE FUNCTION wait_pid(int) + RETURNS void + AS '@libdir@/regress@DLSUFFIX@' + LANGUAGE C STRICT; +CREATE FUNCTION set_pgservicefile(text) RETURNS void LANGUAGE SQL + AS $$SELECT putenv('PGSERVICEFILE=@abs_srcdir@/' || $1)$$; diff --git a/contrib/dblink/pg_service.conf b/contrib/dblink/pg_service.conf new file mode 100644 index 0000000000..92201f0ad4 --- /dev/null +++ b/contrib/dblink/pg_service.conf @@ -0,0 +1,7 @@ +# pg_service.conf for minimally exercising libpq use of LDAP. + +# Having failed to reach an LDAP server, libpq essentially ignores the +# "service=test_ldap" in its connection string. Contact the "discard" +# service; the test works whether or not it answers. 
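[Editor's note] The pg_service.conf file added here only takes effect because the paths.source setup earlier in this patch defines set_pgservicefile(), which uses putenv() to point PGSERVICEFILE at this file; a connection string can then name the service. A minimal sketch reusing the helpers defined in this patch (the 'SELECT 1' payload and the t(c int) alias are arbitrary):

    -- Make libpq read contrib/dblink's pg_service.conf, then reference the
    -- test_ldap service from a dblink connection string.
    SELECT set_pgservicefile('pg_service.conf');
    SELECT * FROM dblink('service=test_ldap ' || connection_parameters(),
                         'SELECT 1') AS t(c int);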
+[test_ldap] +ldap://127.0.0.1:9/base?attribute?one?filter diff --git a/contrib/dblink/sql/.gitignore b/contrib/dblink/sql/.gitignore new file mode 100644 index 0000000000..d17507846d --- /dev/null +++ b/contrib/dblink/sql/.gitignore @@ -0,0 +1 @@ +/paths.sql diff --git a/contrib/dblink/sql/dblink.sql b/contrib/dblink/sql/dblink.sql index 2a107601c5..ea78cc2929 100644 --- a/contrib/dblink/sql/dblink.sql +++ b/contrib/dblink/sql/dblink.sql @@ -55,9 +55,13 @@ SELECT dblink_build_sql_update('"MySchema"."Foo"','1 2',2,'{"0", "a"}','{"99", " -- build a delete statement based on a local tuple, SELECT dblink_build_sql_delete('"MySchema"."Foo"','1 2',2,'{"0", "a"}'); +CREATE FUNCTION connection_parameters() RETURNS text LANGUAGE SQL AS $f$ + SELECT $$dbname='$$||current_database()||$$' port=$$||current_setting('port'); +$f$; + -- regular old dblink SELECT * -FROM dblink('dbname=contrib_regression','SELECT * FROM foo') AS t(a int, b text, c text[]) +FROM dblink(connection_parameters(),'SELECT * FROM foo') AS t(a int, b text, c text[]) WHERE t.a > 7; -- should generate "connection not available" error @@ -65,8 +69,36 @@ SELECT * FROM dblink('SELECT * FROM foo') AS t(a int, b text, c text[]) WHERE t.a > 7; +-- The first-level connection's backend will crash on exit given OpenLDAP +-- [2.4.24, 2.4.31]. We won't see evidence of any crash until the victim +-- process terminates and the postmaster responds. If process termination +-- entails writing a core dump, that can take awhile. Wait for the process to +-- vanish. At that point, the postmaster has called waitpid() on the crashed +-- process, and it will accept no new connections until it has reinitialized +-- the cluster. (We can't exploit pg_stat_activity, because the crash happens +-- after the backend updates shared memory to reflect its impending exit.) +DO $pl$ +DECLARE + detail text; +BEGIN + PERFORM wait_pid(crash_pid) + FROM dblink(connection_parameters(), $$ + SELECT pg_backend_pid() FROM dblink( + 'service=test_ldap '||connection_parameters(), + -- This string concatenation is a hack to shoehorn a + -- set_pgservicefile call into the SQL statement. + 'SELECT 1' || set_pgservicefile('pg_service.conf') + ) t(c int) + $$) AS t(crash_pid int); +EXCEPTION WHEN OTHERS THEN + GET STACKED DIAGNOSTICS detail = PG_EXCEPTION_DETAIL; + -- Expected error in a non-LDAP build. 
+ IF NOT detail LIKE 'syntax error in service file%' THEN RAISE; END IF; +END +$pl$; + -- create a persistent connection -SELECT dblink_connect('dbname=contrib_regression'); +SELECT dblink_connect(connection_parameters()); -- use the persistent connection SELECT * @@ -127,10 +159,10 @@ WHERE t.a > 7; -- put more data into our slave table, first using arbitrary connection syntax -- but truncate the actual return value so we can use diff to check for success -SELECT substr(dblink_exec('dbname=contrib_regression','INSERT INTO foo VALUES(10,''k'',''{"a10","b10","c10"}'')'),1,6); +SELECT substr(dblink_exec(connection_parameters(),'INSERT INTO foo VALUES(10,''k'',''{"a10","b10","c10"}'')'),1,6); -- create a persistent connection -SELECT dblink_connect('dbname=contrib_regression'); +SELECT dblink_connect(connection_parameters()); -- put more data into our slave table, using persistent connection syntax -- but truncate the actual return value so we can use diff to check for success @@ -176,7 +208,7 @@ FROM dblink('myconn','SELECT * FROM foo') AS t(a int, b text, c text[]) WHERE t.a > 7; -- create a named persistent connection -SELECT dblink_connect('myconn','dbname=contrib_regression'); +SELECT dblink_connect('myconn',connection_parameters()); -- use the named persistent connection SELECT * @@ -190,10 +222,10 @@ WHERE t.a > 7; -- create a second named persistent connection -- should error with "duplicate connection name" -SELECT dblink_connect('myconn','dbname=contrib_regression'); +SELECT dblink_connect('myconn',connection_parameters()); -- create a second named persistent connection with a new name -SELECT dblink_connect('myconn2','dbname=contrib_regression'); +SELECT dblink_connect('myconn2',connection_parameters()); -- use the second named persistent connection SELECT * @@ -279,7 +311,7 @@ FROM dblink('myconn','SELECT * FROM foo') AS t(a int, b text, c text[]) WHERE t.a > 7; -- create a named persistent connection -SELECT dblink_connect('myconn','dbname=contrib_regression'); +SELECT dblink_connect('myconn',connection_parameters()); -- put more data into our slave table, using named persistent connection syntax -- but truncate the actual return value so we can use diff to check for success @@ -313,15 +345,15 @@ SELECT dblink_disconnect('myconn'); SELECT dblink_disconnect('myconn'); -- test asynchronous queries -SELECT dblink_connect('dtest1', 'dbname=contrib_regression'); +SELECT dblink_connect('dtest1', connection_parameters()); SELECT * from dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1; -SELECT dblink_connect('dtest2', 'dbname=contrib_regression'); +SELECT dblink_connect('dtest2', connection_parameters()); SELECT * from dblink_send_query('dtest2', 'select * from foo where f1 > 2 and f1 < 7') as t1; -SELECT dblink_connect('dtest3', 'dbname=contrib_regression'); +SELECT dblink_connect('dtest3', connection_parameters()); SELECT * from dblink_send_query('dtest3', 'select * from foo where f1 > 6') as t1; @@ -350,7 +382,7 @@ SELECT dblink_disconnect('dtest3'); SELECT * from result; -SELECT dblink_connect('dtest1', 'dbname=contrib_regression'); +SELECT dblink_connect('dtest1', connection_parameters()); SELECT * from dblink_send_query('dtest1', 'select * from foo where f1 < 3') as t1; @@ -359,25 +391,31 @@ SELECT dblink_error_message('dtest1'); SELECT dblink_disconnect('dtest1'); -- test foreign data wrapper functionality -CREATE USER dblink_regression_test; -CREATE SERVER fdtest FOREIGN DATA WRAPPER dblink_fdw - OPTIONS (dbname 'contrib_regression'); +CREATE ROLE 
dblink_regression_test; +DO $d$ + BEGIN + EXECUTE $$CREATE SERVER fdtest FOREIGN DATA WRAPPER dblink_fdw + OPTIONS (dbname '$$||current_database()||$$', + port '$$||current_setting('port')||$$' + )$$; + END; +$d$; + CREATE USER MAPPING FOR public SERVER fdtest OPTIONS (server 'localhost'); -- fail, can't specify server here -CREATE USER MAPPING FOR public SERVER fdtest; +CREATE USER MAPPING FOR public SERVER fdtest OPTIONS (user :'USER'); GRANT USAGE ON FOREIGN SERVER fdtest TO dblink_regression_test; GRANT EXECUTE ON FUNCTION dblink_connect_u(text, text) TO dblink_regression_test; -\set ORIGINAL_USER :USER -\c - dblink_regression_test +SET SESSION AUTHORIZATION dblink_regression_test; -- should fail SELECT dblink_connect('myconn', 'fdtest'); -- should succeed SELECT dblink_connect_u('myconn', 'fdtest'); SELECT * FROM dblink('myconn','SELECT * FROM foo') AS t(a int, b text, c text[]); -\c - :ORIGINAL_USER +\c - - REVOKE USAGE ON FOREIGN SERVER fdtest FROM dblink_regression_test; REVOKE EXECUTE ON FUNCTION dblink_connect_u(text, text) FROM dblink_regression_test; DROP USER dblink_regression_test; @@ -385,7 +423,7 @@ DROP USER MAPPING FOR public SERVER fdtest; DROP SERVER fdtest; -- test asynchronous notifications -SELECT dblink_connect('dbname=contrib_regression'); +SELECT dblink_connect(connection_parameters()); --should return listen SELECT dblink_exec('LISTEN regression'); @@ -431,7 +469,7 @@ SELECT dblink_build_sql_delete('test_dropped', '1', 1, SET datestyle = ISO, MDY; SET intervalstyle = postgres; SET timezone = UTC; -SELECT dblink_connect('myconn','dbname=contrib_regression'); +SELECT dblink_connect('myconn',connection_parameters()); SELECT dblink_exec('myconn', 'SET datestyle = GERMAN, DMY;'); -- single row synchronous case diff --git a/contrib/dict_int/Makefile b/contrib/dict_int/Makefile index 3a3fc368dc..f6ae24aa4d 100644 --- a/contrib/dict_int/Makefile +++ b/contrib/dict_int/Makefile @@ -1,10 +1,11 @@ # contrib/dict_int/Makefile MODULE_big = dict_int -OBJS = dict_int.o +OBJS = dict_int.o $(WIN32RES) EXTENSION = dict_int DATA = dict_int--1.0.sql dict_int--unpackaged--1.0.sql +PGFILEDESC = "dict_int - add-on dictionary template for full-text search" REGRESS = dict_int diff --git a/contrib/dict_int/dict_int--unpackaged--1.0.sql b/contrib/dict_int/dict_int--unpackaged--1.0.sql index ef59b046ee..1b2d862e1f 100644 --- a/contrib/dict_int/dict_int--unpackaged--1.0.sql +++ b/contrib/dict_int/dict_int--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/dict_int/dict_int--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION dict_int" to load this file. \quit +\echo Use "CREATE EXTENSION dict_int FROM unpackaged" to load this file. 
\quit ALTER EXTENSION dict_int ADD function dintdict_init(internal); ALTER EXTENSION dict_int ADD function dintdict_lexize(internal,internal,internal,internal); diff --git a/contrib/dict_int/dict_int.c b/contrib/dict_int/dict_int.c index 79067a86f0..73cb73de9b 100644 --- a/contrib/dict_int/dict_int.c +++ b/contrib/dict_int/dict_int.c @@ -3,7 +3,7 @@ * dict_int.c * Text search dictionary for integers * - * Copyright (c) 2007-2014, PostgreSQL Global Development Group + * Copyright (c) 2007-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/dict_int/dict_int.c @@ -17,7 +17,6 @@ PG_MODULE_MAGIC; - typedef struct { int maxlen; diff --git a/contrib/dict_xsyn/Makefile b/contrib/dict_xsyn/Makefile index ce92baa478..0c401cf3c8 100644 --- a/contrib/dict_xsyn/Makefile +++ b/contrib/dict_xsyn/Makefile @@ -1,11 +1,12 @@ # contrib/dict_xsyn/Makefile MODULE_big = dict_xsyn -OBJS = dict_xsyn.o +OBJS = dict_xsyn.o $(WIN32RES) EXTENSION = dict_xsyn DATA = dict_xsyn--1.0.sql dict_xsyn--unpackaged--1.0.sql DATA_TSEARCH = xsyn_sample.rules +PGFILEDESC = "dict_xsyn - add-on dictionary template for full-text search" REGRESS = dict_xsyn diff --git a/contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql b/contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql index 1d193f7981..7533da1902 100644 --- a/contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql +++ b/contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION dict_xsyn" to load this file. \quit +\echo Use "CREATE EXTENSION dict_xsyn FROM unpackaged" to load this file. \quit ALTER EXTENSION dict_xsyn ADD function dxsyn_init(internal); ALTER EXTENSION dict_xsyn ADD function dxsyn_lexize(internal,internal,internal,internal); diff --git a/contrib/dict_xsyn/dict_xsyn.c b/contrib/dict_xsyn/dict_xsyn.c index 1c27565f5e..dacb2cc638 100644 --- a/contrib/dict_xsyn/dict_xsyn.c +++ b/contrib/dict_xsyn/dict_xsyn.c @@ -3,7 +3,7 @@ * dict_xsyn.c * Extended synonym dictionary * - * Copyright (c) 2007-2014, PostgreSQL Global Development Group + * Copyright (c) 2007-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/dict_xsyn/dict_xsyn.c diff --git a/contrib/dummy_seclabel/Makefile b/contrib/dummy_seclabel/Makefile deleted file mode 100644 index 105400f5f9..0000000000 --- a/contrib/dummy_seclabel/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -# contrib/dummy_seclabel/Makefile - -MODULES = dummy_seclabel - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/dummy_seclabel -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif diff --git a/contrib/dummy_seclabel/dummy_seclabel.c b/contrib/dummy_seclabel/dummy_seclabel.c deleted file mode 100644 index b5753cc908..0000000000 --- a/contrib/dummy_seclabel/dummy_seclabel.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * dummy_seclabel.c - * - * Dummy security label provider. - * - * This module does not provide anything worthwhile from a security - * perspective, but allows regression testing independent of platform-specific - * features like SELinux. 
- * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - */ -#include "postgres.h" - -#include "commands/seclabel.h" -#include "miscadmin.h" -#include "utils/rel.h" - -PG_MODULE_MAGIC; - -/* Entrypoint of the module */ -void _PG_init(void); - -static void -dummy_object_relabel(const ObjectAddress *object, const char *seclabel) -{ - if (seclabel == NULL || - strcmp(seclabel, "unclassified") == 0 || - strcmp(seclabel, "classified") == 0) - return; - - if (strcmp(seclabel, "secret") == 0 || - strcmp(seclabel, "top secret") == 0) - { - if (!superuser()) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("only superuser can set '%s' label", seclabel))); - return; - } - ereport(ERROR, - (errcode(ERRCODE_INVALID_NAME), - errmsg("'%s' is not a valid security label", seclabel))); -} - -void -_PG_init(void) -{ - register_label_provider("dummy", dummy_object_relabel); -} diff --git a/contrib/earthdistance/Makefile b/contrib/earthdistance/Makefile index 48a7cf8c7c..93dcbe3a31 100644 --- a/contrib/earthdistance/Makefile +++ b/contrib/earthdistance/Makefile @@ -4,6 +4,7 @@ MODULES = earthdistance EXTENSION = earthdistance DATA = earthdistance--1.0.sql earthdistance--unpackaged--1.0.sql +PGFILEDESC = "earthdistance - calculate distances on the surface of the Earth" REGRESS = earthdistance REGRESS_OPTS = --extra-install=contrib/cube diff --git a/contrib/earthdistance/earthdistance--unpackaged--1.0.sql b/contrib/earthdistance/earthdistance--unpackaged--1.0.sql index 362e0ac107..ae787f6877 100644 --- a/contrib/earthdistance/earthdistance--unpackaged--1.0.sql +++ b/contrib/earthdistance/earthdistance--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/earthdistance/earthdistance--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION earthdistance" to load this file. \quit +\echo Use "CREATE EXTENSION earthdistance FROM unpackaged" to load this file. \quit ALTER EXTENSION earthdistance ADD function earth(); ALTER EXTENSION earthdistance ADD type earth; diff --git a/contrib/earthdistance/earthdistance.c b/contrib/earthdistance/earthdistance.c index 6bbebdfd1a..861b166373 100644 --- a/contrib/earthdistance/earthdistance.c +++ b/contrib/earthdistance/earthdistance.c @@ -10,7 +10,6 @@ #define M_PI 3.14159265358979323846 #endif - PG_MODULE_MAGIC; /* Earth's radius is in statute miles. */ diff --git a/contrib/file_fdw/Makefile b/contrib/file_fdw/Makefile index 4616cec633..4da9f2d697 100644 --- a/contrib/file_fdw/Makefile +++ b/contrib/file_fdw/Makefile @@ -4,6 +4,7 @@ MODULES = file_fdw EXTENSION = file_fdw DATA = file_fdw--1.0.sql +PGFILEDESC = "file_fdw - foreign data wrapper for files" REGRESS = file_fdw diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index 5a4d5aac21..4368897581 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -3,7 +3,7 @@ * file_fdw.c * foreign-data wrapper for server-side flat files. 
* - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/file_fdw/file_fdw.c @@ -932,7 +932,7 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel, int tuple_width; tuple_width = MAXALIGN(baserel->width) + - MAXALIGN(sizeof(HeapTupleHeaderData)); + MAXALIGN(SizeofHeapTupleHeader); ntuples = clamp_row_est((double) stat_buf.st_size / (double) tuple_width); } diff --git a/contrib/file_fdw/input/file_fdw.source b/contrib/file_fdw/input/file_fdw.source index b608372825..416753dcad 100644 --- a/contrib/file_fdw/input/file_fdw.source +++ b/contrib/file_fdw/input/file_fdw.source @@ -62,7 +62,7 @@ CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', null ' CREATE FOREIGN TABLE tbl () SERVER file_server; -- ERROR CREATE FOREIGN TABLE agg_text ( - a int2, + a int2 CHECK (a >= 0), b float4 ) SERVER file_server OPTIONS (format 'text', filename '@abs_srcdir@/data/agg.data', delimiter ' ', null '\N'); @@ -72,11 +72,13 @@ CREATE FOREIGN TABLE agg_csv ( b float4 ) SERVER file_server OPTIONS (format 'csv', filename '@abs_srcdir@/data/agg.csv', header 'true', delimiter ';', quote '@', escape '"', null ''); +ALTER FOREIGN TABLE agg_csv ADD CHECK (a >= 0); CREATE FOREIGN TABLE agg_bad ( a int2, b float4 ) SERVER file_server OPTIONS (format 'csv', filename '@abs_srcdir@/data/agg.bad', header 'true', delimiter ';', quote '@', escape '"', null ''); +ALTER FOREIGN TABLE agg_bad ADD CHECK (a >= 0); -- per-column options tests CREATE FOREIGN TABLE text_csv ( @@ -131,9 +133,35 @@ SELECT tableoid::regclass, b FROM agg_csv; INSERT INTO agg_csv VALUES(1,2.0); UPDATE agg_csv SET a = 1; DELETE FROM agg_csv WHERE a = 100; --- but this should be ignored +-- but this should be allowed SELECT * FROM agg_csv FOR UPDATE; +-- constraint exclusion tests +\t on +EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0; +\t off +SELECT * FROM agg_csv WHERE a < 0; +SET constraint_exclusion = 'on'; +\t on +EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0; +\t off +SELECT * FROM agg_csv WHERE a < 0; +RESET constraint_exclusion; + +-- table inheritance tests +CREATE TABLE agg (a int2, b float4); +ALTER FOREIGN TABLE agg_csv INHERIT agg; +SELECT tableoid::regclass, * FROM agg; +SELECT tableoid::regclass, * FROM agg_csv; +SELECT tableoid::regclass, * FROM ONLY agg; +-- updates aren't supported +UPDATE agg SET a = 1; +DELETE FROM agg WHERE a = 100; +-- but this should be allowed +SELECT tableoid::regclass, * FROM agg FOR UPDATE; +ALTER FOREIGN TABLE agg_csv NO INHERIT agg; +DROP TABLE agg; + -- privilege tests SET ROLE file_fdw_superuser; SELECT * FROM agg_text ORDER BY a; diff --git a/contrib/file_fdw/output/file_fdw.source b/contrib/file_fdw/output/file_fdw.source index bc183b8874..8719694276 100644 --- a/contrib/file_fdw/output/file_fdw.source +++ b/contrib/file_fdw/output/file_fdw.source @@ -78,7 +78,7 @@ ERROR: COPY null representation cannot use newline or carriage return CREATE FOREIGN TABLE tbl () SERVER file_server; -- ERROR ERROR: filename is required for file_fdw foreign tables CREATE FOREIGN TABLE agg_text ( - a int2, + a int2 CHECK (a >= 0), b float4 ) SERVER file_server OPTIONS (format 'text', filename '@abs_srcdir@/data/agg.data', delimiter ' ', null '\N'); @@ -88,11 +88,13 @@ CREATE FOREIGN TABLE agg_csv ( b float4 ) SERVER file_server OPTIONS (format 'csv', filename '@abs_srcdir@/data/agg.csv', header 'true', delimiter ';', quote '@', escape '"', null ''); +ALTER FOREIGN TABLE 
agg_csv ADD CHECK (a >= 0); CREATE FOREIGN TABLE agg_bad ( a int2, b float4 ) SERVER file_server OPTIONS (format 'csv', filename '@abs_srcdir@/data/agg.bad', header 'true', delimiter ';', quote '@', escape '"', null ''); +ALTER FOREIGN TABLE agg_bad ADD CHECK (a >= 0); -- per-column options tests CREATE FOREIGN TABLE text_csv ( word1 text OPTIONS (force_not_null 'true'), @@ -210,7 +212,7 @@ UPDATE agg_csv SET a = 1; ERROR: cannot update foreign table "agg_csv" DELETE FROM agg_csv WHERE a = 100; ERROR: cannot delete from foreign table "agg_csv" --- but this should be ignored +-- but this should be allowed SELECT * FROM agg_csv FOR UPDATE; a | b -----+--------- @@ -219,6 +221,74 @@ SELECT * FROM agg_csv FOR UPDATE; 42 | 324.78 (3 rows) +-- constraint exclusion tests +\t on +EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0; + Foreign Scan on public.agg_csv + Output: a, b + Filter: (agg_csv.a < 0) + Foreign File: @abs_srcdir@/data/agg.csv + +\t off +SELECT * FROM agg_csv WHERE a < 0; + a | b +---+--- +(0 rows) + +SET constraint_exclusion = 'on'; +\t on +EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0; + Result + Output: a, b + One-Time Filter: false + +\t off +SELECT * FROM agg_csv WHERE a < 0; + a | b +---+--- +(0 rows) + +RESET constraint_exclusion; +-- table inheritance tests +CREATE TABLE agg (a int2, b float4); +ALTER FOREIGN TABLE agg_csv INHERIT agg; +SELECT tableoid::regclass, * FROM agg; + tableoid | a | b +----------+-----+--------- + agg_csv | 100 | 99.097 + agg_csv | 0 | 0.09561 + agg_csv | 42 | 324.78 +(3 rows) + +SELECT tableoid::regclass, * FROM agg_csv; + tableoid | a | b +----------+-----+--------- + agg_csv | 100 | 99.097 + agg_csv | 0 | 0.09561 + agg_csv | 42 | 324.78 +(3 rows) + +SELECT tableoid::regclass, * FROM ONLY agg; + tableoid | a | b +----------+---+--- +(0 rows) + +-- updates aren't supported +UPDATE agg SET a = 1; +ERROR: cannot update foreign table "agg_csv" +DELETE FROM agg WHERE a = 100; +ERROR: cannot delete from foreign table "agg_csv" +-- but this should be allowed +SELECT tableoid::regclass, * FROM agg FOR UPDATE; + tableoid | a | b +----------+-----+--------- + agg_csv | 100 | 99.097 + agg_csv | 0 | 0.09561 + agg_csv | 42 | 324.78 +(3 rows) + +ALTER FOREIGN TABLE agg_csv NO INHERIT agg; +DROP TABLE agg; -- privilege tests SET ROLE file_fdw_superuser; SELECT * FROM agg_text ORDER BY a; @@ -265,9 +335,9 @@ RESET ROLE; DROP EXTENSION file_fdw CASCADE; NOTICE: drop cascades to 8 other objects DETAIL: drop cascades to server file_server -drop cascades to user mapping for file_fdw_user -drop cascades to user mapping for file_fdw_superuser -drop cascades to user mapping for no_priv_user +drop cascades to user mapping for file_fdw_user on server file_server +drop cascades to user mapping for file_fdw_superuser on server file_server +drop cascades to user mapping for no_priv_user on server file_server drop cascades to foreign table agg_text drop cascades to foreign table agg_csv drop cascades to foreign table agg_bad diff --git a/contrib/fuzzystrmatch/Makefile b/contrib/fuzzystrmatch/Makefile index 834b679b10..0327d9510a 100644 --- a/contrib/fuzzystrmatch/Makefile +++ b/contrib/fuzzystrmatch/Makefile @@ -1,10 +1,11 @@ # contrib/fuzzystrmatch/Makefile MODULE_big = fuzzystrmatch -OBJS = fuzzystrmatch.o dmetaphone.o +OBJS = fuzzystrmatch.o dmetaphone.o $(WIN32RES) EXTENSION = fuzzystrmatch DATA = fuzzystrmatch--1.0.sql fuzzystrmatch--unpackaged--1.0.sql +PGFILEDESC = "fuzzystrmatch - similarities and distance between strings" ifdef 
USE_PGXS PG_CONFIG = pg_config @@ -16,6 +17,3 @@ top_builddir = ../.. include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif - -# levenshtein.c is #included by fuzzystrmatch.c -fuzzystrmatch.o: fuzzystrmatch.c levenshtein.c diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c index 5001288bb6..7c8457e734 100644 --- a/contrib/fuzzystrmatch/dmetaphone.c +++ b/contrib/fuzzystrmatch/dmetaphone.c @@ -195,7 +195,7 @@ dmetaphone_alt(PG_FUNCTION_ARGS) * in a case like this. */ -#define META_FREE(x) /* pfree((x)) */ +#define META_FREE(x) ((void)true) /* pfree((x)) */ #else /* not defined DMETAPHONE_MAIN */ /* use the standard malloc library when not running in PostgreSQL */ @@ -247,7 +247,7 @@ NewMetaString(char *init_str) META_MALLOC(s->str, s->bufsize, char); assert(s->str != NULL); - strncpy(s->str, init_str, s->length + 1); + memcpy(s->str, init_str, s->length + 1); s->free_string_on_destroy = 1; return s; @@ -359,7 +359,10 @@ StringAt(metastring *s, int start, int length,...) { test = va_arg(ap, char *); if (*test && (strncmp(pos, test, length) == 0)) + { + va_end(ap); return 1; + } } while (strcmp(test, "") != 0); diff --git a/contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql b/contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql index b9a805a4fe..14491a9fa7 100644 --- a/contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql +++ b/contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION fuzzystrmatch" to load this file. \quit +\echo Use "CREATE EXTENSION fuzzystrmatch FROM unpackaged" to load this file. \quit ALTER EXTENSION fuzzystrmatch ADD function levenshtein(text,text); ALTER EXTENSION fuzzystrmatch ADD function levenshtein(text,text,integer,integer,integer); diff --git a/contrib/fuzzystrmatch/fuzzystrmatch.c b/contrib/fuzzystrmatch/fuzzystrmatch.c index 7a53d8a008..f9508a574f 100644 --- a/contrib/fuzzystrmatch/fuzzystrmatch.c +++ b/contrib/fuzzystrmatch/fuzzystrmatch.c @@ -6,7 +6,7 @@ * Joe Conway <mail@joeconway.com> * * contrib/fuzzystrmatch/fuzzystrmatch.c - * Copyright (c) 2001-2014, PostgreSQL Global Development Group + * Copyright (c) 2001-2015, PostgreSQL Global Development Group * ALL RIGHTS RESERVED; * * metaphone() @@ -154,23 +154,6 @@ getcode(char c) /* These prevent GH from becoming F */ #define NOGHTOF(c) (getcode(c) & 16) /* BDH */ -/* Faster than memcmp(), for this use case. 
*/ -static inline bool -rest_of_char_same(const char *s1, const char *s2, int len) -{ - while (len > 0) - { - len--; - if (s1[len] != s2[len]) - return false; - } - return true; -} - -#include "levenshtein.c" -#define LEVENSHTEIN_LESS_EQUAL -#include "levenshtein.c" - PG_FUNCTION_INFO_V1(levenshtein_with_costs); Datum levenshtein_with_costs(PG_FUNCTION_ARGS) @@ -180,8 +163,20 @@ levenshtein_with_costs(PG_FUNCTION_ARGS) int ins_c = PG_GETARG_INT32(2); int del_c = PG_GETARG_INT32(3); int sub_c = PG_GETARG_INT32(4); - - PG_RETURN_INT32(levenshtein_internal(src, dst, ins_c, del_c, sub_c)); + const char *s_data; + const char *t_data; + int s_bytes, + t_bytes; + + /* Extract a pointer to the actual character data */ + s_data = VARDATA_ANY(src); + t_data = VARDATA_ANY(dst); + /* Determine length of each string in bytes and characters */ + s_bytes = VARSIZE_ANY_EXHDR(src); + t_bytes = VARSIZE_ANY_EXHDR(dst); + + PG_RETURN_INT32(varstr_levenshtein(s_data, s_bytes, t_data, t_bytes, ins_c, + del_c, sub_c)); } @@ -191,8 +186,20 @@ levenshtein(PG_FUNCTION_ARGS) { text *src = PG_GETARG_TEXT_PP(0); text *dst = PG_GETARG_TEXT_PP(1); - - PG_RETURN_INT32(levenshtein_internal(src, dst, 1, 1, 1)); + const char *s_data; + const char *t_data; + int s_bytes, + t_bytes; + + /* Extract a pointer to the actual character data */ + s_data = VARDATA_ANY(src); + t_data = VARDATA_ANY(dst); + /* Determine length of each string in bytes and characters */ + s_bytes = VARSIZE_ANY_EXHDR(src); + t_bytes = VARSIZE_ANY_EXHDR(dst); + + PG_RETURN_INT32(varstr_levenshtein(s_data, s_bytes, t_data, t_bytes, 1, 1, + 1)); } @@ -206,8 +213,21 @@ levenshtein_less_equal_with_costs(PG_FUNCTION_ARGS) int del_c = PG_GETARG_INT32(3); int sub_c = PG_GETARG_INT32(4); int max_d = PG_GETARG_INT32(5); - - PG_RETURN_INT32(levenshtein_less_equal_internal(src, dst, ins_c, del_c, sub_c, max_d)); + const char *s_data; + const char *t_data; + int s_bytes, + t_bytes; + + /* Extract a pointer to the actual character data */ + s_data = VARDATA_ANY(src); + t_data = VARDATA_ANY(dst); + /* Determine length of each string in bytes and characters */ + s_bytes = VARSIZE_ANY_EXHDR(src); + t_bytes = VARSIZE_ANY_EXHDR(dst); + + PG_RETURN_INT32(varstr_levenshtein_less_equal(s_data, s_bytes, t_data, + t_bytes, ins_c, del_c, + sub_c, max_d)); } @@ -218,8 +238,20 @@ levenshtein_less_equal(PG_FUNCTION_ARGS) text *src = PG_GETARG_TEXT_PP(0); text *dst = PG_GETARG_TEXT_PP(1); int max_d = PG_GETARG_INT32(2); - - PG_RETURN_INT32(levenshtein_less_equal_internal(src, dst, 1, 1, 1, max_d)); + const char *s_data; + const char *t_data; + int s_bytes, + t_bytes; + + /* Extract a pointer to the actual character data */ + s_data = VARDATA_ANY(src); + t_data = VARDATA_ANY(dst); + /* Determine length of each string in bytes and characters */ + s_bytes = VARSIZE_ANY_EXHDR(src); + t_bytes = VARSIZE_ANY_EXHDR(dst); + + PG_RETURN_INT32(varstr_levenshtein_less_equal(s_data, s_bytes, t_data, + t_bytes, 1, 1, 1, max_d)); } @@ -248,11 +280,6 @@ metaphone(PG_FUNCTION_ARGS) errmsg("argument exceeds the maximum length of %d bytes", MAX_METAPHONE_STRLEN))); - if (!(str_i_len > 0)) - ereport(ERROR, - (errcode(ERRCODE_ZERO_LENGTH_CHARACTER_STRING), - errmsg("argument is empty string"))); - reqlen = PG_GETARG_INT32(1); if (reqlen > MAX_METAPHONE_STRLEN) ereport(ERROR, diff --git a/contrib/fuzzystrmatch/levenshtein.c b/contrib/fuzzystrmatch/levenshtein.c deleted file mode 100644 index 4f37a54b1e..0000000000 --- a/contrib/fuzzystrmatch/levenshtein.c +++ /dev/null @@ -1,403 +0,0 @@ -/* - * 
levenshtein.c - * - * Functions for "fuzzy" comparison of strings - * - * Joe Conway <mail@joeconway.com> - * - * Copyright (c) 2001-2014, PostgreSQL Global Development Group - * ALL RIGHTS RESERVED; - * - * levenshtein() - * ------------- - * Written based on a description of the algorithm by Michael Gilleland - * found at http://www.merriampark.com/ld.htm - * Also looked at levenshtein.c in the PHP 4.0.6 distribution for - * inspiration. - * Configurable penalty costs extension is introduced by Volkan - * YAZICI <volkan.yazici@gmail.com>. - */ - -/* - * External declarations for exported functions - */ -#ifdef LEVENSHTEIN_LESS_EQUAL -static int levenshtein_less_equal_internal(text *s, text *t, - int ins_c, int del_c, int sub_c, int max_d); -#else -static int levenshtein_internal(text *s, text *t, - int ins_c, int del_c, int sub_c); -#endif - -#define MAX_LEVENSHTEIN_STRLEN 255 - - -/* - * Calculates Levenshtein distance metric between supplied strings. Generally - * (1, 1, 1) penalty costs suffices for common cases, but your mileage may - * vary. - * - * One way to compute Levenshtein distance is to incrementally construct - * an (m+1)x(n+1) matrix where cell (i, j) represents the minimum number - * of operations required to transform the first i characters of s into - * the first j characters of t. The last column of the final row is the - * answer. - * - * We use that algorithm here with some modification. In lieu of holding - * the entire array in memory at once, we'll just use two arrays of size - * m+1 for storing accumulated values. At each step one array represents - * the "previous" row and one is the "current" row of the notional large - * array. - * - * If max_d >= 0, we only need to provide an accurate answer when that answer - * is less than or equal to the bound. From any cell in the matrix, there is - * theoretical "minimum residual distance" from that cell to the last column - * of the final row. This minimum residual distance is zero when the - * untransformed portions of the strings are of equal length (because we might - * get lucky and find all the remaining characters matching) and is otherwise - * based on the minimum number of insertions or deletions needed to make them - * equal length. The residual distance grows as we move toward the upper - * right or lower left corners of the matrix. When the max_d bound is - * usefully tight, we can use this property to avoid computing the entirety - * of each row; instead, we maintain a start_column and stop_column that - * identify the portion of the matrix close to the diagonal which can still - * affect the final answer. - */ -static int -#ifdef LEVENSHTEIN_LESS_EQUAL -levenshtein_less_equal_internal(text *s, text *t, - int ins_c, int del_c, int sub_c, int max_d) -#else -levenshtein_internal(text *s, text *t, - int ins_c, int del_c, int sub_c) -#endif -{ - int m, - n, - s_bytes, - t_bytes; - int *prev; - int *curr; - int *s_char_len = NULL; - int i, - j; - const char *s_data; - const char *t_data; - const char *y; - - /* - * For levenshtein_less_equal_internal, we have real variables called - * start_column and stop_column; otherwise it's just short-hand for 0 and - * m. - */ -#ifdef LEVENSHTEIN_LESS_EQUAL - int start_column, - stop_column; - -#undef START_COLUMN -#undef STOP_COLUMN -#define START_COLUMN start_column -#define STOP_COLUMN stop_column -#else -#undef START_COLUMN -#undef STOP_COLUMN -#define START_COLUMN 0 -#define STOP_COLUMN m -#endif - - /* Extract a pointer to the actual character data. 
*/ - s_data = VARDATA_ANY(s); - t_data = VARDATA_ANY(t); - - /* Determine length of each string in bytes and characters. */ - s_bytes = VARSIZE_ANY_EXHDR(s); - t_bytes = VARSIZE_ANY_EXHDR(t); - m = pg_mbstrlen_with_len(s_data, s_bytes); - n = pg_mbstrlen_with_len(t_data, t_bytes); - - /* - * We can transform an empty s into t with n insertions, or a non-empty t - * into an empty s with m deletions. - */ - if (!m) - return n * ins_c; - if (!n) - return m * del_c; - - /* - * For security concerns, restrict excessive CPU+RAM usage. (This - * implementation uses O(m) memory and has O(mn) complexity.) - */ - if (m > MAX_LEVENSHTEIN_STRLEN || - n > MAX_LEVENSHTEIN_STRLEN) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("argument exceeds the maximum length of %d bytes", - MAX_LEVENSHTEIN_STRLEN))); - -#ifdef LEVENSHTEIN_LESS_EQUAL - /* Initialize start and stop columns. */ - start_column = 0; - stop_column = m + 1; - - /* - * If max_d >= 0, determine whether the bound is impossibly tight. If so, - * return max_d + 1 immediately. Otherwise, determine whether it's tight - * enough to limit the computation we must perform. If so, figure out - * initial stop column. - */ - if (max_d >= 0) - { - int min_theo_d; /* Theoretical minimum distance. */ - int max_theo_d; /* Theoretical maximum distance. */ - int net_inserts = n - m; - - min_theo_d = net_inserts < 0 ? - -net_inserts * del_c : net_inserts * ins_c; - if (min_theo_d > max_d) - return max_d + 1; - if (ins_c + del_c < sub_c) - sub_c = ins_c + del_c; - max_theo_d = min_theo_d + sub_c * Min(m, n); - if (max_d >= max_theo_d) - max_d = -1; - else if (ins_c + del_c > 0) - { - /* - * Figure out how much of the first row of the notional matrix we - * need to fill in. If the string is growing, the theoretical - * minimum distance already incorporates the cost of deleting the - * number of characters necessary to make the two strings equal in - * length. Each additional deletion forces another insertion, so - * the best-case total cost increases by ins_c + del_c. If the - * string is shrinking, the minimum theoretical cost assumes no - * excess deletions; that is, we're starting no further right than - * column n - m. If we do start further right, the best-case - * total cost increases by ins_c + del_c for each move right. - */ - int slack_d = max_d - min_theo_d; - int best_column = net_inserts < 0 ? -net_inserts : 0; - - stop_column = best_column + (slack_d / (ins_c + del_c)) + 1; - if (stop_column > m) - stop_column = m + 1; - } - } -#endif - - /* - * In order to avoid calling pg_mblen() repeatedly on each character in s, - * we cache all the lengths before starting the main loop -- but if all - * the characters in both strings are single byte, then we skip this and - * use a fast-path in the main loop. If only one string contains - * multi-byte characters, we still build the array, so that the fast-path - * needn't deal with the case where the array hasn't been initialized. - */ - if (m != s_bytes || n != t_bytes) - { - int i; - const char *cp = s_data; - - s_char_len = (int *) palloc((m + 1) * sizeof(int)); - for (i = 0; i < m; ++i) - { - s_char_len[i] = pg_mblen(cp); - cp += s_char_len[i]; - } - s_char_len[i] = 0; - } - - /* One more cell for initialization column and row. */ - ++m; - ++n; - - /* Previous and current rows of notional array. */ - prev = (int *) palloc(2 * m * sizeof(int)); - curr = prev + m; - - /* - * To transform the first i characters of s into the first 0 characters of - * t, we must perform i deletions. 
- */ - for (i = START_COLUMN; i < STOP_COLUMN; i++) - prev[i] = i * del_c; - - /* Loop through rows of the notional array */ - for (y = t_data, j = 1; j < n; j++) - { - int *temp; - const char *x = s_data; - int y_char_len = n != t_bytes + 1 ? pg_mblen(y) : 1; - -#ifdef LEVENSHTEIN_LESS_EQUAL - - /* - * In the best case, values percolate down the diagonal unchanged, so - * we must increment stop_column unless it's already on the right end - * of the array. The inner loop will read prev[stop_column], so we - * have to initialize it even though it shouldn't affect the result. - */ - if (stop_column < m) - { - prev[stop_column] = max_d + 1; - ++stop_column; - } - - /* - * The main loop fills in curr, but curr[0] needs a special case: to - * transform the first 0 characters of s into the first j characters - * of t, we must perform j insertions. However, if start_column > 0, - * this special case does not apply. - */ - if (start_column == 0) - { - curr[0] = j * ins_c; - i = 1; - } - else - i = start_column; -#else - curr[0] = j * ins_c; - i = 1; -#endif - - /* - * This inner loop is critical to performance, so we include a - * fast-path to handle the (fairly common) case where no multibyte - * characters are in the mix. The fast-path is entitled to assume - * that if s_char_len is not initialized then BOTH strings contain - * only single-byte characters. - */ - if (s_char_len != NULL) - { - for (; i < STOP_COLUMN; i++) - { - int ins; - int del; - int sub; - int x_char_len = s_char_len[i - 1]; - - /* - * Calculate costs for insertion, deletion, and substitution. - * - * When calculating cost for substitution, we compare the last - * character of each possibly-multibyte character first, - * because that's enough to rule out most mis-matches. If we - * get past that test, then we compare the lengths and the - * remaining bytes. - */ - ins = prev[i] + ins_c; - del = curr[i - 1] + del_c; - if (x[x_char_len - 1] == y[y_char_len - 1] - && x_char_len == y_char_len && - (x_char_len == 1 || rest_of_char_same(x, y, x_char_len))) - sub = prev[i - 1]; - else - sub = prev[i - 1] + sub_c; - - /* Take the one with minimum cost. */ - curr[i] = Min(ins, del); - curr[i] = Min(curr[i], sub); - - /* Point to next character. */ - x += x_char_len; - } - } - else - { - for (; i < STOP_COLUMN; i++) - { - int ins; - int del; - int sub; - - /* Calculate costs for insertion, deletion, and substitution. */ - ins = prev[i] + ins_c; - del = curr[i - 1] + del_c; - sub = prev[i - 1] + ((*x == *y) ? 0 : sub_c); - - /* Take the one with minimum cost. */ - curr[i] = Min(ins, del); - curr[i] = Min(curr[i], sub); - - /* Point to next character. */ - x++; - } - } - - /* Swap current row with previous row. */ - temp = curr; - curr = prev; - prev = temp; - - /* Point to next character. */ - y += y_char_len; - -#ifdef LEVENSHTEIN_LESS_EQUAL - - /* - * This chunk of code represents a significant performance hit if used - * in the case where there is no max_d bound. This is probably not - * because the max_d >= 0 test itself is expensive, but rather because - * the possibility of needing to execute this code prevents tight - * optimization of the loop as a whole. - */ - if (max_d >= 0) - { - /* - * The "zero point" is the column of the current row where the - * remaining portions of the strings are of equal length. There - * are (n - 1) characters in the target string, of which j have - * been transformed. There are (m - 1) characters in the source - * string, so we want to find the value for zp where (n - 1) - j = - * (m - 1) - zp. 
- */ - int zp = j - (n - m); - - /* Check whether the stop column can slide left. */ - while (stop_column > 0) - { - int ii = stop_column - 1; - int net_inserts = ii - zp; - - if (prev[ii] + (net_inserts > 0 ? net_inserts * ins_c : - -net_inserts * del_c) <= max_d) - break; - stop_column--; - } - - /* Check whether the start column can slide right. */ - while (start_column < stop_column) - { - int net_inserts = start_column - zp; - - if (prev[start_column] + - (net_inserts > 0 ? net_inserts * ins_c : - -net_inserts * del_c) <= max_d) - break; - - /* - * We'll never again update these values, so we must make sure - * there's nothing here that could confuse any future - * iteration of the outer loop. - */ - prev[start_column] = max_d + 1; - curr[start_column] = max_d + 1; - if (start_column != 0) - s_data += (s_char_len != NULL) ? s_char_len[start_column - 1] : 1; - start_column++; - } - - /* If they cross, we're going to exceed the bound. */ - if (start_column >= stop_column) - return max_d + 1; - } -#endif - } - - /* - * Because the final value was swapped from the previous row to the - * current row, that's where we'll find it. - */ - return prev[m - 1]; -} diff --git a/contrib/hstore/Makefile b/contrib/hstore/Makefile index 2b60fbed0e..82908de9da 100644 --- a/contrib/hstore/Makefile +++ b/contrib/hstore/Makefile @@ -2,12 +2,13 @@ MODULE_big = hstore OBJS = hstore_io.o hstore_op.o hstore_gist.o hstore_gin.o hstore_compat.o \ - crc32.o + $(WIN32RES) EXTENSION = hstore DATA = hstore--1.3.sql hstore--1.2--1.3.sql \ hstore--1.1--1.2.sql hstore--1.0--1.1.sql \ hstore--unpackaged--1.0.sql +PGFILEDESC = "hstore - key/value pair data type" REGRESS = hstore diff --git a/contrib/hstore/crc32.c b/contrib/hstore/crc32.c deleted file mode 100644 index c82fc66472..0000000000 --- a/contrib/hstore/crc32.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * contrib/hstore/crc32.c - * - * Both POSIX and CRC32 checksums */ - -#include <sys/types.h> -#include <stdio.h> -#include <sys/types.h> - -#include "crc32.h" - -/* - * This code implements the AUTODIN II polynomial - * The variable corresponding to the macro argument "crc" should - * be an unsigned long. 
- * Original code by Spencer Garrett <srg@quick.com> - */ - -#define _CRC32_(crc, ch) (crc = (crc >> 8) ^ crc32tab[(crc ^ (ch)) & 0xff]) - -/* generated using the AUTODIN II polynomial - * x^32 + x^26 + x^23 + x^22 + x^16 + - * x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + 1 - */ - -static const unsigned int crc32tab[256] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, - 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, - 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, - 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, - 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, - 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, - 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, - 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, - 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, - 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, - 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, - 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, - 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, - 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, - 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, - 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, - 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, - 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, - 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, - 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, - 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, - 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, - 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, - 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, - 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, - 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, - 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, - 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, - 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, - 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, - 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, - 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, - 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, - 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, - 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, - 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, - 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, - 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, - 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, - 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, - 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, - 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, - 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, - 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, - 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, - 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, - 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, - 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, - 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, - 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, - 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, - 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, - 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, - 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, - 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, -}; - -unsigned 
int -crc32_sz(char *buf, int size) -{ - unsigned int crc = ~((unsigned int) 0); - char *p; - int len, - nr; - - len = 0; - nr = size; - for (len += nr, p = buf; nr--; ++p) - _CRC32_(crc, *p); - return ~crc; -} diff --git a/contrib/hstore/crc32.h b/contrib/hstore/crc32.h deleted file mode 100644 index f5bfd82517..0000000000 --- a/contrib/hstore/crc32.h +++ /dev/null @@ -1,13 +0,0 @@ -/* - * contrib/hstore/crc32.h - */ -#ifndef _CRC32_H -#define _CRC32_H - -/* Returns crc32 of data block */ -extern unsigned int crc32_sz(char *buf, int size); - -/* Returns crc32 of null-terminated string */ -#define crc32(buf) crc32_sz((buf),strlen(buf)) - -#endif diff --git a/contrib/hstore/hstore--unpackaged--1.0.sql b/contrib/hstore/hstore--unpackaged--1.0.sql index b7e73f4123..19a7802805 100644 --- a/contrib/hstore/hstore--unpackaged--1.0.sql +++ b/contrib/hstore/hstore--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/hstore/hstore--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION hstore" to load this file. \quit +\echo Use "CREATE EXTENSION hstore FROM unpackaged" to load this file. \quit ALTER EXTENSION hstore ADD type hstore; ALTER EXTENSION hstore ADD function hstore_in(cstring); diff --git a/contrib/hstore/hstore_gist.c b/contrib/hstore/hstore_gist.c index d4a9aaa4c1..06f3c9359b 100644 --- a/contrib/hstore/hstore_gist.c +++ b/contrib/hstore/hstore_gist.c @@ -6,8 +6,8 @@ #include "access/gist.h" #include "access/skey.h" #include "catalog/pg_type.h" +#include "utils/pg_crc.h" -#include "crc32.h" #include "hstore.h" /* bigint defines */ @@ -41,7 +41,7 @@ typedef struct { int32 vl_len_; /* varlena header (do not touch directly!) */ int32 flag; - char data[1]; + char data[FLEXIBLE_ARRAY_MEMBER]; } GISTTYPE; #define ALLISTRUE 0x04 @@ -68,6 +68,20 @@ typedef struct #define WISH_F(a,b,c) (double)( -(double)(((a)-(b))*((a)-(b))*((a)-(b)))*(c) ) +/* shorthand for calculating CRC-32 of a single chunk of data. 
*/ +static pg_crc32 +crc32_sz(char *buf, int size) +{ + pg_crc32 crc; + + INIT_TRADITIONAL_CRC32(crc); + COMP_TRADITIONAL_CRC32(crc, buf, size); + FIN_TRADITIONAL_CRC32(crc); + + return crc; +} + + PG_FUNCTION_INFO_V1(ghstore_in); PG_FUNCTION_INFO_V1(ghstore_out); diff --git a/contrib/hstore/hstore_io.c b/contrib/hstore/hstore_io.c index 6acd18fdb0..25a33abebc 100644 --- a/contrib/hstore/hstore_io.c +++ b/contrib/hstore/hstore_io.c @@ -12,6 +12,7 @@ #include "libpq/pqformat.h" #include "utils/builtins.h" #include "utils/json.h" +#include "utils/jsonapi.h" #include "utils/jsonb.h" #include "utils/lsyscache.h" #include "utils/memutils.h" @@ -746,7 +747,7 @@ typedef struct RecordIOData Oid record_type; int32 record_typmod; int ncolumns; - ColumnIOData columns[1]; /* VARIABLE LENGTH ARRAY */ + ColumnIOData columns[FLEXIBLE_ARRAY_MEMBER]; } RecordIOData; PG_FUNCTION_INFO_V1(hstore_from_record); @@ -804,8 +805,8 @@ hstore_from_record(PG_FUNCTION_ARGS) { fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt, - sizeof(RecordIOData) - sizeof(ColumnIOData) - + ncolumns * sizeof(ColumnIOData)); + offsetof(RecordIOData, columns) + + ncolumns * sizeof(ColumnIOData)); my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; my_extra->record_type = InvalidOid; my_extra->record_typmod = 0; @@ -815,8 +816,8 @@ hstore_from_record(PG_FUNCTION_ARGS) my_extra->record_typmod != tupTypmod) { MemSet(my_extra, 0, - sizeof(RecordIOData) - sizeof(ColumnIOData) - + ncolumns * sizeof(ColumnIOData)); + offsetof(RecordIOData, columns) + + ncolumns * sizeof(ColumnIOData)); my_extra->record_type = tupType; my_extra->record_typmod = tupTypmod; my_extra->ncolumns = ncolumns; @@ -995,8 +996,8 @@ hstore_populate_record(PG_FUNCTION_ARGS) { fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt, - sizeof(RecordIOData) - sizeof(ColumnIOData) - + ncolumns * sizeof(ColumnIOData)); + offsetof(RecordIOData, columns) + + ncolumns * sizeof(ColumnIOData)); my_extra = (RecordIOData *) fcinfo->flinfo->fn_extra; my_extra->record_type = InvalidOid; my_extra->record_typmod = 0; @@ -1006,8 +1007,8 @@ hstore_populate_record(PG_FUNCTION_ARGS) my_extra->record_typmod != tupTypmod) { MemSet(my_extra, 0, - sizeof(RecordIOData) - sizeof(ColumnIOData) - + ncolumns * sizeof(ColumnIOData)); + offsetof(RecordIOData, columns) + + ncolumns * sizeof(ColumnIOData)); my_extra->record_type = tupType; my_extra->record_typmod = tupTypmod; my_extra->ncolumns = ncolumns; @@ -1246,7 +1247,6 @@ hstore_to_json_loose(PG_FUNCTION_ARGS) int count = HS_COUNT(in); char *base = STRPTR(in); HEntry *entries = ARRPTR(in); - bool is_number; StringInfoData tmp, dst; @@ -1273,48 +1273,9 @@ hstore_to_json_loose(PG_FUNCTION_ARGS) appendStringInfoString(&dst, "false"); else { - is_number = false; resetStringInfo(&tmp); appendBinaryStringInfo(&tmp, HS_VAL(entries, base, i), HS_VALLEN(entries, i)); - - /* - * don't treat something with a leading zero followed by another - * digit as numeric - could be a zip code or similar - */ - if (tmp.len > 0 && - !(tmp.data[0] == '0' && - isdigit((unsigned char) tmp.data[1])) && - strspn(tmp.data, "+-0123456789Ee.") == tmp.len) - { - /* - * might be a number. See if we can input it as a numeric - * value. Ignore any actual parsed value. 
- */ - char *endptr = "junk"; - long lval; - - lval = strtol(tmp.data, &endptr, 10); - (void) lval; - if (*endptr == '\0') - { - /* - * strol man page says this means the whole string is - * valid - */ - is_number = true; - } - else - { - /* not an int - try a double */ - double dval; - - dval = strtod(tmp.data, &endptr); - (void) dval; - if (*endptr == '\0') - is_number = true; - } - } - if (is_number) + if (IsValidJsonNumber(tmp.data, tmp.len)) appendBinaryStringInfo(&dst, tmp.data, tmp.len); else escape_json(&dst, tmp.data); @@ -1383,7 +1344,7 @@ hstore_to_jsonb(PG_FUNCTION_ARGS) JsonbParseState *state = NULL; JsonbValue *res; - res = pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL); + (void) pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL); for (i = 0; i < count; i++) { @@ -1394,7 +1355,7 @@ hstore_to_jsonb(PG_FUNCTION_ARGS) key.val.string.len = HS_KEYLEN(entries, i); key.val.string.val = HS_KEY(entries, base, i); - res = pushJsonbValue(&state, WJB_KEY, &key); + (void) pushJsonbValue(&state, WJB_KEY, &key); if (HS_VALISNULL(entries, i)) { @@ -1406,7 +1367,7 @@ hstore_to_jsonb(PG_FUNCTION_ARGS) val.val.string.len = HS_VALLEN(entries, i); val.val.string.val = HS_VAL(entries, base, i); } - res = pushJsonbValue(&state, WJB_VALUE, &val); + (void) pushJsonbValue(&state, WJB_VALUE, &val); } res = pushJsonbValue(&state, WJB_END_OBJECT, NULL); @@ -1430,7 +1391,7 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS) initStringInfo(&tmp); - res = pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL); + (void) pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL); for (i = 0; i < count; i++) { @@ -1441,7 +1402,7 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS) key.val.string.len = HS_KEYLEN(entries, i); key.val.string.val = HS_KEY(entries, base, i); - res = pushJsonbValue(&state, WJB_KEY, &key); + (void) pushJsonbValue(&state, WJB_KEY, &key); if (HS_VALISNULL(entries, i)) { @@ -1516,7 +1477,7 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS) val.val.string.val = HS_VAL(entries, base, i); } } - res = pushJsonbValue(&state, WJB_VALUE, &val); + (void) pushJsonbValue(&state, WJB_VALUE, &val); } res = pushJsonbValue(&state, WJB_END_OBJECT, NULL); diff --git a/contrib/intagg/intagg--unpackaged--1.0.sql b/contrib/intagg/intagg--unpackaged--1.0.sql index 6a6663d092..a0b13f3f69 100644 --- a/contrib/intagg/intagg--unpackaged--1.0.sql +++ b/contrib/intagg/intagg--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/intagg/intagg--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION intagg" to load this file. \quit +\echo Use "CREATE EXTENSION intagg FROM unpackaged" to load this file. 
\quit ALTER EXTENSION intagg ADD function int_agg_state(internal,integer); ALTER EXTENSION intagg ADD function int_agg_final_array(internal); diff --git a/contrib/intarray/Makefile b/contrib/intarray/Makefile index 71f820ec4a..920c5b1ba0 100644 --- a/contrib/intarray/Makefile +++ b/contrib/intarray/Makefile @@ -1,10 +1,12 @@ # contrib/intarray/Makefile MODULE_big = _int -OBJS = _int_bool.o _int_gist.o _int_op.o _int_tool.o _intbig_gist.o _int_gin.o +OBJS = _int_bool.o _int_gist.o _int_op.o _int_tool.o \ + _intbig_gist.o _int_gin.o $(WIN32RES) EXTENSION = intarray DATA = intarray--1.0.sql intarray--unpackaged--1.0.sql +PGFILEDESC = "intarray - functions and operators for arrays of integers" REGRESS = _int diff --git a/contrib/intarray/_int.h b/contrib/intarray/_int.h index 7f93206e89..d524f0fed5 100644 --- a/contrib/intarray/_int.h +++ b/contrib/intarray/_int.h @@ -73,7 +73,7 @@ typedef struct { int32 vl_len_; /* varlena header (do not touch directly!) */ int32 flag; - char data[1]; + char data[FLEXIBLE_ARRAY_MEMBER]; } GISTTYPE; #define ALLISTRUE 0x04 @@ -133,7 +133,7 @@ typedef struct QUERYTYPE { int32 vl_len_; /* varlena header (do not touch directly!) */ int32 size; /* number of ITEMs */ - ITEM items[1]; /* variable length array */ + ITEM items[FLEXIBLE_ARRAY_MEMBER]; } QUERYTYPE; #define HDRSIZEQT offsetof(QUERYTYPE, items) diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c index 53abcc45a5..07108eb15e 100644 --- a/contrib/intarray/_int_gist.c +++ b/contrib/intarray/_int_gist.c @@ -3,6 +3,8 @@ */ #include "postgres.h" +#include <limits.h> + #include "access/gist.h" #include "access/skey.h" @@ -191,7 +193,7 @@ g_int_compress(PG_FUNCTION_ARGS) cand = 1; while (len > MAXNUMRANGE * 2) { - min = 0x7fffffff; + min = INT_MAX; for (i = 2; i < len; i += 2) if (min > (dr[i] - dr[i - 1])) { @@ -416,9 +418,7 @@ g_int_picksplit(PG_FUNCTION_ARGS) size_waste = size_union - size_inter; pfree(union_d); - - if (inter_d != (ArrayType *) NULL) - pfree(inter_d); + pfree(inter_d); /* * are these a more promising split that what we've already seen? @@ -517,10 +517,8 @@ g_int_picksplit(PG_FUNCTION_ARGS) /* pick which page to add it to */ if (size_alpha - size_l < size_beta - size_r + WISH_F(v->spl_nleft, v->spl_nright, 0.01)) { - if (datum_l) - pfree(datum_l); - if (union_dr) - pfree(union_dr); + pfree(datum_l); + pfree(union_dr); datum_l = union_dl; size_l = size_alpha; *left++ = i; @@ -528,10 +526,8 @@ g_int_picksplit(PG_FUNCTION_ARGS) } else { - if (datum_r) - pfree(datum_r); - if (union_dl) - pfree(union_dl); + pfree(datum_r); + pfree(union_dl); datum_r = union_dr; size_r = size_beta; *right++ = i; diff --git a/contrib/intarray/_int_op.c b/contrib/intarray/_int_op.c index 70849be57f..537174175b 100644 --- a/contrib/intarray/_int_op.c +++ b/contrib/intarray/_int_op.c @@ -6,7 +6,6 @@ #include "_int.h" - PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(_int_different); diff --git a/contrib/intarray/_int_tool.c b/contrib/intarray/_int_tool.c index 511c7acb54..3c52912bbf 100644 --- a/contrib/intarray/_int_tool.c +++ b/contrib/intarray/_int_tool.c @@ -184,40 +184,34 @@ rt__int_size(ArrayType *a, float *size) *size = (float) ARRNELEMS(a); } +/* qsort_arg comparison function for isort() */ +static int +isort_cmp(const void *a, const void *b, void *arg) +{ + int32 aval = *((const int32 *) a); + int32 bval = *((const int32 *) b); + + if (aval < bval) + return -1; + if (aval > bval) + return 1; + + /* + * Report if we have any duplicates. 
If there are equal keys, qsort must + * compare them at some point, else it wouldn't know whether one should go + * before or after the other. + */ + *((bool *) arg) = true; + return 0; +} + /* Sort the given data (len >= 2). Return true if any duplicates found */ bool isort(int32 *a, int len) { - int32 cur, - prev; - int32 *pcur, - *pprev, - *end; - bool r = FALSE; + bool r = false; - /* - * We use a simple insertion sort. While this is O(N^2) in the worst - * case, it's quite fast if the input is already sorted or nearly so. - * Also, for not-too-large inputs it's faster than more complex methods - * anyhow. - */ - end = a + len; - for (pcur = a + 1; pcur < end; pcur++) - { - cur = *pcur; - for (pprev = pcur - 1; pprev >= a; pprev--) - { - prev = *pprev; - if (prev <= cur) - { - if (prev == cur) - r = TRUE; - break; - } - pprev[1] = prev; - } - pprev[1] = cur; - } + qsort_arg(a, len, sizeof(int32), isort_cmp, (void *) &r); return r; } diff --git a/contrib/intarray/intarray--unpackaged--1.0.sql b/contrib/intarray/intarray--unpackaged--1.0.sql index 5de64bf0ab..63814cef98 100644 --- a/contrib/intarray/intarray--unpackaged--1.0.sql +++ b/contrib/intarray/intarray--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/intarray/intarray--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION intarray" to load this file. \quit +\echo Use "CREATE EXTENSION intarray FROM unpackaged" to load this file. \quit ALTER EXTENSION intarray ADD type query_int; ALTER EXTENSION intarray ADD function bqarr_in(cstring); diff --git a/contrib/isn/Makefile b/contrib/isn/Makefile index bd8f193e93..75c07a8296 100644 --- a/contrib/isn/Makefile +++ b/contrib/isn/Makefile @@ -4,6 +4,7 @@ MODULES = isn EXTENSION = isn DATA = isn--1.0.sql isn--unpackaged--1.0.sql +PGFILEDESC = "isn - data types for international product numbering standards" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/isn/isn--unpackaged--1.0.sql b/contrib/isn/isn--unpackaged--1.0.sql index 30e5012156..8a19d6a475 100644 --- a/contrib/isn/isn--unpackaged--1.0.sql +++ b/contrib/isn/isn--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/isn/isn--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION isn" to load this file. \quit +\echo Use "CREATE EXTENSION isn FROM unpackaged" to load this file. 
\quit ALTER EXTENSION isn ADD type ean13; ALTER EXTENSION isn ADD function ean13_in(cstring); diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c index 1124744979..5fbd253491 100644 --- a/contrib/isn/isn.c +++ b/contrib/isn/isn.c @@ -4,7 +4,7 @@ * PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC) * * Author: German Mendez Bravo (Kronuz) - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/isn/isn.c @@ -825,18 +825,18 @@ string2ean(const char *str, bool errorOK, ean13 *result, goto eanwrongtype; break; case ISMN: - strncpy(buf, "9790", 4); /* this isn't for sure yet, for now + memcpy(buf, "9790", 4); /* this isn't for sure yet, for now * ISMN it's only 9790 */ valid = (valid && ((rcheck = checkdig(buf, 13)) == check || magic)); break; case ISBN: - strncpy(buf, "978", 3); + memcpy(buf, "978", 3); valid = (valid && ((rcheck = weight_checkdig(buf + 3, 10)) == check || magic)); break; case ISSN: - strncpy(buf + 10, "00", 2); /* append 00 as the normal issue + memcpy(buf + 10, "00", 2); /* append 00 as the normal issue * publication code */ - strncpy(buf, "977", 3); + memcpy(buf, "977", 3); valid = (valid && ((rcheck = weight_checkdig(buf + 3, 8)) == check || magic)); break; case UPC: diff --git a/contrib/isn/isn.h b/contrib/isn/isn.h index aca00d8ffa..4548eafea9 100644 --- a/contrib/isn/isn.h +++ b/contrib/isn/isn.h @@ -4,7 +4,7 @@ * PostgreSQL type definitions for ISNs (ISBN, ISMN, ISSN, EAN13, UPC) * * Author: German Mendez Bravo (Kronuz) - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/isn/isn.h diff --git a/contrib/lo/Makefile b/contrib/lo/Makefile index 66b337c17a..b956aa4fa1 100644 --- a/contrib/lo/Makefile +++ b/contrib/lo/Makefile @@ -4,6 +4,7 @@ MODULES = lo EXTENSION = lo DATA = lo--1.0.sql lo--unpackaged--1.0.sql +PGFILEDESC = "lo - management for large objects" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/lo/lo--unpackaged--1.0.sql b/contrib/lo/lo--unpackaged--1.0.sql index 053185ba1d..d6bcf1a46e 100644 --- a/contrib/lo/lo--unpackaged--1.0.sql +++ b/contrib/lo/lo--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/lo/lo--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION lo" to load this file. \quit +\echo Use "CREATE EXTENSION lo FROM unpackaged" to load this file. 
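For context on the lo module touched here: it packages the lo domain over oid together with trigger support for cleaning up orphaned large objects. A usage sketch; the image table is hypothetical, and lo_manage() is the module's trigger function as described in its documentation (it does not appear in this hunk):

  CREATE EXTENSION lo;
  CREATE TABLE image (title text, raster lo);
  -- lo_manage() (assumed from the module's documentation, not shown in this diff)
  -- unlinks the old large object when the referencing row is updated or deleted
  CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON image
      FOR EACH ROW EXECUTE PROCEDURE lo_manage(raster);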
\quit ALTER EXTENSION lo ADD domain lo; ALTER EXTENSION lo ADD function lo_oid(lo); diff --git a/contrib/lo/lo.c b/contrib/lo/lo.c index 4dee64724d..953659305f 100644 --- a/contrib/lo/lo.c +++ b/contrib/lo/lo.c @@ -12,7 +12,6 @@ #include "libpq/be-fsstubs.h" #include "utils/rel.h" - PG_MODULE_MAGIC; #define atooid(x) ((Oid) strtoul((x), NULL, 10)) diff --git a/contrib/ltree/Makefile b/contrib/ltree/Makefile index 65d42f875f..a41e45723d 100644 --- a/contrib/ltree/Makefile +++ b/contrib/ltree/Makefile @@ -2,11 +2,12 @@ MODULE_big = ltree OBJS = ltree_io.o ltree_op.o lquery_op.o _ltree_op.o crc32.o \ - ltxtquery_io.o ltxtquery_op.o ltree_gist.o _ltree_gist.o + ltxtquery_io.o ltxtquery_op.o ltree_gist.o _ltree_gist.o $(WIN32RES) PG_CPPFLAGS = -DLOWER_NODE EXTENSION = ltree DATA = ltree--1.0.sql ltree--unpackaged--1.0.sql +PGFILEDESC = "ltree - hierarchical label data type" REGRESS = ltree diff --git a/contrib/ltree/crc32.c b/contrib/ltree/crc32.c index ea1a661fef..1c08d264f7 100644 --- a/contrib/ltree/crc32.c +++ b/contrib/ltree/crc32.c @@ -1,7 +1,12 @@ -/* Both POSIX and CRC32 checksums */ - /* contrib/ltree/crc32.c */ +/* + * Implements CRC-32, as used in ltree. + * + * Note that the CRC is used in the on-disk format of GiST indexes, so we + * must stay backwards-compatible! + */ + #include "postgres.h" #include <sys/types.h> @@ -15,100 +20,23 @@ #define TOLOWER(x) (x) #endif +#include "utils/pg_crc.h" #include "crc32.h" -/* - * This code implements the AUTODIN II polynomial - * The variable corresponding to the macro argument "crc" should - * be an unsigned long. - * Oroginal code by Spencer Garrett <srg@quick.com> - */ - -#define _CRC32_(crc, ch) ((crc) = ((crc) >> 8) ^ crc32tab[((crc) ^ (ch)) & 0xff]) - -/* generated using the AUTODIN II polynomial - * x^32 + x^26 + x^23 + x^22 + x^16 + - * x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + 1 - */ - -static const unsigned int crc32tab[256] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, - 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, - 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, - 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, - 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, - 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, - 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, - 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, - 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, - 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, - 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, - 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, - 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, - 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, - 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, - 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, - 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, - 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, - 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, - 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, - 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, - 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, - 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, - 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, - 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, - 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, - 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, - 0xedb88320, 
0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, - 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, - 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, - 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, - 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, - 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, - 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, - 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, - 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, - 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, - 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, - 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, - 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, - 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, - 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, - 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, - 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, - 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, - 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, - 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, - 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, - 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, - 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, - 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, - 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, - 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, - 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, - 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, - 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d, -}; - unsigned int ltree_crc32_sz(char *buf, int size) { - unsigned int crc = ~((unsigned int) 0); - char *p; - int len, - nr; - - len = 0; - nr = size; - for (len += nr, p = buf; nr--; ++p) - _CRC32_(crc, TOLOWER((unsigned int) *p)); - return ~crc; + pg_crc32 crc; + char *p = buf; + + INIT_TRADITIONAL_CRC32(crc); + while (size > 0) + { + char c = (char) TOLOWER(*p); + COMP_TRADITIONAL_CRC32(crc, &c, 1); + size--; + p++; + } + FIN_TRADITIONAL_CRC32(crc); + return (unsigned int) crc; } diff --git a/contrib/ltree/ltree--1.0.sql b/contrib/ltree/ltree--1.0.sql index 5a2f375a4f..7d55fc603f 100644 --- a/contrib/ltree/ltree--1.0.sql +++ b/contrib/ltree/ltree--1.0.sql @@ -6,12 +6,12 @@ CREATE FUNCTION ltree_in(cstring) RETURNS ltree AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION ltree_out(ltree) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE ltree ( INTERNALLENGTH = -1, @@ -303,12 +303,12 @@ CREATE OPERATOR CLASS ltree_ops CREATE FUNCTION lquery_in(cstring) RETURNS lquery AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION lquery_out(lquery) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE lquery ( INTERNALLENGTH = -1, @@ -414,12 +414,12 @@ CREATE OPERATOR ^? 
( CREATE FUNCTION ltxtq_in(cstring) RETURNS ltxtquery AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION ltxtq_out(ltxtquery) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE ltxtquery ( INTERNALLENGTH = -1, @@ -481,12 +481,12 @@ CREATE OPERATOR ^@ ( CREATE FUNCTION ltree_gist_in(cstring) RETURNS ltree_gist AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION ltree_gist_out(ltree_gist) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE ltree_gist ( internallength = -1, diff --git a/contrib/ltree/ltree--unpackaged--1.0.sql b/contrib/ltree/ltree--unpackaged--1.0.sql index 1e24fa56c6..30a94c2fc5 100644 --- a/contrib/ltree/ltree--unpackaged--1.0.sql +++ b/contrib/ltree/ltree--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/ltree/ltree--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION ltree" to load this file. \quit +\echo Use "CREATE EXTENSION ltree FROM unpackaged" to load this file. \quit ALTER EXTENSION ltree ADD type ltree; ALTER EXTENSION ltree ADD function ltree_in(cstring); diff --git a/contrib/ltree/ltree.h b/contrib/ltree/ltree.h index 1b1305b483..c604357dbf 100644 --- a/contrib/ltree/ltree.h +++ b/contrib/ltree/ltree.h @@ -10,7 +10,7 @@ typedef struct { uint16 len; - char name[1]; + char name[FLEXIBLE_ARRAY_MEMBER]; } ltree_level; #define LEVEL_HDRSIZE (offsetof(ltree_level,name)) @@ -20,7 +20,7 @@ typedef struct { int32 vl_len_; /* varlena header (do not touch directly!) */ uint16 numlevel; - char data[1]; + char data[FLEXIBLE_ARRAY_MEMBER]; } ltree; #define LTREE_HDRSIZE MAXALIGN( offsetof(ltree, data) ) @@ -34,7 +34,7 @@ typedef struct int32 val; uint16 len; uint8 flag; - char name[1]; + char name[FLEXIBLE_ARRAY_MEMBER]; } lquery_variant; #define LVAR_HDRSIZE MAXALIGN(offsetof(lquery_variant, name)) @@ -51,7 +51,7 @@ typedef struct uint16 numvar; uint16 low; uint16 high; - char variants[1]; + char variants[FLEXIBLE_ARRAY_MEMBER]; } lquery_level; #define LQL_HDRSIZE MAXALIGN( offsetof(lquery_level,variants) ) @@ -72,7 +72,7 @@ typedef struct uint16 numlevel; uint16 firstgood; uint16 flag; - char data[1]; + char data[FLEXIBLE_ARRAY_MEMBER]; } lquery; #define LQUERY_HDRSIZE MAXALIGN( offsetof(lquery, data) ) @@ -107,7 +107,7 @@ typedef struct { int32 vl_len_; /* varlena header (do not touch directly!) */ int32 size; - char data[1]; + char data[FLEXIBLE_ARRAY_MEMBER]; } ltxtquery; #define HDRSIZEQT MAXALIGN(VARHDRSZ + sizeof(int32)) @@ -208,7 +208,7 @@ typedef struct { int32 vl_len_; /* varlena header (do not touch directly!) 
*/ uint32 flag; - char data[1]; + char data[FLEXIBLE_ARRAY_MEMBER]; } ltree_gist; #define LTG_ONENODE 0x01 diff --git a/contrib/oid2name/Makefile b/contrib/oid2name/Makefile index f695b4a84e..3414b4a5cc 100644 --- a/contrib/oid2name/Makefile +++ b/contrib/oid2name/Makefile @@ -4,7 +4,7 @@ PGFILEDESC = "oid2name - examine the file structure" PGAPPICON = win32 PROGRAM = oid2name -OBJS = oid2name.o +OBJS = oid2name.o $(WIN32RES) PG_CPPFLAGS = -I$(libpq_srcdir) PG_LIBS = $(libpq_pgport) diff --git a/contrib/pageinspect/Makefile b/contrib/pageinspect/Makefile index ee78cb2989..aec5258a1b 100644 --- a/contrib/pageinspect/Makefile +++ b/contrib/pageinspect/Makefile @@ -1,11 +1,14 @@ # contrib/pageinspect/Makefile MODULE_big = pageinspect -OBJS = rawpage.o heapfuncs.o btreefuncs.o fsmfuncs.o +OBJS = rawpage.o heapfuncs.o btreefuncs.o fsmfuncs.o \ + brinfuncs.o ginfuncs.o $(WIN32RES) EXTENSION = pageinspect -DATA = pageinspect--1.2.sql pageinspect--1.0--1.1.sql \ - pageinspect--1.1--1.2.sql pageinspect--unpackaged--1.0.sql +DATA = pageinspect--1.3.sql pageinspect--1.2--1.3.sql \ + pageinspect--1.1--1.2.sql pageinspect--1.0--1.1.sql \ + pageinspect--unpackaged--1.0.sql +PGFILEDESC = "pageinspect - functions to inspect contents of database pages" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pageinspect/brinfuncs.c b/contrib/pageinspect/brinfuncs.c new file mode 100644 index 0000000000..1b15a7bdfe --- /dev/null +++ b/contrib/pageinspect/brinfuncs.c @@ -0,0 +1,409 @@ +/* + * brinfuncs.c + * Functions to investigate BRIN indexes + * + * Copyright (c) 2014-2015, PostgreSQL Global Development Group + * + * IDENTIFICATION + * contrib/pageinspect/brinfuncs.c + */ +#include "postgres.h" + +#include "access/htup_details.h" +#include "access/brin.h" +#include "access/brin_internal.h" +#include "access/brin_page.h" +#include "access/brin_revmap.h" +#include "access/brin_tuple.h" +#include "catalog/index.h" +#include "catalog/pg_type.h" +#include "funcapi.h" +#include "lib/stringinfo.h" +#include "utils/array.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "miscadmin.h" + + +PG_FUNCTION_INFO_V1(brin_page_type); +PG_FUNCTION_INFO_V1(brin_page_items); +PG_FUNCTION_INFO_V1(brin_metapage_info); +PG_FUNCTION_INFO_V1(brin_revmap_data); + +typedef struct brin_column_state +{ + int nstored; + FmgrInfo outputFn[FLEXIBLE_ARRAY_MEMBER]; +} brin_column_state; + +typedef struct brin_page_state +{ + BrinDesc *bdesc; + Page page; + OffsetNumber offset; + bool unusedItem; + bool done; + AttrNumber attno; + BrinMemTuple *dtup; + brin_column_state *columns[FLEXIBLE_ARRAY_MEMBER]; +} brin_page_state; + + +static Page verify_brin_page(bytea *raw_page, uint16 type, + const char *strtype); + +Datum +brin_page_type(PG_FUNCTION_ARGS) +{ + bytea *raw_page = PG_GETARG_BYTEA_P(0); + Page page = VARDATA(raw_page); + char *type; + + switch (BrinPageType(page)) + { + case BRIN_PAGETYPE_META: + type = "meta"; + break; + case BRIN_PAGETYPE_REVMAP: + type = "revmap"; + break; + case BRIN_PAGETYPE_REGULAR: + type = "regular"; + break; + default: + type = psprintf("unknown (%02x)", BrinPageType(page)); + break; + } + + PG_RETURN_TEXT_P(cstring_to_text(type)); +} + +/* + * Verify that the given bytea contains a BRIN page of the indicated page + * type, or die in the attempt. A pointer to the page is returned. 
+ */ +static Page +verify_brin_page(bytea *raw_page, uint16 type, const char *strtype) +{ + Page page; + int raw_page_size; + + raw_page_size = VARSIZE(raw_page) - VARHDRSZ; + + if (raw_page_size < SizeOfPageHeaderData) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("input page too small"), + errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ))); + + page = VARDATA(raw_page); + + /* verify the special space says this page is what we want */ + if (BrinPageType(page) != type) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("page is not a BRIN page of type \"%s\"", strtype), + errdetail("Expected special type %08x, got %08x.", + type, BrinPageType(page)))); + + return page; +} + + +/* + * Extract all item values from a BRIN index page + * + * Usage: SELECT * FROM brin_page_items(get_raw_page('idx', 1), 'idx'::regclass); + */ +Datum +brin_page_items(PG_FUNCTION_ARGS) +{ + brin_page_state *state; + FuncCallContext *fctx; + + if (!superuser()) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be superuser to use raw page functions")))); + + if (SRF_IS_FIRSTCALL()) + { + bytea *raw_page = PG_GETARG_BYTEA_P(0); + Oid indexRelid = PG_GETARG_OID(1); + Page page; + TupleDesc tupdesc; + MemoryContext mctx; + Relation indexRel; + AttrNumber attno; + + /* minimally verify the page we got */ + page = verify_brin_page(raw_page, BRIN_PAGETYPE_REGULAR, "regular"); + + /* create a function context for cross-call persistence */ + fctx = SRF_FIRSTCALL_INIT(); + + /* switch to memory context appropriate for multiple function calls */ + mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + indexRel = index_open(indexRelid, AccessShareLock); + + state = palloc(offsetof(brin_page_state, columns) + + sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts); + + state->bdesc = brin_build_desc(indexRel); + state->page = page; + state->offset = FirstOffsetNumber; + state->unusedItem = false; + state->done = false; + state->dtup = NULL; + + /* + * Initialize output functions for all indexed datatypes; simplifies + * calling them later. + */ + for (attno = 1; attno <= state->bdesc->bd_tupdesc->natts; attno++) + { + Oid output; + bool isVarlena; + BrinOpcInfo *opcinfo; + int i; + brin_column_state *column; + + opcinfo = state->bdesc->bd_info[attno - 1]; + column = palloc(offsetof(brin_column_state, outputFn) + + sizeof(FmgrInfo) * opcinfo->oi_nstored); + + column->nstored = opcinfo->oi_nstored; + for (i = 0; i < opcinfo->oi_nstored; i++) + { + getTypeOutputInfo(opcinfo->oi_typids[i], &output, &isVarlena); + fmgr_info(output, &column->outputFn[i]); + } + + state->columns[attno - 1] = column; + } + + index_close(indexRel, AccessShareLock); + + fctx->user_fctx = state; + fctx->tuple_desc = BlessTupleDesc(tupdesc); + + MemoryContextSwitchTo(mctx); + } + + fctx = SRF_PERCALL_SETUP(); + state = fctx->user_fctx; + + if (!state->done) + { + HeapTuple result; + Datum values[7]; + bool nulls[7]; + + /* + * This loop is called once for every attribute of every tuple in the + * page. At the start of a tuple, we get a NULL dtup; that's our + * signal for obtaining and decoding the next one. If that's not the + * case, we output the next attribute. 
+ */ + if (state->dtup == NULL) + { + BrinTuple *tup; + MemoryContext mctx; + ItemId itemId; + + /* deformed tuple must live across calls */ + mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx); + + /* verify item status: if there's no data, we can't decode */ + itemId = PageGetItemId(state->page, state->offset); + if (ItemIdIsUsed(itemId)) + { + tup = (BrinTuple *) PageGetItem(state->page, + PageGetItemId(state->page, + state->offset)); + state->dtup = brin_deform_tuple(state->bdesc, tup); + state->attno = 1; + state->unusedItem = false; + } + else + state->unusedItem = true; + + MemoryContextSwitchTo(mctx); + } + else + state->attno++; + + MemSet(nulls, 0, sizeof(nulls)); + + if (state->unusedItem) + { + values[0] = UInt16GetDatum(state->offset); + nulls[1] = true; + nulls[2] = true; + nulls[3] = true; + nulls[4] = true; + nulls[5] = true; + nulls[6] = true; + } + else + { + int att = state->attno - 1; + + values[0] = UInt16GetDatum(state->offset); + values[1] = UInt32GetDatum(state->dtup->bt_blkno); + values[2] = UInt16GetDatum(state->attno); + values[3] = BoolGetDatum(state->dtup->bt_columns[att].bv_allnulls); + values[4] = BoolGetDatum(state->dtup->bt_columns[att].bv_hasnulls); + values[5] = BoolGetDatum(state->dtup->bt_placeholder); + if (!state->dtup->bt_columns[att].bv_allnulls) + { + BrinValues *bvalues = &state->dtup->bt_columns[att]; + StringInfoData s; + bool first; + int i; + + initStringInfo(&s); + appendStringInfoChar(&s, '{'); + + first = true; + for (i = 0; i < state->columns[att]->nstored; i++) + { + char *val; + + if (!first) + appendStringInfoString(&s, " .. "); + first = false; + val = OutputFunctionCall(&state->columns[att]->outputFn[i], + bvalues->bv_values[i]); + appendStringInfoString(&s, val); + pfree(val); + } + appendStringInfoChar(&s, '}'); + + values[6] = CStringGetTextDatum(s.data); + pfree(s.data); + } + else + { + nulls[6] = true; + } + } + + result = heap_form_tuple(fctx->tuple_desc, values, nulls); + + /* + * If the item was unused, jump straight to the next one; otherwise, + * the only cleanup needed here is to set our signal to go to the next + * tuple in the following iteration, by freeing the current one. + */ + if (state->unusedItem) + state->offset = OffsetNumberNext(state->offset); + else if (state->attno >= state->bdesc->bd_tupdesc->natts) + { + pfree(state->dtup); + state->dtup = NULL; + state->offset = OffsetNumberNext(state->offset); + } + + /* + * If we're beyond the end of the page, set flag to end the function in + * the following iteration. 
+ */ + if (state->offset > PageGetMaxOffsetNumber(state->page)) + state->done = true; + + SRF_RETURN_NEXT(fctx, HeapTupleGetDatum(result)); + } + + brin_free_desc(state->bdesc); + + SRF_RETURN_DONE(fctx); +} + +Datum +brin_metapage_info(PG_FUNCTION_ARGS) +{ + bytea *raw_page = PG_GETARG_BYTEA_P(0); + Page page; + BrinMetaPageData *meta; + TupleDesc tupdesc; + Datum values[4]; + bool nulls[4]; + HeapTuple htup; + + page = verify_brin_page(raw_page, BRIN_PAGETYPE_META, "metapage"); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + tupdesc = BlessTupleDesc(tupdesc); + + /* Extract values from the metapage */ + meta = (BrinMetaPageData *) PageGetContents(page); + MemSet(nulls, 0, sizeof(nulls)); + values[0] = CStringGetTextDatum(psprintf("0x%08X", meta->brinMagic)); + values[1] = Int32GetDatum(meta->brinVersion); + values[2] = Int32GetDatum(meta->pagesPerRange); + values[3] = Int64GetDatum(meta->lastRevmapPage); + + htup = heap_form_tuple(tupdesc, values, nulls); + + PG_RETURN_DATUM(HeapTupleGetDatum(htup)); +} + +/* + * Return the TID array stored in a BRIN revmap page + */ +Datum +brin_revmap_data(PG_FUNCTION_ARGS) +{ + struct + { + ItemPointerData *tids; + int idx; + } *state; + FuncCallContext *fctx; + + if (!superuser()) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be superuser to use raw page functions")))); + + if (SRF_IS_FIRSTCALL()) + { + bytea *raw_page = PG_GETARG_BYTEA_P(0); + MemoryContext mctx; + Page page; + + /* minimally verify the page we got */ + page = verify_brin_page(raw_page, BRIN_PAGETYPE_REVMAP, "revmap"); + + /* create a function context for cross-call persistence */ + fctx = SRF_FIRSTCALL_INIT(); + + /* switch to memory context appropriate for multiple function calls */ + mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx); + + state = palloc(sizeof(*state)); + state->tids = ((RevmapContents *) PageGetContents(page))->rm_tids; + state->idx = 0; + + fctx->user_fctx = state; + + MemoryContextSwitchTo(mctx); + } + + fctx = SRF_PERCALL_SETUP(); + state = fctx->user_fctx; + + if (state->idx < REVMAP_PAGE_MAXITEMS) + SRF_RETURN_NEXT(fctx, PointerGetDatum(&state->tids[state->idx++])); + + SRF_RETURN_DONE(fctx); +} diff --git a/contrib/pageinspect/fsmfuncs.c b/contrib/pageinspect/fsmfuncs.c index 8c1960445a..ad3881974f 100644 --- a/contrib/pageinspect/fsmfuncs.c +++ b/contrib/pageinspect/fsmfuncs.c @@ -9,7 +9,7 @@ * there's hardly any use case for using these without superuser-rights * anyway. 
* - * Copyright (c) 2007-2014, PostgreSQL Global Development Group + * Copyright (c) 2007-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/fsmfuncs.c diff --git a/contrib/pageinspect/ginfuncs.c b/contrib/pageinspect/ginfuncs.c new file mode 100644 index 0000000000..701b2ca763 --- /dev/null +++ b/contrib/pageinspect/ginfuncs.c @@ -0,0 +1,281 @@ +/* + * ginfuncs.c + * Functions to investigate the content of GIN indexes + * + * Copyright (c) 2014-2015, PostgreSQL Global Development Group + * + * IDENTIFICATION + * contrib/pageinspect/ginfuncs.c + */ +#include "postgres.h" + +#include "access/gin.h" +#include "access/gin_private.h" +#include "access/htup_details.h" +#include "catalog/namespace.h" +#include "catalog/pg_type.h" +#include "funcapi.h" +#include "miscadmin.h" +#include "utils/array.h" +#include "utils/builtins.h" +#include "utils/rel.h" + +#define DatumGetItemPointer(X) ((ItemPointer) DatumGetPointer(X)) +#define ItemPointerGetDatum(X) PointerGetDatum(X) + + +PG_FUNCTION_INFO_V1(gin_metapage_info); +PG_FUNCTION_INFO_V1(gin_page_opaque_info); +PG_FUNCTION_INFO_V1(gin_leafpage_items); + +Datum +gin_metapage_info(PG_FUNCTION_ARGS) +{ + bytea *raw_page = PG_GETARG_BYTEA_P(0); + int raw_page_size; + TupleDesc tupdesc; + Page page; + GinPageOpaque opaq; + GinMetaPageData *metadata; + HeapTuple resultTuple; + Datum values[10]; + bool nulls[10]; + + if (!superuser()) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be superuser to use raw page functions")))); + + raw_page_size = VARSIZE(raw_page) - VARHDRSZ; + if (raw_page_size < BLCKSZ) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("input page too small (%d bytes)", raw_page_size))); + page = VARDATA(raw_page); + + opaq = (GinPageOpaque) PageGetSpecialPointer(page); + if (opaq->flags != GIN_META) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("input page is not a GIN metapage"), + errdetail("Flags %04X, expected %04X", + opaq->flags, GIN_META))); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + metadata = GinPageGetMeta(page); + + memset(nulls, 0, sizeof(nulls)); + + values[0] = Int64GetDatum(metadata->head); + values[1] = Int64GetDatum(metadata->tail); + values[2] = Int32GetDatum(metadata->tailFreeSize); + values[3] = Int64GetDatum(metadata->nPendingPages); + values[4] = Int64GetDatum(metadata->nPendingHeapTuples); + + /* statistics, updated by VACUUM */ + values[5] = Int64GetDatum(metadata->nTotalPages); + values[6] = Int64GetDatum(metadata->nEntryPages); + values[7] = Int64GetDatum(metadata->nDataPages); + values[8] = Int64GetDatum(metadata->nEntries); + + values[9] = Int32GetDatum(metadata->ginVersion); + + /* Build and return the result tuple. 
*/ + resultTuple = heap_form_tuple(tupdesc, values, nulls); + + return HeapTupleGetDatum(resultTuple); +} + + +Datum +gin_page_opaque_info(PG_FUNCTION_ARGS) +{ + bytea *raw_page = PG_GETARG_BYTEA_P(0); + int raw_page_size; + TupleDesc tupdesc; + Page page; + GinPageOpaque opaq; + HeapTuple resultTuple; + Datum values[3]; + bool nulls[10]; + Datum flags[16]; + int nflags = 0; + uint16 flagbits; + + if (!superuser()) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be superuser to use raw page functions")))); + + raw_page_size = VARSIZE(raw_page) - VARHDRSZ; + if (raw_page_size < BLCKSZ) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("input page too small (%d bytes)", raw_page_size))); + page = VARDATA(raw_page); + + opaq = (GinPageOpaque) PageGetSpecialPointer(page); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + /* Convert the flags bitmask to an array of human-readable names */ + flagbits = opaq->flags; + if (flagbits & GIN_DATA) + flags[nflags++] = CStringGetTextDatum("data"); + if (flagbits & GIN_LEAF) + flags[nflags++] = CStringGetTextDatum("leaf"); + if (flagbits & GIN_DELETED) + flags[nflags++] = CStringGetTextDatum("deleted"); + if (flagbits & GIN_META) + flags[nflags++] = CStringGetTextDatum("meta"); + if (flagbits & GIN_LIST) + flags[nflags++] = CStringGetTextDatum("list"); + if (flagbits & GIN_LIST_FULLROW) + flags[nflags++] = CStringGetTextDatum("list_fullrow"); + if (flagbits & GIN_INCOMPLETE_SPLIT) + flags[nflags++] = CStringGetTextDatum("incomplete_split"); + if (flagbits & GIN_COMPRESSED) + flags[nflags++] = CStringGetTextDatum("compressed"); + flagbits &= ~(GIN_DATA | GIN_LEAF | GIN_DELETED | GIN_META | GIN_LIST | + GIN_LIST_FULLROW | GIN_INCOMPLETE_SPLIT | GIN_COMPRESSED); + if (flagbits) + { + /* any flags we don't recognize are printed in hex */ + flags[nflags++] = DirectFunctionCall1(to_hex32, Int32GetDatum(flagbits)); + } + + memset(nulls, 0, sizeof(nulls)); + + values[0] = Int64GetDatum(opaq->rightlink); + values[1] = Int64GetDatum(opaq->maxoff); + values[2] = PointerGetDatum( + construct_array(flags, nflags, TEXTOID, -1, false, 'i')); + + /* Build and return the result tuple. 
*/ + resultTuple = heap_form_tuple(tupdesc, values, nulls); + + return HeapTupleGetDatum(resultTuple); +} + +typedef struct gin_leafpage_items_state +{ + TupleDesc tupd; + GinPostingList *seg; + GinPostingList *lastseg; +} gin_leafpage_items_state; + +Datum +gin_leafpage_items(PG_FUNCTION_ARGS) +{ + bytea *raw_page = PG_GETARG_BYTEA_P(0); + int raw_page_size; + FuncCallContext *fctx; + gin_leafpage_items_state *inter_call_data; + + if (!superuser()) + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + (errmsg("must be superuser to use raw page functions")))); + + raw_page_size = VARSIZE(raw_page) - VARHDRSZ; + + if (SRF_IS_FIRSTCALL()) + { + TupleDesc tupdesc; + MemoryContext mctx; + Page page; + GinPageOpaque opaq; + + if (raw_page_size < BLCKSZ) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("input page too small (%d bytes)", raw_page_size))); + page = VARDATA(raw_page); + + if (PageGetSpecialSize(page) != MAXALIGN(sizeof(GinPageOpaqueData))) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("input page is not a valid GIN data leaf page"), + errdetail("Special size %d, expected %d", + (int) PageGetSpecialSize(page), + (int) MAXALIGN(sizeof(GinPageOpaqueData))))); + + opaq = (GinPageOpaque) PageGetSpecialPointer(page); + if (opaq->flags != (GIN_DATA | GIN_LEAF | GIN_COMPRESSED)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("input page is not a compressed GIN data leaf page"), + errdetail("Flags %04X, expected %04X", + opaq->flags, + (GIN_DATA | GIN_LEAF | GIN_COMPRESSED)))); + + fctx = SRF_FIRSTCALL_INIT(); + mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx); + + inter_call_data = palloc(sizeof(gin_leafpage_items_state)); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + inter_call_data->tupd = tupdesc; + + inter_call_data->seg = GinDataLeafPageGetPostingList(page); + inter_call_data->lastseg = (GinPostingList *) + (((char *) inter_call_data->seg) + + GinDataLeafPageGetPostingListSize(page)); + + fctx->user_fctx = inter_call_data; + + MemoryContextSwitchTo(mctx); + } + + fctx = SRF_PERCALL_SETUP(); + inter_call_data = fctx->user_fctx; + + if (inter_call_data->seg != inter_call_data->lastseg) + { + GinPostingList *cur = inter_call_data->seg; + HeapTuple resultTuple; + Datum result; + Datum values[3]; + bool nulls[3]; + int ndecoded, + i; + ItemPointer tids; + Datum *tids_datum; + + memset(nulls, 0, sizeof(nulls)); + + values[0] = ItemPointerGetDatum(&cur->first); + values[1] = UInt16GetDatum(cur->nbytes); + + /* build an array of decoded item pointers */ + tids = ginPostingListDecode(cur, &ndecoded); + tids_datum = (Datum *) palloc(ndecoded * sizeof(Datum)); + for (i = 0; i < ndecoded; i++) + tids_datum[i] = ItemPointerGetDatum(&tids[i]); + values[2] = PointerGetDatum(construct_array(tids_datum, + ndecoded, + TIDOID, + sizeof(ItemPointerData), + false, 's')); + pfree(tids_datum); + pfree(tids); + + /* Build and return the result tuple. 
*/ + resultTuple = heap_form_tuple(inter_call_data->tupd, values, nulls); + result = HeapTupleGetDatum(resultTuple); + + inter_call_data->seg = GinNextPostingListSegment(cur); + + SRF_RETURN_NEXT(fctx, result); + } + else + SRF_RETURN_DONE(fctx); +} diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c index dedc8feaeb..8d1666c8bd 100644 --- a/contrib/pageinspect/heapfuncs.c +++ b/contrib/pageinspect/heapfuncs.c @@ -15,7 +15,7 @@ * there's hardly any use case for using these without superuser-rights * anyway. * - * Copyright (c) 2007-2014, PostgreSQL Global Development Group + * Copyright (c) 2007-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/heapfuncs.c @@ -149,7 +149,7 @@ heap_page_items(PG_FUNCTION_ARGS) * many other ways, but at least we won't crash. */ if (ItemIdHasStorage(id) && - lp_len >= sizeof(HeapTupleHeader) && + lp_len >= MinHeapTupleSize && lp_offset == MAXALIGN(lp_offset) && lp_offset + lp_len <= raw_page_size) { @@ -169,18 +169,19 @@ heap_page_items(PG_FUNCTION_ARGS) values[10] = UInt8GetDatum(tuphdr->t_hoff); /* - * We already checked that the item as is completely within the - * raw page passed to us, with the length given in the line - * pointer.. Let's check that t_hoff doesn't point over lp_len, - * before using it to access t_bits and oid. + * We already checked that the item is completely within the raw + * page passed to us, with the length given in the line pointer. + * Let's check that t_hoff doesn't point over lp_len, before using + * it to access t_bits and oid. */ - if (tuphdr->t_hoff >= sizeof(HeapTupleHeader) && - tuphdr->t_hoff <= lp_len) + if (tuphdr->t_hoff >= SizeofHeapTupleHeader && + tuphdr->t_hoff <= lp_len && + tuphdr->t_hoff == MAXALIGN(tuphdr->t_hoff)) { if (tuphdr->t_infomask & HEAP_HASNULL) { bits_len = tuphdr->t_hoff - - (((char *) tuphdr->t_bits) -((char *) tuphdr)); + offsetof(HeapTupleHeaderData, t_bits); values[11] = CStringGetTextDatum( bits_to_text(tuphdr->t_bits, bits_len * 8)); diff --git a/contrib/pageinspect/pageinspect--1.0--1.1.sql b/contrib/pageinspect/pageinspect--1.0--1.1.sql index 49e83264d3..0e2c3f45b3 100644 --- a/contrib/pageinspect/pageinspect--1.0--1.1.sql +++ b/contrib/pageinspect/pageinspect--1.0--1.1.sql @@ -1,7 +1,7 @@ /* contrib/pageinspect/pageinspect--1.0--1.1.sql */ -- complain if script is sourced in psql, rather than via ALTER EXTENSION -\echo Use "ALTER EXTENSION pageinspect UPDATE TO 1.1" to load this file. \quit +\echo Use "ALTER EXTENSION pageinspect UPDATE TO '1.1'" to load this file. \quit DROP FUNCTION page_header(bytea); CREATE FUNCTION page_header(IN page bytea, diff --git a/contrib/pageinspect/pageinspect--1.1--1.2.sql b/contrib/pageinspect/pageinspect--1.1--1.2.sql index 5e23ca4dd5..31ffbb0eb5 100644 --- a/contrib/pageinspect/pageinspect--1.1--1.2.sql +++ b/contrib/pageinspect/pageinspect--1.1--1.2.sql @@ -1,7 +1,7 @@ /* contrib/pageinspect/pageinspect--1.1--1.2.sql */ -- complain if script is sourced in psql, rather than via ALTER EXTENSION -\echo Use "ALTER EXTENSION pageinspect UPDATE TO 1.2" to load this file. \quit +\echo Use "ALTER EXTENSION pageinspect UPDATE TO '1.2'" to load this file. 
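The heap_page_items() hardening above only tightens the sanity checks on lp_len and t_hoff before the tuple header is dereferenced; the call signature is unchanged. For reference, with a placeholder table name:

  -- inspect line pointers and tuple headers of block 0 (superuser only)
  SELECT lp, lp_off, lp_len, t_hoff, t_bits
  FROM heap_page_items(get_raw_page('mytable', 0));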
\quit DROP FUNCTION page_header(bytea); CREATE FUNCTION page_header(IN page bytea, diff --git a/contrib/pageinspect/pageinspect--1.2--1.3.sql b/contrib/pageinspect/pageinspect--1.2--1.3.sql new file mode 100644 index 0000000000..9c55a6e598 --- /dev/null +++ b/contrib/pageinspect/pageinspect--1.2--1.3.sql @@ -0,0 +1,82 @@ +/* contrib/pageinspect/pageinspect--1.2--1.3.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION pageinspect UPDATE TO '1.3'" to load this file. \quit + +-- +-- brin_page_type() +-- +CREATE FUNCTION brin_page_type(IN page bytea) +RETURNS text +AS 'MODULE_PATHNAME', 'brin_page_type' +LANGUAGE C STRICT; + +-- +-- brin_metapage_info() +-- +CREATE FUNCTION brin_metapage_info(IN page bytea, OUT magic text, + OUT version integer, OUT pagesperrange integer, OUT lastrevmappage bigint) +AS 'MODULE_PATHNAME', 'brin_metapage_info' +LANGUAGE C STRICT; + +-- +-- brin_revmap_data() +-- +CREATE FUNCTION brin_revmap_data(IN page bytea, + OUT pages tid) +RETURNS SETOF tid +AS 'MODULE_PATHNAME', 'brin_revmap_data' +LANGUAGE C STRICT; + +-- +-- brin_page_items() +-- +CREATE FUNCTION brin_page_items(IN page bytea, IN index_oid regclass, + OUT itemoffset int, + OUT blknum int, + OUT attnum int, + OUT allnulls bool, + OUT hasnulls bool, + OUT placeholder bool, + OUT value text) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'brin_page_items' +LANGUAGE C STRICT; + +-- +-- gin_metapage_info() +-- +CREATE FUNCTION gin_metapage_info(IN page bytea, + OUT pending_head bigint, + OUT pending_tail bigint, + OUT tail_free_size int4, + OUT n_pending_pages bigint, + OUT n_pending_tuples bigint, + OUT n_total_pages bigint, + OUT n_entry_pages bigint, + OUT n_data_pages bigint, + OUT n_entries bigint, + OUT version int4) +AS 'MODULE_PATHNAME', 'gin_metapage_info' +LANGUAGE C STRICT; + +-- +-- gin_page_opaque_info() +-- +CREATE FUNCTION gin_page_opaque_info(IN page bytea, + OUT rightlink bigint, + OUT maxoff int4, + OUT flags text[]) +AS 'MODULE_PATHNAME', 'gin_page_opaque_info' +LANGUAGE C STRICT; + +-- +-- gin_leafpage_items() +-- +CREATE FUNCTION gin_leafpage_items(IN page bytea, + OUT first_tid tid, + OUT nbytes int2, + OUT tids tid[]) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'gin_leafpage_items' +LANGUAGE C STRICT; diff --git a/contrib/pageinspect/pageinspect--1.2.sql b/contrib/pageinspect/pageinspect--1.3.sql index 15e8e1e381..a99e05862d 100644 --- a/contrib/pageinspect/pageinspect--1.2.sql +++ b/contrib/pageinspect/pageinspect--1.3.sql @@ -1,4 +1,4 @@ -/* contrib/pageinspect/pageinspect--1.2.sql */ +/* contrib/pageinspect/pageinspect--1.3.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION pageinspect" to load this file. 
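The new pageinspect--1.2--1.3.sql above packages the BRIN and GIN inspection functions implemented in brinfuncs.c and ginfuncs.c. A usage sketch, run as superuser; the index names brin_idx and gin_idx and the block numbers are placeholders:

  ALTER EXTENSION pageinspect UPDATE TO '1.3';

  SELECT brin_page_type(get_raw_page('brin_idx', 0));   -- block 0 is normally the metapage
  SELECT * FROM brin_metapage_info(get_raw_page('brin_idx', 0));
  SELECT * FROM brin_page_items(get_raw_page('brin_idx', 2), 'brin_idx'::regclass);

  SELECT * FROM gin_metapage_info(get_raw_page('gin_idx', 0));
  SELECT * FROM gin_page_opaque_info(get_raw_page('gin_idx', 1));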
\quit @@ -99,9 +99,91 @@ AS 'MODULE_PATHNAME', 'bt_page_items' LANGUAGE C STRICT; -- +-- brin_page_type() +-- +CREATE FUNCTION brin_page_type(IN page bytea) +RETURNS text +AS 'MODULE_PATHNAME', 'brin_page_type' +LANGUAGE C STRICT; + +-- +-- brin_metapage_info() +-- +CREATE FUNCTION brin_metapage_info(IN page bytea, OUT magic text, + OUT version integer, OUT pagesperrange integer, OUT lastrevmappage bigint) +AS 'MODULE_PATHNAME', 'brin_metapage_info' +LANGUAGE C STRICT; + +-- +-- brin_revmap_data() +-- +CREATE FUNCTION brin_revmap_data(IN page bytea, + OUT pages tid) +RETURNS SETOF tid +AS 'MODULE_PATHNAME', 'brin_revmap_data' +LANGUAGE C STRICT; + +-- +-- brin_page_items() +-- +CREATE FUNCTION brin_page_items(IN page bytea, IN index_oid regclass, + OUT itemoffset int, + OUT blknum int, + OUT attnum int, + OUT allnulls bool, + OUT hasnulls bool, + OUT placeholder bool, + OUT value text) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'brin_page_items' +LANGUAGE C STRICT; + +-- -- fsm_page_contents() -- CREATE FUNCTION fsm_page_contents(IN page bytea) RETURNS text AS 'MODULE_PATHNAME', 'fsm_page_contents' LANGUAGE C STRICT; + +-- +-- GIN functions +-- + +-- +-- gin_metapage_info() +-- +CREATE FUNCTION gin_metapage_info(IN page bytea, + OUT pending_head bigint, + OUT pending_tail bigint, + OUT tail_free_size int4, + OUT n_pending_pages bigint, + OUT n_pending_tuples bigint, + OUT n_total_pages bigint, + OUT n_entry_pages bigint, + OUT n_data_pages bigint, + OUT n_entries bigint, + OUT version int4) +AS 'MODULE_PATHNAME', 'gin_metapage_info' +LANGUAGE C STRICT; + +-- +-- gin_page_opaque_info() +-- +CREATE FUNCTION gin_page_opaque_info(IN page bytea, + OUT rightlink bigint, + OUT maxoff int4, + OUT flags text[]) +AS 'MODULE_PATHNAME', 'gin_page_opaque_info' +LANGUAGE C STRICT; + +-- +-- gin_leafpage_items() +-- +CREATE FUNCTION gin_leafpage_items(IN page bytea, + OUT first_tid tid, + OUT nbytes int2, + OUT tids tid[]) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'gin_leafpage_items' +LANGUAGE C STRICT; diff --git a/contrib/pageinspect/pageinspect--unpackaged--1.0.sql b/contrib/pageinspect/pageinspect--unpackaged--1.0.sql index 13e2167dfc..1bf6bccb79 100644 --- a/contrib/pageinspect/pageinspect--unpackaged--1.0.sql +++ b/contrib/pageinspect/pageinspect--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pageinspect/pageinspect--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pageinspect" to load this file. \quit +\echo Use "CREATE EXTENSION pageinspect FROM unpackaged" to load this file. \quit DROP FUNCTION heap_page_items(bytea); CREATE FUNCTION heap_page_items(IN page bytea, diff --git a/contrib/pageinspect/pageinspect.control b/contrib/pageinspect/pageinspect.control index aecd91a711..a9dab3327c 100644 --- a/contrib/pageinspect/pageinspect.control +++ b/contrib/pageinspect/pageinspect.control @@ -1,5 +1,5 @@ # pageinspect extension comment = 'inspect the contents of database pages at a low level' -default_version = '1.2' +default_version = '1.3' module_pathname = '$libdir/pageinspect' relocatable = true diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c index cc66fc8b04..38c136f987 100644 --- a/contrib/pageinspect/rawpage.c +++ b/contrib/pageinspect/rawpage.c @@ -5,7 +5,7 @@ * * Access-method specific inspection functions are in separate files. 
* - * Copyright (c) 2007-2014, PostgreSQL Global Development Group + * Copyright (c) 2007-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pageinspect/rawpage.c @@ -131,9 +131,11 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot access temporary tables of other sessions"))); - if (blkno >= RelationGetNumberOfBlocks(rel)) - elog(ERROR, "block number %u is out of range for relation \"%s\"", - blkno, RelationGetRelationName(rel)); + if (blkno >= RelationGetNumberOfBlocksInFork(rel, forknum)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("block number %u is out of range for relation \"%s\"", + blkno, RelationGetRelationName(rel)))); /* Initialize buffer to copy to */ raw_page = (bytea *) palloc(BLCKSZ + VARHDRSZ); @@ -190,7 +192,7 @@ page_header(PG_FUNCTION_ARGS) * Check that enough data was supplied, so that we don't try to access * fields outside the supplied buffer. */ - if (raw_page_size < sizeof(PageHeaderData)) + if (raw_page_size < SizeOfPageHeaderData) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("input page too small (%d bytes)", raw_page_size))); diff --git a/contrib/passwordcheck/Makefile b/contrib/passwordcheck/Makefile index 4829bfd1f3..4652aeb3d7 100644 --- a/contrib/passwordcheck/Makefile +++ b/contrib/passwordcheck/Makefile @@ -1,7 +1,8 @@ # contrib/passwordcheck/Makefile MODULE_big = passwordcheck -OBJS = passwordcheck.o +OBJS = passwordcheck.o $(WIN32RES) +PGFILEDESC = "passwordcheck - strengthen user password checks" # uncomment the following two lines to enable cracklib support # PG_CPPFLAGS = -DUSE_CRACKLIB '-DCRACKLIB_DICTPATH="/usr/lib/cracklib_dict"' diff --git a/contrib/passwordcheck/passwordcheck.c b/contrib/passwordcheck/passwordcheck.c index 405896d40a..78c44b2d05 100644 --- a/contrib/passwordcheck/passwordcheck.c +++ b/contrib/passwordcheck/passwordcheck.c @@ -3,7 +3,7 @@ * passwordcheck.c * * - * Copyright (c) 2009-2014, PostgreSQL Global Development Group + * Copyright (c) 2009-2015, PostgreSQL Global Development Group * * Author: Laurenz Albe <laurenz.albe@wien.gv.at> * @@ -24,7 +24,6 @@ #include "fmgr.h" #include "libpq/md5.h" - PG_MODULE_MAGIC; /* passwords shorter than this will be rejected */ diff --git a/contrib/pg_archivecleanup/.gitignore b/contrib/pg_archivecleanup/.gitignore deleted file mode 100644 index 804089070d..0000000000 --- a/contrib/pg_archivecleanup/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/pg_archivecleanup diff --git a/contrib/pg_archivecleanup/Makefile b/contrib/pg_archivecleanup/Makefile deleted file mode 100644 index 39c55d8713..0000000000 --- a/contrib/pg_archivecleanup/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -# contrib/pg_archivecleanup/Makefile - -PGFILEDESC = "pg_archivecleanup - cleans archive when used with streaming replication" -PGAPPICON = win32 - -PROGRAM = pg_archivecleanup -OBJS = pg_archivecleanup.o - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/pg_archivecleanup -top_builddir = ../.. 
-include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif diff --git a/contrib/pg_archivecleanup/pg_archivecleanup.c b/contrib/pg_archivecleanup/pg_archivecleanup.c deleted file mode 100644 index 212b267fcf..0000000000 --- a/contrib/pg_archivecleanup/pg_archivecleanup.c +++ /dev/null @@ -1,369 +0,0 @@ -/* - * contrib/pg_archivecleanup/pg_archivecleanup.c - * - * pg_archivecleanup.c - * - * Production-ready example of an archive_cleanup_command - * used to clean an archive when using standby_mode = on in 9.0 - * or for standalone use for any version of PostgreSQL 8.0+. - * - * Original author: Simon Riggs simon@2ndquadrant.com - * Current maintainer: Simon Riggs - */ -#include "postgres_fe.h" - -#include <ctype.h> -#include <dirent.h> -#include <sys/stat.h> -#include <fcntl.h> -#include <signal.h> -#include <sys/time.h> - -#include "pg_getopt.h" - -const char *progname; - -/* Options and defaults */ -bool debug = false; /* are we debugging? */ -bool dryrun = false; /* are we performing a dry-run operation? */ -char *additional_ext = NULL; /* Extension to remove from filenames */ - -char *archiveLocation; /* where to find the archive? */ -char *restartWALFileName; /* the file from which we can restart restore */ -char WALFilePath[MAXPGPATH]; /* the file path including archive */ -char exclusiveCleanupFileName[MAXPGPATH]; /* the oldest file we - * want to remain in - * archive */ - - -/* ===================================================================== - * - * Customizable section - * - * ===================================================================== - * - * Currently, this section assumes that the Archive is a locally - * accessible directory. If you want to make other assumptions, - * such as using a vendor-specific archive and access API, these - * routines are the ones you'll need to change. You're - * enouraged to submit any changes to pgsql-hackers@postgresql.org - * or personally to the current maintainer. Those changes may be - * folded in to later versions of this program. - */ - -#define XLOG_DATA_FNAME_LEN 24 -/* Reworked from access/xlog_internal.h */ -#define XLogFileName(fname, tli, log, seg) \ - snprintf(fname, XLOG_DATA_FNAME_LEN + 1, "%08X%08X%08X", tli, log, seg) -#define XLOG_BACKUP_FNAME_LEN 40 - -/* - * Initialize allows customized commands into the archive cleanup program. - * - * You may wish to add code to check for tape libraries, etc.. - */ -static void -Initialize(void) -{ - /* - * This code assumes that archiveLocation is a directory, so we use stat - * to test if it's accessible. 
- */ - struct stat stat_buf; - - if (stat(archiveLocation, &stat_buf) != 0 || - !S_ISDIR(stat_buf.st_mode)) - { - fprintf(stderr, "%s: archive location \"%s\" does not exist\n", - progname, archiveLocation); - exit(2); - } -} - -static void -TrimExtension(char *filename, char *extension) -{ - int flen; - int elen; - - if (extension == NULL) - return; - - elen = strlen(extension); - flen = strlen(filename); - - if (flen > elen && strcmp(filename + flen - elen, extension) == 0) - filename[flen - elen] = '\0'; -} - -static void -CleanupPriorWALFiles(void) -{ - int rc; - DIR *xldir; - struct dirent *xlde; - char walfile[MAXPGPATH]; - - if ((xldir = opendir(archiveLocation)) != NULL) - { - while (errno = 0, (xlde = readdir(xldir)) != NULL) - { - strncpy(walfile, xlde->d_name, MAXPGPATH); - TrimExtension(walfile, additional_ext); - - /* - * We ignore the timeline part of the XLOG segment identifiers in - * deciding whether a segment is still needed. This ensures that - * we won't prematurely remove a segment from a parent timeline. - * We could probably be a little more proactive about removing - * segments of non-parent timelines, but that would be a whole lot - * more complicated. - * - * We use the alphanumeric sorting property of the filenames to - * decide which ones are earlier than the exclusiveCleanupFileName - * file. Note that this means files are not removed in the order - * they were originally written, in case this worries you. - */ - if (strlen(walfile) == XLOG_DATA_FNAME_LEN && - strspn(walfile, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN && - strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0) - { - /* - * Use the original file name again now, including any - * extension that might have been chopped off before testing - * the sequence. - */ - snprintf(WALFilePath, MAXPGPATH, "%s/%s", - archiveLocation, xlde->d_name); - - if (dryrun) - { - /* - * Prints the name of the file to be removed and skips the - * actual removal. The regular printout is so that the - * user can pipe the output into some other program. - */ - printf("%s\n", WALFilePath); - if (debug) - fprintf(stderr, - "%s: file \"%s\" would be removed\n", - progname, WALFilePath); - continue; - } - - if (debug) - fprintf(stderr, "%s: removing file \"%s\"\n", - progname, WALFilePath); - - rc = unlink(WALFilePath); - if (rc != 0) - { - fprintf(stderr, "%s: ERROR: could not remove file \"%s\": %s\n", - progname, WALFilePath, strerror(errno)); - break; - } - } - } - - if (errno) - fprintf(stderr, "%s: could not read archive location \"%s\": %s\n", - progname, archiveLocation, strerror(errno)); - if (closedir(xldir)) - fprintf(stderr, "%s: could not close archive location \"%s\": %s\n", - progname, archiveLocation, strerror(errno)); - } - else - fprintf(stderr, "%s: could not open archive location \"%s\": %s\n", - progname, archiveLocation, strerror(errno)); -} - -/* - * SetWALFileNameForCleanup() - * - * Set the earliest WAL filename that we want to keep on the archive - * and decide whether we need_cleanup - */ -static void -SetWALFileNameForCleanup(void) -{ - bool fnameOK = false; - - TrimExtension(restartWALFileName, additional_ext); - - /* - * If restartWALFileName is a WAL file name then just use it directly. If - * restartWALFileName is a .backup filename, make sure we use the prefix - * of the filename, otherwise we will remove wrong files since - * 000000010000000000000010.00000020.backup is after - * 000000010000000000000010. 
- */ - if (strlen(restartWALFileName) == XLOG_DATA_FNAME_LEN && - strspn(restartWALFileName, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN) - { - strcpy(exclusiveCleanupFileName, restartWALFileName); - fnameOK = true; - } - else if (strlen(restartWALFileName) == XLOG_BACKUP_FNAME_LEN) - { - int args; - uint32 tli = 1, - log = 0, - seg = 0, - offset = 0; - - args = sscanf(restartWALFileName, "%08X%08X%08X.%08X.backup", &tli, &log, &seg, &offset); - if (args == 4) - { - fnameOK = true; - - /* - * Use just the prefix of the filename, ignore everything after - * first period - */ - XLogFileName(exclusiveCleanupFileName, tli, log, seg); - } - } - - if (!fnameOK) - { - fprintf(stderr, "%s: invalid filename input\n", progname); - fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); - exit(2); - } -} - -/* ===================================================================== - * End of Customizable section - * ===================================================================== - */ - -static void -usage(void) -{ - printf("%s removes older WAL files from PostgreSQL archives.\n\n", progname); - printf("Usage:\n"); - printf(" %s [OPTION]... ARCHIVELOCATION OLDESTKEPTWALFILE\n", progname); - printf("\nOptions:\n"); - printf(" -d generate debug output (verbose mode)\n"); - printf(" -n dry run, show the names of the files that would be removed\n"); - printf(" -V, --version output version information, then exit\n"); - printf(" -x EXT clean up files if they have this extension\n"); - printf(" -?, --help show this help, then exit\n"); - printf("\n" - "For use as archive_cleanup_command in recovery.conf when standby_mode = on:\n" - " archive_cleanup_command = 'pg_archivecleanup [OPTION]... ARCHIVELOCATION %%r'\n" - "e.g.\n" - " archive_cleanup_command = 'pg_archivecleanup /mnt/server/archiverdir %%r'\n"); - printf("\n" - "Or for use as a standalone archive cleaner:\n" - "e.g.\n" - " pg_archivecleanup /mnt/server/archiverdir 000000010000000000000010.00000020.backup\n"); - printf("\nReport bugs to <pgsql-bugs@postgresql.org>.\n"); -} - -/*------------ MAIN ----------------------------------------*/ -int -main(int argc, char **argv) -{ - int c; - - progname = get_progname(argv[0]); - - if (argc > 1) - { - if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) - { - usage(); - exit(0); - } - if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) - { - puts("pg_archivecleanup (PostgreSQL) " PG_VERSION); - exit(0); - } - } - - while ((c = getopt(argc, argv, "x:dn")) != -1) - { - switch (c) - { - case 'd': /* Debug mode */ - debug = true; - break; - case 'n': /* Dry-Run mode */ - dryrun = true; - break; - case 'x': - additional_ext = strdup(optarg); /* Extension to remove - * from xlogfile names */ - break; - default: - fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); - exit(2); - break; - } - } - - /* - * We will go to the archiveLocation to check restartWALFileName. 
- * restartWALFileName may not exist anymore, which would not be an error, - * so we separate the archiveLocation and restartWALFileName so we can - * check separately whether archiveLocation exists, if not that is an - * error - */ - if (optind < argc) - { - archiveLocation = argv[optind]; - optind++; - } - else - { - fprintf(stderr, "%s: must specify archive location\n", progname); - fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); - exit(2); - } - - if (optind < argc) - { - restartWALFileName = argv[optind]; - optind++; - } - else - { - fprintf(stderr, "%s: must specify restartfilename\n", progname); - fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); - exit(2); - } - - if (optind < argc) - { - fprintf(stderr, "%s: too many parameters\n", progname); - fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); - exit(2); - } - - /* - * Check archive exists and other initialization if required. - */ - Initialize(); - - /* - * Check filename is a valid name, then process to find cut-off - */ - SetWALFileNameForCleanup(); - - if (debug) - { - snprintf(WALFilePath, MAXPGPATH, "%s/%s", - archiveLocation, exclusiveCleanupFileName); - fprintf(stderr, "%s: keep WAL file \"%s\" and later\n", - progname, WALFilePath); - } - - /* - * Remove WAL files older than cut-off - */ - CleanupPriorWALFiles(); - - exit(0); -} diff --git a/contrib/pg_buffercache/Makefile b/contrib/pg_buffercache/Makefile index 323c0ac8ed..065d3d690a 100644 --- a/contrib/pg_buffercache/Makefile +++ b/contrib/pg_buffercache/Makefile @@ -1,10 +1,11 @@ # contrib/pg_buffercache/Makefile MODULE_big = pg_buffercache -OBJS = pg_buffercache_pages.o +OBJS = pg_buffercache_pages.o $(WIN32RES) EXTENSION = pg_buffercache -DATA = pg_buffercache--1.0.sql pg_buffercache--unpackaged--1.0.sql +DATA = pg_buffercache--1.1.sql pg_buffercache--1.0--1.1.sql pg_buffercache--unpackaged--1.0.sql +PGFILEDESC = "pg_buffercache - monitoring of shared buffer cache in real-time" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_buffercache/pg_buffercache--1.0--1.1.sql b/contrib/pg_buffercache/pg_buffercache--1.0--1.1.sql new file mode 100644 index 0000000000..54d02f58c0 --- /dev/null +++ b/contrib/pg_buffercache/pg_buffercache--1.0--1.1.sql @@ -0,0 +1,11 @@ +/* contrib/pg_buffercache/pg_buffercache--1.0--1.1.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION pg_buffercache UPDATE TO '1.1'" to load this file. \quit + +-- Upgrade view to 1.1. format +CREATE OR REPLACE VIEW pg_buffercache AS + SELECT P.* FROM pg_buffercache_pages() AS P + (bufferid integer, relfilenode oid, reltablespace oid, reldatabase oid, + relforknumber int2, relblocknumber int8, isdirty bool, usagecount int2, + pinning_backends int4); diff --git a/contrib/pg_buffercache/pg_buffercache--1.0.sql b/contrib/pg_buffercache/pg_buffercache--1.1.sql index 4ca4c44256..f3b6482fa6 100644 --- a/contrib/pg_buffercache/pg_buffercache--1.0.sql +++ b/contrib/pg_buffercache/pg_buffercache--1.1.sql @@ -1,4 +1,4 @@ -/* contrib/pg_buffercache/pg_buffercache--1.0.sql */ +/* contrib/pg_buffercache/pg_buffercache--1.1.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION pg_buffercache" to load this file. 
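The pg_buffercache--1.0--1.1.sql script above simply recreates the view with the new pinning_backends column; pg_buffercache_pages.c below fills it from the buffer header's refcount while still serving callers that expect the eight-column 1.0 row type. A usage sketch after upgrading; the filter and LIMIT are only illustrative:

  ALTER EXTENSION pg_buffercache UPDATE TO '1.1';

  SELECT bufferid, relblocknumber, usagecount, pinning_backends
  FROM pg_buffercache
  WHERE isdirty
  ORDER BY pinning_backends DESC
  LIMIT 10;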
\quit @@ -13,7 +13,8 @@ LANGUAGE C; CREATE VIEW pg_buffercache AS SELECT P.* FROM pg_buffercache_pages() AS P (bufferid integer, relfilenode oid, reltablespace oid, reldatabase oid, - relforknumber int2, relblocknumber int8, isdirty bool, usagecount int2); + relforknumber int2, relblocknumber int8, isdirty bool, usagecount int2, + pinning_backends int4); -- Don't want these to be available to public. REVOKE ALL ON FUNCTION pg_buffercache_pages() FROM PUBLIC; diff --git a/contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql b/contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql index bfe6e52f8f..dc1cbdd6fe 100644 --- a/contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql +++ b/contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pg_buffercache" to load this file. \quit +\echo Use "CREATE EXTENSION pg_buffercache FROM unpackaged" to load this file. \quit ALTER EXTENSION pg_buffercache ADD function pg_buffercache_pages(); ALTER EXTENSION pg_buffercache ADD view pg_buffercache; diff --git a/contrib/pg_buffercache/pg_buffercache.control b/contrib/pg_buffercache/pg_buffercache.control index 709513c334..5494e2fae5 100644 --- a/contrib/pg_buffercache/pg_buffercache.control +++ b/contrib/pg_buffercache/pg_buffercache.control @@ -1,5 +1,5 @@ # pg_buffercache extension comment = 'examine the shared buffer cache' -default_version = '1.0' +default_version = '1.1' module_pathname = '$libdir/pg_buffercache' relocatable = true diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c index f39fe255db..98016fc365 100644 --- a/contrib/pg_buffercache/pg_buffercache_pages.c +++ b/contrib/pg_buffercache/pg_buffercache_pages.c @@ -15,11 +15,11 @@ #include "storage/bufmgr.h" -#define NUM_BUFFERCACHE_PAGES_ELEM 8 +#define NUM_BUFFERCACHE_PAGES_MIN_ELEM 8 +#define NUM_BUFFERCACHE_PAGES_ELEM 9 PG_MODULE_MAGIC; - /* * Record structure holding the to be exposed cache data. */ @@ -34,6 +34,12 @@ typedef struct bool isvalid; bool isdirty; uint16 usagecount; + /* + * An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from + * being pinned by too many backends and each backend will only pin once + * because of bufmgr.c's PrivateRefCount infrastructure. + */ + int32 pinning_backends; } BufferCachePagesRec; @@ -61,12 +67,12 @@ pg_buffercache_pages(PG_FUNCTION_ARGS) MemoryContext oldcontext; BufferCachePagesContext *fctx; /* User function context. */ TupleDesc tupledesc; + TupleDesc expected_tupledesc; HeapTuple tuple; if (SRF_IS_FIRSTCALL()) { int i; - volatile BufferDesc *bufHdr; funcctx = SRF_FIRSTCALL_INIT(); @@ -76,8 +82,23 @@ pg_buffercache_pages(PG_FUNCTION_ARGS) /* Create a user function context for cross-call persistence */ fctx = (BufferCachePagesContext *) palloc(sizeof(BufferCachePagesContext)); + /* + * To smoothly support upgrades from version 1.0 of this extension + * transparently handle the (non-)existence of the pinning_backends + * column. We unfortunately have to get the result type for that... - + * we can't use the result type determined by the function definition + * without potentially crashing when somebody uses the old (or even + * wrong) function definition though. 
+ */ + if (get_call_result_type(fcinfo, NULL, &expected_tupledesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + if (expected_tupledesc->natts < NUM_BUFFERCACHE_PAGES_MIN_ELEM || + expected_tupledesc->natts > NUM_BUFFERCACHE_PAGES_ELEM) + elog(ERROR, "incorrect number of output arguments"); + /* Construct a tuple descriptor for the result rows. */ - tupledesc = CreateTemplateTupleDesc(NUM_BUFFERCACHE_PAGES_ELEM, false); + tupledesc = CreateTemplateTupleDesc(expected_tupledesc->natts, false); TupleDescInitEntry(tupledesc, (AttrNumber) 1, "bufferid", INT4OID, -1, 0); TupleDescInitEntry(tupledesc, (AttrNumber) 2, "relfilenode", @@ -95,6 +116,10 @@ pg_buffercache_pages(PG_FUNCTION_ARGS) TupleDescInitEntry(tupledesc, (AttrNumber) 8, "usage_count", INT2OID, -1, 0); + if (expected_tupledesc->natts == NUM_BUFFERCACHE_PAGES_ELEM) + TupleDescInitEntry(tupledesc, (AttrNumber) 9, "pinning_backends", + INT4OID, -1, 0); + fctx->tupdesc = BlessTupleDesc(tupledesc); /* Allocate NBuffers worth of BufferCachePagesRec records. */ @@ -120,8 +145,11 @@ pg_buffercache_pages(PG_FUNCTION_ARGS) * Scan though all the buffers, saving the relevant fields in the * fctx->record structure. */ - for (i = 0, bufHdr = BufferDescriptors; i < NBuffers; i++, bufHdr++) + for (i = 0; i < NBuffers; i++) { + volatile BufferDesc *bufHdr; + + bufHdr = GetBufferDescriptor(i); /* Lock each buffer header before inspecting. */ LockBufHdr(bufHdr); @@ -132,6 +160,7 @@ pg_buffercache_pages(PG_FUNCTION_ARGS) fctx->record[i].forknum = bufHdr->tag.forkNum; fctx->record[i].blocknum = bufHdr->tag.blockNum; fctx->record[i].usagecount = bufHdr->usage_count; + fctx->record[i].pinning_backends = bufHdr->refcount; if (bufHdr->flags & BM_DIRTY) fctx->record[i].isdirty = true; @@ -186,6 +215,8 @@ pg_buffercache_pages(PG_FUNCTION_ARGS) nulls[5] = true; nulls[6] = true; nulls[7] = true; + /* unused for v1.0 callers, but the array is always long enough */ + nulls[8] = true; } else { @@ -203,6 +234,9 @@ pg_buffercache_pages(PG_FUNCTION_ARGS) nulls[6] = false; values[7] = Int16GetDatum(fctx->record[i].usagecount); nulls[7] = false; + /* unused for v1.0 callers, but the array is always long enough */ + values[8] = Int32GetDatum(fctx->record[i].pinning_backends); + nulls[8] = false; } /* Build and return the tuple. */ diff --git a/contrib/pg_freespacemap/Makefile b/contrib/pg_freespacemap/Makefile index b2e3ba3aa3..5760d9fe51 100644 --- a/contrib/pg_freespacemap/Makefile +++ b/contrib/pg_freespacemap/Makefile @@ -1,10 +1,11 @@ # contrib/pg_freespacemap/Makefile MODULE_big = pg_freespacemap -OBJS = pg_freespacemap.o +OBJS = pg_freespacemap.o $(WIN32RES) EXTENSION = pg_freespacemap DATA = pg_freespacemap--1.0.sql pg_freespacemap--unpackaged--1.0.sql +PGFILEDESC = "pg_freespacemap - monitoring of free space map" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql b/contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql index 5e8d7e472e..8651373800 100644 --- a/contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql +++ b/contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pg_freespacemap" to load this file. \quit +\echo Use "CREATE EXTENSION pg_freespacemap FROM unpackaged" to load this file. 
\quit ALTER EXTENSION pg_freespacemap ADD function pg_freespace(regclass,bigint); ALTER EXTENSION pg_freespacemap ADD function pg_freespace(regclass); diff --git a/contrib/pg_freespacemap/pg_freespacemap.c b/contrib/pg_freespacemap/pg_freespacemap.c index 7805345add..7d939a7d20 100644 --- a/contrib/pg_freespacemap/pg_freespacemap.c +++ b/contrib/pg_freespacemap/pg_freespacemap.c @@ -11,7 +11,6 @@ #include "funcapi.h" #include "storage/freespace.h" - PG_MODULE_MAGIC; /* diff --git a/contrib/pg_prewarm/Makefile b/contrib/pg_prewarm/Makefile index 176a29a003..eeba502672 100644 --- a/contrib/pg_prewarm/Makefile +++ b/contrib/pg_prewarm/Makefile @@ -1,10 +1,11 @@ # contrib/pg_prewarm/Makefile MODULE_big = pg_prewarm -OBJS = pg_prewarm.o +OBJS = pg_prewarm.o $(WIN32RES) EXTENSION = pg_prewarm DATA = pg_prewarm--1.0.sql +PGFILEDESC = "pg_prewarm - preload relation data into system buffer cache" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_prewarm/pg_prewarm.c b/contrib/pg_prewarm/pg_prewarm.c index df20e888ef..d81f93bba7 100644 --- a/contrib/pg_prewarm/pg_prewarm.c +++ b/contrib/pg_prewarm/pg_prewarm.c @@ -3,7 +3,7 @@ * pg_prewarm.c * prewarming utilities * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pg_prewarm/pg_prewarm.c @@ -159,6 +159,7 @@ pg_prewarm(PG_FUNCTION_ARGS) */ for (block = first_block; block <= last_block; ++block) { + CHECK_FOR_INTERRUPTS(); PrefetchBuffer(rel, forkNumber, block); ++blocks_done; } @@ -177,6 +178,7 @@ pg_prewarm(PG_FUNCTION_ARGS) */ for (block = first_block; block <= last_block; ++block) { + CHECK_FOR_INTERRUPTS(); smgrread(rel->rd_smgr, forkNumber, block, blockbuffer); ++blocks_done; } @@ -190,6 +192,7 @@ pg_prewarm(PG_FUNCTION_ARGS) { Buffer buf; + CHECK_FOR_INTERRUPTS(); buf = ReadBufferExtended(rel, forkNumber, block, RBM_NORMAL, NULL); ReleaseBuffer(buf); ++blocks_done; diff --git a/contrib/pg_standby/Makefile b/contrib/pg_standby/Makefile index b7c6ae95e7..0bca2f8e9e 100644 --- a/contrib/pg_standby/Makefile +++ b/contrib/pg_standby/Makefile @@ -4,7 +4,7 @@ PGFILEDESC = "pg_standby - supports creation of a warm standby" PGAPPICON = win32 PROGRAM = pg_standby -OBJS = pg_standby.o +OBJS = pg_standby.o $(WIN32RES) ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c index d6b169264c..2f9f2b4d2e 100644 --- a/contrib/pg_standby/pg_standby.c +++ b/contrib/pg_standby/pg_standby.c @@ -418,7 +418,7 @@ CheckForExternalTrigger(void) return; } - if ((len = read(fd, buf, sizeof(buf))) < 0) + if ((len = read(fd, buf, sizeof(buf) - 1)) < 0) { fprintf(stderr, "WARNING: could not read \"%s\": %s\n", triggerPath, strerror(errno)); diff --git a/contrib/pg_stat_statements/Makefile b/contrib/pg_stat_statements/Makefile index 95a2767006..975a637897 100644 --- a/contrib/pg_stat_statements/Makefile +++ b/contrib/pg_stat_statements/Makefile @@ -1,11 +1,13 @@ # contrib/pg_stat_statements/Makefile MODULE_big = pg_stat_statements -OBJS = pg_stat_statements.o +OBJS = pg_stat_statements.o $(WIN32RES) EXTENSION = pg_stat_statements -DATA = pg_stat_statements--1.2.sql pg_stat_statements--1.1--1.2.sql \ - pg_stat_statements--1.0--1.1.sql pg_stat_statements--unpackaged--1.0.sql +DATA = pg_stat_statements--1.3.sql pg_stat_statements--1.2--1.3.sql \ + pg_stat_statements--1.1--1.2.sql pg_stat_statements--1.0--1.1.sql \ + pg_stat_statements--unpackaged--1.0.sql +PGFILEDESC = "pg_stat_statements - execution 
statistics of SQL statements" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.2--1.3.sql b/contrib/pg_stat_statements/pg_stat_statements--1.2--1.3.sql new file mode 100644 index 0000000000..a56f151b99 --- /dev/null +++ b/contrib/pg_stat_statements/pg_stat_statements--1.2--1.3.sql @@ -0,0 +1,47 @@ +/* contrib/pg_stat_statements/pg_stat_statements--1.2--1.3.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.3'" to load this file. \quit + +/* First we have to remove them from the extension */ +ALTER EXTENSION pg_stat_statements DROP VIEW pg_stat_statements; +ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements(boolean); + +/* Then we can drop them */ +DROP VIEW pg_stat_statements; +DROP FUNCTION pg_stat_statements(boolean); + +/* Now redefine */ +CREATE FUNCTION pg_stat_statements(IN showtext boolean, + OUT userid oid, + OUT dbid oid, + OUT queryid bigint, + OUT query text, + OUT calls int8, + OUT total_time float8, + OUT min_time float8, + OUT max_time float8, + OUT mean_time float8, + OUT stddev_time float8, + OUT rows int8, + OUT shared_blks_hit int8, + OUT shared_blks_read int8, + OUT shared_blks_dirtied int8, + OUT shared_blks_written int8, + OUT local_blks_hit int8, + OUT local_blks_read int8, + OUT local_blks_dirtied int8, + OUT local_blks_written int8, + OUT temp_blks_read int8, + OUT temp_blks_written int8, + OUT blk_read_time float8, + OUT blk_write_time float8 +) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pg_stat_statements_1_3' +LANGUAGE C STRICT VOLATILE; + +CREATE VIEW pg_stat_statements AS + SELECT * FROM pg_stat_statements(true); + +GRANT SELECT ON pg_stat_statements TO PUBLIC; diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.2.sql b/contrib/pg_stat_statements/pg_stat_statements--1.3.sql index 5bfa9a55d0..92ed0571e9 100644 --- a/contrib/pg_stat_statements/pg_stat_statements--1.2.sql +++ b/contrib/pg_stat_statements/pg_stat_statements--1.3.sql @@ -1,4 +1,4 @@ -/* contrib/pg_stat_statements/pg_stat_statements--1.2.sql */ +/* contrib/pg_stat_statements/pg_stat_statements--1.3.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION pg_stat_statements" to load this file. \quit @@ -16,6 +16,10 @@ CREATE FUNCTION pg_stat_statements(IN showtext boolean, OUT query text, OUT calls int8, OUT total_time float8, + OUT min_time float8, + OUT max_time float8, + OUT mean_time float8, + OUT stddev_time float8, OUT rows int8, OUT shared_blks_hit int8, OUT shared_blks_read int8, @@ -31,7 +35,7 @@ CREATE FUNCTION pg_stat_statements(IN showtext boolean, OUT blk_write_time float8 ) RETURNS SETOF record -AS 'MODULE_PATHNAME', 'pg_stat_statements_1_2' +AS 'MODULE_PATHNAME', 'pg_stat_statements_1_3' LANGUAGE C STRICT VOLATILE; -- Register a view on the function for ease of use. diff --git a/contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql b/contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql index e84a3cbafc..116e95834d 100644 --- a/contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql +++ b/contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pg_stat_statements" to load this file. 
\quit +\echo Use "CREATE EXTENSION pg_stat_statements FROM unpackaged" to load this file. \quit ALTER EXTENSION pg_stat_statements ADD function pg_stat_statements_reset(); ALTER EXTENSION pg_stat_statements ADD function pg_stat_statements(); diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index da128efb04..2963c58998 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -48,7 +48,7 @@ * in the file to be read or written while holding only shared lock. * * - * Copyright (c) 2008-2014, PostgreSQL Global Development Group + * Copyright (c) 2008-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pg_stat_statements/pg_stat_statements.c @@ -57,6 +57,7 @@ */ #include "postgres.h" +#include <math.h> #include <sys/stat.h> #include <unistd.h> @@ -76,7 +77,6 @@ #include "utils/builtins.h" #include "utils/memutils.h" - PG_MODULE_MAGIC; /* Location of permanent stats file (valid when database is shut down) */ @@ -116,7 +116,8 @@ typedef enum pgssVersion { PGSS_V1_0 = 0, PGSS_V1_1, - PGSS_V1_2 + PGSS_V1_2, + PGSS_V1_3 } pgssVersion; /* @@ -137,6 +138,10 @@ typedef struct Counters { int64 calls; /* # of times executed */ double total_time; /* total execution time, in msec */ + double min_time; /* minimim execution time in msec */ + double max_time; /* maximum execution time in msec */ + double mean_time; /* mean execution time in msec */ + double sum_var_time; /* sum of variances in execution time in msec */ int64 rows; /* total # of retrieved or affected rows */ int64 shared_blks_hit; /* # of shared buffer hits */ int64 shared_blks_read; /* # of shared disk blocks read */ @@ -275,6 +280,7 @@ void _PG_fini(void); PG_FUNCTION_INFO_V1(pg_stat_statements_reset); PG_FUNCTION_INFO_V1(pg_stat_statements_1_2); +PG_FUNCTION_INFO_V1(pg_stat_statements_1_3); PG_FUNCTION_INFO_V1(pg_stat_statements); static void pgss_shmem_startup(void); @@ -963,10 +969,13 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString, * calculated from the query tree) would be used to accumulate costs of * ensuing EXECUTEs. This would be confusing, and inconsistent with other * cases where planning time is not included at all. + * + * Likewise, we don't track execution of DEALLOCATE. */ if (pgss_track_utility && pgss_enabled() && !IsA(parsetree, ExecuteStmt) && - !IsA(parsetree, PrepareStmt)) + !IsA(parsetree, PrepareStmt) && + !IsA(parsetree, DeallocateStmt)) { instr_time start; instr_time duration; @@ -1237,6 +1246,31 @@ pgss_store(const char *query, uint32 queryId, e->counters.calls += 1; e->counters.total_time += total_time; + if (e->counters.calls == 1) + { + e->counters.min_time = total_time; + e->counters.max_time = total_time; + e->counters.mean_time = total_time; + } + else + { + /* + * Welford's method for accurately computing variance. 
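The accumulation that follows keeps only a running mean and a running sum of squared deviations instead of storing every timing. A minimal standalone sketch of the same update, with made-up per-call times in milliseconds; the last line applies the same population formula, sqrt(sum_var / calls), that pg_stat_statements_internal later uses for stddev_time (build with -lm).

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* hypothetical per-call execution times, in msec */
        double  samples[] = {1.2, 0.9, 15.4, 1.1, 2.3};
        int     nsamples = (int) (sizeof(samples) / sizeof(samples[0]));
        double  mean = 0.0;
        double  sum_var = 0.0;      /* running sum of squared deviations */
        int     calls = 0;
        int     i;

        for (i = 0; i < nsamples; i++)
        {
            double  x = samples[i];
            double  old_mean = mean;

            calls++;
            mean += (x - old_mean) / calls;
            sum_var += (x - old_mean) * (x - mean);
        }

        /* population standard deviation, as in sqrt(sum_var_time / calls) */
        printf("mean = %.3f ms, stddev = %.3f ms\n",
               mean, calls > 1 ? sqrt(sum_var / calls) : 0.0);
        return 0;
    }
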
+ * See <http://www.johndcook.com/blog/standard_deviation/> + */ + double old_mean = e->counters.mean_time; + + e->counters.mean_time += + (total_time - old_mean) / e->counters.calls; + e->counters.sum_var_time += + (total_time - old_mean) * (total_time - e->counters.mean_time); + + /* calculate min and max time */ + if (e->counters.min_time > total_time) + e->counters.min_time = total_time; + if (e->counters.max_time < total_time) + e->counters.max_time = total_time; + } e->counters.rows += rows; e->counters.shared_blks_hit += bufusage->shared_blks_hit; e->counters.shared_blks_read += bufusage->shared_blks_read; @@ -1281,7 +1315,8 @@ pg_stat_statements_reset(PG_FUNCTION_ARGS) #define PG_STAT_STATEMENTS_COLS_V1_0 14 #define PG_STAT_STATEMENTS_COLS_V1_1 18 #define PG_STAT_STATEMENTS_COLS_V1_2 19 -#define PG_STAT_STATEMENTS_COLS 19 /* maximum of above */ +#define PG_STAT_STATEMENTS_COLS_V1_3 23 +#define PG_STAT_STATEMENTS_COLS 23 /* maximum of above */ /* * Retrieve statement statistics. @@ -1294,6 +1329,16 @@ pg_stat_statements_reset(PG_FUNCTION_ARGS) * function. Unfortunately we weren't bright enough to do that for 1.1. */ Datum +pg_stat_statements_1_3(PG_FUNCTION_ARGS) +{ + bool showtext = PG_GETARG_BOOL(0); + + pg_stat_statements_internal(fcinfo, PGSS_V1_3, showtext); + + return (Datum) 0; +} + +Datum pg_stat_statements_1_2(PG_FUNCTION_ARGS) { bool showtext = PG_GETARG_BOOL(0); @@ -1382,6 +1427,10 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, if (api_version != PGSS_V1_2) elog(ERROR, "incorrect number of output arguments"); break; + case PG_STAT_STATEMENTS_COLS_V1_3: + if (api_version != PGSS_V1_3) + elog(ERROR, "incorrect number of output arguments"); + break; default: elog(ERROR, "incorrect number of output arguments"); } @@ -1465,6 +1514,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, bool nulls[PG_STAT_STATEMENTS_COLS]; int i = 0; Counters tmp; + double stddev; int64 queryid = entry->key.queryid; memset(values, 0, sizeof(values)); @@ -1541,6 +1591,23 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, values[i++] = Int64GetDatumFast(tmp.calls); values[i++] = Float8GetDatumFast(tmp.total_time); + if (api_version >= PGSS_V1_3) + { + values[i++] = Float8GetDatumFast(tmp.min_time); + values[i++] = Float8GetDatumFast(tmp.max_time); + values[i++] = Float8GetDatumFast(tmp.mean_time); + /* + * Note we are calculating the population variance here, not the + * sample variance, as we have data for the whole population, + * so Bessel's correction is not used, and we don't divide by + * tmp.calls - 1. + */ + if (tmp.calls > 1) + stddev = sqrt(tmp.sum_var_time / tmp.calls); + else + stddev = 0.0; + values[i++] = Float8GetDatumFast(stddev); + } values[i++] = Int64GetDatumFast(tmp.rows); values[i++] = Int64GetDatumFast(tmp.shared_blks_hit); values[i++] = Int64GetDatumFast(tmp.shared_blks_read); @@ -1563,6 +1630,7 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo, Assert(i == (api_version == PGSS_V1_0 ? PG_STAT_STATEMENTS_COLS_V1_0 : api_version == PGSS_V1_1 ? PG_STAT_STATEMENTS_COLS_V1_1 : api_version == PGSS_V1_2 ? PG_STAT_STATEMENTS_COLS_V1_2 : + api_version == PGSS_V1_3 ? 
PG_STAT_STATEMENTS_COLS_V1_3 : -1 /* fail if you forget to update this assert */ )); tuplestore_putvalues(tupstore, tupdesc, values, nulls); @@ -2420,6 +2488,7 @@ JumbleExpr(pgssJumbleState *jstate, Node *node) SubLink *sublink = (SubLink *) node; APP_JUMB(sublink->subLinkType); + APP_JUMB(sublink->subLinkId); JumbleExpr(jstate, (Node *) sublink->testexpr); JumbleQuery(jstate, (Query *) sublink->subselect); } @@ -2837,6 +2906,9 @@ fill_in_constant_lengths(pgssJumbleState *jstate, const char *query) ScanKeywords, NumScanKeywords); + /* we don't want to re-emit any escape string warnings */ + yyextra.escape_string_warning = false; + /* Search for each constant, in sequence */ for (i = 0; i < jstate->clocations_count; i++) { diff --git a/contrib/pg_stat_statements/pg_stat_statements.control b/contrib/pg_stat_statements/pg_stat_statements.control index 6ecf2b6d1b..53df9789df 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.control +++ b/contrib/pg_stat_statements/pg_stat_statements.control @@ -1,5 +1,5 @@ # pg_stat_statements extension comment = 'track execution statistics of all SQL statements executed' -default_version = '1.2' +default_version = '1.3' module_pathname = '$libdir/pg_stat_statements' relocatable = true diff --git a/contrib/pg_test_fsync/Makefile b/contrib/pg_test_fsync/Makefile index b456429098..15afba7682 100644 --- a/contrib/pg_test_fsync/Makefile +++ b/contrib/pg_test_fsync/Makefile @@ -4,7 +4,7 @@ PGFILEDESC = "pg_test_fsync - test various disk sync methods" PGAPPICON = win32 PROGRAM = pg_test_fsync -OBJS = pg_test_fsync.o +OBJS = pg_test_fsync.o $(WIN32RES) ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_test_fsync/pg_test_fsync.c b/contrib/pg_test_fsync/pg_test_fsync.c index 842295ae3d..c8427623d2 100644 --- a/contrib/pg_test_fsync/pg_test_fsync.c +++ b/contrib/pg_test_fsync/pg_test_fsync.c @@ -23,9 +23,9 @@ #define XLOG_BLCKSZ_K (XLOG_BLCKSZ / 1024) -#define LABEL_FORMAT " %-32s" +#define LABEL_FORMAT " %-30s" #define NA_FORMAT "%20s" -#define OPS_FORMAT "%11.3f ops/sec %6.0f usecs/op" +#define OPS_FORMAT "%13.3f ops/sec %6.0f usecs/op" #define USECS_SEC 1000000 /* These are macros to avoid timing the function call overhead. */ @@ -242,8 +242,7 @@ test_sync(int writes_per_op) printf("\nCompare file sync methods using one %dkB write:\n", XLOG_BLCKSZ_K); else printf("\nCompare file sync methods using two %dkB writes:\n", XLOG_BLCKSZ_K); - printf("(in wal_sync_method preference order, except fdatasync\n"); - printf("is Linux's default)\n"); + printf("(in wal_sync_method preference order, except fdatasync is Linux's default)\n"); /* * Test open_datasync if available @@ -259,8 +258,6 @@ test_sync(int writes_per_op) } else { - if ((tmpfile = open(filename, O_RDWR | O_DSYNC | PG_O_DIRECT, 0)) == -1) - die("could not open output file"); START_TIMER; for (ops = 0; alarm_triggered == false; ops++) { @@ -398,8 +395,8 @@ static void test_open_syncs(void) { printf("\nCompare open_sync with different write sizes:\n"); - printf("(This is designed to compare the cost of writing 16kB\n"); - printf("in different write open_sync sizes.)\n"); + printf("(This is designed to compare the cost of writing 16kB in different write\n" + "open_sync sizes.)\n"); test_open_sync(" 1 * 16kB open_sync write", 16); test_open_sync(" 2 * 8kB open_sync writes", 8); @@ -459,8 +456,8 @@ test_file_descriptor_sync(void) * on platforms which support it. 
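The test_file_descriptor_sync() hunk here times an fsync() issued through a descriptor other than the one that performed the write. A standalone sketch of that access pattern, assuming a hypothetical scratch file name; on platforms that honor it, the second descriptor's fsync() flushes the data written through the first.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
        const char *path = "fsync_probe.tmp";  /* hypothetical scratch file */
        char        buf[8192];
        int         fd_write,
                    fd_sync;

        memset(buf, 'x', sizeof(buf));

        /* write through one descriptor and close it ... */
        if ((fd_write = open(path, O_RDWR | O_CREAT, 0600)) < 0 ||
            write(fd_write, buf, sizeof(buf)) != (ssize_t) sizeof(buf))
        {
            perror("write");
            return 1;
        }
        close(fd_write);

        /* ... then fsync through a different descriptor on the same file */
        if ((fd_sync = open(path, O_RDWR)) < 0 || fsync(fd_sync) != 0)
        {
            perror("fsync");
            return 1;
        }
        close(fd_sync);
        unlink(path);

        puts("fsync on the non-writing descriptor returned successfully");
        return 0;
    }
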
*/ printf("\nTest if fsync on non-write file descriptor is honored:\n"); - printf("(If the times are similar, fsync() can sync data written\n"); - printf("on a different descriptor.)\n"); + printf("(If the times are similar, fsync() can sync data written on a different\n" + "descriptor.)\n"); /* * first write, fsync and close, which is the normal behavior without @@ -524,7 +521,7 @@ test_non_sync(void) /* * Test a simple write without fsync */ - printf("\nNon-Sync'ed %dkB writes:\n", XLOG_BLCKSZ_K); + printf("\nNon-sync'ed %dkB writes:\n", XLOG_BLCKSZ_K); printf(LABEL_FORMAT, "write"); fflush(stdout); diff --git a/contrib/pg_test_timing/Makefile b/contrib/pg_test_timing/Makefile index b8b266a2fc..8b37aa8249 100644 --- a/contrib/pg_test_timing/Makefile +++ b/contrib/pg_test_timing/Makefile @@ -4,7 +4,7 @@ PGFILEDESC = "pg_test_timing - test timing overhead" PGAPPICON = win32 PROGRAM = pg_test_timing -OBJS = pg_test_timing.o +OBJS = pg_test_timing.o $(WIN32RES) ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pg_test_timing/pg_test_timing.c b/contrib/pg_test_timing/pg_test_timing.c index e44c535d09..e5c11de6bb 100644 --- a/contrib/pg_test_timing/pg_test_timing.c +++ b/contrib/pg_test_timing/pg_test_timing.c @@ -115,7 +115,7 @@ test_timing(int32 duration) end_time, temp; - total_time = duration > 0 ? duration * 1000000 : 0; + total_time = duration > 0 ? duration * INT64CONST(1000000) : 0; INSTR_TIME_SET_CURRENT(start_time); cur = INSTR_TIME_GET_MICROSEC(start_time); diff --git a/contrib/pg_trgm/Makefile b/contrib/pg_trgm/Makefile index 0d549f8b6c..e081a1e5e9 100644 --- a/contrib/pg_trgm/Makefile +++ b/contrib/pg_trgm/Makefile @@ -1,10 +1,11 @@ # contrib/pg_trgm/Makefile MODULE_big = pg_trgm -OBJS = trgm_op.o trgm_gist.o trgm_gin.o trgm_regexp.o +OBJS = trgm_op.o trgm_gist.o trgm_gin.o trgm_regexp.o $(WIN32RES) EXTENSION = pg_trgm DATA = pg_trgm--1.1.sql pg_trgm--1.0--1.1.sql pg_trgm--unpackaged--1.0.sql +PGFILEDESC = "pg_trgm - trigram matching" REGRESS = pg_trgm diff --git a/contrib/pg_trgm/pg_trgm--1.1.sql b/contrib/pg_trgm/pg_trgm--1.1.sql index 1fff7af2c4..34b37e4787 100644 --- a/contrib/pg_trgm/pg_trgm--1.1.sql +++ b/contrib/pg_trgm/pg_trgm--1.1.sql @@ -53,12 +53,12 @@ CREATE OPERATOR <-> ( CREATE FUNCTION gtrgm_in(cstring) RETURNS gtrgm AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION gtrgm_out(gtrgm) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE gtrgm ( INTERNALLENGTH = -1, diff --git a/contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql b/contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql index 7243a6a410..d3eab97d41 100644 --- a/contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql +++ b/contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pg_trgm" to load this file. \quit +\echo Use "CREATE EXTENSION pg_trgm FROM unpackaged" to load this file. \quit ALTER EXTENSION pg_trgm ADD function set_limit(real); ALTER EXTENSION pg_trgm ADD function show_limit(); diff --git a/contrib/pg_trgm/trgm.h b/contrib/pg_trgm/trgm.h index ed649b8dcc..f030558088 100644 --- a/contrib/pg_trgm/trgm.h +++ b/contrib/pg_trgm/trgm.h @@ -63,7 +63,7 @@ typedef struct { int32 vl_len_; /* varlena header (do not touch directly!) 
*/ uint8 flag; - char data[1]; + char data[FLEXIBLE_ARRAY_MEMBER]; } TRGM; #define TRGMHDRSIZE (VARHDRSZ + sizeof(uint8)) diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c index c385e09edd..1a71a2bef3 100644 --- a/contrib/pg_trgm/trgm_op.c +++ b/contrib/pg_trgm/trgm_op.c @@ -10,7 +10,7 @@ #include "catalog/pg_type.h" #include "tsearch/ts_locale.h" #include "utils/memutils.h" - +#include "utils/pg_crc.h" PG_MODULE_MAGIC; @@ -109,9 +109,9 @@ compact_trigram(trgm *tptr, char *str, int bytelen) { pg_crc32 crc; - INIT_CRC32(crc); - COMP_CRC32(crc, str, bytelen); - FIN_CRC32(crc); + INIT_LEGACY_CRC32(crc); + COMP_LEGACY_CRC32(crc, str, bytelen); + FIN_LEGACY_CRC32(crc); /* * use only 3 upper bytes from crc, hope, it's good enough hashing diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c index 9f050533c5..a91e6186ba 100644 --- a/contrib/pg_trgm/trgm_regexp.c +++ b/contrib/pg_trgm/trgm_regexp.c @@ -181,7 +181,7 @@ * 7) Mark state 3 final because state 5 of source NFA is marked as final. * * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION @@ -877,7 +877,7 @@ convertPgWchar(pg_wchar c, trgm_mb_char *result) #endif /* Fill result with exactly MAX_MULTIBYTE_CHAR_LEN bytes */ - strncpy(result->bytes, s, MAX_MULTIBYTE_CHAR_LEN); + memcpy(result->bytes, s, MAX_MULTIBYTE_CHAR_LEN); return true; } @@ -915,11 +915,10 @@ transformGraph(TrgmNFA *trgmNFA) hashCtl.keysize = sizeof(TrgmStateKey); hashCtl.entrysize = sizeof(TrgmState); hashCtl.hcxt = CurrentMemoryContext; - hashCtl.hash = tag_hash; trgmNFA->states = hash_create("Trigram NFA", 1024, &hashCtl, - HASH_ELEM | HASH_CONTEXT | HASH_FUNCTION); + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); /* Create initial state: ambiguous prefix, NFA's initial state */ MemSet(&initkey, 0, sizeof(initkey)); diff --git a/contrib/pg_upgrade/.gitignore b/contrib/pg_upgrade/.gitignore deleted file mode 100644 index 9555f54e85..0000000000 --- a/contrib/pg_upgrade/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -/pg_upgrade -# Generated by test suite -analyze_new_cluster.sh -delete_old_cluster.sh -/log/ -/tmp_check/ diff --git a/contrib/pg_upgrade/IMPLEMENTATION b/contrib/pg_upgrade/IMPLEMENTATION deleted file mode 100644 index a0cfcf15da..0000000000 --- a/contrib/pg_upgrade/IMPLEMENTATION +++ /dev/null @@ -1,100 +0,0 @@ -contrib/pg_upgrade/IMPLEMENTATION - ------------------------------------------------------------------------------- -PG_UPGRADE: IN-PLACE UPGRADES FOR POSTGRESQL ------------------------------------------------------------------------------- - -Upgrading a PostgreSQL database from one major release to another can be -an expensive process. For minor upgrades, you can simply install new -executables and forget about upgrading existing data. But for major -upgrades, you have to export all of your data using pg_dump, install the -new release, run initdb to create a new cluster, and then import your -old data. If you have a lot of data, that can take a considerable amount -of time. If you have too much data, you may have to buy more storage -since you need enough room to hold the original data plus the exported -data. pg_upgrade can reduce the amount of time and disk space required -for many upgrades. 
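The trgm.h hunk earlier in this diff replaces the old one-element-array idiom (char data[1]) with FLEXIBLE_ARRAY_MEMBER, PostgreSQL's portability macro for declaring a C99 flexible array member, so that the payload length is no longer overstated in sizeof. A minimal standalone sketch of the same header-plus-payload layout, with hypothetical field names and plain malloc() in place of palloc().

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* fixed header followed by a C99 flexible array member */
    typedef struct
    {
        size_t      datalen;    /* number of bytes stored in data[] */
        unsigned char flag;
        char        data[];     /* flexible array member */
    } VarNode;

    static VarNode *
    varnode_make(const char *payload)
    {
        size_t      len = strlen(payload);
        VarNode    *node = malloc(offsetof(VarNode, data) + len);

        if (node == NULL)
            return NULL;
        node->datalen = len;
        node->flag = 0;
        memcpy(node->data, payload, len);   /* payload is not NUL-terminated */
        return node;
    }

    int
    main(void)
    {
        VarNode    *node = varnode_make("abcabcabc");

        if (node == NULL)
            return 1;
        printf("stored %zu payload bytes after a %zu-byte header\n",
               node->datalen, offsetof(VarNode, data));
        free(node);
        return 0;
    }
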
- -The URL http://momjian.us/main/writings/pgsql/pg_upgrade.pdf contains a -presentation about pg_upgrade internals that mirrors the text -description below. - ------------------------------------------------------------------------------- -WHAT IT DOES ------------------------------------------------------------------------------- - -pg_upgrade is a tool that performs an in-place upgrade of existing -data. Some upgrades change the on-disk representation of data; -pg_upgrade cannot help in those upgrades. However, many upgrades do -not change the on-disk representation of a user-defined table. In those -cases, pg_upgrade can move existing user-defined tables from the old -database cluster into the new cluster. - -There are two factors that determine whether an in-place upgrade is -practical. - -Every table in a cluster shares the same on-disk representation of the -table headers and trailers and the on-disk representation of tuple -headers. If this changes between the old version of PostgreSQL and the -new version, pg_upgrade cannot move existing tables to the new cluster; -you will have to pg_dump the old data and then import that data into the -new cluster. - -Second, all data types should have the same binary representation -between the two major PostgreSQL versions. - ------------------------------------------------------------------------------- -HOW IT WORKS ------------------------------------------------------------------------------- - -To use pg_upgrade during an upgrade, start by installing a fresh -cluster using the newest version in a new directory. When you've -finished installation, the new cluster will contain the new executables -and the usual template0, template1, and postgres, but no user-defined -tables. At this point, you can shut down the old and new postmasters and -invoke pg_upgrade. - -When pg_upgrade starts, it ensures that all required executables are -present and contain the expected version numbers. The verification -process also checks the old and new $PGDATA directories to ensure that -the expected files and subdirectories are in place. If the verification -process succeeds, pg_upgrade starts the old postmaster and runs -pg_dumpall --schema-only to capture the metadata contained in the old -cluster. The script produced by pg_dumpall will be used in a later step -to recreate all user-defined objects in the new cluster. - -Note that the script produced by pg_dumpall will only recreate -user-defined objects, not system-defined objects. The new cluster will -contain the system-defined objects created by the latest version of -PostgreSQL. - -Once pg_upgrade has extracted the metadata from the old cluster, it -performs a number of bookkeeping tasks required to 'sync up' the new -cluster with the existing data. - -First, pg_upgrade copies the commit status information and 'next -transaction ID' from the old cluster to the new cluster. This is the -steps ensures that the proper tuples are visible from the new cluster. -Remember, pg_upgrade does not export/import the content of user-defined -tables so the transaction IDs in the new cluster must match the -transaction IDs in the old data. pg_upgrade also copies the starting -address for write-ahead logs from the old cluster to the new cluster. - -Now pg_upgrade begins reconstructing the metadata obtained from the old -cluster using the first part of the pg_dumpall output. 
- -Next, pg_upgrade executes the remainder of the script produced earlier -by pg_dumpall --- this script effectively creates the complete -user-defined metadata from the old cluster to the new cluster. It -preserves the relfilenode numbers so TOAST and other references -to relfilenodes in user data is preserved. (See binary-upgrade usage -in pg_dump). - -Finally, pg_upgrade links or copies each user-defined table and its -supporting indexes and toast tables from the old cluster to the new -cluster. - -An important feature of the pg_upgrade design is that it leaves the -original cluster intact --- if a problem occurs during the upgrade, you -can still run the previous version, after renaming the tablespaces back -to the original names. diff --git a/contrib/pg_upgrade/Makefile b/contrib/pg_upgrade/Makefile deleted file mode 100644 index 150a9b4770..0000000000 --- a/contrib/pg_upgrade/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -# contrib/pg_upgrade/Makefile - -PGFILEDESC = "pg_upgrade - an in-place binary upgrade utility" -PGAPPICON = win32 - -PROGRAM = pg_upgrade -OBJS = check.o controldata.o dump.o exec.o file.o function.o info.o \ - option.o page.o parallel.o pg_upgrade.o relfilenode.o server.o \ - tablespace.o util.o version.o version_old_8_3.o $(WIN32RES) - -PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir) -PG_LIBS = $(libpq_pgport) - -EXTRA_CLEAN = analyze_new_cluster.sh delete_old_cluster.sh log/ tmp_check/ \ - pg_upgrade_dump_globals.sql \ - pg_upgrade_dump_*.custom pg_upgrade_*.log - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/pg_upgrade -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif - -check: test.sh all - MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) EXTRA_REGRESS_OPTS="$(EXTRA_REGRESS_OPTS)" $(SHELL) $< --install - -# disabled because it upsets the build farm -#installcheck: test.sh -# MAKE=$(MAKE) bindir=$(bindir) libdir=$(libdir) $(SHELL) $< diff --git a/contrib/pg_upgrade/TESTING b/contrib/pg_upgrade/TESTING deleted file mode 100644 index 359688c664..0000000000 --- a/contrib/pg_upgrade/TESTING +++ /dev/null @@ -1,83 +0,0 @@ -contrib/pg_upgrade/TESTING - -The most effective way to test pg_upgrade, aside from testing on user -data, is by upgrading the PostgreSQL regression database. - -This testing process first requires the creation of a valid regression -database dump. Such files contain most database features and are -specific to each major version of Postgres. - -Here are the steps needed to create a regression database dump file: - -1) Create and populate the regression database in the old cluster - This database can be created by running 'make installcheck' from - src/test/regression. - -2) Use pg_dump to dump out the regression database. Use the new - cluster's pg_dump on the old database to minimize whitespace - differences in the diff. - -3) Adjust the regression database dump file - - a) Perform the load/dump twice - This fixes problems with the ordering of COPY columns for - inherited tables. - - b) Change CREATE FUNCTION shared object paths to use '$libdir' - The old and new cluster will have different shared object paths. - - c) Fix any wrapping format differences - Commands like CREATE TRIGGER and ALTER TABLE sometimes have - differences. 
- - d) For pre-9.0, change CREATE OR REPLACE LANGUAGE to CREATE LANGUAGE - - e) For pre-9.0, remove 'regex_flavor' - - f) For pre-9.0, adjust extra_float_digits - Postgres 9.0 pg_dump uses extra_float_digits=-2 for pre-9.0 - databases, and extra_float_digits=-3 for >= 9.0 databases. - It is necessary to modify 9.0 pg_dump to always use -3, and - modify the pre-9.0 old server to accept extra_float_digits=-3. - -Once the dump is created, it can be repeatedly loaded into the old -database, upgraded, and dumped out of the new database, and then -compared to the original version. To test the dump file, perform these -steps: - -1) Create the old and new clusters in different directories. - -2) Copy the regression shared object files into the appropriate /lib - directory for old and new clusters. - -3) Create the regression database in the old server. - -4) Load the dump file created above into the regression database; - check for errors while loading. - -5) Upgrade the old database to the new major version, as outlined in - the pg_upgrade manual section. - -6) Use pg_dump to dump out the regression database in the new cluster. - -7) Diff the regression database dump file with the regression dump - file loaded into the old server. - -The shell script test.sh in this directory performs more or less this -procedure. You can invoke it by running - - make check - -or by running - - make installcheck - -if "make install" (or "make install-world") were done beforehand. -When invoked without arguments, it will run an upgrade from the -version in this source tree to a new instance of the same version. To -test an upgrade from a different version, invoke it like this: - - make installcheck oldbindir=...otherversion/bin oldsrc=...somewhere/postgresql - -In this case, you will have to manually eyeball the resulting dump -diff for version-specific differences, as explained above. diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c deleted file mode 100644 index edfe7e114b..0000000000 --- a/contrib/pg_upgrade/check.c +++ /dev/null @@ -1,1031 +0,0 @@ -/* - * check.c - * - * server checks and output routines - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/check.c - */ - -#include "postgres_fe.h" - -#include "mb/pg_wchar.h" -#include "pg_upgrade.h" - - -static void set_locale_and_encoding(ClusterInfo *cluster); -static void check_new_cluster_is_empty(void); -static void check_locale_and_encoding(ControlData *oldctrl, - ControlData *newctrl); -static bool equivalent_locale(const char *loca, const char *locb); -static bool equivalent_encoding(const char *chara, const char *charb); -static void check_is_super_user(ClusterInfo *cluster); -static void check_for_prepared_transactions(ClusterInfo *cluster); -static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster); -static void check_for_reg_data_type_usage(ClusterInfo *cluster); -static void get_bin_version(ClusterInfo *cluster); -static char *get_canonical_locale_name(int category, const char *locale); - - -/* - * fix_path_separator - * For non-Windows, just return the argument. - * For Windows convert any forward slash to a backslash - * such as is suitable for arguments to builtin commands - * like RMDIR and DEL. 
- */ -static char * -fix_path_separator(char *path) -{ -#ifdef WIN32 - - char *result; - char *c; - - result = pg_strdup(path); - - for (c = result; *c != '\0'; c++) - if (*c == '/') - *c = '\\'; - - return result; -#else - - return path; -#endif -} - -void -output_check_banner(bool live_check) -{ - if (user_opts.check && live_check) - { - pg_log(PG_REPORT, "Performing Consistency Checks on Old Live Server\n"); - pg_log(PG_REPORT, "------------------------------------------------\n"); - } - else - { - pg_log(PG_REPORT, "Performing Consistency Checks\n"); - pg_log(PG_REPORT, "-----------------------------\n"); - } -} - - -void -check_and_dump_old_cluster(bool live_check, char **sequence_script_file_name) -{ - /* -- OLD -- */ - - if (!live_check) - start_postmaster(&old_cluster, true); - - set_locale_and_encoding(&old_cluster); - - get_pg_database_relfilenode(&old_cluster); - - /* Extract a list of databases and tables from the old cluster */ - get_db_and_rel_infos(&old_cluster); - - init_tablespaces(); - - get_loadable_libraries(); - - - /* - * Check for various failure cases - */ - check_is_super_user(&old_cluster); - check_for_prepared_transactions(&old_cluster); - check_for_reg_data_type_usage(&old_cluster); - check_for_isn_and_int8_passing_mismatch(&old_cluster); - - /* old = PG 8.3 checks? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 803) - { - old_8_3_check_for_name_data_type_usage(&old_cluster); - old_8_3_check_for_tsquery_usage(&old_cluster); - old_8_3_check_ltree_usage(&old_cluster); - if (user_opts.check) - { - old_8_3_rebuild_tsvector_tables(&old_cluster, true); - old_8_3_invalidate_hash_gin_indexes(&old_cluster, true); - old_8_3_invalidate_bpchar_pattern_ops_indexes(&old_cluster, true); - } - else - - /* - * While we have the old server running, create the script to - * properly restore its sequence values but we report this at the - * end. - */ - *sequence_script_file_name = - old_8_3_create_sequence_script(&old_cluster); - } - - /* Pre-PG 9.4 had a different 'line' data type internal format */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 903) - old_9_3_check_for_line_data_type_usage(&old_cluster); - - /* Pre-PG 9.0 had no large object permissions */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) - new_9_0_populate_pg_largeobject_metadata(&old_cluster, true); - - /* - * While not a check option, we do this now because this is the only time - * the old server is running. - */ - if (!user_opts.check) - generate_old_dump(); - - if (!live_check) - stop_postmaster(false); -} - - -void -check_new_cluster(void) -{ - set_locale_and_encoding(&new_cluster); - - check_locale_and_encoding(&old_cluster.controldata, &new_cluster.controldata); - - get_db_and_rel_infos(&new_cluster); - - check_new_cluster_is_empty(); - - check_loadable_libraries(); - - if (user_opts.transfer_mode == TRANSFER_MODE_LINK) - check_hard_link(); - - check_is_super_user(&new_cluster); - - /* - * We don't restore our own user, so both clusters must match have - * matching install-user oids. - */ - if (old_cluster.install_role_oid != new_cluster.install_role_oid) - pg_fatal("Old and new cluster install users have different values for pg_authid.oid.\n"); - - /* - * We only allow the install user in the new cluster because other defined - * users might match users defined in the old cluster and generate an - * error during pg_dump restore. 
- */ - if (new_cluster.role_count != 1) - pg_fatal("Only the install user can be defined in the new cluster.\n"); - - check_for_prepared_transactions(&new_cluster); -} - - -void -report_clusters_compatible(void) -{ - if (user_opts.check) - { - pg_log(PG_REPORT, "\n*Clusters are compatible*\n"); - /* stops new cluster */ - stop_postmaster(false); - exit(0); - } - - pg_log(PG_REPORT, "\n" - "If pg_upgrade fails after this point, you must re-initdb the\n" - "new cluster before continuing.\n"); -} - - -void -issue_warnings(char *sequence_script_file_name) -{ - /* old = PG 8.3 warnings? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 803) - { - start_postmaster(&new_cluster, true); - - /* restore proper sequence values using file created from old server */ - if (sequence_script_file_name) - { - prep_status("Adjusting sequences"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"", - new_cluster.bindir, cluster_conn_opts(&new_cluster), - sequence_script_file_name); - unlink(sequence_script_file_name); - check_ok(); - } - - old_8_3_rebuild_tsvector_tables(&new_cluster, false); - old_8_3_invalidate_hash_gin_indexes(&new_cluster, false); - old_8_3_invalidate_bpchar_pattern_ops_indexes(&new_cluster, false); - stop_postmaster(false); - } - - /* Create dummy large object permissions for old < PG 9.0? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) - { - start_postmaster(&new_cluster, true); - new_9_0_populate_pg_largeobject_metadata(&new_cluster, false); - stop_postmaster(false); - } -} - - -void -output_completion_banner(char *analyze_script_file_name, - char *deletion_script_file_name) -{ - /* Did we copy the free space files? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) - pg_log(PG_REPORT, - "Optimizer statistics are not transferred by pg_upgrade so,\n" - "once you start the new server, consider running:\n" - " %s\n\n", analyze_script_file_name); - else - pg_log(PG_REPORT, - "Optimizer statistics and free space information are not transferred\n" - "by pg_upgrade so, once you start the new server, consider running:\n" - " %s\n\n", analyze_script_file_name); - - - if (deletion_script_file_name) - pg_log(PG_REPORT, - "Running this script will delete the old cluster's data files:\n" - " %s\n", - deletion_script_file_name); - else - pg_log(PG_REPORT, - "Could not create a script to delete the old cluster's data\n" - "files because user-defined tablespaces exist in the old cluster\n" - "directory. The old cluster's contents must be deleted manually.\n"); -} - - -void -check_cluster_versions(void) -{ - prep_status("Checking cluster versions"); - - /* get old and new cluster versions */ - old_cluster.major_version = get_major_server_version(&old_cluster); - new_cluster.major_version = get_major_server_version(&new_cluster); - - /* - * We allow upgrades from/to the same major version for alpha/beta - * upgrades - */ - - if (GET_MAJOR_VERSION(old_cluster.major_version) < 803) - pg_fatal("This utility can only upgrade from PostgreSQL version 8.3 and later.\n"); - - /* Only current PG version is supported as a target */ - if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM)) - pg_fatal("This utility can only upgrade to PostgreSQL version %s.\n", - PG_MAJORVERSION); - - /* - * We can't allow downgrading because we use the target pg_dumpall, and - * pg_dumpall cannot operate on new database versions, only older - * versions. 
- */ - if (old_cluster.major_version > new_cluster.major_version) - pg_fatal("This utility cannot be used to downgrade to older major PostgreSQL versions.\n"); - - /* get old and new binary versions */ - get_bin_version(&old_cluster); - get_bin_version(&new_cluster); - - /* Ensure binaries match the designated data directories */ - if (GET_MAJOR_VERSION(old_cluster.major_version) != - GET_MAJOR_VERSION(old_cluster.bin_version)) - pg_fatal("Old cluster data and binary directories are from different major versions.\n"); - if (GET_MAJOR_VERSION(new_cluster.major_version) != - GET_MAJOR_VERSION(new_cluster.bin_version)) - pg_fatal("New cluster data and binary directories are from different major versions.\n"); - - check_ok(); -} - - -void -check_cluster_compatibility(bool live_check) -{ - /* get/check pg_control data of servers */ - get_control_data(&old_cluster, live_check); - get_control_data(&new_cluster, false); - check_control_data(&old_cluster.controldata, &new_cluster.controldata); - - /* Is it 9.0 but without tablespace directories? */ - if (GET_MAJOR_VERSION(new_cluster.major_version) == 900 && - new_cluster.controldata.cat_ver < TABLE_SPACE_SUBDIRS_CAT_VER) - pg_fatal("This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n" - "because of backend API changes made during development.\n"); - - /* We read the real port number for PG >= 9.1 */ - if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 && - old_cluster.port == DEF_PGUPORT) - pg_fatal("When checking a pre-PG 9.1 live old server, " - "you must specify the old server's port number.\n"); - - if (live_check && old_cluster.port == new_cluster.port) - pg_fatal("When checking a live server, " - "the old and new port numbers must be different.\n"); -} - - -/* - * set_locale_and_encoding() - * - * query the database to get the template0 locale - */ -static void -set_locale_and_encoding(ClusterInfo *cluster) -{ - ControlData *ctrl = &cluster->controldata; - PGconn *conn; - PGresult *res; - int i_encoding; - int cluster_version = cluster->major_version; - - conn = connectToServer(cluster, "template1"); - - /* for pg < 80400, we got the values from pg_controldata */ - if (cluster_version >= 80400) - { - int i_datcollate; - int i_datctype; - - res = executeQueryOrDie(conn, - "SELECT datcollate, datctype " - "FROM pg_catalog.pg_database " - "WHERE datname = 'template0' "); - assert(PQntuples(res) == 1); - - i_datcollate = PQfnumber(res, "datcollate"); - i_datctype = PQfnumber(res, "datctype"); - - if (GET_MAJOR_VERSION(cluster->major_version) < 902) - { - /* - * Pre-9.2 did not canonicalize the supplied locale names to match - * what the system returns, while 9.2+ does, so convert pre-9.2 to - * match. 
- */ - ctrl->lc_collate = get_canonical_locale_name(LC_COLLATE, - pg_strdup(PQgetvalue(res, 0, i_datcollate))); - ctrl->lc_ctype = get_canonical_locale_name(LC_CTYPE, - pg_strdup(PQgetvalue(res, 0, i_datctype))); - } - else - { - ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate)); - ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype)); - } - - PQclear(res); - } - - res = executeQueryOrDie(conn, - "SELECT pg_catalog.pg_encoding_to_char(encoding) " - "FROM pg_catalog.pg_database " - "WHERE datname = 'template0' "); - assert(PQntuples(res) == 1); - - i_encoding = PQfnumber(res, "pg_encoding_to_char"); - ctrl->encoding = pg_strdup(PQgetvalue(res, 0, i_encoding)); - - PQclear(res); - - PQfinish(conn); -} - - -/* - * check_locale_and_encoding() - * - * Check that old and new locale and encoding match. Even though the backend - * tries to canonicalize stored locale names, the platform often doesn't - * cooperate, so it's entirely possible that one DB thinks its locale is - * "en_US.UTF-8" while the other says "en_US.utf8". Try to be forgiving. - */ -static void -check_locale_and_encoding(ControlData *oldctrl, - ControlData *newctrl) -{ - if (!equivalent_locale(oldctrl->lc_collate, newctrl->lc_collate)) - pg_fatal("lc_collate cluster values do not match: old \"%s\", new \"%s\"\n", - oldctrl->lc_collate, newctrl->lc_collate); - if (!equivalent_locale(oldctrl->lc_ctype, newctrl->lc_ctype)) - pg_fatal("lc_ctype cluster values do not match: old \"%s\", new \"%s\"\n", - oldctrl->lc_ctype, newctrl->lc_ctype); - if (!equivalent_encoding(oldctrl->encoding, newctrl->encoding)) - pg_fatal("encoding cluster values do not match: old \"%s\", new \"%s\"\n", - oldctrl->encoding, newctrl->encoding); -} - -/* - * equivalent_locale() - * - * Best effort locale-name comparison. Return false if we are not 100% sure - * the locales are equivalent. - */ -static bool -equivalent_locale(const char *loca, const char *locb) -{ - const char *chara = strrchr(loca, '.'); - const char *charb = strrchr(locb, '.'); - int lencmp; - - /* If they don't both contain an encoding part, just do strcasecmp(). */ - if (!chara || !charb) - return (pg_strcasecmp(loca, locb) == 0); - - /* - * Compare the encoding parts. Windows tends to use code page numbers for - * the encoding part, which equivalent_encoding() won't like, so accept if - * the strings are case-insensitive equal; otherwise use - * equivalent_encoding() to compare. - */ - if (pg_strcasecmp(chara + 1, charb + 1) != 0 && - !equivalent_encoding(chara + 1, charb + 1)) - return false; - - /* - * OK, compare the locale identifiers (e.g. en_US part of en_US.utf8). - * - * It's tempting to ignore non-alphanumeric chars here, but for now it's - * not clear that that's necessary; just do case-insensitive comparison. - */ - lencmp = chara - loca; - if (lencmp != charb - locb) - return false; - - return (pg_strncasecmp(loca, locb, lencmp) == 0); -} - -/* - * equivalent_encoding() - * - * Best effort encoding-name comparison. Return true only if the encodings - * are valid server-side encodings and known equivalent. - * - * Because the lookup in pg_valid_server_encoding() does case folding and - * ignores non-alphanumeric characters, this will recognize many popular - * variant spellings as equivalent, eg "utf8" and "UTF-8" will match. 
- */ -static bool -equivalent_encoding(const char *chara, const char *charb) -{ - int enca = pg_valid_server_encoding(chara); - int encb = pg_valid_server_encoding(charb); - - if (enca < 0 || encb < 0) - return false; - - return (enca == encb); -} - - -static void -check_new_cluster_is_empty(void) -{ - int dbnum; - - for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) - { - int relnum; - RelInfoArr *rel_arr = &new_cluster.dbarr.dbs[dbnum].rel_arr; - - for (relnum = 0; relnum < rel_arr->nrels; - relnum++) - { - /* pg_largeobject and its index should be skipped */ - if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0) - pg_fatal("New cluster database \"%s\" is not empty\n", - new_cluster.dbarr.dbs[dbnum].db_name); - } - } - -} - - -/* - * create_script_for_cluster_analyze() - * - * This incrementally generates better optimizer statistics - */ -void -create_script_for_cluster_analyze(char **analyze_script_file_name) -{ - FILE *script = NULL; - char *user_specification = ""; - - prep_status("Creating script to analyze new cluster"); - - if (os_info.user_specified) - user_specification = psprintf("-U \"%s\" ", os_info.user); - - *analyze_script_file_name = psprintf("analyze_new_cluster.%s", SCRIPT_EXT); - - if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - *analyze_script_file_name, getErrorText(errno)); - -#ifndef WIN32 - /* add shebang header */ - fprintf(script, "#!/bin/sh\n\n"); -#else - /* suppress command echoing */ - fprintf(script, "@echo off\n"); -#endif - - fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %swith increasing accuracy. When it is done, your system will%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %shave the default level of optimizer statistics.%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo%s\n\n", ECHO_BLANK); - - fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo%s\n\n", ECHO_BLANK); - - fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %sthis script and run:%s\n", - ECHO_QUOTE, ECHO_QUOTE); - fprintf(script, "echo %s \"%s/vacuumdb\" %s--all %s%s\n", ECHO_QUOTE, - new_cluster.bindir, user_specification, - /* Did we copy the free space files? */ - (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ? - "--analyze-only" : "--analyze", ECHO_QUOTE); - fprintf(script, "echo%s\n\n", ECHO_BLANK); - - fprintf(script, "\"%s/vacuumdb\" %s--all --analyze-in-stages\n", - new_cluster.bindir, user_specification); - /* Did we copy the free space files? 
*/ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 804) - fprintf(script, "\"%s/vacuumdb\" %s--all\n", new_cluster.bindir, - user_specification); - - fprintf(script, "echo%s\n\n", ECHO_BLANK); - fprintf(script, "echo %sDone%s\n", - ECHO_QUOTE, ECHO_QUOTE); - - fclose(script); - -#ifndef WIN32 - if (chmod(*analyze_script_file_name, S_IRWXU) != 0) - pg_fatal("Could not add execute permission to file \"%s\": %s\n", - *analyze_script_file_name, getErrorText(errno)); -#endif - - if (os_info.user_specified) - pg_free(user_specification); - - check_ok(); -} - - -/* - * create_script_for_old_cluster_deletion() - * - * This is particularly useful for tablespace deletion. - */ -void -create_script_for_old_cluster_deletion(char **deletion_script_file_name) -{ - FILE *script = NULL; - int tblnum; - char old_cluster_pgdata[MAXPGPATH]; - - *deletion_script_file_name = psprintf("delete_old_cluster.%s", SCRIPT_EXT); - - /* - * Some users (oddly) create tablespaces inside the cluster data - * directory. We can't create a proper old cluster delete script in that - * case. - */ - strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH); - canonicalize_path(old_cluster_pgdata); - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) - { - char old_tablespace_dir[MAXPGPATH]; - - strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH); - canonicalize_path(old_tablespace_dir); - if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir)) - { - /* Unlink file in case it is left over from a previous run. */ - unlink(*deletion_script_file_name); - pg_free(*deletion_script_file_name); - *deletion_script_file_name = NULL; - return; - } - } - - prep_status("Creating script to delete old cluster"); - - if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - *deletion_script_file_name, getErrorText(errno)); - -#ifndef WIN32 - /* add shebang header */ - fprintf(script, "#!/bin/sh\n\n"); -#endif - - /* delete old cluster's default tablespace */ - fprintf(script, RMDIR_CMD " %s\n", fix_path_separator(old_cluster.pgdata)); - - /* delete old cluster's alternate tablespaces */ - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) - { - /* - * Do the old cluster's per-database directories share a directory - * with a new version-specific tablespace? - */ - if (strlen(old_cluster.tablespace_suffix) == 0) - { - /* delete per-database directories */ - int dbnum; - - fprintf(script, "\n"); - /* remove PG_VERSION? */ - if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) - fprintf(script, RM_CMD " %s%cPG_VERSION\n", - fix_path_separator(os_info.old_tablespaces[tblnum]), - PATH_SEPARATOR); - - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - fprintf(script, RMDIR_CMD " %s%c%d\n", - fix_path_separator(os_info.old_tablespaces[tblnum]), - PATH_SEPARATOR, old_cluster.dbarr.dbs[dbnum].db_oid); - } - else - { - char *suffix_path = pg_strdup(old_cluster.tablespace_suffix); - - /* - * Simply delete the tablespace directory, which might be ".old" - * or a version-specific subdirectory. 
- */ - fprintf(script, RMDIR_CMD " %s%s\n", - fix_path_separator(os_info.old_tablespaces[tblnum]), - fix_path_separator(suffix_path)); - pfree(suffix_path); - } - } - - fclose(script); - -#ifndef WIN32 - if (chmod(*deletion_script_file_name, S_IRWXU) != 0) - pg_fatal("Could not add execute permission to file \"%s\": %s\n", - *deletion_script_file_name, getErrorText(errno)); -#endif - - check_ok(); -} - - -/* - * check_is_super_user() - * - * Check we are superuser, and out user id and user count - */ -static void -check_is_super_user(ClusterInfo *cluster) -{ - PGresult *res; - PGconn *conn = connectToServer(cluster, "template1"); - - prep_status("Checking database user is a superuser"); - - /* Can't use pg_authid because only superusers can view it. */ - res = executeQueryOrDie(conn, - "SELECT rolsuper, oid " - "FROM pg_catalog.pg_roles " - "WHERE rolname = current_user"); - - if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0) - pg_fatal("database user \"%s\" is not a superuser\n", - os_info.user); - - cluster->install_role_oid = atooid(PQgetvalue(res, 0, 1)); - - PQclear(res); - - res = executeQueryOrDie(conn, - "SELECT COUNT(*) " - "FROM pg_catalog.pg_roles "); - - if (PQntuples(res) != 1) - pg_fatal("could not determine the number of users\n"); - - cluster->role_count = atoi(PQgetvalue(res, 0, 0)); - - PQclear(res); - - PQfinish(conn); - - check_ok(); -} - - -/* - * check_for_prepared_transactions() - * - * Make sure there are no prepared transactions because the storage format - * might have changed. - */ -static void -check_for_prepared_transactions(ClusterInfo *cluster) -{ - PGresult *res; - PGconn *conn = connectToServer(cluster, "template1"); - - prep_status("Checking for prepared transactions"); - - res = executeQueryOrDie(conn, - "SELECT * " - "FROM pg_catalog.pg_prepared_xacts"); - - if (PQntuples(res) != 0) - pg_fatal("The %s cluster contains prepared transactions\n", - CLUSTER_NAME(cluster)); - - PQclear(res); - - PQfinish(conn); - - check_ok(); -} - - -/* - * check_for_isn_and_int8_passing_mismatch() - * - * contrib/isn relies on data type int8, and in 8.4 int8 can now be passed - * by value. The schema dumps the CREATE TYPE PASSEDBYVALUE setting so - * it must match for the old and new servers. 
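
check_is_super_user() and check_for_prepared_transactions() share one libpq shape: connect to template1, run a single catalog query, inspect the result, and bail out otherwise. A self-contained version of the superuser half, with an illustrative conninfo string and fprintf standing in for pg_fatal:

#include <stdio.h>
#include <string.h>
#include <libpq-fe.h>

int
main(void)
{
    /* conninfo is illustrative; the real code builds it from cluster settings */
    PGconn     *conn = PQconnectdb("dbname=template1");
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /* pg_roles is used instead of pg_authid so non-superusers can run the probe */
    res = PQexec(conn,
                 "SELECT rolsuper FROM pg_catalog.pg_roles "
                 "WHERE rolname = current_user");

    if (PQresultStatus(res) != PGRES_TUPLES_OK ||
        PQntuples(res) != 1 ||
        strcmp(PQgetvalue(res, 0, 0), "t") != 0)
        fprintf(stderr, "current user is not a superuser\n");
    else
        printf("superuser check passed\n");

    PQclear(res);
    PQfinish(conn);
    return 0;
}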
- */ -static void -check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for contrib/isn with bigint-passing mismatch"); - - if (old_cluster.controldata.float8_pass_by_value == - new_cluster.controldata.float8_pass_by_value) - { - /* no mismatch */ - check_ok(); - return; - } - - snprintf(output_path, sizeof(output_path), - "contrib_isn_and_int8_pass_by_value.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_proname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* Find any functions coming from contrib/isn */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, p.proname " - "FROM pg_catalog.pg_proc p, " - " pg_catalog.pg_namespace n " - "WHERE p.pronamespace = n.oid AND " - " p.probin = '$libdir/isn'"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_proname = PQfnumber(res, "proname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_proname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n" - "bigint data type. Your old and new clusters pass bigint values\n" - "differently so this cluster cannot currently be upgraded. You can\n" - "manually upgrade databases that use \"contrib/isn\" facilities and remove\n" - "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n" - "the problem functions is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} - - -/* - * check_for_reg_data_type_usage() - * pg_upgrade only preserves these system values: - * pg_class.oid - * pg_type.oid - * pg_enum.oid - * - * Many of the reg* data types reference system catalog info that is - * not preserved, and hence these data types cannot be used in user - * tables upgraded by pg_upgrade. - */ -static void -check_for_reg_data_type_usage(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for reg* system OID user data types"); - - snprintf(output_path, sizeof(output_path), "tables_using_reg.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname, - i_attname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* - * While several relkinds don't store any data, e.g. views, they can - * be used to define data types of other columns, so we check all - * relkinds. 
- */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname, a.attname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n, " - " pg_catalog.pg_attribute a " - "WHERE c.oid = a.attrelid AND " - " NOT a.attisdropped AND " - " a.atttypid IN ( " - " 'pg_catalog.regproc'::pg_catalog.regtype, " - " 'pg_catalog.regprocedure'::pg_catalog.regtype, " - " 'pg_catalog.regoper'::pg_catalog.regtype, " - " 'pg_catalog.regoperator'::pg_catalog.regtype, " - /* regclass.oid is preserved, so 'regclass' is OK */ - /* regtype.oid is preserved, so 'regtype' is OK */ - " 'pg_catalog.regconfig'::pg_catalog.regtype, " - " 'pg_catalog.regdictionary'::pg_catalog.regtype) AND " - " c.relnamespace = n.oid AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_attname = PQfnumber(res, "attname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_relname), - PQgetvalue(res, rowno, i_attname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains one of the reg* data types in user tables.\n" - "These data types reference system OIDs that are not preserved by\n" - "pg_upgrade, so this cluster cannot currently be upgraded. You can\n" - "remove the problem tables and restart the upgrade. A list of the problem\n" - "columns is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} - - -static void -get_bin_version(ClusterInfo *cluster) -{ - char cmd[MAXPGPATH], - cmd_output[MAX_STRING]; - FILE *output; - int pre_dot, - post_dot; - - snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir); - - if ((output = popen(cmd, "r")) == NULL || - fgets(cmd_output, sizeof(cmd_output), output) == NULL) - pg_fatal("Could not get pg_ctl version data using %s: %s\n", - cmd, getErrorText(errno)); - - pclose(output); - - /* Remove trailing newline */ - if (strchr(cmd_output, '\n') != NULL) - *strchr(cmd_output, '\n') = '\0'; - - if (sscanf(cmd_output, "%*s %*s %d.%d", &pre_dot, &post_dot) != 2) - pg_fatal("could not get version from %s\n", cmd); - - cluster->bin_version = (pre_dot * 100 + post_dot) * 100; -} - - -/* - * get_canonical_locale_name - * - * Send the locale name to the system, and hope we get back a canonical - * version. This should match the backend's check_locale() function. - */ -static char * -get_canonical_locale_name(int category, const char *locale) -{ - char *save; - char *res; - - /* get the current setting, so we can restore it. */ - save = setlocale(category, NULL); - if (!save) - pg_fatal("failed to get the current locale\n"); - - /* 'save' may be pointing at a modifiable scratch variable, so copy it. */ - save = pg_strdup(save); - - /* set the locale with setlocale, to see if it accepts it. */ - res = setlocale(category, locale); - - if (!res) - pg_fatal("failed to get system locale name for \"%s\"\n", locale); - - res = pg_strdup(res); - - /* restore old value. 
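
get_bin_version() above is essentially popen() plus sscanf(): run "pg_ctl --version", skip the first two words of the banner, and read the major.minor pair. A standalone sketch, assuming pg_ctl is on PATH and with error handling reduced to fprintf:

#include <stdio.h>

int
main(void)
{
    FILE *out = popen("pg_ctl --version", "r");
    char  line[1024];
    int   major, minor;

    if (out == NULL || fgets(line, sizeof(line), out) == NULL)
    {
        fprintf(stderr, "could not run pg_ctl\n");
        return 1;
    }
    pclose(out);

    /* e.g. "pg_ctl (PostgreSQL) 9.4.1" -> skip two words, read "9.4" */
    if (sscanf(line, "%*s %*s %d.%d", &major, &minor) != 2)
    {
        fprintf(stderr, "could not parse version from: %s", line);
        return 1;
    }

    /* same encoding as bin_version above: 9.4 becomes 90400 */
    printf("bin_version = %d\n", (major * 100 + minor) * 100);
    return 0;
}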
*/ - if (!setlocale(category, save)) - pg_fatal("failed to restore old locale \"%s\"\n", save); - - pg_free(save); - - return res; -} diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c deleted file mode 100644 index 2906ccbf8c..0000000000 --- a/contrib/pg_upgrade/controldata.c +++ /dev/null @@ -1,644 +0,0 @@ -/* - * controldata.c - * - * controldata functions - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/controldata.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include <ctype.h> - -/* - * get_control_data() - * - * gets pg_control information in "ctrl". Assumes that bindir and - * datadir are valid absolute paths to postgresql bin and pgdata - * directories respectively *and* pg_resetxlog is version compatible - * with datadir. The main purpose of this function is to get pg_control - * data in a version independent manner. - * - * The approach taken here is to invoke pg_resetxlog with -n option - * and then pipe its output. With little string parsing we get the - * pg_control data. pg_resetxlog cannot be run while the server is running - * so we use pg_controldata; pg_controldata doesn't provide all the fields - * we need to actually perform the upgrade, but it provides enough for - * check mode. We do not implement pg_resetxlog -n because it is hard to - * return valid xid data for a running server. - */ -void -get_control_data(ClusterInfo *cluster, bool live_check) -{ - char cmd[MAXPGPATH]; - char bufin[MAX_STRING]; - FILE *output; - char *p; - bool got_xid = false; - bool got_oid = false; - bool got_nextxlogfile = false; - bool got_multi = false; - bool got_mxoff = false; - bool got_oldestmulti = false; - bool got_log_id = false; - bool got_log_seg = false; - bool got_tli = false; - bool got_align = false; - bool got_blocksz = false; - bool got_largesz = false; - bool got_walsz = false; - bool got_walseg = false; - bool got_ident = false; - bool got_index = false; - bool got_toast = false; - bool got_date_is_int = false; - bool got_float8_pass_by_value = false; - bool got_data_checksum_version = false; - char *lc_collate = NULL; - char *lc_ctype = NULL; - char *lc_monetary = NULL; - char *lc_numeric = NULL; - char *lc_time = NULL; - char *lang = NULL; - char *language = NULL; - char *lc_all = NULL; - char *lc_messages = NULL; - uint32 logid = 0; - uint32 segno = 0; - uint32 tli = 0; - - - /* - * Because we test the pg_resetxlog output as strings, it has to be in - * English. Copied from pg_regress.c. 
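
As the comment above notes, the pg_resetxlog/pg_controldata output is matched as English strings, so the locale environment has to be neutralized before the child runs and restored afterwards. Outside pg_upgrade (which does this through pg_putenv), the same idea with POSIX getenv/setenv/unsetenv looks roughly like this; the pg_controldata path is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    /* remember the old value so it can be restored afterwards */
    const char *old_lc_all = getenv("LC_ALL");
    char       *saved = old_lc_all ? strdup(old_lc_all) : NULL;
    char        line[512];
    FILE       *out;

    /* force untranslated output from the child process */
    unsetenv("LC_ALL");
    setenv("LC_MESSAGES", "C", 1);

    out = popen("pg_controldata /path/to/pgdata", "r");    /* path is illustrative */
    if (out)
    {
        while (fgets(line, sizeof(line), out))
            fputs(line, stdout);        /* English labels can be parsed here */
        pclose(out);
    }

    /* restore the caller's locale environment */
    if (saved)
    {
        setenv("LC_ALL", saved, 1);
        free(saved);
    }
    return 0;
}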
- */ - if (getenv("LC_COLLATE")) - lc_collate = pg_strdup(getenv("LC_COLLATE")); - if (getenv("LC_CTYPE")) - lc_ctype = pg_strdup(getenv("LC_CTYPE")); - if (getenv("LC_MONETARY")) - lc_monetary = pg_strdup(getenv("LC_MONETARY")); - if (getenv("LC_NUMERIC")) - lc_numeric = pg_strdup(getenv("LC_NUMERIC")); - if (getenv("LC_TIME")) - lc_time = pg_strdup(getenv("LC_TIME")); - if (getenv("LANG")) - lang = pg_strdup(getenv("LANG")); - if (getenv("LANGUAGE")) - language = pg_strdup(getenv("LANGUAGE")); - if (getenv("LC_ALL")) - lc_all = pg_strdup(getenv("LC_ALL")); - if (getenv("LC_MESSAGES")) - lc_messages = pg_strdup(getenv("LC_MESSAGES")); - - pg_putenv("LC_COLLATE", NULL); - pg_putenv("LC_CTYPE", NULL); - pg_putenv("LC_MONETARY", NULL); - pg_putenv("LC_NUMERIC", NULL); - pg_putenv("LC_TIME", NULL); - pg_putenv("LANG", -#ifndef WIN32 - NULL); -#else - /* On Windows the default locale cannot be English, so force it */ - "en"); -#endif - pg_putenv("LANGUAGE", NULL); - pg_putenv("LC_ALL", NULL); - pg_putenv("LC_MESSAGES", "C"); - - snprintf(cmd, sizeof(cmd), "\"%s/%s \"%s\"", - cluster->bindir, - live_check ? "pg_controldata\"" : "pg_resetxlog\" -n", - cluster->pgdata); - fflush(stdout); - fflush(stderr); - - if ((output = popen(cmd, "r")) == NULL) - pg_fatal("Could not get control data using %s: %s\n", - cmd, getErrorText(errno)); - - /* Only pre-8.4 has these so if they are not set below we will check later */ - cluster->controldata.lc_collate = NULL; - cluster->controldata.lc_ctype = NULL; - - /* Only in <= 8.3 */ - if (GET_MAJOR_VERSION(cluster->major_version) <= 803) - { - cluster->controldata.float8_pass_by_value = false; - got_float8_pass_by_value = true; - } - - /* Only in <= 9.2 */ - if (GET_MAJOR_VERSION(cluster->major_version) <= 902) - { - cluster->controldata.data_checksum_version = 0; - got_data_checksum_version = true; - } - - /* we have the result of cmd in "output". so parse it line by line now */ - while (fgets(bufin, sizeof(bufin), output)) - { - pg_log(PG_VERBOSE, "%s", bufin); - -#ifdef WIN32 - - /* - * Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does - * work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a - * minor upgrade. - */ - if (GET_MAJOR_VERSION(cluster->major_version) <= 803) - { - for (p = bufin; *p; p++) - if (!isascii(*p)) - pg_fatal("The 8.3 cluster's pg_controldata is incapable of outputting ASCII, even\n" - "with LANG=C. You must upgrade this cluster to a newer version of PostgreSQL\n" - "8.3 to fix this bug. 
PostgreSQL 8.3.7 and later are known to work properly.\n"); - } -#endif - - if ((p = strstr(bufin, "pg_control version number:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: pg_resetxlog problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.ctrl_ver = str2uint(p); - } - else if ((p = strstr(bufin, "Catalog version number:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.cat_ver = str2uint(p); - } - else if ((p = strstr(bufin, "First log segment after reset:")) != NULL) - { - /* Skip the colon and any whitespace after it */ - p = strchr(p, ':'); - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - p = strpbrk(p, "01234567890ABCDEF"); - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - /* Make sure it looks like a valid WAL file name */ - if (strspn(p, "0123456789ABCDEF") != 24) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - strlcpy(cluster->controldata.nextxlogfile, p, 25); - got_nextxlogfile = true; - } - else if ((p = strstr(bufin, "First log file ID after reset:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - logid = str2uint(p); - got_log_id = true; - } - else if ((p = strstr(bufin, "First log file segment after reset:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - segno = str2uint(p); - got_log_seg = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's TimeLineID:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.chkpnt_tli = str2uint(p); - got_tli = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's NextXID:")) != NULL) - { - char *op = strchr(p, '/'); - - if (op == NULL) - op = strchr(p, ':'); - - if (op == NULL || strlen(op) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - op++; /* removing ':' char */ - cluster->controldata.chkpnt_nxtxid = str2uint(op); - got_xid = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's NextOID:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.chkpnt_nxtoid = str2uint(p); - got_oid = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's NextMultiXactId:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.chkpnt_nxtmulti = str2uint(p); - got_multi = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's oldestMultiXid:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.chkpnt_oldstMulti = str2uint(p); - got_oldestmulti = true; - } - else if ((p = strstr(bufin, "Latest checkpoint's NextMultiOffset:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - 
pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.chkpnt_nxtmxoff = str2uint(p); - got_mxoff = true; - } - else if ((p = strstr(bufin, "Maximum data alignment:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.align = str2uint(p); - got_align = true; - } - else if ((p = strstr(bufin, "Database block size:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.blocksz = str2uint(p); - got_blocksz = true; - } - else if ((p = strstr(bufin, "Blocks per segment of large relation:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.largesz = str2uint(p); - got_largesz = true; - } - else if ((p = strstr(bufin, "WAL block size:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.walsz = str2uint(p); - got_walsz = true; - } - else if ((p = strstr(bufin, "Bytes per WAL segment:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.walseg = str2uint(p); - got_walseg = true; - } - else if ((p = strstr(bufin, "Maximum length of identifiers:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.ident = str2uint(p); - got_ident = true; - } - else if ((p = strstr(bufin, "Maximum columns in an index:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.index = str2uint(p); - got_index = true; - } - else if ((p = strstr(bufin, "Maximum size of a TOAST chunk:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.toast = str2uint(p); - got_toast = true; - } - else if ((p = strstr(bufin, "Date/time type storage:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - cluster->controldata.date_is_int = strstr(p, "64-bit integers") != NULL; - got_date_is_int = true; - } - else if ((p = strstr(bufin, "Float8 argument passing:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - /* used later for contrib check */ - cluster->controldata.float8_pass_by_value = strstr(p, "by value") != NULL; - got_float8_pass_by_value = true; - } - else if ((p = strstr(bufin, "checksum")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - /* used later for contrib check */ - cluster->controldata.data_checksum_version = str2uint(p); - 
got_data_checksum_version = true; - } - /* In pre-8.4 only */ - else if ((p = strstr(bufin, "LC_COLLATE:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - /* skip leading spaces and remove trailing newline */ - p += strspn(p, " "); - if (strlen(p) > 0 && *(p + strlen(p) - 1) == '\n') - *(p + strlen(p) - 1) = '\0'; - cluster->controldata.lc_collate = pg_strdup(p); - } - /* In pre-8.4 only */ - else if ((p = strstr(bufin, "LC_CTYPE:")) != NULL) - { - p = strchr(p, ':'); - - if (p == NULL || strlen(p) <= 1) - pg_fatal("%d: controldata retrieval problem\n", __LINE__); - - p++; /* removing ':' char */ - /* skip leading spaces and remove trailing newline */ - p += strspn(p, " "); - if (strlen(p) > 0 && *(p + strlen(p) - 1) == '\n') - *(p + strlen(p) - 1) = '\0'; - cluster->controldata.lc_ctype = pg_strdup(p); - } - } - - if (output) - pclose(output); - - /* - * Restore environment variables - */ - pg_putenv("LC_COLLATE", lc_collate); - pg_putenv("LC_CTYPE", lc_ctype); - pg_putenv("LC_MONETARY", lc_monetary); - pg_putenv("LC_NUMERIC", lc_numeric); - pg_putenv("LC_TIME", lc_time); - pg_putenv("LANG", lang); - pg_putenv("LANGUAGE", language); - pg_putenv("LC_ALL", lc_all); - pg_putenv("LC_MESSAGES", lc_messages); - - pg_free(lc_collate); - pg_free(lc_ctype); - pg_free(lc_monetary); - pg_free(lc_numeric); - pg_free(lc_time); - pg_free(lang); - pg_free(language); - pg_free(lc_all); - pg_free(lc_messages); - - /* - * Before 9.3, pg_resetxlog reported the xlogid and segno of the first log - * file after reset as separate lines. Starting with 9.3, it reports the - * WAL file name. If the old cluster is older than 9.3, we construct the - * WAL file name from the xlogid and segno. 
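
Every branch of the parsing loop above repeats the same three steps: locate the label with strstr, step past the ':' with strchr, and convert the remainder to an unsigned value. Collapsed into one standalone helper (strtoul standing in for pg_upgrade's str2uint):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/*
 * If "line" contains "label", parse the unsigned value after the ':' into
 * *value and return true; otherwise return false.  Condensed from the
 * repeated pattern in get_control_data().
 */
bool
parse_control_line(const char *line, const char *label, unsigned long *value)
{
    const char *p = strstr(line, label);

    if (p == NULL)
        return false;

    p = strchr(p, ':');
    if (p == NULL || p[1] == '\0')
        return false;

    *value = strtoul(p + 1, NULL, 10);
    return true;
}

The real loop additionally keeps one got_* flag per field, which is what lets it report exactly which control-file items were missing once parsing finishes.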
- */ - if (GET_MAJOR_VERSION(cluster->major_version) <= 902) - { - if (got_log_id && got_log_seg) - { - snprintf(cluster->controldata.nextxlogfile, 25, "%08X%08X%08X", - tli, logid, segno); - got_nextxlogfile = true; - } - } - - /* verify that we got all the mandatory pg_control data */ - if (!got_xid || !got_oid || - !got_multi || !got_mxoff || - (!got_oldestmulti && - cluster->controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) || - (!live_check && !got_nextxlogfile) || - !got_tli || - !got_align || !got_blocksz || !got_largesz || !got_walsz || - !got_walseg || !got_ident || !got_index || !got_toast || - !got_date_is_int || !got_float8_pass_by_value || !got_data_checksum_version) - { - pg_log(PG_REPORT, - "The %s cluster lacks some required control information:\n", - CLUSTER_NAME(cluster)); - - if (!got_xid) - pg_log(PG_REPORT, " checkpoint next XID\n"); - - if (!got_oid) - pg_log(PG_REPORT, " latest checkpoint next OID\n"); - - if (!got_multi) - pg_log(PG_REPORT, " latest checkpoint next MultiXactId\n"); - - if (!got_mxoff) - pg_log(PG_REPORT, " latest checkpoint next MultiXactOffset\n"); - - if (!got_oldestmulti && - cluster->controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) - pg_log(PG_REPORT, " latest checkpoint oldest MultiXactId\n"); - - if (!live_check && !got_nextxlogfile) - pg_log(PG_REPORT, " first WAL segment after reset\n"); - - if (!got_tli) - pg_log(PG_REPORT, " latest checkpoint timeline ID\n"); - - if (!got_align) - pg_log(PG_REPORT, " maximum alignment\n"); - - if (!got_blocksz) - pg_log(PG_REPORT, " block size\n"); - - if (!got_largesz) - pg_log(PG_REPORT, " large relation segment size\n"); - - if (!got_walsz) - pg_log(PG_REPORT, " WAL block size\n"); - - if (!got_walseg) - pg_log(PG_REPORT, " WAL segment size\n"); - - if (!got_ident) - pg_log(PG_REPORT, " maximum identifier length\n"); - - if (!got_index) - pg_log(PG_REPORT, " maximum number of indexed columns\n"); - - if (!got_toast) - pg_log(PG_REPORT, " maximum TOAST chunk size\n"); - - if (!got_date_is_int) - pg_log(PG_REPORT, " dates/times are integers?\n"); - - /* value added in Postgres 8.4 */ - if (!got_float8_pass_by_value) - pg_log(PG_REPORT, " float8 argument passing method\n"); - - /* value added in Postgres 9.3 */ - if (!got_data_checksum_version) - pg_log(PG_REPORT, " data checksum version\n"); - - pg_fatal("Cannot continue without required control information, terminating\n"); - } -} - - -/* - * check_control_data() - * - * check to make sure the control data settings are compatible - */ -void -check_control_data(ControlData *oldctrl, - ControlData *newctrl) -{ - if (oldctrl->align == 0 || oldctrl->align != newctrl->align) - pg_fatal("old and new pg_controldata alignments are invalid or do not match\n" - "Likely one cluster is a 32-bit install, the other 64-bit\n"); - - if (oldctrl->blocksz == 0 || oldctrl->blocksz != newctrl->blocksz) - pg_fatal("old and new pg_controldata block sizes are invalid or do not match\n"); - - if (oldctrl->largesz == 0 || oldctrl->largesz != newctrl->largesz) - pg_fatal("old and new pg_controldata maximum relation segement sizes are invalid or do not match\n"); - - if (oldctrl->walsz == 0 || oldctrl->walsz != newctrl->walsz) - pg_fatal("old and new pg_controldata WAL block sizes are invalid or do not match\n"); - - if (oldctrl->walseg == 0 || oldctrl->walseg != newctrl->walseg) - pg_fatal("old and new pg_controldata WAL segment sizes are invalid or do not match\n"); - - if (oldctrl->ident == 0 || oldctrl->ident != newctrl->ident) - pg_fatal("old and new pg_controldata 
maximum identifier lengths are invalid or do not match\n"); - - if (oldctrl->index == 0 || oldctrl->index != newctrl->index) - pg_fatal("old and new pg_controldata maximum indexed columns are invalid or do not match\n"); - - if (oldctrl->toast == 0 || oldctrl->toast != newctrl->toast) - pg_fatal("old and new pg_controldata maximum TOAST chunk sizes are invalid or do not match\n"); - - if (oldctrl->date_is_int != newctrl->date_is_int) - { - pg_log(PG_WARNING, - "\nOld and new pg_controldata date/time storage types do not match.\n"); - - /* - * This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose - */ - pg_fatal("You will need to rebuild the new server with configure option\n" - "--disable-integer-datetimes or get server binaries built with those\n" - "options.\n"); - } - - /* - * We might eventually allow upgrades from checksum to no-checksum - * clusters. - */ - if (oldctrl->data_checksum_version != newctrl->data_checksum_version) - { - pg_fatal("old and new pg_controldata checksum versions are invalid or do not match\n"); - } -} - - -void -disable_old_cluster(void) -{ - char old_path[MAXPGPATH], - new_path[MAXPGPATH]; - - /* rename pg_control so old server cannot be accidentally started */ - prep_status("Adding \".old\" suffix to old global/pg_control"); - - snprintf(old_path, sizeof(old_path), "%s/global/pg_control", old_cluster.pgdata); - snprintf(new_path, sizeof(new_path), "%s/global/pg_control.old", old_cluster.pgdata); - if (pg_mv_file(old_path, new_path) != 0) - pg_fatal("Unable to rename %s to %s.\n", old_path, new_path); - check_ok(); - - pg_log(PG_REPORT, "\n" - "If you want to start the old cluster, you will need to remove\n" - "the \".old\" suffix from %s/global/pg_control.old.\n" - "Because \"link\" mode was used, the old cluster cannot be safely\n" - "started once the new cluster has been started.\n\n", old_cluster.pgdata); -} diff --git a/contrib/pg_upgrade/dump.c b/contrib/pg_upgrade/dump.c deleted file mode 100644 index 6c7661049c..0000000000 --- a/contrib/pg_upgrade/dump.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * dump.c - * - * dump functions - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/dump.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include <sys/types.h> - -void -generate_old_dump(void) -{ - int dbnum; - mode_t old_umask; - - prep_status("Creating dump of global objects"); - - /* run new pg_dumpall binary for globals */ - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_dumpall\" %s --schema-only --globals-only " - "--quote-all-identifiers --binary-upgrade %s -f %s", - new_cluster.bindir, cluster_conn_opts(&old_cluster), - log_opts.verbose ? "--verbose" : "", - GLOBALS_DUMP_FILE); - check_ok(); - - prep_status("Creating dump of database schemas\n"); - - /* - * Set umask for this function, all functions it calls, and all - * subprocesses/threads it creates. We can't use fopen_priv() as Windows - * uses threads and umask is process-global. 
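
disable_old_cluster() above keeps the old server from being started by accident simply by renaming its control file; pg_mv_file() corresponds to rename() on Unix. Stripped of pg_upgrade's status reporting, and with illustrative paths, the core is just:

#include <stdio.h>

int
main(void)
{
    /* paths are illustrative; the real code derives them from old_cluster.pgdata */
    const char *old_path = "/data/old/global/pg_control";
    const char *new_path = "/data/old/global/pg_control.old";

    if (rename(old_path, new_path) != 0)
    {
        perror("rename");
        return 1;
    }
    printf("old cluster cannot start until the \".old\" suffix is removed\n");
    return 0;
}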
- */ - old_umask = umask(S_IRWXG | S_IRWXO); - - /* create per-db dump files */ - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - char sql_file_name[MAXPGPATH], - log_file_name[MAXPGPATH]; - DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; - - pg_log(PG_STATUS, "%s", old_db->db_name); - snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); - snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); - - parallel_exec_prog(log_file_name, NULL, - "\"%s/pg_dump\" %s --schema-only --quote-all-identifiers " - "--binary-upgrade --format=custom %s --file=\"%s\" \"%s\"", - new_cluster.bindir, cluster_conn_opts(&old_cluster), - log_opts.verbose ? "--verbose" : "", - sql_file_name, old_db->db_name); - } - - /* reap all children */ - while (reap_child(true) == true) - ; - - umask(old_umask); - - end_progress_output(); - check_ok(); -} diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c deleted file mode 100644 index 6c217c902d..0000000000 --- a/contrib/pg_upgrade/exec.c +++ /dev/null @@ -1,378 +0,0 @@ -/* - * exec.c - * - * execution functions - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/exec.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include <fcntl.h> -#include <sys/types.h> - -static void check_data_dir(const char *pg_data); -static void check_bin_dir(ClusterInfo *cluster); -static void validate_exec(const char *dir, const char *cmdName); - -#ifdef WIN32 -static int win32_check_directory_write_permissions(void); -#endif - - -/* - * exec_prog() - * Execute an external program with stdout/stderr redirected, and report - * errors - * - * Formats a command from the given argument list, logs it to the log file, - * and attempts to execute that command. If the command executes - * successfully, exec_prog() returns true. - * - * If the command fails, an error message is saved to the specified log_file. - * If throw_error is true, this raises a PG_FATAL error and pg_upgrade - * terminates; otherwise it is just reported as PG_REPORT and exec_prog() - * returns false. - * - * The code requires it be called first from the primary thread on Windows. - */ -bool -exec_prog(const char *log_file, const char *opt_log_file, - bool throw_error, const char *fmt,...) -{ - int result = 0; - int written; - -#define MAXCMDLEN (2 * MAXPGPATH) - char cmd[MAXCMDLEN]; - FILE *log; - va_list ap; - -#ifdef WIN32 - static DWORD mainThreadId = 0; - - /* We assume we are called from the primary thread first */ - if (mainThreadId == 0) - mainThreadId = GetCurrentThreadId(); -#endif - - written = 0; - va_start(ap, fmt); - written += vsnprintf(cmd + written, MAXCMDLEN - written, fmt, ap); - va_end(ap); - if (written >= MAXCMDLEN) - pg_fatal("command too long\n"); - written += snprintf(cmd + written, MAXCMDLEN - written, - " >> \"%s\" 2>&1", log_file); - if (written >= MAXCMDLEN) - pg_fatal("command too long\n"); - - pg_log(PG_VERBOSE, "%s\n", cmd); - -#ifdef WIN32 - - /* - * For some reason, Windows issues a file-in-use error if we write data to - * the log file from a non-primary thread just before we create a - * subprocess that also writes to the same log file. One fix is to sleep - * for 100ms. A cleaner fix is to write to the log file _after_ the - * subprocess has completed, so we do this only when writing from a - * non-primary thread. fflush(), running system() twice, and pre-creating - * the file do not see to help. 
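
generate_old_dump() above launches one schema-only pg_dump per database through parallel_exec_prog() and then loops on reap_child() until every worker finishes. Those helpers are pg_upgrade internals (thread-based on Windows); a generic POSIX sketch of the same launch-then-reap pattern, with an illustrative database list and command line, is:

#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
    /* db_names is illustrative; the real loop walks old_cluster.dbarr */
    const char *db_names[] = {"postgres", "template1"};
    int         ndbs = 2;
    int         i;

    for (i = 0; i < ndbs; i++)
    {
        pid_t pid = fork();

        if (pid == 0)
        {
            /* child: run one per-database dump, then exit with its status */
            char cmd[1024];

            snprintf(cmd, sizeof(cmd),
                     "pg_dump --schema-only --binary-upgrade "
                     "--format=custom --file=\"db_%d.custom\" \"%s\"",
                     i, db_names[i]);
            _exit(system(cmd) == 0 ? 0 : 1);
        }
        else if (pid < 0)
        {
            perror("fork");
            return 1;
        }
    }

    /* reap all children, mirroring the reap_child() loop above */
    while (wait(NULL) > 0)
        ;

    return 0;
}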
- */ - if (mainThreadId != GetCurrentThreadId()) - result = system(cmd); -#endif - - log = fopen(log_file, "a"); - -#ifdef WIN32 - { - /* - * "pg_ctl -w stop" might have reported that the server has stopped - * because the postmaster.pid file has been removed, but "pg_ctl -w - * start" might still be in the process of closing and might still be - * holding its stdout and -l log file descriptors open. Therefore, - * try to open the log file a few more times. - */ - int iter; - - for (iter = 0; iter < 4 && log == NULL; iter++) - { - pg_usleep(1000000); /* 1 sec */ - log = fopen(log_file, "a"); - } - } -#endif - - if (log == NULL) - pg_fatal("cannot write to log file %s\n", log_file); - -#ifdef WIN32 - /* Are we printing "command:" before its output? */ - if (mainThreadId == GetCurrentThreadId()) - fprintf(log, "\n\n"); -#endif - fprintf(log, "command: %s\n", cmd); -#ifdef WIN32 - /* Are we printing "command:" after its output? */ - if (mainThreadId != GetCurrentThreadId()) - fprintf(log, "\n\n"); -#endif - - /* - * In Windows, we must close the log file at this point so the file is not - * open while the command is running, or we get a share violation. - */ - fclose(log); - -#ifdef WIN32 - /* see comment above */ - if (mainThreadId == GetCurrentThreadId()) -#endif - result = system(cmd); - - if (result != 0) - { - /* we might be in on a progress status line, so go to the next line */ - report_status(PG_REPORT, "\n*failure*"); - fflush(stdout); - - pg_log(PG_VERBOSE, "There were problems executing \"%s\"\n", cmd); - if (opt_log_file) - pg_log(throw_error ? PG_FATAL : PG_REPORT, - "Consult the last few lines of \"%s\" or \"%s\" for\n" - "the probable cause of the failure.\n", - log_file, opt_log_file); - else - pg_log(throw_error ? PG_FATAL : PG_REPORT, - "Consult the last few lines of \"%s\" for\n" - "the probable cause of the failure.\n", - log_file); - } - -#ifndef WIN32 - - /* - * We can't do this on Windows because it will keep the "pg_ctl start" - * output filename open until the server stops, so we do the \n\n above on - * that platform. We use a unique filename for "pg_ctl start" that is - * never reused while the server is running, so it works fine. We could - * log these commands to a third file, but that just adds complexity. - */ - if ((log = fopen(log_file, "a")) == NULL) - pg_fatal("cannot write to log file %s\n", log_file); - fprintf(log, "\n\n"); - fclose(log); -#endif - - return result == 0; -} - - -/* - * pid_lock_file_exists() - * - * Checks whether the postmaster.pid file exists. - */ -bool -pid_lock_file_exists(const char *datadir) -{ - char path[MAXPGPATH]; - int fd; - - snprintf(path, sizeof(path), "%s/postmaster.pid", datadir); - - if ((fd = open(path, O_RDONLY, 0)) < 0) - { - /* ENOTDIR means we will throw a more useful error later */ - if (errno != ENOENT && errno != ENOTDIR) - pg_fatal("could not open file \"%s\" for reading: %s\n", - path, getErrorText(errno)); - - return false; - } - - close(fd); - return true; -} - - -/* - * verify_directories() - * - * does all the hectic work of verifying directories and executables - * of old and new server. 
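
Once the Windows-specific log-file juggling is set aside, exec_prog() reduces to: format the command from varargs, append ">> logfile 2>&1", note the command in the log, run it with system(), and treat a nonzero status as failure. A compact sketch of that shape, without the retry loops or thread checks:

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAXCMD 4096

/* Run a formatted command, sending its stdout/stderr to log_file. */
bool
run_logged(const char *log_file, const char *fmt, ...)
{
    char     cmd[MAXCMD];
    int      len;
    va_list  ap;
    FILE    *log;

    va_start(ap, fmt);
    len = vsnprintf(cmd, sizeof(cmd), fmt, ap);
    va_end(ap);

    if (len < 0 || (size_t) len >= sizeof(cmd) - 64)
        return false;                   /* command too long */

    /* redirect the child's output into the log file */
    snprintf(cmd + len, sizeof(cmd) - len, " >> \"%s\" 2>&1", log_file);

    if ((log = fopen(log_file, "a")) != NULL)
    {
        fprintf(log, "command: %s\n", cmd);
        fclose(log);
    }

    return system(cmd) == 0;
}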
- * - * NOTE: May update the values of all parameters - */ -void -verify_directories(void) -{ -#ifndef WIN32 - if (access(".", R_OK | W_OK | X_OK) != 0) -#else - if (win32_check_directory_write_permissions() != 0) -#endif - pg_fatal("You must have read and write access in the current directory.\n"); - - check_bin_dir(&old_cluster); - check_data_dir(old_cluster.pgdata); - check_bin_dir(&new_cluster); - check_data_dir(new_cluster.pgdata); -} - - -#ifdef WIN32 -/* - * win32_check_directory_write_permissions() - * - * access() on WIN32 can't check directory permissions, so we have to - * optionally create, then delete a file to check. - * http://msdn.microsoft.com/en-us/library/1w06ktdy%28v=vs.80%29.aspx - */ -static int -win32_check_directory_write_permissions(void) -{ - int fd; - - /* - * We open a file we would normally create anyway. We do this even in - * 'check' mode, which isn't ideal, but this is the best we can do. - */ - if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0) - return -1; - close(fd); - - return unlink(GLOBALS_DUMP_FILE); -} -#endif - - -/* - * check_data_dir() - * - * This function validates the given cluster directory - we search for a - * small set of subdirectories that we expect to find in a valid $PGDATA - * directory. If any of the subdirectories are missing (or secured against - * us) we display an error message and exit() - * - */ -static void -check_data_dir(const char *pg_data) -{ - char subDirName[MAXPGPATH]; - int subdirnum; - - /* start check with top-most directory */ - const char *requiredSubdirs[] = {"", "base", "global", "pg_clog", - "pg_multixact", "pg_subtrans", "pg_tblspc", "pg_twophase", - "pg_xlog"}; - - for (subdirnum = 0; - subdirnum < sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]); - ++subdirnum) - { - struct stat statBuf; - - snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data, - /* Win32 can't stat() a directory with a trailing slash. */ - *requiredSubdirs[subdirnum] ? "/" : "", - requiredSubdirs[subdirnum]); - - if (stat(subDirName, &statBuf) != 0) - report_status(PG_FATAL, "check for \"%s\" failed: %s\n", - subDirName, getErrorText(errno)); - else if (!S_ISDIR(statBuf.st_mode)) - report_status(PG_FATAL, "%s is not a directory\n", - subDirName); - } -} - - -/* - * check_bin_dir() - * - * This function searches for the executables that we expect to find - * in the binaries directory. If we find that a required executable - * is missing (or secured against us), we display an error message and - * exit(). 
- */ -static void -check_bin_dir(ClusterInfo *cluster) -{ - struct stat statBuf; - - /* check bindir */ - if (stat(cluster->bindir, &statBuf) != 0) - report_status(PG_FATAL, "check for \"%s\" failed: %s\n", - cluster->bindir, getErrorText(errno)); - else if (!S_ISDIR(statBuf.st_mode)) - report_status(PG_FATAL, "%s is not a directory\n", - cluster->bindir); - - validate_exec(cluster->bindir, "postgres"); - validate_exec(cluster->bindir, "pg_ctl"); - validate_exec(cluster->bindir, "pg_resetxlog"); - if (cluster == &new_cluster) - { - /* these are only needed in the new cluster */ - validate_exec(cluster->bindir, "psql"); - validate_exec(cluster->bindir, "pg_dumpall"); - } -} - - -/* - * validate_exec() - * - * validate "path" as an executable file - */ -static void -validate_exec(const char *dir, const char *cmdName) -{ - char path[MAXPGPATH]; - struct stat buf; - - snprintf(path, sizeof(path), "%s/%s", dir, cmdName); - -#ifdef WIN32 - /* Windows requires a .exe suffix for stat() */ - if (strlen(path) <= strlen(EXE_EXT) || - pg_strcasecmp(path + strlen(path) - strlen(EXE_EXT), EXE_EXT) != 0) - strlcat(path, EXE_EXT, sizeof(path)); -#endif - - /* - * Ensure that the file exists and is a regular file. - */ - if (stat(path, &buf) < 0) - pg_fatal("check for \"%s\" failed: %s\n", - path, getErrorText(errno)); - else if (!S_ISREG(buf.st_mode)) - pg_fatal("check for \"%s\" failed: not an executable file\n", - path); - - /* - * Ensure that the file is both executable and readable (required for - * dynamic loading). - */ -#ifndef WIN32 - if (access(path, R_OK) != 0) -#else - if ((buf.st_mode & S_IRUSR) == 0) -#endif - pg_fatal("check for \"%s\" failed: cannot read file (permission denied)\n", - path); - -#ifndef WIN32 - if (access(path, X_OK) != 0) -#else - if ((buf.st_mode & S_IXUSR) == 0) -#endif - pg_fatal("check for \"%s\" failed: cannot execute (permission denied)\n", - path); -} diff --git a/contrib/pg_upgrade/file.c b/contrib/pg_upgrade/file.c deleted file mode 100644 index ab9d1edcb6..0000000000 --- a/contrib/pg_upgrade/file.c +++ /dev/null @@ -1,250 +0,0 @@ -/* - * file.c - * - * file system operations - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/file.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include <fcntl.h> - - - -#ifndef WIN32 -static int copy_file(const char *fromfile, const char *tofile, bool force); -#else -static int win32_pghardlink(const char *src, const char *dst); -#endif - - -/* - * copyAndUpdateFile() - * - * Copies a relation file from src to dst. If pageConverter is non-NULL, this function - * uses that pageConverter to do a page-by-page conversion. - */ -const char * -copyAndUpdateFile(pageCnvCtx *pageConverter, - const char *src, const char *dst, bool force) -{ - if (pageConverter == NULL) - { - if (pg_copy_file(src, dst, force) == -1) - return getErrorText(errno); - else - return NULL; - } - else - { - /* - * We have a pageConverter object - that implies that the - * PageLayoutVersion differs between the two clusters so we have to - * perform a page-by-page conversion. - * - * If the pageConverter can convert the entire file at once, invoke - * that plugin function, otherwise, read each page in the relation - * file and call the convertPage plugin function. 
- */ - -#ifdef PAGE_CONVERSION - if (pageConverter->convertFile) - return pageConverter->convertFile(pageConverter->pluginData, - dst, src); - else -#endif - { - int src_fd; - int dstfd; - char buf[BLCKSZ]; - ssize_t bytesRead; - const char *msg = NULL; - - if ((src_fd = open(src, O_RDONLY, 0)) < 0) - return "could not open source file"; - - if ((dstfd = open(dst, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR)) < 0) - { - close(src_fd); - return "could not create destination file"; - } - - while ((bytesRead = read(src_fd, buf, BLCKSZ)) == BLCKSZ) - { -#ifdef PAGE_CONVERSION - if ((msg = pageConverter->convertPage(pageConverter->pluginData, buf, buf)) != NULL) - break; -#endif - if (write(dstfd, buf, BLCKSZ) != BLCKSZ) - { - msg = "could not write new page to destination"; - break; - } - } - - close(src_fd); - close(dstfd); - - if (msg) - return msg; - else if (bytesRead != 0) - return "found partial page in source file"; - else - return NULL; - } - } -} - - -/* - * linkAndUpdateFile() - * - * Creates a hard link between the given relation files. We use - * this function to perform a true in-place update. If the on-disk - * format of the new cluster is bit-for-bit compatible with the on-disk - * format of the old cluster, we can simply link each relation - * instead of copying the data from the old cluster to the new cluster. - */ -const char * -linkAndUpdateFile(pageCnvCtx *pageConverter, - const char *src, const char *dst) -{ - if (pageConverter != NULL) - return "Cannot in-place update this cluster, page-by-page conversion is required"; - - if (pg_link_file(src, dst) == -1) - return getErrorText(errno); - else - return NULL; -} - - -#ifndef WIN32 -static int -copy_file(const char *srcfile, const char *dstfile, bool force) -{ -#define COPY_BUF_SIZE (50 * BLCKSZ) - - int src_fd; - int dest_fd; - char *buffer; - int ret = 0; - int save_errno = 0; - - if ((srcfile == NULL) || (dstfile == NULL)) - { - errno = EINVAL; - return -1; - } - - if ((src_fd = open(srcfile, O_RDONLY, 0)) < 0) - return -1; - - if ((dest_fd = open(dstfile, O_RDWR | O_CREAT | (force ? 
0 : O_EXCL), S_IRUSR | S_IWUSR)) < 0) - { - save_errno = errno; - - if (src_fd != 0) - close(src_fd); - - errno = save_errno; - return -1; - } - - buffer = (char *) pg_malloc(COPY_BUF_SIZE); - - /* perform data copying i.e read src source, write to destination */ - while (true) - { - ssize_t nbytes = read(src_fd, buffer, COPY_BUF_SIZE); - - if (nbytes < 0) - { - save_errno = errno; - ret = -1; - break; - } - - if (nbytes == 0) - break; - - errno = 0; - - if (write(dest_fd, buffer, nbytes) != nbytes) - { - /* if write didn't set errno, assume problem is no disk space */ - if (errno == 0) - errno = ENOSPC; - save_errno = errno; - ret = -1; - break; - } - } - - pg_free(buffer); - - if (src_fd != 0) - close(src_fd); - - if (dest_fd != 0) - close(dest_fd); - - if (save_errno != 0) - errno = save_errno; - - return ret; -} -#endif - - -void -check_hard_link(void) -{ - char existing_file[MAXPGPATH]; - char new_link_file[MAXPGPATH]; - - snprintf(existing_file, sizeof(existing_file), "%s/PG_VERSION", old_cluster.pgdata); - snprintf(new_link_file, sizeof(new_link_file), "%s/PG_VERSION.linktest", new_cluster.pgdata); - unlink(new_link_file); /* might fail */ - - if (pg_link_file(existing_file, new_link_file) == -1) - { - pg_fatal("Could not create hard link between old and new data directories: %s\n" - "In link mode the old and new data directories must be on the same file system volume.\n", - getErrorText(errno)); - } - unlink(new_link_file); -} - -#ifdef WIN32 -static int -win32_pghardlink(const char *src, const char *dst) -{ - /* - * CreateHardLinkA returns zero for failure - * http://msdn.microsoft.com/en-us/library/aa363860(VS.85).aspx - */ - if (CreateHardLinkA(dst, src, NULL) == 0) - return -1; - else - return 0; -} -#endif - - -/* fopen() file with no group/other permissions */ -FILE * -fopen_priv(const char *path, const char *mode) -{ - mode_t old_umask = umask(S_IRWXG | S_IRWXO); - FILE *fp; - - fp = fopen(path, mode); - umask(old_umask); - - return fp; -} diff --git a/contrib/pg_upgrade/function.c b/contrib/pg_upgrade/function.c deleted file mode 100644 index f2cd4716c7..0000000000 --- a/contrib/pg_upgrade/function.c +++ /dev/null @@ -1,353 +0,0 @@ -/* - * function.c - * - * server-side function support - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/function.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include "access/transam.h" - -#define PG_UPGRADE_SUPPORT "$libdir/pg_upgrade_support" - -/* - * install_support_functions_in_new_db() - * - * pg_upgrade requires some support functions that enable it to modify - * backend behavior. 
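
check_hard_link() above verifies that link mode is feasible by creating and immediately removing a trial hard link between the two data directories; pg_link_file() corresponds to link() on Unix. A bare-bones equivalent with illustrative paths:

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    /* illustrative paths; the real test links PG_VERSION across the clusters */
    const char *existing_file = "/data/old/PG_VERSION";
    const char *new_link_file = "/data/new/PG_VERSION.linktest";

    unlink(new_link_file);      /* ignore failure; the file may not exist */

    if (link(existing_file, new_link_file) != 0)
    {
        perror("link");
        fprintf(stderr, "old and new data directories must be on the same "
                "file system for link mode\n");
        return 1;
    }

    unlink(new_link_file);
    return 0;
}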
- */ -void -install_support_functions_in_new_db(const char *db_name) -{ - PGconn *conn = connectToServer(&new_cluster, db_name); - - /* suppress NOTICE of dropped objects */ - PQclear(executeQueryOrDie(conn, - "SET client_min_messages = warning;")); - PQclear(executeQueryOrDie(conn, - "DROP SCHEMA IF EXISTS binary_upgrade CASCADE;")); - PQclear(executeQueryOrDie(conn, - "RESET client_min_messages;")); - - PQclear(executeQueryOrDie(conn, - "CREATE SCHEMA binary_upgrade;")); - - PQclear(executeQueryOrDie(conn, - "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_pg_type_oid(OID) " - "RETURNS VOID " - "AS '$libdir/pg_upgrade_support' " - "LANGUAGE C STRICT;")); - PQclear(executeQueryOrDie(conn, - "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_array_pg_type_oid(OID) " - "RETURNS VOID " - "AS '$libdir/pg_upgrade_support' " - "LANGUAGE C STRICT;")); - PQclear(executeQueryOrDie(conn, - "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_toast_pg_type_oid(OID) " - "RETURNS VOID " - "AS '$libdir/pg_upgrade_support' " - "LANGUAGE C STRICT;")); - PQclear(executeQueryOrDie(conn, - "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_heap_pg_class_oid(OID) " - "RETURNS VOID " - "AS '$libdir/pg_upgrade_support' " - "LANGUAGE C STRICT;")); - PQclear(executeQueryOrDie(conn, - "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_index_pg_class_oid(OID) " - "RETURNS VOID " - "AS '$libdir/pg_upgrade_support' " - "LANGUAGE C STRICT;")); - PQclear(executeQueryOrDie(conn, - "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_toast_pg_class_oid(OID) " - "RETURNS VOID " - "AS '$libdir/pg_upgrade_support' " - "LANGUAGE C STRICT;")); - PQclear(executeQueryOrDie(conn, - "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_pg_enum_oid(OID) " - "RETURNS VOID " - "AS '$libdir/pg_upgrade_support' " - "LANGUAGE C STRICT;")); - PQclear(executeQueryOrDie(conn, - "CREATE OR REPLACE FUNCTION " - "binary_upgrade.set_next_pg_authid_oid(OID) " - "RETURNS VOID " - "AS '$libdir/pg_upgrade_support' " - "LANGUAGE C STRICT;")); - PQclear(executeQueryOrDie(conn, - "CREATE OR REPLACE FUNCTION " - "binary_upgrade.create_empty_extension(text, text, bool, text, oid[], text[], text[]) " - "RETURNS VOID " - "AS '$libdir/pg_upgrade_support' " - "LANGUAGE C;")); - PQfinish(conn); -} - - -void -uninstall_support_functions_from_new_cluster(void) -{ - int dbnum; - - prep_status("Removing support functions from new cluster"); - - for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) - { - DbInfo *new_db = &new_cluster.dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(&new_cluster, new_db->db_name); - - /* suppress NOTICE of dropped objects */ - PQclear(executeQueryOrDie(conn, - "SET client_min_messages = warning;")); - PQclear(executeQueryOrDie(conn, - "DROP SCHEMA binary_upgrade CASCADE;")); - PQclear(executeQueryOrDie(conn, - "RESET client_min_messages;")); - PQfinish(conn); - } - check_ok(); -} - - -/* - * get_loadable_libraries() - * - * Fetch the names of all old libraries containing C-language functions. - * We will later check that they all exist in the new installation. 
- */ -void -get_loadable_libraries(void) -{ - PGresult **ress; - int totaltups; - int dbnum; - bool found_public_plpython_handler = false; - - ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *)); - totaltups = 0; - - /* Fetch all library names, removing duplicates within each DB */ - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - DbInfo *active_db = &old_cluster.dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(&old_cluster, active_db->db_name); - - /* - * Fetch all libraries referenced in this DB. We can't exclude the - * "pg_catalog" schema because, while such functions are not - * explicitly dumped by pg_dump, they do reference implicit objects - * that pg_dump does dump, e.g. CREATE LANGUAGE plperl. - */ - ress[dbnum] = executeQueryOrDie(conn, - "SELECT DISTINCT probin " - "FROM pg_catalog.pg_proc " - "WHERE prolang = 13 /* C */ AND " - "probin IS NOT NULL AND " - "oid >= %u;", - FirstNormalObjectId); - totaltups += PQntuples(ress[dbnum]); - - /* - * Systems that install plpython before 8.1 have - * plpython_call_handler() defined in the "public" schema, causing - * pg_dumpall to dump it. However that function still references - * "plpython" (no "2"), so it throws an error on restore. This code - * checks for the problem function, reports affected databases to the - * user and explains how to remove them. 8.1 git commit: - * e0dedd0559f005d60c69c9772163e69c204bac69 - * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php - * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php - */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 901) - { - PGresult *res; - - res = executeQueryOrDie(conn, - "SELECT 1 " - "FROM pg_catalog.pg_proc JOIN pg_namespace " - " ON pronamespace = pg_namespace.oid " - "WHERE proname = 'plpython_call_handler' AND " - "nspname = 'public' AND " - "prolang = 13 /* C */ AND " - "probin = '$libdir/plpython' AND " - "pg_proc.oid >= %u;", - FirstNormalObjectId); - if (PQntuples(res) > 0) - { - if (!found_public_plpython_handler) - { - pg_log(PG_WARNING, - "\nThe old cluster has a \"plpython_call_handler\" function defined\n" - "in the \"public\" schema which is a duplicate of the one defined\n" - "in the \"pg_catalog\" schema. You can confirm this by executing\n" - "in psql:\n" - "\n" - " \\df *.plpython_call_handler\n" - "\n" - "The \"public\" schema version of this function was created by a\n" - "pre-8.1 install of plpython, and must be removed for pg_upgrade\n" - "to complete because it references a now-obsolete \"plpython\"\n" - "shared object file. You can remove the \"public\" schema version\n" - "of this function by running the following command:\n" - "\n" - " DROP FUNCTION public.plpython_call_handler()\n" - "\n" - "in each affected database:\n" - "\n"); - } - pg_log(PG_WARNING, " %s\n", active_db->db_name); - found_public_plpython_handler = true; - } - PQclear(res); - } - - PQfinish(conn); - } - - if (found_public_plpython_handler) - pg_fatal("Remove the problem functions from the old cluster to continue.\n"); - - totaltups++; /* reserve for pg_upgrade_support */ - - /* Allocate what's certainly enough space */ - os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *)); - - /* - * Now remove duplicates across DBs. This is pretty inefficient code, but - * there probably aren't enough entries to matter. 
- */ - totaltups = 0; - os_info.libraries[totaltups++] = pg_strdup(PG_UPGRADE_SUPPORT); - - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - PGresult *res = ress[dbnum]; - int ntups; - int rowno; - - ntups = PQntuples(res); - for (rowno = 0; rowno < ntups; rowno++) - { - char *lib = PQgetvalue(res, rowno, 0); - bool dup = false; - int n; - - for (n = 0; n < totaltups; n++) - { - if (strcmp(lib, os_info.libraries[n]) == 0) - { - dup = true; - break; - } - } - if (!dup) - os_info.libraries[totaltups++] = pg_strdup(lib); - } - - PQclear(res); - } - - os_info.num_libraries = totaltups; - - pg_free(ress); -} - - -/* - * check_loadable_libraries() - * - * Check that the new cluster contains all required libraries. - * We do this by actually trying to LOAD each one, thereby testing - * compatibility as well as presence. - */ -void -check_loadable_libraries(void) -{ - PGconn *conn = connectToServer(&new_cluster, "template1"); - int libnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for presence of required libraries"); - - snprintf(output_path, sizeof(output_path), "loadable_libraries.txt"); - - for (libnum = 0; libnum < os_info.num_libraries; libnum++) - { - char *lib = os_info.libraries[libnum]; - int llen = strlen(lib); - char cmd[7 + 2 * MAXPGPATH + 1]; - PGresult *res; - - /* - * In Postgres 9.0, Python 3 support was added, and to do that, a - * plpython2u language was created with library name plpython2.so as a - * symbolic link to plpython.so. In Postgres 9.1, only the - * plpython2.so library was created, and both plpythonu and plpython2u - * pointing to it. For this reason, any reference to library name - * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in - * the new cluster. - * - * For this case, we could check pg_pltemplate, but that only works - * for languages, and does not help with function shared objects, so - * we just do a general fix. - */ - if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 && - strcmp(lib, "$libdir/plpython") == 0) - { - lib = "$libdir/plpython2"; - llen = strlen(lib); - } - - strcpy(cmd, "LOAD '"); - PQescapeStringConn(conn, cmd + strlen(cmd), lib, llen, NULL); - strcat(cmd, "'"); - - res = PQexec(conn, cmd); - - if (PQresultStatus(res) != PGRES_COMMAND_OK) - { - found = true; - - /* exit and report missing support library with special message */ - if (strcmp(lib, PG_UPGRADE_SUPPORT) == 0) - pg_fatal("The pg_upgrade_support module must be created and installed in the new cluster.\n"); - - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); - fprintf(script, "Could not load library \"%s\"\n%s\n", - lib, - PQerrorMessage(conn)); - } - - PQclear(res); - } - - PQfinish(conn); - - if (found) - { - fclose(script); - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation references loadable libraries that are missing from the\n" - "new installation. You can add these libraries to the new installation,\n" - "or remove the functions using them from the old installation. 
A list of\n" - "problem libraries is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} diff --git a/contrib/pg_upgrade/info.c b/contrib/pg_upgrade/info.c deleted file mode 100644 index d2968b479a..0000000000 --- a/contrib/pg_upgrade/info.c +++ /dev/null @@ -1,486 +0,0 @@ -/* - * info.c - * - * information support functions - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/info.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include "access/transam.h" - - -static void create_rel_filename_map(const char *old_data, const char *new_data, - const DbInfo *old_db, const DbInfo *new_db, - const RelInfo *old_rel, const RelInfo *new_rel, - FileNameMap *map); -static void free_db_and_rel_infos(DbInfoArr *db_arr); -static void get_db_infos(ClusterInfo *cluster); -static void get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo); -static void free_rel_infos(RelInfoArr *rel_arr); -static void print_db_infos(DbInfoArr *dbinfo); -static void print_rel_infos(RelInfoArr *rel_arr); - - -/* - * gen_db_file_maps() - * - * generates database mappings for "old_db" and "new_db". Returns a malloc'ed - * array of mappings. nmaps is a return parameter which refers to the number - * mappings. - */ -FileNameMap * -gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, - int *nmaps, const char *old_pgdata, const char *new_pgdata) -{ - FileNameMap *maps; - int relnum; - int num_maps = 0; - - maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) * - old_db->rel_arr.nrels); - - for (relnum = 0; relnum < Min(old_db->rel_arr.nrels, new_db->rel_arr.nrels); - relnum++) - { - RelInfo *old_rel = &old_db->rel_arr.rels[relnum]; - RelInfo *new_rel = &new_db->rel_arr.rels[relnum]; - - if (old_rel->reloid != new_rel->reloid) - pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n", - old_db->db_name, old_rel->reloid, new_rel->reloid); - - /* - * TOAST table names initially match the heap pg_class oid. In - * pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, TOAST - * table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >= - * 9.0, TOAST relation names always use heap table oids, hence we - * cannot check relation names when upgrading from pre-9.0. Clusters - * upgraded to 9.0 will get matching TOAST names. If index names don't - * match primary key constraint names, this will fail because pg_dump - * dumps constraint names and pg_upgrade checks index names. - */ - if (strcmp(old_rel->nspname, new_rel->nspname) != 0 || - ((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 || - strcmp(old_rel->nspname, "pg_toast") != 0) && - strcmp(old_rel->relname, new_rel->relname) != 0)) - pg_fatal("Mismatch of relation names in database \"%s\": " - "old name \"%s.%s\", new name \"%s.%s\"\n", - old_db->db_name, old_rel->nspname, old_rel->relname, - new_rel->nspname, new_rel->relname); - - create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db, - old_rel, new_rel, maps + num_maps); - num_maps++; - } - - /* - * Do this check after the loop so hopefully we will produce a clearer - * error above - */ - if (old_db->rel_arr.nrels != new_db->rel_arr.nrels) - pg_fatal("old and new databases \"%s\" have a different number of relations\n", - old_db->db_name); - - *nmaps = num_maps; - return maps; -} - - -/* - * create_rel_filename_map() - * - * fills a file node map structure and returns it in "map". 
- */ -static void -create_rel_filename_map(const char *old_data, const char *new_data, - const DbInfo *old_db, const DbInfo *new_db, - const RelInfo *old_rel, const RelInfo *new_rel, - FileNameMap *map) -{ - if (strlen(old_rel->tablespace) == 0) - { - /* - * relation belongs to the default tablespace, hence relfiles should - * exist in the data directories. - */ - map->old_tablespace = old_data; - map->new_tablespace = new_data; - map->old_tablespace_suffix = "/base"; - map->new_tablespace_suffix = "/base"; - } - else - { - /* relation belongs to a tablespace, so use the tablespace location */ - map->old_tablespace = old_rel->tablespace; - map->new_tablespace = new_rel->tablespace; - map->old_tablespace_suffix = old_cluster.tablespace_suffix; - map->new_tablespace_suffix = new_cluster.tablespace_suffix; - } - - map->old_db_oid = old_db->db_oid; - map->new_db_oid = new_db->db_oid; - - /* - * old_relfilenode might differ from pg_class.oid (and hence - * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL. - */ - map->old_relfilenode = old_rel->relfilenode; - - /* new_relfilenode will match old and new pg_class.oid */ - map->new_relfilenode = new_rel->relfilenode; - - /* used only for logging and error reporing, old/new are identical */ - map->nspname = old_rel->nspname; - map->relname = old_rel->relname; -} - - -void -print_maps(FileNameMap *maps, int n_maps, const char *db_name) -{ - if (log_opts.verbose) - { - int mapnum; - - pg_log(PG_VERBOSE, "mappings for database \"%s\":\n", db_name); - - for (mapnum = 0; mapnum < n_maps; mapnum++) - pg_log(PG_VERBOSE, "%s.%s: %u to %u\n", - maps[mapnum].nspname, maps[mapnum].relname, - maps[mapnum].old_relfilenode, - maps[mapnum].new_relfilenode); - - pg_log(PG_VERBOSE, "\n\n"); - } -} - - -/* - * get_db_and_rel_infos() - * - * higher level routine to generate dbinfos for the database running - * on the given "port". Assumes that server is already running. - */ -void -get_db_and_rel_infos(ClusterInfo *cluster) -{ - int dbnum; - - if (cluster->dbarr.dbs != NULL) - free_db_and_rel_infos(&cluster->dbarr); - - get_db_infos(cluster); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - get_rel_infos(cluster, &cluster->dbarr.dbs[dbnum]); - - pg_log(PG_VERBOSE, "\n%s databases:\n", CLUSTER_NAME(cluster)); - if (log_opts.verbose) - print_db_infos(&cluster->dbarr); -} - - -/* - * get_db_infos() - * - * Scans pg_database system catalog and populates all user - * databases. - */ -static void -get_db_infos(ClusterInfo *cluster) -{ - PGconn *conn = connectToServer(cluster, "template1"); - PGresult *res; - int ntups; - int tupnum; - DbInfo *dbinfos; - int i_datname, - i_oid, - i_spclocation; - char query[QUERY_ALLOC]; - - snprintf(query, sizeof(query), - "SELECT d.oid, d.datname, %s " - "FROM pg_catalog.pg_database d " - " LEFT OUTER JOIN pg_catalog.pg_tablespace t " - " ON d.dattablespace = t.oid " - "WHERE d.datallowconn = true " - /* we don't preserve pg_database.oid so we sort by name */ - "ORDER BY 2", - /* 9.2 removed the spclocation column */ - (GET_MAJOR_VERSION(cluster->major_version) <= 901) ? 
- "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation"); - - res = executeQueryOrDie(conn, "%s", query); - - i_oid = PQfnumber(res, "oid"); - i_datname = PQfnumber(res, "datname"); - i_spclocation = PQfnumber(res, "spclocation"); - - ntups = PQntuples(res); - dbinfos = (DbInfo *) pg_malloc(sizeof(DbInfo) * ntups); - - for (tupnum = 0; tupnum < ntups; tupnum++) - { - dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid)); - dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname)); - snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s", - PQgetvalue(res, tupnum, i_spclocation)); - } - PQclear(res); - - PQfinish(conn); - - cluster->dbarr.dbs = dbinfos; - cluster->dbarr.ndbs = ntups; -} - - -/* - * get_rel_infos() - * - * gets the relinfos for all the user tables of the database referred - * by "db". - * - * NOTE: we assume that relations/entities with oids greater than - * FirstNormalObjectId belongs to the user - */ -static void -get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) -{ - PGconn *conn = connectToServer(cluster, - dbinfo->db_name); - PGresult *res; - RelInfo *relinfos; - int ntups; - int relnum; - int num_rels = 0; - char *nspname = NULL; - char *relname = NULL; - char *tablespace = NULL; - int i_spclocation, - i_nspname, - i_relname, - i_oid, - i_relfilenode, - i_reltablespace; - char query[QUERY_ALLOC]; - char *last_namespace = NULL, - *last_tablespace = NULL; - - /* - * pg_largeobject contains user data that does not appear in pg_dumpall - * --schema-only output, so we have to copy that system table heap and - * index. We could grab the pg_largeobject oids from template1, but it is - * easy to treat it as a normal table. Order by oid so we can join old/new - * structures efficiently. - */ - - snprintf(query, sizeof(query), - "CREATE TEMPORARY TABLE info_rels (reloid) AS SELECT c.oid " - "FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n " - " ON c.relnamespace = n.oid " - "LEFT OUTER JOIN pg_catalog.pg_index i " - " ON c.oid = i.indexrelid " - "WHERE relkind IN ('r', 'm', 'i'%s) AND " - - /* - * pg_dump only dumps valid indexes; testing indisready is necessary in - * 9.2, and harmless in earlier/later versions. - */ - " i.indisvalid IS DISTINCT FROM false AND " - " i.indisready IS DISTINCT FROM false AND " - /* exclude possible orphaned temp tables */ - " ((n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - /* skip pg_toast because toast index have relkind == 'i', not 't' */ - " n.nspname NOT IN ('pg_catalog', 'information_schema', " - " 'binary_upgrade', 'pg_toast') AND " - " c.oid >= %u) " - " OR (n.nspname = 'pg_catalog' AND " - " relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) ));", - /* see the comment at the top of old_8_3_create_sequence_script() */ - (GET_MAJOR_VERSION(old_cluster.major_version) <= 803) ? - "" : ", 'S'", - FirstNormalObjectId, - /* does pg_largeobject_metadata need to be migrated? */ - (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ? - "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'"); - - PQclear(executeQueryOrDie(conn, "%s", query)); - - /* - * Get TOAST tables and indexes; we have to gather the TOAST tables in - * later steps because we can't schema-qualify TOAST tables. 
- */ - PQclear(executeQueryOrDie(conn, - "INSERT INTO info_rels " - "SELECT reltoastrelid " - "FROM info_rels i JOIN pg_catalog.pg_class c " - " ON i.reloid = c.oid " - " AND c.reltoastrelid != %u", InvalidOid)); - PQclear(executeQueryOrDie(conn, - "INSERT INTO info_rels " - "SELECT indexrelid " - "FROM pg_index " - "WHERE indisvalid " - " AND indrelid IN (SELECT reltoastrelid " - " FROM info_rels i " - " JOIN pg_catalog.pg_class c " - " ON i.reloid = c.oid " - " AND c.reltoastrelid != %u)", - InvalidOid)); - - snprintf(query, sizeof(query), - "SELECT c.oid, n.nspname, c.relname, " - " c.relfilenode, c.reltablespace, %s " - "FROM info_rels i JOIN pg_catalog.pg_class c " - " ON i.reloid = c.oid " - " JOIN pg_catalog.pg_namespace n " - " ON c.relnamespace = n.oid " - " LEFT OUTER JOIN pg_catalog.pg_tablespace t " - " ON c.reltablespace = t.oid " - /* we preserve pg_class.oid so we sort by it to match old/new */ - "ORDER BY 1;", - /* 9.2 removed the spclocation column */ - (GET_MAJOR_VERSION(cluster->major_version) <= 901) ? - "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation"); - - res = executeQueryOrDie(conn, "%s", query); - - ntups = PQntuples(res); - - relinfos = (RelInfo *) pg_malloc(sizeof(RelInfo) * ntups); - - i_oid = PQfnumber(res, "oid"); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_relfilenode = PQfnumber(res, "relfilenode"); - i_reltablespace = PQfnumber(res, "reltablespace"); - i_spclocation = PQfnumber(res, "spclocation"); - - for (relnum = 0; relnum < ntups; relnum++) - { - RelInfo *curr = &relinfos[num_rels++]; - - curr->reloid = atooid(PQgetvalue(res, relnum, i_oid)); - - nspname = PQgetvalue(res, relnum, i_nspname); - curr->nsp_alloc = false; - - /* - * Many of the namespace and tablespace strings are identical, so we - * try to reuse the allocated string pointers where possible to reduce - * memory consumption. - */ - /* Can we reuse the previous string allocation? */ - if (last_namespace && strcmp(nspname, last_namespace) == 0) - curr->nspname = last_namespace; - else - { - last_namespace = curr->nspname = pg_strdup(nspname); - curr->nsp_alloc = true; - } - - relname = PQgetvalue(res, relnum, i_relname); - curr->relname = pg_strdup(relname); - - curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode)); - curr->tblsp_alloc = false; - - /* Is the tablespace oid non-zero? */ - if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0) - { - /* - * The tablespace location might be "", meaning the cluster - * default location, i.e. pg_default or pg_global. - */ - tablespace = PQgetvalue(res, relnum, i_spclocation); - - /* Can we reuse the previous string allocation? */ - if (last_tablespace && strcmp(tablespace, last_tablespace) == 0) - curr->tablespace = last_tablespace; - else - { - last_tablespace = curr->tablespace = pg_strdup(tablespace); - curr->tblsp_alloc = true; - } - } - else - /* A zero reltablespace oid indicates the database tablespace. 
*/ - curr->tablespace = dbinfo->db_tablespace; - } - PQclear(res); - - PQfinish(conn); - - dbinfo->rel_arr.rels = relinfos; - dbinfo->rel_arr.nrels = num_rels; -} - - -static void -free_db_and_rel_infos(DbInfoArr *db_arr) -{ - int dbnum; - - for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++) - { - free_rel_infos(&db_arr->dbs[dbnum].rel_arr); - pg_free(db_arr->dbs[dbnum].db_name); - } - pg_free(db_arr->dbs); - db_arr->dbs = NULL; - db_arr->ndbs = 0; -} - - -static void -free_rel_infos(RelInfoArr *rel_arr) -{ - int relnum; - - for (relnum = 0; relnum < rel_arr->nrels; relnum++) - { - if (rel_arr->rels[relnum].nsp_alloc) - pg_free(rel_arr->rels[relnum].nspname); - pg_free(rel_arr->rels[relnum].relname); - if (rel_arr->rels[relnum].tblsp_alloc) - pg_free(rel_arr->rels[relnum].tablespace); - } - pg_free(rel_arr->rels); - rel_arr->nrels = 0; -} - - -static void -print_db_infos(DbInfoArr *db_arr) -{ - int dbnum; - - for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++) - { - pg_log(PG_VERBOSE, "Database: %s\n", db_arr->dbs[dbnum].db_name); - print_rel_infos(&db_arr->dbs[dbnum].rel_arr); - pg_log(PG_VERBOSE, "\n\n"); - } -} - - -static void -print_rel_infos(RelInfoArr *rel_arr) -{ - int relnum; - - for (relnum = 0; relnum < rel_arr->nrels; relnum++) - pg_log(PG_VERBOSE, "relname: %s.%s: reloid: %u reltblspace: %s\n", - rel_arr->rels[relnum].nspname, - rel_arr->rels[relnum].relname, - rel_arr->rels[relnum].reloid, - rel_arr->rels[relnum].tablespace); -} diff --git a/contrib/pg_upgrade/option.c b/contrib/pg_upgrade/option.c deleted file mode 100644 index b81010a813..0000000000 --- a/contrib/pg_upgrade/option.c +++ /dev/null @@ -1,480 +0,0 @@ -/* - * opt.c - * - * options functions - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/option.c - */ - -#include "postgres_fe.h" - -#include "miscadmin.h" -#include "getopt_long.h" - -#include "pg_upgrade.h" - -#include <time.h> -#include <sys/types.h> -#ifdef WIN32 -#include <io.h> -#endif - - -static void usage(void); -static void check_required_directory(char **dirpath, char **configpath, - char *envVarName, char *cmdLineOption, char *description); -#define FIX_DEFAULT_READ_ONLY "-c default_transaction_read_only=false" - - -UserOpts user_opts; - - -/* - * parseCommandLine() - * - * Parses the command line (argc, argv[]) and loads structures - */ -void -parseCommandLine(int argc, char *argv[]) -{ - static struct option long_options[] = { - {"old-datadir", required_argument, NULL, 'd'}, - {"new-datadir", required_argument, NULL, 'D'}, - {"old-bindir", required_argument, NULL, 'b'}, - {"new-bindir", required_argument, NULL, 'B'}, - {"old-options", required_argument, NULL, 'o'}, - {"new-options", required_argument, NULL, 'O'}, - {"old-port", required_argument, NULL, 'p'}, - {"new-port", required_argument, NULL, 'P'}, - - {"username", required_argument, NULL, 'U'}, - {"check", no_argument, NULL, 'c'}, - {"link", no_argument, NULL, 'k'}, - {"retain", no_argument, NULL, 'r'}, - {"jobs", required_argument, NULL, 'j'}, - {"verbose", no_argument, NULL, 'v'}, - {NULL, 0, NULL, 0} - }; - int option; /* Command line option */ - int optindex = 0; /* used by getopt_long */ - int os_user_effective_id; - FILE *fp; - char **filename; - time_t run_time = time(NULL); - - user_opts.transfer_mode = TRANSFER_MODE_COPY; - - os_info.progname = get_progname(argv[0]); - - /* Process libpq env. variables; load values here for usage() output */ - old_cluster.port = getenv("PGPORTOLD") ? 
atoi(getenv("PGPORTOLD")) : DEF_PGUPORT; - new_cluster.port = getenv("PGPORTNEW") ? atoi(getenv("PGPORTNEW")) : DEF_PGUPORT; - - os_user_effective_id = get_user_info(&os_info.user); - /* we override just the database user name; we got the OS id above */ - if (getenv("PGUSER")) - { - pg_free(os_info.user); - /* must save value, getenv()'s pointer is not stable */ - os_info.user = pg_strdup(getenv("PGUSER")); - } - - if (argc > 1) - { - if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) - { - usage(); - exit(0); - } - if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) - { - puts("pg_upgrade (PostgreSQL) " PG_VERSION); - exit(0); - } - } - - /* Allow help and version to be run as root, so do the test here. */ - if (os_user_effective_id == 0) - pg_fatal("%s: cannot be run as root\n", os_info.progname); - - if ((log_opts.internal = fopen_priv(INTERNAL_LOG_FILE, "a")) == NULL) - pg_fatal("cannot write to log file %s\n", INTERNAL_LOG_FILE); - - while ((option = getopt_long(argc, argv, "d:D:b:B:cj:ko:O:p:P:rU:v", - long_options, &optindex)) != -1) - { - switch (option) - { - case 'b': - old_cluster.bindir = pg_strdup(optarg); - break; - - case 'B': - new_cluster.bindir = pg_strdup(optarg); - break; - - case 'c': - user_opts.check = true; - break; - - case 'd': - old_cluster.pgdata = pg_strdup(optarg); - old_cluster.pgconfig = pg_strdup(optarg); - break; - - case 'D': - new_cluster.pgdata = pg_strdup(optarg); - new_cluster.pgconfig = pg_strdup(optarg); - break; - - case 'j': - user_opts.jobs = atoi(optarg); - break; - - case 'k': - user_opts.transfer_mode = TRANSFER_MODE_LINK; - break; - - case 'o': - old_cluster.pgopts = pg_strdup(optarg); - break; - - case 'O': - new_cluster.pgopts = pg_strdup(optarg); - break; - - /* - * Someday, the port number option could be removed and passed - * using -o/-O, but that requires postmaster -C to be - * supported on all old/new versions. - */ - case 'p': - if ((old_cluster.port = atoi(optarg)) <= 0) - { - pg_fatal("invalid old port number\n"); - exit(1); - } - break; - - case 'P': - if ((new_cluster.port = atoi(optarg)) <= 0) - { - pg_fatal("invalid new port number\n"); - exit(1); - } - break; - - case 'r': - log_opts.retain = true; - break; - - case 'U': - pg_free(os_info.user); - os_info.user = pg_strdup(optarg); - os_info.user_specified = true; - - /* - * Push the user name into the environment so pre-9.1 - * pg_ctl/libpq uses it. - */ - pg_putenv("PGUSER", os_info.user); - break; - - case 'v': - pg_log(PG_REPORT, "Running in verbose mode\n"); - log_opts.verbose = true; - break; - - default: - pg_fatal("Try \"%s --help\" for more information.\n", - os_info.progname); - break; - } - } - - /* label start of upgrade in logfiles */ - for (filename = output_files; *filename != NULL; filename++) - { - if ((fp = fopen_priv(*filename, "a")) == NULL) - pg_fatal("cannot write to log file %s\n", *filename); - - /* Start with newline because we might be appending to a file. */ - fprintf(fp, "\n" - "-----------------------------------------------------------------\n" - " pg_upgrade run on %s" - "-----------------------------------------------------------------\n\n", - ctime(&run_time)); - fclose(fp); - } - - /* Turn off read-only mode; add prefix to PGOPTIONS? 
*/ - if (getenv("PGOPTIONS")) - { - char *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY, - getenv("PGOPTIONS")); - - pg_putenv("PGOPTIONS", pgoptions); - pfree(pgoptions); - } - else - pg_putenv("PGOPTIONS", FIX_DEFAULT_READ_ONLY); - - /* Get values from env if not already set */ - check_required_directory(&old_cluster.bindir, NULL, "PGBINOLD", "-b", - "old cluster binaries reside"); - check_required_directory(&new_cluster.bindir, NULL, "PGBINNEW", "-B", - "new cluster binaries reside"); - check_required_directory(&old_cluster.pgdata, &old_cluster.pgconfig, - "PGDATAOLD", "-d", "old cluster data resides"); - check_required_directory(&new_cluster.pgdata, &new_cluster.pgconfig, - "PGDATANEW", "-D", "new cluster data resides"); -} - - -static void -usage(void) -{ - printf(_("pg_upgrade upgrades a PostgreSQL cluster to a different major version.\n\ -\nUsage:\n\ - pg_upgrade [OPTION]...\n\ -\n\ -Options:\n\ - -b, --old-bindir=BINDIR old cluster executable directory\n\ - -B, --new-bindir=BINDIR new cluster executable directory\n\ - -c, --check check clusters only, don't change any data\n\ - -d, --old-datadir=DATADIR old cluster data directory\n\ - -D, --new-datadir=DATADIR new cluster data directory\n\ - -j, --jobs number of simultaneous processes or threads to use\n\ - -k, --link link instead of copying files to new cluster\n\ - -o, --old-options=OPTIONS old cluster options to pass to the server\n\ - -O, --new-options=OPTIONS new cluster options to pass to the server\n\ - -p, --old-port=PORT old cluster port number (default %d)\n\ - -P, --new-port=PORT new cluster port number (default %d)\n\ - -r, --retain retain SQL and log files after success\n\ - -U, --username=NAME cluster superuser (default \"%s\")\n\ - -v, --verbose enable verbose internal logging\n\ - -V, --version display version information, then exit\n\ - -?, --help show this help, then exit\n\ -\n\ -Before running pg_upgrade you must:\n\ - create a new database cluster (using the new version of initdb)\n\ - shutdown the postmaster servicing the old cluster\n\ - shutdown the postmaster servicing the new cluster\n\ -\n\ -When you run pg_upgrade, you must provide the following information:\n\ - the data directory for the old cluster (-d DATADIR)\n\ - the data directory for the new cluster (-D DATADIR)\n\ - the \"bin\" directory for the old version (-b BINDIR)\n\ - the \"bin\" directory for the new version (-B BINDIR)\n\ -\n\ -For example:\n\ - pg_upgrade -d oldCluster/data -D newCluster/data -b oldCluster/bin -B newCluster/bin\n\ -or\n"), old_cluster.port, new_cluster.port, os_info.user); -#ifndef WIN32 - printf(_("\ - $ export PGDATAOLD=oldCluster/data\n\ - $ export PGDATANEW=newCluster/data\n\ - $ export PGBINOLD=oldCluster/bin\n\ - $ export PGBINNEW=newCluster/bin\n\ - $ pg_upgrade\n")); -#else - printf(_("\ - C:\\> set PGDATAOLD=oldCluster/data\n\ - C:\\> set PGDATANEW=newCluster/data\n\ - C:\\> set PGBINOLD=oldCluster/bin\n\ - C:\\> set PGBINNEW=newCluster/bin\n\ - C:\\> pg_upgrade\n")); -#endif - printf(_("\nReport bugs to <pgsql-bugs@postgresql.org>.\n")); -} - - -/* - * check_required_directory() - * - * Checks a directory option. 
- * dirpath - the directory name supplied on the command line - * configpath - optional configuration directory - * envVarName - the name of an environment variable to get if dirpath is NULL - * cmdLineOption - the command line option corresponds to this directory (-o, -O, -n, -N) - * description - a description of this directory option - * - * We use the last two arguments to construct a meaningful error message if the - * user hasn't provided the required directory name. - */ -static void -check_required_directory(char **dirpath, char **configpath, - char *envVarName, char *cmdLineOption, - char *description) -{ - if (*dirpath == NULL || strlen(*dirpath) == 0) - { - const char *envVar; - - if ((envVar = getenv(envVarName)) && strlen(envVar)) - { - *dirpath = pg_strdup(envVar); - if (configpath) - *configpath = pg_strdup(envVar); - } - else - pg_fatal("You must identify the directory where the %s.\n" - "Please use the %s command-line option or the %s environment variable.\n", - description, cmdLineOption, envVarName); - } - - /* - * Trim off any trailing path separators because we construct paths by - * appending to this path. - */ -#ifndef WIN32 - if ((*dirpath)[strlen(*dirpath) - 1] == '/') -#else - if ((*dirpath)[strlen(*dirpath) - 1] == '/' || - (*dirpath)[strlen(*dirpath) - 1] == '\\') -#endif - (*dirpath)[strlen(*dirpath) - 1] = 0; -} - -/* - * adjust_data_dir - * - * If a configuration-only directory was specified, find the real data dir - * by quering the running server. This has limited checking because we - * can't check for a running server because we can't find postmaster.pid. - */ -void -adjust_data_dir(ClusterInfo *cluster) -{ - char filename[MAXPGPATH]; - char cmd[MAXPGPATH], - cmd_output[MAX_STRING]; - FILE *fp, - *output; - - /* If there is no postgresql.conf, it can't be a config-only dir */ - snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig); - if ((fp = fopen(filename, "r")) == NULL) - return; - fclose(fp); - - /* If PG_VERSION exists, it can't be a config-only dir */ - snprintf(filename, sizeof(filename), "%s/PG_VERSION", cluster->pgconfig); - if ((fp = fopen(filename, "r")) != NULL) - { - fclose(fp); - return; - } - - /* Must be a configuration directory, so find the real data directory. */ - - prep_status("Finding the real data directory for the %s cluster", - CLUSTER_NAME(cluster)); - - /* - * We don't have a data directory yet, so we can't check the PG version, - * so this might fail --- only works for PG 9.2+. If this fails, - * pg_upgrade will fail anyway because the data files will not be found. - */ - snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory", - cluster->bindir, cluster->pgconfig); - - if ((output = popen(cmd, "r")) == NULL || - fgets(cmd_output, sizeof(cmd_output), output) == NULL) - pg_fatal("Could not get data directory using %s: %s\n", - cmd, getErrorText(errno)); - - pclose(output); - - /* Remove trailing newline */ - if (strchr(cmd_output, '\n') != NULL) - *strchr(cmd_output, '\n') = '\0'; - - cluster->pgdata = pg_strdup(cmd_output); - - check_ok(); -} - - -/* - * get_sock_dir - * - * Identify the socket directory to use for this cluster. If we're doing - * a live check (old cluster only), we need to find out where the postmaster - * is listening. Otherwise, we're going to put the socket into the current - * directory. - */ -void -get_sock_dir(ClusterInfo *cluster, bool live_check) -{ -#ifdef HAVE_UNIX_SOCKETS - - /* - * sockdir and port were added to postmaster.pid in PG 9.1. 
Pre-9.1 cannot - * process pg_ctl -w for sockets in non-default locations. - */ - if (GET_MAJOR_VERSION(cluster->major_version) >= 901) - { - if (!live_check) - { - /* Use the current directory for the socket */ - cluster->sockdir = pg_malloc(MAXPGPATH); - if (!getcwd(cluster->sockdir, MAXPGPATH)) - pg_fatal("cannot find current directory\n"); - } - else - { - /* - * If we are doing a live check, we will use the old cluster's - * Unix domain socket directory so we can connect to the live - * server. - */ - unsigned short orig_port = cluster->port; - char filename[MAXPGPATH], - line[MAXPGPATH]; - FILE *fp; - int lineno; - - snprintf(filename, sizeof(filename), "%s/postmaster.pid", - cluster->pgdata); - if ((fp = fopen(filename, "r")) == NULL) - pg_fatal("Cannot open file %s: %m\n", filename); - - for (lineno = 1; - lineno <= Max(LOCK_FILE_LINE_PORT, LOCK_FILE_LINE_SOCKET_DIR); - lineno++) - { - if (fgets(line, sizeof(line), fp) == NULL) - pg_fatal("Cannot read line %d from %s: %m\n", lineno, filename); - - /* potentially overwrite user-supplied value */ - if (lineno == LOCK_FILE_LINE_PORT) - sscanf(line, "%hu", &old_cluster.port); - if (lineno == LOCK_FILE_LINE_SOCKET_DIR) - { - cluster->sockdir = pg_strdup(line); - /* strip off newline */ - if (strchr(cluster->sockdir, '\n') != NULL) - *strchr(cluster->sockdir, '\n') = '\0'; - } - } - fclose(fp); - - /* warn of port number correction */ - if (orig_port != DEF_PGUPORT && old_cluster.port != orig_port) - pg_log(PG_WARNING, "User-supplied old port number %hu corrected to %hu\n", - orig_port, cluster->port); - } - } - else - - /* - * Can't get sockdir and pg_ctl -w can't use a non-default, use - * default - */ - cluster->sockdir = NULL; -#else /* !HAVE_UNIX_SOCKETS */ - cluster->sockdir = NULL; -#endif -} diff --git a/contrib/pg_upgrade/page.c b/contrib/pg_upgrade/page.c deleted file mode 100644 index 6354cec2b0..0000000000 --- a/contrib/pg_upgrade/page.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * page.c - * - * per-page conversion operations - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/page.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include "storage/bufpage.h" - - -#ifdef PAGE_CONVERSION - - -static void getPageVersion( - uint16 *version, const char *pathName); -static pageCnvCtx *loadConverterPlugin( - uint16 newPageVersion, uint16 oldPageVersion); - - -/* - * setupPageConverter() - * - * This function determines the PageLayoutVersion of the old cluster and - * the PageLayoutVersion of the new cluster. If the versions differ, this - * function loads a converter plugin and returns a pointer to a pageCnvCtx - * object (in *result) that knows how to convert pages from the old format - * to the new format. If the versions are identical, this function just - * returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion - * is not required. - */ -pageCnvCtx * -setupPageConverter(void) -{ - uint16 oldPageVersion; - uint16 newPageVersion; - pageCnvCtx *converter; - const char *msg; - char dstName[MAXPGPATH]; - char srcName[MAXPGPATH]; - - snprintf(dstName, sizeof(dstName), "%s/global/%u", new_cluster.pgdata, - new_cluster.pg_database_oid); - snprintf(srcName, sizeof(srcName), "%s/global/%u", old_cluster.pgdata, - old_cluster.pg_database_oid); - - getPageVersion(&oldPageVersion, srcName); - getPageVersion(&newPageVersion, dstName); - - /* - * If the old cluster and new cluster use the same page layouts, then we - * don't need a page converter. 
- */ - if (newPageVersion != oldPageVersion) - { - /* - * The clusters use differing page layouts, see if we can find a - * plugin that knows how to convert from the old page layout to the - * new page layout. - */ - - if ((converter = loadConverterPlugin(newPageVersion, oldPageVersion)) == NULL) - pg_fatal("could not find plugin to convert from old page layout to new page layout\n"); - - return converter; - } - else - return NULL; -} - - -/* - * getPageVersion() - * - * Retrieves the PageLayoutVersion for the given relation. - * - * Returns NULL on success (and stores the PageLayoutVersion at *version), - * if an error occurs, this function returns an error message (in the form - * of a null-terminated string). - */ -static void -getPageVersion(uint16 *version, const char *pathName) -{ - int relfd; - PageHeaderData page; - ssize_t bytesRead; - - if ((relfd = open(pathName, O_RDONLY, 0)) < 0) - pg_fatal("could not open relation %s\n", pathName); - - if ((bytesRead = read(relfd, &page, sizeof(page))) != sizeof(page)) - pg_fatal("could not read page header of %s\n", pathName); - - *version = PageGetPageLayoutVersion(&page); - - close(relfd); - - return; -} - - -/* - * loadConverterPlugin() - * - * This function loads a page-converter plugin library and grabs a - * pointer to each of the (interesting) functions provided by that - * plugin. The name of the plugin library is derived from the given - * newPageVersion and oldPageVersion. If a plugin is found, this - * function returns a pointer to a pageCnvCtx object (which will contain - * a collection of plugin function pointers). If the required plugin - * is not found, this function returns NULL. - */ -static pageCnvCtx * -loadConverterPlugin(uint16 newPageVersion, uint16 oldPageVersion) -{ - char pluginName[MAXPGPATH]; - void *plugin; - - /* - * Try to find a plugin that can convert pages of oldPageVersion into - * pages of newPageVersion. For example, if we oldPageVersion = 3 and - * newPageVersion is 4, we search for a plugin named: - * plugins/convertLayout_3_to_4.dll - */ - - /* - * FIXME: we are searching for plugins relative to the current directory, - * we should really search relative to our own executable instead. - */ - snprintf(pluginName, sizeof(pluginName), "./plugins/convertLayout_%d_to_%d%s", - oldPageVersion, newPageVersion, DLSUFFIX); - - if ((plugin = pg_dlopen(pluginName)) == NULL) - return NULL; - else - { - pageCnvCtx *result = (pageCnvCtx *) pg_malloc(sizeof(*result)); - - result->old.PageVersion = oldPageVersion; - result->new.PageVersion = newPageVersion; - - result->startup = (pluginStartup) pg_dlsym(plugin, "init"); - result->convertFile = (pluginConvertFile) pg_dlsym(plugin, "convertFile"); - result->convertPage = (pluginConvertPage) pg_dlsym(plugin, "convertPage"); - result->shutdown = (pluginShutdown) pg_dlsym(plugin, "fini"); - result->pluginData = NULL; - - /* - * If the plugin has exported an initializer, go ahead and invoke it. 
- */ - if (result->startup) - result->startup(MIGRATOR_API_VERSION, &result->pluginVersion, - newPageVersion, oldPageVersion, &result->pluginData); - - return result; - } -} - -#endif diff --git a/contrib/pg_upgrade/parallel.c b/contrib/pg_upgrade/parallel.c deleted file mode 100644 index 5d2565d441..0000000000 --- a/contrib/pg_upgrade/parallel.c +++ /dev/null @@ -1,357 +0,0 @@ -/* - * parallel.c - * - * multi-process support - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/parallel.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include <stdlib.h> -#include <string.h> -#include <sys/types.h> -#include <sys/wait.h> - -#ifdef WIN32 -#include <io.h> -#endif - -static int parallel_jobs; - -#ifdef WIN32 -/* - * Array holding all active threads. There can't be any gaps/zeros so - * it can be passed to WaitForMultipleObjects(). We use two arrays - * so the thread_handles array can be passed to WaitForMultipleObjects(). - */ -HANDLE *thread_handles; - -typedef struct -{ - char *log_file; - char *opt_log_file; - char *cmd; -} exec_thread_arg; - -typedef struct -{ - DbInfoArr *old_db_arr; - DbInfoArr *new_db_arr; - char *old_pgdata; - char *new_pgdata; - char *old_tablespace; -} transfer_thread_arg; - -exec_thread_arg **exec_thread_args; -transfer_thread_arg **transfer_thread_args; - -/* track current thread_args struct so reap_child() can be used for all cases */ -void **cur_thread_args; - -DWORD win32_exec_prog(exec_thread_arg *args); -DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args); -#endif - -/* - * parallel_exec_prog - * - * This has the same API as exec_prog, except it does parallel execution, - * and therefore must throw errors and doesn't return an error status. - */ -void -parallel_exec_prog(const char *log_file, const char *opt_log_file, - const char *fmt,...) -{ - va_list args; - char cmd[MAX_STRING]; - -#ifndef WIN32 - pid_t child; -#else - HANDLE child; - exec_thread_arg *new_arg; -#endif - - va_start(args, fmt); - vsnprintf(cmd, sizeof(cmd), fmt, args); - va_end(args); - - if (user_opts.jobs <= 1) - /* throw_error must be true to allow jobs */ - exec_prog(log_file, opt_log_file, true, "%s", cmd); - else - { - /* parallel */ -#ifdef WIN32 - if (thread_handles == NULL) - thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE)); - - if (exec_thread_args == NULL) - { - int i; - - exec_thread_args = pg_malloc(user_opts.jobs * sizeof(exec_thread_arg *)); - - /* - * For safety and performance, we keep the args allocated during - * the entire life of the process, and we don't free the args in a - * thread different from the one that allocated it. - */ - for (i = 0; i < user_opts.jobs; i++) - exec_thread_args[i] = pg_malloc0(sizeof(exec_thread_arg)); - } - - cur_thread_args = (void **) exec_thread_args; -#endif - /* harvest any dead children */ - while (reap_child(false) == true) - ; - - /* must we wait for a dead child? 
*/ - if (parallel_jobs >= user_opts.jobs) - reap_child(true); - - /* set this before we start the job */ - parallel_jobs++; - - /* Ensure stdio state is quiesced before forking */ - fflush(NULL); - -#ifndef WIN32 - child = fork(); - if (child == 0) - /* use _exit to skip atexit() functions */ - _exit(!exec_prog(log_file, opt_log_file, true, "%s", cmd)); - else if (child < 0) - /* fork failed */ - pg_fatal("could not create worker process: %s\n", strerror(errno)); -#else - /* empty array element are always at the end */ - new_arg = exec_thread_args[parallel_jobs - 1]; - - /* Can only pass one pointer into the function, so use a struct */ - if (new_arg->log_file) - pg_free(new_arg->log_file); - new_arg->log_file = pg_strdup(log_file); - if (new_arg->opt_log_file) - pg_free(new_arg->opt_log_file); - new_arg->opt_log_file = opt_log_file ? pg_strdup(opt_log_file) : NULL; - if (new_arg->cmd) - pg_free(new_arg->cmd); - new_arg->cmd = pg_strdup(cmd); - - child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_exec_prog, - new_arg, 0, NULL); - if (child == 0) - pg_fatal("could not create worker thread: %s\n", strerror(errno)); - - thread_handles[parallel_jobs - 1] = child; -#endif - } - - return; -} - - -#ifdef WIN32 -DWORD -win32_exec_prog(exec_thread_arg *args) -{ - int ret; - - ret = !exec_prog(args->log_file, args->opt_log_file, true, "%s", args->cmd); - - /* terminates thread */ - return ret; -} -#endif - - -/* - * parallel_transfer_all_new_dbs - * - * This has the same API as transfer_all_new_dbs, except it does parallel execution - * by transfering multiple tablespaces in parallel - */ -void -parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata, - char *old_tablespace) -{ -#ifndef WIN32 - pid_t child; -#else - HANDLE child; - transfer_thread_arg *new_arg; -#endif - - if (user_opts.jobs <= 1) - /* throw_error must be true to allow jobs */ - transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL); - else - { - /* parallel */ -#ifdef WIN32 - if (thread_handles == NULL) - thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE)); - - if (transfer_thread_args == NULL) - { - int i; - - transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *)); - - /* - * For safety and performance, we keep the args allocated during - * the entire life of the process, and we don't free the args in a - * thread different from the one that allocated it. - */ - for (i = 0; i < user_opts.jobs; i++) - transfer_thread_args[i] = pg_malloc0(sizeof(transfer_thread_arg)); - } - - cur_thread_args = (void **) transfer_thread_args; -#endif - /* harvest any dead children */ - while (reap_child(false) == true) - ; - - /* must we wait for a dead child? 
*/ - if (parallel_jobs >= user_opts.jobs) - reap_child(true); - - /* set this before we start the job */ - parallel_jobs++; - - /* Ensure stdio state is quiesced before forking */ - fflush(NULL); - -#ifndef WIN32 - child = fork(); - if (child == 0) - { - transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, - old_tablespace); - /* if we take another exit path, it will be non-zero */ - /* use _exit to skip atexit() functions */ - _exit(0); - } - else if (child < 0) - /* fork failed */ - pg_fatal("could not create worker process: %s\n", strerror(errno)); -#else - /* empty array element are always at the end */ - new_arg = transfer_thread_args[parallel_jobs - 1]; - - /* Can only pass one pointer into the function, so use a struct */ - new_arg->old_db_arr = old_db_arr; - new_arg->new_db_arr = new_db_arr; - if (new_arg->old_pgdata) - pg_free(new_arg->old_pgdata); - new_arg->old_pgdata = pg_strdup(old_pgdata); - if (new_arg->new_pgdata) - pg_free(new_arg->new_pgdata); - new_arg->new_pgdata = pg_strdup(new_pgdata); - if (new_arg->old_tablespace) - pg_free(new_arg->old_tablespace); - new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL; - - child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs, - new_arg, 0, NULL); - if (child == 0) - pg_fatal("could not create worker thread: %s\n", strerror(errno)); - - thread_handles[parallel_jobs - 1] = child; -#endif - } - - return; -} - - -#ifdef WIN32 -DWORD -win32_transfer_all_new_dbs(transfer_thread_arg *args) -{ - transfer_all_new_dbs(args->old_db_arr, args->new_db_arr, args->old_pgdata, - args->new_pgdata, args->old_tablespace); - - /* terminates thread */ - return 0; -} -#endif - - -/* - * collect status from a completed worker child - */ -bool -reap_child(bool wait_for_child) -{ -#ifndef WIN32 - int work_status; - int ret; -#else - int thread_num; - DWORD res; -#endif - - if (user_opts.jobs <= 1 || parallel_jobs == 0) - return false; - -#ifndef WIN32 - ret = waitpid(-1, &work_status, wait_for_child ? 0 : WNOHANG); - - /* no children or, for WNOHANG, no dead children */ - if (ret <= 0 || !WIFEXITED(work_status)) - return false; - - if (WEXITSTATUS(work_status) != 0) - pg_fatal("child worker exited abnormally: %s\n", strerror(errno)); -#else - /* wait for one to finish */ - thread_num = WaitForMultipleObjects(parallel_jobs, thread_handles, - false, wait_for_child ? INFINITE : 0); - - if (thread_num == WAIT_TIMEOUT || thread_num == WAIT_FAILED) - return false; - - /* compute thread index in active_threads */ - thread_num -= WAIT_OBJECT_0; - - /* get the result */ - GetExitCodeThread(thread_handles[thread_num], &res); - if (res != 0) - pg_fatal("child worker exited abnormally: %s\n", strerror(errno)); - - /* dispose of handle to stop leaks */ - CloseHandle(thread_handles[thread_num]); - - /* Move last slot into dead child's position */ - if (thread_num != parallel_jobs - 1) - { - void *tmp_args; - - thread_handles[thread_num] = thread_handles[parallel_jobs - 1]; - - /* - * Move last active thead arg struct into the now-dead slot, and the - * now-dead slot to the end for reuse by the next thread. Though the - * thread struct is in use by another thread, we can safely swap the - * struct pointers within the array. 
- */ - tmp_args = cur_thread_args[thread_num]; - cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1]; - cur_thread_args[parallel_jobs - 1] = tmp_args; - } -#endif - - /* do this after job has been removed */ - parallel_jobs--; - - return true; -} diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c deleted file mode 100644 index 773bb07e04..0000000000 --- a/contrib/pg_upgrade/pg_upgrade.c +++ /dev/null @@ -1,584 +0,0 @@ -/* - * pg_upgrade.c - * - * main source file - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/pg_upgrade.c - */ - -/* - * To simplify the upgrade process, we force certain system values to be - * identical between old and new clusters: - * - * We control all assignments of pg_class.oid (and relfilenode) so toast - * oids are the same between old and new clusters. This is important - * because toast oids are stored as toast pointers in user tables. - * - * While pg_class.oid and pg_class.relfilenode are initially the same - * in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM - * FULL. In the new cluster, pg_class.oid and pg_class.relfilenode will - * be the same and will match the old pg_class.oid value. Because of - * this, old/new pg_class.relfilenode values will not match if CLUSTER, - * REINDEX, or VACUUM FULL have been performed in the old cluster. - * - * We control all assignments of pg_type.oid because these oids are stored - * in user composite type values. - * - * We control all assignments of pg_enum.oid because these oids are stored - * in user tables as enum values. - * - * We control all assignments of pg_authid.oid because these oids are stored - * in pg_largeobject_metadata. - */ - - - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#ifdef HAVE_LANGINFO_H -#include <langinfo.h> -#endif - -static void prepare_new_cluster(void); -static void prepare_new_databases(void); -static void create_new_objects(void); -static void copy_clog_xlog_xid(void); -static void set_frozenxids(void); -static void setup(char *argv0, bool *live_check); -static void cleanup(void); - -ClusterInfo old_cluster, - new_cluster; -OSInfo os_info; - -char *output_files[] = { - SERVER_LOG_FILE, -#ifdef WIN32 - /* unique file for pg_ctl start */ - SERVER_START_LOG_FILE, -#endif - UTILITY_LOG_FILE, - INTERNAL_LOG_FILE, - NULL -}; - - -int -main(int argc, char **argv) -{ - char *sequence_script_file_name = NULL; - char *analyze_script_file_name = NULL; - char *deletion_script_file_name = NULL; - bool live_check = false; - - parseCommandLine(argc, argv); - - adjust_data_dir(&old_cluster); - adjust_data_dir(&new_cluster); - - setup(argv[0], &live_check); - - output_check_banner(live_check); - - check_cluster_versions(); - - get_sock_dir(&old_cluster, live_check); - get_sock_dir(&new_cluster, false); - - check_cluster_compatibility(live_check); - - check_and_dump_old_cluster(live_check, &sequence_script_file_name); - - - /* -- NEW -- */ - start_postmaster(&new_cluster, true); - - check_new_cluster(); - report_clusters_compatible(); - - pg_log(PG_REPORT, "\nPerforming Upgrade\n"); - pg_log(PG_REPORT, "------------------\n"); - - prepare_new_cluster(); - - stop_postmaster(false); - - /* - * Destructive Changes to New Cluster - */ - - copy_clog_xlog_xid(); - - /* New now using xids of the old system */ - - /* -- NEW -- */ - start_postmaster(&new_cluster, true); - - prepare_new_databases(); - - create_new_objects(); - - stop_postmaster(false); - - /* - * Most failures happen in 
create_new_objects(), which has completed at - * this point. We do this here because it is just before linking, which - * will link the old and new cluster data files, preventing the old - * cluster from being safely started once the new cluster is started. - */ - if (user_opts.transfer_mode == TRANSFER_MODE_LINK) - disable_old_cluster(); - - transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr, - old_cluster.pgdata, new_cluster.pgdata); - - /* - * Assuming OIDs are only used in system tables, there is no need to - * restore the OID counter because we have not transferred any OIDs from - * the old system, but we do it anyway just in case. We do it late here - * because there is no need to have the schema load use new oids. - */ - prep_status("Setting next OID for new cluster"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -o %u \"%s\"", - new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid, - new_cluster.pgdata); - check_ok(); - - prep_status("Sync data directory to disk"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir, - new_cluster.pgdata); - check_ok(); - - create_script_for_cluster_analyze(&analyze_script_file_name); - create_script_for_old_cluster_deletion(&deletion_script_file_name); - - issue_warnings(sequence_script_file_name); - - pg_log(PG_REPORT, "\nUpgrade Complete\n"); - pg_log(PG_REPORT, "----------------\n"); - - output_completion_banner(analyze_script_file_name, - deletion_script_file_name); - - pg_free(analyze_script_file_name); - pg_free(deletion_script_file_name); - pg_free(sequence_script_file_name); - - cleanup(); - - return 0; -} - - -static void -setup(char *argv0, bool *live_check) -{ - char exec_path[MAXPGPATH]; /* full path to my executable */ - - /* - * make sure the user has a clean environment, otherwise, we may confuse - * libpq when we connect to one (or both) of the servers. - */ - check_pghost_envvar(); - - verify_directories(); - - /* no postmasters should be running, except for a live check */ - if (pid_lock_file_exists(old_cluster.pgdata)) - { - /* - * If we have a postmaster.pid file, try to start the server. If it - * starts, the pid file was stale, so stop the server. If it doesn't - * start, assume the server is running. If the pid file is left over - * from a server crash, this also allows any committed transactions - * stored in the WAL to be replayed so they are not lost, because WAL - * files are not transfered from old to new servers. 
- */ - if (start_postmaster(&old_cluster, false)) - stop_postmaster(false); - else - { - if (!user_opts.check) - pg_fatal("There seems to be a postmaster servicing the old cluster.\n" - "Please shutdown that postmaster and try again.\n"); - else - *live_check = true; - } - } - - /* same goes for the new postmaster */ - if (pid_lock_file_exists(new_cluster.pgdata)) - { - if (start_postmaster(&new_cluster, false)) - stop_postmaster(false); - else - pg_fatal("There seems to be a postmaster servicing the new cluster.\n" - "Please shutdown that postmaster and try again.\n"); - } - - /* get path to pg_upgrade executable */ - if (find_my_exec(argv0, exec_path) < 0) - pg_fatal("Could not get path name to pg_upgrade: %s\n", getErrorText(errno)); - - /* Trim off program name and keep just path */ - *last_dir_separator(exec_path) = '\0'; - canonicalize_path(exec_path); - os_info.exec_path = pg_strdup(exec_path); -} - - -static void -prepare_new_cluster(void) -{ - /* - * It would make more sense to freeze after loading the schema, but that - * would cause us to lose the frozenids restored by the load. We use - * --analyze so autovacuum doesn't update statistics later - */ - prep_status("Analyzing all rows in the new cluster"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/vacuumdb\" %s --all --analyze %s", - new_cluster.bindir, cluster_conn_opts(&new_cluster), - log_opts.verbose ? "--verbose" : ""); - check_ok(); - - /* - * We do freeze after analyze so pg_statistic is also frozen. template0 is - * not frozen here, but data rows were frozen by initdb, and we set its - * datfrozenxid and relfrozenxids later to match the new xid counter - * later. - */ - prep_status("Freezing all rows on the new cluster"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/vacuumdb\" %s --all --freeze %s", - new_cluster.bindir, cluster_conn_opts(&new_cluster), - log_opts.verbose ? "--verbose" : ""); - check_ok(); - - get_pg_database_relfilenode(&new_cluster); -} - - -static void -prepare_new_databases(void) -{ - /* - * We set autovacuum_freeze_max_age to its maximum value so autovacuum - * does not launch here and delete clog files, before the frozen xids are - * set. - */ - - set_frozenxids(); - - prep_status("Restoring global objects in the new cluster"); - - /* - * Install support functions in the global-object restore database to - * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template - * database so objects we add into 'template1' are not propogated. They - * are removed on pg_upgrade exit. - */ - install_support_functions_in_new_db("template1"); - - /* - * We have to create the databases first so we can install support - * functions in all the other databases. Ideally we could create the - * support functions in template1 but pg_dumpall creates database using - * the template0 template. - */ - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/psql\" " EXEC_PSQL_ARGS " %s -f \"%s\"", - new_cluster.bindir, cluster_conn_opts(&new_cluster), - GLOBALS_DUMP_FILE); - check_ok(); - - /* we load this to get a current list of databases */ - get_db_and_rel_infos(&new_cluster); -} - - -static void -create_new_objects(void) -{ - int dbnum; - - prep_status("Adding support functions to new cluster"); - - /* - * Technically, we only need to install these support functions in new - * databases that also exist in the old cluster, but for completeness we - * process all new databases. 
- */ - for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) - { - DbInfo *new_db = &new_cluster.dbarr.dbs[dbnum]; - - /* skip db we already installed */ - if (strcmp(new_db->db_name, "template1") != 0) - install_support_functions_in_new_db(new_db->db_name); - } - check_ok(); - - prep_status("Restoring database schemas in the new cluster\n"); - - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - char sql_file_name[MAXPGPATH], - log_file_name[MAXPGPATH]; - DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; - - pg_log(PG_STATUS, "%s", old_db->db_name); - snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); - snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); - - /* - * pg_dump only produces its output at the end, so there is little - * parallelism if using the pipe. - */ - parallel_exec_prog(log_file_name, - NULL, - "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"", - new_cluster.bindir, - cluster_conn_opts(&new_cluster), - old_db->db_name, - sql_file_name); - } - - /* reap all children */ - while (reap_child(true) == true) - ; - - end_progress_output(); - check_ok(); - - /* regenerate now that we have objects in the databases */ - get_db_and_rel_infos(&new_cluster); - - uninstall_support_functions_from_new_cluster(); -} - -/* - * Delete the given subdirectory contents from the new cluster, and copy the - * files from the old cluster into it. - */ -static void -copy_subdir_files(char *subdir) -{ - char old_path[MAXPGPATH]; - char new_path[MAXPGPATH]; - - prep_status("Deleting files from new %s", subdir); - - snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir); - snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir); - if (!rmtree(new_path, true)) - pg_fatal("could not delete directory \"%s\"\n", new_path); - check_ok(); - - prep_status("Copying old %s to new server", subdir); - - exec_prog(UTILITY_LOG_FILE, NULL, true, -#ifndef WIN32 - "cp -Rf \"%s\" \"%s\"", -#else - /* flags: everything, no confirm, quiet, overwrite read-only */ - "xcopy /e /y /q /r \"%s\" \"%s\\\"", -#endif - old_path, new_path); - - check_ok(); -} - -static void -copy_clog_xlog_xid(void) -{ - /* copy old commit logs to new data dir */ - copy_subdir_files("pg_clog"); - - /* set the next transaction id of the new cluster */ - prep_status("Setting next transaction ID for new cluster"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -f -x %u \"%s\"", - new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid, - new_cluster.pgdata); - check_ok(); - - /* - * If the old server is before the MULTIXACT_FORMATCHANGE_CAT_VER change - * (see pg_upgrade.h) and the new server is after, then we don't copy - * pg_multixact files, but we need to reset pg_control so that the new - * server doesn't attempt to read multis older than the cutoff value. - */ - if (old_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER && - new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) - { - copy_subdir_files("pg_multixact/offsets"); - copy_subdir_files("pg_multixact/members"); - prep_status("Setting next multixact ID and offset for new cluster"); - - /* - * we preserve all files and contents, so we must preserve both "next" - * counters here and the oldest multi present on system. 
- */ - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -O %u -m %u,%u \"%s\"", - new_cluster.bindir, - old_cluster.controldata.chkpnt_nxtmxoff, - old_cluster.controldata.chkpnt_nxtmulti, - old_cluster.controldata.chkpnt_oldstMulti, - new_cluster.pgdata); - check_ok(); - } - else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) - { - prep_status("Setting oldest multixact ID on new cluster"); - - /* - * We don't preserve files in this case, but it's important that the - * oldest multi is set to the latest value used by the old system, so - * that multixact.c returns the empty set for multis that might be - * present on disk. We set next multi to the value following that; it - * might end up wrapped around (i.e. 0) if the old cluster had - * next=MaxMultiXactId, but multixact.c can cope with that just fine. - */ - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -m %u,%u \"%s\"", - new_cluster.bindir, - old_cluster.controldata.chkpnt_nxtmulti + 1, - old_cluster.controldata.chkpnt_nxtmulti, - new_cluster.pgdata); - check_ok(); - } - - /* now reset the wal archives in the new cluster */ - prep_status("Resetting WAL archives"); - exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -l %s \"%s\"", new_cluster.bindir, - old_cluster.controldata.nextxlogfile, - new_cluster.pgdata); - check_ok(); -} - - -/* - * set_frozenxids() - * - * We have frozen all xids, so set relfrozenxid and datfrozenxid - * to be the old cluster's xid counter, which we just set in the new - * cluster. User-table frozenxid values will be set by pg_dumpall - * --binary-upgrade, but objects not set by the pg_dump must have - * proper frozen counters. - */ -static -void -set_frozenxids(void) -{ - int dbnum; - PGconn *conn, - *conn_template1; - PGresult *dbres; - int ntups; - int i_datname; - int i_datallowconn; - - prep_status("Setting frozenxid counters in new cluster"); - - conn_template1 = connectToServer(&new_cluster, "template1"); - - /* set pg_database.datfrozenxid */ - PQclear(executeQueryOrDie(conn_template1, - "UPDATE pg_catalog.pg_database " - "SET datfrozenxid = '%u'", - old_cluster.controldata.chkpnt_nxtxid)); - - /* get database names */ - dbres = executeQueryOrDie(conn_template1, - "SELECT datname, datallowconn " - "FROM pg_catalog.pg_database"); - - i_datname = PQfnumber(dbres, "datname"); - i_datallowconn = PQfnumber(dbres, "datallowconn"); - - ntups = PQntuples(dbres); - for (dbnum = 0; dbnum < ntups; dbnum++) - { - char *datname = PQgetvalue(dbres, dbnum, i_datname); - char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn); - - /* - * We must update databases where datallowconn = false, e.g. - * template0, because autovacuum increments their datfrozenxids and - * relfrozenxids even if autovacuum is turned off, and even though all - * the data rows are already frozen To enable this, we temporarily - * change datallowconn. 
- */ - if (strcmp(datallowconn, "f") == 0) - PQclear(executeQueryOrDie(conn_template1, - "UPDATE pg_catalog.pg_database " - "SET datallowconn = true " - "WHERE datname = '%s'", datname)); - - conn = connectToServer(&new_cluster, datname); - - /* set pg_class.relfrozenxid */ - PQclear(executeQueryOrDie(conn, - "UPDATE pg_catalog.pg_class " - "SET relfrozenxid = '%u' " - /* only heap, materialized view, and TOAST are vacuumed */ - "WHERE relkind IN ('r', 'm', 't')", - old_cluster.controldata.chkpnt_nxtxid)); - PQfinish(conn); - - /* Reset datallowconn flag */ - if (strcmp(datallowconn, "f") == 0) - PQclear(executeQueryOrDie(conn_template1, - "UPDATE pg_catalog.pg_database " - "SET datallowconn = false " - "WHERE datname = '%s'", datname)); - } - - PQclear(dbres); - - PQfinish(conn_template1); - - check_ok(); -} - - -static void -cleanup(void) -{ - fclose(log_opts.internal); - - /* Remove dump and log files? */ - if (!log_opts.retain) - { - int dbnum; - char **filename; - - for (filename = output_files; *filename != NULL; filename++) - unlink(*filename); - - /* remove dump files */ - unlink(GLOBALS_DUMP_FILE); - - if (old_cluster.dbarr.dbs) - for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - { - char sql_file_name[MAXPGPATH], - log_file_name[MAXPGPATH]; - DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum]; - - snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid); - unlink(sql_file_name); - - snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid); - unlink(log_file_name); - } - } -} diff --git a/contrib/pg_upgrade/pg_upgrade.h b/contrib/pg_upgrade/pg_upgrade.h deleted file mode 100644 index 0410b02293..0000000000 --- a/contrib/pg_upgrade/pg_upgrade.h +++ /dev/null @@ -1,494 +0,0 @@ -/* - * pg_upgrade.h - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/pg_upgrade.h - */ - -#include <unistd.h> -#include <assert.h> -#include <sys/stat.h> -#include <sys/time.h> - -#include "libpq-fe.h" - -/* Use port in the private/dynamic port number range */ -#define DEF_PGUPORT 50432 - -/* Allocate for null byte */ -#define USER_NAME_SIZE 128 - -#define MAX_STRING 1024 -#define LINE_ALLOC 4096 -#define QUERY_ALLOC 8192 - -#define MIGRATOR_API_VERSION 1 - -#define MESSAGE_WIDTH 60 - -#define GET_MAJOR_VERSION(v) ((v) / 100) - -/* contains both global db information and CREATE DATABASE commands */ -#define GLOBALS_DUMP_FILE "pg_upgrade_dump_globals.sql" -#define DB_DUMP_FILE_MASK "pg_upgrade_dump_%u.custom" - -#define DB_DUMP_LOG_FILE_MASK "pg_upgrade_dump_%u.log" -#define SERVER_LOG_FILE "pg_upgrade_server.log" -#define UTILITY_LOG_FILE "pg_upgrade_utility.log" -#define INTERNAL_LOG_FILE "pg_upgrade_internal.log" - -extern char *output_files[]; - -/* - * WIN32 files do not accept writes from multiple processes - * - * On Win32, we can't send both pg_upgrade output and command output to the - * same file because we get the error: "The process cannot access the file - * because it is being used by another process." so send the pg_ctl - * command-line output to a new file, rather than into the server log file. - * Ideally we could use UTILITY_LOG_FILE for this, but some Windows platforms - * keep the pg_ctl output file open by the running postmaster, even after - * pg_ctl exits. 
- * - * We could use the Windows pgwin32_open() flags to allow shared file - * writes but is unclear how all other tools would use those flags, so - * we just avoid it and log a little differently on Windows; we adjust - * the error message appropriately. - */ -#ifndef WIN32 -#define SERVER_START_LOG_FILE SERVER_LOG_FILE -#define SERVER_STOP_LOG_FILE SERVER_LOG_FILE -#else -#define SERVER_START_LOG_FILE "pg_upgrade_server_start.log" -/* - * "pg_ctl start" keeps SERVER_START_LOG_FILE and SERVER_LOG_FILE open - * while the server is running, so we use UTILITY_LOG_FILE for "pg_ctl - * stop". - */ -#define SERVER_STOP_LOG_FILE UTILITY_LOG_FILE -#endif - - -#ifndef WIN32 -#define pg_copy_file copy_file -#define pg_mv_file rename -#define pg_link_file link -#define PATH_SEPARATOR '/' -#define RM_CMD "rm -f" -#define RMDIR_CMD "rm -rf" -#define SCRIPT_EXT "sh" -#define ECHO_QUOTE "'" -#define ECHO_BLANK "" -#else -#define pg_copy_file CopyFile -#define pg_mv_file pgrename -#define pg_link_file win32_pghardlink -#define PATH_SEPARATOR '\\' -#define RM_CMD "DEL /q" -#define RMDIR_CMD "RMDIR /s/q" -#define SCRIPT_EXT "bat" -#define EXE_EXT ".exe" -#define ECHO_QUOTE "" -#define ECHO_BLANK "." -#endif - -#define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \ - (cluster) == &new_cluster ? "new" : "none") - -#define atooid(x) ((Oid) strtoul((x), NULL, 10)) - -/* OID system catalog preservation added during PG 9.0 development */ -#define TABLE_SPACE_SUBDIRS_CAT_VER 201001111 -/* postmaster/postgres -b (binary_upgrade) flag added during PG 9.1 development */ -#define BINARY_UPGRADE_SERVER_FLAG_CAT_VER 201104251 -/* - * Visibility map changed with this 9.2 commit, - * 8f9fe6edce358f7904e0db119416b4d1080a83aa; pick later catalog version. - */ -#define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031 - -/* - * pg_multixact format changed in 9.3 commit 0ac5ad5134f2769ccbaefec73844f85, - * ("Improve concurrency of foreign key locking") which also updated catalog - * version to this value. pg_upgrade behavior depends on whether old and new - * server versions are both newer than this, or only the new one is. - */ -#define MULTIXACT_FORMATCHANGE_CAT_VER 201301231 - -/* - * Each relation is represented by a relinfo structure. - */ -typedef struct -{ - /* Can't use NAMEDATALEN; not guaranteed to fit on client */ - char *nspname; /* namespace name */ - char *relname; /* relation name */ - Oid reloid; /* relation oid */ - Oid relfilenode; /* relation relfile node */ - /* relation tablespace path, or "" for the cluster default */ - char *tablespace; - bool nsp_alloc; - bool tblsp_alloc; -} RelInfo; - -typedef struct -{ - RelInfo *rels; - int nrels; -} RelInfoArr; - -/* - * The following structure represents a relation mapping. - */ -typedef struct -{ - const char *old_tablespace; - const char *new_tablespace; - const char *old_tablespace_suffix; - const char *new_tablespace_suffix; - Oid old_db_oid; - Oid new_db_oid; - - /* - * old/new relfilenodes might differ for pg_largeobject(_metadata) indexes - * due to VACUUM FULL or REINDEX. Other relfilenodes are preserved. 
- */ - Oid old_relfilenode; - Oid new_relfilenode; - /* the rest are used only for logging and error reporting */ - char *nspname; /* namespaces */ - char *relname; -} FileNameMap; - -/* - * Structure to store database information - */ -typedef struct -{ - Oid db_oid; /* oid of the database */ - char *db_name; /* database name */ - char db_tablespace[MAXPGPATH]; /* database default tablespace - * path */ - RelInfoArr rel_arr; /* array of all user relinfos */ -} DbInfo; - -typedef struct -{ - DbInfo *dbs; /* array of db infos */ - int ndbs; /* number of db infos */ -} DbInfoArr; - -/* - * The following structure is used to hold pg_control information. - * Rather than using the backend's control structure we use our own - * structure to avoid pg_control version issues between releases. - */ -typedef struct -{ - uint32 ctrl_ver; - uint32 cat_ver; - char nextxlogfile[25]; - uint32 chkpnt_tli; - uint32 chkpnt_nxtxid; - uint32 chkpnt_nxtoid; - uint32 chkpnt_nxtmulti; - uint32 chkpnt_nxtmxoff; - uint32 chkpnt_oldstMulti; - uint32 align; - uint32 blocksz; - uint32 largesz; - uint32 walsz; - uint32 walseg; - uint32 ident; - uint32 index; - uint32 toast; - bool date_is_int; - bool float8_pass_by_value; - bool data_checksum_version; - char *lc_collate; - char *lc_ctype; - char *encoding; -} ControlData; - -/* - * Enumeration to denote link modes - */ -typedef enum -{ - TRANSFER_MODE_COPY, - TRANSFER_MODE_LINK -} transferMode; - -/* - * Enumeration to denote pg_log modes - */ -typedef enum -{ - PG_VERBOSE, - PG_STATUS, - PG_REPORT, - PG_WARNING, - PG_FATAL -} eLogType; - - -typedef long pgpid_t; - - -/* - * cluster - * - * information about each cluster - */ -typedef struct -{ - ControlData controldata; /* pg_control information */ - DbInfoArr dbarr; /* dbinfos array */ - char *pgdata; /* pathname for cluster's $PGDATA directory */ - char *pgconfig; /* pathname for cluster's config file - * directory */ - char *bindir; /* pathname for cluster's executable directory */ - char *pgopts; /* options to pass to the server, like pg_ctl - * -o */ - char *sockdir; /* directory for Unix Domain socket, if any */ - unsigned short port; /* port number where postmaster is waiting */ - uint32 major_version; /* PG_VERSION of cluster */ - char major_version_str[64]; /* string PG_VERSION of cluster */ - uint32 bin_version; /* version returned from pg_ctl */ - Oid pg_database_oid; /* OID of pg_database relation */ - Oid install_role_oid; /* OID of connected role */ - Oid role_count; /* number of roles defined in the cluster */ - const char *tablespace_suffix; /* directory specification */ -} ClusterInfo; - - -/* - * LogOpts -*/ -typedef struct -{ - FILE *internal; /* internal log FILE */ - bool verbose; /* TRUE -> be verbose in messages */ - bool retain; /* retain log files on success */ -} LogOpts; - - -/* - * UserOpts -*/ -typedef struct -{ - bool check; /* TRUE -> ask user for permission to make - * changes */ - transferMode transfer_mode; /* copy files or link them? 
*/ - int jobs; -} UserOpts; - - -/* - * OSInfo - */ -typedef struct -{ - const char *progname; /* complete pathname for this program */ - char *exec_path; /* full path to my executable */ - char *user; /* username for clusters */ - bool user_specified; /* user specified on command-line */ - char **old_tablespaces; /* tablespaces */ - int num_old_tablespaces; - char **libraries; /* loadable libraries */ - int num_libraries; - ClusterInfo *running_cluster; -} OSInfo; - - -/* - * Global variables - */ -extern LogOpts log_opts; -extern UserOpts user_opts; -extern ClusterInfo old_cluster, - new_cluster; -extern OSInfo os_info; - - -/* check.c */ - -void output_check_banner(bool live_check); -void check_and_dump_old_cluster(bool live_check, - char **sequence_script_file_name); -void check_new_cluster(void); -void report_clusters_compatible(void); -void issue_warnings(char *sequence_script_file_name); -void output_completion_banner(char *analyze_script_file_name, - char *deletion_script_file_name); -void check_cluster_versions(void); -void check_cluster_compatibility(bool live_check); -void create_script_for_old_cluster_deletion(char **deletion_script_file_name); -void create_script_for_cluster_analyze(char **analyze_script_file_name); - - -/* controldata.c */ - -void get_control_data(ClusterInfo *cluster, bool live_check); -void check_control_data(ControlData *oldctrl, ControlData *newctrl); -void disable_old_cluster(void); - - -/* dump.c */ - -void generate_old_dump(void); - - -/* exec.c */ - -#define EXEC_PSQL_ARGS "--echo-queries --set ON_ERROR_STOP=on --no-psqlrc --dbname=template1" -bool -exec_prog(const char *log_file, const char *opt_log_file, - bool throw_error, const char *fmt,...) -__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5))); -void verify_directories(void); -bool pid_lock_file_exists(const char *datadir); - - -/* file.c */ - -#ifdef PAGE_CONVERSION -typedef const char *(*pluginStartup) (uint16 migratorVersion, - uint16 *pluginVersion, uint16 newPageVersion, - uint16 oldPageVersion, void **pluginData); -typedef const char *(*pluginConvertFile) (void *pluginData, - const char *dstName, const char *srcName); -typedef const char *(*pluginConvertPage) (void *pluginData, - const char *dstPage, const char *srcPage); -typedef const char *(*pluginShutdown) (void *pluginData); - -typedef struct -{ - uint16 oldPageVersion; /* Page layout version of the old cluster */ - uint16 newPageVersion; /* Page layout version of the new cluster */ - uint16 pluginVersion; /* API version of converter plugin */ - void *pluginData; /* Plugin data (set by plugin) */ - pluginStartup startup; /* Pointer to plugin's startup function */ - pluginConvertFile convertFile; /* Pointer to plugin's file converter - * function */ - pluginConvertPage convertPage; /* Pointer to plugin's page converter - * function */ - pluginShutdown shutdown; /* Pointer to plugin's shutdown function */ -} pageCnvCtx; - -const pageCnvCtx *setupPageConverter(void); -#else -/* dummy */ -typedef void *pageCnvCtx; -#endif - -const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src, - const char *dst, bool force); -const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src, - const char *dst); - -void check_hard_link(void); -FILE *fopen_priv(const char *path, const char *mode); - -/* function.c */ - -void install_support_functions_in_new_db(const char *db_name); -void uninstall_support_functions_from_new_cluster(void); -void get_loadable_libraries(void); -void check_loadable_libraries(void); - -/* info.c */ - 
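The exec_prog() declaration above (under /* exec.c */) is the workhorse for running external utilities: a per-step log file, an optional second log file, a flag that turns a non-zero exit status into a fatal error, and a printf-style command string checked via PG_PRINTF_ATTRIBUTE. As a minimal sketch of the calling convention only, here is the shape of such a call; every identifier already appears elsewhere in this commit, and the command shown simply mirrors the pg_resetxlog invocation in pg_upgrade.c rather than introducing new behavior:

    /* sketch: run one external utility against the new cluster, aborting on failure */
    exec_prog(UTILITY_LOG_FILE,     /* log file for this step */
              NULL,                 /* no separate command-output log file */
              true,                 /* throw_error: report and exit on failure */
              "\"%s/pg_resetxlog\" -l %s \"%s\"",
              new_cluster.bindir,
              old_cluster.controldata.nextxlogfile,
              new_cluster.pgdata);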
-FileNameMap *gen_db_file_maps(DbInfo *old_db, - DbInfo *new_db, int *nmaps, const char *old_pgdata, - const char *new_pgdata); -void get_db_and_rel_infos(ClusterInfo *cluster); -void print_maps(FileNameMap *maps, int n, - const char *db_name); - -/* option.c */ - -void parseCommandLine(int argc, char *argv[]); -void adjust_data_dir(ClusterInfo *cluster); -void get_sock_dir(ClusterInfo *cluster, bool live_check); - -/* relfilenode.c */ - -void get_pg_database_relfilenode(ClusterInfo *cluster); -void transfer_all_new_tablespaces(DbInfoArr *old_db_arr, - DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata); -void transfer_all_new_dbs(DbInfoArr *old_db_arr, - DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata, - char *old_tablespace); - -/* tablespace.c */ - -void init_tablespaces(void); - - -/* server.c */ - -PGconn *connectToServer(ClusterInfo *cluster, const char *db_name); -PGresult * -executeQueryOrDie(PGconn *conn, const char *fmt,...) -__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); - -char *cluster_conn_opts(ClusterInfo *cluster); - -bool start_postmaster(ClusterInfo *cluster, bool throw_error); -void stop_postmaster(bool fast); -uint32 get_major_server_version(ClusterInfo *cluster); -void check_pghost_envvar(void); - - -/* util.c */ - -char *quote_identifier(const char *s); -int get_user_info(char **user_name_p); -void check_ok(void); -void -report_status(eLogType type, const char *fmt,...) -__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); -void -pg_log(eLogType type, const char *fmt,...) -__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); -void -pg_fatal(const char *fmt,...) -__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2), noreturn)); -void end_progress_output(void); -void -prep_status(const char *fmt,...) -__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); -void check_ok(void); -const char *getErrorText(int errNum); -unsigned int str2uint(const char *str); -void pg_putenv(const char *var, const char *val); - - -/* version.c */ - -void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, - bool check_mode); -void old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster); - -/* version_old_8_3.c */ - -void old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster); -void old_8_3_check_for_tsquery_usage(ClusterInfo *cluster); -void old_8_3_check_ltree_usage(ClusterInfo *cluster); -void old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode); -void old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode); -void old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster, - bool check_mode); -char *old_8_3_create_sequence_script(ClusterInfo *cluster); - -/* parallel.c */ -void -parallel_exec_prog(const char *log_file, const char *opt_log_file, - const char *fmt,...) 
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4))); -void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata, - char *old_tablespace); -bool reap_child(bool wait_for_child); diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c deleted file mode 100644 index aa6aafde5e..0000000000 --- a/contrib/pg_upgrade/relfilenode.c +++ /dev/null @@ -1,296 +0,0 @@ -/* - * relfilenode.c - * - * relfilenode functions - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/relfilenode.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include "catalog/pg_class.h" -#include "access/transam.h" - - -static void transfer_single_new_db(pageCnvCtx *pageConverter, - FileNameMap *maps, int size, char *old_tablespace); -static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map, - const char *suffix); - - -/* - * transfer_all_new_tablespaces() - * - * Responsible for upgrading all database. invokes routines to generate mappings and then - * physically link the databases. - */ -void -transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata) -{ - pg_log(PG_REPORT, "%s user relation files\n", - user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying"); - - /* - * Transfering files by tablespace is tricky because a single database can - * use multiple tablespaces. For non-parallel mode, we just pass a NULL - * tablespace path, which matches all tablespaces. In parallel mode, we - * pass the default tablespace and all user-created tablespaces and let - * those operations happen in parallel. - */ - if (user_opts.jobs <= 1) - parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, - new_pgdata, NULL); - else - { - int tblnum; - - /* transfer default tablespace */ - parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, - new_pgdata, old_pgdata); - - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) - parallel_transfer_all_new_dbs(old_db_arr, - new_db_arr, - old_pgdata, - new_pgdata, - os_info.old_tablespaces[tblnum]); - /* reap all children */ - while (reap_child(true) == true) - ; - } - - end_progress_output(); - check_ok(); - - return; -} - - -/* - * transfer_all_new_dbs() - * - * Responsible for upgrading all database. invokes routines to generate mappings and then - * physically link the databases. - */ -void -transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr, - char *old_pgdata, char *new_pgdata, char *old_tablespace) -{ - int old_dbnum, - new_dbnum; - - /* Scan the old cluster databases and transfer their files */ - for (old_dbnum = new_dbnum = 0; - old_dbnum < old_db_arr->ndbs; - old_dbnum++, new_dbnum++) - { - DbInfo *old_db = &old_db_arr->dbs[old_dbnum], - *new_db = NULL; - FileNameMap *mappings; - int n_maps; - pageCnvCtx *pageConverter = NULL; - - /* - * Advance past any databases that exist in the new cluster but not in - * the old, e.g. "postgres". (The user might have removed the - * 'postgres' database from the old cluster.) 
- */ - for (; new_dbnum < new_db_arr->ndbs; new_dbnum++) - { - new_db = &new_db_arr->dbs[new_dbnum]; - if (strcmp(old_db->db_name, new_db->db_name) == 0) - break; - } - - if (new_dbnum >= new_db_arr->ndbs) - pg_fatal("old database \"%s\" not found in the new cluster\n", - old_db->db_name); - - n_maps = 0; - mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata, - new_pgdata); - - if (n_maps) - { - print_maps(mappings, n_maps, new_db->db_name); - -#ifdef PAGE_CONVERSION - pageConverter = setupPageConverter(); -#endif - transfer_single_new_db(pageConverter, mappings, n_maps, - old_tablespace); - - pg_free(mappings); - } - } - - return; -} - - -/* - * get_pg_database_relfilenode() - * - * Retrieves the relfilenode for a few system-catalog tables. We need these - * relfilenodes later in the upgrade process. - */ -void -get_pg_database_relfilenode(ClusterInfo *cluster) -{ - PGconn *conn = connectToServer(cluster, "template1"); - PGresult *res; - int i_relfile; - - res = executeQueryOrDie(conn, - "SELECT c.relname, c.relfilenode " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n " - "WHERE c.relnamespace = n.oid AND " - " n.nspname = 'pg_catalog' AND " - " c.relname = 'pg_database' " - "ORDER BY c.relname"); - - i_relfile = PQfnumber(res, "relfilenode"); - cluster->pg_database_oid = atooid(PQgetvalue(res, 0, i_relfile)); - - PQclear(res); - PQfinish(conn); -} - - -/* - * transfer_single_new_db() - * - * create links for mappings stored in "maps" array. - */ -static void -transfer_single_new_db(pageCnvCtx *pageConverter, - FileNameMap *maps, int size, char *old_tablespace) -{ - int mapnum; - bool vm_crashsafe_match = true; - - /* - * Do the old and new cluster disagree on the crash-safetiness of the vm - * files? If so, do not copy them. - */ - if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER && - new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER) - vm_crashsafe_match = false; - - for (mapnum = 0; mapnum < size; mapnum++) - { - if (old_tablespace == NULL || - strcmp(maps[mapnum].old_tablespace, old_tablespace) == 0) - { - /* transfer primary file */ - transfer_relfile(pageConverter, &maps[mapnum], ""); - - /* fsm/vm files added in PG 8.4 */ - if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) - { - /* - * Copy/link any fsm and vm files, if they exist - */ - transfer_relfile(pageConverter, &maps[mapnum], "_fsm"); - if (vm_crashsafe_match) - transfer_relfile(pageConverter, &maps[mapnum], "_vm"); - } - } - } -} - - -/* - * transfer_relfile() - * - * Copy or link file from old cluster to new one. - */ -static void -transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map, - const char *type_suffix) -{ - const char *msg; - char old_file[MAXPGPATH]; - char new_file[MAXPGPATH]; - int fd; - int segno; - char extent_suffix[65]; - - /* - * Now copy/link any related segments as well. Remember, PG breaks large - * files into 1GB segments, the first segment has no extension, subsequent - * segments are named relfilenode.1, relfilenode.2, relfilenode.3. copied. 
- */ - for (segno = 0;; segno++) - { - if (segno == 0) - extent_suffix[0] = '\0'; - else - snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno); - - snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", - map->old_tablespace, - map->old_tablespace_suffix, - map->old_db_oid, - map->old_relfilenode, - type_suffix, - extent_suffix); - snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", - map->new_tablespace, - map->new_tablespace_suffix, - map->new_db_oid, - map->new_relfilenode, - type_suffix, - extent_suffix); - - /* Is it an extent, fsm, or vm file? */ - if (type_suffix[0] != '\0' || segno != 0) - { - /* Did file open fail? */ - if ((fd = open(old_file, O_RDONLY, 0)) == -1) - { - /* File does not exist? That's OK, just return */ - if (errno == ENOENT) - return; - else - pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\" to \"%s\"): %s\n", - map->nspname, map->relname, old_file, new_file, - getErrorText(errno)); - } - close(fd); - } - - unlink(new_file); - - /* Copying files might take some time, so give feedback. */ - pg_log(PG_STATUS, "%s", old_file); - - if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL)) - pg_fatal("This upgrade requires page-by-page conversion, " - "you must use copy mode instead of link mode.\n"); - - if (user_opts.transfer_mode == TRANSFER_MODE_COPY) - { - pg_log(PG_VERBOSE, "copying \"%s\" to \"%s\"\n", old_file, new_file); - - if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL) - pg_fatal("error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n", - map->nspname, map->relname, old_file, new_file, msg); - } - else - { - pg_log(PG_VERBOSE, "linking \"%s\" to \"%s\"\n", old_file, new_file); - - if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL) - pg_fatal("error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n", - map->nspname, map->relname, old_file, new_file, msg); - } - } - - return; -} diff --git a/contrib/pg_upgrade/server.c b/contrib/pg_upgrade/server.c deleted file mode 100644 index 5f4b5307cb..0000000000 --- a/contrib/pg_upgrade/server.c +++ /dev/null @@ -1,348 +0,0 @@ -/* - * server.c - * - * database server functions - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/server.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - - -static PGconn *get_db_conn(ClusterInfo *cluster, const char *db_name); - - -/* - * connectToServer() - * - * Connects to the desired database on the designated server. - * If the connection attempt fails, this function logs an error - * message and calls exit() to kill the program. 
- */ -PGconn * -connectToServer(ClusterInfo *cluster, const char *db_name) -{ - PGconn *conn = get_db_conn(cluster, db_name); - - if (conn == NULL || PQstatus(conn) != CONNECTION_OK) - { - pg_log(PG_REPORT, "connection to database failed: %s\n", - PQerrorMessage(conn)); - - if (conn) - PQfinish(conn); - - printf("Failure, exiting\n"); - exit(1); - } - - return conn; -} - - -/* - * get_db_conn() - * - * get database connection, using named database + standard params for cluster - */ -static PGconn * -get_db_conn(ClusterInfo *cluster, const char *db_name) -{ - char conn_opts[2 * NAMEDATALEN + MAXPGPATH + 100]; - - if (cluster->sockdir) - snprintf(conn_opts, sizeof(conn_opts), - "dbname = '%s' user = '%s' host = '%s' port = %d", - db_name, os_info.user, cluster->sockdir, cluster->port); - else - snprintf(conn_opts, sizeof(conn_opts), - "dbname = '%s' user = '%s' port = %d", - db_name, os_info.user, cluster->port); - - return PQconnectdb(conn_opts); -} - - -/* - * cluster_conn_opts() - * - * Return standard command-line options for connecting to this cluster when - * using psql, pg_dump, etc. Ideally this would match what get_db_conn() - * sets, but the utilities we need aren't very consistent about the treatment - * of database name options, so we leave that out. - * - * Note result is in static storage, so use it right away. - */ -char * -cluster_conn_opts(ClusterInfo *cluster) -{ - static char conn_opts[MAXPGPATH + NAMEDATALEN + 100]; - - if (cluster->sockdir) - snprintf(conn_opts, sizeof(conn_opts), - "--host \"%s\" --port %d --username \"%s\"", - cluster->sockdir, cluster->port, os_info.user); - else - snprintf(conn_opts, sizeof(conn_opts), - "--port %d --username \"%s\"", - cluster->port, os_info.user); - - return conn_opts; -} - - -/* - * executeQueryOrDie() - * - * Formats a query string from the given arguments and executes the - * resulting query. If the query fails, this function logs an error - * message and calls exit() to kill the program. - */ -PGresult * -executeQueryOrDie(PGconn *conn, const char *fmt,...) -{ - static char command[8192]; - va_list args; - PGresult *result; - ExecStatusType status; - - va_start(args, fmt); - vsnprintf(command, sizeof(command), fmt, args); - va_end(args); - - pg_log(PG_VERBOSE, "executing: %s\n", command); - result = PQexec(conn, command); - status = PQresultStatus(result); - - if ((status != PGRES_TUPLES_OK) && (status != PGRES_COMMAND_OK)) - { - pg_log(PG_REPORT, "SQL command failed\n%s\n%s\n", command, - PQerrorMessage(conn)); - PQclear(result); - PQfinish(conn); - printf("Failure, exiting\n"); - exit(1); - } - else - return result; -} - - -/* - * get_major_server_version() - * - * gets the version (in unsigned int form) for the given datadir. Assumes - * that datadir is an absolute path to a valid pgdata directory. The version - * is retrieved by reading the PG_VERSION file. 
- */ -uint32 -get_major_server_version(ClusterInfo *cluster) -{ - FILE *version_fd; - char ver_filename[MAXPGPATH]; - int integer_version = 0; - int fractional_version = 0; - - snprintf(ver_filename, sizeof(ver_filename), "%s/PG_VERSION", - cluster->pgdata); - if ((version_fd = fopen(ver_filename, "r")) == NULL) - pg_fatal("could not open version file: %s\n", ver_filename); - - if (fscanf(version_fd, "%63s", cluster->major_version_str) == 0 || - sscanf(cluster->major_version_str, "%d.%d", &integer_version, - &fractional_version) != 2) - pg_fatal("could not get version from %s\n", cluster->pgdata); - - fclose(version_fd); - - return (100 * integer_version + fractional_version) * 100; -} - - -static void -stop_postmaster_atexit(void) -{ - stop_postmaster(true); -} - - -bool -start_postmaster(ClusterInfo *cluster, bool throw_error) -{ - char cmd[MAXPGPATH * 4 + 1000]; - PGconn *conn; - bool exit_hook_registered = false; - bool pg_ctl_return = false; - char socket_string[MAXPGPATH + 200]; - - if (!exit_hook_registered) - { - atexit(stop_postmaster_atexit); - exit_hook_registered = true; - } - - socket_string[0] = '\0'; - -#ifdef HAVE_UNIX_SOCKETS - /* prevent TCP/IP connections, restrict socket access */ - strcat(socket_string, - " -c listen_addresses='' -c unix_socket_permissions=0700"); - - /* Have a sockdir? Tell the postmaster. */ - if (cluster->sockdir) - snprintf(socket_string + strlen(socket_string), - sizeof(socket_string) - strlen(socket_string), - " -c %s='%s'", - (GET_MAJOR_VERSION(cluster->major_version) < 903) ? - "unix_socket_directory" : "unix_socket_directories", - cluster->sockdir); -#endif - - /* - * Using autovacuum=off disables cleanup vacuum and analyze, but freeze - * vacuums can still happen, so we set autovacuum_freeze_max_age to its - * maximum. We assume all datfrozenxid and relfrozen values are less than - * a gap of 2000000000 from the current xid counter, so autovacuum will - * not touch them. - * - * Turn off durability requirements to improve object creation speed, and - * we only modify the new cluster, so only use it there. If there is a - * crash, the new cluster has to be recreated anyway. fsync=off is a big - * win on ext4. - */ - snprintf(cmd, sizeof(cmd), - "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" -o \"-p %d%s%s %s%s\" start", - cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port, - (cluster->controldata.cat_ver >= - BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? " -b" : - " -c autovacuum=off -c autovacuum_freeze_max_age=2000000000", - (cluster == &new_cluster) ? - " -c synchronous_commit=off -c fsync=off -c full_page_writes=off" : "", - cluster->pgopts ? cluster->pgopts : "", socket_string); - - /* - * Don't throw an error right away, let connecting throw the error because - * it might supply a reason for the failure. - */ - pg_ctl_return = exec_prog(SERVER_START_LOG_FILE, - /* pass both file names if they differ */ - (strcmp(SERVER_LOG_FILE, - SERVER_START_LOG_FILE) != 0) ? - SERVER_LOG_FILE : NULL, - false, - "%s", cmd); - - /* Did it fail and we are just testing if the server could be started? */ - if (!pg_ctl_return && !throw_error) - return false; - - /* - * We set this here to make sure atexit() shuts down the server, but only - * if we started the server successfully. We do it before checking for - * connectivity in case the server started but there is a connectivity - * failure. If pg_ctl did not return success, we will exit below. 
- * - * Pre-9.1 servers do not have PQping(), so we could be leaving the server - * running if authentication was misconfigured, so someday we might went - * to be more aggressive about doing server shutdowns even if pg_ctl - * fails, but now (2013-08-14) it seems prudent to be cautious. We don't - * want to shutdown a server that might have been accidentally started - * during the upgrade. - */ - if (pg_ctl_return) - os_info.running_cluster = cluster; - - /* - * pg_ctl -w might have failed because the server couldn't be started, or - * there might have been a connection problem in _checking_ if the server - * has started. Therefore, even if pg_ctl failed, we continue and test - * for connectivity in case we get a connection reason for the failure. - */ - if ((conn = get_db_conn(cluster, "template1")) == NULL || - PQstatus(conn) != CONNECTION_OK) - { - pg_log(PG_REPORT, "\nconnection to database failed: %s\n", - PQerrorMessage(conn)); - if (conn) - PQfinish(conn); - pg_fatal("could not connect to %s postmaster started with the command:\n" - "%s\n", - CLUSTER_NAME(cluster), cmd); - } - PQfinish(conn); - - /* - * If pg_ctl failed, and the connection didn't fail, and throw_error is - * enabled, fail now. This could happen if the server was already - * running. - */ - if (!pg_ctl_return) - pg_fatal("pg_ctl failed to start the %s server, or connection failed\n", - CLUSTER_NAME(cluster)); - - return true; -} - - -void -stop_postmaster(bool fast) -{ - ClusterInfo *cluster; - - if (os_info.running_cluster == &old_cluster) - cluster = &old_cluster; - else if (os_info.running_cluster == &new_cluster) - cluster = &new_cluster; - else - return; /* no cluster running */ - - exec_prog(SERVER_STOP_LOG_FILE, NULL, !fast, - "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" %s stop", - cluster->bindir, cluster->pgconfig, - cluster->pgopts ? cluster->pgopts : "", - fast ? 
"-m fast" : ""); - - os_info.running_cluster = NULL; -} - - -/* - * check_pghost_envvar() - * - * Tests that PGHOST does not point to a non-local server - */ -void -check_pghost_envvar(void) -{ - PQconninfoOption *option; - PQconninfoOption *start; - - /* Get valid libpq env vars from the PQconndefaults function */ - - start = PQconndefaults(); - - if (!start) - pg_fatal("out of memory\n"); - - for (option = start; option->keyword != NULL; option++) - { - if (option->envvar && (strcmp(option->envvar, "PGHOST") == 0 || - strcmp(option->envvar, "PGHOSTADDR") == 0)) - { - const char *value = getenv(option->envvar); - - if (value && strlen(value) > 0 && - /* check for 'local' host values */ - (strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 && - strcmp(value, "::1") != 0 && value[0] != '/')) - pg_fatal("libpq environment variable %s has a non-local server value: %s\n", - option->envvar, value); - } - } - - /* Free the memory that libpq allocated on our behalf */ - PQconninfoFree(start); -} diff --git a/contrib/pg_upgrade/tablespace.c b/contrib/pg_upgrade/tablespace.c deleted file mode 100644 index 68e9cb241c..0000000000 --- a/contrib/pg_upgrade/tablespace.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * tablespace.c - * - * tablespace functions - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/tablespace.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include <sys/types.h> - -static void get_tablespace_paths(void); -static void set_tablespace_directory_suffix(ClusterInfo *cluster); - - -void -init_tablespaces(void) -{ - get_tablespace_paths(); - - set_tablespace_directory_suffix(&old_cluster); - set_tablespace_directory_suffix(&new_cluster); - - if (os_info.num_old_tablespaces > 0 && - strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0) - pg_fatal("Cannot upgrade to/from the same system catalog version when\n" - "using tablespaces.\n"); -} - - -/* - * get_tablespace_paths() - * - * Scans pg_tablespace and returns a malloc'ed array of all tablespace - * paths. Its the caller's responsibility to free the array. - */ -static void -get_tablespace_paths(void) -{ - PGconn *conn = connectToServer(&old_cluster, "template1"); - PGresult *res; - int tblnum; - int i_spclocation; - char query[QUERY_ALLOC]; - - snprintf(query, sizeof(query), - "SELECT %s " - "FROM pg_catalog.pg_tablespace " - "WHERE spcname != 'pg_default' AND " - " spcname != 'pg_global'", - /* 9.2 removed the spclocation column */ - (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ? - "spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation"); - - res = executeQueryOrDie(conn, "%s", query); - - if ((os_info.num_old_tablespaces = PQntuples(res)) != 0) - os_info.old_tablespaces = (char **) pg_malloc( - os_info.num_old_tablespaces * sizeof(char *)); - else - os_info.old_tablespaces = NULL; - - i_spclocation = PQfnumber(res, "spclocation"); - - for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) - { - struct stat statBuf; - - os_info.old_tablespaces[tblnum] = pg_strdup( - PQgetvalue(res, tblnum, i_spclocation)); - - /* - * Check that the tablespace path exists and is a directory. - * Effectively, this is checking only for tables/indexes in - * non-existent tablespace directories. Databases located in - * non-existent tablespaces already throw a backend error. 
- * Non-existent tablespace directories can occur when a data directory - * that contains user tablespaces is moved as part of pg_upgrade - * preparation and the symbolic links are not updated. - */ - if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0) - { - if (errno == ENOENT) - report_status(PG_FATAL, - "tablespace directory \"%s\" does not exist\n", - os_info.old_tablespaces[tblnum]); - else - report_status(PG_FATAL, - "cannot stat() tablespace directory \"%s\": %s\n", - os_info.old_tablespaces[tblnum], getErrorText(errno)); - } - if (!S_ISDIR(statBuf.st_mode)) - report_status(PG_FATAL, - "tablespace path \"%s\" is not a directory\n", - os_info.old_tablespaces[tblnum]); - } - - PQclear(res); - - PQfinish(conn); - - return; -} - - -static void -set_tablespace_directory_suffix(ClusterInfo *cluster) -{ - if (GET_MAJOR_VERSION(cluster->major_version) <= 804) - cluster->tablespace_suffix = pg_strdup(""); - else - { - /* This cluster has a version-specific subdirectory */ - - /* The leading slash is needed to start a new directory. */ - cluster->tablespace_suffix = psprintf("/PG_%s_%d", - cluster->major_version_str, - cluster->controldata.cat_ver); - } -} diff --git a/contrib/pg_upgrade/test.sh b/contrib/pg_upgrade/test.sh deleted file mode 100644 index baa7d4748b..0000000000 --- a/contrib/pg_upgrade/test.sh +++ /dev/null @@ -1,191 +0,0 @@ -#!/bin/sh - -# contrib/pg_upgrade/test.sh -# -# Test driver for pg_upgrade. Initializes a new database cluster, -# runs the regression tests (to put in some data), runs pg_dumpall, -# runs pg_upgrade, runs pg_dumpall again, compares the dumps. -# -# Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group -# Portions Copyright (c) 1994, Regents of the University of California - -set -e - -: ${MAKE=make} - -# Guard against parallel make issues (see comments in pg_regress.c) -unset MAKEFLAGS -unset MAKELEVEL - -# Set listen_addresses desirably -testhost=`uname -s` - -case $testhost in - MINGW*) LISTEN_ADDRESSES="localhost" ;; - *) LISTEN_ADDRESSES="" ;; -esac - -POSTMASTER_OPTS="-F -c listen_addresses=$LISTEN_ADDRESSES" - -temp_root=$PWD/tmp_check - -if [ "$1" = '--install' ]; then - temp_install=$temp_root/install - bindir=$temp_install/$bindir - libdir=$temp_install/$libdir - - "$MAKE" -s -C ../.. install DESTDIR="$temp_install" - "$MAKE" -s -C ../pg_upgrade_support install DESTDIR="$temp_install" - "$MAKE" -s -C . install DESTDIR="$temp_install" - - # platform-specific magic to find the shared libraries; see pg_regress.c - LD_LIBRARY_PATH=$libdir:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH - DYLD_LIBRARY_PATH=$libdir:$DYLD_LIBRARY_PATH - export DYLD_LIBRARY_PATH - LIBPATH=$libdir:$LIBPATH - export LIBPATH - PATH=$libdir:$PATH - - # We need to make it use psql from our temporary installation, - # because otherwise the installcheck run below would try to - # use psql from the proper installation directory, which might - # be outdated or missing. But don't override anything else that's - # already in EXTRA_REGRESS_OPTS. - EXTRA_REGRESS_OPTS="$EXTRA_REGRESS_OPTS --psqldir=$bindir" - export EXTRA_REGRESS_OPTS -fi - -: ${oldbindir=$bindir} - -: ${oldsrc=../..} -oldsrc=`cd "$oldsrc" && pwd` -newsrc=`cd ../.. 
&& pwd` - -PATH=$bindir:$PATH -export PATH - -BASE_PGDATA=$temp_root/data -PGDATA="$BASE_PGDATA.old" -export PGDATA -rm -rf "$BASE_PGDATA" "$PGDATA" - -logdir=$PWD/log -rm -rf "$logdir" -mkdir "$logdir" - -# Clear out any environment vars that might cause libpq to connect to -# the wrong postmaster (cf pg_regress.c) -# -# Some shells, such as NetBSD's, return non-zero from unset if the variable -# is already unset. Since we are operating under 'set -e', this causes the -# script to fail. To guard against this, set them all to an empty string first. -PGDATABASE=""; unset PGDATABASE -PGUSER=""; unset PGUSER -PGSERVICE=""; unset PGSERVICE -PGSSLMODE=""; unset PGSSLMODE -PGREQUIRESSL=""; unset PGREQUIRESSL -PGCONNECT_TIMEOUT=""; unset PGCONNECT_TIMEOUT -PGHOST=""; unset PGHOST -PGHOSTADDR=""; unset PGHOSTADDR - -# Select a non-conflicting port number, similarly to pg_regress.c -PG_VERSION_NUM=`grep '#define PG_VERSION_NUM' $newsrc/src/include/pg_config.h | awk '{print $3}'` -PGPORT=`expr $PG_VERSION_NUM % 16384 + 49152` -export PGPORT - -i=0 -while psql -X postgres </dev/null 2>/dev/null -do - i=`expr $i + 1` - if [ $i -eq 16 ] - then - echo port $PGPORT apparently in use - exit 1 - fi - PGPORT=`expr $PGPORT + 1` - export PGPORT -done - -# buildfarm may try to override port via EXTRA_REGRESS_OPTS ... -EXTRA_REGRESS_OPTS="$EXTRA_REGRESS_OPTS --port=$PGPORT" -export EXTRA_REGRESS_OPTS - -# enable echo so the user can see what is being executed -set -x - -$oldbindir/initdb -N -$oldbindir/pg_ctl start -l "$logdir/postmaster1.log" -o "$POSTMASTER_OPTS" -w -if "$MAKE" -C "$oldsrc" installcheck; then - pg_dumpall -f "$temp_root"/dump1.sql || pg_dumpall1_status=$? - if [ "$newsrc" != "$oldsrc" ]; then - oldpgversion=`psql -A -t -d regression -c "SHOW server_version_num"` - fix_sql="" - case $oldpgversion in - 804??) - fix_sql="UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%'; DROP FUNCTION public.myfunc(integer);" - ;; - 900??) - fix_sql="SET bytea_output TO escape; UPDATE pg_proc SET probin = replace(probin::text, '$oldsrc', '$newsrc')::bytea WHERE probin LIKE '$oldsrc%';" - ;; - 901??) - fix_sql="UPDATE pg_proc SET probin = replace(probin, '$oldsrc', '$newsrc') WHERE probin LIKE '$oldsrc%';" - ;; - esac - psql -d regression -c "$fix_sql;" || psql_fix_sql_status=$? - - mv "$temp_root"/dump1.sql "$temp_root"/dump1.sql.orig - sed "s;$oldsrc;$newsrc;g" "$temp_root"/dump1.sql.orig >"$temp_root"/dump1.sql - fi -else - make_installcheck_status=$? -fi -$oldbindir/pg_ctl -m fast stop -if [ -n "$make_installcheck_status" ]; then - exit 1 -fi -if [ -n "$psql_fix_sql_status" ]; then - exit 1 -fi -if [ -n "$pg_dumpall1_status" ]; then - echo "pg_dumpall of pre-upgrade database cluster failed" - exit 1 -fi - -PGDATA=$BASE_PGDATA - -initdb -N - -pg_upgrade $PG_UPGRADE_OPTS -d "${PGDATA}.old" -D "${PGDATA}" -b "$oldbindir" -B "$bindir" -p "$PGPORT" -P "$PGPORT" - -pg_ctl start -l "$logdir/postmaster2.log" -o "$POSTMASTER_OPTS" -w - -case $testhost in - MINGW*) cmd /c analyze_new_cluster.bat ;; - *) sh ./analyze_new_cluster.sh ;; -esac - -pg_dumpall -f "$temp_root"/dump2.sql || pg_dumpall2_status=$? 
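# The remaining steps are bookkeeping: stop the upgraded cluster, verify the
# post-upgrade pg_dumpall succeeded, run the platform-appropriate
# delete_old_cluster script, and compare the pre- and post-upgrade dumps;
# the test passes only if the two dumps are identical.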
-pg_ctl -m fast stop - -# no need to echo commands anymore -set +x -echo - -if [ -n "$pg_dumpall2_status" ]; then - echo "pg_dumpall of post-upgrade database cluster failed" - exit 1 -fi - -case $testhost in - MINGW*) cmd /c delete_old_cluster.bat ;; - *) sh ./delete_old_cluster.sh ;; -esac - -if diff -q "$temp_root"/dump1.sql "$temp_root"/dump2.sql; then - echo PASSED - exit 0 -else - echo "dumps were not identical" - exit 1 -fi diff --git a/contrib/pg_upgrade/util.c b/contrib/pg_upgrade/util.c deleted file mode 100644 index 3b94057696..0000000000 --- a/contrib/pg_upgrade/util.c +++ /dev/null @@ -1,297 +0,0 @@ -/* - * util.c - * - * utility functions - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/util.c - */ - -#include "postgres_fe.h" - -#include "common/username.h" -#include "pg_upgrade.h" - -#include <signal.h> - - -LogOpts log_opts; - -/* - * report_status() - * - * Displays the result of an operation (ok, failed, error message,...) - */ -void -report_status(eLogType type, const char *fmt,...) -{ - va_list args; - char message[MAX_STRING]; - - va_start(args, fmt); - vsnprintf(message, sizeof(message), fmt, args); - va_end(args); - - pg_log(type, "%s\n", message); -} - - -/* force blank output for progress display */ -void -end_progress_output(void) -{ - /* - * In case nothing printed; pass a space so gcc doesn't complain about - * empty format string. - */ - prep_status(" "); -} - - -/* - * prep_status - * - * Displays a message that describes an operation we are about to begin. - * We pad the message out to MESSAGE_WIDTH characters so that all of the "ok" and - * "failed" indicators line up nicely. - * - * A typical sequence would look like this: - * prep_status("about to flarb the next %d files", fileCount ); - * - * if(( message = flarbFiles(fileCount)) == NULL) - * report_status(PG_REPORT, "ok" ); - * else - * pg_log(PG_FATAL, "failed - %s\n", message ); - */ -void -prep_status(const char *fmt,...) -{ - va_list args; - char message[MAX_STRING]; - - va_start(args, fmt); - vsnprintf(message, sizeof(message), fmt, args); - va_end(args); - - if (strlen(message) > 0 && message[strlen(message) - 1] == '\n') - pg_log(PG_REPORT, "%s", message); - else - /* trim strings that don't end in a newline */ - pg_log(PG_REPORT, "%-*s", MESSAGE_WIDTH, message); -} - - -static -__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0))) -void -pg_log_v(eLogType type, const char *fmt, va_list ap) -{ - char message[MAX_STRING]; - - vsnprintf(message, sizeof(message), fmt, ap); - - /* PG_VERBOSE and PG_STATUS are only output in verbose mode */ - /* fopen() on log_opts.internal might have failed, so check it */ - if (((type != PG_VERBOSE && type != PG_STATUS) || log_opts.verbose) && - log_opts.internal != NULL) - { - if (type == PG_STATUS) - /* status messages need two leading spaces and a newline */ - fprintf(log_opts.internal, " %s\n", message); - else - fprintf(log_opts.internal, "%s", message); - fflush(log_opts.internal); - } - - switch (type) - { - case PG_VERBOSE: - if (log_opts.verbose) - printf("%s", _(message)); - break; - - case PG_STATUS: - /* for output to a display, do leading truncation and append \r */ - if (isatty(fileno(stdout))) - /* -2 because we use a 2-space indent */ - printf(" %s%-*.*s\r", - /* prefix with "..." if we do leading truncation */ - strlen(message) <= MESSAGE_WIDTH - 2 ? "" : "...", - MESSAGE_WIDTH - 2, MESSAGE_WIDTH - 2, - /* optional leading truncation */ - strlen(message) <= MESSAGE_WIDTH - 2 ? 
message : - message + strlen(message) - MESSAGE_WIDTH + 3 + 2); - else - printf(" %s\n", _(message)); - break; - - case PG_REPORT: - case PG_WARNING: - printf("%s", _(message)); - break; - - case PG_FATAL: - printf("\n%s", _(message)); - printf("Failure, exiting\n"); - exit(1); - break; - - default: - break; - } - fflush(stdout); -} - - -void -pg_log(eLogType type, const char *fmt,...) -{ - va_list args; - - va_start(args, fmt); - pg_log_v(type, fmt, args); - va_end(args); -} - - -void -pg_fatal(const char *fmt,...) -{ - va_list args; - - va_start(args, fmt); - pg_log_v(PG_FATAL, fmt, args); - va_end(args); - printf("Failure, exiting\n"); - exit(1); -} - - -void -check_ok(void) -{ - /* all seems well */ - report_status(PG_REPORT, "ok"); - fflush(stdout); -} - - -/* - * quote_identifier() - * Properly double-quote a SQL identifier. - * - * The result should be pg_free'd, but most callers don't bother because - * memory leakage is not a big deal in this program. - */ -char * -quote_identifier(const char *s) -{ - char *result = pg_malloc(strlen(s) * 2 + 3); - char *r = result; - - *r++ = '"'; - while (*s) - { - if (*s == '"') - *r++ = *s; - *r++ = *s; - s++; - } - *r++ = '"'; - *r++ = '\0'; - - return result; -} - - -/* - * get_user_info() - */ -int -get_user_info(char **user_name_p) -{ - int user_id; - const char *user_name; - char *errstr; - -#ifndef WIN32 - user_id = geteuid(); -#else - user_id = 1; -#endif - - user_name = get_user_name(&errstr); - if (!user_name) - pg_fatal("%s\n", errstr); - - /* make a copy */ - *user_name_p = pg_strdup(user_name); - - return user_id; -} - - -/* - * getErrorText() - * - * Returns the text of the error message for the given error number - * - * This feature is factored into a separate function because it is - * system-dependent. - */ -const char * -getErrorText(int errNum) -{ -#ifdef WIN32 - _dosmaperr(GetLastError()); -#endif - return pg_strdup(strerror(errNum)); -} - - -/* - * str2uint() - * - * convert string to oid - */ -unsigned int -str2uint(const char *str) -{ - return strtoul(str, NULL, 10); -} - - -/* - * pg_putenv() - * - * This is like putenv(), but takes two arguments. - * It also does unsetenv() if val is NULL. - */ -void -pg_putenv(const char *var, const char *val) -{ - if (val) - { -#ifndef WIN32 - char *envstr; - - envstr = psprintf("%s=%s", var, val); - putenv(envstr); - - /* - * Do not free envstr because it becomes part of the environment on - * some operating systems. See port/unsetenv.c::unsetenv. 
- */ -#else - SetEnvironmentVariableA(var, val); -#endif - } - else - { -#ifndef WIN32 - unsetenv(var); -#else - SetEnvironmentVariableA(var, ""); -#endif - } -} diff --git a/contrib/pg_upgrade/version.c b/contrib/pg_upgrade/version.c deleted file mode 100644 index 0f9dc079b2..0000000000 --- a/contrib/pg_upgrade/version.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * version.c - * - * Postgres-version-specific routines - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/version.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - - - -/* - * new_9_0_populate_pg_largeobject_metadata() - * new >= 9.0, old <= 8.4 - * 9.0 has a new pg_largeobject permission table - */ -void -new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for large objects"); - - snprintf(output_path, sizeof(output_path), "pg_largeobject.sql"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - int i_count; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* find if there are any large objects */ - res = executeQueryOrDie(conn, - "SELECT count(*) " - "FROM pg_catalog.pg_largeobject "); - - i_count = PQfnumber(res, "count"); - if (atoi(PQgetvalue(res, 0, i_count)) != 0) - { - found = true; - if (!check_mode) - { - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - fprintf(script, "\\connect %s\n", - quote_identifier(active_db->db_name)); - fprintf(script, - "SELECT pg_catalog.lo_create(t.loid)\n" - "FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n"); - } - } - - PQclear(res); - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - report_status(PG_WARNING, "warning"); - if (check_mode) - pg_log(PG_WARNING, "\n" - "Your installation contains large objects. The new database has an\n" - "additional large object permission table. After upgrading, you will be\n" - "given a command to populate the pg_largeobject permission table with\n" - "default permissions.\n\n"); - else - pg_log(PG_WARNING, "\n" - "Your installation contains large objects. The new database has an\n" - "additional large object permission table, so default permissions must be\n" - "defined for all large objects. The file\n" - " %s\n" - "when executed by psql by the database superuser will set the default\n" - "permissions.\n\n", - output_path); - } - else - check_ok(); -} - - -/* - * old_9_3_check_for_line_data_type_usage() - * 9.3 -> 9.4 - * Fully implement the 'line' data type in 9.4, which previously returned - * "not enabled" by default and was only functionally enabled with a - * compile-time switch; 9.4 "line" has different binary and text - * representation formats; checks tables and indexes. 
- */ -void -old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for invalid \"line\" user columns"); - - snprintf(output_path, sizeof(output_path), "tables_using_line.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname, - i_attname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname, a.attname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n, " - " pg_catalog.pg_attribute a " - "WHERE c.oid = a.attrelid AND " - " NOT a.attisdropped AND " - " a.atttypid = 'pg_catalog.line'::pg_catalog.regtype AND " - " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ - " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_attname = PQfnumber(res, "attname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_relname), - PQgetvalue(res, rowno, i_attname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains the \"line\" data type in user tables. This\n" - "data type changed its internal and input/output format between your old\n" - "and new clusters so this cluster cannot currently be upgraded. You can\n" - "remove the problem tables and restart the upgrade. A list of the problem\n" - "columns is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} diff --git a/contrib/pg_upgrade/version_old_8_3.c b/contrib/pg_upgrade/version_old_8_3.c deleted file mode 100644 index 07e79bd609..0000000000 --- a/contrib/pg_upgrade/version_old_8_3.c +++ /dev/null @@ -1,769 +0,0 @@ -/* - * version.c - * - * Postgres-version-specific routines - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade/version_old_8_3.c - */ - -#include "postgres_fe.h" - -#include "pg_upgrade.h" - -#include "access/transam.h" - - -/* - * old_8_3_check_for_name_data_type_usage() - * 8.3 -> 8.4 - * Alignment for the 'name' data type changed to 'char' in 8.4; - * checks tables and indexes. 
- */ -void -old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for invalid \"name\" user columns"); - - snprintf(output_path, sizeof(output_path), "tables_using_name.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname, - i_attname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* - * With a smaller alignment in 8.4, 'name' cannot be used in a - * non-pg_catalog table, except as the first column. (We could tighten - * that condition with enough analysis, but it seems not worth the - * trouble.) - */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname, a.attname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n, " - " pg_catalog.pg_attribute a " - "WHERE c.oid = a.attrelid AND " - " a.attnum > 1 AND " - " NOT a.attisdropped AND " - " a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND " - " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ - " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_attname = PQfnumber(res, "attname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_relname), - PQgetvalue(res, rowno, i_attname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains the \"name\" data type in user tables. This\n" - "data type changed its internal alignment between your old and new\n" - "clusters so this cluster cannot currently be upgraded. You can remove\n" - "the problem tables and restart the upgrade. A list of the problem\n" - "columns is in the file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} - - -/* - * old_8_3_check_for_tsquery_usage() - * 8.3 -> 8.4 - * A new 'prefix' field was added to the 'tsquery' data type in 8.4 - * so upgrading of such fields is impossible. 
- */ -void -old_8_3_check_for_tsquery_usage(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for tsquery user columns"); - - snprintf(output_path, sizeof(output_path), "tables_using_tsquery.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname, - i_attname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* Find any user-defined tsquery columns */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname, a.attname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n, " - " pg_catalog.pg_attribute a " - /* materialized views didn't exist in 8.3, so no need to check 'm' */ - "WHERE c.relkind = 'r' AND " - " c.oid = a.attrelid AND " - " NOT a.attisdropped AND " - " a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND " - " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ - " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_attname = PQfnumber(res, "attname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_relname), - PQgetvalue(res, rowno, i_attname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains the \"tsquery\" data type. This data type\n" - "added a new internal field between your old and new clusters so this\n" - "cluster cannot currently be upgraded. You can remove the problem\n" - "columns and restart the upgrade. A list of the problem columns is in the\n" - "file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} - - -/* - * old_8_3_check_ltree_usage() - * 8.3 -> 8.4 - * The internal ltree structure was changed in 8.4 so upgrading is impossible. 
- */ -void -old_8_3_check_ltree_usage(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for contrib/ltree"); - - snprintf(output_path, sizeof(output_path), "contrib_ltree.txt"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_proname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* Find any functions coming from contrib/ltree */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, p.proname " - "FROM pg_catalog.pg_proc p, " - " pg_catalog.pg_namespace n " - "WHERE p.pronamespace = n.oid AND " - " p.probin = '$libdir/ltree'"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_proname = PQfnumber(res, "proname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "Database: %s\n", active_db->db_name); - db_used = true; - } - fprintf(script, " %s.%s\n", - PQgetvalue(res, rowno, i_nspname), - PQgetvalue(res, rowno, i_proname)); - } - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - pg_log(PG_REPORT, "fatal\n"); - pg_fatal("Your installation contains the \"ltree\" data type. This data type\n" - "changed its internal storage format between your old and new clusters so this\n" - "cluster cannot currently be upgraded. You can manually upgrade databases\n" - "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n" - "cluster and restart the upgrade. A list of the problem functions is in the\n" - "file:\n" - " %s\n\n", output_path); - } - else - check_ok(); -} - - -/* - * old_8_3_rebuild_tsvector_tables() - * 8.3 -> 8.4 - * 8.3 sorts lexemes by its length and if lengths are the same then it uses - * alphabetic order; 8.4 sorts lexemes in lexicographical order, e.g. 
- * - * => SELECT 'c bb aaa'::tsvector; - * tsvector - * ---------------- - * 'aaa' 'bb' 'c' -- 8.4 - * 'c' 'bb' 'aaa' -- 8.3 - */ -void -old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for tsvector user columns"); - - snprintf(output_path, sizeof(output_path), "rebuild_tsvector_tables.sql"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - char nspname[NAMEDATALEN] = "", - relname[NAMEDATALEN] = ""; - int ntups; - int rowno; - int i_nspname, - i_relname, - i_attname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* Find any user-defined tsvector columns */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname, a.attname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n, " - " pg_catalog.pg_attribute a " - /* materialized views didn't exist in 8.3, so no need to check 'm' */ - "WHERE c.relkind = 'r' AND " - " c.oid = a.attrelid AND " - " NOT a.attisdropped AND " - /* child attribute changes are processed by the parent */ - " a.attinhcount = 0 AND " - " a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND " - " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ - " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); - -/* - * This macro is used below to avoid reindexing indexes already rebuilt - * because of tsvector columns. - */ -#define SKIP_TSVECTOR_TABLES \ - "i.indrelid NOT IN ( " \ - "SELECT DISTINCT c.oid " \ - "FROM pg_catalog.pg_class c, " \ - " pg_catalog.pg_namespace n, " \ - " pg_catalog.pg_attribute a " \ - /* materialized views didn't exist in 8.3, so no need to check 'm' */ \ - "WHERE c.relkind = 'r' AND " \ - " c.oid = a.attrelid AND " \ - " NOT a.attisdropped AND " \ - /* child attribute changes are processed by the parent */ \ - " a.attinhcount = 0 AND " \ - " a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND " \ - " c.relnamespace = n.oid AND " \ - " n.nspname !~ '^pg_' AND " \ - " n.nspname != 'information_schema') " - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - i_attname = PQfnumber(res, "attname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (!check_mode) - { - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "\\connect %s\n\n", - quote_identifier(active_db->db_name)); - db_used = true; - } - - /* Rebuild all tsvector collumns with one ALTER TABLE command */ - if (strcmp(PQgetvalue(res, rowno, i_nspname), nspname) != 0 || - strcmp(PQgetvalue(res, rowno, i_relname), relname) != 0) - { - if (strlen(nspname) != 0 || strlen(relname) != 0) - fprintf(script, ";\n\n"); - fprintf(script, "ALTER TABLE %s.%s\n", - quote_identifier(PQgetvalue(res, rowno, i_nspname)), - quote_identifier(PQgetvalue(res, rowno, i_relname))); - } - else - fprintf(script, ",\n"); - strlcpy(nspname, PQgetvalue(res, rowno, i_nspname), sizeof(nspname)); - strlcpy(relname, PQgetvalue(res, rowno, i_relname), sizeof(relname)); - - fprintf(script, "ALTER COLUMN %s " - /* This could have been a custom conversion function call. 
*/ - "TYPE pg_catalog.tsvector USING %s::pg_catalog.text::pg_catalog.tsvector", - quote_identifier(PQgetvalue(res, rowno, i_attname)), - quote_identifier(PQgetvalue(res, rowno, i_attname))); - } - } - if (strlen(nspname) != 0 || strlen(relname) != 0) - fprintf(script, ";\n\n"); - - PQclear(res); - - /* XXX Mark tables as not accessible somehow */ - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - report_status(PG_WARNING, "warning"); - if (check_mode) - pg_log(PG_WARNING, "\n" - "Your installation contains tsvector columns. The tsvector internal\n" - "storage format changed between your old and new clusters so the tables\n" - "must be rebuilt. After upgrading, you will be given instructions.\n\n"); - else - pg_log(PG_WARNING, "\n" - "Your installation contains tsvector columns. The tsvector internal\n" - "storage format changed between your old and new clusters so the tables\n" - "must be rebuilt. The file:\n" - " %s\n" - "when executed by psql by the database superuser will rebuild all tables\n" - "with tsvector columns.\n\n", - output_path); - } - else - check_ok(); -} - - -/* - * old_8_3_invalidate_hash_gin_indexes() - * 8.3 -> 8.4 - * Hash and GIN index binary format changed from 8.3->8.4 - */ -void -old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for hash and GIN indexes"); - - snprintf(output_path, sizeof(output_path), "reindex_hash_and_gin.sql"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* find hash and gin indexes */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_index i, " - " pg_catalog.pg_am a, " - " pg_catalog.pg_namespace n " - "WHERE i.indexrelid = c.oid AND " - " c.relam = a.oid AND " - " c.relnamespace = n.oid AND " - " a.amname IN ('hash', 'gin') AND " - SKIP_TSVECTOR_TABLES); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (!check_mode) - { - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "\\connect %s\n", - quote_identifier(active_db->db_name)); - db_used = true; - } - fprintf(script, "REINDEX INDEX %s.%s;\n", - quote_identifier(PQgetvalue(res, rowno, i_nspname)), - quote_identifier(PQgetvalue(res, rowno, i_relname))); - } - } - - PQclear(res); - - if (!check_mode && found) - /* mark hash and gin indexes as invalid */ - PQclear(executeQueryOrDie(conn, - "UPDATE pg_catalog.pg_index i " - "SET indisvalid = false " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_am a, " - " pg_catalog.pg_namespace n " - "WHERE i.indexrelid = c.oid AND " - " c.relam = a.oid AND " - " c.relnamespace = n.oid AND " - " a.amname IN ('hash', 'gin')")); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - report_status(PG_WARNING, "warning"); - if (check_mode) - pg_log(PG_WARNING, "\n" - "Your installation contains hash and/or GIN indexes. 
These indexes have\n" - "different internal formats between your old and new clusters, so they\n" - "must be reindexed with the REINDEX command. After upgrading, you will\n" - "be given REINDEX instructions.\n\n"); - else - pg_log(PG_WARNING, "\n" - "Your installation contains hash and/or GIN indexes. These indexes have\n" - "different internal formats between your old and new clusters, so they\n" - "must be reindexed with the REINDEX command. The file:\n" - " %s\n" - "when executed by psql by the database superuser will recreate all invalid\n" - "indexes; until then, none of these indexes will be used.\n\n", - output_path); - } - else - check_ok(); -} - - -/* - * old_8_3_invalidate_bpchar_pattern_ops_indexes() - * 8.3 -> 8.4 - * 8.4 bpchar_pattern_ops no longer sorts based on trailing spaces - */ -void -old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster, - bool check_mode) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char output_path[MAXPGPATH]; - - prep_status("Checking for bpchar_pattern_ops indexes"); - - snprintf(output_path, sizeof(output_path), "reindex_bpchar_ops.sql"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* find bpchar_pattern_ops indexes */ - - /* - * Do only non-hash, non-gin indexees; we already invalidated them - * above; no need to reindex twice - */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname " - "FROM pg_catalog.pg_index i, " - " pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n " - "WHERE indexrelid = c.oid AND " - " c.relnamespace = n.oid AND " - " ( " - " SELECT o.oid " - " FROM pg_catalog.pg_opclass o, " - " pg_catalog.pg_am a" - " WHERE a.amname NOT IN ('hash', 'gin') AND " - " a.oid = o.opcmethod AND " - " o.opcname = 'bpchar_pattern_ops') " - " = ANY (i.indclass) AND " - SKIP_TSVECTOR_TABLES); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - for (rowno = 0; rowno < ntups; rowno++) - { - found = true; - if (!check_mode) - { - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "\\connect %s\n", - quote_identifier(active_db->db_name)); - db_used = true; - } - fprintf(script, "REINDEX INDEX %s.%s;\n", - quote_identifier(PQgetvalue(res, rowno, i_nspname)), - quote_identifier(PQgetvalue(res, rowno, i_relname))); - } - } - - PQclear(res); - - if (!check_mode && found) - /* mark bpchar_pattern_ops indexes as invalid */ - PQclear(executeQueryOrDie(conn, - "UPDATE pg_catalog.pg_index i " - "SET indisvalid = false " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n " - "WHERE indexrelid = c.oid AND " - " c.relnamespace = n.oid AND " - " ( " - " SELECT o.oid " - " FROM pg_catalog.pg_opclass o, " - " pg_catalog.pg_am a" - " WHERE a.amname NOT IN ('hash', 'gin') AND " - " a.oid = o.opcmethod AND " - " o.opcname = 'bpchar_pattern_ops') " - " = ANY (i.indclass)")); - - PQfinish(conn); - } - - if (script) - fclose(script); - - if (found) - { - report_status(PG_WARNING, "warning"); - if (check_mode) - pg_log(PG_WARNING, "\n" - "Your installation contains indexes using \"bpchar_pattern_ops\". 
These\n" - "indexes have different internal formats between your old and new clusters\n" - "so they must be reindexed with the REINDEX command. After upgrading, you\n" - "will be given REINDEX instructions.\n\n"); - else - pg_log(PG_WARNING, "\n" - "Your installation contains indexes using \"bpchar_pattern_ops\". These\n" - "indexes have different internal formats between your old and new clusters\n" - "so they must be reindexed with the REINDEX command. The file:\n" - " %s\n" - "when executed by psql by the database superuser will recreate all invalid\n" - "indexes; until then, none of these indexes will be used.\n\n", - output_path); - } - else - check_ok(); -} - - -/* - * old_8_3_create_sequence_script() - * 8.3 -> 8.4 - * 8.4 added the column "start_value" to all sequences. For this reason, - * we don't transfer sequence files but instead use the CREATE SEQUENCE - * command from the schema dump, and use setval() to restore the sequence - * value and 'is_called' from the old database. This is safe to run - * by pg_upgrade because sequence files are not transferred from the old - * server, even in link mode. - */ -char * -old_8_3_create_sequence_script(ClusterInfo *cluster) -{ - int dbnum; - FILE *script = NULL; - bool found = false; - char *output_path; - - output_path = pg_strdup("adjust_sequences.sql"); - - prep_status("Creating script to adjust sequences"); - - for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) - { - PGresult *res; - bool db_used = false; - int ntups; - int rowno; - int i_nspname, - i_relname; - DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; - PGconn *conn = connectToServer(cluster, active_db->db_name); - - /* Find any sequences */ - res = executeQueryOrDie(conn, - "SELECT n.nspname, c.relname " - "FROM pg_catalog.pg_class c, " - " pg_catalog.pg_namespace n " - "WHERE c.relkind = 'S' AND " - " c.relnamespace = n.oid AND " - /* exclude possible orphaned temp tables */ - " n.nspname !~ '^pg_temp_' AND " - " n.nspname !~ '^pg_toast_temp_' AND " - " n.nspname NOT IN ('pg_catalog', 'information_schema')"); - - ntups = PQntuples(res); - i_nspname = PQfnumber(res, "nspname"); - i_relname = PQfnumber(res, "relname"); - for (rowno = 0; rowno < ntups; rowno++) - { - PGresult *seq_res; - int i_last_value, - i_is_called; - const char *nspname = PQgetvalue(res, rowno, i_nspname); - const char *relname = PQgetvalue(res, rowno, i_relname); - - found = true; - - if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); - if (!db_used) - { - fprintf(script, "\\connect %s\n\n", - quote_identifier(active_db->db_name)); - db_used = true; - } - - /* Find the desired sequence */ - seq_res = executeQueryOrDie(conn, - "SELECT s.last_value, s.is_called " - "FROM %s.%s s", - quote_identifier(nspname), - quote_identifier(relname)); - - assert(PQntuples(seq_res) == 1); - i_last_value = PQfnumber(seq_res, "last_value"); - i_is_called = PQfnumber(seq_res, "is_called"); - - fprintf(script, "SELECT setval('%s.%s', %s, '%s');\n", - quote_identifier(nspname), quote_identifier(relname), - PQgetvalue(seq_res, 0, i_last_value), PQgetvalue(seq_res, 0, i_is_called)); - PQclear(seq_res); - } - if (db_used) - fprintf(script, "\n"); - - PQclear(res); - - PQfinish(conn); - } - - if (script) - fclose(script); - - check_ok(); - - if (found) - return output_path; - else - { - pg_free(output_path); - return NULL; - } -} diff --git a/contrib/pg_upgrade_support/Makefile b/contrib/pg_upgrade_support/Makefile deleted 
file mode 100644 index f7def160c3..0000000000 --- a/contrib/pg_upgrade_support/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -# contrib/pg_upgrade_support/Makefile - -PGFILEDESC = "pg_upgrade_support - server-side functions for pg_upgrade" - -MODULES = pg_upgrade_support - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/pg_upgrade_support -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif diff --git a/contrib/pg_upgrade_support/pg_upgrade_support.c b/contrib/pg_upgrade_support/pg_upgrade_support.c deleted file mode 100644 index edd41d06ae..0000000000 --- a/contrib/pg_upgrade_support/pg_upgrade_support.c +++ /dev/null @@ -1,173 +0,0 @@ -/* - * pg_upgrade_support.c - * - * server-side functions to set backend global variables - * to control oid and relfilenode assignment, and do other special - * hacks needed for pg_upgrade. - * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group - * contrib/pg_upgrade_support/pg_upgrade_support.c - */ - -#include "postgres.h" - -#include "catalog/binary_upgrade.h" -#include "catalog/namespace.h" -#include "catalog/pg_type.h" -#include "commands/extension.h" -#include "miscadmin.h" -#include "utils/array.h" -#include "utils/builtins.h" - -/* THIS IS USED ONLY FOR PG >= 9.0 */ - -#ifdef PG_MODULE_MAGIC -PG_MODULE_MAGIC; -#endif - -PG_FUNCTION_INFO_V1(set_next_pg_type_oid); -PG_FUNCTION_INFO_V1(set_next_array_pg_type_oid); -PG_FUNCTION_INFO_V1(set_next_toast_pg_type_oid); - -PG_FUNCTION_INFO_V1(set_next_heap_pg_class_oid); -PG_FUNCTION_INFO_V1(set_next_index_pg_class_oid); -PG_FUNCTION_INFO_V1(set_next_toast_pg_class_oid); - -PG_FUNCTION_INFO_V1(set_next_pg_enum_oid); -PG_FUNCTION_INFO_V1(set_next_pg_authid_oid); - -PG_FUNCTION_INFO_V1(create_empty_extension); - - -Datum -set_next_pg_type_oid(PG_FUNCTION_ARGS) -{ - Oid typoid = PG_GETARG_OID(0); - - binary_upgrade_next_pg_type_oid = typoid; - - PG_RETURN_VOID(); -} - -Datum -set_next_array_pg_type_oid(PG_FUNCTION_ARGS) -{ - Oid typoid = PG_GETARG_OID(0); - - binary_upgrade_next_array_pg_type_oid = typoid; - - PG_RETURN_VOID(); -} - -Datum -set_next_toast_pg_type_oid(PG_FUNCTION_ARGS) -{ - Oid typoid = PG_GETARG_OID(0); - - binary_upgrade_next_toast_pg_type_oid = typoid; - - PG_RETURN_VOID(); -} - -Datum -set_next_heap_pg_class_oid(PG_FUNCTION_ARGS) -{ - Oid reloid = PG_GETARG_OID(0); - - binary_upgrade_next_heap_pg_class_oid = reloid; - - PG_RETURN_VOID(); -} - -Datum -set_next_index_pg_class_oid(PG_FUNCTION_ARGS) -{ - Oid reloid = PG_GETARG_OID(0); - - binary_upgrade_next_index_pg_class_oid = reloid; - - PG_RETURN_VOID(); -} - -Datum -set_next_toast_pg_class_oid(PG_FUNCTION_ARGS) -{ - Oid reloid = PG_GETARG_OID(0); - - binary_upgrade_next_toast_pg_class_oid = reloid; - - PG_RETURN_VOID(); -} - -Datum -set_next_pg_enum_oid(PG_FUNCTION_ARGS) -{ - Oid enumoid = PG_GETARG_OID(0); - - binary_upgrade_next_pg_enum_oid = enumoid; - - PG_RETURN_VOID(); -} - -Datum -set_next_pg_authid_oid(PG_FUNCTION_ARGS) -{ - Oid authoid = PG_GETARG_OID(0); - - binary_upgrade_next_pg_authid_oid = authoid; - PG_RETURN_VOID(); -} - -Datum -create_empty_extension(PG_FUNCTION_ARGS) -{ - text *extName = PG_GETARG_TEXT_PP(0); - text *schemaName = PG_GETARG_TEXT_PP(1); - bool relocatable = PG_GETARG_BOOL(2); - text *extVersion = PG_GETARG_TEXT_PP(3); - Datum extConfig; - Datum extCondition; - List *requiredExtensions; - - if (PG_ARGISNULL(4)) - extConfig = PointerGetDatum(NULL); - else - 
extConfig = PG_GETARG_DATUM(4); - - if (PG_ARGISNULL(5)) - extCondition = PointerGetDatum(NULL); - else - extCondition = PG_GETARG_DATUM(5); - - requiredExtensions = NIL; - if (!PG_ARGISNULL(6)) - { - ArrayType *textArray = PG_GETARG_ARRAYTYPE_P(6); - Datum *textDatums; - int ndatums; - int i; - - deconstruct_array(textArray, - TEXTOID, -1, false, 'i', - &textDatums, NULL, &ndatums); - for (i = 0; i < ndatums; i++) - { - text *txtname = DatumGetTextPP(textDatums[i]); - char *extName = text_to_cstring(txtname); - Oid extOid = get_extension_oid(extName, false); - - requiredExtensions = lappend_oid(requiredExtensions, extOid); - } - } - - InsertExtensionTuple(text_to_cstring(extName), - GetUserId(), - get_namespace_oid(text_to_cstring(schemaName), false), - relocatable, - text_to_cstring(extVersion), - extConfig, - extCondition, - requiredExtensions); - - PG_RETURN_VOID(); -} diff --git a/contrib/pg_xlogdump/.gitignore b/contrib/pg_xlogdump/.gitignore index 71f8531c40..16cf749ee4 100644 --- a/contrib/pg_xlogdump/.gitignore +++ b/contrib/pg_xlogdump/.gitignore @@ -1,6 +1,8 @@ /pg_xlogdump # Source files copied from src/backend/access/ +/brindesc.c /clogdesc.c +/committsdesc.c /dbasedesc.c /gindesc.c /gistdesc.c diff --git a/contrib/pg_xlogdump/Makefile b/contrib/pg_xlogdump/Makefile index ada261c4dd..30a8706948 100644 --- a/contrib/pg_xlogdump/Makefile +++ b/contrib/pg_xlogdump/Makefile @@ -1,6 +1,6 @@ # contrib/pg_xlogdump/Makefile -PGFILEDESC = "pg_xlogdump" +PGFILEDESC = "pg_xlogdump - decode and display WAL" PGAPPICON=win32 PROGRAM = pg_xlogdump diff --git a/contrib/pg_xlogdump/compat.c b/contrib/pg_xlogdump/compat.c index 6ca7012fd9..4f5cad6706 100644 --- a/contrib/pg_xlogdump/compat.c +++ b/contrib/pg_xlogdump/compat.c @@ -3,7 +3,7 @@ * compat.c * Reimplementations of various backend functions. * - * Portions Copyright (c) 2013-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2013-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pg_xlogdump/compat.c diff --git a/contrib/pg_xlogdump/pg_xlogdump.c b/contrib/pg_xlogdump/pg_xlogdump.c index 824b8c393c..4f297e9572 100644 --- a/contrib/pg_xlogdump/pg_xlogdump.c +++ b/contrib/pg_xlogdump/pg_xlogdump.c @@ -2,7 +2,7 @@ * * pg_xlogdump.c - decode and display WAL * - * Copyright (c) 2013-2014, PostgreSQL Global Development Group + * Copyright (c) 2013-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/pg_xlogdump/pg_xlogdump.c @@ -15,8 +15,9 @@ #include <dirent.h> #include <unistd.h> -#include "access/xlog.h" #include "access/xlogreader.h" +#include "access/xlogrecord.h" +#include "access/xlog_internal.h" #include "access/transam.h" #include "common/fe_memutils.h" #include "getopt_long.h" @@ -41,6 +42,8 @@ typedef struct XLogDumpConfig int stop_after_records; int already_displayed_records; bool follow; + bool stats; + bool stats_per_record; /* filter options */ int filter_by_rmgr; @@ -48,9 +51,23 @@ typedef struct XLogDumpConfig bool filter_by_xid_enabled; } XLogDumpConfig; -static void -fatal_error(const char *fmt,...) -__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); +typedef struct Stats +{ + uint64 count; + uint64 rec_len; + uint64 fpi_len; +} Stats; + +#define MAX_XLINFO_TYPES 16 + +typedef struct XLogDumpStats +{ + uint64 count; + Stats rmgr_stats[RM_NEXT_ID]; + Stats record_stats[RM_NEXT_ID][MAX_XLINFO_TYPES]; +} XLogDumpStats; + +static void fatal_error(const char *fmt,...) pg_attribute_printf(1, 2); /* * Big red button to push when things go horribly wrong. 
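
[Editorial sketch, not part of the diff.] The hunk above introduces the Stats and XLogDumpStats containers behind pg_xlogdump's new --stats mode. As a rough standalone illustration of how they are meant to be filled (the real code, shown in the later XLogDumpCountRecord hunk, indexes by RM_NEXT_ID and uses the XLogRecGet* accessors), each record is bucketed once per resource manager and once per (rmgr, high nibble of xl_info) pair. MAX_RMGRS, count_record and the literal values below are illustrative stand-ins, not names from the patch.

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_RMGRS        32     /* stand-in for RM_NEXT_ID */
#define MAX_XLINFO_TYPES 16     /* xl_info >> 4 can take 16 values */

typedef struct Stats
{
	uint64_t	count;
	uint64_t	rec_len;
	uint64_t	fpi_len;
} Stats;

typedef struct XLogDumpStats
{
	uint64_t	count;
	Stats		rmgr_stats[MAX_RMGRS];
	Stats		record_stats[MAX_RMGRS][MAX_XLINFO_TYPES];
} XLogDumpStats;

/* Bucket one record by rmgr id and by the rmgr-private bits of xl_info. */
static void
count_record(XLogDumpStats *stats, uint8_t rmid, uint8_t xl_info,
			 uint32_t rec_len, uint32_t fpi_len)
{
	uint8_t		recid = xl_info >> 4;	/* upper four bits belong to the rmgr */

	stats->count++;

	stats->rmgr_stats[rmid].count++;
	stats->rmgr_stats[rmid].rec_len += rec_len;
	stats->rmgr_stats[rmid].fpi_len += fpi_len;

	stats->record_stats[rmid][recid].count++;
	stats->record_stats[rmid][recid].rec_len += rec_len;
	stats->record_stats[rmid][recid].fpi_len += fpi_len;
}

int
main(void)
{
	XLogDumpStats stats = {0};

	/* a hypothetical record: rmgr 10, info 0x00, 79 bytes, no FPI */
	count_record(&stats, 10, 0x00, 79, 0);
	printf("records seen: %llu\n", (unsigned long long) stats.count);
	return 0;
}
```
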
@@ -152,7 +169,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fname, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; /* XLOGDIR / fname */ @@ -161,7 +178,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fpath, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; datadir = getenv("PGDATA"); @@ -173,7 +190,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fpath, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; } } @@ -185,7 +202,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fpath, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; /* directory / XLOGDIR / fname */ @@ -194,7 +211,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fpath, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; } return -1; @@ -322,62 +339,277 @@ XLogDumpReadPage(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, } /* - * Print a record to stdout + * Store per-rmgr and per-record statistics for a given record. */ static void -XLogDumpDisplayRecord(XLogDumpConfig *config, XLogRecPtr ReadRecPtr, XLogRecord *record) +XLogDumpCountRecord(XLogDumpConfig *config, XLogDumpStats *stats, + XLogReaderState *record) { - const RmgrDescData *desc = &RmgrDescTable[record->xl_rmid]; + RmgrId rmid; + uint8 recid; + uint32 rec_len; + uint32 fpi_len; + int block_id; - if (config->filter_by_rmgr != -1 && - config->filter_by_rmgr != record->xl_rmid) - return; + stats->count++; - if (config->filter_by_xid_enabled && - config->filter_by_xid != record->xl_xid) - return; + rmid = XLogRecGetRmid(record); + rec_len = XLogRecGetDataLen(record) + SizeOfXLogRecord; - config->already_displayed_records++; + /* + * Calculate the amount of FPI data in the record. + * + * XXX: We peek into xlogreader's private decoded backup blocks for the + * bimg_len indicating the length of FPI data. It doesn't seem worth it to + * add an accessor macro for this. + */ + fpi_len = 0; + for (block_id = 0; block_id <= record->max_block_id; block_id++) + { + if (XLogRecHasBlockImage(record, block_id)) + fpi_len += record->blocks[block_id].bimg_len; + } - printf("rmgr: %-11s len (rec/tot): %6u/%6u, tx: %10u, lsn: %X/%08X, prev %X/%08X, bkp: %u%u%u%u, desc: ", + /* Update per-rmgr statistics */ + + stats->rmgr_stats[rmid].count++; + stats->rmgr_stats[rmid].rec_len += rec_len; + stats->rmgr_stats[rmid].fpi_len += fpi_len; + + /* + * Update per-record statistics, where the record is identified by a + * combination of the RmgrId and the four bits of the xl_info field that + * are the rmgr's domain (resulting in sixteen possible entries per + * RmgrId). 
+ */ + + recid = XLogRecGetInfo(record) >> 4; + + stats->record_stats[rmid][recid].count++; + stats->record_stats[rmid][recid].rec_len += rec_len; + stats->record_stats[rmid][recid].fpi_len += fpi_len; +} + +/* + * Print a record to stdout + */ +static void +XLogDumpDisplayRecord(XLogDumpConfig *config, XLogReaderState *record) +{ + const char *id; + const RmgrDescData *desc = &RmgrDescTable[XLogRecGetRmid(record)]; + RelFileNode rnode; + ForkNumber forknum; + BlockNumber blk; + int block_id; + uint8 info = XLogRecGetInfo(record); + XLogRecPtr xl_prev = XLogRecGetPrev(record); + + id = desc->rm_identify(info); + if (id == NULL) + id = psprintf("UNKNOWN (%x)", info & ~XLR_INFO_MASK); + + printf("rmgr: %-11s len (rec/tot): %6u/%6u, tx: %10u, lsn: %X/%08X, prev %X/%08X, ", desc->rm_name, - record->xl_len, record->xl_tot_len, - record->xl_xid, - (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr, - (uint32) (record->xl_prev >> 32), (uint32) record->xl_prev, - !!(XLR_BKP_BLOCK(0) & record->xl_info), - !!(XLR_BKP_BLOCK(1) & record->xl_info), - !!(XLR_BKP_BLOCK(2) & record->xl_info), - !!(XLR_BKP_BLOCK(3) & record->xl_info)); + XLogRecGetDataLen(record), XLogRecGetTotalLen(record), + XLogRecGetXid(record), + (uint32) (record->ReadRecPtr >> 32), (uint32) record->ReadRecPtr, + (uint32) (xl_prev >> 32), (uint32) xl_prev); + printf("desc: %s ", id); /* the desc routine will printf the description directly to stdout */ - desc->rm_desc(NULL, record->xl_info, XLogRecGetData(record)); - - putchar('\n'); + desc->rm_desc(NULL, record); - if (config->bkp_details) + if (!config->bkp_details) { - int bkpnum; - char *blk = (char *) XLogRecGetData(record) + record->xl_len; - - for (bkpnum = 0; bkpnum < XLR_MAX_BKP_BLOCKS; bkpnum++) + /* print block references (short format) */ + for (block_id = 0; block_id <= record->max_block_id; block_id++) { - BkpBlock bkpb; + if (!XLogRecHasBlockRef(record, block_id)) + continue; - if (!(XLR_BKP_BLOCK(bkpnum) & record->xl_info)) + XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blk); + if (forknum != MAIN_FORKNUM) + printf(", blkref #%u: rel %u/%u/%u fork %s blk %u", + block_id, + rnode.spcNode, rnode.dbNode, rnode.relNode, + forkNames[forknum], + blk); + else + printf(", blkref #%u: rel %u/%u/%u blk %u", + block_id, + rnode.spcNode, rnode.dbNode, rnode.relNode, + blk); + if (XLogRecHasBlockImage(record, block_id)) + printf(" FPW"); + } + putchar('\n'); + } + else + { + /* print block references (detailed format) */ + putchar('\n'); + for (block_id = 0; block_id <= record->max_block_id; block_id++) + { + if (!XLogRecHasBlockRef(record, block_id)) continue; - memcpy(&bkpb, blk, sizeof(BkpBlock)); - blk += sizeof(BkpBlock); - blk += BLCKSZ - bkpb.hole_length; + XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blk); + printf("\tblkref #%u: rel %u/%u/%u fork %s blk %u", + block_id, + rnode.spcNode, rnode.dbNode, rnode.relNode, + forkNames[forknum], + blk); + if (XLogRecHasBlockImage(record, block_id)) + { + if (record->blocks[block_id].bimg_info & + BKPIMAGE_IS_COMPRESSED) + { + printf(" (FPW); hole: offset: %u, length: %u, compression saved: %u\n", + record->blocks[block_id].hole_offset, + record->blocks[block_id].hole_length, + BLCKSZ - + record->blocks[block_id].hole_length - + record->blocks[block_id].bimg_len); + } + else + { + printf(" (FPW); hole: offset: %u, length: %u\n", + record->blocks[block_id].hole_offset, + record->blocks[block_id].hole_length); + } + } + putchar('\n'); + } + } +} - printf("\tbackup bkp #%u; rel %u/%u/%u; fork: %s; block: %u; hole: 
offset: %u, length: %u\n", - bkpnum, - bkpb.node.spcNode, bkpb.node.dbNode, bkpb.node.relNode, - forkNames[bkpb.fork], - bkpb.block, bkpb.hole_offset, bkpb.hole_length); +/* + * Display a single row of record counts and sizes for an rmgr or record. + */ +static void +XLogDumpStatsRow(const char *name, + uint64 n, double n_pct, + uint64 rec_len, double rec_len_pct, + uint64 fpi_len, double fpi_len_pct, + uint64 total_len, double total_len_pct) +{ + printf("%-27s " + "%20" INT64_MODIFIER "u (%6.02f) " + "%20" INT64_MODIFIER "u (%6.02f) " + "%20" INT64_MODIFIER "u (%6.02f) " + "%20" INT64_MODIFIER "u (%6.02f)\n", + name, n, n_pct, rec_len, rec_len_pct, fpi_len, fpi_len_pct, + total_len, total_len_pct); +} + + +/* + * Display summary statistics about the records seen so far. + */ +static void +XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats) +{ + int ri, rj; + uint64 total_count = 0; + uint64 total_rec_len = 0; + uint64 total_fpi_len = 0; + uint64 total_len = 0; + + /* --- + * Make a first pass to calculate column totals: + * count(*), + * sum(xl_len+SizeOfXLogRecord), + * sum(xl_tot_len-xl_len-SizeOfXLogRecord), and + * sum(xl_tot_len). + * These are used to calculate percentages for each record type. + * --- + */ + + for (ri = 0; ri < RM_NEXT_ID; ri++) + { + total_count += stats->rmgr_stats[ri].count; + total_rec_len += stats->rmgr_stats[ri].rec_len; + total_fpi_len += stats->rmgr_stats[ri].fpi_len; + } + total_len = total_rec_len+total_fpi_len; + + /* + * 27 is strlen("Transaction/COMMIT_PREPARED"), + * 20 is strlen(2^64), 8 is strlen("(100.00%)") + */ + + printf("%-27s %20s %8s %20s %8s %20s %8s %20s %8s\n" + "%-27s %20s %8s %20s %8s %20s %8s %20s %8s\n", + "Type", "N", "(%)", "Record size", "(%)", "FPI size", "(%)", "Combined size", "(%)", + "----", "-", "---", "-----------", "---", "--------", "---", "-------------", "---"); + + for (ri = 0; ri < RM_NEXT_ID; ri++) + { + uint64 count, rec_len, fpi_len, tot_len; + const RmgrDescData *desc = &RmgrDescTable[ri]; + + if (!config->stats_per_record) + { + count = stats->rmgr_stats[ri].count; + rec_len = stats->rmgr_stats[ri].rec_len; + fpi_len = stats->rmgr_stats[ri].fpi_len; + tot_len = rec_len + fpi_len; + + XLogDumpStatsRow(desc->rm_name, + count, 100 * (double) count / total_count, + rec_len, 100 * (double) rec_len / total_rec_len, + fpi_len, 100 * (double) fpi_len / total_fpi_len, + tot_len, 100 * (double) tot_len / total_len); + } + else + { + for (rj = 0; rj < MAX_XLINFO_TYPES; rj++) + { + const char *id; + + count = stats->record_stats[ri][rj].count; + rec_len = stats->record_stats[ri][rj].rec_len; + fpi_len = stats->record_stats[ri][rj].fpi_len; + tot_len = rec_len + fpi_len; + + /* Skip undefined combinations and ones that didn't occur */ + if (count == 0) + continue; + + /* the upper four bits in xl_info are the rmgr's */ + id = desc->rm_identify(rj << 4); + if (id == NULL) + id = psprintf("UNKNOWN (%x)", rj << 4); + + XLogDumpStatsRow(psprintf("%s/%s", desc->rm_name, id), + count, 100 * (double) count / total_count, + rec_len, 100 * (double) rec_len / total_rec_len, + fpi_len, 100 * (double) fpi_len / total_fpi_len, + tot_len, 100 * (double) tot_len / total_len); + } } } + + printf("%-27s %20s %8s %20s %8s %20s %8s %20s\n", + "", "--------", "", "--------", "", "--------", "", "--------"); + + /* + * The percentages in earlier rows were calculated against the + * column total, but the ones that follow are against the row total. 
+ * Note that these are displayed with a % symbol to differentiate + * them from the earlier ones, and are thus up to 9 characters long. + */ + + printf("%-27s " + "%20" INT64_MODIFIER "u %-9s" + "%20" INT64_MODIFIER "u %-9s" + "%20" INT64_MODIFIER "u %-9s" + "%20" INT64_MODIFIER "u %-6s\n", + "Total", stats->count, "", + total_rec_len, psprintf("[%.02f%%]", 100 * (double)total_rec_len / total_len), + total_fpi_len, psprintf("[%.02f%%]", 100 * (double)total_fpi_len / total_len), + total_len, "[100%]"); } static void @@ -401,6 +633,8 @@ usage(void) printf(" (default: 1 or the value used in STARTSEG)\n"); printf(" -V, --version output version information, then exit\n"); printf(" -x, --xid=XID only show records with TransactionId XID\n"); + printf(" -z, --stats[=record] show statistics instead of records\n"); + printf(" (optionally, show per-record statistics)\n"); printf(" -?, --help show this help, then exit\n"); } @@ -412,6 +646,7 @@ main(int argc, char **argv) XLogReaderState *xlogreader_state; XLogDumpPrivate private; XLogDumpConfig config; + XLogDumpStats stats; XLogRecord *record; XLogRecPtr first_record; char *errormsg; @@ -428,6 +663,7 @@ main(int argc, char **argv) {"timeline", required_argument, NULL, 't'}, {"xid", required_argument, NULL, 'x'}, {"version", no_argument, NULL, 'V'}, + {"stats", optional_argument, NULL, 'z'}, {NULL, 0, NULL, 0} }; @@ -438,6 +674,7 @@ main(int argc, char **argv) memset(&private, 0, sizeof(XLogDumpPrivate)); memset(&config, 0, sizeof(XLogDumpConfig)); + memset(&stats, 0, sizeof(XLogDumpStats)); private.timeline = 1; private.startptr = InvalidXLogRecPtr; @@ -451,6 +688,8 @@ main(int argc, char **argv) config.filter_by_rmgr = -1; config.filter_by_xid = InvalidTransactionId; config.filter_by_xid_enabled = false; + config.stats = false; + config.stats_per_record = false; if (argc <= 1) { @@ -458,7 +697,7 @@ main(int argc, char **argv) goto bad_argument; } - while ((option = getopt_long(argc, argv, "be:?fn:p:r:s:t:Vx:", + while ((option = getopt_long(argc, argv, "be:?fn:p:r:s:t:Vx:z", long_options, &optindex)) != -1) { switch (option) @@ -551,6 +790,21 @@ main(int argc, char **argv) } config.filter_by_xid_enabled = true; break; + case 'z': + config.stats = true; + config.stats_per_record = false; + if (optarg) + { + if (strcmp(optarg, "record") == 0) + config.stats_per_record = true; + else if (strcmp(optarg, "rmgr") != 0) + { + fprintf(stderr, "%s: unrecognised argument to --stats: %s\n", + progname, optarg); + goto bad_argument; + } + } + break; default: goto bad_argument; } @@ -711,14 +965,32 @@ main(int argc, char **argv) /* after reading the first record, continue at next one */ first_record = InvalidXLogRecPtr; - XLogDumpDisplayRecord(&config, xlogreader_state->ReadRecPtr, record); + + /* apply all specified filters */ + if (config.filter_by_rmgr != -1 && + config.filter_by_rmgr != record->xl_rmid) + continue; + + if (config.filter_by_xid_enabled && + config.filter_by_xid != record->xl_xid) + continue; + + /* process the record */ + if (config.stats == true) + XLogDumpCountRecord(&config, &stats, xlogreader_state); + else + XLogDumpDisplayRecord(&config, xlogreader_state); /* check whether we printed enough */ + config.already_displayed_records++; if (config.stop_after_records > 0 && config.already_displayed_records >= config.stop_after_records) break; } + if (config.stats == true) + XLogDumpDisplayStats(&config, &stats); + if (errormsg) fatal_error("error in WAL record at %X/%X: %s\n", (uint32) (xlogreader_state->ReadRecPtr >> 32), diff --git 
a/contrib/pg_xlogdump/rmgrdesc.c b/contrib/pg_xlogdump/rmgrdesc.c index cbcaaa6b0c..bd3344ccd6 100644 --- a/contrib/pg_xlogdump/rmgrdesc.c +++ b/contrib/pg_xlogdump/rmgrdesc.c @@ -8,7 +8,9 @@ #define FRONTEND 1 #include "postgres.h" +#include "access/brin_xlog.h" #include "access/clog.h" +#include "access/commit_ts.h" #include "access/gin.h" #include "access/gist_private.h" #include "access/hash.h" @@ -20,15 +22,15 @@ #include "access/xact.h" #include "access/xlog_internal.h" #include "catalog/storage_xlog.h" -#include "commands/dbcommands.h" +#include "commands/dbcommands_xlog.h" #include "commands/sequence.h" #include "commands/tablespace.h" #include "rmgrdesc.h" #include "storage/standby.h" #include "utils/relmapper.h" -#define PG_RMGR(symname,name,redo,desc,startup,cleanup) \ - { name, desc, }, +#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup) \ + { name, desc, identify}, const RmgrDescData RmgrDescTable[RM_MAX_ID + 1] = { #include "access/rmgrlist.h" diff --git a/contrib/pg_xlogdump/rmgrdesc.h b/contrib/pg_xlogdump/rmgrdesc.h index edf8257751..aec4418303 100644 --- a/contrib/pg_xlogdump/rmgrdesc.h +++ b/contrib/pg_xlogdump/rmgrdesc.h @@ -13,7 +13,8 @@ typedef struct RmgrDescData { const char *rm_name; - void (*rm_desc) (StringInfo buf, uint8 xl_info, char *rec); + void (*rm_desc) (StringInfo buf, XLogReaderState *record); + const char *(*rm_identify) (uint8 info); } RmgrDescData; extern const RmgrDescData RmgrDescTable[]; diff --git a/contrib/pgbench/.gitignore b/contrib/pgbench/.gitignore deleted file mode 100644 index 489a2d62d0..0000000000 --- a/contrib/pgbench/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/pgbench diff --git a/contrib/pgbench/Makefile b/contrib/pgbench/Makefile deleted file mode 100644 index b8f5fb467f..0000000000 --- a/contrib/pgbench/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -# contrib/pgbench/Makefile - -PGFILEDESC = "pgbench - a simple program for running benchmark tests" -PGAPPICON = win32 - -PROGRAM = pgbench -OBJS = pgbench.o - -PG_CPPFLAGS = -I$(libpq_srcdir) -PG_LIBS = $(libpq_pgport) $(PTHREAD_LIBS) - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/pgbench -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif - -ifneq ($(PORTNAME), win32) -override CFLAGS += $(PTHREAD_CFLAGS) -endif diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c deleted file mode 100644 index b70a9a0901..0000000000 --- a/contrib/pgbench/pgbench.c +++ /dev/null @@ -1,3643 +0,0 @@ -/* - * pgbench.c - * - * A simple benchmark program for PostgreSQL - * Originally written by Tatsuo Ishii and enhanced by many contributors. - * - * contrib/pgbench/pgbench.c - * Copyright (c) 2000-2014, PostgreSQL Global Development Group - * ALL RIGHTS RESERVED; - * - * Permission to use, copy, modify, and distribute this software and its - * documentation for any purpose, without fee, and without a written agreement - * is hereby granted, provided that the above copyright notice and this - * paragraph and the following two paragraphs appear in all copies. - * - * IN NO EVENT SHALL THE AUTHOR OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR - * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING - * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS - * DOCUMENTATION, EVEN IF THE AUTHOR OR DISTRIBUTORS HAVE BEEN ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- * - * THE AUTHOR AND DISTRIBUTORS SPECIFICALLY DISCLAIMS ANY WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS - * ON AN "AS IS" BASIS, AND THE AUTHOR AND DISTRIBUTORS HAS NO OBLIGATIONS TO - * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. - * - */ - -#ifdef WIN32 -#define FD_SETSIZE 1024 /* set before winsock2.h is included */ -#endif /* ! WIN32 */ - -#include "postgres_fe.h" - -#include "getopt_long.h" -#include "libpq-fe.h" -#include "portability/instr_time.h" - -#include <ctype.h> -#include <math.h> -#include <signal.h> -#include <sys/time.h> -#ifdef HAVE_SYS_SELECT_H -#include <sys/select.h> -#endif - -#ifdef HAVE_SYS_RESOURCE_H -#include <sys/resource.h> /* for getrlimit */ -#endif - -#ifndef INT64_MAX -#define INT64_MAX INT64CONST(0x7FFFFFFFFFFFFFFF) -#endif - -/* - * Multi-platform pthread implementations - */ - -#ifdef WIN32 -/* Use native win32 threads on Windows */ -typedef struct win32_pthread *pthread_t; -typedef int pthread_attr_t; - -static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); -static int pthread_join(pthread_t th, void **thread_return); -#elif defined(ENABLE_THREAD_SAFETY) -/* Use platform-dependent pthread capability */ -#include <pthread.h> -#else -/* Use emulation with fork. Rename pthread identifiers to avoid conflicts */ -#define PTHREAD_FORK_EMULATION -#include <sys/wait.h> - -#define pthread_t pg_pthread_t -#define pthread_attr_t pg_pthread_attr_t -#define pthread_create pg_pthread_create -#define pthread_join pg_pthread_join - -typedef struct fork_pthread *pthread_t; -typedef int pthread_attr_t; - -static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg); -static int pthread_join(pthread_t th, void **thread_return); -#endif - - -/******************************************************************** - * some configurable parameters */ - -/* max number of clients allowed */ -#ifdef FD_SETSIZE -#define MAXCLIENTS (FD_SETSIZE - 10) -#else -#define MAXCLIENTS 1024 -#endif - -#define LOG_STEP_SECONDS 5 /* seconds between log messages */ -#define DEFAULT_NXACTS 10 /* default nxacts */ - -int nxacts = 0; /* number of transactions per client */ -int duration = 0; /* duration in seconds */ - -/* - * scaling factor. for example, scale = 10 will make 1000000 tuples in - * pgbench_accounts table. - */ -int scale = 1; - -/* - * fillfactor. for example, fillfactor = 90 will use only 90 percent - * space during inserts and leave 10 percent free. - */ -int fillfactor = 100; - -/* - * create foreign key constraints on the tables? - */ -int foreign_keys = 0; - -/* - * use unlogged tables? - */ -int unlogged_tables = 0; - -/* - * log sampling rate (1.0 = log everything, 0.0 = option not given) - */ -double sample_rate = 0.0; - -/* - * When threads are throttled to a given rate limit, this is the target delay - * to reach that rate in usec. 0 is the default and means no throttling. - */ -int64 throttle_delay = 0; - -/* - * tablespace selection - */ -char *tablespace = NULL; -char *index_tablespace = NULL; - -/* - * end of configurable parameters - *********************************************************************/ - -#define nbranches 1 /* Makes little sense to change this. 
Change - * -s instead */ -#define ntellers 10 -#define naccounts 100000 - -#ifdef PGXC -bool use_branch = false; /* use branch id in DDL and DML */ -#endif -/* - * The scale factor at/beyond which 32bit integers are incapable of storing - * 64bit values. - * - * Although the actual threshold is 21474, we use 20000 because it is easier to - * document and remember, and isn't that far away from the real threshold. - */ -#define SCALE_32BIT_THRESHOLD 20000 - -bool use_log; /* log transaction latencies to a file */ -bool use_quiet; /* quiet logging onto stderr */ -int agg_interval; /* log aggregates instead of individual - * transactions */ -int progress = 0; /* thread progress report every this seconds */ -int progress_nclients = 0; /* number of clients for progress - * report */ -int progress_nthreads = 0; /* number of threads for progress - * report */ -bool is_connect; /* establish connection for each transaction */ -bool is_latencies; /* report per-command latencies */ -int main_pid; /* main process id used in log filename */ - -char *pghost = ""; -char *pgport = ""; -char *login = NULL; -char *dbName; -const char *progname; - -volatile bool timer_exceeded = false; /* flag from signal handler */ - -/* variable definitions */ -typedef struct -{ - char *name; /* variable name */ - char *value; /* its value */ -} Variable; - -#define MAX_FILES 128 /* max number of SQL script files allowed */ -#define SHELL_COMMAND_SIZE 256 /* maximum size allowed for shell command */ - -/* - * structures used in custom query mode - */ - -typedef struct -{ - PGconn *con; /* connection handle to DB */ - int id; /* client No. */ - int state; /* state No. */ - int cnt; /* xacts count */ - int ecnt; /* error count */ - int listen; /* 0 indicates that an async query has been - * sent */ - int sleeping; /* 1 indicates that the client is napping */ - bool throttling; /* whether nap is for throttling */ - int64 until; /* napping until (usec) */ - Variable *variables; /* array of variable definitions */ - int nvariables; - instr_time txn_begin; /* used for measuring transaction latencies */ - instr_time stmt_begin; /* used for measuring statement latencies */ - int64 txn_latencies; /* cumulated latencies */ - int64 txn_sqlats; /* cumulated square latencies */ - bool is_throttled; /* whether transaction throttling is done */ - int use_file; /* index in sql_files for this client */ - bool prepared[MAX_FILES]; -} CState; - -/* - * Thread state and result - */ -typedef struct -{ - int tid; /* thread id */ - pthread_t thread; /* thread handle */ - CState *state; /* array of CState */ - int nstate; /* length of state[] */ - instr_time start_time; /* thread start time */ - instr_time *exec_elapsed; /* time spent executing cmds (per Command) */ - int *exec_count; /* number of cmd executions (per Command) */ - unsigned short random_state[3]; /* separate randomness for each thread */ - int64 throttle_trigger; /* previous/next throttling (us) */ - int64 throttle_lag; /* total transaction lag behind throttling */ - int64 throttle_lag_max; /* max transaction lag */ -} TState; - -#define INVALID_THREAD ((pthread_t) 0) - -typedef struct -{ - instr_time conn_time; - int64 xacts; - int64 latencies; - int64 sqlats; - int64 throttle_lag; - int64 throttle_lag_max; -} TResult; - -/* - * queries read from files - */ -#define SQL_COMMAND 1 -#define META_COMMAND 2 -#define MAX_ARGS 10 - -typedef enum QueryMode -{ - QUERY_SIMPLE, /* simple query */ - QUERY_EXTENDED, /* extended query */ - QUERY_PREPARED, /* extended query with prepared 
statements */ - NUM_QUERYMODE -} QueryMode; - -static QueryMode querymode = QUERY_SIMPLE; -static const char *QUERYMODE[] = {"simple", "extended", "prepared"}; - -typedef struct -{ - char *line; /* full text of command line */ - int command_num; /* unique index of this Command struct */ - int type; /* command type (SQL_COMMAND or META_COMMAND) */ - int argc; /* number of command words */ - char *argv[MAX_ARGS]; /* command word list */ -} Command; - -typedef struct -{ - - long start_time; /* when does the interval start */ - int cnt; /* number of transactions */ - double min_duration; /* min/max durations */ - double max_duration; - double sum; /* sum(duration), sum(duration^2) - for - * estimates */ - double sum2; - -} AggVals; - -static Command **sql_files[MAX_FILES]; /* SQL script files */ -static int num_files; /* number of script files */ -static int num_commands = 0; /* total number of Command structs */ -static int debug = 0; /* debug flag */ - -/* default scenario */ -static char *tpc_b = { - "\\set nbranches " CppAsString2(nbranches) " * :scale\n" - "\\set ntellers " CppAsString2(ntellers) " * :scale\n" - "\\set naccounts " CppAsString2(naccounts) " * :scale\n" - "\\setrandom aid 1 :naccounts\n" - "\\setrandom bid 1 :nbranches\n" - "\\setrandom tid 1 :ntellers\n" - "\\setrandom delta -5000 5000\n" - "BEGIN;\n" - "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n" - "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n" - "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;\n" - "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n" - "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n" - "END;\n" -}; - -#ifdef PGXC -static char *tpc_b_bid = { - "\\set nbranches " CppAsString2(nbranches) " * :scale\n" - "\\set ntellers " CppAsString2(ntellers) " * :scale\n" - "\\set naccounts " CppAsString2(naccounts) " * :scale\n" - "\\setrandom aid 1 :naccounts\n" - "\\setrandom bid 1 :nbranches\n" - "\\setrandom tid 1 :ntellers\n" - "\\setrandom delta -5000 5000\n" - "BEGIN;\n" - "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n" - "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid\n" - "UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid AND bid = :bid;\n" - "UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;\n" - "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n" - "END;\n" -}; -#endif - - -/* -N case */ -static char *simple_update = { - "\\set nbranches " CppAsString2(nbranches) " * :scale\n" - "\\set ntellers " CppAsString2(ntellers) " * :scale\n" - "\\set naccounts " CppAsString2(naccounts) " * :scale\n" - "\\setrandom aid 1 :naccounts\n" - "\\setrandom bid 1 :nbranches\n" - "\\setrandom tid 1 :ntellers\n" - "\\setrandom delta -5000 5000\n" - "BEGIN;\n" - "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;\n" - "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n" - "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n" - "END;\n" -}; - -#ifdef PGXC -static char *simple_update_bid = { - "\\set nbranches " CppAsString2(nbranches) " * :scale\n" - "\\set ntellers " CppAsString2(ntellers) " * :scale\n" - "\\set naccounts " CppAsString2(naccounts) " * :scale\n" - "\\setrandom aid 1 :naccounts\n" - 
"\\setrandom bid 1 :nbranches\n" - "\\setrandom tid 1 :ntellers\n" - "\\setrandom delta -5000 5000\n" - "BEGIN;\n" - "UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid AND bid = :bid;\n" - "SELECT abalance FROM pgbench_accounts WHERE aid = :aid AND bid = :bid;\n" - "INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);\n" - "END;\n" -}; -#endif - -/* -S case */ -static char *select_only = { - "\\set naccounts " CppAsString2(naccounts) " * :scale\n" - "\\setrandom aid 1 :naccounts\n" - "SELECT abalance FROM pgbench_accounts WHERE aid = :aid;\n" -}; - -/* Function prototypes */ -static void setalarm(int seconds); -static void *threadRun(void *arg); - -static void -usage(void) -{ - printf("%s is a benchmarking tool for PostgreSQL.\n\n" - "Usage:\n" - " %s [OPTION]... [DBNAME]\n" - "\nInitialization options:\n" - " -i, --initialize invokes initialization mode\n" - " -F, --fillfactor=NUM set fill factor\n" -#ifdef PGXC - " -k distribute by primary key branch id - bid\n" -#endif - " -n, --no-vacuum do not run VACUUM after initialization\n" - " -q, --quiet quiet logging (one message each 5 seconds)\n" - " -s, --scale=NUM scaling factor\n" - " --foreign-keys create foreign key constraints between tables\n" - " --index-tablespace=TABLESPACE\n" - " create indexes in the specified tablespace\n" - " --tablespace=TABLESPACE create tables in the specified tablespace\n" - " --unlogged-tables create tables as unlogged tables\n" - "\nBenchmarking options:\n" - " -c, --client=NUM number of concurrent database clients (default: 1)\n" - " -C, --connect establish new connection for each transaction\n" - " -D, --define=VARNAME=VALUE\n" - " define variable for use by custom script\n" - " -f, --file=FILENAME read transaction script from FILENAME\n" -#ifdef PGXC - " -k query with default key and additional key branch id (bid)\n" -#endif - " -j, --jobs=NUM number of threads (default: 1)\n" - " -l, --log write transaction times to log file\n" - " -M, --protocol=simple|extended|prepared\n" - " protocol for submitting queries (default: simple)\n" - " -n, --no-vacuum do not run VACUUM before tests\n" - " -N, --skip-some-updates skip updates of pgbench_tellers and pgbench_branches\n" - " -P, --progress=NUM show thread progress report every NUM seconds\n" - " -r, --report-latencies report average latency per command\n" - " -R, --rate=NUM target rate in transactions per second\n" - " -s, --scale=NUM report this scale factor in output\n" - " -S, --select-only perform SELECT-only transactions\n" - " -t, --transactions=NUM number of transactions each client runs (default: 10)\n" - " -T, --time=NUM duration of benchmark test in seconds\n" - " -v, --vacuum-all vacuum all four standard tables before tests\n" - " --aggregate-interval=NUM aggregate data over NUM seconds\n" - " --sampling-rate=NUM fraction of transactions to log (e.g. 0.01 for 1%%)\n" - "\nCommon options:\n" - " -d, --debug print debugging output\n" - " -h, --host=HOSTNAME database server host or socket directory\n" - " -p, --port=PORT database server port number\n" - " -U, --username=USERNAME connect as specified database user\n" - " -V, --version output version information, then exit\n" - " -?, --help show this help, then exit\n" - "\n" - "Report bugs to <pgsql-bugs@postgresql.org>.\n", - progname, progname); -} - -/* - * strtoint64 -- convert a string to 64-bit integer - * - * This function is a modified version of scanint8() from - * src/backend/utils/adt/int8.c. 
- */ -static int64 -strtoint64(const char *str) -{ - const char *ptr = str; - int64 result = 0; - int sign = 1; - - /* - * Do our own scan, rather than relying on sscanf which might be broken - * for long long. - */ - - /* skip leading spaces */ - while (*ptr && isspace((unsigned char) *ptr)) - ptr++; - - /* handle sign */ - if (*ptr == '-') - { - ptr++; - - /* - * Do an explicit check for INT64_MIN. Ugly though this is, it's - * cleaner than trying to get the loop below to handle it portably. - */ - if (strncmp(ptr, "9223372036854775808", 19) == 0) - { - result = -INT64CONST(0x7fffffffffffffff) - 1; - ptr += 19; - goto gotdigits; - } - sign = -1; - } - else if (*ptr == '+') - ptr++; - - /* require at least one digit */ - if (!isdigit((unsigned char) *ptr)) - fprintf(stderr, "invalid input syntax for integer: \"%s\"\n", str); - - /* process digits */ - while (*ptr && isdigit((unsigned char) *ptr)) - { - int64 tmp = result * 10 + (*ptr++ - '0'); - - if ((tmp / 10) != result) /* overflow? */ - fprintf(stderr, "value \"%s\" is out of range for type bigint\n", str); - result = tmp; - } - -gotdigits: - - /* allow trailing whitespace, but not other trailing chars */ - while (*ptr != '\0' && isspace((unsigned char) *ptr)) - ptr++; - - if (*ptr != '\0') - fprintf(stderr, "invalid input syntax for integer: \"%s\"\n", str); - - return ((sign < 0) ? -result : result); -} - -/* random number generator: uniform distribution from min to max inclusive */ -static int64 -getrand(TState *thread, int64 min, int64 max) -{ - /* - * Odd coding is so that min and max have approximately the same chance of - * being selected as do numbers between them. - * - * pg_erand48() is thread-safe and concurrent, which is why we use it - * rather than random(), which in glibc is non-reentrant, and therefore - * protected by a mutex, and therefore a bottleneck on machines with many - * CPUs. - */ - return min + (int64) ((max - min + 1) * pg_erand48(thread->random_state)); -} - -/* call PQexec() and exit() on failure */ -static void -executeStatement(PGconn *con, const char *sql) -{ - PGresult *res; - - res = PQexec(con, sql); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - { - fprintf(stderr, "%s", PQerrorMessage(con)); - exit(1); - } - PQclear(res); -} - -/* set up a connection to the backend */ -static PGconn * -doConnect(void) -{ - PGconn *conn; - static char *password = NULL; - bool new_pass; - - /* - * Start the connection. Loop until we have a password if requested by - * backend. 
- */ - do - { -#define PARAMS_ARRAY_SIZE 7 - - const char *keywords[PARAMS_ARRAY_SIZE]; - const char *values[PARAMS_ARRAY_SIZE]; - - keywords[0] = "host"; - values[0] = pghost; - keywords[1] = "port"; - values[1] = pgport; - keywords[2] = "user"; - values[2] = login; - keywords[3] = "password"; - values[3] = password; - keywords[4] = "dbname"; - values[4] = dbName; - keywords[5] = "fallback_application_name"; - values[5] = progname; - keywords[6] = NULL; - values[6] = NULL; - - new_pass = false; - - conn = PQconnectdbParams(keywords, values, true); - - if (!conn) - { - fprintf(stderr, "Connection to database \"%s\" failed\n", - dbName); - return NULL; - } - - if (PQstatus(conn) == CONNECTION_BAD && - PQconnectionNeedsPassword(conn) && - password == NULL) - { - PQfinish(conn); - password = simple_prompt("Password: ", 100, false); - new_pass = true; - } - } while (new_pass); - - /* check to see that the backend connection was successfully made */ - if (PQstatus(conn) == CONNECTION_BAD) - { - fprintf(stderr, "Connection to database \"%s\" failed:\n%s", - dbName, PQerrorMessage(conn)); - PQfinish(conn); - return NULL; - } - - return conn; -} - -/* throw away response from backend */ -static void -discard_response(CState *state) -{ - PGresult *res; - - do - { - res = PQgetResult(state->con); - if (res) - PQclear(res); - } while (res); -} - -static int -compareVariables(const void *v1, const void *v2) -{ - return strcmp(((const Variable *) v1)->name, - ((const Variable *) v2)->name); -} - -static char * -getVariable(CState *st, char *name) -{ - Variable key, - *var; - - /* On some versions of Solaris, bsearch of zero items dumps core */ - if (st->nvariables <= 0) - return NULL; - - key.name = name; - var = (Variable *) bsearch((void *) &key, - (void *) st->variables, - st->nvariables, - sizeof(Variable), - compareVariables); - if (var != NULL) - return var->value; - else - return NULL; -} - -/* check whether the name consists of alphabets, numerals and underscores. */ -static bool -isLegalVariableName(const char *name) -{ - int i; - - for (i = 0; name[i] != '\0'; i++) - { - if (!isalnum((unsigned char) name[i]) && name[i] != '_') - return false; - } - - return true; -} - -static int -putVariable(CState *st, const char *context, char *name, char *value) -{ - Variable key, - *var; - - key.name = name; - /* On some versions of Solaris, bsearch of zero items dumps core */ - if (st->nvariables > 0) - var = (Variable *) bsearch((void *) &key, - (void *) st->variables, - st->nvariables, - sizeof(Variable), - compareVariables); - else - var = NULL; - - if (var == NULL) - { - Variable *newvars; - - /* - * Check for the name only when declaring a new variable to avoid - * overhead. 
- */ - if (!isLegalVariableName(name)) - { - fprintf(stderr, "%s: invalid variable name '%s'\n", context, name); - return false; - } - - if (st->variables) - newvars = (Variable *) pg_realloc(st->variables, - (st->nvariables + 1) * sizeof(Variable)); - else - newvars = (Variable *) pg_malloc(sizeof(Variable)); - - st->variables = newvars; - - var = &newvars[st->nvariables]; - - var->name = pg_strdup(name); - var->value = pg_strdup(value); - - st->nvariables++; - - qsort((void *) st->variables, st->nvariables, sizeof(Variable), - compareVariables); - } - else - { - char *val; - - /* dup then free, in case value is pointing at this variable */ - val = pg_strdup(value); - - free(var->value); - var->value = val; - } - - return true; -} - -static char * -parseVariable(const char *sql, int *eaten) -{ - int i = 0; - char *name; - - do - { - i++; - } while (isalnum((unsigned char) sql[i]) || sql[i] == '_'); - if (i == 1) - return NULL; - - name = pg_malloc(i); - memcpy(name, &sql[1], i - 1); - name[i - 1] = '\0'; - - *eaten = i; - return name; -} - -static char * -replaceVariable(char **sql, char *param, int len, char *value) -{ - int valueln = strlen(value); - - if (valueln > len) - { - size_t offset = param - *sql; - - *sql = pg_realloc(*sql, strlen(*sql) - len + valueln + 1); - param = *sql + offset; - } - - if (valueln != len) - memmove(param + valueln, param + len, strlen(param + len) + 1); - strncpy(param, value, valueln); - - return param + valueln; -} - -static char * -assignVariables(CState *st, char *sql) -{ - char *p, - *name, - *val; - - p = sql; - while ((p = strchr(p, ':')) != NULL) - { - int eaten; - - name = parseVariable(p, &eaten); - if (name == NULL) - { - while (*p == ':') - { - p++; - } - continue; - } - - val = getVariable(st, name); - free(name); - if (val == NULL) - { - p++; - continue; - } - - p = replaceVariable(&sql, p, eaten, val); - } - - return sql; -} - -static void -getQueryParams(CState *st, const Command *command, const char **params) -{ - int i; - - for (i = 0; i < command->argc - 1; i++) - params[i] = getVariable(st, command->argv[i + 1]); -} - -/* - * Run a shell command. The result is assigned to the variable if not NULL. - * Return true if succeeded, or false on error. - */ -static bool -runShellCommand(CState *st, char *variable, char **argv, int argc) -{ - char command[SHELL_COMMAND_SIZE]; - int i, - len = 0; - FILE *fp; - char res[64]; - char *endptr; - int retval; - - /*---------- - * Join arguments with whitespace separators. Arguments starting with - * exactly one colon are treated as variables: - * name - append a string "name" - * :var - append a variable named 'var' - * ::name - append a string ":name" - *---------- - */ - for (i = 0; i < argc; i++) - { - char *arg; - int arglen; - - if (argv[i][0] != ':') - { - arg = argv[i]; /* a string literal */ - } - else if (argv[i][1] == ':') - { - arg = argv[i] + 1; /* a string literal starting with colons */ - } - else if ((arg = getVariable(st, argv[i] + 1)) == NULL) - { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[i]); - return false; - } - - arglen = strlen(arg); - if (len + arglen + (i > 0 ? 
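The substitution that assignVariables() applies to simple-protocol queries amounts to splicing each ":name" token out of the SQL string and inserting the variable's textual value in its place. A toy single-variable version, for illustration only (query and value are invented):

#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *tmpl  = "SELECT abalance FROM pgbench_accounts WHERE aid = :aid";
    const char *name  = ":aid";
    const char *value = "17";          /* pretend :aid was set by \setrandom */
    const char *hit   = strstr(tmpl, name);
    char        out[128];

    snprintf(out, sizeof(out), "%.*s%s%s",
             (int) (hit - tmpl), tmpl,  /* text before the variable */
             value,                     /* substituted value */
             hit + strlen(name));       /* text after the variable  */

    printf("%s\n", out);                /* ... WHERE aid = 17 */
    return 0;
}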
1 : 0) >= SHELL_COMMAND_SIZE - 1) - { - fprintf(stderr, "%s: too long shell command\n", argv[0]); - return false; - } - - if (i > 0) - command[len++] = ' '; - memcpy(command + len, arg, arglen); - len += arglen; - } - - command[len] = '\0'; - - /* Fast path for non-assignment case */ - if (variable == NULL) - { - if (system(command)) - { - if (!timer_exceeded) - fprintf(stderr, "%s: cannot launch shell command\n", argv[0]); - return false; - } - return true; - } - - /* Execute the command with pipe and read the standard output. */ - if ((fp = popen(command, "r")) == NULL) - { - fprintf(stderr, "%s: cannot launch shell command\n", argv[0]); - return false; - } - if (fgets(res, sizeof(res), fp) == NULL) - { - if (!timer_exceeded) - fprintf(stderr, "%s: cannot read the result\n", argv[0]); - return false; - } - if (pclose(fp) < 0) - { - fprintf(stderr, "%s: cannot close shell command\n", argv[0]); - return false; - } - - /* Check whether the result is an integer and assign it to the variable */ - retval = (int) strtol(res, &endptr, 10); - while (*endptr != '\0' && isspace((unsigned char) *endptr)) - endptr++; - if (*res == '\0' || *endptr != '\0') - { - fprintf(stderr, "%s: must return an integer ('%s' returned)\n", argv[0], res); - return false; - } - snprintf(res, sizeof(res), "%d", retval); - if (!putVariable(st, "setshell", variable, res)) - return false; - -#ifdef DEBUG - printf("shell parameter name: %s, value: %s\n", argv[1], res); -#endif - return true; -} - -#define MAX_PREPARE_NAME 32 -static void -preparedStatementName(char *buffer, int file, int state) -{ - sprintf(buffer, "P%d_%d", file, state); -} - -static bool -clientDone(CState *st, bool ok) -{ - (void) ok; /* unused */ - - if (st->con != NULL) - { - PQfinish(st->con); - st->con = NULL; - } - return false; /* always false */ -} - -static -void -agg_vals_init(AggVals *aggs, instr_time start) -{ - /* basic counters */ - aggs->cnt = 0; /* number of transactions */ - aggs->sum = 0; /* SUM(duration) */ - aggs->sum2 = 0; /* SUM(duration*duration) */ - - /* min and max transaction duration */ - aggs->min_duration = 0; - aggs->max_duration = 0; - - /* start of the current interval */ - aggs->start_time = INSTR_TIME_GET_DOUBLE(start); -} - -/* return false iff client should be disconnected */ -static bool -doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVals *agg) -{ - PGresult *res; - Command **commands; - bool trans_needs_throttle = false; - -top: - commands = sql_files[st->use_file]; - - /* - * Handle throttling once per transaction by sleeping. It is simpler to - * do this here rather than at the end, because so much complicated logic - * happens below when statements finish. - */ - if (throttle_delay && !st->is_throttled) - { - /* - * Use inverse transform sampling to randomly generate a delay, such - * that the series of delays will approximate a Poisson distribution - * centered on the throttle_delay time. - * - * 10000 implies a 9.2 (-log(1/10000)) to 0.0 (log 1) delay - * multiplier, and results in a 0.055 % target underestimation bias: - * - * SELECT 1.0/AVG(-LN(i/10000.0)) FROM generate_series(1,10000) AS i; - * = 1.000552717032611116335474 - * - * If transactions are too slow or a given wait is shorter than a - * transaction, the next transaction will start right away. 
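The \setshell result check above follows a common strtol()-plus-endptr pattern: parse the leading integer, skip trailing whitespace, and reject anything else. A minimal sketch of just that validation step (the function name is made up):

#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

/* Return 1 if res holds a single integer (trailing whitespace allowed). */
static int
parse_int_result(const char *res, long *out)
{
    char *endptr;

    *out = strtol(res, &endptr, 10);
    while (*endptr != '\0' && isspace((unsigned char) *endptr))
        endptr++;
    return (*res != '\0' && *endptr == '\0');
}

int
main(void)
{
    long v;

    printf("%d\n", parse_int_result("42\n", &v));   /* 1: accepted */
    printf("%d\n", parse_int_result("42x\n", &v));  /* 0: rejected */
    return 0;
}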
- */ - int64 wait = (int64) (throttle_delay * - 1.00055271703 * -log(getrand(thread, 1, 10000) / 10000.0)); - - thread->throttle_trigger += wait; - - st->until = thread->throttle_trigger; - st->sleeping = 1; - st->throttling = true; - st->is_throttled = true; - if (debug) - fprintf(stderr, "client %d throttling " INT64_FORMAT " us\n", - st->id, wait); - } - - if (st->sleeping) - { /* are we sleeping? */ - instr_time now; - int64 now_us; - - INSTR_TIME_SET_CURRENT(now); - now_us = INSTR_TIME_GET_MICROSEC(now); - if (st->until <= now_us) - { - st->sleeping = 0; /* Done sleeping, go ahead with next command */ - if (st->throttling) - { - /* Measure lag of throttled transaction relative to target */ - int64 lag = now_us - st->until; - - thread->throttle_lag += lag; - if (lag > thread->throttle_lag_max) - thread->throttle_lag_max = lag; - st->throttling = false; - } - } - else - return true; /* Still sleeping, nothing to do here */ - } - - if (st->listen) - { /* are we receiver? */ - if (commands[st->state]->type == SQL_COMMAND) - { - if (debug) - fprintf(stderr, "client %d receiving\n", st->id); - if (!PQconsumeInput(st->con)) - { /* there's something wrong */ - fprintf(stderr, "Client %d aborted in state %d. Probably the backend died while processing.\n", st->id, st->state); - return clientDone(st, false); - } - if (PQisBusy(st->con)) - return true; /* don't have the whole result yet */ - } - - /* - * command finished: accumulate per-command execution times in - * thread-local data structure, if per-command latencies are requested - */ - if (is_latencies) - { - instr_time now; - int cnum = commands[st->state]->command_num; - - INSTR_TIME_SET_CURRENT(now); - INSTR_TIME_ACCUM_DIFF(thread->exec_elapsed[cnum], - now, st->stmt_begin); - thread->exec_count[cnum]++; - } - - /* transaction finished: record latency under progress or throttling */ - if ((progress || throttle_delay) && commands[st->state + 1] == NULL) - { - instr_time diff; - int64 latency; - - INSTR_TIME_SET_CURRENT(diff); - INSTR_TIME_SUBTRACT(diff, st->txn_begin); - latency = INSTR_TIME_GET_MICROSEC(diff); - st->txn_latencies += latency; - - /* - * XXX In a long benchmark run of high-latency transactions, this - * int64 addition eventually overflows. For example, 100 threads - * running 10s transactions will overflow it in 2.56 hours. With - * a more-typical OLTP workload of .1s transactions, overflow - * would take 256 hours. - */ - st->txn_sqlats += latency * latency; - } - - /* - * if transaction finished, record the time it took in the log - */ - if (logfile && commands[st->state + 1] == NULL) - { - instr_time now; - instr_time diff; - double usec; - - /* - * write the log entry if this row belongs to the random sample, - * or no sampling rate was given which means log everything. - */ - if (sample_rate == 0.0 || - pg_erand48(thread->random_state) <= sample_rate) - { - INSTR_TIME_SET_CURRENT(now); - diff = now; - INSTR_TIME_SUBTRACT(diff, st->txn_begin); - usec = (double) INSTR_TIME_GET_MICROSEC(diff); - - /* should we aggregate the results or not? */ - if (agg_interval > 0) - { - /* - * are we still in the same interval? 
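The delay computation above is inverse transform sampling: a uniform draw u in (0, 1] becomes an exponentially distributed wait of -log(u) times the mean, so transaction starts approximate a Poisson process. A standalone sketch of the same arithmetic, using rand() purely as a stand-in for pg_erand48() and a made-up rate:

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

int
main(void)
{
    double  mean_us = 5000.0;       /* e.g. a 200 tps target => 5000 us between starts */
    double  sum = 0.0;
    int     n = 100000;
    int     i;

    srand(12345);
    for (i = 0; i < n; i++)
    {
        int     k = 1 + rand() % 10000;     /* uniform 1..10000, as above */
        double  wait = mean_us * 1.00055271703 * -log(k / 10000.0);

        sum += wait;
    }
    /* the average comes out close to the target thanks to the 1.00055 factor */
    printf("average delay: %.1f us (target %.1f us)\n", sum / n, mean_us);
    return 0;
}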
if yes, accumulate - * the values (print them otherwise) - */ - if (agg->start_time + agg_interval >= INSTR_TIME_GET_DOUBLE(now)) - { - agg->cnt += 1; - agg->sum += usec; - agg->sum2 += usec * usec; - - /* first in this aggregation interval */ - if ((agg->cnt == 1) || (usec < agg->min_duration)) - agg->min_duration = usec; - - if ((agg->cnt == 1) || (usec > agg->max_duration)) - agg->max_duration = usec; - } - else - { - /* - * Loop until we reach the interval of the current - * transaction (and print all the empty intervals in - * between). - */ - while (agg->start_time + agg_interval < INSTR_TIME_GET_DOUBLE(now)) - { - /* - * This is a non-Windows branch (thanks to the - * ifdef in usage), so we don't need to handle - * this in a special way (see below). - */ - fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f\n", - agg->start_time, - agg->cnt, - agg->sum, - agg->sum2, - agg->min_duration, - agg->max_duration); - - /* move to the next inteval */ - agg->start_time = agg->start_time + agg_interval; - - /* reset for "no transaction" intervals */ - agg->cnt = 0; - agg->min_duration = 0; - agg->max_duration = 0; - agg->sum = 0; - agg->sum2 = 0; - } - - /* - * and now update the reset values (include the - * current) - */ - agg->cnt = 1; - agg->min_duration = usec; - agg->max_duration = usec; - agg->sum = usec; - agg->sum2 = usec * usec; - } - } - else - { - /* no, print raw transactions */ -#ifndef WIN32 - - /* - * This is more than we really ought to know about - * instr_time - */ - fprintf(logfile, "%d %d %.0f %d %ld %ld\n", - st->id, st->cnt, usec, st->use_file, - (long) now.tv_sec, (long) now.tv_usec); -#else - - /* - * On Windows, instr_time doesn't provide a timestamp - * anyway - */ - fprintf(logfile, "%d %d %.0f %d 0 0\n", - st->id, st->cnt, usec, st->use_file); -#endif - } - } - } - - if (commands[st->state]->type == SQL_COMMAND) - { - /* - * Read and discard the query result; note this is not included in - * the statement latency numbers. - */ - res = PQgetResult(st->con); - switch (PQresultStatus(res)) - { - case PGRES_COMMAND_OK: - case PGRES_TUPLES_OK: - break; /* OK */ - default: - fprintf(stderr, "Client %d aborted in state %d: %s", - st->id, st->state, PQerrorMessage(st->con)); - PQclear(res); - return clientDone(st, false); - } - PQclear(res); - discard_response(st); - } - - if (commands[st->state + 1] == NULL) - { - if (is_connect) - { - PQfinish(st->con); - st->con = NULL; - } - - ++st->cnt; - if ((st->cnt >= nxacts && duration <= 0) || timer_exceeded) - return clientDone(st, true); /* exit success */ - } - - /* increment state counter */ - st->state++; - if (commands[st->state] == NULL) - { - st->state = 0; - st->use_file = (int) getrand(thread, 0, num_files - 1); - commands = sql_files[st->use_file]; - st->is_throttled = false; - - /* - * No transaction is underway anymore, which means there is - * nothing to listen to right now. When throttling rate limits - * are active, a sleep will happen next, as the next transaction - * starts. And then in any case the next SQL command will set - * listen back to 1. 
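Each aggregated log line above carries enough to recover the interval's latency statistics: the count, the sum, and the sum of squares give the mean and, via E[x^2] - E[x]^2, the standard deviation. A tiny worked example with invented latencies:

#include <stdio.h>
#include <math.h>

int
main(void)
{
    double  lat[] = {900.0, 1100.0, 1000.0, 1300.0};    /* made-up latencies, usec */
    long    cnt = 0;
    double  sum = 0.0,
            sum2 = 0.0;
    int     i;

    for (i = 0; i < 4; i++)
    {
        cnt += 1;
        sum += lat[i];
        sum2 += lat[i] * lat[i];
    }

    printf("mean %.1f us, stddev %.1f us\n",
           sum / cnt, sqrt(sum2 / cnt - (sum / cnt) * (sum / cnt)));
    return 0;
}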
- */ - st->listen = 0; - trans_needs_throttle = (throttle_delay > 0); - } - } - - if (st->con == NULL) - { - instr_time start, - end; - - INSTR_TIME_SET_CURRENT(start); - if ((st->con = doConnect()) == NULL) - { - fprintf(stderr, "Client %d aborted in establishing connection.\n", st->id); - return clientDone(st, false); - } - INSTR_TIME_SET_CURRENT(end); - INSTR_TIME_ACCUM_DIFF(*conn_time, end, start); - } - - /* - * This ensures that a throttling delay is inserted before proceeding with - * sql commands, after the first transaction. The first transaction - * throttling is performed when first entering doCustom. - */ - if (trans_needs_throttle) - { - trans_needs_throttle = false; - goto top; - } - - /* Record transaction start time under logging, progress or throttling */ - if ((logfile || progress || throttle_delay) && st->state == 0) - INSTR_TIME_SET_CURRENT(st->txn_begin); - - /* Record statement start time if per-command latencies are requested */ - if (is_latencies) - INSTR_TIME_SET_CURRENT(st->stmt_begin); - - if (commands[st->state]->type == SQL_COMMAND) - { - const Command *command = commands[st->state]; - int r; - - if (querymode == QUERY_SIMPLE) - { - char *sql; - - sql = pg_strdup(command->argv[0]); - sql = assignVariables(st, sql); - - if (debug) - fprintf(stderr, "client %d sending %s\n", st->id, sql); - r = PQsendQuery(st->con, sql); - free(sql); - } - else if (querymode == QUERY_EXTENDED) - { - const char *sql = command->argv[0]; - const char *params[MAX_ARGS]; - - getQueryParams(st, command, params); - - if (debug) - fprintf(stderr, "client %d sending %s\n", st->id, sql); - r = PQsendQueryParams(st->con, sql, command->argc - 1, - NULL, params, NULL, NULL, 0); - } - else if (querymode == QUERY_PREPARED) - { - char name[MAX_PREPARE_NAME]; - const char *params[MAX_ARGS]; - - if (!st->prepared[st->use_file]) - { - int j; - - for (j = 0; commands[j] != NULL; j++) - { - PGresult *res; - char name[MAX_PREPARE_NAME]; - - if (commands[j]->type != SQL_COMMAND) - continue; - preparedStatementName(name, st->use_file, j); - res = PQprepare(st->con, name, - commands[j]->argv[0], commands[j]->argc - 1, NULL); - if (PQresultStatus(res) != PGRES_COMMAND_OK) - fprintf(stderr, "%s", PQerrorMessage(st->con)); - PQclear(res); - } - st->prepared[st->use_file] = true; - } - - getQueryParams(st, command, params); - preparedStatementName(name, st->use_file, st->state); - - if (debug) - fprintf(stderr, "client %d sending %s\n", st->id, name); - r = PQsendQueryPrepared(st->con, name, command->argc - 1, - params, NULL, NULL, 0); - } - else /* unknown sql mode */ - r = 0; - - if (r == 0) - { - if (debug) - fprintf(stderr, "client %d cannot send %s\n", st->id, command->argv[0]); - st->ecnt++; - } - else - st->listen = 1; /* flags that should be listened */ - } - else if (commands[st->state]->type == META_COMMAND) - { - int argc = commands[st->state]->argc, - i; - char **argv = commands[st->state]->argv; - - if (debug) - { - fprintf(stderr, "client %d executing \\%s", st->id, argv[0]); - for (i = 1; i < argc; i++) - fprintf(stderr, " %s", argv[i]); - fprintf(stderr, "\n"); - } - - if (pg_strcasecmp(argv[0], "setrandom") == 0) - { - char *var; - int64 min, - max; - char res[64]; - - if (*argv[2] == ':') - { - if ((var = getVariable(st, argv[2] + 1)) == NULL) - { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[2]); - st->ecnt++; - return true; - } - min = strtoint64(var); - } - else - min = strtoint64(argv[2]); - -#ifdef NOT_USED - if (min < 0) - { - fprintf(stderr, "%s: invalid minimum 
number %d\n", argv[0], min); - st->ecnt++; - return; - } -#endif - - if (*argv[3] == ':') - { - if ((var = getVariable(st, argv[3] + 1)) == NULL) - { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[3]); - st->ecnt++; - return true; - } - max = strtoint64(var); - } - else - max = strtoint64(argv[3]); - - if (max < min) - { - fprintf(stderr, "%s: maximum is less than minimum\n", argv[0]); - st->ecnt++; - return true; - } - - /* - * getrand() needs to be able to subtract max from min and add one - * to the result without overflowing. Since we know max > min, we - * can detect overflow just by checking for a negative result. But - * we must check both that the subtraction doesn't overflow, and - * that adding one to the result doesn't overflow either. - */ - if (max - min < 0 || (max - min) + 1 < 0) - { - fprintf(stderr, "%s: range too large\n", argv[0]); - st->ecnt++; - return true; - } - -#ifdef DEBUG - printf("min: " INT64_FORMAT " max: " INT64_FORMAT " random: " INT64_FORMAT "\n", min, max, getrand(thread, min, max)); -#endif - snprintf(res, sizeof(res), INT64_FORMAT, getrand(thread, min, max)); - - if (!putVariable(st, argv[0], argv[1], res)) - { - st->ecnt++; - return true; - } - - st->listen = 1; - } - else if (pg_strcasecmp(argv[0], "set") == 0) - { - char *var; - int64 ope1, - ope2; - char res[64]; - - if (*argv[2] == ':') - { - if ((var = getVariable(st, argv[2] + 1)) == NULL) - { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[2]); - st->ecnt++; - return true; - } - ope1 = strtoint64(var); - } - else - ope1 = strtoint64(argv[2]); - - if (argc < 5) - snprintf(res, sizeof(res), INT64_FORMAT, ope1); - else - { - if (*argv[4] == ':') - { - if ((var = getVariable(st, argv[4] + 1)) == NULL) - { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[4]); - st->ecnt++; - return true; - } - ope2 = strtoint64(var); - } - else - ope2 = strtoint64(argv[4]); - - if (strcmp(argv[3], "+") == 0) - snprintf(res, sizeof(res), INT64_FORMAT, ope1 + ope2); - else if (strcmp(argv[3], "-") == 0) - snprintf(res, sizeof(res), INT64_FORMAT, ope1 - ope2); - else if (strcmp(argv[3], "*") == 0) - snprintf(res, sizeof(res), INT64_FORMAT, ope1 * ope2); - else if (strcmp(argv[3], "/") == 0) - { - if (ope2 == 0) - { - fprintf(stderr, "%s: division by zero\n", argv[0]); - st->ecnt++; - return true; - } - snprintf(res, sizeof(res), INT64_FORMAT, ope1 / ope2); - } - else - { - fprintf(stderr, "%s: unsupported operator %s\n", argv[0], argv[3]); - st->ecnt++; - return true; - } - } - - if (!putVariable(st, argv[0], argv[1], res)) - { - st->ecnt++; - return true; - } - - st->listen = 1; - } - else if (pg_strcasecmp(argv[0], "sleep") == 0) - { - char *var; - int usec; - instr_time now; - - if (*argv[1] == ':') - { - if ((var = getVariable(st, argv[1] + 1)) == NULL) - { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[1]); - st->ecnt++; - return true; - } - usec = atoi(var); - } - else - usec = atoi(argv[1]); - - if (argc > 2) - { - if (pg_strcasecmp(argv[2], "ms") == 0) - usec *= 1000; - else if (pg_strcasecmp(argv[2], "s") == 0) - usec *= 1000000; - } - else - usec *= 1000000; - - INSTR_TIME_SET_CURRENT(now); - st->until = INSTR_TIME_GET_MICROSEC(now) + usec; - st->sleeping = 1; - - st->listen = 1; - } - else if (pg_strcasecmp(argv[0], "setshell") == 0) - { - bool ret = runShellCommand(st, argv[1], argv + 2, argc - 2); - - if (timer_exceeded) /* timeout */ - return clientDone(st, true); - else if (!ret) /* on error */ - { - st->ecnt++; - return true; - } - else /* 
succeeded */ - st->listen = 1; - } - else if (pg_strcasecmp(argv[0], "shell") == 0) - { - bool ret = runShellCommand(st, NULL, argv + 1, argc - 1); - - if (timer_exceeded) /* timeout */ - return clientDone(st, true); - else if (!ret) /* on error */ - { - st->ecnt++; - return true; - } - else /* succeeded */ - st->listen = 1; - } - goto top; - } - - return true; -} - -/* discard connections */ -static void -disconnect_all(CState *state, int length) -{ - int i; - - for (i = 0; i < length; i++) - { - if (state[i].con) - { - PQfinish(state[i].con); - state[i].con = NULL; - } - } -} - -/* create tables and setup data */ -static void -init(bool is_no_vacuum) -{ -/* - * The scale factor at/beyond which 32-bit integers are insufficient for - * storing TPC-B account IDs. - * - * Although the actual threshold is 21474, we use 20000 because it is easier to - * document and remember, and isn't that far away from the real threshold. - */ -#define SCALE_32BIT_THRESHOLD 20000 - - /* - * Note: TPC-B requires at least 100 bytes per row, and the "filler" - * fields in these table declarations were intended to comply with that. - * The pgbench_accounts table complies with that because the "filler" - * column is set to blank-padded empty string. But for all other tables - * the columns default to NULL and so don't actually take any space. We - * could fix that by giving them non-null default values. However, that - * would completely break comparability of pgbench results with prior - * versions. Since pgbench has never pretended to be fully TPC-B compliant - * anyway, we stick with the historical behavior. - */ - struct ddlinfo - { - const char *table; /* table name */ - const char *smcols; /* column decls if accountIDs are 32 bits */ - const char *bigcols; /* column decls if accountIDs are 64 bits */ - int declare_fillfactor; -#ifdef PGXC - char *distribute_by; -#endif - }; - static const struct ddlinfo DDLs[] = { - { - "pgbench_history", - "tid int,bid int,aid int,delta int,mtime timestamp,filler char(22)", - "tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22)", - 0 -#ifdef PGXC - , "distribute by hash (bid)" -#endif - }, - { - "pgbench_tellers", - "tid int not null,bid int,tbalance int,filler char(84)", - "tid int not null,bid int,tbalance int,filler char(84)", - 1 -#ifdef PGXC - , "distribute by hash (bid)" -#endif - }, - { - "pgbench_accounts", - "aid int not null,bid int,abalance int,filler char(84)", - "aid bigint not null,bid int,abalance int,filler char(84)", - 1 -#ifdef PGXC - , "distribute by hash (bid)" -#endif - }, - { - "pgbench_branches", - "bid int not null,bbalance int,filler char(88)", - "bid int not null,bbalance int,filler char(88)", - 1 -#ifdef PGXC - , "distribute by hash (bid)" -#endif - } - }; - static const char *const DDLINDEXes[] = { - "alter table pgbench_branches add primary key (bid)", - "alter table pgbench_tellers add primary key (tid)", - "alter table pgbench_accounts add primary key (aid)" - }; - static const char *const DDLKEYs[] = { - "alter table pgbench_tellers add foreign key (bid) references pgbench_branches", - "alter table pgbench_accounts add foreign key (bid) references pgbench_branches", - "alter table pgbench_history add foreign key (bid) references pgbench_branches", - "alter table pgbench_history add foreign key (tid) references pgbench_tellers", - "alter table pgbench_history add foreign key (aid) references pgbench_accounts" - }; - -#ifdef PGXC - static char *DDLAFTERs_bid[] = { - "alter table pgbench_branches add primary key (bid)", - 
"alter table pgbench_tellers add primary key (tid,bid)", - "alter table pgbench_accounts add primary key (aid,bid)" - }; -#endif - - PGconn *con; - PGresult *res; - char sql[256]; - int i; - int64 k; - - /* used to track elapsed time and estimate of the remaining time */ - instr_time start, - diff; - double elapsed_sec, - remaining_sec; - int log_interval = 1; - - if ((con = doConnect()) == NULL) - exit(1); - - for (i = 0; i < lengthof(DDLs); i++) - { - char opts[256]; - char buffer[256]; - const struct ddlinfo *ddl = &DDLs[i]; - const char *cols; - - /* Remove old table, if it exists. */ - snprintf(buffer, sizeof(buffer), "drop table if exists %s", ddl->table); - executeStatement(con, buffer); - - /* Construct new create table statement. */ - opts[0] = '\0'; - if (ddl->declare_fillfactor) - snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts), - " with (fillfactor=%d)", fillfactor); - if (tablespace != NULL) - { - char *escape_tablespace; - - escape_tablespace = PQescapeIdentifier(con, tablespace, - strlen(tablespace)); - snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts), - " tablespace %s", escape_tablespace); - PQfreemem(escape_tablespace); - } - - cols = (scale >= SCALE_32BIT_THRESHOLD) ? ddl->bigcols : ddl->smcols; - -#ifdef PGXC - /* Add distribution columns if necessary */ - if (use_branch) - snprintf(buffer, sizeof(buffer), "create%s table %s(%s)%s %s", - unlogged_tables ? " unlogged" : "", - ddl->table, cols, opts, ddl->distribute_by); - else -#endif - snprintf(buffer, sizeof(buffer), "create%s table %s(%s)%s", - unlogged_tables ? " unlogged" : "", - ddl->table, cols, opts); - - executeStatement(con, buffer); - } - - executeStatement(con, "begin"); - - for (i = 0; i < nbranches * scale; i++) - { - /* "filler" column defaults to NULL */ - snprintf(sql, sizeof(sql), - "insert into pgbench_branches(bid,bbalance) values(%d,0)", - i + 1); - executeStatement(con, sql); - } - - for (i = 0; i < ntellers * scale; i++) - { - /* "filler" column defaults to NULL */ - snprintf(sql, sizeof(sql), - "insert into pgbench_tellers(tid,bid,tbalance) values (%d,%d,0)", - i + 1, i / ntellers + 1); - executeStatement(con, sql); - } - - executeStatement(con, "commit"); - - /* - * fill the pgbench_accounts table with some data - */ - fprintf(stderr, "creating tables...\n"); - - executeStatement(con, "begin"); - executeStatement(con, "truncate pgbench_accounts"); - - res = PQexec(con, "copy pgbench_accounts from stdin"); - if (PQresultStatus(res) != PGRES_COPY_IN) - { - fprintf(stderr, "%s", PQerrorMessage(con)); - exit(1); - } - PQclear(res); - - INSTR_TIME_SET_CURRENT(start); - - for (k = 0; k < (int64) naccounts * scale; k++) - { - int64 j = k + 1; - - /* "filler" column defaults to blank padded empty string */ - snprintf(sql, sizeof(sql), - INT64_FORMAT "\t" INT64_FORMAT "\t%d\t\n", - j, k / naccounts + 1, 0); - if (PQputline(con, sql)) - { - fprintf(stderr, "PQputline failed\n"); - exit(1); - } - - /* - * If we want to stick with the original logging, print a message each - * 100k inserted rows. 
- */ - if ((!use_quiet) && (j % 100000 == 0)) - { - INSTR_TIME_SET_CURRENT(diff); - INSTR_TIME_SUBTRACT(diff, start); - - elapsed_sec = INSTR_TIME_GET_DOUBLE(diff); - remaining_sec = ((double) scale * naccounts - j) * elapsed_sec / j; - - fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n", - j, (int64) naccounts * scale, - (int) (((int64) j * 100) / (naccounts * (int64) scale)), - elapsed_sec, remaining_sec); - } - /* let's not call the timing for each row, but only each 100 rows */ - else if (use_quiet && (j % 100 == 0)) - { - INSTR_TIME_SET_CURRENT(diff); - INSTR_TIME_SUBTRACT(diff, start); - - elapsed_sec = INSTR_TIME_GET_DOUBLE(diff); - remaining_sec = ((double) scale * naccounts - j) * elapsed_sec / j; - - /* have we reached the next interval (or end)? */ - if ((j == scale * naccounts) || (elapsed_sec >= log_interval * LOG_STEP_SECONDS)) - { - fprintf(stderr, INT64_FORMAT " of " INT64_FORMAT " tuples (%d%%) done (elapsed %.2f s, remaining %.2f s).\n", - j, (int64) naccounts * scale, - (int) (((int64) j * 100) / (naccounts * (int64) scale)), elapsed_sec, remaining_sec); - - /* skip to the next interval */ - log_interval = (int) ceil(elapsed_sec / LOG_STEP_SECONDS); - } - } - - } - if (PQputline(con, "\\.\n")) - { - fprintf(stderr, "very last PQputline failed\n"); - exit(1); - } - if (PQendcopy(con)) - { - fprintf(stderr, "PQendcopy failed\n"); - exit(1); - } - executeStatement(con, "commit"); - - /* vacuum */ - if (!is_no_vacuum) - { - fprintf(stderr, "vacuum...\n"); - executeStatement(con, "vacuum analyze pgbench_branches"); - executeStatement(con, "vacuum analyze pgbench_tellers"); - executeStatement(con, "vacuum analyze pgbench_accounts"); - executeStatement(con, "vacuum analyze pgbench_history"); - } - - /* - * create indexes - */ - fprintf(stderr, "set primary key...\n"); -#ifdef PGXC - /* - * If all the tables are distributed according to bid, create an index on it - * instead. - */ - if (use_branch) - { - for (i = 0; i < lengthof(DDLAFTERs_bid); i++) - { - char buffer[256]; - - strncpy(buffer, DDLAFTERs_bid[i], 256); - - if (index_tablespace != NULL) - { - char *escape_tablespace; - - escape_tablespace = PQescapeIdentifier(con, index_tablespace, - strlen(index_tablespace)); - snprintf(buffer + strlen(buffer), 256 - strlen(buffer), - " using index tablespace %s", escape_tablespace); - PQfreemem(escape_tablespace); - } - - executeStatement(con, buffer); - } - } - else -#endif - for (i = 0; i < lengthof(DDLINDEXes); i++) - { - char buffer[256]; - - strlcpy(buffer, DDLINDEXes[i], sizeof(buffer)); - - if (index_tablespace != NULL) - { - char *escape_tablespace; - - escape_tablespace = PQescapeIdentifier(con, index_tablespace, - strlen(index_tablespace)); - snprintf(buffer + strlen(buffer), sizeof(buffer) - strlen(buffer), - " using index tablespace %s", escape_tablespace); - PQfreemem(escape_tablespace); - } - - executeStatement(con, buffer); - } - - /* - * create foreign keys - */ - if (foreign_keys) - { - fprintf(stderr, "set foreign keys...\n"); - for (i = 0; i < lengthof(DDLKEYs); i++) - { - executeStatement(con, DDLKEYs[i]); - } - } - - fprintf(stderr, "done.\n"); - PQfinish(con); -} - -/* - * Parse the raw sql and replace :param to $n. 
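The progress messages above use a simple linear extrapolation: the time spent on the rows loaded so far is scaled up to the rows that remain. A worked example with invented numbers:

#include <stdio.h>

int
main(void)
{
    long    total = 10000000;       /* rows to load */
    long    done = 2500000;         /* rows loaded so far */
    double  elapsed_sec = 30.0;     /* time spent so far */
    double  remaining_sec = ((double) total - done) * elapsed_sec / done;

    printf("%ld of %ld tuples (%d%%) done (elapsed %.2f s, remaining %.2f s)\n",
           done, total, (int) (done * 100 / total), elapsed_sec, remaining_sec);
    return 0;
}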
- */ -static bool -parseQuery(Command *cmd, const char *raw_sql) -{ - char *sql, - *p; - - sql = pg_strdup(raw_sql); - cmd->argc = 1; - - p = sql; - while ((p = strchr(p, ':')) != NULL) - { - char var[12]; - char *name; - int eaten; - - name = parseVariable(p, &eaten); - if (name == NULL) - { - while (*p == ':') - { - p++; - } - continue; - } - - if (cmd->argc >= MAX_ARGS) - { - fprintf(stderr, "statement has too many arguments (maximum is %d): %s\n", MAX_ARGS - 1, raw_sql); - return false; - } - - sprintf(var, "$%d", cmd->argc); - p = replaceVariable(&sql, p, eaten, var); - - cmd->argv[cmd->argc] = name; - cmd->argc++; - } - - cmd->argv[0] = sql; - return true; -} - -/* Parse a command; return a Command struct, or NULL if it's a comment */ -static Command * -process_commands(char *buf) -{ - const char delim[] = " \f\n\r\t\v"; - - Command *my_commands; - int j; - char *p, - *tok; - - /* Make the string buf end at the next newline */ - if ((p = strchr(buf, '\n')) != NULL) - *p = '\0'; - - /* Skip leading whitespace */ - p = buf; - while (isspace((unsigned char) *p)) - p++; - - /* If the line is empty or actually a comment, we're done */ - if (*p == '\0' || strncmp(p, "--", 2) == 0) - return NULL; - - /* Allocate and initialize Command structure */ - my_commands = (Command *) pg_malloc(sizeof(Command)); - my_commands->line = pg_strdup(buf); - my_commands->command_num = num_commands++; - my_commands->type = 0; /* until set */ - my_commands->argc = 0; - - if (*p == '\\') - { - my_commands->type = META_COMMAND; - - j = 0; - tok = strtok(++p, delim); - - while (tok != NULL) - { - my_commands->argv[j++] = pg_strdup(tok); - my_commands->argc++; - tok = strtok(NULL, delim); - } - - if (pg_strcasecmp(my_commands->argv[0], "setrandom") == 0) - { - if (my_commands->argc < 4) - { - fprintf(stderr, "%s: missing argument\n", my_commands->argv[0]); - exit(1); - } - - for (j = 4; j < my_commands->argc; j++) - fprintf(stderr, "%s: extra argument \"%s\" ignored\n", - my_commands->argv[0], my_commands->argv[j]); - } - else if (pg_strcasecmp(my_commands->argv[0], "set") == 0) - { - if (my_commands->argc < 3) - { - fprintf(stderr, "%s: missing argument\n", my_commands->argv[0]); - exit(1); - } - - for (j = my_commands->argc < 5 ? 3 : 5; j < my_commands->argc; j++) - fprintf(stderr, "%s: extra argument \"%s\" ignored\n", - my_commands->argv[0], my_commands->argv[j]); - } - else if (pg_strcasecmp(my_commands->argv[0], "sleep") == 0) - { - if (my_commands->argc < 2) - { - fprintf(stderr, "%s: missing argument\n", my_commands->argv[0]); - exit(1); - } - - /* - * Split argument into number and unit to allow "sleep 1ms" etc. - * We don't have to terminate the number argument with null - * because it will be parsed with atoi, which ignores trailing - * non-digit characters. 
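For the extended and prepared protocols, parseQuery() above rewrites each ":name" reference as a positional placeholder ($1, $2, ...) and records the variable name in argv, so the value can travel separately as a parameter at execution time. A minimal sketch of the resulting call using libpq's PQexecParams(); the conninfo and values are invented:

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *con = PQconnectdb("dbname=postgres");
    const char *sql = "SELECT abalance FROM pgbench_accounts WHERE aid = $1";
    const char *params[1] = {"17"};     /* value that :aid would have held */
    PGresult   *res;

    if (PQstatus(con) != CONNECTION_OK)
    {
        fprintf(stderr, "%s", PQerrorMessage(con));
        PQfinish(con);
        return 1;
    }
    res = PQexecParams(con, sql, 1, NULL, params, NULL, NULL, 0);
    if (PQresultStatus(res) == PGRES_TUPLES_OK)
        printf("abalance = %s\n", PQgetvalue(res, 0, 0));
    else
        fprintf(stderr, "%s", PQerrorMessage(con));
    PQclear(res);
    PQfinish(con);
    return 0;
}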
- */ - if (my_commands->argv[1][0] != ':') - { - char *c = my_commands->argv[1]; - - while (isdigit((unsigned char) *c)) - c++; - if (*c) - { - my_commands->argv[2] = c; - if (my_commands->argc < 3) - my_commands->argc = 3; - } - } - - if (my_commands->argc >= 3) - { - if (pg_strcasecmp(my_commands->argv[2], "us") != 0 && - pg_strcasecmp(my_commands->argv[2], "ms") != 0 && - pg_strcasecmp(my_commands->argv[2], "s") != 0) - { - fprintf(stderr, "%s: unknown time unit '%s' - must be us, ms or s\n", - my_commands->argv[0], my_commands->argv[2]); - exit(1); - } - } - - for (j = 3; j < my_commands->argc; j++) - fprintf(stderr, "%s: extra argument \"%s\" ignored\n", - my_commands->argv[0], my_commands->argv[j]); - } - else if (pg_strcasecmp(my_commands->argv[0], "setshell") == 0) - { - if (my_commands->argc < 3) - { - fprintf(stderr, "%s: missing argument\n", my_commands->argv[0]); - exit(1); - } - } - else if (pg_strcasecmp(my_commands->argv[0], "shell") == 0) - { - if (my_commands->argc < 1) - { - fprintf(stderr, "%s: missing command\n", my_commands->argv[0]); - exit(1); - } - } - else - { - fprintf(stderr, "Invalid command %s\n", my_commands->argv[0]); - exit(1); - } - } - else - { - my_commands->type = SQL_COMMAND; - - switch (querymode) - { - case QUERY_SIMPLE: - my_commands->argv[0] = pg_strdup(p); - my_commands->argc++; - break; - case QUERY_EXTENDED: - case QUERY_PREPARED: - if (!parseQuery(my_commands, p)) - exit(1); - break; - default: - exit(1); - } - } - - return my_commands; -} - -/* - * Read a line from fd, and return it in a malloc'd buffer. - * Return NULL at EOF. - * - * The buffer will typically be larger than necessary, but we don't care - * in this program, because we'll free it as soon as we've parsed the line. - */ -static char * -read_line_from_file(FILE *fd) -{ - char tmpbuf[BUFSIZ]; - char *buf; - size_t buflen = BUFSIZ; - size_t used = 0; - - buf = (char *) palloc(buflen); - buf[0] = '\0'; - - while (fgets(tmpbuf, BUFSIZ, fd) != NULL) - { - size_t thislen = strlen(tmpbuf); - - /* Append tmpbuf to whatever we had already */ - memcpy(buf + used, tmpbuf, thislen + 1); - used += thislen; - - /* Done if we collected a newline */ - if (thislen > 0 && tmpbuf[thislen - 1] == '\n') - break; - - /* Else, enlarge buf to ensure we can append next bufferload */ - buflen += BUFSIZ; - buf = (char *) pg_realloc(buf, buflen); - } - - if (used > 0) - return buf; - - /* Reached EOF */ - free(buf); - return NULL; -} - -static int -process_file(char *filename) -{ -#define COMMANDS_ALLOC_NUM 128 - - Command **my_commands; - FILE *fd; - int lineno; - char *buf; - int alloc_num; - - if (num_files >= MAX_FILES) - { - fprintf(stderr, "Up to only %d SQL files are allowed\n", MAX_FILES); - exit(1); - } - - alloc_num = COMMANDS_ALLOC_NUM; - my_commands = (Command **) pg_malloc(sizeof(Command *) * alloc_num); - - if (strcmp(filename, "-") == 0) - fd = stdin; - else if ((fd = fopen(filename, "r")) == NULL) - { - fprintf(stderr, "%s: %s\n", filename, strerror(errno)); - return false; - } - - lineno = 0; - - while ((buf = read_line_from_file(fd)) != NULL) - { - Command *command; - - command = process_commands(buf); - - free(buf); - - if (command == NULL) - continue; - - my_commands[lineno] = command; - lineno++; - - if (lineno >= alloc_num) - { - alloc_num += COMMANDS_ALLOC_NUM; - my_commands = pg_realloc(my_commands, sizeof(Command *) * alloc_num); - } - } - fclose(fd); - - my_commands[lineno] = NULL; - - sql_files[num_files++] = my_commands; - - return true; -} - -static Command ** 
-process_builtin(char *tb) -{ -#define COMMANDS_ALLOC_NUM 128 - - Command **my_commands; - int lineno; - char buf[BUFSIZ]; - int alloc_num; - - alloc_num = COMMANDS_ALLOC_NUM; - my_commands = (Command **) pg_malloc(sizeof(Command *) * alloc_num); - - lineno = 0; - - for (;;) - { - char *p; - Command *command; - - p = buf; - while (*tb && *tb != '\n') - *p++ = *tb++; - - if (*tb == '\0') - break; - - if (*tb == '\n') - tb++; - - *p = '\0'; - - command = process_commands(buf); - if (command == NULL) - continue; - - my_commands[lineno] = command; - lineno++; - - if (lineno >= alloc_num) - { - alloc_num += COMMANDS_ALLOC_NUM; - my_commands = pg_realloc(my_commands, sizeof(Command *) * alloc_num); - } - } - - my_commands[lineno] = NULL; - - return my_commands; -} - -/* print out results */ -static void -printResults(int ttype, int64 normal_xacts, int nclients, - TState *threads, int nthreads, - instr_time total_time, instr_time conn_total_time, - int64 total_latencies, int64 total_sqlats, - int64 throttle_lag, int64 throttle_lag_max) -{ - double time_include, - tps_include, - tps_exclude; - char *s; - - time_include = INSTR_TIME_GET_DOUBLE(total_time); - tps_include = normal_xacts / time_include; - tps_exclude = normal_xacts / (time_include - - (INSTR_TIME_GET_DOUBLE(conn_total_time) / nthreads)); - - if (ttype == 0) - s = "TPC-B (sort of)"; - else if (ttype == 2) - s = "Update only pgbench_accounts"; - else if (ttype == 1) - s = "SELECT only"; - else - s = "Custom query"; - - printf("transaction type: %s\n", s); - printf("scaling factor: %d\n", scale); - printf("query mode: %s\n", QUERYMODE[querymode]); - printf("number of clients: %d\n", nclients); - printf("number of threads: %d\n", nthreads); - if (duration <= 0) - { - printf("number of transactions per client: %d\n", nxacts); - printf("number of transactions actually processed: " INT64_FORMAT "/" INT64_FORMAT "\n", - normal_xacts, (int64) nxacts * nclients); - } - else - { - printf("duration: %d s\n", duration); - printf("number of transactions actually processed: " INT64_FORMAT "\n", - normal_xacts); - } - - if (throttle_delay || progress) - { - /* compute and show latency average and standard deviation */ - double latency = 0.001 * total_latencies / normal_xacts; - double sqlat = (double) total_sqlats / normal_xacts; - - printf("latency average: %.3f ms\n" - "latency stddev: %.3f ms\n", - latency, 0.001 * sqrt(sqlat - 1000000.0 * latency * latency)); - } - else - { - /* only an average latency computed from the duration is available */ - printf("latency average: %.3f ms\n", - 1000.0 * duration * nclients / normal_xacts); - } - - if (throttle_delay) - { - /* - * Report average transaction lag under rate limit throttling. This - * is the delay between scheduled and actual start times for the - * transaction. The measured lag may be caused by thread/client load, - * the database load, or the Poisson throttling process. 
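The latency figures printed above come from running per-transaction sums kept in microseconds: the average is total_latencies divided by the transaction count, and the standard deviation uses the usual E[x^2] - E[x]^2 identity before converting to milliseconds. A small worked example with invented latencies of 3, 5 and 4 ms:

#include <stdio.h>
#include <math.h>

int
main(void)
{
    long long   xacts = 3;
    long long   total_latencies = 3000 + 5000 + 4000;                      /* usec   */
    long long   total_sqlats = 3000LL * 3000 + 5000LL * 5000 + 4000LL * 4000; /* usec^2 */

    double latency = 0.001 * total_latencies / xacts;       /* ms */
    double sqlat = (double) total_sqlats / xacts;           /* usec^2 */
    double stddev = 0.001 * sqrt(sqlat - 1000000.0 * latency * latency);

    printf("latency average: %.3f ms\nlatency stddev: %.3f ms\n", latency, stddev);
    return 0;
}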
- */ - printf("rate limit schedule lag: avg %.3f (max %.3f) ms\n", - 0.001 * throttle_lag / normal_xacts, 0.001 * throttle_lag_max); - } - - printf("tps = %f (including connections establishing)\n", tps_include); - printf("tps = %f (excluding connections establishing)\n", tps_exclude); - - /* Report per-command latencies */ - if (is_latencies) - { - int i; - - for (i = 0; i < num_files; i++) - { - Command **commands; - - if (num_files > 1) - printf("statement latencies in milliseconds, file %d:\n", i + 1); - else - printf("statement latencies in milliseconds:\n"); - - for (commands = sql_files[i]; *commands != NULL; commands++) - { - Command *command = *commands; - int cnum = command->command_num; - double total_time; - instr_time total_exec_elapsed; - int total_exec_count; - int t; - - /* Accumulate per-thread data for command */ - INSTR_TIME_SET_ZERO(total_exec_elapsed); - total_exec_count = 0; - for (t = 0; t < nthreads; t++) - { - TState *thread = &threads[t]; - - INSTR_TIME_ADD(total_exec_elapsed, - thread->exec_elapsed[cnum]); - total_exec_count += thread->exec_count[cnum]; - } - - if (total_exec_count > 0) - total_time = INSTR_TIME_GET_MILLISEC(total_exec_elapsed) / (double) total_exec_count; - else - total_time = 0.0; - - printf("\t%f\t%s\n", total_time, command->line); - } - } - } -} - - -int -main(int argc, char **argv) -{ - static struct option long_options[] = { - /* systematic long/short named options */ - {"client", required_argument, NULL, 'c'}, - {"connect", no_argument, NULL, 'C'}, - {"debug", no_argument, NULL, 'd'}, - {"define", required_argument, NULL, 'D'}, - {"file", required_argument, NULL, 'f'}, - {"fillfactor", required_argument, NULL, 'F'}, - {"host", required_argument, NULL, 'h'}, - {"initialize", no_argument, NULL, 'i'}, - {"jobs", required_argument, NULL, 'j'}, - {"log", no_argument, NULL, 'l'}, - {"no-vacuum", no_argument, NULL, 'n'}, - {"port", required_argument, NULL, 'p'}, - {"progress", required_argument, NULL, 'P'}, - {"protocol", required_argument, NULL, 'M'}, - {"quiet", no_argument, NULL, 'q'}, - {"report-latencies", no_argument, NULL, 'r'}, - {"scale", required_argument, NULL, 's'}, - {"select-only", no_argument, NULL, 'S'}, - {"skip-some-updates", no_argument, NULL, 'N'}, - {"time", required_argument, NULL, 'T'}, - {"transactions", required_argument, NULL, 't'}, - {"username", required_argument, NULL, 'U'}, - {"vacuum-all", no_argument, NULL, 'v'}, - /* long-named only options */ - {"foreign-keys", no_argument, &foreign_keys, 1}, - {"index-tablespace", required_argument, NULL, 3}, - {"tablespace", required_argument, NULL, 2}, - {"unlogged-tables", no_argument, &unlogged_tables, 1}, - {"sampling-rate", required_argument, NULL, 4}, - {"aggregate-interval", required_argument, NULL, 5}, - {"rate", required_argument, NULL, 'R'}, - {NULL, 0, NULL, 0} - }; - - int c; - int nclients = 1; /* default number of simulated clients */ - int nthreads = 1; /* default number of threads */ - int is_init_mode = 0; /* initialize mode? */ - int is_no_vacuum = 0; /* no vacuum at all before testing? */ - int do_vacuum_accounts = 0; /* do vacuum accounts before testing? */ - int ttype = 0; /* transaction type. 
0: TPC-B, 1: SELECT only, - * 2: skip update of branches and tellers */ - int optindex; - char *filename = NULL; - bool scale_given = false; - - CState *state; /* status of clients */ - TState *threads; /* array of thread */ - - instr_time start_time; /* start up time */ - instr_time total_time; - instr_time conn_total_time; - int64 total_xacts = 0; - int64 total_latencies = 0; - int64 total_sqlats = 0; - int64 throttle_lag = 0; - int64 throttle_lag_max = 0; - - int i; - -#ifdef HAVE_GETRLIMIT - struct rlimit rlim; -#endif - - PGconn *con; - PGresult *res; - char *env; - - char val[64]; - - progname = get_progname(argv[0]); - - if (argc > 1) - { - if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) - { - usage(); - exit(0); - } - if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) - { - puts("pgbench (PostgreSQL) " PG_VERSION); - exit(0); - } - } - -#ifdef WIN32 - /* stderr is buffered on Win32. */ - setvbuf(stderr, NULL, _IONBF, 0); -#endif - - if ((env = getenv("PGHOST")) != NULL && *env != '\0') - pghost = env; - if ((env = getenv("PGPORT")) != NULL && *env != '\0') - pgport = env; - else if ((env = getenv("PGUSER")) != NULL && *env != '\0') - login = env; - - state = (CState *) pg_malloc(sizeof(CState)); - memset(state, 0, sizeof(CState)); - -#ifdef PGXC - while ((c = getopt_long(argc, argv, "ih:knvp:dqSNc:j:Crs:t:T:U:lf:D:F:M:P:R:", long_options, &optindex)) != -1) -#else - while ((c = getopt_long(argc, argv, "ih:nvp:dqSNc:j:Crs:t:T:U:lf:D:F:M:P:R:", long_options, &optindex)) != -1) -#endif - { - switch (c) - { - case 'i': - is_init_mode++; - break; -#ifdef PGXC - case 'k': - use_branch = true; - break; -#endif - case 'h': - pghost = pg_strdup(optarg); - break; - case 'n': - is_no_vacuum++; - break; - case 'v': - do_vacuum_accounts++; - break; - case 'p': - pgport = pg_strdup(optarg); - break; - case 'd': - debug++; - break; - case 'S': - ttype = 1; - break; - case 'N': - ttype = 2; - break; - case 'c': - nclients = atoi(optarg); - if (nclients <= 0 || nclients > MAXCLIENTS) - { - fprintf(stderr, "invalid number of clients: %d\n", nclients); - exit(1); - } -#ifdef HAVE_GETRLIMIT -#ifdef RLIMIT_NOFILE /* most platforms use RLIMIT_NOFILE */ - if (getrlimit(RLIMIT_NOFILE, &rlim) == -1) -#else /* but BSD doesn't ... 
*/ - if (getrlimit(RLIMIT_OFILE, &rlim) == -1) -#endif /* RLIMIT_NOFILE */ - { - fprintf(stderr, "getrlimit failed: %s\n", strerror(errno)); - exit(1); - } - if (rlim.rlim_cur <= (nclients + 2)) - { - fprintf(stderr, "You need at least %d open files but you are only allowed to use %ld.\n", nclients + 2, (long) rlim.rlim_cur); - fprintf(stderr, "Use limit/ulimit to increase the limit before using pgbench.\n"); - exit(1); - } -#endif /* HAVE_GETRLIMIT */ - break; - case 'j': /* jobs */ - nthreads = atoi(optarg); - if (nthreads <= 0) - { - fprintf(stderr, "invalid number of threads: %d\n", nthreads); - exit(1); - } - break; - case 'C': - is_connect = true; - break; - case 'r': - is_latencies = true; - break; - case 's': - scale_given = true; - scale = atoi(optarg); - if (scale <= 0) - { - fprintf(stderr, "invalid scaling factor: %d\n", scale); - exit(1); - } - break; - case 't': - if (duration > 0) - { - fprintf(stderr, "specify either a number of transactions (-t) or a duration (-T), not both.\n"); - exit(1); - } - nxacts = atoi(optarg); - if (nxacts <= 0) - { - fprintf(stderr, "invalid number of transactions: %d\n", nxacts); - exit(1); - } - break; - case 'T': - if (nxacts > 0) - { - fprintf(stderr, "specify either a number of transactions (-t) or a duration (-T), not both.\n"); - exit(1); - } - duration = atoi(optarg); - if (duration <= 0) - { - fprintf(stderr, "invalid duration: %d\n", duration); - exit(1); - } - break; - case 'U': - login = pg_strdup(optarg); - break; - case 'l': - use_log = true; - break; - case 'q': - use_quiet = true; - break; - case 'f': - ttype = 3; - filename = pg_strdup(optarg); - if (process_file(filename) == false || *sql_files[num_files - 1] == NULL) - exit(1); - break; - case 'D': - { - char *p; - - if ((p = strchr(optarg, '=')) == NULL || p == optarg || *(p + 1) == '\0') - { - fprintf(stderr, "invalid variable definition: %s\n", optarg); - exit(1); - } - - *p++ = '\0'; - if (!putVariable(&state[0], "option", optarg, p)) - exit(1); - } - break; - case 'F': - fillfactor = atoi(optarg); - if ((fillfactor < 10) || (fillfactor > 100)) - { - fprintf(stderr, "invalid fillfactor: %d\n", fillfactor); - exit(1); - } - break; - case 'M': - if (num_files > 0) - { - fprintf(stderr, "query mode (-M) should be specifiled before transaction scripts (-f)\n"); - exit(1); - } - for (querymode = 0; querymode < NUM_QUERYMODE; querymode++) - if (strcmp(optarg, QUERYMODE[querymode]) == 0) - break; - if (querymode >= NUM_QUERYMODE) - { - fprintf(stderr, "invalid query mode (-M): %s\n", optarg); - exit(1); - } - break; - case 'P': - progress = atoi(optarg); - if (progress <= 0) - { - fprintf(stderr, - "thread progress delay (-P) must be positive (%s)\n", - optarg); - exit(1); - } - break; - case 'R': - { - /* get a double from the beginning of option value */ - double throttle_value = atof(optarg); - - if (throttle_value <= 0.0) - { - fprintf(stderr, "invalid rate limit: %s\n", optarg); - exit(1); - } - /* Invert rate limit into a time offset */ - throttle_delay = (int64) (1000000.0 / throttle_value); - } - break; - case 0: - /* This covers long options which take no argument. 
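A --rate value is inverted into the average spacing between transaction starts, and main() later multiplies that spacing by the number of threads because each thread schedules only its own clients. A small worked example with a hypothetical rate:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    double  rate_tps = 200.0;       /* hypothetical --rate=200 */
    int     nthreads = 4;
    int64_t throttle_delay = (int64_t) (1000000.0 / rate_tps); /* 5000 us overall */

    throttle_delay *= nthreads;     /* each thread drives 1/nthreads of the clients */

    printf("per-thread inter-start delay: %lld us\n", (long long) throttle_delay);
    return 0;
}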
*/ - break; - case 2: /* tablespace */ - tablespace = pg_strdup(optarg); - break; - case 3: /* index-tablespace */ - index_tablespace = pg_strdup(optarg); - break; - case 4: - sample_rate = atof(optarg); - if (sample_rate <= 0.0 || sample_rate > 1.0) - { - fprintf(stderr, "invalid sampling rate: %f\n", sample_rate); - exit(1); - } - break; - case 5: -#ifdef WIN32 - fprintf(stderr, "--aggregate-interval is not currently supported on Windows"); - exit(1); -#else - agg_interval = atoi(optarg); - if (agg_interval <= 0) - { - fprintf(stderr, "invalid number of seconds for aggregation: %d\n", agg_interval); - exit(1); - } -#endif - break; - default: - fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); - exit(1); - break; - } - } - - /* compute a per thread delay */ - throttle_delay *= nthreads; - - if (argc > optind) - dbName = argv[optind]; - else - { - if ((env = getenv("PGDATABASE")) != NULL && *env != '\0') - dbName = env; - else if (login != NULL && *login != '\0') - dbName = login; - else - dbName = ""; - } - - if (is_init_mode) - { - init(is_no_vacuum); - exit(0); - } - - /* Use DEFAULT_NXACTS if neither nxacts nor duration is specified. */ - if (nxacts <= 0 && duration <= 0) - nxacts = DEFAULT_NXACTS; - - if (nclients % nthreads != 0) - { - fprintf(stderr, "number of clients (%d) must be a multiple of number of threads (%d)\n", nclients, nthreads); - exit(1); - } - - /* --sampling-rate may be used only with -l */ - if (sample_rate > 0.0 && !use_log) - { - fprintf(stderr, "log sampling rate is allowed only when logging transactions (-l) \n"); - exit(1); - } - - /* -q may be used only with -i */ - if (use_quiet && !is_init_mode) - { - fprintf(stderr, "quiet-logging is allowed only in initialization mode (-i)\n"); - exit(1); - } - - /* --sampling-rate may must not be used with --aggregate-interval */ - if (sample_rate > 0.0 && agg_interval > 0) - { - fprintf(stderr, "log sampling (--sampling-rate) and aggregation (--aggregate-interval) can't be used at the same time\n"); - exit(1); - } - - if (agg_interval > 0 && (!use_log)) - { - fprintf(stderr, "log aggregation is allowed only when actually logging transactions\n"); - exit(1); - } - - if ((duration > 0) && (agg_interval > duration)) - { - fprintf(stderr, "number of seconds for aggregation (%d) must not be higher that test duration (%d)\n", agg_interval, duration); - exit(1); - } - - if ((duration > 0) && (agg_interval > 0) && (duration % agg_interval != 0)) - { - fprintf(stderr, "duration (%d) must be a multiple of aggregation interval (%d)\n", duration, agg_interval); - exit(1); - } - - /* - * is_latencies only works with multiple threads in thread-based - * implementations, not fork-based ones, because it supposes that the - * parent can see changes made to the per-thread execution stats by child - * threads. It seems useful enough to accept despite this limitation, but - * perhaps we should FIXME someday (by passing the stats data back up - * through the parent-to-child pipes). - */ -#ifndef ENABLE_THREAD_SAFETY - if (is_latencies && nthreads > 1) - { - fprintf(stderr, "-r does not work with -j larger than 1 on this platform.\n"); - exit(1); - } -#endif - - /* - * save main process id in the global variable because process id will be - * changed after fork. 
- */ - main_pid = (int) getpid(); - progress_nclients = nclients; - progress_nthreads = nthreads; - - if (nclients > 1) - { - state = (CState *) pg_realloc(state, sizeof(CState) * nclients); - memset(state + 1, 0, sizeof(CState) * (nclients - 1)); - - /* copy any -D switch values to all clients */ - for (i = 1; i < nclients; i++) - { - int j; - - state[i].id = i; - for (j = 0; j < state[0].nvariables; j++) - { - if (!putVariable(&state[i], "startup", state[0].variables[j].name, state[0].variables[j].value)) - exit(1); - } - } - } - - if (debug) - { - if (duration <= 0) - printf("pghost: %s pgport: %s nclients: %d nxacts: %d dbName: %s\n", - pghost, pgport, nclients, nxacts, dbName); - else - printf("pghost: %s pgport: %s nclients: %d duration: %d dbName: %s\n", - pghost, pgport, nclients, duration, dbName); - } - - /* opening connection... */ - con = doConnect(); - if (con == NULL) - exit(1); - - if (PQstatus(con) == CONNECTION_BAD) - { - fprintf(stderr, "Connection to database '%s' failed.\n", dbName); - fprintf(stderr, "%s", PQerrorMessage(con)); - exit(1); - } - - if (ttype != 3) - { - /* - * get the scaling factor that should be same as count(*) from - * pgbench_branches if this is not a custom query - */ - res = PQexec(con, "select count(*) from pgbench_branches"); - if (PQresultStatus(res) != PGRES_TUPLES_OK) - { - fprintf(stderr, "%s", PQerrorMessage(con)); - exit(1); - } - scale = atoi(PQgetvalue(res, 0, 0)); - if (scale < 0) - { - fprintf(stderr, "count(*) from pgbench_branches invalid (%d)\n", scale); - exit(1); - } - PQclear(res); - - /* warn if we override user-given -s switch */ - if (scale_given) - fprintf(stderr, - "Scale option ignored, using pgbench_branches table count = %d\n", - scale); - } - - /* - * :scale variables normally get -s or database scale, but don't override - * an explicit -D switch - */ - if (getVariable(&state[0], "scale") == NULL) - { - snprintf(val, sizeof(val), "%d", scale); - for (i = 0; i < nclients; i++) - { - if (!putVariable(&state[i], "startup", "scale", val)) - exit(1); - } - } - - /* - * Define a :client_id variable that is unique per connection. But don't - * override an explicit -D switch. 
- */ - if (getVariable(&state[0], "client_id") == NULL) - { - for (i = 0; i < nclients; i++) - { - snprintf(val, sizeof(val), "%d", i); - if (!putVariable(&state[i], "startup", "client_id", val)) - exit(1); - } - } - - if (!is_no_vacuum) - { - fprintf(stderr, "starting vacuum..."); - executeStatement(con, "vacuum pgbench_branches"); - executeStatement(con, "vacuum pgbench_tellers"); - executeStatement(con, "truncate pgbench_history"); - fprintf(stderr, "end.\n"); - - if (do_vacuum_accounts) - { - fprintf(stderr, "starting vacuum pgbench_accounts..."); - executeStatement(con, "vacuum analyze pgbench_accounts"); - fprintf(stderr, "end.\n"); - } - } - PQfinish(con); - - /* set random seed */ - INSTR_TIME_SET_CURRENT(start_time); - srandom((unsigned int) INSTR_TIME_GET_MICROSEC(start_time)); - - /* process builtin SQL scripts */ - switch (ttype) - { - case 0: -#ifdef PGXC - if (use_branch) - sql_files[0] = process_builtin(tpc_b_bid); - else -#endif - sql_files[0] = process_builtin(tpc_b); - num_files = 1; - break; - - case 1: - sql_files[0] = process_builtin(select_only); - num_files = 1; - break; - - case 2: -#ifdef PGXC - if (use_branch) - sql_files[0] = process_builtin(simple_update_bid); - else -#endif - sql_files[0] = process_builtin(simple_update); - num_files = 1; - break; - - default: - break; - } - - /* set up thread data structures */ - threads = (TState *) pg_malloc(sizeof(TState) * nthreads); - for (i = 0; i < nthreads; i++) - { - TState *thread = &threads[i]; - - thread->tid = i; - thread->state = &state[nclients / nthreads * i]; - thread->nstate = nclients / nthreads; - thread->random_state[0] = random(); - thread->random_state[1] = random(); - thread->random_state[2] = random(); - - if (is_latencies) - { - /* Reserve memory for the thread to store per-command latencies */ - int t; - - thread->exec_elapsed = (instr_time *) - pg_malloc(sizeof(instr_time) * num_commands); - thread->exec_count = (int *) - pg_malloc(sizeof(int) * num_commands); - - for (t = 0; t < num_commands; t++) - { - INSTR_TIME_SET_ZERO(thread->exec_elapsed[t]); - thread->exec_count[t] = 0; - } - } - else - { - thread->exec_elapsed = NULL; - thread->exec_count = NULL; - } - } - - /* get start up time */ - INSTR_TIME_SET_CURRENT(start_time); - - /* set alarm if duration is specified. 
*/ - if (duration > 0) - setalarm(duration); - - /* start threads */ - for (i = 0; i < nthreads; i++) - { - TState *thread = &threads[i]; - - INSTR_TIME_SET_CURRENT(thread->start_time); - - /* the first thread (i = 0) is executed by main thread */ - if (i > 0) - { - int err = pthread_create(&thread->thread, NULL, threadRun, thread); - - if (err != 0 || thread->thread == INVALID_THREAD) - { - fprintf(stderr, "cannot create thread: %s\n", strerror(err)); - exit(1); - } - } - else - { - thread->thread = INVALID_THREAD; - } - } - - /* wait for threads and accumulate results */ - INSTR_TIME_SET_ZERO(conn_total_time); - for (i = 0; i < nthreads; i++) - { - void *ret = NULL; - - if (threads[i].thread == INVALID_THREAD) - ret = threadRun(&threads[i]); - else - pthread_join(threads[i].thread, &ret); - - if (ret != NULL) - { - TResult *r = (TResult *) ret; - - total_xacts += r->xacts; - total_latencies += r->latencies; - total_sqlats += r->sqlats; - throttle_lag += r->throttle_lag; - if (r->throttle_lag_max > throttle_lag_max) - throttle_lag_max = r->throttle_lag_max; - INSTR_TIME_ADD(conn_total_time, r->conn_time); - free(ret); - } - } - disconnect_all(state, nclients); - - /* - * XXX We compute results as though every client of every thread started - * and finished at the same time. That model can diverge noticeably from - * reality for a short benchmark run involving relatively many threads. - * The first thread may process notably many transactions before the last - * thread begins. Improving the model alone would bring limited benefit, - * because performance during those periods of partial thread count can - * easily exceed steady state performance. This is one of the many ways - * short runs convey deceptive performance figures. - */ - INSTR_TIME_SET_CURRENT(total_time); - INSTR_TIME_SUBTRACT(total_time, start_time); - printResults(ttype, total_xacts, nclients, threads, nthreads, - total_time, conn_total_time, total_latencies, total_sqlats, - throttle_lag, throttle_lag_max); - - return 0; -} - -static void * -threadRun(void *arg) -{ - TState *thread = (TState *) arg; - CState *state = thread->state; - TResult *result; - FILE *logfile = NULL; /* per-thread log file */ - instr_time start, - end; - int nstate = thread->nstate; - int remains = nstate; /* number of remaining clients */ - int i; - - /* for reporting progress: */ - int64 thread_start = INSTR_TIME_GET_MICROSEC(thread->start_time); - int64 last_report = thread_start; - int64 next_report = last_report + (int64) progress * 1000000; - int64 last_count = 0, - last_lats = 0, - last_sqlats = 0, - last_lags = 0; - - AggVals aggs; - - /* - * Initialize throttling rate target for all of the thread's clients. It - * might be a little more accurate to reset thread->start_time here too. - * The possible drift seems too small relative to typical throttle delay - * times to worry about it. 
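Worker threads above hand their totals back through pthread_join(): each thread returns a malloc'd result block, and the main thread (which also runs thread 0 itself) accumulates them. A minimal sketch of that create/join/accumulate pattern; the struct and numbers are invented:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>

typedef struct { long xacts; } Result;      /* stand-in for TResult */

static void *
worker(void *arg)
{
    Result *r = malloc(sizeof(Result));

    r->xacts = 1000 + (long) (intptr_t) arg;    /* pretend work */
    return r;
}

int
main(void)
{
    pthread_t   tid[4];
    long        total = 0;
    int         i;

    for (i = 0; i < 4; i++)
        pthread_create(&tid[i], NULL, worker, (void *) (intptr_t) i);

    for (i = 0; i < 4; i++)
    {
        void   *ret;

        pthread_join(tid[i], &ret);
        total += ((Result *) ret)->xacts;
        free(ret);
    }
    printf("total xacts: %ld\n", total);
    return 0;
}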
- */ - INSTR_TIME_SET_CURRENT(start); - thread->throttle_trigger = INSTR_TIME_GET_MICROSEC(start); - thread->throttle_lag = 0; - thread->throttle_lag_max = 0; - - result = pg_malloc(sizeof(TResult)); - - INSTR_TIME_SET_ZERO(result->conn_time); - - /* open log file if requested */ - if (use_log) - { - char logpath[64]; - - if (thread->tid == 0) - snprintf(logpath, sizeof(logpath), "pgbench_log.%d", main_pid); - else - snprintf(logpath, sizeof(logpath), "pgbench_log.%d.%d", main_pid, thread->tid); - logfile = fopen(logpath, "w"); - - if (logfile == NULL) - { - fprintf(stderr, "Couldn't open logfile \"%s\": %s", logpath, strerror(errno)); - goto done; - } - } - - if (!is_connect) - { - /* make connections to the database */ - for (i = 0; i < nstate; i++) - { - if ((state[i].con = doConnect()) == NULL) - goto done; - } - } - - /* time after thread and connections set up */ - INSTR_TIME_SET_CURRENT(result->conn_time); - INSTR_TIME_SUBTRACT(result->conn_time, thread->start_time); - - agg_vals_init(&aggs, thread->start_time); - - /* send start up queries in async manner */ - for (i = 0; i < nstate; i++) - { - CState *st = &state[i]; - Command **commands = sql_files[st->use_file]; - int prev_ecnt = st->ecnt; - - st->use_file = getrand(thread, 0, num_files - 1); - if (!doCustom(thread, st, &result->conn_time, logfile, &aggs)) - remains--; /* I've aborted */ - - if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND) - { - fprintf(stderr, "Client %d aborted in state %d. Execution meta-command failed.\n", i, st->state); - remains--; /* I've aborted */ - PQfinish(st->con); - st->con = NULL; - } - } - - while (remains > 0) - { - fd_set input_mask; - int maxsock; /* max socket number to be waited */ - int64 now_usec = 0; - int64 min_usec; - - FD_ZERO(&input_mask); - - maxsock = -1; - min_usec = INT64_MAX; - for (i = 0; i < nstate; i++) - { - CState *st = &state[i]; - Command **commands = sql_files[st->use_file]; - int sock; - - if (st->con == NULL) - { - continue; - } - else if (st->sleeping) - { - if (st->throttling && timer_exceeded) - { - /* interrupt client which has not started a transaction */ - remains--; - st->sleeping = 0; - st->throttling = false; - PQfinish(st->con); - st->con = NULL; - continue; - } - else /* just a nap from the script */ - { - int this_usec; - - if (min_usec == INT64_MAX) - { - instr_time now; - - INSTR_TIME_SET_CURRENT(now); - now_usec = INSTR_TIME_GET_MICROSEC(now); - } - - this_usec = st->until - now_usec; - if (min_usec > this_usec) - min_usec = this_usec; - } - } - else if (commands[st->state]->type == META_COMMAND) - { - min_usec = 0; /* the connection is ready to run */ - break; - } - - sock = PQsocket(st->con); - if (sock < 0) - { - fprintf(stderr, "bad socket: %s\n", strerror(errno)); - goto done; - } - - FD_SET(sock, &input_mask); - - if (maxsock < sock) - maxsock = sock; - } - - if (min_usec > 0 && maxsock != -1) - { - int nsocks; /* return from select(2) */ - - if (min_usec != INT64_MAX) - { - struct timeval timeout; - - timeout.tv_sec = min_usec / 1000000; - timeout.tv_usec = min_usec % 1000000; - nsocks = select(maxsock + 1, &input_mask, NULL, NULL, &timeout); - } - else - nsocks = select(maxsock + 1, &input_mask, NULL, NULL, NULL); - if (nsocks < 0) - { - if (errno == EINTR) - continue; - /* must be something wrong */ - fprintf(stderr, "select failed: %s\n", strerror(errno)); - goto done; - } - } - - /* ok, backend returns reply */ - for (i = 0; i < nstate; i++) - { - CState *st = &state[i]; - Command **commands = sql_files[st->use_file]; - int 
prev_ecnt = st->ecnt; - - if (st->con && (FD_ISSET(PQsocket(st->con), &input_mask) - || commands[st->state]->type == META_COMMAND)) - { - if (!doCustom(thread, st, &result->conn_time, logfile, &aggs)) - remains--; /* I've aborted */ - } - - if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND) - { - fprintf(stderr, "Client %d aborted in state %d. Execution of meta-command failed.\n", i, st->state); - remains--; /* I've aborted */ - PQfinish(st->con); - st->con = NULL; - } - } - -#ifdef PTHREAD_FORK_EMULATION - /* each process reports its own progression */ - if (progress) - { - instr_time now_time; - int64 now; - - INSTR_TIME_SET_CURRENT(now_time); - now = INSTR_TIME_GET_MICROSEC(now_time); - if (now >= next_report) - { - /* generate and show report */ - int64 count = 0, - lats = 0, - sqlats = 0; - int64 lags = thread->throttle_lag; - int64 run = now - last_report; - double tps, - total_run, - latency, - sqlat, - stdev, - lag; - - for (i = 0; i < nstate; i++) - { - count += state[i].cnt; - lats += state[i].txn_latencies; - sqlats += state[i].txn_sqlats; - } - - total_run = (now - thread_start) / 1000000.0; - tps = 1000000.0 * (count - last_count) / run; - latency = 0.001 * (lats - last_lats) / (count - last_count); - sqlat = 1.0 * (sqlats - last_sqlats) / (count - last_count); - stdev = 0.001 * sqrt(sqlat - 1000000.0 * latency * latency); - lag = 0.001 * (lags - last_lags) / (count - last_count); - - if (throttle_delay) - fprintf(stderr, - "progress %d: %.1f s, %.1f tps, " - "lat %.3f ms stddev %.3f, lag %.3f ms\n", - thread->tid, total_run, tps, latency, stdev, lag); - else - fprintf(stderr, - "progress %d: %.1f s, %.1f tps, " - "lat %.3f ms stddev %.3f\n", - thread->tid, total_run, tps, latency, stdev); - - last_count = count; - last_lats = lats; - last_sqlats = sqlats; - last_lags = lags; - last_report = now; - next_report += (int64) progress *1000000; - } - } -#else - /* progress report by thread 0 for all threads */ - if (progress && thread->tid == 0) - { - instr_time now_time; - int64 now; - - INSTR_TIME_SET_CURRENT(now_time); - now = INSTR_TIME_GET_MICROSEC(now_time); - if (now >= next_report) - { - /* generate and show report */ - int64 count = 0, - lats = 0, - sqlats = 0, - lags = 0; - int64 run = now - last_report; - double tps, - total_run, - latency, - sqlat, - lag, - stdev; - - for (i = 0; i < progress_nclients; i++) - { - count += state[i].cnt; - lats += state[i].txn_latencies; - sqlats += state[i].txn_sqlats; - } - - for (i = 0; i < progress_nthreads; i++) - lags += thread[i].throttle_lag; - - total_run = (now - thread_start) / 1000000.0; - tps = 1000000.0 * (count - last_count) / run; - latency = 0.001 * (lats - last_lats) / (count - last_count); - sqlat = 1.0 * (sqlats - last_sqlats) / (count - last_count); - stdev = 0.001 * sqrt(sqlat - 1000000.0 * latency * latency); - lag = 0.001 * (lags - last_lags) / (count - last_count); - - if (throttle_delay) - fprintf(stderr, - "progress: %.1f s, %.1f tps, " - "lat %.3f ms stddev %.3f, lag %.3f ms\n", - total_run, tps, latency, stdev, lag); - else - fprintf(stderr, - "progress: %.1f s, %.1f tps, " - "lat %.3f ms stddev %.3f\n", - total_run, tps, latency, stdev); - - last_count = count; - last_lats = lats; - last_sqlats = sqlats; - last_lags = lags; - last_report = now; - next_report += (int64) progress *1000000; - } - } -#endif /* PTHREAD_FORK_EMULATION */ - } - -done: - INSTR_TIME_SET_CURRENT(start); - disconnect_all(state, nstate); - result->xacts = 0; - result->latencies = 0; - result->sqlats = 0; - for (i = 0; 
i < nstate; i++) - { - result->xacts += state[i].cnt; - result->latencies += state[i].txn_latencies; - result->sqlats += state[i].txn_sqlats; - } - result->throttle_lag = thread->throttle_lag; - result->throttle_lag_max = thread->throttle_lag_max; - INSTR_TIME_SET_CURRENT(end); - INSTR_TIME_ACCUM_DIFF(result->conn_time, end, start); - if (logfile) - fclose(logfile); - return result; -} - -/* - * Support for duration option: set timer_exceeded after so many seconds. - */ - -#ifndef WIN32 - -static void -handle_sig_alarm(SIGNAL_ARGS) -{ - timer_exceeded = true; -} - -static void -setalarm(int seconds) -{ - pqsignal(SIGALRM, handle_sig_alarm); - alarm(seconds); -} - -#ifndef ENABLE_THREAD_SAFETY - -/* - * implements pthread using fork. - */ - -typedef struct fork_pthread -{ - pid_t pid; - int pipes[2]; -} fork_pthread; - -static int -pthread_create(pthread_t *thread, - pthread_attr_t *attr, - void *(*start_routine) (void *), - void *arg) -{ - fork_pthread *th; - void *ret; - int rc; - - th = (fork_pthread *) pg_malloc(sizeof(fork_pthread)); - if (pipe(th->pipes) < 0) - { - free(th); - return errno; - } - - th->pid = fork(); - if (th->pid == -1) /* error */ - { - free(th); - return errno; - } - if (th->pid != 0) /* in parent process */ - { - close(th->pipes[1]); - *thread = th; - return 0; - } - - /* in child process */ - close(th->pipes[0]); - - /* set alarm again because the child does not inherit timers */ - if (duration > 0) - setalarm(duration); - - ret = start_routine(arg); - rc = write(th->pipes[1], ret, sizeof(TResult)); - (void) rc; - close(th->pipes[1]); - free(th); - exit(0); -} - -static int -pthread_join(pthread_t th, void **thread_return) -{ - int status; - - while (waitpid(th->pid, &status, 0) != th->pid) - { - if (errno != EINTR) - return errno; - } - - if (thread_return != NULL) - { - /* assume result is TResult */ - *thread_return = pg_malloc(sizeof(TResult)); - if (read(th->pipes[0], *thread_return, sizeof(TResult)) != sizeof(TResult)) - { - free(*thread_return); - *thread_return = NULL; - } - } - close(th->pipes[0]); - - free(th); - return 0; -} -#endif -#else /* WIN32 */ - -static VOID CALLBACK -win32_timer_callback(PVOID lpParameter, BOOLEAN TimerOrWaitFired) -{ - timer_exceeded = true; -} - -static void -setalarm(int seconds) -{ - HANDLE queue; - HANDLE timer; - - /* This function will be called at most once, so we can cheat a bit. 
*/ - queue = CreateTimerQueue(); - if (seconds > ((DWORD) -1) / 1000 || - !CreateTimerQueueTimer(&timer, queue, - win32_timer_callback, NULL, seconds * 1000, 0, - WT_EXECUTEINTIMERTHREAD | WT_EXECUTEONLYONCE)) - { - fprintf(stderr, "Failed to set timer\n"); - exit(1); - } -} - -/* partial pthread implementation for Windows */ - -typedef struct win32_pthread -{ - HANDLE handle; - void *(*routine) (void *); - void *arg; - void *result; -} win32_pthread; - -static unsigned __stdcall -win32_pthread_run(void *arg) -{ - win32_pthread *th = (win32_pthread *) arg; - - th->result = th->routine(th->arg); - - return 0; -} - -static int -pthread_create(pthread_t *thread, - pthread_attr_t *attr, - void *(*start_routine) (void *), - void *arg) -{ - int save_errno; - win32_pthread *th; - - th = (win32_pthread *) pg_malloc(sizeof(win32_pthread)); - th->routine = start_routine; - th->arg = arg; - th->result = NULL; - - th->handle = (HANDLE) _beginthreadex(NULL, 0, win32_pthread_run, th, 0, NULL); - if (th->handle == NULL) - { - save_errno = errno; - free(th); - return save_errno; - } - - *thread = th; - return 0; -} - -static int -pthread_join(pthread_t th, void **thread_return) -{ - if (th == NULL || th->handle == NULL) - return errno = EINVAL; - - if (WaitForSingleObject(th->handle, INFINITE) != WAIT_OBJECT_0) - { - _dosmaperr(GetLastError()); - return errno; - } - - if (thread_return) - *thread_return = th->result; - - CloseHandle(th->handle); - free(th); - return 0; -} - -#endif /* WIN32 */ diff --git a/contrib/pgcrypto/Makefile b/contrib/pgcrypto/Makefile index 1c85c982ff..18bad1a05f 100644 --- a/contrib/pgcrypto/Makefile +++ b/contrib/pgcrypto/Makefile @@ -23,10 +23,12 @@ SRCS = pgcrypto.c px.c px-hmac.c px-crypt.c \ pgp-pgsql.c MODULE_big = pgcrypto -OBJS = $(SRCS:.c=.o) +OBJS = $(SRCS:.c=.o) $(WIN32RES) EXTENSION = pgcrypto -DATA = pgcrypto--1.1.sql pgcrypto--1.0--1.1.sql pgcrypto--unpackaged--1.0.sql +DATA = pgcrypto--1.2.sql pgcrypto--1.1--1.2.sql pgcrypto--1.0--1.1.sql \ + pgcrypto--unpackaged--1.0.sql +PGFILEDESC = "pgcrypto - cryptographic functions" REGRESS = init md5 sha1 hmac-md5 hmac-sha1 blowfish rijndael \ $(CF_TESTS) \ @@ -54,7 +56,7 @@ SHLIB_LINK += $(filter -lcrypto -lz, $(LIBS)) ifeq ($(PORTNAME), win32) SHLIB_LINK += $(filter -leay32, $(LIBS)) # those must be at the end -SHLIB_LINK += -lwsock32 -lws2_32 +SHLIB_LINK += -lws2_32 endif rijndael.o: rijndael.tbl diff --git a/contrib/pgcrypto/crypt-des.c b/contrib/pgcrypto/crypt-des.c index 4ed44beeff..b43141fed5 100644 --- a/contrib/pgcrypto/crypt-des.c +++ b/contrib/pgcrypto/crypt-des.c @@ -708,7 +708,7 @@ px_crypt_des(const char *key, const char *setting) if (des_setkey((char *) keybuf)) return (NULL); } - strncpy(output, setting, 9); + StrNCpy(output, setting, 10); /* * Double check that we weren't given a short setting. If we were, the @@ -716,7 +716,6 @@ px_crypt_des(const char *key, const char *setting) * salt, but we don't really care. Just make sure the output string * doesn't have an extra NUL in it. 
*/ - output[9] = '\0'; p = output + strlen(output); } else diff --git a/contrib/pgcrypto/expected/pgp-armor.out b/contrib/pgcrypto/expected/pgp-armor.out index c95549412e..89d410a7dc 100644 --- a/contrib/pgcrypto/expected/pgp-armor.out +++ b/contrib/pgcrypto/expected/pgp-armor.out @@ -102,3 +102,271 @@ em9va2E= -----END PGP MESSAGE----- '); ERROR: Corrupt ascii-armor +-- corrupt (no space after the colon) +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +foo: + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); +ERROR: Corrupt ascii-armor +-- corrupt (no empty line) +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); +ERROR: Corrupt ascii-armor +-- no headers +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + key | value +-----+------- +(0 rows) + +-- header with empty value +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +foo: + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + key | value +-----+------- + foo | +(1 row) + +-- simple +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +fookey: foovalue +barkey: barvalue + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + key | value +--------+---------- + fookey | foovalue + barkey | barvalue +(2 rows) + +-- insane keys, part 1 +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +insane:key : + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + key | value +-------------+------- + insane:key | +(1 row) + +-- insane keys, part 2 +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +insane:key : text value here + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + key | value +-------------+----------------- + insane:key | text value here +(1 row) + +-- long value +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +long: this value is more than 76 characters long, but it should still parse correctly as that''s permitted by RFC 4880 + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + key | value +------+----------------------------------------------------------------------------------------------------------------- + long | this value is more than 76 characters long, but it should still parse correctly as that's permitted by RFC 4880 +(1 row) + +-- long value, split up +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +long: this value is more than 76 characters long, but it should still +long: parse correctly as that''s permitted by RFC 4880 + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + key | value +------+------------------------------------------------------------------ + long | this value is more than 76 characters long, but it should still + long | parse correctly as that's permitted by RFC 4880 +(2 rows) + +-- long value, split up, part 2 +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +long: this value is more than +long: 76 characters long, but it should still +long: parse correctly as that''s permitted by RFC 4880 + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + key | value +------+------------------------------------------------- + long | this value is more than + long | 76 characters long, but it should still + long | parse correctly as that's permitted by RFC 4880 +(3 rows) + +-- long value, split up, part 3 +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +emptykey: +long: this value is more than +emptykey: +long: 76 characters long, but it should still +emptykey: +long: 
parse correctly as that''s permitted by RFC 4880 +emptykey: + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + key | value +----------+------------------------------------------------- + emptykey | + long | this value is more than + emptykey | + long | 76 characters long, but it should still + emptykey | + long | parse correctly as that's permitted by RFC 4880 + emptykey | +(7 rows) + +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +Comment: dat1.blowfish.sha1.mdc.s2k3.z0 + +jA0EBAMCfFNwxnvodX9g0jwB4n4s26/g5VmKzVab1bX1SmwY7gvgvlWdF3jKisvS +yA6Ce1QTMK3KdL2MPfamsTUSAML8huCJMwYQFfE= +=JcP+ +-----END PGP MESSAGE----- +'); + key | value +---------+-------------------------------- + Comment | dat1.blowfish.sha1.mdc.s2k3.z0 +(1 row) + +-- test CR+LF line endings +select * from pgp_armor_headers(replace(' +-----BEGIN PGP MESSAGE----- +fookey: foovalue +barkey: barvalue + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +', E'\n', E'\r\n')); + key | value +--------+---------- + fookey | foovalue + barkey | barvalue +(2 rows) + +-- test header generation +select armor('zooka', array['foo'], array['bar']); + armor +----------------------------- + -----BEGIN PGP MESSAGE-----+ + foo: bar + + + + em9va2E= + + =D5cR + + -----END PGP MESSAGE----- + + +(1 row) + +select armor('zooka', array['Version', 'Comment'], array['Created by pgcrypto', 'PostgreSQL, the world''s most advanced open source database']); + armor +--------------------------------------------------------------------- + -----BEGIN PGP MESSAGE----- + + Version: Created by pgcrypto + + Comment: PostgreSQL, the world's most advanced open source database+ + + + em9va2E= + + =D5cR + + -----END PGP MESSAGE----- + + +(1 row) + +select * from pgp_armor_headers( + armor('zooka', array['Version', 'Comment'], + array['Created by pgcrypto', 'PostgreSQL, the world''s most advanced open source database'])); + key | value +---------+------------------------------------------------------------ + Version | Created by pgcrypto + Comment | PostgreSQL, the world's most advanced open source database +(2 rows) + +-- error/corner cases +select armor('', array['foo'], array['too', 'many']); +ERROR: mismatched array dimensions +select armor('', array['too', 'many'], array['foo']); +ERROR: mismatched array dimensions +select armor('', array[['']], array['foo']); +ERROR: wrong number of array subscripts +select armor('', array['foo'], array[['']]); +ERROR: wrong number of array subscripts +select armor('', array[null], array['foo']); +ERROR: null value not allowed for header key +select armor('', array['foo'], array[null]); +ERROR: null value not allowed for header value +select armor('', '[0:0]={"foo"}', array['foo']); + armor +----------------------------- + -----BEGIN PGP MESSAGE-----+ + foo: foo + + + + =twTO + + -----END PGP MESSAGE----- + + +(1 row) + +select armor('', array['foo'], '[0:0]={"foo"}'); + armor +----------------------------- + -----BEGIN PGP MESSAGE-----+ + foo: foo + + + + =twTO + + -----END PGP MESSAGE----- + + +(1 row) + +select armor('', array[E'embedded\nnewline'], array['foo']); +ERROR: header key must not contain newlines +select armor('', array['foo'], array[E'embedded\nnewline']); +ERROR: header value must not contain newlines +select armor('', array['embedded: colon+space'], array['foo']); +ERROR: header key must not contain ": " diff --git a/contrib/pgcrypto/expected/pgp-decrypt.out b/contrib/pgcrypto/expected/pgp-decrypt.out index 859f4d681b..7193dca026 100644 --- a/contrib/pgcrypto/expected/pgp-decrypt.out +++ 
b/contrib/pgcrypto/expected/pgp-decrypt.out @@ -364,3 +364,11 @@ a3nsOzKTXUfS9VyaXo8IrncM6n7fdaXpwba/3tNsAhJG4lDv1k4g9v8Ix2dfv6Rs (1 row) -- expected: 7efefcab38467f7484d6fa43dc86cf5281bd78e2 +-- check BUG #11905, problem with messages 6 less than a power of 2. +select pgp_sym_decrypt(pgp_sym_encrypt(repeat('x',65530),'1'),'1') = repeat('x',65530); + ?column? +---------- + t +(1 row) + +-- expected: true diff --git a/contrib/pgcrypto/expected/pgp-info.out b/contrib/pgcrypto/expected/pgp-info.out index 1fe008890f..9064838373 100644 --- a/contrib/pgcrypto/expected/pgp-info.out +++ b/contrib/pgcrypto/expected/pgp-info.out @@ -74,5 +74,6 @@ from encdata order by id; 2C226E1FFE5CC7D4 B68504FD128E1FF9 FD0206C409B74875 -(4 rows) + FD0206C409B74875 +(5 rows) diff --git a/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out b/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out index 61e09b9a86..d290a1349f 100644 --- a/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out +++ b/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out @@ -564,6 +564,27 @@ GQ== =XHkF -----END PGP MESSAGE----- '); +-- rsaenc2048 / aes128 (not from gnupg) +insert into encdata (id, data) values (5, ' +-----BEGIN PGP MESSAGE----- + +wcBMA/0CBsQJt0h1AQgAzxZ8j+OTeZ8IlLxfZ/mVd28/gUsCY+xigWBk/anZlK3T +p2tNU2idHzKdAttH2Hu/PWbZp4kwjl9spezYxMqCeBZqtfGED88Y+rqK0n/ul30A +7jjFHaw0XUOqFNlST1v6H2i7UXndnp+kcLfHPhnO5BIYWxB2CYBehItqtrn75eqr +C7trGzU/cr74efcWagbCDSNjiAV7GlEptlzmgVMmNikyI6w0ojEUx8lCLc/OsFz9 +pJUAX8xuwjxDVv+W7xk6c96grQiQlm+FLDYGiGNXoAzx3Wi/howu3uV40dXfY+jx +3WBrhEew5Pkpt1SsWoFnJWOfJ8GLd0ec8vfRCqAIVdLgAeS7NyawQYtd6wuVrEAj +5SMg4Thb4d+g45RksuGLHUUr4qO9tiXglODa4InhmJfgNuLk+RGz4LXjq8wepEmW +vRbgFOG54+Cf4C/gC+HkreDm5JKSKjvvw4B/jC6CDxq+JoziEe2Z1uEjCuEcr+Es +/eGzeOi36BejXPMHeKxXejj5qBBHKV0pHVhZSgffR0TtlXdB967Yl/5agV0R89hI +7Gw52emfnH4Z0Y4V0au2H0k1dR/2IxXdJEWSTG7Be1JHT59p9ei2gSEOrdBMIOjP +tbYYUlmmbvD49bHfThkDiC+oc9947LgQsk3kOOLbNHcjkbrjH8R5kjII4m/SEZA1 +g09T+338SzevBcVXh/cFrQ6/Et+lyyO2LJRUMs69g/HyzJOVWT2Iu8E0eS9MWevY +Qtrkrhrpkl3Y02qEp/j6M03Yu2t6ZF7dp51aJ5VhO2mmmtHaTnCyCc8Fcf72LmD8 +blH2nKZC9d6fi4YzSYMepZpMOFR65M80MCMiDUGnZBB8sEADu2/iVtqDUeG8mAA= +=PHJ1 +-----END PGP MESSAGE----- +'); -- successful decrypt select pgp_pub_decrypt(dearmor(data), dearmor(seckey)) from keytbl, encdata where keytbl.id=1 and encdata.id=1; @@ -629,3 +650,7 @@ from keytbl, encdata where keytbl.id=5 and encdata.id=1; Secret msg (1 row) +-- test for a short read from prefix_init +select pgp_pub_decrypt(dearmor(data), dearmor(seckey)) +from keytbl, encdata where keytbl.id=6 and encdata.id=5; +ERROR: Wrong key or corrupt data diff --git a/contrib/pgcrypto/imath.c b/contrib/pgcrypto/imath.c index 5c6ebebfe2..61a01e2b71 100644 --- a/contrib/pgcrypto/imath.c +++ b/contrib/pgcrypto/imath.c @@ -818,7 +818,8 @@ mp_int_mul(mp_int a, mp_int b, mp_int c) */ ua = MP_USED(a); ub = MP_USED(b); - osize = ua + ub; + osize = MAX(ua, ub); + osize = 4 * ((osize + 1) / 2); if (c == a || c == b) { @@ -907,7 +908,7 @@ mp_int_sqr(mp_int a, mp_int c) CHECK(a != NULL && c != NULL); /* Get a temporary buffer big enough to hold the result */ - osize = (mp_size) 2 *MP_USED(a); + osize = (mp_size) 4 *((MP_USED(a) + 1) / 2); if (a == c) { @@ -2605,8 +2606,8 @@ s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc, * Now we'll get t1 = a0b0 and t2 = a1b1, and subtract them out so * that we're left with only the pieces we want: t3 = a1b0 + a0b1 */ - ZERO(t1, bot_size + 1); - ZERO(t2, bot_size + 1); + ZERO(t1, buf_size); + ZERO(t2, buf_size); (void) s_kmul(da, db, t1, bot_size, bot_size); /* t1 = a0 * b0 */ (void) 
s_kmul(a_top, b_top, t2, at_size, bt_size); /* t2 = a1 * b1 */ @@ -2616,11 +2617,13 @@ s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc, /* Assemble the output value */ COPY(t1, dc, buf_size); - (void) s_uadd(t3, dc + bot_size, dc + bot_size, - buf_size + 1, buf_size + 1); + carry = s_uadd(t3, dc + bot_size, dc + bot_size, + buf_size + 1, buf_size); + assert(carry == 0); - (void) s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size, - buf_size, buf_size); + carry = s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size, + buf_size, buf_size); + assert(carry == 0); s_free(t1); /* note t2 and t3 are just internal pointers * to t1 */ @@ -3307,7 +3310,10 @@ s_embar(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c) dbt = db + MP_USED(b) - 1; while (last < 3) - SETUP(mp_int_init_size(TEMP(last), 2 * umu), last); + { + SETUP(mp_int_init_size(TEMP(last), 4 * umu), last); + ZERO(MP_DIGITS(TEMP(last - 1)), MP_ALLOC(TEMP(last - 1))); + } (void) mp_int_set_value(c, 1); diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c index 6124e4513c..c59691ed2c 100644 --- a/contrib/pgcrypto/mbuf.c +++ b/contrib/pgcrypto/mbuf.c @@ -305,6 +305,7 @@ pullf_read_max(PullFilter *pf, int len, uint8 **data_p, uint8 *tmpbuf) break; memcpy(tmpbuf + total, tmp, res); total += res; + len -= res; } return total; } diff --git a/contrib/pgcrypto/pgcrypto--1.1--1.2.sql b/contrib/pgcrypto/pgcrypto--1.1--1.2.sql new file mode 100644 index 0000000000..753e169384 --- /dev/null +++ b/contrib/pgcrypto/pgcrypto--1.1--1.2.sql @@ -0,0 +1,14 @@ +/* contrib/pgcrypto/pgcrypto--1.1--1.2.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION pgcrypto UPDATE TO '1.2'" to load this file. \quit + +CREATE FUNCTION armor(bytea, text[], text[]) +RETURNS text +AS 'MODULE_PATHNAME', 'pg_armor' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION pgp_armor_headers(text, key OUT text, value OUT text) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pgp_armor_headers' +LANGUAGE C IMMUTABLE STRICT; diff --git a/contrib/pgcrypto/pgcrypto--1.1.sql b/contrib/pgcrypto/pgcrypto--1.2.sql index a260857d30..d6d5e7de34 100644 --- a/contrib/pgcrypto/pgcrypto--1.1.sql +++ b/contrib/pgcrypto/pgcrypto--1.2.sql @@ -1,4 +1,4 @@ -/* contrib/pgcrypto/pgcrypto--1.1.sql */ +/* contrib/pgcrypto/pgcrypto--1.2.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION pgcrypto" to load this file. \quit @@ -201,7 +201,17 @@ RETURNS text AS 'MODULE_PATHNAME', 'pg_armor' LANGUAGE C IMMUTABLE STRICT; +CREATE FUNCTION armor(bytea, text[], text[]) +RETURNS text +AS 'MODULE_PATHNAME', 'pg_armor' +LANGUAGE C IMMUTABLE STRICT; + CREATE FUNCTION dearmor(text) RETURNS bytea AS 'MODULE_PATHNAME', 'pg_dearmor' LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION pgp_armor_headers(text, key OUT text, value OUT text) +RETURNS SETOF record +AS 'MODULE_PATHNAME', 'pgp_armor_headers' +LANGUAGE C IMMUTABLE STRICT; diff --git a/contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql b/contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql index fe8d4c4e72..8154e85f44 100644 --- a/contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql +++ b/contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pgcrypto" to load this file. \quit +\echo Use "CREATE EXTENSION pgcrypto FROM unpackaged" to load this file. 
\quit ALTER EXTENSION pgcrypto ADD function digest(text,text); ALTER EXTENSION pgcrypto ADD function digest(bytea,text); diff --git a/contrib/pgcrypto/pgcrypto.control b/contrib/pgcrypto/pgcrypto.control index 7f79d044ab..bb6885bc1b 100644 --- a/contrib/pgcrypto/pgcrypto.control +++ b/contrib/pgcrypto/pgcrypto.control @@ -1,5 +1,5 @@ # pgcrypto extension comment = 'cryptographic functions' -default_version = '1.1' +default_version = '1.2' module_pathname = '$libdir/pgcrypto' relocatable = true diff --git a/contrib/pgcrypto/pgp-armor.c b/contrib/pgcrypto/pgp-armor.c index 40f20550ea..24eb42fa89 100644 --- a/contrib/pgcrypto/pgp-armor.c +++ b/contrib/pgcrypto/pgp-armor.c @@ -178,7 +178,7 @@ b64_dec_len(unsigned srclen) * PGP armor */ -static const char *armor_header = "-----BEGIN PGP MESSAGE-----\n\n"; +static const char *armor_header = "-----BEGIN PGP MESSAGE-----\n"; static const char *armor_footer = "\n-----END PGP MESSAGE-----\n"; /* CRC24 implementation from rfc2440 */ @@ -203,38 +203,40 @@ crc24(const uint8 *data, unsigned len) return crc & 0xffffffL; } -int -pgp_armor_encode(const uint8 *src, unsigned len, uint8 *dst) +void +pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst, + int num_headers, char **keys, char **values) { int n; - uint8 *pos = dst; + int res; + unsigned b64len; unsigned crc = crc24(src, len); - n = strlen(armor_header); - memcpy(pos, armor_header, n); - pos += n; + appendStringInfoString(dst, armor_header); + + for (n = 0; n < num_headers; n++) + appendStringInfo(dst, "%s: %s\n", keys[n], values[n]); + appendStringInfoChar(dst, '\n'); - n = b64_encode(src, len, pos); - pos += n; + /* make sure we have enough room to b64_encode() */ + b64len = b64_enc_len(len); + enlargeStringInfo(dst, (int) b64len); - if (*(pos - 1) != '\n') - *pos++ = '\n'; + res = b64_encode(src, len, (uint8 *) dst->data + dst->len); + if (res > b64len) + elog(FATAL, "overflow - encode estimate too small"); + dst->len += res; - *pos++ = '='; - pos[3] = _base64[crc & 0x3f]; - crc >>= 6; - pos[2] = _base64[crc & 0x3f]; - crc >>= 6; - pos[1] = _base64[crc & 0x3f]; - crc >>= 6; - pos[0] = _base64[crc & 0x3f]; - pos += 4; + if (*(dst->data + dst->len - 1) != '\n') + appendStringInfoChar(dst, '\n'); - n = strlen(armor_footer); - memcpy(pos, armor_footer, n); - pos += n; + appendStringInfoChar(dst, '='); + appendStringInfoChar(dst, _base64[(crc >> 18) & 0x3f]); + appendStringInfoChar(dst, _base64[(crc >> 12) & 0x3f]); + appendStringInfoChar(dst, _base64[(crc >> 6) & 0x3f]); + appendStringInfoChar(dst, _base64[crc & 0x3f]); - return pos - dst; + appendStringInfoString(dst, armor_footer); } static const uint8 * @@ -309,7 +311,7 @@ find_header(const uint8 *data, const uint8 *datend, } int -pgp_armor_decode(const uint8 *src, unsigned len, uint8 *dst) +pgp_armor_decode(const uint8 *src, int len, StringInfo dst) { const uint8 *p = src; const uint8 *data_end = src + len; @@ -319,6 +321,7 @@ pgp_armor_decode(const uint8 *src, unsigned len, uint8 *dst) const uint8 *base64_end = NULL; uint8 buf[4]; int hlen; + int blen; int res = PXE_PGP_CORRUPT_ARMOR; /* armor start */ @@ -360,23 +363,126 @@ pgp_armor_decode(const uint8 *src, unsigned len, uint8 *dst) crc = (((long) buf[0]) << 16) + (((long) buf[1]) << 8) + (long) buf[2]; /* decode data */ - res = b64_decode(base64_start, base64_end - base64_start, dst); - - /* check crc */ - if (res >= 0 && crc24(dst, res) != crc) - res = PXE_PGP_CORRUPT_ARMOR; + blen = (int) b64_dec_len(len); + enlargeStringInfo(dst, blen); + res = b64_decode(base64_start, 
base64_end - base64_start, (uint8 *) dst->data); + if (res > blen) + elog(FATAL, "overflow - decode estimate too small"); + if (res >= 0) + { + if (crc24((uint8 *) dst->data, res) == crc) + dst->len += res; + else + res = PXE_PGP_CORRUPT_ARMOR; + } out: return res; } -unsigned -pgp_armor_enc_len(unsigned len) +/* + * Extracts all armor headers from an ASCII-armored input. + * + * Returns 0 on success, or PXE_* error code on error. On success, the + * number of headers and their keys and values are returned in *nheaders, + * *nkeys and *nvalues. + */ +int +pgp_extract_armor_headers(const uint8 *src, unsigned len, + int *nheaders, char ***keys, char ***values) { - return b64_enc_len(len) + strlen(armor_header) + strlen(armor_footer) + 16; -} + const uint8 *data_end = src + len; + const uint8 *p; + const uint8 *base64_start; + const uint8 *armor_start; + const uint8 *armor_end; + Size armor_len; + char *line; + char *nextline; + char *eol, + *colon; + int hlen; + char *buf; + int hdrlines; + int n; -unsigned -pgp_armor_dec_len(unsigned len) -{ - return b64_dec_len(len); + /* armor start */ + hlen = find_header(src, data_end, &armor_start, 0); + if (hlen <= 0) + return PXE_PGP_CORRUPT_ARMOR; + armor_start += hlen; + + /* armor end */ + hlen = find_header(armor_start, data_end, &armor_end, 1); + if (hlen <= 0) + return PXE_PGP_CORRUPT_ARMOR; + + /* Count the number of armor header lines. */ + hdrlines = 0; + p = armor_start; + while (p < armor_end && *p != '\n' && *p != '\r') + { + p = memchr(p, '\n', armor_end - p); + if (!p) + return PXE_PGP_CORRUPT_ARMOR; + + /* step to start of next line */ + p++; + hdrlines++; + } + base64_start = p; + + /* + * Make a modifiable copy of the part of the input that contains the + * headers. The returned key/value pointers will point inside the buffer. + */ + armor_len = base64_start - armor_start; + buf = palloc(armor_len + 1); + memcpy(buf, armor_start, armor_len); + buf[armor_len] = '\0'; + + /* Allocate return arrays */ + *keys = (char **) palloc(hdrlines * sizeof(char *)); + *values = (char **) palloc(hdrlines * sizeof(char *)); + + /* + * Split the header lines at newlines and ": " separators, and collect + * pointers to the keys and values in the return arrays. 
+ */ + n = 0; + line = buf; + for (;;) + { + /* find end of line */ + eol = strchr(line, '\n'); + if (!eol) + break; + nextline = eol + 1; + /* if the line ends in CR + LF, strip the CR */ + if (eol > line && *(eol - 1) == '\r') + eol--; + *eol = '\0'; + + /* find colon+space separating the key and value */ + colon = strstr(line, ": "); + if (!colon) + return PXE_PGP_CORRUPT_ARMOR; + *colon = '\0'; + + /* shouldn't happen, we counted the number of lines beforehand */ + if (n >= hdrlines) + elog(ERROR, "unexpected number of armor header lines"); + + (*keys)[n] = line; + (*values)[n] = colon + 2; + n++; + + /* step to start of next line */ + line = nextline; + } + + if (n != hdrlines) + elog(ERROR, "unexpected number of armor header lines"); + + *nheaders = n; + return 0; } diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c index e03ee7f5f0..c0c5773e66 100644 --- a/contrib/pgcrypto/pgp-decrypt.c +++ b/contrib/pgcrypto/pgp-decrypt.c @@ -182,7 +182,7 @@ pktreader_pull(void *priv, PullFilter *src, int len, if (pkt->type == PKT_CONTEXT) return pullf_read(src, len, data_p); - if (pkt->len == 0) + while (pkt->len == 0) { /* this was last chunk in stream */ if (pkt->type == PKT_NORMAL) @@ -351,37 +351,33 @@ mdc_free(void *priv) } static int -mdc_finish(PGP_Context *ctx, PullFilter *src, - int len, uint8 **data_p) +mdc_finish(PGP_Context *ctx, PullFilter *src, int len) { int res; uint8 hash[20]; - uint8 tmpbuf[22]; + uint8 tmpbuf[20]; + uint8 *data; - if (len + 1 > sizeof(tmpbuf)) + /* should not happen */ + if (ctx->use_mdcbuf_filter) return PXE_BUG; + /* It's SHA1 */ + if (len != 20) + return PXE_PGP_CORRUPT_DATA; + + /* mdc_read should not call md_update */ + ctx->in_mdc_pkt = 1; + /* read data */ - res = pullf_read_max(src, len + 1, data_p, tmpbuf); + res = pullf_read_max(src, len, &data, tmpbuf); if (res < 0) return res; if (res == 0) { - if (ctx->mdc_checked == 0) - { - px_debug("no mdc"); - return PXE_PGP_CORRUPT_DATA; - } - return 0; - } - - /* safety check */ - if (ctx->in_mdc_pkt > 1) - { - px_debug("mdc_finish: several times here?"); + px_debug("no mdc"); return PXE_PGP_CORRUPT_DATA; } - ctx->in_mdc_pkt++; /* is the packet sane? */ if (res != 20) @@ -394,7 +390,7 @@ mdc_finish(PGP_Context *ctx, PullFilter *src, * ok, we got the hash, now check */ px_md_finish(ctx->mdc_ctx, hash); - res = memcmp(hash, *data_p, 20); + res = memcmp(hash, data, 20); px_memset(hash, 0, 20); px_memset(tmpbuf, 0, sizeof(tmpbuf)); if (res != 0) @@ -403,7 +399,7 @@ mdc_finish(PGP_Context *ctx, PullFilter *src, return PXE_PGP_CORRUPT_DATA; } ctx->mdc_checked = 1; - return len; + return 0; } static int @@ -414,12 +410,9 @@ mdc_read(void *priv, PullFilter *src, int len, PGP_Context *ctx = priv; /* skip this filter? 
*/ - if (ctx->use_mdcbuf_filter) + if (ctx->use_mdcbuf_filter || ctx->in_mdc_pkt) return pullf_read(src, len, data_p); - if (ctx->in_mdc_pkt) - return mdc_finish(ctx, src, len, data_p); - res = pullf_read(src, len, data_p); if (res < 0) return res; @@ -878,7 +871,6 @@ process_data_packets(PGP_Context *ctx, MBuf *dst, PullFilter *src, int got_data = 0; int got_mdc = 0; PullFilter *pkt = NULL; - uint8 *tmp; while (1) { @@ -937,11 +929,8 @@ process_data_packets(PGP_Context *ctx, MBuf *dst, PullFilter *src, break; } - /* notify mdc_filter */ - ctx->in_mdc_pkt = 1; - - res = pullf_read(pkt, 8192, &tmp); - if (res > 0) + res = mdc_finish(ctx, pkt, len); + if (res >= 0) got_mdc = 1; break; default: @@ -1069,7 +1058,7 @@ pgp_skip_packet(PullFilter *pkt) while (res > 0) res = pullf_read(pkt, 32 * 1024, &tmp); - return res < 0 ? res : 0; + return res; } /* @@ -1078,19 +1067,16 @@ pgp_skip_packet(PullFilter *pkt) int pgp_expect_packet_end(PullFilter *pkt) { - int res = 1; + int res; uint8 *tmp; - while (res > 0) + res = pullf_read(pkt, 32 * 1024, &tmp); + if (res > 0) { - res = pullf_read(pkt, 32 * 1024, &tmp); - if (res > 0) - { - px_debug("pgp_expect_packet_end: got data"); - return PXE_PGP_CORRUPT_DATA; - } + px_debug("pgp_expect_packet_end: got data"); + return PXE_PGP_CORRUPT_DATA; } - return res < 0 ? res : 0; + return res; } int diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c index ad1fd08427..d0da05cd13 100644 --- a/contrib/pgcrypto/pgp-pgsql.c +++ b/contrib/pgcrypto/pgp-pgsql.c @@ -31,8 +31,12 @@ #include "postgres.h" +#include "lib/stringinfo.h" +#include "catalog/pg_type.h" #include "mb/pg_wchar.h" #include "utils/builtins.h" +#include "utils/array.h" +#include "funcapi.h" #include "mbuf.h" #include "px.h" @@ -55,6 +59,7 @@ PG_FUNCTION_INFO_V1(pgp_key_id_w); PG_FUNCTION_INFO_V1(pg_armor); PG_FUNCTION_INFO_V1(pg_dearmor); +PG_FUNCTION_INFO_V1(pgp_armor_headers); /* * Mix a block of data into RNG. @@ -147,6 +152,19 @@ convert_to_utf8(text *src) return convert_charset(src, GetDatabaseEncoding(), PG_UTF8); } +static bool +string_is_ascii(const char *str) +{ + const char *p; + + for (p = str; *p; p++) + { + if (IS_HIGHBIT_SET(*p)) + return false; + } + return true; +} + static void clear_and_pfree(text *p) { @@ -241,7 +259,10 @@ set_arg(PGP_Context *ctx, char *key, char *val, res = pgp_set_convert_crlf(ctx, atoi(val)); else if (strcmp(key, "unicode-mode") == 0) res = pgp_set_unicode_mode(ctx, atoi(val)); - /* decrypt debug */ + /* + * The remaining options are for debugging/testing and are therefore not + * documented in the user-facing docs. + */ else if (ex != NULL && strcmp(key, "debug") == 0) ex->debug = atoi(val); else if (ex != NULL && strcmp(key, "expect-cipher-algo") == 0) @@ -554,35 +575,25 @@ decrypt_internal(int is_pubenc, int need_text, text *data, err = pgp_set_symkey(ctx, (uint8 *) VARDATA(key), VARSIZE(key) - VARHDRSZ); - /* - * decrypt - */ + /* decrypt */ if (err >= 0) + { err = pgp_decrypt(ctx, src, dst); - /* - * failed? 
- */ - if (err < 0) - goto out; + if (ex.expect) + check_expect(ctx, &ex); - if (ex.expect) - check_expect(ctx, &ex); - - /* remember the setting */ - got_unicode = pgp_get_unicode_mode(ctx); + /* remember the setting */ + got_unicode = pgp_get_unicode_mode(ctx); + } -out: - if (src) - mbuf_free(src); - if (ctx) - pgp_free(ctx); + mbuf_free(src); + pgp_free(ctx); if (err) { px_set_debug_handler(NULL); - if (dst) - mbuf_free(dst); + mbuf_free(dst); ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), errmsg("%s", px_strerror(err)))); @@ -815,28 +826,133 @@ pgp_pub_decrypt_text(PG_FUNCTION_ARGS) * Wrappers for PGP ascii armor */ +/* + * Helper function for pgp_armor. Converts arrays of keys and values into + * plain C arrays, and checks that they don't contain invalid characters. + */ +static int +parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array, + char ***p_keys, char ***p_values) +{ + int nkdims = ARR_NDIM(key_array); + int nvdims = ARR_NDIM(val_array); + char **keys, + **values; + Datum *key_datums, + *val_datums; + bool *key_nulls, + *val_nulls; + int key_count, + val_count; + int i; + + if (nkdims > 1 || nkdims != nvdims) + ereport(ERROR, + (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), + errmsg("wrong number of array subscripts"))); + if (nkdims == 0) + return 0; + + deconstruct_array(key_array, + TEXTOID, -1, false, 'i', + &key_datums, &key_nulls, &key_count); + + deconstruct_array(val_array, + TEXTOID, -1, false, 'i', + &val_datums, &val_nulls, &val_count); + + if (key_count != val_count) + ereport(ERROR, + (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), + errmsg("mismatched array dimensions"))); + + keys = (char **) palloc(sizeof(char *) * key_count); + values = (char **) palloc(sizeof(char *) * val_count); + + for (i = 0; i < key_count; i++) + { + char *v; + + /* Check that the key doesn't contain anything funny */ + if (key_nulls[i]) + ereport(ERROR, + (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("null value not allowed for header key"))); + + v = TextDatumGetCString(key_datums[i]); + + if (!string_is_ascii(v)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("header key must not contain non-ASCII characters"))); + if (strstr(v, ": ")) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("header key must not contain \": \""))); + if (strchr(v, '\n')) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("header key must not contain newlines"))); + keys[i] = v; + + /* And the same for the value */ + if (val_nulls[i]) + ereport(ERROR, + (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), + errmsg("null value not allowed for header value"))); + + v = TextDatumGetCString(val_datums[i]); + + if (!string_is_ascii(v)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("header value must not contain non-ASCII characters"))); + if (strchr(v, '\n')) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("header value must not contain newlines"))); + + values[i] = v; + } + + *p_keys = keys; + *p_values = values; + return key_count; +} + Datum pg_armor(PG_FUNCTION_ARGS) { bytea *data; text *res; - int data_len, - res_len, - guess_len; + int data_len; + StringInfoData buf; + int num_headers; + char **keys = NULL, + **values = NULL; data = PG_GETARG_BYTEA_P(0); data_len = VARSIZE(data) - VARHDRSZ; + if (PG_NARGS() == 3) + { + num_headers = parse_key_value_arrays(PG_GETARG_ARRAYTYPE_P(1), + PG_GETARG_ARRAYTYPE_P(2), + &keys, &values); + } + else if (PG_NARGS() == 1) + num_headers = 0; 
+ else + elog(ERROR, "unexpected number of arguments %d", PG_NARGS()); - guess_len = pgp_armor_enc_len(data_len); - res = palloc(VARHDRSZ + guess_len); + initStringInfo(&buf); - res_len = pgp_armor_encode((uint8 *) VARDATA(data), data_len, - (uint8 *) VARDATA(res)); - if (res_len > guess_len) - ereport(ERROR, - (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - errmsg("Overflow - encode estimate too small"))); - SET_VARSIZE(res, VARHDRSZ + res_len); + pgp_armor_encode((uint8 *) VARDATA(data), data_len, &buf, + num_headers, keys, values); + + res = palloc(VARHDRSZ + buf.len); + SET_VARSIZE(res, VARHDRSZ + buf.len); + memcpy(VARDATA(res), buf.data, buf.len); + pfree(buf.data); PG_FREE_IF_COPY(data, 0); PG_RETURN_TEXT_P(res); @@ -847,32 +963,105 @@ pg_dearmor(PG_FUNCTION_ARGS) { text *data; bytea *res; - int data_len, - res_len, - guess_len; + int data_len; + int ret; + StringInfoData buf; data = PG_GETARG_TEXT_P(0); data_len = VARSIZE(data) - VARHDRSZ; - guess_len = pgp_armor_dec_len(data_len); - res = palloc(VARHDRSZ + guess_len); + initStringInfo(&buf); - res_len = pgp_armor_decode((uint8 *) VARDATA(data), data_len, - (uint8 *) VARDATA(res)); - if (res_len < 0) - ereport(ERROR, - (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - errmsg("%s", px_strerror(res_len)))); - if (res_len > guess_len) + ret = pgp_armor_decode((uint8 *) VARDATA(data), data_len, &buf); + if (ret < 0) ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), - errmsg("Overflow - decode estimate too small"))); - SET_VARSIZE(res, VARHDRSZ + res_len); + errmsg("%s", px_strerror(ret)))); + res = palloc(VARHDRSZ + buf.len); + SET_VARSIZE(res, VARHDRSZ + buf.len); + memcpy(VARDATA(res), buf.data, buf.len); + pfree(buf.data); PG_FREE_IF_COPY(data, 0); PG_RETURN_TEXT_P(res); } +/* cross-call state for pgp_armor_headers */ +typedef struct +{ + int nheaders; + char **keys; + char **values; +} pgp_armor_headers_state; + +Datum +pgp_armor_headers(PG_FUNCTION_ARGS) +{ + FuncCallContext *funcctx; + pgp_armor_headers_state *state; + char *utf8key; + char *utf8val; + HeapTuple tuple; + TupleDesc tupdesc; + AttInMetadata *attinmeta; + + if (SRF_IS_FIRSTCALL()) + { + text *data = PG_GETARG_TEXT_PP(0); + int res; + MemoryContext oldcontext; + + funcctx = SRF_FIRSTCALL_INIT(); + + /* we need the state allocated in the multi call context */ + oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); + + /* Build a tuple descriptor for our result type */ + if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) + elog(ERROR, "return type must be a row type"); + + attinmeta = TupleDescGetAttInMetadata(tupdesc); + funcctx->attinmeta = attinmeta; + + state = (pgp_armor_headers_state *) palloc(sizeof(pgp_armor_headers_state)); + + res = pgp_extract_armor_headers((uint8 *) VARDATA_ANY(data), + VARSIZE_ANY_EXHDR(data), + &state->nheaders, &state->keys, + &state->values); + if (res < 0) + ereport(ERROR, + (errcode(ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION), + errmsg("%s", px_strerror(res)))); + + MemoryContextSwitchTo(oldcontext); + funcctx->user_fctx = state; + } + + funcctx = SRF_PERCALL_SETUP(); + state = (pgp_armor_headers_state *) funcctx->user_fctx; + + if (funcctx->call_cntr >= state->nheaders) + SRF_RETURN_DONE(funcctx); + else + { + char *values[2]; + + /* we assume that the keys (and values) are in UTF-8. 
*/ + utf8key = state->keys[funcctx->call_cntr]; + utf8val = state->values[funcctx->call_cntr]; + + values[0] = pg_any_to_server(utf8key, strlen(utf8key), PG_UTF8); + values[1] = pg_any_to_server(utf8val, strlen(utf8val), PG_UTF8); + + /* build a tuple */ + tuple = BuildTupleFromCStrings(funcctx->attinmeta, values); + SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple)); + } +} + + + /* * Wrappers for PGP key id */ diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h index 8d4ab9862d..398f21bca2 100644 --- a/contrib/pgcrypto/pgp.h +++ b/contrib/pgcrypto/pgp.h @@ -29,6 +29,8 @@ * contrib/pgcrypto/pgp.h */ +#include "lib/stringinfo.h" + #include "mbuf.h" #include "px.h" @@ -274,10 +276,11 @@ void pgp_cfb_free(PGP_CFB *ctx); int pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst); int pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst); -int pgp_armor_encode(const uint8 *src, unsigned len, uint8 *dst); -int pgp_armor_decode(const uint8 *src, unsigned len, uint8 *dst); -unsigned pgp_armor_enc_len(unsigned len); -unsigned pgp_armor_dec_len(unsigned len); +void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst, + int num_headers, char **keys, char **values); +int pgp_armor_decode(const uint8 *src, int len, StringInfo dst); +int pgp_extract_armor_headers(const uint8 *src, unsigned len, + int *nheaders, char ***keys, char ***values); int pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst); int pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src); diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h index a01a58e29c..7255ebf04c 100644 --- a/contrib/pgcrypto/px.h +++ b/contrib/pgcrypto/px.h @@ -206,9 +206,7 @@ void px_set_debug_handler(void (*handler) (const char *)); void px_memset(void *ptr, int c, size_t len); #ifdef PX_DEBUG -void -px_debug(const char *fmt,...) -__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); +void px_debug(const char *fmt,...) pg_attribute_printf(1, 2); #else #define px_debug(...) 
#endif diff --git a/contrib/pgcrypto/random.c b/contrib/pgcrypto/random.c index 3f092ca346..d72679e412 100644 --- a/contrib/pgcrypto/random.c +++ b/contrib/pgcrypto/random.c @@ -32,6 +32,7 @@ #include "postgres.h" #include "px.h" +#include "utils/memdebug.h" /* how many bytes to ask from system random provider */ #define RND_BYTES 32 @@ -195,7 +196,7 @@ try_unix_std(uint8 *dst) memcpy(dst, (uint8 *) &x, sizeof(x)); dst += sizeof(x); - /* let's be desperate */ + /* hash of uninitialized stack and heap allocations */ res = px_find_digest("sha1", &md); if (res >= 0) { @@ -203,8 +204,10 @@ try_unix_std(uint8 *dst) uint8 stack[8192]; int alloc = 32 * 1024; + VALGRIND_MAKE_MEM_DEFINED(stack, sizeof(stack)); px_md_update(md, stack, sizeof(stack)); ptr = px_alloc(alloc); + VALGRIND_MAKE_MEM_DEFINED(ptr, alloc); px_md_update(md, ptr, alloc); px_free(ptr); diff --git a/contrib/pgcrypto/sql/pgp-armor.sql b/contrib/pgcrypto/sql/pgp-armor.sql index 71ffba26a0..a277a1894c 100644 --- a/contrib/pgcrypto/sql/pgp-armor.sql +++ b/contrib/pgcrypto/sql/pgp-armor.sql @@ -56,3 +56,161 @@ em9va2E= =ZZZZ -----END PGP MESSAGE----- '); + +-- corrupt (no space after the colon) +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +foo: + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- corrupt (no empty line) +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- no headers +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- header with empty value +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +foo: + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- simple +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +fookey: foovalue +barkey: barvalue + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- insane keys, part 1 +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +insane:key : + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- insane keys, part 2 +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +insane:key : text value here + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- long value +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +long: this value is more than 76 characters long, but it should still parse correctly as that''s permitted by RFC 4880 + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- long value, split up +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +long: this value is more than 76 characters long, but it should still +long: parse correctly as that''s permitted by RFC 4880 + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- long value, split up, part 2 +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +long: this value is more than +long: 76 characters long, but it should still +long: parse correctly as that''s permitted by RFC 4880 + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +-- long value, split up, part 3 +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +emptykey: +long: this value is more than +emptykey: +long: 76 characters long, but it should still +emptykey: +long: parse correctly as that''s permitted by RFC 4880 +emptykey: + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +'); + +select * from pgp_armor_headers(' +-----BEGIN PGP MESSAGE----- +Comment: dat1.blowfish.sha1.mdc.s2k3.z0 + +jA0EBAMCfFNwxnvodX9g0jwB4n4s26/g5VmKzVab1bX1SmwY7gvgvlWdF3jKisvS +yA6Ce1QTMK3KdL2MPfamsTUSAML8huCJMwYQFfE= 
+=JcP+ +-----END PGP MESSAGE----- +'); + +-- test CR+LF line endings +select * from pgp_armor_headers(replace(' +-----BEGIN PGP MESSAGE----- +fookey: foovalue +barkey: barvalue + +em9va2E= +=ZZZZ +-----END PGP MESSAGE----- +', E'\n', E'\r\n')); + +-- test header generation +select armor('zooka', array['foo'], array['bar']); +select armor('zooka', array['Version', 'Comment'], array['Created by pgcrypto', 'PostgreSQL, the world''s most advanced open source database']); +select * from pgp_armor_headers( + armor('zooka', array['Version', 'Comment'], + array['Created by pgcrypto', 'PostgreSQL, the world''s most advanced open source database'])); + +-- error/corner cases +select armor('', array['foo'], array['too', 'many']); +select armor('', array['too', 'many'], array['foo']); +select armor('', array[['']], array['foo']); +select armor('', array['foo'], array[['']]); +select armor('', array[null], array['foo']); +select armor('', array['foo'], array[null]); +select armor('', '[0:0]={"foo"}', array['foo']); +select armor('', array['foo'], '[0:0]={"foo"}'); +select armor('', array[E'embedded\nnewline'], array['foo']); +select armor('', array['foo'], array[E'embedded\nnewline']); +select armor('', array['embedded: colon+space'], array['foo']); diff --git a/contrib/pgcrypto/sql/pgp-decrypt.sql b/contrib/pgcrypto/sql/pgp-decrypt.sql index 93535ab016..5457152ccf 100644 --- a/contrib/pgcrypto/sql/pgp-decrypt.sql +++ b/contrib/pgcrypto/sql/pgp-decrypt.sql @@ -264,3 +264,7 @@ a3nsOzKTXUfS9VyaXo8IrncM6n7fdaXpwba/3tNsAhJG4lDv1k4g9v8Ix2dfv6Rs -----END PGP MESSAGE----- '), 'key', 'convert-crlf=1'), 'sha1'), 'hex'); -- expected: 7efefcab38467f7484d6fa43dc86cf5281bd78e2 + +-- check BUG #11905, problem with messages 6 less than a power of 2. +select pgp_sym_decrypt(pgp_sym_encrypt(repeat('x',65530),'1'),'1') = repeat('x',65530); +-- expected: true diff --git a/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql b/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql index f8495d1e54..3f2bae9e40 100644 --- a/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql +++ b/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql @@ -579,6 +579,28 @@ GQ== -----END PGP MESSAGE----- '); +-- rsaenc2048 / aes128 (not from gnupg) +insert into encdata (id, data) values (5, ' +-----BEGIN PGP MESSAGE----- + +wcBMA/0CBsQJt0h1AQgAzxZ8j+OTeZ8IlLxfZ/mVd28/gUsCY+xigWBk/anZlK3T +p2tNU2idHzKdAttH2Hu/PWbZp4kwjl9spezYxMqCeBZqtfGED88Y+rqK0n/ul30A +7jjFHaw0XUOqFNlST1v6H2i7UXndnp+kcLfHPhnO5BIYWxB2CYBehItqtrn75eqr +C7trGzU/cr74efcWagbCDSNjiAV7GlEptlzmgVMmNikyI6w0ojEUx8lCLc/OsFz9 +pJUAX8xuwjxDVv+W7xk6c96grQiQlm+FLDYGiGNXoAzx3Wi/howu3uV40dXfY+jx +3WBrhEew5Pkpt1SsWoFnJWOfJ8GLd0ec8vfRCqAIVdLgAeS7NyawQYtd6wuVrEAj +5SMg4Thb4d+g45RksuGLHUUr4qO9tiXglODa4InhmJfgNuLk+RGz4LXjq8wepEmW +vRbgFOG54+Cf4C/gC+HkreDm5JKSKjvvw4B/jC6CDxq+JoziEe2Z1uEjCuEcr+Es +/eGzeOi36BejXPMHeKxXejj5qBBHKV0pHVhZSgffR0TtlXdB967Yl/5agV0R89hI +7Gw52emfnH4Z0Y4V0au2H0k1dR/2IxXdJEWSTG7Be1JHT59p9ei2gSEOrdBMIOjP +tbYYUlmmbvD49bHfThkDiC+oc9947LgQsk3kOOLbNHcjkbrjH8R5kjII4m/SEZA1 +g09T+338SzevBcVXh/cFrQ6/Et+lyyO2LJRUMs69g/HyzJOVWT2Iu8E0eS9MWevY +Qtrkrhrpkl3Y02qEp/j6M03Yu2t6ZF7dp51aJ5VhO2mmmtHaTnCyCc8Fcf72LmD8 +blH2nKZC9d6fi4YzSYMepZpMOFR65M80MCMiDUGnZBB8sEADu2/iVtqDUeG8mAA= +=PHJ1 +-----END PGP MESSAGE----- +'); + -- successful decrypt select pgp_pub_decrypt(dearmor(data), dearmor(seckey)) from keytbl, encdata where keytbl.id=1 and encdata.id=1; @@ -619,3 +641,7 @@ from keytbl, encdata where keytbl.id=5 and encdata.id=1; -- password-protected secret key, right password select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 
'parool') from keytbl, encdata where keytbl.id=5 and encdata.id=1; + +-- test for a short read from prefix_init +select pgp_pub_decrypt(dearmor(data), dearmor(seckey)) +from keytbl, encdata where keytbl.id=6 and encdata.id=5; diff --git a/contrib/pgrowlocks/Makefile b/contrib/pgrowlocks/Makefile index fe8042344f..059ea0528b 100644 --- a/contrib/pgrowlocks/Makefile +++ b/contrib/pgrowlocks/Makefile @@ -1,10 +1,11 @@ # contrib/pgrowlocks/Makefile MODULE_big = pgrowlocks -OBJS = pgrowlocks.o +OBJS = pgrowlocks.o $(WIN32RES) EXTENSION = pgrowlocks DATA = pgrowlocks--1.1.sql pgrowlocks--1.0--1.1.sql pgrowlocks--unpackaged--1.0.sql +PGFILEDESC = "pgrowlocks - display row locking information" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql b/contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql index b8c3faf1c7..bfa9855825 100644 --- a/contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql +++ b/contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql @@ -1,6 +1,6 @@ /* contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pgrowlocks" to load this file. \quit +\echo Use "CREATE EXTENSION pgrowlocks FROM unpackaged" to load this file. \quit ALTER EXTENSION pgrowlocks ADD function pgrowlocks(text); diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c index 15d9704752..88f7137a17 100644 --- a/contrib/pgrowlocks/pgrowlocks.c +++ b/contrib/pgrowlocks/pgrowlocks.c @@ -38,7 +38,6 @@ #include "utils/snapmgr.h" #include "utils/tqual.h" - PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(pgrowlocks); @@ -137,14 +136,9 @@ pgrowlocks(PG_FUNCTION_ARGS) infomask = tuple->t_data->t_infomask; /* - * a tuple is locked if HTSU returns BeingUpdated, and if it returns - * MayBeUpdated but the Xmax is valid and pointing at us. + * A tuple is locked if HTSU returns BeingUpdated. */ - if (htsu == HeapTupleBeingUpdated || - (htsu == HeapTupleMayBeUpdated && - !(infomask & HEAP_XMAX_INVALID) && - !(infomask & HEAP_XMAX_IS_MULTI) && - (xmax == GetCurrentTransactionIdIfAny()))) + if (htsu == HeapTupleBeingUpdated) { char **values; @@ -166,7 +160,8 @@ pgrowlocks(PG_FUNCTION_ARGS) allow_old = !(infomask & HEAP_LOCK_MASK) && (infomask & HEAP_XMAX_LOCK_ONLY); - nmembers = GetMultiXactIdMembers(xmax, &members, allow_old); + nmembers = GetMultiXactIdMembers(xmax, &members, allow_old, + false); if (nmembers == -1) { values[Atnum_xids] = "{0}"; diff --git a/contrib/pgstattuple/Makefile b/contrib/pgstattuple/Makefile index d991c3a803..862585cc01 100644 --- a/contrib/pgstattuple/Makefile +++ b/contrib/pgstattuple/Makefile @@ -1,10 +1,11 @@ # contrib/pgstattuple/Makefile MODULE_big = pgstattuple -OBJS = pgstattuple.o pgstatindex.o +OBJS = pgstattuple.o pgstatindex.o $(WIN32RES) EXTENSION = pgstattuple DATA = pgstattuple--1.2.sql pgstattuple--1.1--1.2.sql pgstattuple--1.0--1.1.sql pgstattuple--unpackaged--1.0.sql +PGFILEDESC = "pgstattuple - tuple-level statistics" REGRESS = pgstattuple diff --git a/contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql b/contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql index 14b63cafcf..ef71000a32 100644 --- a/contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql +++ b/contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pgstattuple" to load this file. 
\quit +\echo Use "CREATE EXTENSION pgstattuple FROM unpackaged" to load this file. \quit ALTER EXTENSION pgstattuple ADD function pgstattuple(text); ALTER EXTENSION pgstattuple ADD function pgstattuple(oid); diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index edc603f6a1..c3a8b1d424 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -36,7 +36,6 @@ #include "utils/builtins.h" #include "utils/tqual.h" - PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(pgstattuple); @@ -274,7 +273,6 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) BlockNumber tupblock; Buffer buffer; pgstattuple_type stat = {0}; - BufferAccessStrategy bstrategy; SnapshotData SnapshotDirty; /* Disable syncscan because we assume we scan from block zero upwards */ @@ -283,10 +281,6 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) nblocks = scan->rs_nblocks; /* # blocks to be scanned */ - /* prepare access strategy for this table */ - bstrategy = GetAccessStrategy(BAS_BULKREAD); - scan->rs_strategy = bstrategy; - /* scan the relation */ while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { @@ -320,26 +314,28 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) { CHECK_FOR_INTERRUPTS(); - buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, RBM_NORMAL, bstrategy); + buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, + RBM_NORMAL, scan->rs_strategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); stat.free_space += PageGetHeapFreeSpace((Page) BufferGetPage(buffer)); UnlockReleaseBuffer(buffer); block++; } } - heap_endscan(scan); while (block < nblocks) { CHECK_FOR_INTERRUPTS(); - buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, RBM_NORMAL, bstrategy); + buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, + RBM_NORMAL, scan->rs_strategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); stat.free_space += PageGetHeapFreeSpace((Page) BufferGetPage(buffer)); UnlockReleaseBuffer(buffer); block++; } + heap_endscan(scan); relation_close(rel, AccessShareLock); stat.table_len = (uint64) nblocks *BLCKSZ; diff --git a/contrib/postgres_fdw/Makefile b/contrib/postgres_fdw/Makefile index 8c497201d0..d2b98e10f3 100644 --- a/contrib/postgres_fdw/Makefile +++ b/contrib/postgres_fdw/Makefile @@ -1,25 +1,23 @@ # contrib/postgres_fdw/Makefile MODULE_big = postgres_fdw -OBJS = postgres_fdw.o option.o deparse.o connection.o +OBJS = postgres_fdw.o option.o deparse.o connection.o $(WIN32RES) +PGFILEDESC = "postgres_fdw - foreign data wrapper for PostgreSQL" PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK = $(libpq) -SHLIB_PREREQS = submake-libpq EXTENSION = postgres_fdw DATA = postgres_fdw--1.0.sql REGRESS = postgres_fdw -# the db name is hard-coded in the tests -override USE_MODULE_DB = - ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else +SHLIB_PREREQS = submake-libpq subdir = contrib/postgres_fdw top_builddir = ../.. 
include $(top_builddir)/src/Makefile.global diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c index 116be7ddcb..4e02cb289d 100644 --- a/contrib/postgres_fdw/connection.c +++ b/contrib/postgres_fdw/connection.c @@ -3,7 +3,7 @@ * connection.c * Connection management functions for postgres_fdw * - * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/connection.c @@ -109,12 +109,11 @@ GetConnection(ForeignServer *server, UserMapping *user, MemSet(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(ConnCacheKey); ctl.entrysize = sizeof(ConnCacheEntry); - ctl.hash = tag_hash; /* allocate ConnectionHash in the cache context */ ctl.hcxt = CacheMemoryContext; ConnectionHash = hash_create("postgres_fdw connections", 8, &ctl, - HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); /* * Register some callback functions that manage connection cleanup. diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c index d7d9b9c77d..94fab18c42 100644 --- a/contrib/postgres_fdw/deparse.c +++ b/contrib/postgres_fdw/deparse.c @@ -23,7 +23,7 @@ * the foreign table's columns are not marked with collations that match the * remote table's columns, which we can consider to be user error. * - * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/deparse.c @@ -50,6 +50,7 @@ #include "parser/parsetree.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "utils/rel.h" #include "utils/syscache.h" @@ -116,7 +117,6 @@ static void deparseReturningList(StringInfo buf, PlannerInfo *root, static void deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root); static void deparseRelation(StringInfo buf, Relation rel); -static void deparseStringLiteral(StringInfo buf, const char *val); static void deparseExpr(Expr *expr, deparse_expr_cxt *context); static void deparseVar(Var *node, deparse_expr_cxt *context); static void deparseConst(Const *node, deparse_expr_cxt *context); @@ -254,6 +254,18 @@ foreign_expr_walker(Node *node, var->varlevelsup == 0) { /* Var belongs to foreign table */ + + /* + * System columns other than ctid should not be sent to + * the remote, since we don't make any effort to ensure + * that local and remote values match (tableoid, in + * particular, almost certainly doesn't match). + */ + if (var->varattno < 0 && + var->varattno != SelfItemPointerAttributeNumber) + return false; + + /* Else check the collation */ collation = var->varcollid; state = OidIsValid(collation) ? FDW_COLLATE_SAFE : FDW_COLLATE_NONE; } @@ -305,7 +317,7 @@ foreign_expr_walker(Node *node, break; case T_ArrayRef: { - ArrayRef *ar = (ArrayRef *) node;; + ArrayRef *ar = (ArrayRef *) node; /* Assignment should not be in restrictions. */ if (ar->refassgnexpr != NULL) @@ -1160,7 +1172,7 @@ deparseRelation(StringInfo buf, Relation rel) /* * Append a SQL string literal representing "val" to buf. */ -static void +void deparseStringLiteral(StringInfo buf, const char *val) { const char *valptr; @@ -1716,9 +1728,6 @@ deparseRelabelType(RelabelType *node, deparse_expr_cxt *context) /* * Deparse a BoolExpr node. - * - * Note: by the time we get here, AND and OR expressions have been flattened - * into N-argument form, so we'd better be prepared to deal with that. 
*/ static void deparseBoolExpr(BoolExpr *node, deparse_expr_cxt *context) diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out index 2e49ee317a..783cb41571 100644 --- a/contrib/postgres_fdw/expected/postgres_fdw.out +++ b/contrib/postgres_fdw/expected/postgres_fdw.out @@ -3,8 +3,14 @@ -- =================================================================== CREATE EXTENSION postgres_fdw; CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw; -CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname 'contrib_regression'); +DO $d$ + BEGIN + EXECUTE $$CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname '$$||current_database()||$$', + port '$$||current_setting('port')||$$' + )$$; + END; +$d$; CREATE USER MAPPING FOR public SERVER testserver1 OPTIONS (user 'value', password 'value'); CREATE USER MAPPING FOR CURRENT_USER SERVER loopback; @@ -231,6 +237,39 @@ SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1'; 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo (1 row) +-- with FOR UPDATE/SHARE +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + LockRows + Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.* + -> Foreign Scan on public.ft1 t1 + Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 101)) FOR UPDATE +(5 rows) + +SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +-----+----+-------+------------------------------+--------------------------+----+------------+----- + 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo +(1 row) + +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + LockRows + Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.* + -> Foreign Scan on public.ft1 t1 + Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 102)) FOR SHARE +(5 rows) + +SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +-----+----+-------+------------------------------+--------------------------+----+------------+----- + 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo +(1 row) + -- aggregate SELECT COUNT(*) FROM ft1 t1; count @@ -826,6 +865,74 @@ DEALLOCATE st2; DEALLOCATE st3; DEALLOCATE st4; DEALLOCATE st5; +-- System columns, except ctid, should not be sent to remote +EXPLAIN (VERBOSE, COSTS false) +SELECT * FROM ft1 t1 WHERE t1.tableoid = 'pg_class'::regclass LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------- + Limit + Output: c1, c2, c3, c4, c5, c6, c7, c8 + -> Foreign Scan on public.ft1 t1 + Output: c1, c2, c3, c4, c5, c6, c7, c8 + Filter: (t1.tableoid = '1259'::oid) + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" +(6 rows) + +SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIMIT 1; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +----+----+-------+------------------------------+--------------------------+----+------------+----- + 1 | 1 | 00001 | Fri Jan 02 00:00:00 
1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo +(1 row) + +EXPLAIN (VERBOSE, COSTS false) +SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------- + Limit + Output: ((tableoid)::regclass), c1, c2, c3, c4, c5, c6, c7, c8 + -> Foreign Scan on public.ft1 t1 + Output: (tableoid)::regclass, c1, c2, c3, c4, c5, c6, c7, c8 + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" +(5 rows) + +SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1; + tableoid | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +----------+----+----+-------+------------------------------+--------------------------+----+------------+----- + ft1 | 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo +(1 row) + +EXPLAIN (VERBOSE, COSTS false) +SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)'; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Foreign Scan on public.ft1 t1 + Output: c1, c2, c3, c4, c5, c6, c7, c8 + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((ctid = '(0,2)'::tid)) +(3 rows) + +SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)'; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +----+----+-------+------------------------------+--------------------------+----+------------+----- + 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo +(1 row) + +EXPLAIN (VERBOSE, COSTS false) +SELECT ctid, * FROM ft1 t1 LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------- + Limit + Output: ctid, c1, c2, c3, c4, c5, c6, c7, c8 + -> Foreign Scan on public.ft1 t1 + Output: ctid, c1, c2, c3, c4, c5, c6, c7, c8 + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" +(5 rows) + +SELECT ctid, * FROM ft1 t1 LIMIT 1; + ctid | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +-------+----+----+-------+------------------------------+--------------------------+----+------------+----- + (0,1) | 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo +(1 row) + -- =================================================================== -- used in pl/pgsql function -- =================================================================== @@ -2482,6 +2589,91 @@ select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; (13 rows) -- =================================================================== +-- test check constraints +-- =================================================================== +-- Consistent check constraints provide consistent results +ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c2positive CHECK (c2 >= 0); +EXPLAIN (VERBOSE, COSTS false) SELECT count(*) FROM ft1 WHERE c2 < 0; + QUERY PLAN +------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Foreign Scan on public.ft1 + Remote SQL: SELECT NULL FROM "S 1"."T 1" WHERE ((c2 < 0)) +(4 rows) + +SELECT count(*) FROM ft1 WHERE c2 < 0; + count +------- + 0 +(1 row) + +SET constraint_exclusion = 'on'; +EXPLAIN (VERBOSE, COSTS false) SELECT count(*) FROM ft1 WHERE c2 < 0; + QUERY PLAN +-------------------------------- + Aggregate + Output: count(*) + -> Result + One-Time Filter: false +(4 rows) + +SELECT count(*) FROM ft1 WHERE c2 < 0; + count +------- + 0 +(1 row) + +RESET constraint_exclusion; +-- check constraint is enforced on the remote side, not locally +INSERT INTO ft1(c1, c2) 
VALUES(1111, -2); -- c2positive +ERROR: new row for relation "T 1" violates check constraint "c2positive" +DETAIL: Failing row contains (1111, -2, null, null, null, null, ft1 , null). +CONTEXT: Remote SQL command: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) +UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive +ERROR: new row for relation "T 1" violates check constraint "c2positive" +DETAIL: Failing row contains (1, -1, 00001_trig_update, 1970-01-02 08:00:00+00, 1970-01-02 00:00:00, 1, 1 , foo). +CONTEXT: Remote SQL command: UPDATE "S 1"."T 1" SET c2 = $2 WHERE ctid = $1 +ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2positive; +-- But inconsistent check constraints provide inconsistent results +ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c2negative CHECK (c2 < 0); +EXPLAIN (VERBOSE, COSTS false) SELECT count(*) FROM ft1 WHERE c2 >= 0; + QUERY PLAN +-------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Foreign Scan on public.ft1 + Remote SQL: SELECT NULL FROM "S 1"."T 1" WHERE ((c2 >= 0)) +(4 rows) + +SELECT count(*) FROM ft1 WHERE c2 >= 0; + count +------- + 821 +(1 row) + +SET constraint_exclusion = 'on'; +EXPLAIN (VERBOSE, COSTS false) SELECT count(*) FROM ft1 WHERE c2 >= 0; + QUERY PLAN +-------------------------------- + Aggregate + Output: count(*) + -> Result + One-Time Filter: false +(4 rows) + +SELECT count(*) FROM ft1 WHERE c2 >= 0; + count +------- + 0 +(1 row) + +RESET constraint_exclusion; +-- local check constraint is not actually enforced +INSERT INTO ft1(c1, c2) VALUES(1111, 2); +UPDATE ft1 SET c2 = c2 + 1 WHERE c1 = 1; +ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2negative; +-- =================================================================== -- test serial columns (ie, sequence-based defaults) -- =================================================================== create table loc1 (f1 serial, f2 text); @@ -2834,3 +3026,613 @@ NOTICE: NEW: (13,"test triggered !") (0,27) (1 row) +-- =================================================================== +-- test inheritance features +-- =================================================================== +CREATE TABLE a (aa TEXT); +CREATE TABLE loct (aa TEXT, bb TEXT); +CREATE FOREIGN TABLE b (bb TEXT) INHERITS (a) + SERVER loopback OPTIONS (table_name 'loct'); +INSERT INTO a(aa) VALUES('aaa'); +INSERT INTO a(aa) VALUES('aaaa'); +INSERT INTO a(aa) VALUES('aaaaa'); +INSERT INTO b(aa) VALUES('bbb'); +INSERT INTO b(aa) VALUES('bbbb'); +INSERT INTO b(aa) VALUES('bbbbb'); +SELECT tableoid::regclass, * FROM a; + tableoid | aa +----------+------- + a | aaa + a | aaaa + a | aaaaa + b | bbb + b | bbbb + b | bbbbb +(6 rows) + +SELECT tableoid::regclass, * FROM b; + tableoid | aa | bb +----------+-------+---- + b | bbb | + b | bbbb | + b | bbbbb | +(3 rows) + +SELECT tableoid::regclass, * FROM ONLY a; + tableoid | aa +----------+------- + a | aaa + a | aaaa + a | aaaaa +(3 rows) + +UPDATE a SET aa = 'zzzzzz' WHERE aa LIKE 'aaaa%'; +SELECT tableoid::regclass, * FROM a; + tableoid | aa +----------+-------- + a | aaa + a | zzzzzz + a | zzzzzz + b | bbb + b | bbbb + b | bbbbb +(6 rows) + +SELECT tableoid::regclass, * FROM b; + tableoid | aa | bb +----------+-------+---- + b | bbb | + b | bbbb | + b | bbbbb | +(3 rows) + +SELECT tableoid::regclass, * FROM ONLY a; + tableoid | aa +----------+-------- + a | aaa + a | zzzzzz + a | zzzzzz +(3 rows) + +UPDATE b SET aa = 'new'; +SELECT tableoid::regclass, * FROM a; + tableoid | aa 
+----------+-------- + a | aaa + a | zzzzzz + a | zzzzzz + b | new + b | new + b | new +(6 rows) + +SELECT tableoid::regclass, * FROM b; + tableoid | aa | bb +----------+-----+---- + b | new | + b | new | + b | new | +(3 rows) + +SELECT tableoid::regclass, * FROM ONLY a; + tableoid | aa +----------+-------- + a | aaa + a | zzzzzz + a | zzzzzz +(3 rows) + +UPDATE a SET aa = 'newtoo'; +SELECT tableoid::regclass, * FROM a; + tableoid | aa +----------+-------- + a | newtoo + a | newtoo + a | newtoo + b | newtoo + b | newtoo + b | newtoo +(6 rows) + +SELECT tableoid::regclass, * FROM b; + tableoid | aa | bb +----------+--------+---- + b | newtoo | + b | newtoo | + b | newtoo | +(3 rows) + +SELECT tableoid::regclass, * FROM ONLY a; + tableoid | aa +----------+-------- + a | newtoo + a | newtoo + a | newtoo +(3 rows) + +DELETE FROM a; +SELECT tableoid::regclass, * FROM a; + tableoid | aa +----------+---- +(0 rows) + +SELECT tableoid::regclass, * FROM b; + tableoid | aa | bb +----------+----+---- +(0 rows) + +SELECT tableoid::regclass, * FROM ONLY a; + tableoid | aa +----------+---- +(0 rows) + +DROP TABLE a CASCADE; +NOTICE: drop cascades to foreign table b +DROP TABLE loct; +-- Check SELECT FOR UPDATE/SHARE with an inherited source table +create table loct1 (f1 int, f2 int, f3 int); +create table loct2 (f1 int, f2 int, f3 int); +create table foo (f1 int, f2 int); +create foreign table foo2 (f3 int) inherits (foo) + server loopback options (table_name 'loct1'); +create table bar (f1 int, f2 int); +create foreign table bar2 (f3 int) inherits (bar) + server loopback options (table_name 'loct2'); +insert into foo values(1,1); +insert into foo values(3,3); +insert into foo2 values(2,2,2); +insert into foo2 values(4,4,4); +insert into bar values(1,11); +insert into bar values(2,22); +insert into bar values(6,66); +insert into bar2 values(3,33,33); +insert into bar2 values(4,44,44); +insert into bar2 values(7,77,77); +explain (verbose, costs off) +select * from bar where f1 in (select f1 from foo) for update; + QUERY PLAN +---------------------------------------------------------------------------------------------- + LockRows + Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*, foo.ctid, foo.tableoid, foo.* + -> Hash Join + Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*, foo.ctid, foo.tableoid, foo.* + Hash Cond: (bar.f1 = foo.f1) + -> Append + -> Seq Scan on public.bar + Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.* + -> Foreign Scan on public.bar2 + Output: bar2.f1, bar2.f2, bar2.ctid, bar2.tableoid, bar2.* + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE + -> Hash + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + -> HashAggregate + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + Group Key: foo.f1 + -> Append + -> Seq Scan on public.foo + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + -> Foreign Scan on public.foo2 + Output: foo2.ctid, foo2.tableoid, foo2.*, foo2.f1 + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1 +(22 rows) + +select * from bar where f1 in (select f1 from foo) for update; + f1 | f2 +----+---- + 1 | 11 + 2 | 22 + 3 | 33 + 4 | 44 +(4 rows) + +explain (verbose, costs off) +select * from bar where f1 in (select f1 from foo) for share; + QUERY PLAN +---------------------------------------------------------------------------------------------- + LockRows + Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*, foo.ctid, foo.tableoid, foo.* + -> Hash Join + Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.*, foo.ctid, foo.tableoid, foo.* + 
Hash Cond: (bar.f1 = foo.f1) + -> Append + -> Seq Scan on public.bar + Output: bar.f1, bar.f2, bar.ctid, bar.tableoid, bar.* + -> Foreign Scan on public.bar2 + Output: bar2.f1, bar2.f2, bar2.ctid, bar2.tableoid, bar2.* + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR SHARE + -> Hash + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + -> HashAggregate + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + Group Key: foo.f1 + -> Append + -> Seq Scan on public.foo + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + -> Foreign Scan on public.foo2 + Output: foo2.ctid, foo2.tableoid, foo2.*, foo2.f1 + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1 +(22 rows) + +select * from bar where f1 in (select f1 from foo) for share; + f1 | f2 +----+---- + 1 | 11 + 2 | 22 + 3 | 33 + 4 | 44 +(4 rows) + +-- Check UPDATE with inherited target and an inherited source table +explain (verbose, costs off) +update bar set f2 = f2 + 100 where f1 in (select f1 from foo); + QUERY PLAN +--------------------------------------------------------------------------------------------- + Update on public.bar + Update on public.bar + Foreign Update on public.bar2 + Remote SQL: UPDATE public.loct2 SET f2 = $2 WHERE ctid = $1 + -> Hash Join + Output: bar.f1, (bar.f2 + 100), bar.ctid, foo.ctid, foo.tableoid, foo.* + Hash Cond: (bar.f1 = foo.f1) + -> Seq Scan on public.bar + Output: bar.f1, bar.f2, bar.ctid + -> Hash + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + -> HashAggregate + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + Group Key: foo.f1 + -> Append + -> Seq Scan on public.foo + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + -> Foreign Scan on public.foo2 + Output: foo2.ctid, foo2.tableoid, foo2.*, foo2.f1 + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1 + -> Hash Join + Output: bar2.f1, (bar2.f2 + 100), bar2.f3, bar2.ctid, foo.ctid, foo.tableoid, foo.* + Hash Cond: (bar2.f1 = foo.f1) + -> Foreign Scan on public.bar2 + Output: bar2.f1, bar2.f2, bar2.f3, bar2.ctid + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE + -> Hash + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + -> HashAggregate + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + Group Key: foo.f1 + -> Append + -> Seq Scan on public.foo + Output: foo.ctid, foo.tableoid, foo.*, foo.f1 + -> Foreign Scan on public.foo2 + Output: foo2.ctid, foo2.tableoid, foo2.*, foo2.f1 + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct1 +(37 rows) + +update bar set f2 = f2 + 100 where f1 in (select f1 from foo); +select tableoid::regclass, * from bar order by 1,2; + tableoid | f1 | f2 +----------+----+----- + bar | 1 | 111 + bar | 2 | 122 + bar | 6 | 66 + bar2 | 3 | 133 + bar2 | 4 | 144 + bar2 | 7 | 77 +(6 rows) + +-- Check UPDATE with inherited target and an appendrel subquery +explain (verbose, costs off) +update bar set f2 = f2 + 100 +from + ( select f1 from foo union all select f1+3 from foo ) ss +where bar.f1 = ss.f1; + QUERY PLAN +-------------------------------------------------------------------------------------- + Update on public.bar + Update on public.bar + Foreign Update on public.bar2 + Remote SQL: UPDATE public.loct2 SET f2 = $2 WHERE ctid = $1 + -> Hash Join + Output: bar.f1, (bar.f2 + 100), bar.ctid, (ROW(foo.f1)) + Hash Cond: (foo.f1 = bar.f1) + -> Append + -> Seq Scan on public.foo + Output: ROW(foo.f1), foo.f1 + -> Foreign Scan on public.foo2 + Output: ROW(foo2.f1), foo2.f1 + Remote SQL: SELECT f1 FROM public.loct1 + -> Seq Scan on public.foo foo_1 + Output: ROW((foo_1.f1 + 3)), (foo_1.f1 + 3) + -> Foreign Scan on 
public.foo2 foo2_1 + Output: ROW((foo2_1.f1 + 3)), (foo2_1.f1 + 3) + Remote SQL: SELECT f1 FROM public.loct1 + -> Hash + Output: bar.f1, bar.f2, bar.ctid + -> Seq Scan on public.bar + Output: bar.f1, bar.f2, bar.ctid + -> Merge Join + Output: bar2.f1, (bar2.f2 + 100), bar2.f3, bar2.ctid, (ROW(foo.f1)) + Merge Cond: (bar2.f1 = foo.f1) + -> Sort + Output: bar2.f1, bar2.f2, bar2.f3, bar2.ctid + Sort Key: bar2.f1 + -> Foreign Scan on public.bar2 + Output: bar2.f1, bar2.f2, bar2.f3, bar2.ctid + Remote SQL: SELECT f1, f2, f3, ctid FROM public.loct2 FOR UPDATE + -> Sort + Output: (ROW(foo.f1)), foo.f1 + Sort Key: foo.f1 + -> Append + -> Seq Scan on public.foo + Output: ROW(foo.f1), foo.f1 + -> Foreign Scan on public.foo2 + Output: ROW(foo2.f1), foo2.f1 + Remote SQL: SELECT f1 FROM public.loct1 + -> Seq Scan on public.foo foo_1 + Output: ROW((foo_1.f1 + 3)), (foo_1.f1 + 3) + -> Foreign Scan on public.foo2 foo2_1 + Output: ROW((foo2_1.f1 + 3)), (foo2_1.f1 + 3) + Remote SQL: SELECT f1 FROM public.loct1 +(45 rows) + +update bar set f2 = f2 + 100 +from + ( select f1 from foo union all select f1+3 from foo ) ss +where bar.f1 = ss.f1; +select tableoid::regclass, * from bar order by 1,2; + tableoid | f1 | f2 +----------+----+----- + bar | 1 | 211 + bar | 2 | 222 + bar | 6 | 166 + bar2 | 3 | 233 + bar2 | 4 | 244 + bar2 | 7 | 177 +(6 rows) + +-- Test that WHERE CURRENT OF is not supported +begin; +declare c cursor for select * from bar where f1 = 7; +fetch from c; + f1 | f2 +----+----- + 7 | 177 +(1 row) + +update bar set f2 = null where current of c; +ERROR: WHERE CURRENT OF is not supported for this table type +rollback; +drop table foo cascade; +NOTICE: drop cascades to foreign table foo2 +drop table bar cascade; +NOTICE: drop cascades to foreign table bar2 +drop table loct1; +drop table loct2; +-- =================================================================== +-- test IMPORT FOREIGN SCHEMA +-- =================================================================== +CREATE SCHEMA import_source; +CREATE TABLE import_source.t1 (c1 int, c2 varchar NOT NULL); +CREATE TABLE import_source.t2 (c1 int default 42, c2 varchar NULL, c3 text collate "POSIX"); +CREATE TYPE typ1 AS (m1 int, m2 varchar); +CREATE TABLE import_source.t3 (c1 timestamptz default now(), c2 typ1); +CREATE TABLE import_source."x 4" (c1 float8, "C 2" text, c3 varchar(42)); +CREATE TABLE import_source."x 5" (c1 float8); +ALTER TABLE import_source."x 5" DROP COLUMN c1; +CREATE SCHEMA import_dest1; +IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest1; +\det+ import_dest1 + List of foreign tables + Schema | Table | Server | FDW Options | Description +--------------+-------+----------+-------------------------------------------------+------------- + import_dest1 | t1 | loopback | (schema_name 'import_source', table_name 't1') | + import_dest1 | t2 | loopback | (schema_name 'import_source', table_name 't2') | + import_dest1 | t3 | loopback | (schema_name 'import_source', table_name 't3') | + import_dest1 | x 4 | loopback | (schema_name 'import_source', table_name 'x 4') | + import_dest1 | x 5 | loopback | (schema_name 'import_source', table_name 'x 5') | +(5 rows) + +\d import_dest1.* + Foreign table "import_dest1.t1" + Column | Type | Modifiers | FDW Options +--------+-------------------+-----------+-------------------- + c1 | integer | | (column_name 'c1') + c2 | character varying | not null | (column_name 'c2') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 't1') + + Foreign table 
"import_dest1.t2" + Column | Type | Modifiers | FDW Options +--------+-------------------+---------------+-------------------- + c1 | integer | | (column_name 'c1') + c2 | character varying | | (column_name 'c2') + c3 | text | collate POSIX | (column_name 'c3') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 't2') + + Foreign table "import_dest1.t3" + Column | Type | Modifiers | FDW Options +--------+--------------------------+-----------+-------------------- + c1 | timestamp with time zone | | (column_name 'c1') + c2 | typ1 | | (column_name 'c2') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 't3') + + Foreign table "import_dest1.x 4" + Column | Type | Modifiers | FDW Options +--------+-----------------------+-----------+--------------------- + c1 | double precision | | (column_name 'c1') + C 2 | text | | (column_name 'C 2') + c3 | character varying(42) | | (column_name 'c3') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 'x 4') + + Foreign table "import_dest1.x 5" + Column | Type | Modifiers | FDW Options +--------+------+-----------+------------- +Server: loopback +FDW Options: (schema_name 'import_source', table_name 'x 5') + +-- Options +CREATE SCHEMA import_dest2; +IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest2 + OPTIONS (import_default 'true'); +\det+ import_dest2 + List of foreign tables + Schema | Table | Server | FDW Options | Description +--------------+-------+----------+-------------------------------------------------+------------- + import_dest2 | t1 | loopback | (schema_name 'import_source', table_name 't1') | + import_dest2 | t2 | loopback | (schema_name 'import_source', table_name 't2') | + import_dest2 | t3 | loopback | (schema_name 'import_source', table_name 't3') | + import_dest2 | x 4 | loopback | (schema_name 'import_source', table_name 'x 4') | + import_dest2 | x 5 | loopback | (schema_name 'import_source', table_name 'x 5') | +(5 rows) + +\d import_dest2.* + Foreign table "import_dest2.t1" + Column | Type | Modifiers | FDW Options +--------+-------------------+-----------+-------------------- + c1 | integer | | (column_name 'c1') + c2 | character varying | not null | (column_name 'c2') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 't1') + + Foreign table "import_dest2.t2" + Column | Type | Modifiers | FDW Options +--------+-------------------+---------------+-------------------- + c1 | integer | default 42 | (column_name 'c1') + c2 | character varying | | (column_name 'c2') + c3 | text | collate POSIX | (column_name 'c3') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 't2') + + Foreign table "import_dest2.t3" + Column | Type | Modifiers | FDW Options +--------+--------------------------+---------------+-------------------- + c1 | timestamp with time zone | default now() | (column_name 'c1') + c2 | typ1 | | (column_name 'c2') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 't3') + + Foreign table "import_dest2.x 4" + Column | Type | Modifiers | FDW Options +--------+-----------------------+-----------+--------------------- + c1 | double precision | | (column_name 'c1') + C 2 | text | | (column_name 'C 2') + c3 | character varying(42) | | (column_name 'c3') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 'x 4') + + Foreign table "import_dest2.x 5" + Column | Type | Modifiers | FDW Options +--------+------+-----------+------------- +Server: loopback +FDW Options: 
(schema_name 'import_source', table_name 'x 5') + +CREATE SCHEMA import_dest3; +IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3 + OPTIONS (import_collate 'false', import_not_null 'false'); +\det+ import_dest3 + List of foreign tables + Schema | Table | Server | FDW Options | Description +--------------+-------+----------+-------------------------------------------------+------------- + import_dest3 | t1 | loopback | (schema_name 'import_source', table_name 't1') | + import_dest3 | t2 | loopback | (schema_name 'import_source', table_name 't2') | + import_dest3 | t3 | loopback | (schema_name 'import_source', table_name 't3') | + import_dest3 | x 4 | loopback | (schema_name 'import_source', table_name 'x 4') | + import_dest3 | x 5 | loopback | (schema_name 'import_source', table_name 'x 5') | +(5 rows) + +\d import_dest3.* + Foreign table "import_dest3.t1" + Column | Type | Modifiers | FDW Options +--------+-------------------+-----------+-------------------- + c1 | integer | | (column_name 'c1') + c2 | character varying | | (column_name 'c2') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 't1') + + Foreign table "import_dest3.t2" + Column | Type | Modifiers | FDW Options +--------+-------------------+-----------+-------------------- + c1 | integer | | (column_name 'c1') + c2 | character varying | | (column_name 'c2') + c3 | text | | (column_name 'c3') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 't2') + + Foreign table "import_dest3.t3" + Column | Type | Modifiers | FDW Options +--------+--------------------------+-----------+-------------------- + c1 | timestamp with time zone | | (column_name 'c1') + c2 | typ1 | | (column_name 'c2') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 't3') + + Foreign table "import_dest3.x 4" + Column | Type | Modifiers | FDW Options +--------+-----------------------+-----------+--------------------- + c1 | double precision | | (column_name 'c1') + C 2 | text | | (column_name 'C 2') + c3 | character varying(42) | | (column_name 'c3') +Server: loopback +FDW Options: (schema_name 'import_source', table_name 'x 4') + + Foreign table "import_dest3.x 5" + Column | Type | Modifiers | FDW Options +--------+------+-----------+------------- +Server: loopback +FDW Options: (schema_name 'import_source', table_name 'x 5') + +-- Check LIMIT TO and EXCEPT +CREATE SCHEMA import_dest4; +IMPORT FOREIGN SCHEMA import_source LIMIT TO (t1, nonesuch) + FROM SERVER loopback INTO import_dest4; +\det+ import_dest4 + List of foreign tables + Schema | Table | Server | FDW Options | Description +--------------+-------+----------+------------------------------------------------+------------- + import_dest4 | t1 | loopback | (schema_name 'import_source', table_name 't1') | +(1 row) + +IMPORT FOREIGN SCHEMA import_source EXCEPT (t1, "x 4", nonesuch) + FROM SERVER loopback INTO import_dest4; +\det+ import_dest4 + List of foreign tables + Schema | Table | Server | FDW Options | Description +--------------+-------+----------+-------------------------------------------------+------------- + import_dest4 | t1 | loopback | (schema_name 'import_source', table_name 't1') | + import_dest4 | t2 | loopback | (schema_name 'import_source', table_name 't2') | + import_dest4 | t3 | loopback | (schema_name 'import_source', table_name 't3') | + import_dest4 | x 5 | loopback | (schema_name 'import_source', table_name 'x 5') | +(4 rows) + +-- Assorted error cases +IMPORT FOREIGN SCHEMA import_source FROM 
SERVER loopback INTO import_dest4; +ERROR: relation "t1" already exists +CONTEXT: importing foreign table "t1" +IMPORT FOREIGN SCHEMA nonesuch FROM SERVER loopback INTO import_dest4; +ERROR: schema "nonesuch" is not present on foreign server "loopback" +IMPORT FOREIGN SCHEMA nonesuch FROM SERVER loopback INTO notthere; +ERROR: schema "notthere" does not exist +IMPORT FOREIGN SCHEMA nonesuch FROM SERVER nowhere INTO notthere; +ERROR: server "nowhere" does not exist +-- Check case of a type present only on the remote server. +-- We can fake this by dropping the type locally in our transaction. +CREATE TYPE "Colors" AS ENUM ('red', 'green', 'blue'); +CREATE TABLE import_source.t5 (c1 int, c2 text collate "C", "Col" "Colors"); +CREATE SCHEMA import_dest5; +BEGIN; +DROP TYPE "Colors" CASCADE; +NOTICE: drop cascades to table import_source.t5 column Col +IMPORT FOREIGN SCHEMA import_source LIMIT TO (t5) + FROM SERVER loopback INTO import_dest5; -- ERROR +ERROR: type "public.Colors" does not exist +LINE 4: "Col" public."Colors" OPTIONS (column_name 'Col') + ^ +QUERY: CREATE FOREIGN TABLE t5 ( + c1 integer OPTIONS (column_name 'c1'), + c2 text OPTIONS (column_name 'c2') COLLATE pg_catalog."C", + "Col" public."Colors" OPTIONS (column_name 'Col') +) SERVER loopback +OPTIONS (schema_name 'import_source', table_name 't5'); +CONTEXT: importing foreign table "t5" +ROLLBACK; diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c index 65e7b8946a..7547ec2817 100644 --- a/contrib/postgres_fdw/option.c +++ b/contrib/postgres_fdw/option.c @@ -3,7 +3,7 @@ * option.c * FDW option handling for postgres_fdw * - * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/option.c diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index 7dd43a9937..478e12484b 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -3,7 +3,7 @@ * postgres_fdw.c * Foreign-data wrapper for remote PostgreSQL servers * - * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/postgres_fdw.c @@ -36,7 +36,7 @@ #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/memutils.h" - +#include "utils/rel.h" PG_MODULE_MAGIC; @@ -286,6 +286,8 @@ static void postgresExplainForeignModify(ModifyTableState *mtstate, static bool postgresAnalyzeForeignTable(Relation relation, AcquireSampleRowsFunc *func, BlockNumber *totalpages); +static List *postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, + Oid serverOid); /* * Helper functions @@ -363,6 +365,9 @@ postgres_fdw_handler(PG_FUNCTION_ARGS) /* Support functions for ANALYZE */ routine->AnalyzeForeignTable = postgresAnalyzeForeignTable; + /* Support functions for IMPORT FOREIGN SCHEMA */ + routine->ImportForeignSchema = postgresImportForeignSchema; + PG_RETURN_POINTER(routine); } @@ -514,7 +519,7 @@ postgresGetForeignRelSize(PlannerInfo *root, { baserel->pages = 10; baserel->tuples = - (10 * BLCKSZ) / (baserel->width + sizeof(HeapTupleHeaderData)); + (10 * BLCKSZ) / (baserel->width + MAXALIGN(SizeofHeapTupleHeader)); } /* Estimate baserel size as best we can with local statistics. 
*/ @@ -817,13 +822,14 @@ postgresGetForeignPlan(PlannerInfo *root, } else { - RowMarkClause *rc = get_parse_rowmark(root->parse, baserel->relid); + PlanRowMark *rc = get_plan_rowmark(root->rowMarks, baserel->relid); if (rc) { /* * Relation is specified as a FOR UPDATE/SHARE target, so handle - * that. + * that. (But we could also see LCS_NONE, meaning this isn't a + * target relation after all.) * * For now, just ignore any [NO] KEY specification, since (a) it's * not clear what that means for a remote table that we don't have @@ -832,6 +838,9 @@ postgresGetForeignPlan(PlannerInfo *root, */ switch (rc->strength) { + case LCS_NONE: + /* No locking needed */ + break; case LCS_FORKEYSHARE: case LCS_FORSHARE: appendStringInfoString(&sql, " FOR SHARE"); @@ -1193,15 +1202,17 @@ postgresPlanForeignModify(PlannerInfo *root, } else if (operation == CMD_UPDATE) { - Bitmapset *tmpset = bms_copy(rte->modifiedCols); - AttrNumber col; + int col; - while ((col = bms_first_member(tmpset)) >= 0) + col = -1; + while ((col = bms_next_member(rte->modifiedCols, col)) >= 0) { - col += FirstLowInvalidHeapAttributeNumber; - if (col <= InvalidAttrNumber) /* shouldn't happen */ + /* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */ + AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber; + + if (attno <= InvalidAttrNumber) /* shouldn't happen */ elog(ERROR, "system-column update is not supported"); - targetAttrs = lappend_int(targetAttrs, col); + targetAttrs = lappend_int(targetAttrs, attno); } } @@ -2099,15 +2110,15 @@ set_transmission_modes(void) if (DateStyle != USE_ISO_DATES) (void) set_config_option("datestyle", "ISO", PGC_USERSET, PGC_S_SESSION, - GUC_ACTION_SAVE, true, 0); + GUC_ACTION_SAVE, true, 0, false); if (IntervalStyle != INTSTYLE_POSTGRES) (void) set_config_option("intervalstyle", "postgres", PGC_USERSET, PGC_S_SESSION, - GUC_ACTION_SAVE, true, 0); + GUC_ACTION_SAVE, true, 0, false); if (extra_float_digits < 3) (void) set_config_option("extra_float_digits", "3", PGC_USERSET, PGC_S_SESSION, - GUC_ACTION_SAVE, true, 0); + GUC_ACTION_SAVE, true, 0, false); return nestlevel; } @@ -2257,7 +2268,6 @@ static void store_returning_result(PgFdwModifyState *fmstate, TupleTableSlot *slot, PGresult *res) { - /* PGresult must be released before leaving this function. */ PG_TRY(); { HeapTuple newtup; @@ -2565,6 +2575,270 @@ analyze_row_processor(PGresult *res, int row, PgFdwAnalyzeState *astate) } /* + * Import a foreign schema + */ +static List * +postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid) +{ + List *commands = NIL; + bool import_collate = true; + bool import_default = false; + bool import_not_null = true; + ForeignServer *server; + UserMapping *mapping; + PGconn *conn; + StringInfoData buf; + PGresult *volatile res = NULL; + int numrows, + i; + ListCell *lc; + + /* Parse statement options */ + foreach(lc, stmt->options) + { + DefElem *def = (DefElem *) lfirst(lc); + + if (strcmp(def->defname, "import_collate") == 0) + import_collate = defGetBoolean(def); + else if (strcmp(def->defname, "import_default") == 0) + import_default = defGetBoolean(def); + else if (strcmp(def->defname, "import_not_null") == 0) + import_not_null = defGetBoolean(def); + else + ereport(ERROR, + (errcode(ERRCODE_FDW_INVALID_OPTION_NAME), + errmsg("invalid option \"%s\"", def->defname))); + } + + /* + * Get connection to the foreign server. Connection manager will + * establish new connection if necessary. 
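(Editorial aside, not part of the patch: the import_collate, import_default and import_not_null options parsed just above correspond one-to-one to the OPTIONS clause of the user-facing statement. A hypothetical invocation, with placeholder server and schema names, would look like this.)

IMPORT FOREIGN SCHEMA remote_schema
    LIMIT TO (t1, t2)
    FROM SERVER some_server INTO local_schema
    OPTIONS (import_collate 'false', import_default 'true', import_not_null 'true');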
+ */ + server = GetForeignServer(serverOid); + mapping = GetUserMapping(GetUserId(), server->serverid); + conn = GetConnection(server, mapping, false); + + /* Don't attempt to import collation if remote server hasn't got it */ + if (PQserverVersion(conn) < 90100) + import_collate = false; + + /* Create workspace for strings */ + initStringInfo(&buf); + + /* In what follows, do not risk leaking any PGresults. */ + PG_TRY(); + { + /* Check that the schema really exists */ + appendStringInfoString(&buf, "SELECT 1 FROM pg_catalog.pg_namespace WHERE nspname = "); + deparseStringLiteral(&buf, stmt->remote_schema); + + res = PQexec(conn, buf.data); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pgfdw_report_error(ERROR, res, conn, false, buf.data); + + if (PQntuples(res) != 1) + ereport(ERROR, + (errcode(ERRCODE_FDW_SCHEMA_NOT_FOUND), + errmsg("schema \"%s\" is not present on foreign server \"%s\"", + stmt->remote_schema, server->servername))); + + PQclear(res); + res = NULL; + resetStringInfo(&buf); + + /* + * Fetch all table data from this schema, possibly restricted by + * EXCEPT or LIMIT TO. + * + * Note: because we run the connection with search_path restricted to + * pg_catalog, the format_type() and pg_get_expr() outputs will always + * include a schema name for types/functions in other schemas, which + * is what we want. + */ + if (import_collate) + appendStringInfoString(&buf, + "SELECT relname, " + " attname, " + " format_type(atttypid, atttypmod), " + " attnotnull, " + " pg_get_expr(adbin, adrelid), " + " collname, " + " collnsp.nspname " + "FROM pg_class c " + " JOIN pg_namespace n ON " + " relnamespace = n.oid " + " LEFT JOIN pg_attribute a ON " + " attrelid = c.oid AND attnum > 0 " + " AND NOT attisdropped " + " LEFT JOIN pg_attrdef ad ON " + " adrelid = c.oid AND adnum = attnum " + " LEFT JOIN pg_collation coll ON " + " coll.oid = attcollation " + " LEFT JOIN pg_namespace collnsp ON " + " collnsp.oid = collnamespace "); + else + appendStringInfoString(&buf, + "SELECT relname, " + " attname, " + " format_type(atttypid, atttypmod), " + " attnotnull, " + " pg_get_expr(adbin, adrelid), " + " NULL, NULL " + "FROM pg_class c " + " JOIN pg_namespace n ON " + " relnamespace = n.oid " + " LEFT JOIN pg_attribute a ON " + " attrelid = c.oid AND attnum > 0 " + " AND NOT attisdropped " + " LEFT JOIN pg_attrdef ad ON " + " adrelid = c.oid AND adnum = attnum "); + + appendStringInfoString(&buf, + "WHERE c.relkind IN ('r', 'v', 'f', 'm') " + " AND n.nspname = "); + deparseStringLiteral(&buf, stmt->remote_schema); + + /* Apply restrictions for LIMIT TO and EXCEPT */ + if (stmt->list_type == FDW_IMPORT_SCHEMA_LIMIT_TO || + stmt->list_type == FDW_IMPORT_SCHEMA_EXCEPT) + { + bool first_item = true; + + appendStringInfoString(&buf, " AND c.relname "); + if (stmt->list_type == FDW_IMPORT_SCHEMA_EXCEPT) + appendStringInfoString(&buf, "NOT "); + appendStringInfoString(&buf, "IN ("); + + /* Append list of table names within IN clause */ + foreach(lc, stmt->table_list) + { + RangeVar *rv = (RangeVar *) lfirst(lc); + + if (first_item) + first_item = false; + else + appendStringInfoString(&buf, ", "); + deparseStringLiteral(&buf, rv->relname); + } + appendStringInfoString(&buf, ")"); + } + + /* Append ORDER BY at the end of query to ensure output ordering */ + appendStringInfo(&buf, " ORDER BY c.relname, a.attnum"); + + /* Fetch the data */ + res = PQexec(conn, buf.data); + if (PQresultStatus(res) != PGRES_TUPLES_OK) + pgfdw_report_error(ERROR, res, conn, false, buf.data); + + /* Process results */ + numrows 
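(Editorial aside: pieced together from the string fragments above, the catalog query sent to the remote server, in its import_collate form and with the regression tests' schema name standing in for the deparsed literal, reads roughly as follows.)

SELECT relname, attname, format_type(atttypid, atttypmod),
       attnotnull, pg_get_expr(adbin, adrelid),
       collname, collnsp.nspname
FROM pg_class c
  JOIN pg_namespace n ON relnamespace = n.oid
  LEFT JOIN pg_attribute a ON attrelid = c.oid AND attnum > 0
                          AND NOT attisdropped
  LEFT JOIN pg_attrdef ad ON adrelid = c.oid AND adnum = attnum
  LEFT JOIN pg_collation coll ON coll.oid = attcollation
  LEFT JOIN pg_namespace collnsp ON collnsp.oid = collnamespace
WHERE c.relkind IN ('r', 'v', 'f', 'm')
  AND n.nspname = 'import_source'        -- deparsed string literal
ORDER BY c.relname, a.attnum;            -- one row per column, grouped by table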
= PQntuples(res); + /* note: incrementation of i happens in inner loop's while() test */ + for (i = 0; i < numrows;) + { + char *tablename = PQgetvalue(res, i, 0); + bool first_item = true; + + resetStringInfo(&buf); + appendStringInfo(&buf, "CREATE FOREIGN TABLE %s (\n", + quote_identifier(tablename)); + + /* Scan all rows for this table */ + do + { + char *attname; + char *typename; + char *attnotnull; + char *attdefault; + char *collname; + char *collnamespace; + + /* If table has no columns, we'll see nulls here */ + if (PQgetisnull(res, i, 1)) + continue; + + attname = PQgetvalue(res, i, 1); + typename = PQgetvalue(res, i, 2); + attnotnull = PQgetvalue(res, i, 3); + attdefault = PQgetisnull(res, i, 4) ? (char *) NULL : + PQgetvalue(res, i, 4); + collname = PQgetisnull(res, i, 5) ? (char *) NULL : + PQgetvalue(res, i, 5); + collnamespace = PQgetisnull(res, i, 6) ? (char *) NULL : + PQgetvalue(res, i, 6); + + if (first_item) + first_item = false; + else + appendStringInfoString(&buf, ",\n"); + + /* Print column name and type */ + appendStringInfo(&buf, " %s %s", + quote_identifier(attname), + typename); + + /* + * Add column_name option so that renaming the foreign table's + * column doesn't break the association to the underlying + * column. + */ + appendStringInfoString(&buf, " OPTIONS (column_name "); + deparseStringLiteral(&buf, attname); + appendStringInfoString(&buf, ")"); + + /* Add COLLATE if needed */ + if (import_collate && collname != NULL && collnamespace != NULL) + appendStringInfo(&buf, " COLLATE %s.%s", + quote_identifier(collnamespace), + quote_identifier(collname)); + + /* Add DEFAULT if needed */ + if (import_default && attdefault != NULL) + appendStringInfo(&buf, " DEFAULT %s", attdefault); + + /* Add NOT NULL if needed */ + if (import_not_null && attnotnull[0] == 't') + appendStringInfoString(&buf, " NOT NULL"); + } + while (++i < numrows && + strcmp(PQgetvalue(res, i, 0), tablename) == 0); + + /* + * Add server name and table-level options. We specify remote + * schema and table name as options (the latter to ensure that + * renaming the foreign table doesn't break the association). + */ + appendStringInfo(&buf, "\n) SERVER %s\nOPTIONS (", + quote_identifier(server->servername)); + + appendStringInfoString(&buf, "schema_name "); + deparseStringLiteral(&buf, stmt->remote_schema); + appendStringInfoString(&buf, ", table_name "); + deparseStringLiteral(&buf, tablename); + + appendStringInfoString(&buf, ");"); + + commands = lappend(commands, pstrdup(buf.data)); + } + + /* Clean up */ + PQclear(res); + res = NULL; + } + PG_CATCH(); + { + if (res) + PQclear(res); + PG_RE_THROW(); + } + PG_END_TRY(); + + ReleaseConnection(conn); + + return commands; +} + +/* * Create a tuple from the specified row of the PGresult. 
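(Editorial aside: for each table returned by that query, the loop above assembles one CREATE FOREIGN TABLE command. For the regression tests' import_source.t1 the generated text comes out roughly as below; the column_name, schema_name and table_name options are what keep the foreign table tied to the remote objects if either side is later renamed.)

CREATE FOREIGN TABLE t1 (
  c1 integer OPTIONS (column_name 'c1'),
  c2 character varying OPTIONS (column_name 'c2') NOT NULL
) SERVER loopback
OPTIONS (schema_name 'import_source', table_name 't1');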
* * rel is the local representation of the foreign table, attinmeta is diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h index 8aa8f1a1b5..950c6f79a2 100644 --- a/contrib/postgres_fdw/postgres_fdw.h +++ b/contrib/postgres_fdw/postgres_fdw.h @@ -3,7 +3,7 @@ * postgres_fdw.h * Foreign-data wrapper for remote PostgreSQL servers * - * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2012-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/postgres_fdw/postgres_fdw.h @@ -16,7 +16,7 @@ #include "foreign/foreign.h" #include "lib/stringinfo.h" #include "nodes/relation.h" -#include "utils/rel.h" +#include "utils/relcache.h" #include "libpq-fe.h" @@ -73,5 +73,6 @@ extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root, extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel); extern void deparseAnalyzeSql(StringInfo buf, Relation rel, List **retrieved_attrs); +extern void deparseStringLiteral(StringInfo buf, const char *val); #endif /* POSTGRES_FDW_H */ diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index 6187839453..4a23457e79 100644 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -5,8 +5,14 @@ CREATE EXTENSION postgres_fdw; CREATE SERVER testserver1 FOREIGN DATA WRAPPER postgres_fdw; -CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname 'contrib_regression'); +DO $d$ + BEGIN + EXECUTE $$CREATE SERVER loopback FOREIGN DATA WRAPPER postgres_fdw + OPTIONS (dbname '$$||current_database()||$$', + port '$$||current_setting('port')||$$' + )$$; + END; +$d$; CREATE USER MAPPING FOR public SERVER testserver1 OPTIONS (user 'value', password 'value'); @@ -145,6 +151,11 @@ SELECT * FROM ft1 WHERE false; -- with WHERE clause EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1'; SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1'; +-- with FOR UPDATE/SHARE +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE; +SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE; +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE; +SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE; -- aggregate SELECT COUNT(*) FROM ft1 t1; -- join two tables @@ -248,6 +259,20 @@ DEALLOCATE st3; DEALLOCATE st4; DEALLOCATE st5; +-- System columns, except ctid, should not be sent to remote +EXPLAIN (VERBOSE, COSTS false) +SELECT * FROM ft1 t1 WHERE t1.tableoid = 'pg_class'::regclass LIMIT 1; +SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIMIT 1; +EXPLAIN (VERBOSE, COSTS false) +SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1; +SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1; +EXPLAIN (VERBOSE, COSTS false) +SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)'; +SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)'; +EXPLAIN (VERBOSE, COSTS false) +SELECT ctid, * FROM ft1 t1 LIMIT 1; +SELECT ctid, * FROM ft1 t1 LIMIT 1; + -- =================================================================== -- used in pl/pgsql function -- =================================================================== @@ -381,6 +406,36 @@ select c2, count(*) from ft2 where c2 < 500 group by 1 order by 1; select c2, count(*) from "S 1"."T 1" where c2 < 500 group by 1 order by 1; -- =================================================================== +-- test check constraints +-- 
=================================================================== + +-- Consistent check constraints provide consistent results +ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c2positive CHECK (c2 >= 0); +EXPLAIN (VERBOSE, COSTS false) SELECT count(*) FROM ft1 WHERE c2 < 0; +SELECT count(*) FROM ft1 WHERE c2 < 0; +SET constraint_exclusion = 'on'; +EXPLAIN (VERBOSE, COSTS false) SELECT count(*) FROM ft1 WHERE c2 < 0; +SELECT count(*) FROM ft1 WHERE c2 < 0; +RESET constraint_exclusion; +-- check constraint is enforced on the remote side, not locally +INSERT INTO ft1(c1, c2) VALUES(1111, -2); -- c2positive +UPDATE ft1 SET c2 = -c2 WHERE c1 = 1; -- c2positive +ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2positive; + +-- But inconsistent check constraints provide inconsistent results +ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c2negative CHECK (c2 < 0); +EXPLAIN (VERBOSE, COSTS false) SELECT count(*) FROM ft1 WHERE c2 >= 0; +SELECT count(*) FROM ft1 WHERE c2 >= 0; +SET constraint_exclusion = 'on'; +EXPLAIN (VERBOSE, COSTS false) SELECT count(*) FROM ft1 WHERE c2 >= 0; +SELECT count(*) FROM ft1 WHERE c2 >= 0; +RESET constraint_exclusion; +-- local check constraint is not actually enforced +INSERT INTO ft1(c1, c2) VALUES(1111, 2); +UPDATE ft1 SET c2 = c2 + 1 WHERE c1 = 1; +ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c2negative; + +-- =================================================================== -- test serial columns (ie, sequence-based defaults) -- =================================================================== create table loc1 (f1 serial, f2 text); @@ -609,3 +664,170 @@ UPDATE rem1 SET f2 = 'testo'; -- Test returning a system attribute INSERT INTO rem1(f2) VALUES ('test') RETURNING ctid; + +-- =================================================================== +-- test inheritance features +-- =================================================================== + +CREATE TABLE a (aa TEXT); +CREATE TABLE loct (aa TEXT, bb TEXT); +CREATE FOREIGN TABLE b (bb TEXT) INHERITS (a) + SERVER loopback OPTIONS (table_name 'loct'); + +INSERT INTO a(aa) VALUES('aaa'); +INSERT INTO a(aa) VALUES('aaaa'); +INSERT INTO a(aa) VALUES('aaaaa'); + +INSERT INTO b(aa) VALUES('bbb'); +INSERT INTO b(aa) VALUES('bbbb'); +INSERT INTO b(aa) VALUES('bbbbb'); + +SELECT tableoid::regclass, * FROM a; +SELECT tableoid::regclass, * FROM b; +SELECT tableoid::regclass, * FROM ONLY a; + +UPDATE a SET aa = 'zzzzzz' WHERE aa LIKE 'aaaa%'; + +SELECT tableoid::regclass, * FROM a; +SELECT tableoid::regclass, * FROM b; +SELECT tableoid::regclass, * FROM ONLY a; + +UPDATE b SET aa = 'new'; + +SELECT tableoid::regclass, * FROM a; +SELECT tableoid::regclass, * FROM b; +SELECT tableoid::regclass, * FROM ONLY a; + +UPDATE a SET aa = 'newtoo'; + +SELECT tableoid::regclass, * FROM a; +SELECT tableoid::regclass, * FROM b; +SELECT tableoid::regclass, * FROM ONLY a; + +DELETE FROM a; + +SELECT tableoid::regclass, * FROM a; +SELECT tableoid::regclass, * FROM b; +SELECT tableoid::regclass, * FROM ONLY a; + +DROP TABLE a CASCADE; +DROP TABLE loct; + +-- Check SELECT FOR UPDATE/SHARE with an inherited source table +create table loct1 (f1 int, f2 int, f3 int); +create table loct2 (f1 int, f2 int, f3 int); + +create table foo (f1 int, f2 int); +create foreign table foo2 (f3 int) inherits (foo) + server loopback options (table_name 'loct1'); +create table bar (f1 int, f2 int); +create foreign table bar2 (f3 int) inherits (bar) + server loopback options (table_name 'loct2'); + +insert into foo values(1,1); +insert into foo values(3,3); +insert 
into foo2 values(2,2,2); +insert into foo2 values(4,4,4); +insert into bar values(1,11); +insert into bar values(2,22); +insert into bar values(6,66); +insert into bar2 values(3,33,33); +insert into bar2 values(4,44,44); +insert into bar2 values(7,77,77); + +explain (verbose, costs off) +select * from bar where f1 in (select f1 from foo) for update; +select * from bar where f1 in (select f1 from foo) for update; + +explain (verbose, costs off) +select * from bar where f1 in (select f1 from foo) for share; +select * from bar where f1 in (select f1 from foo) for share; + +-- Check UPDATE with inherited target and an inherited source table +explain (verbose, costs off) +update bar set f2 = f2 + 100 where f1 in (select f1 from foo); +update bar set f2 = f2 + 100 where f1 in (select f1 from foo); + +select tableoid::regclass, * from bar order by 1,2; + +-- Check UPDATE with inherited target and an appendrel subquery +explain (verbose, costs off) +update bar set f2 = f2 + 100 +from + ( select f1 from foo union all select f1+3 from foo ) ss +where bar.f1 = ss.f1; +update bar set f2 = f2 + 100 +from + ( select f1 from foo union all select f1+3 from foo ) ss +where bar.f1 = ss.f1; + +select tableoid::regclass, * from bar order by 1,2; + +-- Test that WHERE CURRENT OF is not supported +begin; +declare c cursor for select * from bar where f1 = 7; +fetch from c; +update bar set f2 = null where current of c; +rollback; + +drop table foo cascade; +drop table bar cascade; +drop table loct1; +drop table loct2; + +-- =================================================================== +-- test IMPORT FOREIGN SCHEMA +-- =================================================================== + +CREATE SCHEMA import_source; +CREATE TABLE import_source.t1 (c1 int, c2 varchar NOT NULL); +CREATE TABLE import_source.t2 (c1 int default 42, c2 varchar NULL, c3 text collate "POSIX"); +CREATE TYPE typ1 AS (m1 int, m2 varchar); +CREATE TABLE import_source.t3 (c1 timestamptz default now(), c2 typ1); +CREATE TABLE import_source."x 4" (c1 float8, "C 2" text, c3 varchar(42)); +CREATE TABLE import_source."x 5" (c1 float8); +ALTER TABLE import_source."x 5" DROP COLUMN c1; + +CREATE SCHEMA import_dest1; +IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest1; +\det+ import_dest1 +\d import_dest1.* + +-- Options +CREATE SCHEMA import_dest2; +IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest2 + OPTIONS (import_default 'true'); +\det+ import_dest2 +\d import_dest2.* +CREATE SCHEMA import_dest3; +IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest3 + OPTIONS (import_collate 'false', import_not_null 'false'); +\det+ import_dest3 +\d import_dest3.* + +-- Check LIMIT TO and EXCEPT +CREATE SCHEMA import_dest4; +IMPORT FOREIGN SCHEMA import_source LIMIT TO (t1, nonesuch) + FROM SERVER loopback INTO import_dest4; +\det+ import_dest4 +IMPORT FOREIGN SCHEMA import_source EXCEPT (t1, "x 4", nonesuch) + FROM SERVER loopback INTO import_dest4; +\det+ import_dest4 + +-- Assorted error cases +IMPORT FOREIGN SCHEMA import_source FROM SERVER loopback INTO import_dest4; +IMPORT FOREIGN SCHEMA nonesuch FROM SERVER loopback INTO import_dest4; +IMPORT FOREIGN SCHEMA nonesuch FROM SERVER loopback INTO notthere; +IMPORT FOREIGN SCHEMA nonesuch FROM SERVER nowhere INTO notthere; + +-- Check case of a type present only on the remote server. +-- We can fake this by dropping the type locally in our transaction. 
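(Editorial aside on the LIMIT TO / EXCEPT tests above: names that do not exist in the remote schema, such as nonesuch, are silently skipped rather than reported as errors, which is why the first import brings in only t1. A hypothetical equivalent against a fresh destination schema:)

CREATE SCHEMA import_dest_demo;  -- hypothetical schema, not part of the test suite
IMPORT FOREIGN SCHEMA import_source LIMIT TO (t2, nonesuch)
    FROM SERVER loopback INTO import_dest_demo;
-- imports only t2; the unknown name produces no error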
+CREATE TYPE "Colors" AS ENUM ('red', 'green', 'blue'); +CREATE TABLE import_source.t5 (c1 int, c2 text collate "C", "Col" "Colors"); + +CREATE SCHEMA import_dest5; +BEGIN; +DROP TYPE "Colors" CASCADE; +IMPORT FOREIGN SCHEMA import_source LIMIT TO (t5) + FROM SERVER loopback INTO import_dest5; -- ERROR +ROLLBACK; diff --git a/contrib/seg/Makefile b/contrib/seg/Makefile index fb9c5765c3..d6de8a2444 100644 --- a/contrib/seg/Makefile +++ b/contrib/seg/Makefile @@ -1,10 +1,11 @@ # contrib/seg/Makefile MODULE_big = seg -OBJS = seg.o segparse.o +OBJS = seg.o segparse.o $(WIN32RES) EXTENSION = seg DATA = seg--1.0.sql seg--unpackaged--1.0.sql +PGFILEDESC = "seg - line segment data type" REGRESS = seg diff --git a/contrib/seg/seg--unpackaged--1.0.sql b/contrib/seg/seg--unpackaged--1.0.sql index ebd6b3bc5b..3987ebf3dd 100644 --- a/contrib/seg/seg--unpackaged--1.0.sql +++ b/contrib/seg/seg--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/seg/seg--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION seg" to load this file. \quit +\echo Use "CREATE EXTENSION seg FROM unpackaged" to load this file. \quit ALTER EXTENSION seg ADD type seg; ALTER EXTENSION seg ADD function seg_in(cstring); diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c index 0807e238f1..8e2d5343ae 100644 --- a/contrib/seg/seg.c +++ b/contrib/seg/seg.c @@ -23,15 +23,6 @@ PG_MODULE_MAGIC; -extern int seg_yyparse(SEG *result); -extern void seg_yyerror(SEG *result, const char *message); -extern void seg_scanner_init(const char *str); -extern void seg_scanner_finish(void); - -/* -extern int seg_yydebug; -*/ - /* * Auxiliary data structure for picksplit method. */ @@ -103,7 +94,6 @@ bool seg_different(SEG *a, SEG *b); ** Auxiliary funxtions */ static int restore(char *s, float val, int n); -int significant_digits(char *s); /***************************************************************************** diff --git a/contrib/seg/segdata.h b/contrib/seg/segdata.h index 90be6e27aa..cac68ee2b2 100644 --- a/contrib/seg/segdata.h +++ b/contrib/seg/segdata.h @@ -10,3 +10,15 @@ typedef struct SEG char l_ext; char u_ext; } SEG; + +/* in seg.c */ +extern int significant_digits(char *str); + +/* in segscan.l */ +extern int seg_yylex(void); +extern void seg_yyerror(SEG *result, const char *message) pg_attribute_noreturn(); +extern void seg_scanner_init(const char *str); +extern void seg_scanner_finish(void); + +/* in segparse.y */ +extern int seg_yyparse(SEG *result); diff --git a/contrib/seg/segparse.y b/contrib/seg/segparse.y index 3fad9910bd..045ff91f3e 100644 --- a/contrib/seg/segparse.y +++ b/contrib/seg/segparse.y @@ -7,6 +7,7 @@ #include "fmgr.h" #include "utils/builtins.h" + #include "segdata.h" /* @@ -20,13 +21,6 @@ #define YYMALLOC palloc #define YYFREE pfree -extern int seg_yylex(void); - -extern int significant_digits(char *str); /* defined in seg.c */ - -extern int seg_yyparse(SEG *result); -extern void seg_yyerror(SEG *result, const char *message); - static float seg_atof(char *value); static char strbuf[25] = { diff --git a/contrib/seg/segscan.l b/contrib/seg/segscan.l index a3e685488a..6db24fdd1f 100644 --- a/contrib/seg/segscan.l +++ b/contrib/seg/segscan.l @@ -3,8 +3,6 @@ * A scanner for EMP-style numeric ranges */ -#include "postgres.h" - /* No reason to constrain amount of data slurped */ #define YY_READ_BUF_SIZE 16777216 @@ -22,12 +20,6 @@ fprintf_to_ereport(const char *fmt, const char *msg) static YY_BUFFER_STATE scanbufhandle; static char *scanbuf; static int 
scanbuflen; - -/* flex 2.5.4 doesn't bother with a decl for this */ -int seg_yylex(void); - -void seg_scanner_init(const char *str); -void seg_scanner_finish(void); %} %option 8bit @@ -59,7 +51,7 @@ float ({integer}|{real})([eE]{integer})? %% -void __attribute__((noreturn)) +void yyerror(SEG *result, const char *message) { if (*yytext == YY_END_OF_BUFFER_CHAR) diff --git a/contrib/sepgsql/Makefile b/contrib/sepgsql/Makefile index ff3a61da25..f194b7ed8a 100644 --- a/contrib/sepgsql/Makefile +++ b/contrib/sepgsql/Makefile @@ -2,8 +2,9 @@ MODULE_big = sepgsql OBJS = hooks.o selinux.o uavc.o label.o dml.o \ - database.o schema.o relation.o proc.o + database.o schema.o relation.o proc.o $(WIN32RES) DATA_built = sepgsql.sql +PGFILEDESC = "sepgsql - SELinux integration" # Note: because we don't tell the Makefile there are any regression tests, # we have to clean those result files explicitly diff --git a/contrib/sepgsql/database.c b/contrib/sepgsql/database.c index cc8b31eb74..b5cfcfe0e4 100644 --- a/contrib/sepgsql/database.c +++ b/contrib/sepgsql/database.c @@ -4,7 +4,7 @@ * * Routines corresponding to database objects * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/dml.c b/contrib/sepgsql/dml.c index bb82c0d6d2..36c6a37ac1 100644 --- a/contrib/sepgsql/dml.c +++ b/contrib/sepgsql/dml.c @@ -4,7 +4,7 @@ * * Routines to handle DML permission checks * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ @@ -93,10 +93,7 @@ fixup_whole_row_references(Oid relOid, Bitmapset *columns) static Bitmapset * fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns) { - AttrNumber attno; - Bitmapset *tmpset; Bitmapset *result = NULL; - char *attname; int index; /* @@ -105,10 +102,12 @@ fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns) if (parentId == childId) return columns; - tmpset = bms_copy(columns); - while ((index = bms_first_member(tmpset)) > 0) + index = -1; + while ((index = bms_next_member(columns, index)) >= 0) { - attno = index + FirstLowInvalidHeapAttributeNumber; + /* bit numbers are offset by FirstLowInvalidHeapAttributeNumber */ + AttrNumber attno = index + FirstLowInvalidHeapAttributeNumber; + char *attname; /* * whole-row-reference shall be fixed-up later @@ -128,12 +127,11 @@ fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns) elog(ERROR, "cache lookup failed for attribute %s of relation %u", attname, childId); - index = attno - FirstLowInvalidHeapAttributeNumber; - result = bms_add_member(result, index); + result = bms_add_member(result, + attno - FirstLowInvalidHeapAttributeNumber); pfree(attname); } - bms_free(tmpset); return result; } diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c index d5338fa38d..bc0e3b6859 100644 --- a/contrib/sepgsql/hooks.c +++ b/contrib/sepgsql/hooks.c @@ -4,7 +4,7 @@ * * Entrypoints of the hooks in PostgreSQL, and dispatches the callbacks. 
* - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/label.c b/contrib/sepgsql/label.c index 2682b37864..ef7661c82e 100644 --- a/contrib/sepgsql/label.c +++ b/contrib/sepgsql/label.c @@ -4,7 +4,7 @@ * * Routines to support SELinux labels (security context) * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ @@ -532,7 +532,10 @@ sepgsql_object_relabel(const ObjectAddress *object, const char *seclabel) break; default: - elog(ERROR, "unsupported object type: %u", object->classId); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("sepgsql provider does not support labels on %s", + getObjectTypeDescription(object)))); break; } } diff --git a/contrib/sepgsql/launcher b/contrib/sepgsql/launcher index 62a6c2737d..c0c3ab75d7 100755 --- a/contrib/sepgsql/launcher +++ b/contrib/sepgsql/launcher @@ -2,7 +2,7 @@ # # A wrapper script to launch psql command in regression test # -# Copyright (c) 2010-2014, PostgreSQL Global Development Group +# Copyright (c) 2010-2015, PostgreSQL Global Development Group # # ------------------------------------------------------------------------- diff --git a/contrib/sepgsql/proc.c b/contrib/sepgsql/proc.c index 1880eb43fa..c55b2a0f9e 100644 --- a/contrib/sepgsql/proc.c +++ b/contrib/sepgsql/proc.c @@ -4,7 +4,7 @@ * * Routines corresponding to procedure objects * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/relation.c b/contrib/sepgsql/relation.c index 14c877ea32..6732ef8588 100644 --- a/contrib/sepgsql/relation.c +++ b/contrib/sepgsql/relation.c @@ -4,7 +4,7 @@ * * Routines corresponding to relation/attribute objects * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/schema.c b/contrib/sepgsql/schema.c index fc103b0260..8b2b054c7e 100644 --- a/contrib/sepgsql/schema.c +++ b/contrib/sepgsql/schema.c @@ -4,7 +4,7 @@ * * Routines corresponding to schema objects * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/selinux.c b/contrib/sepgsql/selinux.c index b7a1083ee6..e0912795fa 100644 --- a/contrib/sepgsql/selinux.c +++ b/contrib/sepgsql/selinux.c @@ -5,7 +5,7 @@ * Interactions between userspace and selinux in kernelspace, * using libselinux api. 
* - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/sepgsql.h b/contrib/sepgsql/sepgsql.h index 6dadb1dea5..46ca2279f4 100644 --- a/contrib/sepgsql/sepgsql.h +++ b/contrib/sepgsql/sepgsql.h @@ -4,7 +4,7 @@ * * Definitions corresponding to SE-PostgreSQL * - * Copyright (c) 2010-2014, PostgreSQL Global Development Group + * Copyright (c) 2010-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/sepgsql/uavc.c b/contrib/sepgsql/uavc.c index b014b01f36..428bf89d7e 100644 --- a/contrib/sepgsql/uavc.c +++ b/contrib/sepgsql/uavc.c @@ -6,7 +6,7 @@ * access control decisions recently used, and reduce number of kernel * invocations to avoid unnecessary performance hit. * - * Copyright (c) 2011-2014, PostgreSQL Global Development Group + * Copyright (c) 2011-2015, PostgreSQL Global Development Group * * ------------------------------------------------------------------------- */ diff --git a/contrib/spi/Makefile b/contrib/spi/Makefile index 0c11bfcbbd..10ab5bb5fe 100644 --- a/contrib/spi/Makefile +++ b/contrib/spi/Makefile @@ -9,6 +9,7 @@ DATA = autoinc--1.0.sql autoinc--unpackaged--1.0.sql \ moddatetime--1.0.sql moddatetime--unpackaged--1.0.sql \ refint--1.0.sql refint--unpackaged--1.0.sql \ timetravel--1.0.sql timetravel--unpackaged--1.0.sql +PGFILEDESC = "spi - examples of using SPI and triggers" DOCS = $(addsuffix .example, $(MODULES)) diff --git a/contrib/spi/autoinc--unpackaged--1.0.sql b/contrib/spi/autoinc--unpackaged--1.0.sql index cfe2065d3d..e5289e834f 100644 --- a/contrib/spi/autoinc--unpackaged--1.0.sql +++ b/contrib/spi/autoinc--unpackaged--1.0.sql @@ -1,6 +1,6 @@ /* contrib/spi/autoinc--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION autoinc" to load this file. \quit +\echo Use "CREATE EXTENSION autoinc FROM unpackaged" to load this file. \quit ALTER EXTENSION autoinc ADD function autoinc(); diff --git a/contrib/spi/insert_username--unpackaged--1.0.sql b/contrib/spi/insert_username--unpackaged--1.0.sql index 91a5d1f309..eb26ba0bd1 100644 --- a/contrib/spi/insert_username--unpackaged--1.0.sql +++ b/contrib/spi/insert_username--unpackaged--1.0.sql @@ -1,6 +1,6 @@ /* contrib/spi/insert_username--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION insert_username" to load this file. \quit +\echo Use "CREATE EXTENSION insert_username FROM unpackaged" to load this file. \quit ALTER EXTENSION insert_username ADD function insert_username(); diff --git a/contrib/spi/moddatetime--unpackaged--1.0.sql b/contrib/spi/moddatetime--unpackaged--1.0.sql index caa49ce0dc..c681fa7ed9 100644 --- a/contrib/spi/moddatetime--unpackaged--1.0.sql +++ b/contrib/spi/moddatetime--unpackaged--1.0.sql @@ -1,6 +1,6 @@ /* contrib/spi/moddatetime--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION moddatetime" to load this file. \quit +\echo Use "CREATE EXTENSION moddatetime FROM unpackaged" to load this file. 
\quit ALTER EXTENSION moddatetime ADD function moddatetime(); diff --git a/contrib/spi/refint--unpackaged--1.0.sql b/contrib/spi/refint--unpackaged--1.0.sql index cd9c9b0c36..461ed157c3 100644 --- a/contrib/spi/refint--unpackaged--1.0.sql +++ b/contrib/spi/refint--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/spi/refint--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION refint" to load this file. \quit +\echo Use "CREATE EXTENSION refint FROM unpackaged" to load this file. \quit ALTER EXTENSION refint ADD function check_primary_key(); ALTER EXTENSION refint ADD function check_foreign_key(); diff --git a/contrib/spi/timetravel--unpackaged--1.0.sql b/contrib/spi/timetravel--unpackaged--1.0.sql index dd07a133a5..121bceba9b 100644 --- a/contrib/spi/timetravel--unpackaged--1.0.sql +++ b/contrib/spi/timetravel--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/spi/timetravel--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION timetravel" to load this file. \quit +\echo Use "CREATE EXTENSION timetravel FROM unpackaged" to load this file. \quit ALTER EXTENSION timetravel ADD function timetravel(); ALTER EXTENSION timetravel ADD function set_timetravel(name,integer); diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c index a37cbee863..0699438d6f 100644 --- a/contrib/spi/timetravel.c +++ b/contrib/spi/timetravel.c @@ -35,10 +35,10 @@ static int nPlans = 0; typedef struct _TTOffList { struct _TTOffList *next; - char name[1]; + char name[FLEXIBLE_ARRAY_MEMBER]; } TTOffList; -static TTOffList TTOff = {NULL, {0}}; +static TTOffList *TTOff = NULL; static int findTTStatus(char *name); static EPlan *find_plan(char *ident, EPlan **eplan, int *nplans); @@ -428,10 +428,11 @@ set_timetravel(PG_FUNCTION_ARGS) char *d; char *s; int32 ret; - TTOffList *p, + TTOffList *prev, *pp; - for (pp = (p = &TTOff)->next; pp; pp = (p = pp)->next) + prev = NULL; + for (pp = TTOff; pp; prev = pp, pp = pp->next) { if (namestrcmp(relname, pp->name) == 0) break; @@ -442,7 +443,10 @@ set_timetravel(PG_FUNCTION_ARGS) if (on != 0) { /* turn ON */ - p->next = pp->next; + if (prev) + prev->next = pp->next; + else + TTOff = pp->next; free(pp); } ret = 0; @@ -456,15 +460,18 @@ set_timetravel(PG_FUNCTION_ARGS) s = rname = DatumGetCString(DirectFunctionCall1(nameout, NameGetDatum(relname))); if (s) { - pp = malloc(sizeof(TTOffList) + strlen(rname)); + pp = malloc(offsetof(TTOffList, name) +strlen(rname) + 1); if (pp) { pp->next = NULL; - p->next = pp; d = pp->name; while (*s) *d++ = tolower((unsigned char) *s++); *d = '\0'; + if (prev) + prev->next = pp; + else + TTOff = pp; } pfree(rname); } @@ -486,7 +493,7 @@ get_timetravel(PG_FUNCTION_ARGS) Name relname = PG_GETARG_NAME(0); TTOffList *pp; - for (pp = TTOff.next; pp; pp = pp->next) + for (pp = TTOff; pp; pp = pp->next) { if (namestrcmp(relname, pp->name) == 0) PG_RETURN_INT32(0); @@ -499,7 +506,7 @@ findTTStatus(char *name) { TTOffList *pp; - for (pp = TTOff.next; pp; pp = pp->next) + for (pp = TTOff; pp; pp = pp->next) if (pg_strcasecmp(name, pp->name) == 0) return 0; return 1; diff --git a/contrib/sslinfo/Makefile b/contrib/sslinfo/Makefile index 0dee6ed2f7..86cbf053e6 100644 --- a/contrib/sslinfo/Makefile +++ b/contrib/sslinfo/Makefile @@ -1,10 +1,11 @@ # contrib/sslinfo/Makefile MODULE_big = sslinfo -OBJS = sslinfo.o +OBJS = sslinfo.o $(WIN32RES) EXTENSION = sslinfo DATA = sslinfo--1.0.sql sslinfo--unpackaged--1.0.sql 
+PGFILEDESC = "sslinfo - information about client SSL certificate" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/sslinfo/sslinfo--unpackaged--1.0.sql b/contrib/sslinfo/sslinfo--unpackaged--1.0.sql index e4b868423b..07407acb54 100644 --- a/contrib/sslinfo/sslinfo--unpackaged--1.0.sql +++ b/contrib/sslinfo/sslinfo--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/sslinfo/sslinfo--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION sslinfo" to load this file. \quit +\echo Use "CREATE EXTENSION sslinfo FROM unpackaged" to load this file. \quit ALTER EXTENSION sslinfo ADD function ssl_client_serial(); ALTER EXTENSION sslinfo ADD function ssl_is_used(); diff --git a/contrib/sslinfo/sslinfo.c b/contrib/sslinfo/sslinfo.c index db491a4bc8..da201bde33 100644 --- a/contrib/sslinfo/sslinfo.c +++ b/contrib/sslinfo/sslinfo.c @@ -18,10 +18,8 @@ #include <openssl/x509.h> #include <openssl/asn1.h> - PG_MODULE_MAGIC; - static Datum X509_NAME_field_to_text(X509_NAME *name, text *fieldName); static Datum X509_NAME_to_text(X509_NAME *name); static Datum ASN1_STRING_to_text(ASN1_STRING *str); @@ -37,7 +35,7 @@ PG_FUNCTION_INFO_V1(ssl_is_used); Datum ssl_is_used(PG_FUNCTION_ARGS) { - PG_RETURN_BOOL(MyProcPort->ssl != NULL); + PG_RETURN_BOOL(MyProcPort->ssl_in_use); } diff --git a/contrib/start-scripts/linux b/contrib/start-scripts/linux index b950cf512c..2dff0094cd 100644 --- a/contrib/start-scripts/linux +++ b/contrib/start-scripts/linux @@ -42,15 +42,17 @@ PGLOG="$PGDATA/serverlog" # It's often a good idea to protect the postmaster from being killed by the # OOM killer (which will tend to preferentially kill the postmaster because -# of the way it accounts for shared memory). Setting the OOM_SCORE_ADJ value -# to -1000 will disable OOM kill altogether. If you enable this, you probably -# want to compile PostgreSQL with "-DLINUX_OOM_SCORE_ADJ=0", so that -# individual backends can still be killed by the OOM killer. -#OOM_SCORE_ADJ=-1000 +# of the way it accounts for shared memory). To do that, uncomment these +# three lines: +#PG_OOM_ADJUST_FILE=/proc/self/oom_score_adj +#PG_MASTER_OOM_SCORE_ADJ=-1000 +#PG_CHILD_OOM_SCORE_ADJ=0 # Older Linux kernels may not have /proc/self/oom_score_adj, but instead -# /proc/self/oom_adj, which works similarly except the disable value is -17. -# For such a system, enable this and compile with "-DLINUX_OOM_ADJ=0". -#OOM_ADJ=-17 +# /proc/self/oom_adj, which works similarly except for having a different +# range of scores. For such a system, uncomment these three lines instead: +#PG_OOM_ADJUST_FILE=/proc/self/oom_adj +#PG_MASTER_OOM_SCORE_ADJ=-17 +#PG_CHILD_OOM_SCORE_ADJ=0 ## STOP EDITING HERE @@ -77,14 +79,20 @@ test -x $DAEMON || fi } +# If we want to tell child processes to adjust their OOM scores, set up the +# necessary environment variables. Can't just export them through the "su". +if [ -e "$PG_OOM_ADJUST_FILE" -a -n "$PG_CHILD_OOM_SCORE_ADJ" ] +then + DAEMON_ENV="PG_OOM_ADJUST_FILE=$PG_OOM_ADJUST_FILE PG_OOM_ADJUST_VALUE=$PG_CHILD_OOM_SCORE_ADJ" +fi + # Parse command line parameters.
case $1 in start) echo -n "Starting PostgreSQL: " - test x"$OOM_SCORE_ADJ" != x && echo "$OOM_SCORE_ADJ" > /proc/self/oom_score_adj - test x"$OOM_ADJ" != x && echo "$OOM_ADJ" > /proc/self/oom_adj - su - $PGUSER -c "$DAEMON -D '$PGDATA' &" >>$PGLOG 2>&1 + test -e "$PG_OOM_ADJUST_FILE" && echo "$PG_MASTER_OOM_SCORE_ADJ" > "$PG_OOM_ADJUST_FILE" + su - $PGUSER -c "$DAEMON_ENV $DAEMON -D '$PGDATA' &" >>$PGLOG 2>&1 echo "ok" ;; stop) @@ -95,9 +103,8 @@ case $1 in restart) echo -n "Restarting PostgreSQL: " su - $PGUSER -c "$PGCTL stop -D '$PGDATA' -s -m fast -w" - test x"$OOM_SCORE_ADJ" != x && echo "$OOM_SCORE_ADJ" > /proc/self/oom_score_adj - test x"$OOM_ADJ" != x && echo "$OOM_ADJ" > /proc/self/oom_adj - su - $PGUSER -c "$DAEMON -D '$PGDATA' &" >>$PGLOG 2>&1 + test -e "$PG_OOM_ADJUST_FILE" && echo "$PG_MASTER_OOM_SCORE_ADJ" > "$PG_OOM_ADJUST_FILE" + su - $PGUSER -c "$DAEMON_ENV $DAEMON -D '$PGDATA' &" >>$PGLOG 2>&1 echo "ok" ;; reload) diff --git a/contrib/start-scripts/osx/PostgreSQL b/contrib/start-scripts/osx/PostgreSQL index 22ed9ff45e..24872b0944 100755 --- a/contrib/start-scripts/osx/PostgreSQL +++ b/contrib/start-scripts/osx/PostgreSQL @@ -4,7 +4,7 @@ # PostgreSQL RDBMS Server ## -# PostgreSQL boot time startup script for Darwin/Mac OS X. To install, change +# PostgreSQL boot time startup script for OS X. To install, change # the "prefix", "PGDATA", "PGUSER", and "PGLOG" variables below as # necessary. Next, create a new directory, "/Library/StartupItems/PostgreSQL". # Then copy this script and the accompanying "StartupParameters.plist" file diff --git a/contrib/tablefunc/Makefile b/contrib/tablefunc/Makefile index eb108931ec..7150117640 100644 --- a/contrib/tablefunc/Makefile +++ b/contrib/tablefunc/Makefile @@ -4,6 +4,7 @@ MODULES = tablefunc EXTENSION = tablefunc DATA = tablefunc--1.0.sql tablefunc--unpackaged--1.0.sql +PGFILEDESC = "tablefunc - various functions that return tables" REGRESS = tablefunc diff --git a/contrib/tablefunc/expected/tablefunc.out b/contrib/tablefunc/expected/tablefunc.out index 0437ecf90a..fffadc6e1b 100644 --- a/contrib/tablefunc/expected/tablefunc.out +++ b/contrib/tablefunc/expected/tablefunc.out @@ -376,6 +376,38 @@ SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 4, '~') A 11 | 10 | 4 | 2~5~9~10~11 (8 rows) +-- should fail as first two columns must have the same type +SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') AS t(keyid text, parent_keyid int, level int, branch text); +ERROR: invalid return type +DETAIL: First two columns must be the same type. +-- should fail as key field datatype should match return datatype +SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') AS t(keyid float8, parent_keyid float8, level int, branch text); +ERROR: invalid return type +DETAIL: SQL key field type double precision does not match return key field type integer. +-- tests for values using custom queries +-- query with one column - failed +SELECT * FROM connectby('connectby_int', '1; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +ERROR: invalid return type +DETAIL: Query must return at least two columns. 
+-- query with two columns first value as NULL +SELECT * FROM connectby('connectby_int', 'NULL::int, 1::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); + keyid | parent_keyid | level +-------+--------------+------- + 2 | | 0 + | 1 | 1 +(2 rows) + +-- query with two columns second value as NULL +SELECT * FROM connectby('connectby_int', '1::int, NULL::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +ERROR: infinite recursion detected +-- query with two columns, both values as NULL +SELECT * FROM connectby('connectby_int', 'NULL::int, NULL::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); + keyid | parent_keyid | level +-------+--------------+------- + 2 | | 0 + | | 1 +(2 rows) + -- test for falsely detected recursion DROP TABLE connectby_int; CREATE TABLE connectby_int(keyid int, parent_keyid int); diff --git a/contrib/tablefunc/sql/tablefunc.sql b/contrib/tablefunc/sql/tablefunc.sql index bf874f26ad..ec375b05c6 100644 --- a/contrib/tablefunc/sql/tablefunc.sql +++ b/contrib/tablefunc/sql/tablefunc.sql @@ -179,6 +179,22 @@ SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') A -- infinite recursion failure avoided by depth limit SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 4, '~') AS t(keyid int, parent_keyid int, level int, branch text); +-- should fail as first two columns must have the same type +SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') AS t(keyid text, parent_keyid int, level int, branch text); + +-- should fail as key field datatype should match return datatype +SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') AS t(keyid float8, parent_keyid float8, level int, branch text); + +-- tests for values using custom queries +-- query with one column - failed +SELECT * FROM connectby('connectby_int', '1; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +-- query with two columns first value as NULL +SELECT * FROM connectby('connectby_int', 'NULL::int, 1::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +-- query with two columns second value as NULL +SELECT * FROM connectby('connectby_int', '1::int, NULL::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +-- query with two columns, both values as NULL +SELECT * FROM connectby('connectby_int', 'NULL::int, NULL::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); + -- test for falsely detected recursion DROP TABLE connectby_int; CREATE TABLE connectby_int(keyid int, parent_keyid int); diff --git a/contrib/tablefunc/tablefunc--unpackaged--1.0.sql b/contrib/tablefunc/tablefunc--unpackaged--1.0.sql index e5e9619c52..f0a276a9c4 100644 --- a/contrib/tablefunc/tablefunc--unpackaged--1.0.sql +++ b/contrib/tablefunc/tablefunc--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/tablefunc/tablefunc--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION tablefunc" to load this file. \quit +\echo Use "CREATE EXTENSION tablefunc FROM unpackaged" to load this file. 
\quit ALTER EXTENSION tablefunc ADD function normal_rand(integer,double precision,double precision); ALTER EXTENSION tablefunc ADD function crosstab(text); diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c index 10ee8c76db..8a95d4710b 100644 --- a/contrib/tablefunc/tablefunc.c +++ b/contrib/tablefunc/tablefunc.c @@ -10,7 +10,7 @@ * And contributors: * Nabil Sayegh <postgresql@e-trolley.de> * - * Copyright (c) 2002-2014, PostgreSQL Global Development Group + * Copyright (c) 2002-2015, PostgreSQL Global Development Group * * Permission to use, copy, modify, and distribute this software and its * documentation for any purpose, without fee, and without a written agreement @@ -54,7 +54,7 @@ static Tuplestorestate *get_crosstab_tuplestore(char *sql, bool randomAccess); static void validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial); static bool compatCrosstabTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2); -static bool compatConnectbyTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2); +static void compatConnectbyTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2); static void get_normal_pair(float8 *x1, float8 *x2); static Tuplestorestate *connectby(char *relname, char *key_fld, @@ -68,7 +68,7 @@ static Tuplestorestate *connectby(char *relname, MemoryContext per_query_ctx, bool randomAccess, AttInMetadata *attinmeta); -static Tuplestorestate *build_tuplestore_recursively(char *key_fld, +static void build_tuplestore_recursively(char *key_fld, char *parent_key_fld, char *relname, char *orderby_fld, @@ -1178,28 +1178,28 @@ connectby(char *relname, MemoryContextSwitchTo(oldcontext); /* now go get the whole tree */ - tupstore = build_tuplestore_recursively(key_fld, - parent_key_fld, - relname, - orderby_fld, - branch_delim, - start_with, - start_with, /* current_branch */ - 0, /* initial level is 0 */ - &serial, /* initial serial is 1 */ - max_depth, - show_branch, - show_serial, - per_query_ctx, - attinmeta, - tupstore); + build_tuplestore_recursively(key_fld, + parent_key_fld, + relname, + orderby_fld, + branch_delim, + start_with, + start_with, /* current_branch */ + 0, /* initial level is 0 */ + &serial, /* initial serial is 1 */ + max_depth, + show_branch, + show_serial, + per_query_ctx, + attinmeta, + tupstore); SPI_finish(); return tupstore; } -static Tuplestorestate * +static void build_tuplestore_recursively(char *key_fld, char *parent_key_fld, char *relname, @@ -1230,7 +1230,7 @@ build_tuplestore_recursively(char *key_fld, HeapTuple tuple; if (max_depth > 0 && level > max_depth) - return tupstore; + return; initStringInfo(&sql); @@ -1316,22 +1316,11 @@ build_tuplestore_recursively(char *key_fld, StringInfoData chk_branchstr; StringInfoData chk_current_key; - /* First time through, do a little more setup */ - if (level == 0) - { - /* - * Check that return tupdesc is compatible with the one we got - * from the query, but only at level 0 -- no need to check more - * than once - */ - - if (!compatConnectbyTupleDescs(tupdesc, spi_tupdesc)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("invalid return type"), - errdetail("Return and SQL tuple descriptions are " \ - "incompatible."))); - } + /* + * Check that return tupdesc is compatible with the one we got from + * the query. 
+ */ + compatConnectbyTupleDescs(tupdesc, spi_tupdesc); initStringInfo(&branchstr); initStringInfo(&chk_branchstr); @@ -1346,24 +1335,31 @@ build_tuplestore_recursively(char *key_fld, /* get the next sql result tuple */ spi_tuple = tuptable->vals[i]; - /* get the current key and parent */ + /* get the current key (might be NULL) */ current_key = SPI_getvalue(spi_tuple, spi_tupdesc, 1); - appendStringInfo(&chk_current_key, "%s%s%s", branch_delim, current_key, branch_delim); - current_key_parent = pstrdup(SPI_getvalue(spi_tuple, spi_tupdesc, 2)); + + /* get the parent key (might be NULL) */ + current_key_parent = SPI_getvalue(spi_tuple, spi_tupdesc, 2); /* get the current level */ sprintf(current_level, "%d", level); /* check to see if this key is also an ancestor */ - if (strstr(chk_branchstr.data, chk_current_key.data)) - elog(ERROR, "infinite recursion detected"); + if (current_key) + { + appendStringInfo(&chk_current_key, "%s%s%s", + branch_delim, current_key, branch_delim); + if (strstr(chk_branchstr.data, chk_current_key.data)) + elog(ERROR, "infinite recursion detected"); + } /* OK, extend the branch */ - appendStringInfo(&branchstr, "%s%s", branch_delim, current_key); + if (current_key) + appendStringInfo(&branchstr, "%s%s", branch_delim, current_key); current_branch = branchstr.data; /* build a tuple */ - values[0] = pstrdup(current_key); + values[0] = current_key; values[1] = current_key_parent; values[2] = current_level; if (show_branch) @@ -1379,30 +1375,31 @@ build_tuplestore_recursively(char *key_fld, tuple = BuildTupleFromCStrings(attinmeta, values); - xpfree(current_key); - xpfree(current_key_parent); - /* store the tuple for later use */ tuplestore_puttuple(tupstore, tuple); heap_freetuple(tuple); - /* recurse using current_key_parent as the new start_with */ - tupstore = build_tuplestore_recursively(key_fld, - parent_key_fld, - relname, - orderby_fld, - branch_delim, - values[0], - current_branch, - level + 1, - serial, - max_depth, - show_branch, - show_serial, - per_query_ctx, - attinmeta, - tupstore); + /* recurse using current_key as the new start_with */ + if (current_key) + build_tuplestore_recursively(key_fld, + parent_key_fld, + relname, + orderby_fld, + branch_delim, + current_key, + current_branch, + level + 1, + serial, + max_depth, + show_branch, + show_serial, + per_query_ctx, + attinmeta, + tupstore); + + xpfree(current_key); + xpfree(current_key_parent); /* reset branch for next pass */ resetStringInfo(&branchstr); @@ -1414,8 +1411,6 @@ build_tuplestore_recursively(char *key_fld, xpfree(chk_branchstr.data); xpfree(chk_current_key.data); } - - return tupstore; } /* @@ -1488,34 +1483,56 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial /* * Check if spi sql tupdesc and return tupdesc are compatible */ -static bool +static void compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) { Oid ret_atttypid; Oid sql_atttypid; + int32 ret_atttypmod; + int32 sql_atttypmod; + + /* + * Result must have at least 2 columns. + */ + if (sql_tupdesc->natts < 2) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("invalid return type"), + errdetail("Query must return at least two columns."))); - /* check the key_fld types match */ + /* + * These columns must match the result type indicated by the calling + * query. 
+ */ ret_atttypid = ret_tupdesc->attrs[0]->atttypid; sql_atttypid = sql_tupdesc->attrs[0]->atttypid; - if (ret_atttypid != sql_atttypid) + ret_atttypmod = ret_tupdesc->attrs[0]->atttypmod; + sql_atttypmod = sql_tupdesc->attrs[0]->atttypmod; + if (ret_atttypid != sql_atttypid || + (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid return type"), - errdetail("SQL key field datatype does " \ - "not match return key field datatype."))); + errdetail("SQL key field type %s does " \ + "not match return key field type %s.", + format_type_with_typemod(ret_atttypid, ret_atttypmod), + format_type_with_typemod(sql_atttypid, sql_atttypmod)))); - /* check the parent_key_fld types match */ ret_atttypid = ret_tupdesc->attrs[1]->atttypid; sql_atttypid = sql_tupdesc->attrs[1]->atttypid; - if (ret_atttypid != sql_atttypid) + ret_atttypmod = ret_tupdesc->attrs[1]->atttypmod; + sql_atttypmod = sql_tupdesc->attrs[1]->atttypmod; + if (ret_atttypid != sql_atttypid || + (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid return type"), - errdetail("SQL parent key field datatype does " \ - "not match return parent key field datatype."))); + errdetail("SQL parent key field type %s does " \ + "not match return parent key field type %s.", + format_type_with_typemod(ret_atttypid, ret_atttypmod), + format_type_with_typemod(sql_atttypid, sql_atttypmod)))); /* OK, the two tupdescs are compatible for our purposes */ - return true; } /* diff --git a/contrib/tablefunc/tablefunc.h b/contrib/tablefunc/tablefunc.h index a983bab2ee..911a69bfad 100644 --- a/contrib/tablefunc/tablefunc.h +++ b/contrib/tablefunc/tablefunc.h @@ -10,7 +10,7 @@ * And contributors: * Nabil Sayegh <postgresql@e-trolley.de> * - * Copyright (c) 2002-2014, PostgreSQL Global Development Group + * Copyright (c) 2002-2015, PostgreSQL Global Development Group * * Permission to use, copy, modify, and distribute this software and its * documentation for any purpose, without fee, and without a written agreement diff --git a/contrib/tcn/Makefile b/contrib/tcn/Makefile index 7bac5e359c..2de3425bbd 100644 --- a/contrib/tcn/Makefile +++ b/contrib/tcn/Makefile @@ -4,6 +4,7 @@ MODULES = tcn EXTENSION = tcn DATA = tcn--1.0.sql +PGFILEDESC = "tcn - trigger function notifying listeners" ifdef USE_PGXS PG_CONFIG = pg_config diff --git a/contrib/tcn/tcn.c b/contrib/tcn/tcn.c index ba34f9b1fa..af1ba920a6 100644 --- a/contrib/tcn/tcn.c +++ b/contrib/tcn/tcn.c @@ -3,7 +3,7 @@ * tcn.c * triggered change notification support for PostgreSQL * - * Portions Copyright (c) 2011-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2011-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -23,10 +23,8 @@ #include "utils/rel.h" #include "utils/syscache.h" - PG_MODULE_MAGIC; - /* * Copy from s (for source) to r (for result), wrapping with q (quote) * characters and doubling any quote characters found. 
diff --git a/contrib/test_decoding/Makefile b/contrib/test_decoding/Makefile index 58e0f384cb..438be44afc 100644 --- a/contrib/test_decoding/Makefile +++ b/contrib/test_decoding/Makefile @@ -1,7 +1,7 @@ # contrib/test_decoding/Makefile MODULES = test_decoding -OBJS = test_decoding.o +PGFILEDESC = "test_decoding - example of a logical decoding output plugin" # Note: because we don't tell the Makefile there are any regression tests, # we have to clean those result files explicitly @@ -37,7 +37,7 @@ submake-isolation: submake-test_decoding: $(MAKE) -C $(top_builddir)/contrib/test_decoding -REGRESSCHECKS=ddl rewrite toast permissions decoding_in_xact binary prepared +REGRESSCHECKS=ddl rewrite toast permissions decoding_in_xact decoding_into_rel binary prepared regresscheck: all | submake-regress submake-test_decoding $(MKDIR_P) regression_output @@ -53,7 +53,7 @@ regresscheck-install-force: | submake-regress submake-test_decoding --extra-install=contrib/test_decoding \ $(REGRESSCHECKS) -ISOLATIONCHECKS=mxact delayed_startup concurrent_ddl_dml +ISOLATIONCHECKS=mxact delayed_startup ondisk_startup concurrent_ddl_dml isolationcheck: all | submake-isolation submake-test_decoding $(MKDIR_P) isolation_output diff --git a/contrib/test_decoding/expected/binary.out b/contrib/test_decoding/expected/binary.out index 4164784ab3..6d307491f0 100644 --- a/contrib/test_decoding/expected/binary.out +++ b/contrib/test_decoding/expected/binary.out @@ -14,7 +14,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'for -- fails, binary plugin, textual consumer SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '1'); -ERROR: output plugin cannot produce binary output +ERROR: logical decoding output plugin "test_decoding" produces binary output, but "pg_logical_slot_get_changes(name,pg_lsn,integer,text[])" expects textual data -- succeeds, textual plugin, binary consumer SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '0'); data diff --git a/contrib/test_decoding/expected/concurrent_ddl_dml.out b/contrib/test_decoding/expected/concurrent_ddl_dml.out index cc9165655f..a15bfa292e 100644 --- a/contrib/test_decoding/expected/concurrent_ddl_dml.out +++ b/contrib/test_decoding/expected/concurrent_ddl_dml.out @@ -10,12 +10,10 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_float: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE float; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN -COMMIT -BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[double precision]:1 COMMIT @@ -34,7 +32,7 @@ step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; <waitin step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; step s2_alter_tbl1_float: <... 
completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -58,12 +56,10 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_char: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE character varying; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN -COMMIT -BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[character varying]:'1' COMMIT @@ -82,7 +78,7 @@ step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varyi step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -107,7 +103,7 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; <waiting ...> step s1_commit: COMMIT; step s2_alter_tbl1_float: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -132,7 +128,7 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; <waiting ...> step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -158,12 +154,10 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; <waiting ...> step s1_commit: COMMIT; step s2_alter_tbl1_float: <... 
completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN -COMMIT -BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[double precision]:1 COMMIT @@ -186,12 +180,10 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; <waiting ...> step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN -COMMIT -BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[character varying]:'1' COMMIT @@ -213,14 +205,10 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_text: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE text; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN -COMMIT -BEGIN -COMMIT -BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[text]:'1' COMMIT @@ -241,14 +229,10 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; <waiting ...> step s1_commit: COMMIT; step s2_alter_tbl1_char: <... 
completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN -COMMIT -BEGIN -COMMIT -BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[text]:'1' COMMIT @@ -270,7 +254,7 @@ step s2_alter_tbl2_boolean: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE boolean; ERROR: column "val2" cannot be cast automatically to type boolean step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -295,7 +279,7 @@ step s2_alter_tbl1_boolean: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE boolean; <wa step s1_commit: COMMIT; step s2_alter_tbl1_boolean: <... completed> error in steps s1_commit s2_alter_tbl1_boolean: ERROR: column "val2" cannot be cast automatically to type boolean -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -316,12 +300,10 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN -COMMIT -BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 COMMIT @@ -342,7 +324,7 @@ step s1_begin: BEGIN; step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -350,8 +332,6 @@ table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 COMMIT ?column? 
@@ -368,12 +348,10 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_add_float: ALTER TABLE tbl2 ADD COLUMN val3 FLOAT; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN -COMMIT -BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[double precision]:1 COMMIT @@ -394,7 +372,7 @@ step s1_begin: BEGIN; step s2_alter_tbl2_add_float: ALTER TABLE tbl2 ADD COLUMN val3 FLOAT; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -402,8 +380,6 @@ table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[double precision]:1 COMMIT ?column? @@ -420,12 +396,10 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN -COMMIT -BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1' COMMIT @@ -446,7 +420,7 @@ step s1_begin: BEGIN; step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -454,8 +428,6 @@ table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1' COMMIT ?column? @@ -473,16 +445,12 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; <waiting ...> step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: <... 
completed>
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
-COMMIT
-BEGIN
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
COMMIT
-BEGIN
-COMMIT
?column?
stop
@@ -500,18 +468,14 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
step s1_commit: COMMIT;
step s2_alter_tbl2_drop_3rd_col: <... completed>
step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
-COMMIT
-BEGIN
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:null
COMMIT
BEGIN
-COMMIT
-BEGIN
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
COMMIT
?column?
@@ -529,16 +493,12 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; <waiting ...>
step s1_commit: COMMIT;
step s2_alter_tbl2_drop_3rd_col: <... completed>
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
-COMMIT
-BEGIN
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1
COMMIT
-BEGIN
-COMMIT
step s2_alter_tbl2_add_text: ALTER TABLE tbl2 ADD COLUMN val3 TEXT;
step s1_begin: BEGIN;
step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
@@ -546,20 +506,16 @@ step s2_alter_tbl2_3rd_char: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE character v
step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
step s1_commit: COMMIT;
step s2_alter_tbl2_3rd_char: <... completed>
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
-COMMIT
-BEGIN
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
COMMIT
-BEGIN
-COMMIT
step s2_alter_tbl2_3rd_int: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE int USING val3::integer;
step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
@@ -588,19 +544,15 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
step s1_commit: COMMIT;
step s2_alter_tbl2_3rd_text: <... completed>
step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
-COMMIT
-BEGIN
table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
COMMIT
BEGIN
-COMMIT
-BEGIN
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
COMMIT
?column?
@@ -621,19 +573,15 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
step s1_commit: COMMIT;
step s2_alter_tbl2_3rd_char: <... completed>
step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
-COMMIT
-BEGIN
table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
COMMIT
BEGIN
-COMMIT
-BEGIN
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
COMMIT
?column?
@@ -653,20 +601,14 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
step s1_commit: COMMIT;
step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3;
step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
-COMMIT
-BEGIN
-COMMIT
-BEGIN
table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1'
COMMIT
BEGIN
-COMMIT
-BEGIN
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
COMMIT
?column?
@@ -686,20 +628,14 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1);
step s1_commit: COMMIT;
step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3;
step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1);
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
-COMMIT
-BEGIN
-COMMIT
-BEGIN
table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1'
COMMIT
BEGIN
-COMMIT
-BEGIN
table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1
COMMIT
?column?
@@ -717,14 +653,10 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3;
step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
step s1_commit: COMMIT;
-step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0');
+step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
BEGIN
-COMMIT
-BEGIN
-COMMIT
-BEGIN
table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1
COMMIT
diff --git a/contrib/test_decoding/expected/ddl.out b/contrib/test_decoding/expected/ddl.out
index e13a6c7370..780120d731 100644
--- a/contrib/test_decoding/expected/ddl.out
+++ b/contrib/test_decoding/expected/ddl.out
@@ -12,7 +12,7 @@ ERROR: replication slot "regression_slot" already exists
-- fail because of an invalid name
SELECT 'init' FROM pg_create_logical_replication_slot('Invalid Name', 'test_decoding');
ERROR: replication slot name "Invalid Name" contains invalid character
-HINT: Replication slot names may only contain letters, numbers and the underscore character.
+HINT: Replication slot names may only contain letters, numbers, and the underscore character.
-- fail twice because of an invalid parameter values
SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar');
ERROR: could not parse value "frakbar" for parameter "include-xids"
@@ -40,7 +40,7 @@ SELECT 'init' FROM pg_create_physical_replication_slot('repl');
init
(1 row)
-SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
ERROR: cannot use physical replication slot for logical decoding
SELECT pg_drop_replication_slot('repl');
pg_drop_replication_slot
@@ -89,18 +89,14 @@ COMMIT;
ALTER TABLE replication_example RENAME COLUMN text TO somenum;
INSERT INTO replication_example(somedata, somenum) VALUES (4, 1);
-- collect all changes
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
---------------------------------------------------------------------------------------------------------------------------
BEGIN
- COMMIT
- BEGIN
table public.replication_example: INSERT: id[integer]:1 somedata[integer]:1 text[character varying]:'1'
table public.replication_example: INSERT: id[integer]:2 somedata[integer]:1 text[character varying]:'2'
COMMIT
BEGIN
- COMMIT
- BEGIN
table public.replication_example: INSERT: id[integer]:3 somedata[integer]:2 text[character varying]:'1' bar[integer]:4
COMMIT
BEGIN
@@ -109,8 +105,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
table public.replication_example: INSERT: id[integer]:6 somedata[integer]:2 text[character varying]:'4' bar[integer]:null
COMMIT
BEGIN
- COMMIT
- BEGIN
table public.replication_example: INSERT: id[integer]:7 somedata[integer]:3 text[character varying]:'1'
COMMIT
BEGIN
@@ -118,15 +112,13 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
table public.replication_example: INSERT: id[integer]:9 somedata[integer]:3 text[character varying]:'3'
COMMIT
BEGIN
- COMMIT
- BEGIN
table public.replication_example: INSERT: id[integer]:10 somedata[integer]:4 somenum[character varying]:'1'
COMMIT
-(30 rows)
+(22 rows)
ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4);
-- throw away changes, they contain oids
-SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
count
-------
12
@@ -142,7 +134,7 @@ INSERT INTO replication_example(somedata, somenum, zaphod2) VALUES (6, 3, 1);
INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 4, 2);
COMMIT;
-- show changes
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
------------------------------------------------------------------------------------------------------------------------------------------
BEGIN
@@ -161,17 +153,17 @@ CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int);
INSERT INTO tr_unique(data) VALUES(10);
ALTER TABLE tr_unique RENAME TO tr_pkey;
ALTER TABLE tr_pkey ADD COLUMN id serial primary key;
-SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
count
-------
- 10
+ 6
(1 row)
INSERT INTO tr_pkey(data) VALUES(1);
--show deletion with primary key
DELETE FROM tr_pkey;
/* display results */
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
----------------------------------------------------------------------------
BEGIN
@@ -194,7 +186,7 @@ UPDATE tr_etoomuch SET data = - data WHERE id > 5000;
COMMIT;
/* display results, but hide most of the output */
SELECT count(*), min(data), max(data)
-FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0')
+FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1')
GROUP BY substring(data, 1, 24)
ORDER BY 1,2;
count | min | max
@@ -224,12 +216,10 @@ RELEASE SAVEPOINT c;
INSERT INTO tr_sub(path) VALUES ('1-top-2-#1');
RELEASE SAVEPOINT b;
COMMIT;
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
----------------------------------------------------------------------
BEGIN
- COMMIT
- BEGIN
table public.tr_sub: INSERT: id[integer]:1 path[text]:'1-top-#1'
table public.tr_sub: INSERT: id[integer]:2 path[text]:'1-top-1-#1'
table public.tr_sub: INSERT: id[integer]:3 path[text]:'1-top-1-#2'
@@ -237,7 +227,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
table public.tr_sub: INSERT: id[integer]:5 path[text]:'1-top-2-1-#2'
table public.tr_sub: INSERT: id[integer]:6 path[text]:'1-top-2-#1'
COMMIT
-(10 rows)
+(8 rows)
-- check that we handle xlog assignments correctly
BEGIN;
@@ -265,7 +255,7 @@ INSERT INTO tr_sub(path) VALUES ('2-top-1...--#3');
RELEASE SAVEPOINT subtop;
INSERT INTO tr_sub(path) VALUES ('2-top-#1');
COMMIT;
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
------------------------------------------------------------------------
BEGIN
@@ -286,7 +276,7 @@ INSERT INTO tr_sub(path) VALUES ('3-top-2-2-#1');
ROLLBACK TO SAVEPOINT b;
INSERT INTO tr_sub(path) VALUES ('3-top-2-#2');
COMMIT;
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
-----------------------------------------------------------------------
BEGIN
@@ -315,7 +305,7 @@ BEGIN;
SAVEPOINT a;
INSERT INTO tr_sub(path) VALUES ('5-top-1-#1');
COMMIT;
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
---------------------------------------------------------------------
BEGIN
@@ -395,32 +385,22 @@ Options: user_catalog_table=false
INSERT INTO replication_metadata(relation, options) VALUES ('zaphod', NULL);
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
------------------------------------------------------------------------------------------------------------------------------------
BEGIN
- COMMIT
- BEGIN
table public.replication_metadata: INSERT: id[integer]:1 relation[name]:'foo' options[text[]]:'{a,b}'
COMMIT
BEGIN
- COMMIT
- BEGIN
table public.replication_metadata: INSERT: id[integer]:2 relation[name]:'bar' options[text[]]:'{a,b}'
COMMIT
BEGIN
- COMMIT
- BEGIN
table public.replication_metadata: INSERT: id[integer]:3 relation[name]:'blub' options[text[]]:null
COMMIT
BEGIN
- COMMIT
- BEGIN
- COMMIT
- BEGIN
table public.replication_metadata: INSERT: id[integer]:4 relation[name]:'zaphod' options[text[]]:null rewritemeornot[integer]:null
COMMIT
-(22 rows)
+(12 rows)
/*
 * check whether we handle updates/deletes correct with & without a pkey
@@ -489,12 +469,10 @@ INSERT INTO toasttable(toasted_col2) SELECT repeat(string_agg(to_char(g.i, 'FM00
UPDATE toasttable SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i)) WHERE id = 1;
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
data
---------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 BEGIN
- COMMIT
- BEGIN
 table public.table_without_key: INSERT: id[integer]:1 data[integer]:1
 table public.table_without_key: INSERT: id[integer]:2 data[integer]:2
 COMMIT
@@ -511,8 +489,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
 table public.table_without_key: UPDATE: id[integer]:2 data[integer]:3
 COMMIT
 BEGIN
- COMMIT
- BEGIN
 table public.table_without_key: UPDATE: old-key: id[integer]:2 data[integer]:3 new-tuple: id[integer]:-2 data[integer]:3
 COMMIT
 BEGIN
@@ -522,8 +498,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
 table public.table_without_key: DELETE: id[integer]:2 data[integer]:3
 COMMIT
 BEGIN
- COMMIT
- BEGIN
 table public.table_with_pkey: INSERT: id[integer]:1 data[integer]:1
 table public.table_with_pkey: INSERT: id[integer]:2 data[integer]:2
 COMMIT
@@ -540,21 +514,15 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
 table public.table_with_pkey: UPDATE: old-key: id[integer]:-2 new-tuple: id[integer]:2 data[integer]:3
 COMMIT
 BEGIN
- COMMIT
- BEGIN
 table public.table_with_pkey: UPDATE: old-key: id[integer]:2 new-tuple: id[integer]:-2 data[integer]:3
 COMMIT
 BEGIN
- COMMIT
- BEGIN
 table public.table_with_pkey: UPDATE: old-key: id[integer]:-2 new-tuple: id[integer]:2 data[integer]:3
 COMMIT
 BEGIN
 table public.table_with_pkey: DELETE: id[integer]:2
 COMMIT
 BEGIN
- COMMIT
- BEGIN
 table public.table_with_unique_not_null: INSERT: 
id[integer]:1 data[integer]:1 table public.table_with_unique_not_null: INSERT: id[integer]:2 data[integer]:2 COMMIT @@ -574,8 +542,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc table public.table_with_unique_not_null: DELETE: (no-tuple-data) COMMIT BEGIN - COMMIT - BEGIN table public.table_with_unique_not_null: INSERT: id[integer]:3 data[integer]:1 table public.table_with_unique_not_null: INSERT: id[integer]:4 data[integer]:2 COMMIT @@ -595,8 +561,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc table public.table_with_unique_not_null: DELETE: id[integer]:4 COMMIT BEGIN - COMMIT - BEGIN table public.toasttable: INSERT: id[integer]:1 toasted_col1[text]:'123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975
976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187
01871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:79 toasted_col2[text]:null rand2[double precision]:1578 COMMIT BEGIN @@ -605,7 +569,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc BEGIN table public.toasttable: UPDATE: id[integer]:1 toasted_col1[text]:'12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795
895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185
718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:79 toasted_col2[text]:null rand2[double precision]:1578 COMMIT -(113 rows) +(97 rows) INSERT INTO toasttable(toasted_col1) SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i); -- update of second column, first column unchanged @@ -614,7 +578,7 @@ UPDATE toasttable WHERE id = 1; -- make sure we decode correctly even if the toast table is gone DROP TABLE toasttable; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- BEGIN @@ -623,12 +587,10 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc BEGIN table public.toasttable: UPDATE: id[integer]:1 toasted_col1[text]:unchanged-toast-datum rand1[double precision]:79 toasted_col2[text]:'12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075
175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117
021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand2[double precision]:1578 COMMIT - BEGIN - COMMIT -(8 rows) +(6 rows) -- done, free logical replication slot -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------ (0 rows) diff --git a/contrib/test_decoding/expected/decoding_in_xact.out b/contrib/test_decoding/expected/decoding_in_xact.out index d15b0b542b..456840886a 100644 --- a/contrib/test_decoding/expected/decoding_in_xact.out +++ b/contrib/test_decoding/expected/decoding_in_xact.out @@ -58,19 +58,17 @@ SELECT txid_current() = 0; -- don't show yet, haven't committed INSERT INTO nobarf(data) VALUES('2'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ----------------------------------------------------------- BEGIN - COMMIT - BEGIN table public.nobarf: INSERT: id[integer]:1 data[text]:'1' COMMIT -(5 rows) +(3 rows) COMMIT; INSERT INTO nobarf(data) VALUES('3'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ----------------------------------------------------------- BEGIN diff --git a/contrib/test_decoding/expected/decoding_into_rel.out b/contrib/test_decoding/expected/decoding_into_rel.out new file mode 100644 index 0000000000..be759caa31 --- /dev/null +++ b/contrib/test_decoding/expected/decoding_into_rel.out @@ -0,0 +1,86 @@ +-- test that we can insert the result of a get_changes call into a +-- logged relation. That's really not a good idea in practical terms, +-- but provides a nice test. +-- predictability +SET synchronous_commit = on; +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + ?column? 
+----------
+ init
+(1 row)
+
+-- slot works
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+ data
+------
+(0 rows)
+
+-- create some changes
+CREATE TABLE somechange(id serial primary key);
+INSERT INTO somechange DEFAULT VALUES;
+CREATE TABLE changeresult AS
+ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+SELECT * FROM changeresult;
+ data
+------------------------------------------------
+ BEGIN
+ table public.somechange: INSERT: id[integer]:1
+ COMMIT
+(3 rows)
+
+INSERT INTO changeresult
+ SELECT data FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+INSERT INTO changeresult
+ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+SELECT * FROM changeresult;
+ data
+--------------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ table public.somechange: INSERT: id[integer]:1
+ COMMIT
+ BEGIN
+ table public.changeresult: INSERT: data[text]:'BEGIN'
+ table public.changeresult: INSERT: data[text]:'table public.somechange: INSERT: id[integer]:1'
+ table public.changeresult: INSERT: data[text]:'COMMIT'
+ COMMIT
+ BEGIN
+ table public.changeresult: INSERT: data[text]:'BEGIN'
+ table public.changeresult: INSERT: data[text]:'table public.somechange: INSERT: id[integer]:1'
+ table public.changeresult: INSERT: data[text]:'COMMIT'
+ COMMIT
+ BEGIN
+ table public.changeresult: INSERT: data[text]:'BEGIN'
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''BEGIN'''
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.somechange: INSERT: id[integer]:1'''
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''COMMIT'''
+ table public.changeresult: INSERT: data[text]:'COMMIT'
+ COMMIT
+(20 rows)
+
+DROP TABLE changeresult;
+DROP TABLE somechange;
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
+ data
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ BEGIN
+ table public.changeresult: INSERT: data[text]:'BEGIN'
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''BEGIN'''
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.somechange: INSERT: id[integer]:1'''
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''COMMIT'''
+ table public.changeresult: INSERT: data[text]:'COMMIT'
+ table public.changeresult: INSERT: data[text]:'BEGIN'
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''BEGIN'''
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.changeresult: INSERT: data[text]:''''BEGIN'''''''
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.changeresult: INSERT: data[text]:''''table public.somechange: INSERT: id[integer]:1'''''''
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.changeresult: INSERT: data[text]:''''COMMIT'''''''
+ table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''COMMIT'''
+ table public.changeresult: INSERT: data[text]:'COMMIT'
+ COMMIT
+(14 rows)
+
+SELECT 'stop' FROM pg_drop_replication_slot('regression_slot');
+ ?column?
+----------
+ stop
+(1 row)
+
diff --git a/contrib/test_decoding/expected/ondisk_startup.out b/contrib/test_decoding/expected/ondisk_startup.out
new file mode 100644
index 0000000000..65115c830a
--- /dev/null
+++ b/contrib/test_decoding/expected/ondisk_startup.out
@@ -0,0 +1,43 @@
+Parsed test spec with 3 sessions
+
+starting permutation: s2txid s1init s3txid s2alter s2c s1insert s1checkpoint s1start s1insert s1alter s1insert s1start
+step s2txid: BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT txid_current() IS NULL;
+?column?
+
+f
+step s1init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); <waiting ...>
+step s3txid: BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT txid_current() IS NULL;
+?column?
+
+f
+step s2alter: ALTER TABLE do_write ADD COLUMN addedbys2 int;
+step s2c: COMMIT;
+step s1init: <... completed>
+?column?
+
+init
+step s1insert: INSERT INTO do_write DEFAULT VALUES;
+step s1checkpoint: CHECKPOINT;
+step s1start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+BEGIN
+table public.do_write: INSERT: id[integer]:1 addedbys2[integer]:null
+COMMIT
+step s1insert: INSERT INTO do_write DEFAULT VALUES;
+step s1alter: ALTER TABLE do_write ADD COLUMN addedbys1 int;
+step s1insert: INSERT INTO do_write DEFAULT VALUES;
+step s1start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');
+data
+
+BEGIN
+table public.do_write: INSERT: id[integer]:2 addedbys2[integer]:null
+COMMIT
+BEGIN
+COMMIT
+BEGIN
+table public.do_write: INSERT: id[integer]:3 addedbys2[integer]:null addedbys1[integer]:null
+COMMIT
+?column?
+ +stop diff --git a/contrib/test_decoding/expected/permissions.out b/contrib/test_decoding/expected/permissions.out index 85b7f5d625..212fd1df35 100644 --- a/contrib/test_decoding/expected/permissions.out +++ b/contrib/test_decoding/expected/permissions.out @@ -14,7 +14,7 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d (1 row) INSERT INTO lr_test VALUES('lr_superuser_init'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -------------------------------------------------------------- BEGIN @@ -39,7 +39,7 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d INSERT INTO lr_test VALUES('lr_superuser_init'); ERROR: permission denied for relation lr_test -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------ (0 rows) @@ -57,7 +57,7 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d ERROR: must be superuser or replication role to use replication slots INSERT INTO lr_test VALUES('lr_superuser_init'); ERROR: permission denied for relation lr_test -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); ERROR: must be superuser or replication role to use replication slots SELECT pg_drop_replication_slot('regression_slot'); ERROR: must be superuser or replication role to use replication slots diff --git a/contrib/test_decoding/expected/prepared.out b/contrib/test_decoding/expected/prepared.out index 8313f8b7aa..46e915d4ff 100644 --- a/contrib/test_decoding/expected/prepared.out +++ b/contrib/test_decoding/expected/prepared.out @@ -39,14 +39,10 @@ INSERT INTO test_prepared2 VALUES (9); DROP TABLE test_prepared1; DROP TABLE test_prepared2; -- show results -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------------------------------------------------------------------------- BEGIN - COMMIT - BEGIN - COMMIT - BEGIN table public.test_prepared1: INSERT: id[integer]:1 COMMIT BEGIN @@ -68,11 +64,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc BEGIN table public.test_prepared2: INSERT: id[integer]:9 COMMIT - BEGIN - COMMIT - BEGIN - COMMIT -(30 rows) +(22 rows) SELECT pg_drop_replication_slot('regression_slot'); pg_drop_replication_slot diff --git a/contrib/test_decoding/expected/rewrite.out b/contrib/test_decoding/expected/rewrite.out index ec23ab9024..4dcd489543 100644 --- a/contrib/test_decoding/expected/rewrite.out +++ b/contrib/test_decoding/expected/rewrite.out @@ -9,15 +9,13 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120)); INSERT INTO replication_example(somedata) VALUES (1); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 
'include-xids', '0', 'skip-empty-xacts', '1'); data ---------------------------------------------------------------------------------------------------------- BEGIN - COMMIT - BEGIN table public.replication_example: INSERT: id[integer]:1 somedata[integer]:1 text[character varying]:null COMMIT -(5 rows) +(3 rows) BEGIN; INSERT INTO replication_example(somedata) VALUES (2); @@ -58,7 +56,7 @@ INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (7, 5 COMMIT; -- make old files go away CHECKPOINT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- BEGIN @@ -70,33 +68,13 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc table public.replication_example: INSERT: id[integer]:5 somedata[integer]:4 text[character varying]:null testcolumn1[integer]:2 testcolumn2[integer]:1 COMMIT BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN table public.replication_example: INSERT: id[integer]:6 somedata[integer]:5 text[character varying]:null testcolumn1[integer]:3 testcolumn2[integer]:null COMMIT BEGIN table public.replication_example: INSERT: id[integer]:7 somedata[integer]:6 text[character varying]:null testcolumn1[integer]:4 testcolumn2[integer]:null table public.replication_example: INSERT: id[integer]:8 somedata[integer]:7 text[character varying]:null testcolumn1[integer]:5 testcolumn2[integer]:null testcolumn3[integer]:1 COMMIT -(35 rows) +(15 rows) SELECT pg_drop_replication_slot('regression_slot'); pg_drop_replication_slot diff --git a/contrib/test_decoding/expected/toast.out b/contrib/test_decoding/expected/toast.out index 6adef83f02..0a850b7acd 100644 --- a/contrib/test_decoding/expected/toast.out +++ b/contrib/test_decoding/expected/toast.out @@ -40,14 +40,18 @@ UPDATE toasted_key SET toasted_col2 = toasted_col1; -- test update of a toasted key, changing it UPDATE toasted_key SET toasted_key = toasted_key || '1'; DELETE FROM toasted_key; -SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +-- Test that HEAP2_MULTI_INSERT insertions with and without toasted +-- columns are handled correctly +CREATE TABLE toasted_copy ( + id int primary key, -- no default, copy didn't use to handle that with multi inserts + data text +); +ALTER TABLE toasted_copy ALTER COLUMN data SET STORAGE EXTERNAL; +\copy toasted_copy FROM STDIN +SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); substr ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- BEGIN - COMMIT - BEGIN - COMMIT - BEGIN table public.xpto: INSERT: id[integer]:1 toasted_col1[text]:'1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374 COMMIT BEGIN @@ -63,12 +67,6 @@ SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', table 
public.xpto: DELETE: id[integer]:1 COMMIT BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN table public.toasted_key: INSERT: id[integer]:1 toasted_key[text]:'1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 COMMIT BEGIN @@ -80,7 +78,212 @@ SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', BEGIN table public.toasted_key: DELETE: toasted_key[text]:'123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567 COMMIT -(37 rows) + BEGIN + table public.toasted_copy: INSERT: id[integer]:1 data[text]:'untoasted1' + table public.toasted_copy: INSERT: id[integer]:2 data[text]:'toasted1-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + table public.toasted_copy: INSERT: id[integer]:3 data[text]:'untoasted2' + table public.toasted_copy: INSERT: id[integer]:4 data[text]:'toasted2-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + table public.toasted_copy: INSERT: id[integer]:5 data[text]:'untoasted3' + table public.toasted_copy: INSERT: id[integer]:6 data[text]:'untoasted4' + table public.toasted_copy: INSERT: id[integer]:7 data[text]:'untoasted5' + table public.toasted_copy: INSERT: id[integer]:8 data[text]:'untoasted6' + table public.toasted_copy: INSERT: id[integer]:9 data[text]:'untoasted7' + table public.toasted_copy: INSERT: id[integer]:10 data[text]:'untoasted8' + table public.toasted_copy: INSERT: id[integer]:11 data[text]:'untoasted9' + table public.toasted_copy: INSERT: id[integer]:12 data[text]:'untoasted10' + table public.toasted_copy: INSERT: id[integer]:13 data[text]:'untoasted11' + table public.toasted_copy: INSERT: id[integer]:14 data[text]:'untoasted12' + table public.toasted_copy: INSERT: id[integer]:15 data[text]:'untoasted13' + table public.toasted_copy: INSERT: id[integer]:16 data[text]:'untoasted14' + table public.toasted_copy: INSERT: id[integer]:17 data[text]:'untoasted15' + table public.toasted_copy: INSERT: id[integer]:18 data[text]:'untoasted16' + table public.toasted_copy: INSERT: id[integer]:19 data[text]:'untoasted17' + table public.toasted_copy: INSERT: id[integer]:20 data[text]:'untoasted18' + table public.toasted_copy: INSERT: id[integer]:21 data[text]:'untoasted19' + table public.toasted_copy: INSERT: id[integer]:22 data[text]:'untoasted20' + table public.toasted_copy: INSERT: id[integer]:23 data[text]:'untoasted21' + table public.toasted_copy: INSERT: id[integer]:24 data[text]:'untoasted22' + table public.toasted_copy: INSERT: id[integer]:25 data[text]:'untoasted23' + table public.toasted_copy: INSERT: id[integer]:26 data[text]:'untoasted24' + table public.toasted_copy: INSERT: id[integer]:27 data[text]:'untoasted25' + table public.toasted_copy: INSERT: id[integer]:28 data[text]:'untoasted26' + table public.toasted_copy: INSERT: id[integer]:29 data[text]:'untoasted27' + table public.toasted_copy: INSERT: id[integer]:30 data[text]:'untoasted28' + table public.toasted_copy: INSERT: id[integer]:31 data[text]:'untoasted29' + table public.toasted_copy: INSERT: id[integer]:32 data[text]:'untoasted30' + table public.toasted_copy: INSERT: id[integer]:33 data[text]:'untoasted31' + table public.toasted_copy: INSERT: id[integer]:34 data[text]:'untoasted32' + table public.toasted_copy: INSERT: id[integer]:35 
data[text]:'untoasted33' + table public.toasted_copy: INSERT: id[integer]:36 data[text]:'untoasted34' + table public.toasted_copy: INSERT: id[integer]:37 data[text]:'untoasted35' + table public.toasted_copy: INSERT: id[integer]:38 data[text]:'untoasted36' + table public.toasted_copy: INSERT: id[integer]:39 data[text]:'untoasted37' + table public.toasted_copy: INSERT: id[integer]:40 data[text]:'untoasted38' + table public.toasted_copy: INSERT: id[integer]:41 data[text]:'untoasted39' + table public.toasted_copy: INSERT: id[integer]:42 data[text]:'untoasted40' + table public.toasted_copy: INSERT: id[integer]:43 data[text]:'untoasted41' + table public.toasted_copy: INSERT: id[integer]:44 data[text]:'untoasted42' + table public.toasted_copy: INSERT: id[integer]:45 data[text]:'untoasted43' + table public.toasted_copy: INSERT: id[integer]:46 data[text]:'untoasted44' + table public.toasted_copy: INSERT: id[integer]:47 data[text]:'untoasted45' + table public.toasted_copy: INSERT: id[integer]:48 data[text]:'untoasted46' + table public.toasted_copy: INSERT: id[integer]:49 data[text]:'untoasted47' + table public.toasted_copy: INSERT: id[integer]:50 data[text]:'untoasted48' + table public.toasted_copy: INSERT: id[integer]:51 data[text]:'untoasted49' + table public.toasted_copy: INSERT: id[integer]:52 data[text]:'untoasted50' + table public.toasted_copy: INSERT: id[integer]:53 data[text]:'untoasted51' + table public.toasted_copy: INSERT: id[integer]:54 data[text]:'untoasted52' + table public.toasted_copy: INSERT: id[integer]:55 data[text]:'untoasted53' + table public.toasted_copy: INSERT: id[integer]:56 data[text]:'untoasted54' + table public.toasted_copy: INSERT: id[integer]:57 data[text]:'untoasted55' + table public.toasted_copy: INSERT: id[integer]:58 data[text]:'untoasted56' + table public.toasted_copy: INSERT: id[integer]:59 data[text]:'untoasted57' + table public.toasted_copy: INSERT: id[integer]:60 data[text]:'untoasted58' + table public.toasted_copy: INSERT: id[integer]:61 data[text]:'untoasted59' + table public.toasted_copy: INSERT: id[integer]:62 data[text]:'untoasted60' + table public.toasted_copy: INSERT: id[integer]:63 data[text]:'untoasted61' + table public.toasted_copy: INSERT: id[integer]:64 data[text]:'untoasted62' + table public.toasted_copy: INSERT: id[integer]:65 data[text]:'untoasted63' + table public.toasted_copy: INSERT: id[integer]:66 data[text]:'untoasted64' + table public.toasted_copy: INSERT: id[integer]:67 data[text]:'untoasted65' + table public.toasted_copy: INSERT: id[integer]:68 data[text]:'untoasted66' + table public.toasted_copy: INSERT: id[integer]:69 data[text]:'untoasted67' + table public.toasted_copy: INSERT: id[integer]:70 data[text]:'untoasted68' + table public.toasted_copy: INSERT: id[integer]:71 data[text]:'untoasted69' + table public.toasted_copy: INSERT: id[integer]:72 data[text]:'untoasted70' + table public.toasted_copy: INSERT: id[integer]:73 data[text]:'untoasted71' + table public.toasted_copy: INSERT: id[integer]:74 data[text]:'untoasted72' + table public.toasted_copy: INSERT: id[integer]:75 data[text]:'untoasted73' + table public.toasted_copy: INSERT: id[integer]:76 data[text]:'untoasted74' + table public.toasted_copy: INSERT: id[integer]:77 data[text]:'untoasted75' + table public.toasted_copy: INSERT: id[integer]:78 data[text]:'untoasted76' + table public.toasted_copy: INSERT: id[integer]:79 data[text]:'untoasted77' + table public.toasted_copy: INSERT: id[integer]:80 data[text]:'untoasted78' + table public.toasted_copy: INSERT: id[integer]:81 
data[text]:'untoasted79' + table public.toasted_copy: INSERT: id[integer]:82 data[text]:'untoasted80' + table public.toasted_copy: INSERT: id[integer]:83 data[text]:'untoasted81' + table public.toasted_copy: INSERT: id[integer]:84 data[text]:'untoasted82' + table public.toasted_copy: INSERT: id[integer]:85 data[text]:'untoasted83' + table public.toasted_copy: INSERT: id[integer]:86 data[text]:'untoasted84' + table public.toasted_copy: INSERT: id[integer]:87 data[text]:'untoasted85' + table public.toasted_copy: INSERT: id[integer]:88 data[text]:'untoasted86' + table public.toasted_copy: INSERT: id[integer]:89 data[text]:'untoasted87' + table public.toasted_copy: INSERT: id[integer]:90 data[text]:'untoasted88' + table public.toasted_copy: INSERT: id[integer]:91 data[text]:'untoasted89' + table public.toasted_copy: INSERT: id[integer]:92 data[text]:'untoasted90' + table public.toasted_copy: INSERT: id[integer]:93 data[text]:'untoasted91' + table public.toasted_copy: INSERT: id[integer]:94 data[text]:'untoasted92' + table public.toasted_copy: INSERT: id[integer]:95 data[text]:'untoasted93' + table public.toasted_copy: INSERT: id[integer]:96 data[text]:'untoasted94' + table public.toasted_copy: INSERT: id[integer]:97 data[text]:'untoasted95' + table public.toasted_copy: INSERT: id[integer]:98 data[text]:'untoasted96' + table public.toasted_copy: INSERT: id[integer]:99 data[text]:'untoasted97' + table public.toasted_copy: INSERT: id[integer]:100 data[text]:'untoasted98' + table public.toasted_copy: INSERT: id[integer]:101 data[text]:'untoasted99' + table public.toasted_copy: INSERT: id[integer]:102 data[text]:'untoasted100' + table public.toasted_copy: INSERT: id[integer]:103 data[text]:'untoasted101' + table public.toasted_copy: INSERT: id[integer]:104 data[text]:'untoasted102' + table public.toasted_copy: INSERT: id[integer]:105 data[text]:'untoasted103' + table public.toasted_copy: INSERT: id[integer]:106 data[text]:'untoasted104' + table public.toasted_copy: INSERT: id[integer]:107 data[text]:'untoasted105' + table public.toasted_copy: INSERT: id[integer]:108 data[text]:'untoasted106' + table public.toasted_copy: INSERT: id[integer]:109 data[text]:'untoasted107' + table public.toasted_copy: INSERT: id[integer]:110 data[text]:'untoasted108' + table public.toasted_copy: INSERT: id[integer]:111 data[text]:'untoasted109' + table public.toasted_copy: INSERT: id[integer]:112 data[text]:'untoasted110' + table public.toasted_copy: INSERT: id[integer]:113 data[text]:'untoasted111' + table public.toasted_copy: INSERT: id[integer]:114 data[text]:'untoasted112' + table public.toasted_copy: INSERT: id[integer]:115 data[text]:'untoasted113' + table public.toasted_copy: INSERT: id[integer]:116 data[text]:'untoasted114' + table public.toasted_copy: INSERT: id[integer]:117 data[text]:'untoasted115' + table public.toasted_copy: INSERT: id[integer]:118 data[text]:'untoasted116' + table public.toasted_copy: INSERT: id[integer]:119 data[text]:'untoasted117' + table public.toasted_copy: INSERT: id[integer]:120 data[text]:'untoasted118' + table public.toasted_copy: INSERT: id[integer]:121 data[text]:'untoasted119' + table public.toasted_copy: INSERT: id[integer]:122 data[text]:'untoasted120' + table public.toasted_copy: INSERT: id[integer]:123 data[text]:'untoasted121' + table public.toasted_copy: INSERT: id[integer]:124 data[text]:'untoasted122' + table public.toasted_copy: INSERT: id[integer]:125 data[text]:'untoasted123' + table public.toasted_copy: INSERT: id[integer]:126 data[text]:'untoasted124' + table 
public.toasted_copy: INSERT: id[integer]:127 data[text]:'untoasted125' + table public.toasted_copy: INSERT: id[integer]:128 data[text]:'untoasted126' + table public.toasted_copy: INSERT: id[integer]:129 data[text]:'untoasted127' + table public.toasted_copy: INSERT: id[integer]:130 data[text]:'untoasted128' + table public.toasted_copy: INSERT: id[integer]:131 data[text]:'untoasted129' + table public.toasted_copy: INSERT: id[integer]:132 data[text]:'untoasted130' + table public.toasted_copy: INSERT: id[integer]:133 data[text]:'untoasted131' + table public.toasted_copy: INSERT: id[integer]:134 data[text]:'untoasted132' + table public.toasted_copy: INSERT: id[integer]:135 data[text]:'untoasted133' + table public.toasted_copy: INSERT: id[integer]:136 data[text]:'untoasted134' + table public.toasted_copy: INSERT: id[integer]:137 data[text]:'untoasted135' + table public.toasted_copy: INSERT: id[integer]:138 data[text]:'untoasted136' + table public.toasted_copy: INSERT: id[integer]:139 data[text]:'untoasted137' + table public.toasted_copy: INSERT: id[integer]:140 data[text]:'untoasted138' + table public.toasted_copy: INSERT: id[integer]:141 data[text]:'untoasted139' + table public.toasted_copy: INSERT: id[integer]:142 data[text]:'untoasted140' + table public.toasted_copy: INSERT: id[integer]:143 data[text]:'untoasted141' + table public.toasted_copy: INSERT: id[integer]:144 data[text]:'untoasted142' + table public.toasted_copy: INSERT: id[integer]:145 data[text]:'untoasted143' + table public.toasted_copy: INSERT: id[integer]:146 data[text]:'untoasted144' + table public.toasted_copy: INSERT: id[integer]:147 data[text]:'untoasted145' + table public.toasted_copy: INSERT: id[integer]:148 data[text]:'untoasted146' + table public.toasted_copy: INSERT: id[integer]:149 data[text]:'untoasted147' + table public.toasted_copy: INSERT: id[integer]:150 data[text]:'untoasted148' + table public.toasted_copy: INSERT: id[integer]:151 data[text]:'untoasted149' + table public.toasted_copy: INSERT: id[integer]:152 data[text]:'untoasted150' + table public.toasted_copy: INSERT: id[integer]:153 data[text]:'untoasted151' + table public.toasted_copy: INSERT: id[integer]:154 data[text]:'untoasted152' + table public.toasted_copy: INSERT: id[integer]:155 data[text]:'untoasted153' + table public.toasted_copy: INSERT: id[integer]:156 data[text]:'untoasted154' + table public.toasted_copy: INSERT: id[integer]:157 data[text]:'untoasted155' + table public.toasted_copy: INSERT: id[integer]:158 data[text]:'untoasted156' + table public.toasted_copy: INSERT: id[integer]:159 data[text]:'untoasted157' + table public.toasted_copy: INSERT: id[integer]:160 data[text]:'untoasted158' + table public.toasted_copy: INSERT: id[integer]:161 data[text]:'untoasted159' + table public.toasted_copy: INSERT: id[integer]:162 data[text]:'untoasted160' + table public.toasted_copy: INSERT: id[integer]:163 data[text]:'untoasted161' + table public.toasted_copy: INSERT: id[integer]:164 data[text]:'untoasted162' + table public.toasted_copy: INSERT: id[integer]:165 data[text]:'untoasted163' + table public.toasted_copy: INSERT: id[integer]:166 data[text]:'untoasted164' + table public.toasted_copy: INSERT: id[integer]:167 data[text]:'untoasted165' + table public.toasted_copy: INSERT: id[integer]:168 data[text]:'untoasted166' + table public.toasted_copy: INSERT: id[integer]:169 data[text]:'untoasted167' + table public.toasted_copy: INSERT: id[integer]:170 data[text]:'untoasted168' + table public.toasted_copy: INSERT: id[integer]:171 data[text]:'untoasted169' + table 
public.toasted_copy: INSERT: id[integer]:172 data[text]:'untoasted170' + table public.toasted_copy: INSERT: id[integer]:173 data[text]:'untoasted171' + table public.toasted_copy: INSERT: id[integer]:174 data[text]:'untoasted172' + table public.toasted_copy: INSERT: id[integer]:175 data[text]:'untoasted173' + table public.toasted_copy: INSERT: id[integer]:176 data[text]:'untoasted174' + table public.toasted_copy: INSERT: id[integer]:177 data[text]:'untoasted175' + table public.toasted_copy: INSERT: id[integer]:178 data[text]:'untoasted176' + table public.toasted_copy: INSERT: id[integer]:179 data[text]:'untoasted177' + table public.toasted_copy: INSERT: id[integer]:180 data[text]:'untoasted178' + table public.toasted_copy: INSERT: id[integer]:181 data[text]:'untoasted179' + table public.toasted_copy: INSERT: id[integer]:182 data[text]:'untoasted180' + table public.toasted_copy: INSERT: id[integer]:183 data[text]:'untoasted181' + table public.toasted_copy: INSERT: id[integer]:184 data[text]:'untoasted182' + table public.toasted_copy: INSERT: id[integer]:185 data[text]:'untoasted183' + table public.toasted_copy: INSERT: id[integer]:186 data[text]:'untoasted184' + table public.toasted_copy: INSERT: id[integer]:187 data[text]:'untoasted185' + table public.toasted_copy: INSERT: id[integer]:188 data[text]:'untoasted186' + table public.toasted_copy: INSERT: id[integer]:189 data[text]:'untoasted187' + table public.toasted_copy: INSERT: id[integer]:190 data[text]:'untoasted188' + table public.toasted_copy: INSERT: id[integer]:191 data[text]:'untoasted189' + table public.toasted_copy: INSERT: id[integer]:192 data[text]:'untoasted190' + table public.toasted_copy: INSERT: id[integer]:193 data[text]:'untoasted191' + table public.toasted_copy: INSERT: id[integer]:194 data[text]:'untoasted192' + table public.toasted_copy: INSERT: id[integer]:195 data[text]:'untoasted193' + table public.toasted_copy: INSERT: id[integer]:196 data[text]:'untoasted194' + table public.toasted_copy: INSERT: id[integer]:197 data[text]:'untoasted195' + table public.toasted_copy: INSERT: id[integer]:198 data[text]:'untoasted196' + table public.toasted_copy: INSERT: id[integer]:199 data[text]:'untoasted197' + table public.toasted_copy: INSERT: id[integer]:200 data[text]:'untoasted198' + table public.toasted_copy: INSERT: id[integer]:201 data[text]:'toasted3-12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678 + table public.toasted_copy: INSERT: id[integer]:202 data[text]:'untoasted199' + table public.toasted_copy: INSERT: id[integer]:203 data[text]:'untoasted200' + COMMIT +(232 rows) SELECT pg_drop_replication_slot('regression_slot'); pg_drop_replication_slot diff --git a/contrib/test_decoding/specs/concurrent_ddl_dml.spec b/contrib/test_decoding/specs/concurrent_ddl_dml.spec index 7c8a7c7977..8cc5fa42ee 100644 --- a/contrib/test_decoding/specs/concurrent_ddl_dml.spec +++ b/contrib/test_decoding/specs/concurrent_ddl_dml.spec @@ -50,7 +50,7 @@ step "s2_alter_tbl2_3rd_char" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE characte step "s2_alter_tbl2_3rd_text" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE text; } step "s2_alter_tbl2_3rd_int" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE int USING val3::integer; } -step "s2_get_changes" { SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); } +step "s2_get_changes" { SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM 
pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); }
diff --git a/contrib/test_decoding/specs/ondisk_startup.spec b/contrib/test_decoding/specs/ondisk_startup.spec
new file mode 100644
index 0000000000..39c4a223ae
--- /dev/null
+++ b/contrib/test_decoding/specs/ondisk_startup.spec
@@ -0,0 +1,43 @@
+# Force usage of ondisk decoding snapshots to test that code path.
+setup
+{
+ DROP TABLE IF EXISTS do_write;
+ CREATE TABLE do_write(id serial primary key);
+}
+
+teardown
+{
+ DROP TABLE do_write;
+ SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot');
+}
+
+
+session "s1"
+setup { SET synchronous_commit=on; }
+
+step "s1init" {SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');}
+step "s1start" {SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');}
+step "s1insert" { INSERT INTO do_write DEFAULT VALUES; }
+step "s1checkpoint" { CHECKPOINT; }
+step "s1alter" { ALTER TABLE do_write ADD COLUMN addedbys1 int; }
+
+session "s2"
+setup { SET synchronous_commit=on; }
+
+step "s2txid" { BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT txid_current() IS NULL; }
+step "s2alter" { ALTER TABLE do_write ADD COLUMN addedbys2 int; }
+step "s2c" { COMMIT; }
+
+
+session "s3"
+setup { SET synchronous_commit=on; }
+
+step "s3txid" { BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT txid_current() IS NULL; }
+step "s3c" { COMMIT; }
+
+# Force usage of ondisk snapshot by starting and not finishing a
+# transaction with an assigned xid after consistency has been
+# reached. In combination with a checkpoint forcing a snapshot to be
+# written and a new restart point computed that'll lead to the usage
+# of the snapshot.
+permutation "s2txid" "s1init" "s3txid" "s2alter" "s2c" "s1insert" "s1checkpoint" "s1start" "s1insert" "s1alter" "s1insert" "s1start"
diff --git a/contrib/test_decoding/sql/ddl.sql b/contrib/test_decoding/sql/ddl.sql index 87e74c64f3..03314d18ac 100644 --- a/contrib/test_decoding/sql/ddl.sql +++ b/contrib/test_decoding/sql/ddl.sql @@ -19,7 +19,7 @@ SELECT pg_drop_replication_slot('regression_slot'); -- check that we're detecting a streaming rep slot used for logical decoding SELECT 'init' FROM pg_create_physical_replication_slot('repl'); -SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('repl'); @@ -64,11 +64,11 @@ ALTER TABLE replication_example RENAME COLUMN text TO somenum; INSERT INTO replication_example(somedata, somenum) VALUES (4, 1); -- collect all changes -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4); -- throw away changes, they contain oids -SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); INSERT INTO replication_example(somedata, somenum) VALUES (5, 1); @@ -82,21 +82,21 @@ INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 4, 2); COMMIT; -- show changes -SELECT data FROM 
pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- hide changes bc of oid visible in full table rewrites CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int); INSERT INTO tr_unique(data) VALUES(10); ALTER TABLE tr_unique RENAME TO tr_pkey; ALTER TABLE tr_pkey ADD COLUMN id serial primary key; -SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); INSERT INTO tr_pkey(data) VALUES(1); --show deletion with primary key DELETE FROM tr_pkey; /* display results */ -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); /* * check that disk spooling works @@ -110,7 +110,7 @@ COMMIT; /* display results, but hide most of the output */ SELECT count(*), min(data), max(data) -FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0') +FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') GROUP BY substring(data, 1, 24) ORDER BY 1,2; @@ -138,7 +138,7 @@ INSERT INTO tr_sub(path) VALUES ('1-top-2-#1'); RELEASE SAVEPOINT b; COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- check that we handle xlog assignments correctly BEGIN; @@ -167,7 +167,7 @@ RELEASE SAVEPOINT subtop; INSERT INTO tr_sub(path) VALUES ('2-top-#1'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- make sure rollbacked subtransactions aren't decoded BEGIN; @@ -180,7 +180,7 @@ ROLLBACK TO SAVEPOINT b; INSERT INTO tr_sub(path) VALUES ('3-top-2-#2'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- test whether a known, but not yet logged toplevel xact, followed by a -- subxact commit is handled correctly @@ -199,7 +199,7 @@ INSERT INTO tr_sub(path) VALUES ('5-top-1-#1'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); /* @@ -239,7 +239,7 @@ ALTER TABLE replication_metadata SET (user_catalog_table = false); INSERT INTO replication_metadata(relation, options) VALUES ('zaphod', NULL); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); /* * check whether we handle updates/deletes correct with & without a pkey @@ -315,7 +315,7 @@ UPDATE toasttable SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i)) WHERE id = 1; -SELECT data FROM 
pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); INSERT INTO toasttable(toasted_col1) SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i); @@ -327,10 +327,10 @@ WHERE id = 1; -- make sure we decode correctly even if the toast table is gone DROP TABLE toasttable; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- done, free logical replication slot -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/decoding_in_xact.sql b/contrib/test_decoding/sql/decoding_in_xact.sql index 2771afee7a..990f61885e 100644 --- a/contrib/test_decoding/sql/decoding_in_xact.sql +++ b/contrib/test_decoding/sql/decoding_in_xact.sql @@ -32,10 +32,10 @@ BEGIN; SELECT txid_current() = 0; -- don't show yet, haven't committed INSERT INTO nobarf(data) VALUES('2'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); COMMIT; INSERT INTO nobarf(data) VALUES('3'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT 'stop' FROM pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/decoding_into_rel.sql b/contrib/test_decoding/sql/decoding_into_rel.sql new file mode 100644 index 0000000000..54670fd39e --- /dev/null +++ b/contrib/test_decoding/sql/decoding_into_rel.sql @@ -0,0 +1,31 @@ +-- test that we can insert the result of a get_changes call into a +-- logged relation. That's really not a good idea in practical terms, +-- but provides a nice test. 
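The new test below leans on the difference between the peek and get variants of the changes function. A compressed sketch of that behaviour (the capture table name is illustrative; the slot is the one the test itself creates):

CREATE TABLE captured(data text);
-- peek leaves the changes in the slot ...
INSERT INTO captured
  SELECT data FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL,
      'include-xids', '0', 'skip-empty-xacts', '1');
-- ... while get consumes them, so the same rows arrive one more (and last) time
INSERT INTO captured
  SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL,
      'include-xids', '0', 'skip-empty-xacts', '1');
SELECT count(*) FROM captured;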
+ +-- predictability +SET synchronous_commit = on; + +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + +-- slot works +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +-- create some changes +CREATE TABLE somechange(id serial primary key); +INSERT INTO somechange DEFAULT VALUES; + +CREATE TABLE changeresult AS + SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +SELECT * FROM changeresult; + +INSERT INTO changeresult + SELECT data FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +INSERT INTO changeresult + SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +SELECT * FROM changeresult; +DROP TABLE changeresult; +DROP TABLE somechange; +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT 'stop' FROM pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/permissions.sql b/contrib/test_decoding/sql/permissions.sql index 39d70b56b0..8680c55771 100644 --- a/contrib/test_decoding/sql/permissions.sql +++ b/contrib/test_decoding/sql/permissions.sql @@ -11,7 +11,7 @@ CREATE TABLE lr_test(data text); SET ROLE lr_superuser; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); INSERT INTO lr_test VALUES('lr_superuser_init'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); RESET ROLE; @@ -19,7 +19,7 @@ RESET ROLE; SET ROLE lr_replication; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); INSERT INTO lr_test VALUES('lr_superuser_init'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); RESET ROLE; @@ -27,7 +27,7 @@ RESET ROLE; SET ROLE lr_normal; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); INSERT INTO lr_test VALUES('lr_superuser_init'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); RESET ROLE; diff --git a/contrib/test_decoding/sql/prepared.sql b/contrib/test_decoding/sql/prepared.sql index 652f3d3f44..e72639767e 100644 --- a/contrib/test_decoding/sql/prepared.sql +++ b/contrib/test_decoding/sql/prepared.sql @@ -45,6 +45,6 @@ DROP TABLE test_prepared1; DROP TABLE test_prepared2; -- show results -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/rewrite.sql b/contrib/test_decoding/sql/rewrite.sql index 
9a3dcbf857..8a7329423d 100644 --- a/contrib/test_decoding/sql/rewrite.sql +++ b/contrib/test_decoding/sql/rewrite.sql @@ -6,7 +6,7 @@ DROP TABLE IF EXISTS replication_example; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120)); INSERT INTO replication_example(somedata) VALUES (1); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); BEGIN; INSERT INTO replication_example(somedata) VALUES (2); @@ -56,7 +56,7 @@ COMMIT; -- make old files go away CHECKPOINT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); DROP TABLE IF EXISTS replication_example; diff --git a/contrib/test_decoding/sql/toast.sql b/contrib/test_decoding/sql/toast.sql index 943db9d2ee..09293865df 100644 --- a/contrib/test_decoding/sql/toast.sql +++ b/contrib/test_decoding/sql/toast.sql @@ -47,5 +47,217 @@ UPDATE toasted_key SET toasted_key = toasted_key || '1'; DELETE FROM toasted_key; -SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +-- Test that HEAP2_MULTI_INSERT insertions with and without toasted +-- columns are handled correctly +CREATE TABLE toasted_copy ( + id int primary key, -- no default, copy didn't use to handle that with multi inserts + data text +); +ALTER TABLE toasted_copy ALTER COLUMN data SET STORAGE EXTERNAL; +\copy toasted_copy FROM STDIN +1 untoasted1 +2 
toasted1-12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 +3 untoasted2 +4 
toasted2-12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 +5 untoasted3 +6 untoasted4 +7 untoasted5 +8 untoasted6 +9 untoasted7 +10 untoasted8 +11 untoasted9 +12 untoasted10 +13 untoasted11 +14 untoasted12 +15 untoasted13 +16 untoasted14 +17 untoasted15 +18 untoasted16 +19 untoasted17 +20 untoasted18 +21 untoasted19 +22 untoasted20 +23 untoasted21 +24 untoasted22 +25 untoasted23 +26 untoasted24 +27 untoasted25 +28 untoasted26 +29 untoasted27 +30 untoasted28 +31 untoasted29 +32 untoasted30 +33 untoasted31 +34 untoasted32 +35 untoasted33 +36 untoasted34 +37 untoasted35 +38 untoasted36 +39 untoasted37 +40 untoasted38 +41 untoasted39 +42 untoasted40 +43 untoasted41 +44 untoasted42 +45 untoasted43 +46 untoasted44 +47 untoasted45 +48 untoasted46 +49 untoasted47 +50 untoasted48 +51 untoasted49 +52 untoasted50 +53 untoasted51 +54 untoasted52 +55 untoasted53 +56 untoasted54 +57 untoasted55 +58 untoasted56 +59 untoasted57 +60 untoasted58 +61 untoasted59 +62 untoasted60 +63 untoasted61 +64 untoasted62 +65 untoasted63 +66 untoasted64 +67 untoasted65 +68 untoasted66 +69 untoasted67 +70 untoasted68 +71 untoasted69 +72 untoasted70 +73 untoasted71 +74 untoasted72 +75 untoasted73 +76 untoasted74 +77 untoasted75 +78 untoasted76 +79 untoasted77 +80 untoasted78 +81 untoasted79 +82 untoasted80 +83 untoasted81 +84 untoasted82 +85 untoasted83 +86 untoasted84 +87 untoasted85 +88 untoasted86 +89 untoasted87 +90 untoasted88 +91 untoasted89 +92 untoasted90 +93 untoasted91 +94 untoasted92 +95 untoasted93 +96 untoasted94 +97 untoasted95 +98 untoasted96 +99 untoasted97 +100 untoasted98 +101 untoasted99 
+102 untoasted100 +103 untoasted101 +104 untoasted102 +105 untoasted103 +106 untoasted104 +107 untoasted105 +108 untoasted106 +109 untoasted107 +110 untoasted108 +111 untoasted109 +112 untoasted110 +113 untoasted111 +114 untoasted112 +115 untoasted113 +116 untoasted114 +117 untoasted115 +118 untoasted116 +119 untoasted117 +120 untoasted118 +121 untoasted119 +122 untoasted120 +123 untoasted121 +124 untoasted122 +125 untoasted123 +126 untoasted124 +127 untoasted125 +128 untoasted126 +129 untoasted127 +130 untoasted128 +131 untoasted129 +132 untoasted130 +133 untoasted131 +134 untoasted132 +135 untoasted133 +136 untoasted134 +137 untoasted135 +138 untoasted136 +139 untoasted137 +140 untoasted138 +141 untoasted139 +142 untoasted140 +143 untoasted141 +144 untoasted142 +145 untoasted143 +146 untoasted144 +147 untoasted145 +148 untoasted146 +149 untoasted147 +150 untoasted148 +151 untoasted149 +152 untoasted150 +153 untoasted151 +154 untoasted152 +155 untoasted153 +156 untoasted154 +157 untoasted155 +158 untoasted156 +159 untoasted157 +160 untoasted158 +161 untoasted159 +162 untoasted160 +163 untoasted161 +164 untoasted162 +165 untoasted163 +166 untoasted164 +167 untoasted165 +168 untoasted166 +169 untoasted167 +170 untoasted168 +171 untoasted169 +172 untoasted170 +173 untoasted171 +174 untoasted172 +175 untoasted173 +176 untoasted174 +177 untoasted175 +178 untoasted176 +179 untoasted177 +180 untoasted178 +181 untoasted179 +182 untoasted180 +183 untoasted181 +184 untoasted182 +185 untoasted183 +186 untoasted184 +187 untoasted185 +188 untoasted186 +189 untoasted187 +190 untoasted188 +191 untoasted189 +192 untoasted190 +193 untoasted191 +194 untoasted192 +195 untoasted193 +196 untoasted194 +197 untoasted195 +198 untoasted196 +199 untoasted197 +200 untoasted198 +201 
toasted3-12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 +202 untoasted199 +203 untoasted200 +\. 
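The statement that follows verifies the decoded stream for the \copy above: the whole multi-insert decodes as a single transaction with one INSERT line per copied row, and substr() keeps the very long toasted values readable. A non-consuming variant of the same check, using peek so the stream stays in place (the column width of 40 and the LIMIT are arbitrary):

SELECT substr(data, 1, 40)
FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL,
    'include-xids', '0', 'skip-empty-xacts', '1')
LIMIT 10;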
+SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index 5ce052b5c6..963d5df9da 100644 --- a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -3,7 +3,7 @@ * test_decoding.c * example logical decoding output plugin * - * Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Copyright (c) 2012-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/test_decoding/test_decoding.c @@ -30,7 +30,6 @@ #include "utils/syscache.h" #include "utils/typcache.h" - PG_MODULE_MAGIC; /* These must be available to pg_dlsym() */ @@ -42,6 +41,8 @@ typedef struct MemoryContext context; bool include_xids; bool include_timestamp; + bool skip_empty_xacts; + bool xact_wrote_changes; } TestDecodingData; static void pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, @@ -49,6 +50,10 @@ static void pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions * static void pg_decode_shutdown(LogicalDecodingContext *ctx); static void pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn); +static void pg_output_begin(LogicalDecodingContext *ctx, + TestDecodingData *data, + ReorderBufferTXN *txn, + bool last_write); static void pg_decode_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn); static void pg_decode_change(LogicalDecodingContext *ctx, @@ -83,7 +88,7 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, ListCell *option; TestDecodingData *data; - data = palloc(sizeof(TestDecodingData)); + data = palloc0(sizeof(TestDecodingData)); data->context = AllocSetContextCreate(ctx->context, "text conversion context", ALLOCSET_DEFAULT_MINSIZE, @@ -91,6 +96,7 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, ALLOCSET_DEFAULT_MAXSIZE); data->include_xids = true; data->include_timestamp = false; + data->skip_empty_xacts = false; ctx->output_plugin_private = data; @@ -138,6 +144,17 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, if (force_binary) opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT; } + else if (strcmp(elem->defname, "skip-empty-xacts") == 0) + { + + if (elem->arg == NULL) + data->skip_empty_xacts = true; + else if (!parse_bool(strVal(elem->arg), &data->skip_empty_xacts)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse value \"%s\" for parameter \"%s\"", + strVal(elem->arg), elem->defname))); + } else { ereport(ERROR, @@ -165,12 +182,22 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { TestDecodingData *data = ctx->output_plugin_private; - OutputPluginPrepareWrite(ctx, true); + data->xact_wrote_changes = false; + if (data->skip_empty_xacts) + return; + + pg_output_begin(ctx, data, txn, true); +} + +static void +pg_output_begin(LogicalDecodingContext *ctx, TestDecodingData *data, ReorderBufferTXN *txn, bool last_write) +{ + OutputPluginPrepareWrite(ctx, last_write); if (data->include_xids) appendStringInfo(ctx->out, "BEGIN %u", txn->xid); else appendStringInfoString(ctx->out, "BEGIN"); - OutputPluginWrite(ctx, true); + OutputPluginWrite(ctx, last_write); } /* COMMIT callback */ @@ -180,6 +207,9 @@ pg_decode_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, { TestDecodingData *data = 
ctx->output_plugin_private; + if (data->skip_empty_xacts && !data->xact_wrote_changes) + return; + OutputPluginPrepareWrite(ctx, true); if (data->include_xids) appendStringInfo(ctx->out, "COMMIT %u", txn->xid); @@ -339,6 +369,14 @@ pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, MemoryContext old; data = ctx->output_plugin_private; + + /* output BEGIN if we haven't yet */ + if (data->skip_empty_xacts && !data->xact_wrote_changes) + { + pg_output_begin(ctx, data, txn, false); + } + data->xact_wrote_changes = true; + class_form = RelationGetForm(relation); tupdesc = RelationGetDescr(relation); diff --git a/contrib/test_parser/.gitignore b/contrib/test_parser/.gitignore deleted file mode 100644 index 5dcb3ff972..0000000000 --- a/contrib/test_parser/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# Generated subdirectories -/log/ -/results/ -/tmp_check/ diff --git a/contrib/test_parser/Makefile b/contrib/test_parser/Makefile deleted file mode 100644 index b9766cb023..0000000000 --- a/contrib/test_parser/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# contrib/test_parser/Makefile - -MODULE_big = test_parser -OBJS = test_parser.o - -EXTENSION = test_parser -DATA = test_parser--1.0.sql test_parser--unpackaged--1.0.sql - -REGRESS = test_parser - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/test_parser -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif diff --git a/contrib/test_parser/expected/test_parser.out b/contrib/test_parser/expected/test_parser.out deleted file mode 100644 index 8a49bc01e3..0000000000 --- a/contrib/test_parser/expected/test_parser.out +++ /dev/null @@ -1,44 +0,0 @@ -CREATE EXTENSION test_parser; --- make test configuration using parser -CREATE TEXT SEARCH CONFIGURATION testcfg (PARSER = testparser); -ALTER TEXT SEARCH CONFIGURATION testcfg ADD MAPPING FOR word WITH simple; --- ts_parse -SELECT * FROM ts_parse('testparser', 'That''s simple parser can''t parse urls like http://some.url/here/'); - tokid | token --------+----------------------- - 3 | That's - 12 | - 3 | simple - 12 | - 3 | parser - 12 | - 3 | can't - 12 | - 3 | parse - 12 | - 3 | urls - 12 | - 3 | like - 12 | - 3 | http://some.url/here/ -(15 rows) - -SELECT to_tsvector('testcfg','That''s my first own parser'); - to_tsvector -------------------------------------------------- - 'first':3 'my':2 'own':4 'parser':5 'that''s':1 -(1 row) - -SELECT to_tsquery('testcfg', 'star'); - to_tsquery ------------- - 'star' -(1 row) - -SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies', - to_tsquery('testcfg', 'stars')); - ts_headline ------------------------------------------------------------------ - Supernovae <b>stars</b> are the brightest phenomena in galaxies -(1 row) - diff --git a/contrib/test_parser/sql/test_parser.sql b/contrib/test_parser/sql/test_parser.sql deleted file mode 100644 index 1f21504602..0000000000 --- a/contrib/test_parser/sql/test_parser.sql +++ /dev/null @@ -1,18 +0,0 @@ -CREATE EXTENSION test_parser; - --- make test configuration using parser - -CREATE TEXT SEARCH CONFIGURATION testcfg (PARSER = testparser); - -ALTER TEXT SEARCH CONFIGURATION testcfg ADD MAPPING FOR word WITH simple; - --- ts_parse - -SELECT * FROM ts_parse('testparser', 'That''s simple parser can''t parse urls like http://some.url/here/'); - -SELECT to_tsvector('testcfg','That''s my first own parser'); - -SELECT to_tsquery('testcfg', 'star'); - 
-SELECT ts_headline('testcfg','Supernovae stars are the brightest phenomena in galaxies', - to_tsquery('testcfg', 'stars')); diff --git a/contrib/test_parser/test_parser--1.0.sql b/contrib/test_parser/test_parser--1.0.sql deleted file mode 100644 index 06c94d353b..0000000000 --- a/contrib/test_parser/test_parser--1.0.sql +++ /dev/null @@ -1,32 +0,0 @@ -/* contrib/test_parser/test_parser--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION test_parser" to load this file. \quit - -CREATE FUNCTION testprs_start(internal, int4) -RETURNS internal -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION testprs_getlexeme(internal, internal, internal) -RETURNS internal -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION testprs_end(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION testprs_lextype(internal) -RETURNS internal -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE TEXT SEARCH PARSER testparser ( - START = testprs_start, - GETTOKEN = testprs_getlexeme, - END = testprs_end, - HEADLINE = pg_catalog.prsd_headline, - LEXTYPES = testprs_lextype -); diff --git a/contrib/test_parser/test_parser--unpackaged--1.0.sql b/contrib/test_parser/test_parser--unpackaged--1.0.sql deleted file mode 100644 index 34120f2346..0000000000 --- a/contrib/test_parser/test_parser--unpackaged--1.0.sql +++ /dev/null @@ -1,10 +0,0 @@ -/* contrib/test_parser/test_parser--unpackaged--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION test_parser" to load this file. \quit - -ALTER EXTENSION test_parser ADD function testprs_start(internal,integer); -ALTER EXTENSION test_parser ADD function testprs_getlexeme(internal,internal,internal); -ALTER EXTENSION test_parser ADD function testprs_end(internal); -ALTER EXTENSION test_parser ADD function testprs_lextype(internal); -ALTER EXTENSION test_parser ADD text search parser testparser; diff --git a/contrib/test_parser/test_parser.c b/contrib/test_parser/test_parser.c deleted file mode 100644 index c41d1eb201..0000000000 --- a/contrib/test_parser/test_parser.c +++ /dev/null @@ -1,129 +0,0 @@ -/*------------------------------------------------------------------------- - * - * test_parser.c - * Simple example of a text search parser - * - * Copyright (c) 2007-2014, PostgreSQL Global Development Group - * - * IDENTIFICATION - * contrib/test_parser/test_parser.c - * - *------------------------------------------------------------------------- - */ -#include "postgres.h" - -#include "fmgr.h" - -PG_MODULE_MAGIC; - - -/* - * types - */ - -/* self-defined type */ -typedef struct -{ - char *buffer; /* text to parse */ - int len; /* length of the text in buffer */ - int pos; /* position of the parser */ -} ParserState; - -/* copy-paste from wparser.h of tsearch2 */ -typedef struct -{ - int lexid; - char *alias; - char *descr; -} LexDescr; - -/* - * functions - */ -PG_FUNCTION_INFO_V1(testprs_start); -PG_FUNCTION_INFO_V1(testprs_getlexeme); -PG_FUNCTION_INFO_V1(testprs_end); -PG_FUNCTION_INFO_V1(testprs_lextype); - -Datum -testprs_start(PG_FUNCTION_ARGS) -{ - ParserState *pst = (ParserState *) palloc0(sizeof(ParserState)); - - pst->buffer = (char *) PG_GETARG_POINTER(0); - pst->len = PG_GETARG_INT32(1); - pst->pos = 0; - - PG_RETURN_POINTER(pst); -} - -Datum -testprs_getlexeme(PG_FUNCTION_ARGS) -{ - ParserState *pst = (ParserState *) PG_GETARG_POINTER(0); - char **t = (char **) PG_GETARG_POINTER(1); - int *tlen 
= (int *) PG_GETARG_POINTER(2); - int startpos = pst->pos; - int type; - - *t = pst->buffer + pst->pos; - - if (pst->pos < pst->len && - (pst->buffer)[pst->pos] == ' ') - { - /* blank type */ - type = 12; - /* go to the next non-space character */ - while (pst->pos < pst->len && - (pst->buffer)[pst->pos] == ' ') - (pst->pos)++; - } - else - { - /* word type */ - type = 3; - /* go to the next space character */ - while (pst->pos < pst->len && - (pst->buffer)[pst->pos] != ' ') - (pst->pos)++; - } - - *tlen = pst->pos - startpos; - - /* we are finished if (*tlen == 0) */ - if (*tlen == 0) - type = 0; - - PG_RETURN_INT32(type); -} - -Datum -testprs_end(PG_FUNCTION_ARGS) -{ - ParserState *pst = (ParserState *) PG_GETARG_POINTER(0); - - pfree(pst); - PG_RETURN_VOID(); -} - -Datum -testprs_lextype(PG_FUNCTION_ARGS) -{ - /* - * Remarks: - we have to return the blanks for headline reason - we use - * the same lexids like Teodor in the default word parser; in this way we - * can reuse the headline function of the default word parser. - */ - LexDescr *descr = (LexDescr *) palloc(sizeof(LexDescr) * (2 + 1)); - - /* there are only two types in this parser */ - descr[0].lexid = 3; - descr[0].alias = pstrdup("word"); - descr[0].descr = pstrdup("Word"); - descr[1].lexid = 12; - descr[1].alias = pstrdup("blank"); - descr[1].descr = pstrdup("Space symbols"); - descr[2].lexid = 0; - - PG_RETURN_POINTER(descr); -} diff --git a/contrib/test_parser/test_parser.control b/contrib/test_parser/test_parser.control deleted file mode 100644 index 36b26b2087..0000000000 --- a/contrib/test_parser/test_parser.control +++ /dev/null @@ -1,5 +0,0 @@ -# test_parser extension -comment = 'example of a custom parser for full-text search' -default_version = '1.0' -module_pathname = '$libdir/test_parser' -relocatable = true diff --git a/contrib/test_shm_mq/.gitignore b/contrib/test_shm_mq/.gitignore deleted file mode 100644 index 5dcb3ff972..0000000000 --- a/contrib/test_shm_mq/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# Generated subdirectories -/log/ -/results/ -/tmp_check/ diff --git a/contrib/test_shm_mq/Makefile b/contrib/test_shm_mq/Makefile deleted file mode 100644 index 5e5ac1ceb8..0000000000 --- a/contrib/test_shm_mq/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# contrib/test_shm_mq/Makefile - -MODULE_big = test_shm_mq -OBJS = test.o setup.o worker.o - -EXTENSION = test_shm_mq -DATA = test_shm_mq--1.0.sql - -REGRESS = test_shm_mq - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/test_shm_mq -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif diff --git a/contrib/test_shm_mq/expected/test_shm_mq.out b/contrib/test_shm_mq/expected/test_shm_mq.out deleted file mode 100644 index c4858b0c20..0000000000 --- a/contrib/test_shm_mq/expected/test_shm_mq.out +++ /dev/null @@ -1,36 +0,0 @@ -CREATE EXTENSION test_shm_mq; --- --- These tests don't produce any interesting output. We're checking that --- the operations complete without crashing or hanging and that none of their --- internal sanity tests fail. 
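For reference, the SQL-level interface of the module being removed here, per test_shm_mq--1.0.sql further down in this diff: a queue size in bytes, the message to circulate around the worker ring, a repeat count, a worker count, and, for the pipelined variant, an optional verify flag. Illustrative calls (values chosen arbitrarily):

SELECT test_shm_mq(1024, 'hello', 100, 2);
SELECT test_shm_mq_pipelined(16384, repeat('x', 1000), 50, 2, true);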
--- -SELECT test_shm_mq(1024, '', 2000, 1); - test_shm_mq -------------- - -(1 row) - -SELECT test_shm_mq(1024, 'a', 2001, 1); - test_shm_mq -------------- - -(1 row) - -SELECT test_shm_mq(32768, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+900*random())::int)), 10000, 1); - test_shm_mq -------------- - -(1 row) - -SELECT test_shm_mq(100, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+200*random())::int)), 10000, 1); - test_shm_mq -------------- - -(1 row) - -SELECT test_shm_mq_pipelined(16384, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,270000)), 200, 3); - test_shm_mq_pipelined ------------------------ - -(1 row) - diff --git a/contrib/test_shm_mq/setup.c b/contrib/test_shm_mq/setup.c deleted file mode 100644 index 572cf8898f..0000000000 --- a/contrib/test_shm_mq/setup.c +++ /dev/null @@ -1,328 +0,0 @@ -/*-------------------------------------------------------------------------- - * - * setup.c - * Code to set up a dynamic shared memory segments and a specified - * number of background workers for shared memory message queue - * testing. - * - * Copyright (C) 2013, PostgreSQL Global Development Group - * - * IDENTIFICATION - * contrib/test_shm_mq/setup.c - * - * ------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "miscadmin.h" -#include "postmaster/bgworker.h" -#include "storage/procsignal.h" -#include "storage/shm_toc.h" -#include "utils/memutils.h" - -#include "test_shm_mq.h" - -typedef struct -{ - int nworkers; - BackgroundWorkerHandle *handle[FLEXIBLE_ARRAY_MEMBER]; -} worker_state; - -static void setup_dynamic_shared_memory(int64 queue_size, int nworkers, - dsm_segment **segp, - test_shm_mq_header **hdrp, - shm_mq **outp, shm_mq **inp); -static worker_state *setup_background_workers(int nworkers, - dsm_segment *seg); -static void cleanup_background_workers(dsm_segment *seg, Datum arg); -static void wait_for_workers_to_become_ready(worker_state *wstate, - volatile test_shm_mq_header *hdr); -static bool check_worker_status(worker_state *wstate); - -/* - * Set up a dynamic shared memory segment and zero or more background workers - * for a test run. - */ -void -test_shm_mq_setup(int64 queue_size, int32 nworkers, dsm_segment **segp, - shm_mq_handle **output, shm_mq_handle **input) -{ - dsm_segment *seg; - test_shm_mq_header *hdr; - shm_mq *outq = NULL; /* placate compiler */ - shm_mq *inq = NULL; /* placate compiler */ - worker_state *wstate; - - /* Set up a dynamic shared memory segment. */ - setup_dynamic_shared_memory(queue_size, nworkers, &seg, &hdr, &outq, &inq); - *segp = seg; - - /* Register background workers. */ - wstate = setup_background_workers(nworkers, seg); - - /* Attach the queues. */ - *output = shm_mq_attach(outq, seg, wstate->handle[0]); - *input = shm_mq_attach(inq, seg, wstate->handle[nworkers - 1]); - - /* Wait for workers to become ready. */ - wait_for_workers_to_become_ready(wstate, hdr); - - /* - * Once we reach this point, all workers are ready. We no longer need to - * kill them if we die; they'll die on their own as the message queues - * shut down. - */ - cancel_on_dsm_detach(seg, cleanup_background_workers, - PointerGetDatum(wstate)); - pfree(wstate); -} - -/* - * Set up a dynamic shared memory segment. - * - * We set up a small control region that contains only a test_shm_mq_header, - * plus one region per message queue. There are as many message queues as - * the number of workers, plus one. 
- */ -static void -setup_dynamic_shared_memory(int64 queue_size, int nworkers, - dsm_segment **segp, test_shm_mq_header **hdrp, - shm_mq **outp, shm_mq **inp) -{ - shm_toc_estimator e; - int i; - Size segsize; - dsm_segment *seg; - shm_toc *toc; - test_shm_mq_header *hdr; - - /* Ensure a valid queue size. */ - if (queue_size < 0 || ((uint64) queue_size) < shm_mq_minimum_size) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("queue size must be at least %zu bytes", - shm_mq_minimum_size))); - if (queue_size != ((Size) queue_size)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("queue size overflows size_t"))); - - /* - * Estimate how much shared memory we need. - * - * Because the TOC machinery may choose to insert padding of oddly-sized - * requests, we must estimate each chunk separately. - * - * We need one key to register the location of the header, and we need - * nworkers + 1 keys to track the locations of the message queues. - */ - shm_toc_initialize_estimator(&e); - shm_toc_estimate_chunk(&e, sizeof(test_shm_mq_header)); - for (i = 0; i <= nworkers; ++i) - shm_toc_estimate_chunk(&e, (Size) queue_size); - shm_toc_estimate_keys(&e, 2 + nworkers); - segsize = shm_toc_estimate(&e); - - /* Create the shared memory segment and establish a table of contents. */ - seg = dsm_create(shm_toc_estimate(&e)); - toc = shm_toc_create(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg), - segsize); - - /* Set up the header region. */ - hdr = shm_toc_allocate(toc, sizeof(test_shm_mq_header)); - SpinLockInit(&hdr->mutex); - hdr->workers_total = nworkers; - hdr->workers_attached = 0; - hdr->workers_ready = 0; - shm_toc_insert(toc, 0, hdr); - - /* Set up one message queue per worker, plus one. */ - for (i = 0; i <= nworkers; ++i) - { - shm_mq *mq; - - mq = shm_mq_create(shm_toc_allocate(toc, (Size) queue_size), - (Size) queue_size); - shm_toc_insert(toc, i + 1, mq); - - if (i == 0) - { - /* We send messages to the first queue. */ - shm_mq_set_sender(mq, MyProc); - *outp = mq; - } - if (i == nworkers) - { - /* We receive messages from the last queue. */ - shm_mq_set_receiver(mq, MyProc); - *inp = mq; - } - } - - /* Return results to caller. */ - *segp = seg; - *hdrp = hdr; -} - -/* - * Register background workers. - */ -static worker_state * -setup_background_workers(int nworkers, dsm_segment *seg) -{ - MemoryContext oldcontext; - BackgroundWorker worker; - worker_state *wstate; - int i; - - /* - * We need the worker_state object and the background worker handles to - * which it points to be allocated in CurTransactionContext rather than - * ExprContext; otherwise, they'll be destroyed before the on_dsm_detach - * hooks run. - */ - oldcontext = MemoryContextSwitchTo(CurTransactionContext); - - /* Create worker state object. */ - wstate = MemoryContextAlloc(TopTransactionContext, - offsetof(worker_state, handle) + - sizeof(BackgroundWorkerHandle *) * nworkers); - wstate->nworkers = 0; - - /* - * Arrange to kill all the workers if we abort before all workers are - * finished hooking themselves up to the dynamic shared memory segment. - * - * If we die after all the workers have finished hooking themselves up to - * the dynamic shared memory segment, we'll mark the two queues to which - * we're directly connected as detached, and the worker(s) connected to - * those queues will exit, marking any other queues to which they are - * connected as detached. 
This will cause any as-yet-unaware workers - * connected to those queues to exit in their turn, and so on, until - * everybody exits. - * - * But suppose the workers which are supposed to connect to the queues to - * which we're directly attached exit due to some error before they - * actually attach the queues. The remaining workers will have no way of - * knowing this. From their perspective, they're still waiting for those - * workers to start, when in fact they've already died. - */ - on_dsm_detach(seg, cleanup_background_workers, - PointerGetDatum(wstate)); - - /* Configure a worker. */ - worker.bgw_flags = BGWORKER_SHMEM_ACCESS; - worker.bgw_start_time = BgWorkerStart_ConsistentState; - worker.bgw_restart_time = BGW_NEVER_RESTART; - worker.bgw_main = NULL; /* new worker might not have library loaded */ - sprintf(worker.bgw_library_name, "test_shm_mq"); - sprintf(worker.bgw_function_name, "test_shm_mq_main"); - snprintf(worker.bgw_name, BGW_MAXLEN, "test_shm_mq"); - worker.bgw_main_arg = UInt32GetDatum(dsm_segment_handle(seg)); - /* set bgw_notify_pid, so we can detect if the worker stops */ - worker.bgw_notify_pid = MyProcPid; - - /* Register the workers. */ - for (i = 0; i < nworkers; ++i) - { - if (!RegisterDynamicBackgroundWorker(&worker, &wstate->handle[i])) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("could not register background process"), - errhint("You may need to increase max_worker_processes."))); - ++wstate->nworkers; - } - - /* All done. */ - MemoryContextSwitchTo(oldcontext); - return wstate; -} - -static void -cleanup_background_workers(dsm_segment *seg, Datum arg) -{ - worker_state *wstate = (worker_state *) DatumGetPointer(arg); - - while (wstate->nworkers > 0) - { - --wstate->nworkers; - TerminateBackgroundWorker(wstate->handle[wstate->nworkers]); - } -} - -static void -wait_for_workers_to_become_ready(worker_state *wstate, - volatile test_shm_mq_header *hdr) -{ - bool save_set_latch_on_sigusr1; - bool result = false; - - save_set_latch_on_sigusr1 = set_latch_on_sigusr1; - set_latch_on_sigusr1 = true; - - PG_TRY(); - { - for (;;) - { - int workers_ready; - - /* If all the workers are ready, we have succeeded. */ - SpinLockAcquire(&hdr->mutex); - workers_ready = hdr->workers_ready; - SpinLockRelease(&hdr->mutex); - if (workers_ready >= wstate->nworkers) - { - result = true; - break; - } - - /* If any workers (or the postmaster) have died, we have failed. */ - if (!check_worker_status(wstate)) - { - result = false; - break; - } - - /* Wait to be signalled. */ - WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0); - - /* An interrupt may have occurred while we were waiting. */ - CHECK_FOR_INTERRUPTS(); - - /* Reset the latch so we don't spin. */ - ResetLatch(&MyProc->procLatch); - } - } - PG_CATCH(); - { - set_latch_on_sigusr1 = save_set_latch_on_sigusr1; - PG_RE_THROW(); - } - PG_END_TRY(); - - if (!result) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("one or more background workers failed to start"))); -} - -static bool -check_worker_status(worker_state *wstate) -{ - int n; - - /* If any workers (or the postmaster) have died, we have failed. */ - for (n = 0; n < wstate->nworkers; ++n) - { - BgwHandleStatus status; - pid_t pid; - - status = GetBackgroundWorkerPid(wstate->handle[n], &pid); - if (status == BGWH_STOPPED || status == BGWH_POSTMASTER_DIED) - return false; - } - - /* Otherwise, things still look OK. 
*/ - return true; -} diff --git a/contrib/test_shm_mq/sql/test_shm_mq.sql b/contrib/test_shm_mq/sql/test_shm_mq.sql deleted file mode 100644 index 9de19d304a..0000000000 --- a/contrib/test_shm_mq/sql/test_shm_mq.sql +++ /dev/null @@ -1,12 +0,0 @@ -CREATE EXTENSION test_shm_mq; - --- --- These tests don't produce any interesting output. We're checking that --- the operations complete without crashing or hanging and that none of their --- internal sanity tests fail. --- -SELECT test_shm_mq(1024, '', 2000, 1); -SELECT test_shm_mq(1024, 'a', 2001, 1); -SELECT test_shm_mq(32768, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+900*random())::int)), 10000, 1); -SELECT test_shm_mq(100, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,(100+200*random())::int)), 10000, 1); -SELECT test_shm_mq_pipelined(16384, (select string_agg(chr(32+(random()*95)::int), '') from generate_series(1,270000)), 200, 3); diff --git a/contrib/test_shm_mq/test.c b/contrib/test_shm_mq/test.c deleted file mode 100644 index 8750bae8db..0000000000 --- a/contrib/test_shm_mq/test.c +++ /dev/null @@ -1,262 +0,0 @@ -/*-------------------------------------------------------------------------- - * - * test.c - * Test harness code for shared memory message queues. - * - * Copyright (C) 2013, PostgreSQL Global Development Group - * - * IDENTIFICATION - * contrib/test_shm_mq/test.c - * - * ------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "fmgr.h" -#include "miscadmin.h" - -#include "test_shm_mq.h" - -PG_MODULE_MAGIC; PG_FUNCTION_INFO_V1(test_shm_mq); -PG_FUNCTION_INFO_V1(test_shm_mq_pipelined); - -void _PG_init(void); - -static void verify_message(Size origlen, char *origdata, Size newlen, - char *newdata); - -/* - * Simple test of the shared memory message queue infrastructure. - * - * We set up a ring of message queues passing through 1 or more background - * processes and eventually looping back to ourselves. We then send a message - * through the ring a number of times indicated by the loop count. At the end, - * we check whether the final message matches the one we started with. - */ -Datum -test_shm_mq(PG_FUNCTION_ARGS) -{ - int64 queue_size = PG_GETARG_INT64(0); - text *message = PG_GETARG_TEXT_PP(1); - char *message_contents = VARDATA_ANY(message); - int message_size = VARSIZE_ANY_EXHDR(message); - int32 loop_count = PG_GETARG_INT32(2); - int32 nworkers = PG_GETARG_INT32(3); - dsm_segment *seg; - shm_mq_handle *outqh; - shm_mq_handle *inqh; - shm_mq_result res; - Size len; - void *data; - - /* A negative loopcount is nonsensical. */ - if (loop_count < 0) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("repeat count size must be a non-negative integer"))); - - /* - * Since this test sends data using the blocking interfaces, it cannot - * send data to itself. Therefore, a minimum of 1 worker is required. Of - * course, a negative worker count is nonsensical. - */ - if (nworkers < 1) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("number of workers must be a positive integer"))); - - /* Set up dynamic shared memory segment and background workers. */ - test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh); - - /* Send the initial message. 
*/ - res = shm_mq_send(outqh, message_size, message_contents, false); - if (res != SHM_MQ_SUCCESS) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not send message"))); - - /* - * Receive a message and send it back out again. Do this a number of - * times equal to the loop count. - */ - for (;;) - { - /* Receive a message. */ - res = shm_mq_receive(inqh, &len, &data, false); - if (res != SHM_MQ_SUCCESS) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not receive message"))); - - /* If this is supposed to be the last iteration, stop here. */ - if (--loop_count <= 0) - break; - - /* Send it back out. */ - res = shm_mq_send(outqh, len, data, false); - if (res != SHM_MQ_SUCCESS) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not send message"))); - } - - /* - * Finally, check that we got back the same message from the last - * iteration that we originally sent. - */ - verify_message(message_size, message_contents, len, data); - - /* Clean up. */ - dsm_detach(seg); - - PG_RETURN_VOID(); -} - -/* - * Pipelined test of the shared memory message queue infrastructure. - * - * As in the basic test, we set up a ring of message queues passing through - * 1 or more background processes and eventually looping back to ourselves. - * Then, we send N copies of the user-specified message through the ring and - * receive them all back. Since this might fill up all message queues in the - * ring and then stall, we must be prepared to begin receiving the messages - * back before we've finished sending them. - */ -Datum -test_shm_mq_pipelined(PG_FUNCTION_ARGS) -{ - int64 queue_size = PG_GETARG_INT64(0); - text *message = PG_GETARG_TEXT_PP(1); - char *message_contents = VARDATA_ANY(message); - int message_size = VARSIZE_ANY_EXHDR(message); - int32 loop_count = PG_GETARG_INT32(2); - int32 nworkers = PG_GETARG_INT32(3); - bool verify = PG_GETARG_BOOL(4); - int32 send_count = 0; - int32 receive_count = 0; - dsm_segment *seg; - shm_mq_handle *outqh; - shm_mq_handle *inqh; - shm_mq_result res; - Size len; - void *data; - - /* A negative loopcount is nonsensical. */ - if (loop_count < 0) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("repeat count size must be a non-negative integer"))); - - /* - * Using the nonblocking interfaces, we can even send data to ourselves, - * so the minimum number of workers for this test is zero. - */ - if (nworkers < 0) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("number of workers must be a non-negative integer"))); - - /* Set up dynamic shared memory segment and background workers. */ - test_shm_mq_setup(queue_size, nworkers, &seg, &outqh, &inqh); - - /* Main loop. */ - for (;;) - { - bool wait = true; - - /* - * If we haven't yet sent the message the requisite number of times, - * try again to send it now. Note that when shm_mq_send() returns - * SHM_MQ_WOULD_BLOCK, the next call to that function must pass the - * same message size and contents; that's not an issue here because - * we're sending the same message every time. 
- */ - if (send_count < loop_count) - { - res = shm_mq_send(outqh, message_size, message_contents, true); - if (res == SHM_MQ_SUCCESS) - { - ++send_count; - wait = false; - } - else if (res == SHM_MQ_DETACHED) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not send message"))); - } - - /* - * If we haven't yet received the message the requisite number of - * times, try to receive it again now. - */ - if (receive_count < loop_count) - { - res = shm_mq_receive(inqh, &len, &data, true); - if (res == SHM_MQ_SUCCESS) - { - ++receive_count; - /* Verifying every time is slow, so it's optional. */ - if (verify) - verify_message(message_size, message_contents, len, data); - wait = false; - } - else if (res == SHM_MQ_DETACHED) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not receive message"))); - } - else - { - /* - * Otherwise, we've received the message enough times. This - * shouldn't happen unless we've also sent it enough times. - */ - if (send_count != receive_count) - ereport(ERROR, - (errcode(ERRCODE_INTERNAL_ERROR), - errmsg("message sent %d times, but received %d times", - send_count, receive_count))); - break; - } - - if (wait) - { - /* - * If we made no progress, wait for one of the other processes to - * which we are connected to set our latch, indicating that they - * have read or written data and therefore there may now be work - * for us to do. - */ - WaitLatch(&MyProc->procLatch, WL_LATCH_SET, 0); - CHECK_FOR_INTERRUPTS(); - ResetLatch(&MyProc->procLatch); - } - } - - /* Clean up. */ - dsm_detach(seg); - - PG_RETURN_VOID(); -} - -/* - * Verify that two messages are the same. - */ -static void -verify_message(Size origlen, char *origdata, Size newlen, char *newdata) -{ - Size i; - - if (origlen != newlen) - ereport(ERROR, - (errmsg("message corrupted"), - errdetail("The original message was %zu bytes but the final message is %zu bytes.", - origlen, newlen))); - - for (i = 0; i < origlen; ++i) - if (origdata[i] != newdata[i]) - ereport(ERROR, - (errmsg("message corrupted"), - errdetail("The new and original messages differ at byte %zu of %zu.", i, origlen))); -} diff --git a/contrib/test_shm_mq/test_shm_mq--1.0.sql b/contrib/test_shm_mq/test_shm_mq--1.0.sql deleted file mode 100644 index 54b225e2ae..0000000000 --- a/contrib/test_shm_mq/test_shm_mq--1.0.sql +++ /dev/null @@ -1,19 +0,0 @@ -/* contrib/test_shm_mq/test_shm_mq--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION test_shm_mq" to load this file. 
\quit - -CREATE FUNCTION test_shm_mq(queue_size pg_catalog.int8, - message pg_catalog.text, - repeat_count pg_catalog.int4 default 1, - num_workers pg_catalog.int4 default 1) - RETURNS pg_catalog.void STRICT - AS 'MODULE_PATHNAME' LANGUAGE C; - -CREATE FUNCTION test_shm_mq_pipelined(queue_size pg_catalog.int8, - message pg_catalog.text, - repeat_count pg_catalog.int4 default 1, - num_workers pg_catalog.int4 default 1, - verify pg_catalog.bool default true) - RETURNS pg_catalog.void STRICT - AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/contrib/test_shm_mq/test_shm_mq.control b/contrib/test_shm_mq/test_shm_mq.control deleted file mode 100644 index d9a74c7a32..0000000000 --- a/contrib/test_shm_mq/test_shm_mq.control +++ /dev/null @@ -1,4 +0,0 @@ -comment = 'Test code for shared memory message queues' -default_version = '1.0' -module_pathname = '$libdir/test_shm_mq' -relocatable = true diff --git a/contrib/test_shm_mq/test_shm_mq.h b/contrib/test_shm_mq/test_shm_mq.h deleted file mode 100644 index 7ebfba902f..0000000000 --- a/contrib/test_shm_mq/test_shm_mq.h +++ /dev/null @@ -1,45 +0,0 @@ -/*-------------------------------------------------------------------------- - * - * test_shm_mq.h - * Definitions for shared memory message queues - * - * Copyright (C) 2013, PostgreSQL Global Development Group - * - * IDENTIFICATION - * contrib/test_shm_mq/test_shm_mq.h - * - * ------------------------------------------------------------------------- - */ - -#ifndef TEST_SHM_MQ_H -#define TEST_SHM_MQ_H - -#include "storage/dsm.h" -#include "storage/shm_mq.h" -#include "storage/spin.h" - -/* Identifier for shared memory segments used by this extension. */ -#define PG_TEST_SHM_MQ_MAGIC 0x79fb2447 - -/* - * This structure is stored in the dynamic shared memory segment. We use - * it to determine whether all workers started up OK and successfully - * attached to their respective shared message queues. - */ -typedef struct -{ - slock_t mutex; - int workers_total; - int workers_attached; - int workers_ready; -} test_shm_mq_header; - -/* Set up dynamic shared memory and background workers for test run. */ -extern void test_shm_mq_setup(int64 queue_size, int32 nworkers, - dsm_segment **seg, shm_mq_handle **output, - shm_mq_handle **input); - -/* Main entrypoint for a worker. */ -extern void test_shm_mq_main(Datum); - -#endif diff --git a/contrib/test_shm_mq/worker.c b/contrib/test_shm_mq/worker.c deleted file mode 100644 index 0d66c92ddb..0000000000 --- a/contrib/test_shm_mq/worker.c +++ /dev/null @@ -1,224 +0,0 @@ -/*-------------------------------------------------------------------------- - * - * worker.c - * Code for sample worker making use of shared memory message queues. - * Our test worker simply reads messages from one message queue and - * writes them back out to another message queue. In a real - * application, you'd presumably want the worker to do some more - * complex calculation rather than simply returning the input, - * but it should be possible to use much of the control logic just - * as presented here. 
- * - * Copyright (C) 2013, PostgreSQL Global Development Group - * - * IDENTIFICATION - * contrib/test_shm_mq/worker.c - * - * ------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "miscadmin.h" -#include "storage/ipc.h" -#include "storage/procarray.h" -#include "storage/shm_mq.h" -#include "storage/shm_toc.h" -#include "utils/resowner.h" - -#include "test_shm_mq.h" - -static void handle_sigterm(SIGNAL_ARGS); -static void attach_to_queues(dsm_segment *seg, shm_toc *toc, - int myworkernumber, shm_mq_handle **inqhp, - shm_mq_handle **outqhp); -static void copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh); - -/* - * Background worker entrypoint. - * - * This is intended to demonstrate how a background worker can be used to - * facilitate a parallel computation. Most of the logic here is fairly - * boilerplate stuff, designed to attach to the shared memory segment, - * notify the user backend that we're alive, and so on. The - * application-specific bits of logic that you'd replace for your own worker - * are attach_to_queues() and copy_messages(). - */ -void -test_shm_mq_main(Datum main_arg) -{ - dsm_segment *seg; - shm_toc *toc; - shm_mq_handle *inqh; - shm_mq_handle *outqh; - volatile test_shm_mq_header *hdr; - int myworkernumber; - PGPROC *registrant; - - /* - * Establish signal handlers. - * - * We want CHECK_FOR_INTERRUPTS() to kill off this worker process just as - * it would a normal user backend. To make that happen, we establish a - * signal handler that is a stripped-down version of die(). We don't have - * any equivalent of the backend's command-read loop, where interrupts can - * be processed immediately, so make sure ImmediateInterruptOK is turned - * off. - */ - pqsignal(SIGTERM, handle_sigterm); - ImmediateInterruptOK = false; - BackgroundWorkerUnblockSignals(); - - /* - * Connect to the dynamic shared memory segment. - * - * The backend that registered this worker passed us the ID of a shared - * memory segment to which we must attach for further instructions. In - * order to attach to dynamic shared memory, we need a resource owner. - * Once we've mapped the segment in our address space, attach to the table - * of contents so we can locate the various data structures we'll need to - * find within the segment. - */ - CurrentResourceOwner = ResourceOwnerCreate(NULL, "test_shm_mq worker"); - seg = dsm_attach(DatumGetInt32(main_arg)); - if (seg == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("unable to map dynamic shared memory segment"))); - toc = shm_toc_attach(PG_TEST_SHM_MQ_MAGIC, dsm_segment_address(seg)); - if (toc == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("bad magic number in dynamic shared memory segment"))); - - /* - * Acquire a worker number. - * - * By convention, the process registering this background worker should - * have stored the control structure at key 0. We look up that key to - * find it. Our worker number gives our identity: there may be just one - * worker involved in this parallel operation, or there may be many. - */ - hdr = shm_toc_lookup(toc, 0); - SpinLockAcquire(&hdr->mutex); - myworkernumber = ++hdr->workers_attached; - SpinLockRelease(&hdr->mutex); - if (myworkernumber > hdr->workers_total) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("too many message queue testing workers already"))); - - /* - * Attach to the appropriate message queues. 
- */ - attach_to_queues(seg, toc, myworkernumber, &inqh, &outqh); - - /* - * Indicate that we're fully initialized and ready to begin the main part - * of the parallel operation. - * - * Once we signal that we're ready, the user backend is entitled to assume - * that our on_dsm_detach callbacks will fire before we disconnect from - * the shared memory segment and exit. Generally, that means we must have - * attached to all relevant dynamic shared memory data structures by now. - */ - SpinLockAcquire(&hdr->mutex); - ++hdr->workers_ready; - SpinLockRelease(&hdr->mutex); - registrant = BackendPidGetProc(MyBgworkerEntry->bgw_notify_pid); - if (registrant == NULL) - { - elog(DEBUG1, "registrant backend has exited prematurely"); - proc_exit(1); - } - SetLatch(®istrant->procLatch); - - /* Do the work. */ - copy_messages(inqh, outqh); - - /* - * We're done. Explicitly detach the shared memory segment so that we - * don't get a resource leak warning at commit time. This will fire any - * on_dsm_detach callbacks we've registered, as well. Once that's done, - * we can go ahead and exit. - */ - dsm_detach(seg); - proc_exit(1); -} - -/* - * Attach to shared memory message queues. - * - * We use our worker number to determine to which queue we should attach. - * The queues are registered at keys 1..<number-of-workers>. The user backend - * writes to queue #1 and reads from queue #<number-of-workers>; each worker - * reads from the queue whose number is equal to its worker number and writes - * to the next higher-numbered queue. - */ -static void -attach_to_queues(dsm_segment *seg, shm_toc *toc, int myworkernumber, - shm_mq_handle **inqhp, shm_mq_handle **outqhp) -{ - shm_mq *inq; - shm_mq *outq; - - inq = shm_toc_lookup(toc, myworkernumber); - shm_mq_set_receiver(inq, MyProc); - *inqhp = shm_mq_attach(inq, seg, NULL); - outq = shm_toc_lookup(toc, myworkernumber + 1); - shm_mq_set_sender(outq, MyProc); - *outqhp = shm_mq_attach(outq, seg, NULL); -} - -/* - * Loop, receiving and sending messages, until the connection is broken. - * - * This is the "real work" performed by this worker process. Everything that - * happens before this is initialization of one form or another, and everything - * after this point is cleanup. - */ -static void -copy_messages(shm_mq_handle *inqh, shm_mq_handle *outqh) -{ - Size len; - void *data; - shm_mq_result res; - - for (;;) - { - /* Notice any interrupts that have occurred. */ - CHECK_FOR_INTERRUPTS(); - - /* Receive a message. */ - res = shm_mq_receive(inqh, &len, &data, false); - if (res != SHM_MQ_SUCCESS) - break; - - /* Send it back out. */ - res = shm_mq_send(outqh, len, data, false); - if (res != SHM_MQ_SUCCESS) - break; - } -} - -/* - * When we receive a SIGTERM, we set InterruptPending and ProcDiePending just - * like a normal backend. The next CHECK_FOR_INTERRUPTS() will do the right - * thing. 
- */ -static void -handle_sigterm(SIGNAL_ARGS) -{ - int save_errno = errno; - - if (MyProc) - SetLatch(&MyProc->procLatch); - - if (!proc_exit_inprogress) - { - InterruptPending = true; - ProcDiePending = true; - } - - errno = save_errno; -} diff --git a/contrib/tsearch2/Makefile b/contrib/tsearch2/Makefile index d260fd0030..36dcedc688 100644 --- a/contrib/tsearch2/Makefile +++ b/contrib/tsearch2/Makefile @@ -4,6 +4,7 @@ MODULES = tsearch2 EXTENSION = tsearch2 DATA = tsearch2--1.0.sql tsearch2--unpackaged--1.0.sql +PGFILEDESC = "tsearch2 - backward-compatible text search functionality" REGRESS = tsearch2 diff --git a/contrib/tsearch2/tsearch2--unpackaged--1.0.sql b/contrib/tsearch2/tsearch2--unpackaged--1.0.sql index af970a4862..e123297132 100644 --- a/contrib/tsearch2/tsearch2--unpackaged--1.0.sql +++ b/contrib/tsearch2/tsearch2--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/tsearch2/tsearch2--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION tsearch2" to load this file. \quit +\echo Use "CREATE EXTENSION tsearch2 FROM unpackaged" to load this file. \quit ALTER EXTENSION tsearch2 ADD type @extschema@.tsvector; ALTER EXTENSION tsearch2 ADD type @extschema@.tsquery; diff --git a/contrib/tsearch2/tsearch2.c b/contrib/tsearch2/tsearch2.c index bd30d87515..143dabba40 100644 --- a/contrib/tsearch2/tsearch2.c +++ b/contrib/tsearch2/tsearch2.c @@ -3,7 +3,7 @@ * tsearch2.c * Backwards-compatibility package for old contrib/tsearch2 API * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * * * IDENTIFICATION diff --git a/contrib/unaccent/Makefile b/contrib/unaccent/Makefile index f0be62db53..bc93e6f8dd 100644 --- a/contrib/unaccent/Makefile +++ b/contrib/unaccent/Makefile @@ -1,11 +1,12 @@ # contrib/unaccent/Makefile MODULE_big = unaccent -OBJS = unaccent.o +OBJS = unaccent.o $(WIN32RES) EXTENSION = unaccent DATA = unaccent--1.0.sql unaccent--unpackaged--1.0.sql DATA_TSEARCH = unaccent.rules +PGFILEDESC = "unaccent - text search dictionary that removes accents" REGRESS = unaccent diff --git a/contrib/unaccent/unaccent--unpackaged--1.0.sql b/contrib/unaccent/unaccent--unpackaged--1.0.sql index abd06983ac..f3fb5d8760 100644 --- a/contrib/unaccent/unaccent--unpackaged--1.0.sql +++ b/contrib/unaccent/unaccent--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/unaccent/unaccent--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION unaccent" to load this file. \quit +\echo Use "CREATE EXTENSION unaccent FROM unpackaged" to load this file. 
\quit ALTER EXTENSION unaccent ADD function unaccent(regdictionary,text); ALTER EXTENSION unaccent ADD function unaccent(text); diff --git a/contrib/unaccent/unaccent.c b/contrib/unaccent/unaccent.c index a337df61af..eafb4107b0 100644 --- a/contrib/unaccent/unaccent.c +++ b/contrib/unaccent/unaccent.c @@ -3,7 +3,7 @@ * unaccent.c * Text search unaccent dictionary * - * Copyright (c) 2009-2014, PostgreSQL Global Development Group + * Copyright (c) 2009-2015, PostgreSQL Global Development Group * * IDENTIFICATION * contrib/unaccent/unaccent.c @@ -15,6 +15,7 @@ #include "catalog/namespace.h" #include "commands/defrem.h" +#include "lib/stringinfo.h" #include "tsearch/ts_cache.h" #include "tsearch/ts_locale.h" #include "tsearch/ts_public.h" @@ -23,9 +24,16 @@ PG_MODULE_MAGIC; /* - * Unaccent dictionary uses a trie to find a character to replace. Each node of - * the trie is an array of 256 TrieChar structs (n-th element of array - * corresponds to byte) + * An unaccent dictionary uses a trie to find a string to replace. Each node + * of the trie is an array of 256 TrieChar structs; the N-th element of the + * array corresponds to next byte value N. That element can contain both a + * replacement string (to be used if the source string ends with this byte) + * and a link to another trie node (to be followed if there are more bytes). + * + * Note that the trie search logic pays no attention to multibyte character + * boundaries. This is OK as long as both the data entered into the trie and + * the data we're trying to look up are validly encoded; no partial-character + * matches will occur. */ typedef struct TrieChar { @@ -36,34 +44,40 @@ typedef struct TrieChar /* * placeChar - put str into trie's structure, byte by byte. + * + * If node is NULL, we need to make a new node, which will be returned; + * otherwise the return value is the same as node. */ static TrieChar * -placeChar(TrieChar *node, unsigned char *str, int lenstr, char *replaceTo, int replacelen) +placeChar(TrieChar *node, const unsigned char *str, int lenstr, + const char *replaceTo, int replacelen) { TrieChar *curnode; if (!node) - { - node = palloc(sizeof(TrieChar) * 256); - memset(node, 0, sizeof(TrieChar) * 256); - } + node = (TrieChar *) palloc0(sizeof(TrieChar) * 256); + + Assert(lenstr > 0); /* else str[0] doesn't exist */ curnode = node + *str; - if (lenstr == 1) + if (lenstr <= 1) { if (curnode->replaceTo) - elog(WARNING, "duplicate TO argument, use first one"); + ereport(WARNING, + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("duplicate source strings, first one will be used"))); else { curnode->replacelen = replacelen; - curnode->replaceTo = palloc(replacelen); + curnode->replaceTo = (char *) palloc(replacelen); memcpy(curnode->replaceTo, replaceTo, replacelen); } } else { - curnode->nextChar = placeChar(curnode->nextChar, str + 1, lenstr - 1, replaceTo, replacelen); + curnode->nextChar = placeChar(curnode->nextChar, str + 1, lenstr - 1, + replaceTo, replacelen); } return node; @@ -104,11 +118,21 @@ initTrie(char *filename) while ((line = tsearch_readline(&trst)) != NULL) { - /* - * The format of each line must be "src trg" where src and trg - * are sequences of one or more non-whitespace characters, - * separated by whitespace. Whitespace at start or end of - * line is ignored. + /*---------- + * The format of each line must be "src" or "src trg", where + * src and trg are sequences of one or more non-whitespace + * characters, separated by whitespace. Whitespace at start + * or end of line is ignored. 
If trg is omitted, an empty + * string is used as the replacement. + * + * We use a simple state machine, with states + * 0 initial (before src) + * 1 in src + * 2 in whitespace after src + * 3 in trg + * 4 in whitespace after trg + * -1 syntax error detected + *---------- */ int state; char *ptr; @@ -160,10 +184,21 @@ initTrie(char *filename) } } - if (state >= 3) + if (state == 1 || state == 2) + { + /* trg was omitted, so use "" */ + trg = ""; + trglen = 0; + } + + if (state > 0) rootTrie = placeChar(rootTrie, (unsigned char *) src, srclen, trg, trglen); + else if (state < 0) + ereport(WARNING, + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("invalid syntax: more than two strings in unaccent rule"))); pfree(line); } @@ -196,23 +231,35 @@ initTrie(char *filename) } /* - * findReplaceTo - find multibyte character in trie + * findReplaceTo - find longest possible match in trie + * + * On success, returns pointer to ending subnode, plus length of matched + * source string in *p_matchlen. On failure, returns NULL. */ static TrieChar * -findReplaceTo(TrieChar *node, unsigned char *src, int srclen) +findReplaceTo(TrieChar *node, const unsigned char *src, int srclen, + int *p_matchlen) { - while (node) + TrieChar *result = NULL; + int matchlen = 0; + + *p_matchlen = 0; /* prevent uninitialized-variable warnings */ + + while (node && matchlen < srclen) { - node = node + *src; - if (srclen == 1) - return node; + node = node + src[matchlen]; + matchlen++; + + if (node->replaceTo) + { + result = node; + *p_matchlen = matchlen; + } - src++; - srclen--; node = node->nextChar; } - return NULL; + return result; } PG_FUNCTION_INFO_V1(unaccent_init); @@ -263,46 +310,52 @@ unaccent_lexize(PG_FUNCTION_ARGS) TrieChar *rootTrie = (TrieChar *) PG_GETARG_POINTER(0); char *srcchar = (char *) PG_GETARG_POINTER(1); int32 len = PG_GETARG_INT32(2); - char *srcstart, - *trgchar = NULL; - int charlen; - TSLexeme *res = NULL; - TrieChar *node; - - srcstart = srcchar; - while (srcchar - srcstart < len) + char *srcstart = srcchar; + TSLexeme *res; + StringInfoData buf; + + /* we allocate storage for the buffer only if needed */ + buf.data = NULL; + + while (len > 0) { - charlen = pg_mblen(srcchar); + TrieChar *node; + int matchlen; - node = findReplaceTo(rootTrie, (unsigned char *) srcchar, charlen); + node = findReplaceTo(rootTrie, (unsigned char *) srcchar, len, + &matchlen); if (node && node->replaceTo) { - if (!res) + if (buf.data == NULL) { - /* allocate res only if it's needed */ - res = palloc0(sizeof(TSLexeme) * 2); - res->lexeme = trgchar = palloc(len * pg_database_encoding_max_length() + 1 /* \0 */ ); - res->flags = TSL_FILTER; + /* initialize buffer */ + initStringInfo(&buf); + /* insert any data we already skipped over */ if (srcchar != srcstart) - { - memcpy(trgchar, srcstart, srcchar - srcstart); - trgchar += (srcchar - srcstart); - } + appendBinaryStringInfo(&buf, srcstart, srcchar - srcstart); } - memcpy(trgchar, node->replaceTo, node->replacelen); - trgchar += node->replacelen; + appendBinaryStringInfo(&buf, node->replaceTo, node->replacelen); } - else if (res) + else { - memcpy(trgchar, srcchar, charlen); - trgchar += charlen; + matchlen = pg_mblen(srcchar); + if (buf.data != NULL) + appendBinaryStringInfo(&buf, srcchar, matchlen); } - srcchar += charlen; + srcchar += matchlen; + len -= matchlen; } - if (res) - *trgchar = '\0'; + /* return a result only if we made at least one substitution */ + if (buf.data != NULL) + { + res = (TSLexeme *) palloc0(sizeof(TSLexeme) * 2); + res->lexeme = buf.data; + 
res->flags = TSL_FILTER; + } + else + res = NULL; PG_RETURN_POINTER(res); } diff --git a/contrib/uuid-ossp/Makefile b/contrib/uuid-ossp/Makefile index 335cc7ef50..93b9355a0c 100644 --- a/contrib/uuid-ossp/Makefile +++ b/contrib/uuid-ossp/Makefile @@ -1,10 +1,11 @@ # contrib/uuid-ossp/Makefile MODULE_big = uuid-ossp -OBJS = uuid-ossp.o $(UUID_EXTRA_OBJS) +OBJS = uuid-ossp.o $(UUID_EXTRA_OBJS) $(WIN32RES) EXTENSION = uuid-ossp DATA = uuid-ossp--1.0.sql uuid-ossp--unpackaged--1.0.sql +PGFILEDESC = "uuid-ossp - UUID generation" REGRESS = uuid_ossp diff --git a/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql b/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql index 5776b6f930..444c5c7cef 100644 --- a/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql +++ b/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use '''CREATE EXTENSION "uuid-ossp"''' to load this file. \quit +\echo Use '''CREATE EXTENSION "uuid-ossp" FROM unpackaged''' to load this file. \quit ALTER EXTENSION "uuid-ossp" ADD function uuid_nil(); ALTER EXTENSION "uuid-ossp" ADD function uuid_ns_dns(); diff --git a/contrib/uuid-ossp/uuid-ossp.c b/contrib/uuid-ossp/uuid-ossp.c index 9e9905bfde..06fd0c7daa 100644 --- a/contrib/uuid-ossp/uuid-ossp.c +++ b/contrib/uuid-ossp/uuid-ossp.c @@ -2,7 +2,7 @@ * * UUID generation functions using the BSD, E2FS or OSSP UUID library * - * Copyright (c) 2007-2014, PostgreSQL Global Development Group + * Copyright (c) 2007-2015, PostgreSQL Global Development Group * * Portions Copyright (c) 2009 Andrew Gierth * @@ -108,10 +108,8 @@ do { \ #endif /* !HAVE_UUID_OSSP */ - PG_MODULE_MAGIC; - PG_FUNCTION_INFO_V1(uuid_nil); PG_FUNCTION_INFO_V1(uuid_ns_dns); PG_FUNCTION_INFO_V1(uuid_ns_url); diff --git a/contrib/vacuumlo/Makefile b/contrib/vacuumlo/Makefile index b658f9bf6f..b4ba896fba 100644 --- a/contrib/vacuumlo/Makefile +++ b/contrib/vacuumlo/Makefile @@ -4,7 +4,7 @@ PGFILEDESC = "vacuumlo - removes orphaned large objects" PGAPPICON = win32 PROGRAM = vacuumlo -OBJS = vacuumlo.o +OBJS = vacuumlo.o $(WIN32RES) PG_CPPFLAGS = -I$(libpq_srcdir) PG_LIBS = $(libpq_pgport) diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c index c2e5bad438..ca0d3048b8 100644 --- a/contrib/vacuumlo/vacuumlo.c +++ b/contrib/vacuumlo/vacuumlo.c @@ -3,7 +3,7 @@ * vacuumlo.c * This removes orphaned large objects from a database. 
* - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -240,6 +240,12 @@ vacuumlo(const char *database, const struct _param * param) fprintf(stderr, "Out of memory\n"); PQclear(res); PQfinish(conn); + if (schema != NULL) + PQfreemem(schema); + if (table != NULL) + PQfreemem(table); + if (field != NULL) + PQfreemem(field); return -1; } @@ -256,6 +262,9 @@ vacuumlo(const char *database, const struct _param * param) PQclear(res2); PQclear(res); PQfinish(conn); + PQfreemem(schema); + PQfreemem(table); + PQfreemem(field); return -1; } PQclear(res2); diff --git a/contrib/worker_spi/Makefile b/contrib/worker_spi/Makefile deleted file mode 100644 index fbb29b4f2f..0000000000 --- a/contrib/worker_spi/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -# contrib/worker_spi/Makefile - -MODULES = worker_spi - -EXTENSION = worker_spi -DATA = worker_spi--1.0.sql - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/worker_spi -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif diff --git a/contrib/worker_spi/worker_spi--1.0.sql b/contrib/worker_spi/worker_spi--1.0.sql deleted file mode 100644 index 09b7799f2c..0000000000 --- a/contrib/worker_spi/worker_spi--1.0.sql +++ /dev/null @@ -1,9 +0,0 @@ -/* contrib/worker_spi/worker_spi--1.0.sql */ - --- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION worker_spi" to load this file. \quit - -CREATE FUNCTION worker_spi_launch(pg_catalog.int4) -RETURNS pg_catalog.int4 STRICT -AS 'MODULE_PATHNAME' -LANGUAGE C; diff --git a/contrib/worker_spi/worker_spi.c b/contrib/worker_spi/worker_spi.c deleted file mode 100644 index 829de0e6a7..0000000000 --- a/contrib/worker_spi/worker_spi.c +++ /dev/null @@ -1,407 +0,0 @@ -/* ------------------------------------------------------------------------- - * - * worker_spi.c - * Sample background worker code that demonstrates various coding - * patterns: establishing a database connection; starting and committing - * transactions; using GUC variables, and heeding SIGHUP to reread - * the configuration file; reporting to pg_stat_activity; using the - * process latch to sleep and exit in case of postmaster death. - * - * This code connects to a database, creates a schema and table, and summarizes - * the numbers contained therein. To see it working, insert an initial value - * with "total" type and some initial value; then insert some other rows with - * "delta" type. Delta rows will be deleted by this worker and their values - * aggregated into the total. 
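[Editor's note: illustration added in editing, not part of the deleted worker_spi.c. The schema and table mentioned above are created further down as schema<N>."counted" (type text, value integer), and the workers connect to the "postgres" database, so with the default of two workers the behaviour described here could be observed with something like

    INSERT INTO schema1.counted VALUES ('total', 0);
    INSERT INTO schema1.counted VALUES ('delta', 42);

run in the postgres database; on its next wakeup (10 seconds by default) the worker should delete the delta row and fold its value into the total row. The literal values are illustrative only.]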
- * - * Copyright (C) 2013, PostgreSQL Global Development Group - * - * IDENTIFICATION - * contrib/worker_spi/worker_spi.c - * - * ------------------------------------------------------------------------- - */ -#include "postgres.h" - -/* These are always necessary for a bgworker */ -#include "miscadmin.h" -#include "postmaster/bgworker.h" -#include "storage/ipc.h" -#include "storage/latch.h" -#include "storage/lwlock.h" -#include "storage/proc.h" -#include "storage/shmem.h" - -/* these headers are used by this particular worker's code */ -#include "access/xact.h" -#include "executor/spi.h" -#include "fmgr.h" -#include "lib/stringinfo.h" -#include "pgstat.h" -#include "utils/builtins.h" -#include "utils/snapmgr.h" -#include "tcop/utility.h" - -PG_MODULE_MAGIC; - -PG_FUNCTION_INFO_V1(worker_spi_launch); - -void _PG_init(void); -void worker_spi_main(Datum); - -/* flags set by signal handlers */ -static volatile sig_atomic_t got_sighup = false; -static volatile sig_atomic_t got_sigterm = false; - -/* GUC variables */ -static int worker_spi_naptime = 10; -static int worker_spi_total_workers = 2; - - -typedef struct worktable -{ - const char *schema; - const char *name; -} worktable; - -/* - * Signal handler for SIGTERM - * Set a flag to let the main loop to terminate, and set our latch to wake - * it up. - */ -static void -worker_spi_sigterm(SIGNAL_ARGS) -{ - int save_errno = errno; - - got_sigterm = true; - if (MyProc) - SetLatch(&MyProc->procLatch); - - errno = save_errno; -} - -/* - * Signal handler for SIGHUP - * Set a flag to tell the main loop to reread the config file, and set - * our latch to wake it up. - */ -static void -worker_spi_sighup(SIGNAL_ARGS) -{ - int save_errno = errno; - - got_sighup = true; - if (MyProc) - SetLatch(&MyProc->procLatch); - - errno = save_errno; -} - -/* - * Initialize workspace for a worker process: create the schema if it doesn't - * already exist. - */ -static void -initialize_worker_spi(worktable *table) -{ - int ret; - int ntup; - bool isnull; - StringInfoData buf; - - SetCurrentStatementStartTimestamp(); - StartTransactionCommand(); - SPI_connect(); - PushActiveSnapshot(GetTransactionSnapshot()); - pgstat_report_activity(STATE_RUNNING, "initializing spi_worker schema"); - - /* XXX could we use CREATE SCHEMA IF NOT EXISTS? 
*/ - initStringInfo(&buf); - appendStringInfo(&buf, "select count(*) from pg_namespace where nspname = '%s'", - table->schema); - - ret = SPI_execute(buf.data, true, 0); - if (ret != SPI_OK_SELECT) - elog(FATAL, "SPI_execute failed: error code %d", ret); - - if (SPI_processed != 1) - elog(FATAL, "not a singleton result"); - - ntup = DatumGetInt64(SPI_getbinval(SPI_tuptable->vals[0], - SPI_tuptable->tupdesc, - 1, &isnull)); - if (isnull) - elog(FATAL, "null result"); - - if (ntup == 0) - { - resetStringInfo(&buf); - appendStringInfo(&buf, - "CREATE SCHEMA \"%s\" " - "CREATE TABLE \"%s\" (" - " type text CHECK (type IN ('total', 'delta')), " - " value integer)" - "CREATE UNIQUE INDEX \"%s_unique_total\" ON \"%s\" (type) " - "WHERE type = 'total'", - table->schema, table->name, table->name, table->name); - - /* set statement start time */ - SetCurrentStatementStartTimestamp(); - - ret = SPI_execute(buf.data, false, 0); - - if (ret != SPI_OK_UTILITY) - elog(FATAL, "failed to create my schema"); - } - - SPI_finish(); - PopActiveSnapshot(); - CommitTransactionCommand(); - pgstat_report_activity(STATE_IDLE, NULL); -} - -void -worker_spi_main(Datum main_arg) -{ - int index = DatumGetInt32(main_arg); - worktable *table; - StringInfoData buf; - char name[20]; - - table = palloc(sizeof(worktable)); - sprintf(name, "schema%d", index); - table->schema = pstrdup(name); - table->name = pstrdup("counted"); - - /* Establish signal handlers before unblocking signals. */ - pqsignal(SIGHUP, worker_spi_sighup); - pqsignal(SIGTERM, worker_spi_sigterm); - - /* We're now ready to receive signals */ - BackgroundWorkerUnblockSignals(); - - /* Connect to our database */ - BackgroundWorkerInitializeConnection("postgres", NULL); - - elog(LOG, "%s initialized with %s.%s", - MyBgworkerEntry->bgw_name, table->schema, table->name); - initialize_worker_spi(table); - - /* - * Quote identifiers passed to us. Note that this must be done after - * initialize_worker_spi, because that routine assumes the names are not - * quoted. - * - * Note some memory might be leaked here. - */ - table->schema = quote_identifier(table->schema); - table->name = quote_identifier(table->name); - - initStringInfo(&buf); - appendStringInfo(&buf, - "WITH deleted AS (DELETE " - "FROM %s.%s " - "WHERE type = 'delta' RETURNING value), " - "total AS (SELECT coalesce(sum(value), 0) as sum " - "FROM deleted) " - "UPDATE %s.%s " - "SET value = %s.value + total.sum " - "FROM total WHERE type = 'total' " - "RETURNING %s.value", - table->schema, table->name, - table->schema, table->name, - table->name, - table->name); - - /* - * Main loop: do this until the SIGTERM handler tells us to terminate - */ - while (!got_sigterm) - { - int ret; - int rc; - - /* - * Background workers mustn't call usleep() or any direct equivalent: - * instead, they may wait on their process latch, which sleeps as - * necessary, but is awakened if postmaster dies. That way the - * background process goes away immediately in an emergency. - */ - rc = WaitLatch(&MyProc->procLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - worker_spi_naptime * 1000L); - ResetLatch(&MyProc->procLatch); - - /* emergency bailout if postmaster has died */ - if (rc & WL_POSTMASTER_DEATH) - proc_exit(1); - - /* - * In case of a SIGHUP, just reload the configuration. - */ - if (got_sighup) - { - got_sighup = false; - ProcessConfigFile(PGC_SIGHUP); - } - - /* - * Start a transaction on which we can run queries. 
Note that each - * StartTransactionCommand() call should be preceded by a - * SetCurrentStatementStartTimestamp() call, which sets both the time - * for the statement we're about the run, and also the transaction - * start time. Also, each other query sent to SPI should probably be - * preceded by SetCurrentStatementStartTimestamp(), so that statement - * start time is always up to date. - * - * The SPI_connect() call lets us run queries through the SPI manager, - * and the PushActiveSnapshot() call creates an "active" snapshot - * which is necessary for queries to have MVCC data to work on. - * - * The pgstat_report_activity() call makes our activity visible - * through the pgstat views. - */ - SetCurrentStatementStartTimestamp(); - StartTransactionCommand(); - SPI_connect(); - PushActiveSnapshot(GetTransactionSnapshot()); - pgstat_report_activity(STATE_RUNNING, buf.data); - - /* We can now execute queries via SPI */ - ret = SPI_execute(buf.data, false, 0); - - if (ret != SPI_OK_UPDATE_RETURNING) - elog(FATAL, "cannot select from table %s.%s: error code %d", - table->schema, table->name, ret); - - if (SPI_processed > 0) - { - bool isnull; - int32 val; - - val = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0], - SPI_tuptable->tupdesc, - 1, &isnull)); - if (!isnull) - elog(LOG, "%s: count in %s.%s is now %d", - MyBgworkerEntry->bgw_name, - table->schema, table->name, val); - } - - /* - * And finish our transaction. - */ - SPI_finish(); - PopActiveSnapshot(); - CommitTransactionCommand(); - pgstat_report_activity(STATE_IDLE, NULL); - } - - proc_exit(1); -} - -/* - * Entrypoint of this module. - * - * We register more than one worker process here, to demonstrate how that can - * be done. - */ -void -_PG_init(void) -{ - BackgroundWorker worker; - unsigned int i; - - /* get the configuration */ - DefineCustomIntVariable("worker_spi.naptime", - "Duration between each check (in seconds).", - NULL, - &worker_spi_naptime, - 10, - 1, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - - if (!process_shared_preload_libraries_in_progress) - return; - - DefineCustomIntVariable("worker_spi.total_workers", - "Number of workers.", - NULL, - &worker_spi_total_workers, - 2, - 1, - 100, - PGC_POSTMASTER, - 0, - NULL, - NULL, - NULL); - - /* set up common data for all our workers */ - worker.bgw_flags = BGWORKER_SHMEM_ACCESS | - BGWORKER_BACKEND_DATABASE_CONNECTION; - worker.bgw_start_time = BgWorkerStart_RecoveryFinished; - worker.bgw_restart_time = BGW_NEVER_RESTART; - worker.bgw_main = worker_spi_main; - worker.bgw_notify_pid = 0; - - /* - * Now fill in worker-specific data, and do the actual registrations. - */ - for (i = 1; i <= worker_spi_total_workers; i++) - { - snprintf(worker.bgw_name, BGW_MAXLEN, "worker %d", i); - worker.bgw_main_arg = Int32GetDatum(i); - - RegisterBackgroundWorker(&worker); - } -} - -/* - * Dynamically launch an SPI worker. 
- */ -Datum -worker_spi_launch(PG_FUNCTION_ARGS) -{ - int32 i = PG_GETARG_INT32(0); - BackgroundWorker worker; - BackgroundWorkerHandle *handle; - BgwHandleStatus status; - pid_t pid; - - worker.bgw_flags = BGWORKER_SHMEM_ACCESS | - BGWORKER_BACKEND_DATABASE_CONNECTION; - worker.bgw_start_time = BgWorkerStart_RecoveryFinished; - worker.bgw_restart_time = BGW_NEVER_RESTART; - worker.bgw_main = NULL; /* new worker might not have library loaded */ - sprintf(worker.bgw_library_name, "worker_spi"); - sprintf(worker.bgw_function_name, "worker_spi_main"); - snprintf(worker.bgw_name, BGW_MAXLEN, "worker %d", i); - worker.bgw_main_arg = Int32GetDatum(i); - /* set bgw_notify_pid so that we can use WaitForBackgroundWorkerStartup */ - worker.bgw_notify_pid = MyProcPid; - - if (!RegisterDynamicBackgroundWorker(&worker, &handle)) - PG_RETURN_NULL(); - - status = WaitForBackgroundWorkerStartup(handle, &pid); - - if (status == BGWH_STOPPED) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("could not start background process"), - errhint("More details may be available in the server log."))); - if (status == BGWH_POSTMASTER_DIED) - ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("cannot start background processes without postmaster"), - errhint("Kill all remaining database processes and restart the database."))); - Assert(status == BGWH_STARTED); - - PG_RETURN_INT32(pid); -} diff --git a/contrib/worker_spi/worker_spi.control b/contrib/worker_spi/worker_spi.control deleted file mode 100644 index 84d6294628..0000000000 --- a/contrib/worker_spi/worker_spi.control +++ /dev/null @@ -1,5 +0,0 @@ -# worker_spi extension -comment = 'Sample background worker' -default_version = '1.0' -module_pathname = '$libdir/worker_spi' -relocatable = true diff --git a/contrib/xml2/Makefile b/contrib/xml2/Makefile index be3d018cf5..2f7a08cff1 100644 --- a/contrib/xml2/Makefile +++ b/contrib/xml2/Makefile @@ -1,10 +1,11 @@ # contrib/xml2/Makefile MODULE_big = pgxml -OBJS = xpath.o xslt_proc.o +OBJS = xpath.o xslt_proc.o $(WIN32RES) EXTENSION = xml2 DATA = xml2--1.0.sql xml2--unpackaged--1.0.sql +PGFILEDESC = "xml2 - XPath querying and XSLT" REGRESS = xml2 diff --git a/contrib/xml2/xml2--unpackaged--1.0.sql b/contrib/xml2/xml2--unpackaged--1.0.sql index b02dabffc2..8badef3079 100644 --- a/contrib/xml2/xml2--unpackaged--1.0.sql +++ b/contrib/xml2/xml2--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/xml2/xml2--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION xml2" to load this file. \quit +\echo Use "CREATE EXTENSION xml2 FROM unpackaged" to load this file. \quit ALTER EXTENSION xml2 ADD function xslt_process(text,text); ALTER EXTENSION xml2 ADD function xslt_process(text,text,text); diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c index a8b159ebff..655c5322cd 100644 --- a/contrib/xml2/xpath.c +++ b/contrib/xml2/xpath.c @@ -23,7 +23,6 @@ #include <libxml/xmlerror.h> #include <libxml/parserInternals.h> - PG_MODULE_MAGIC; /* exported for use by xslt_proc.c */ @@ -328,7 +327,7 @@ xpath_string(PG_FUNCTION_ARGS) /* We could try casting to string using the libxml function? 
*/ xpath = (xmlChar *) palloc(pathsize + 9); - strncpy((char *) xpath, "string(", 7); + memcpy((char *) xpath, "string(", 7); memcpy((char *) (xpath + 7), VARDATA(xpathsupp), pathsize); xpath[pathsize + 7] = ')'; xpath[pathsize + 8] = '\0'; diff --git a/contrib/xml2/xslt_proc.c b/contrib/xml2/xslt_proc.c index 9f13787332..343924e991 100644 --- a/contrib/xml2/xslt_proc.c +++ b/contrib/xml2/xslt_proc.c @@ -146,16 +146,16 @@ xslt_process(PG_FUNCTION_ARGS) } PG_CATCH(); { - if (stylesheet != NULL) - xsltFreeStylesheet(stylesheet); if (restree != NULL) xmlFreeDoc(restree); - if (doctree != NULL) - xmlFreeDoc(doctree); - if (xslt_sec_prefs != NULL) - xsltFreeSecurityPrefs(xslt_sec_prefs); if (xslt_ctxt != NULL) xsltFreeTransformContext(xslt_ctxt); + if (xslt_sec_prefs != NULL) + xsltFreeSecurityPrefs(xslt_sec_prefs); + if (stylesheet != NULL) + xsltFreeStylesheet(stylesheet); + if (doctree != NULL) + xmlFreeDoc(doctree); xsltCleanupGlobals(); pg_xml_done(xmlerrcxt, true); @@ -164,11 +164,11 @@ xslt_process(PG_FUNCTION_ARGS) } PG_END_TRY(); - xsltFreeStylesheet(stylesheet); xmlFreeDoc(restree); - xmlFreeDoc(doctree); - xsltFreeSecurityPrefs(xslt_sec_prefs); xsltFreeTransformContext(xslt_ctxt); + xsltFreeSecurityPrefs(xslt_sec_prefs); + xsltFreeStylesheet(stylesheet); + xmlFreeDoc(doctree); xsltCleanupGlobals(); pg_xml_done(xmlerrcxt, false); |
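[Editor's note: the various "\echo ... FROM unpackaged" hint corrections in this commit (tsearch2, unaccent, uuid-ossp, xml2) all refer to the upgrade path that PostgreSQL 9.1 introduced for pre-extension installations. Assuming the old loose objects are already present in the database, that path is invoked with, for example,

    CREATE EXTENSION unaccent FROM unpackaged;

which runs the corresponding *--unpackaged--1.0.sql script shown above, packaging the existing functions into the extension rather than creating new ones.]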
