-rw-r--r--  contrib/postgres_fdw/deparse.c  |  50
-rw-r--r--  contrib/postgres_fdw/expected/postgres_fdw.out  |  21
-rw-r--r--  contrib/postgres_fdw/sql/postgres_fdw.sql  |  9
-rw-r--r--  doc/src/sgml/catalogs.sgml  |  2
-rw-r--r--  doc/src/sgml/charset.sgml  |  2
-rw-r--r--  doc/src/sgml/pgoverexplain.sgml  |  17
-rw-r--r--  doc/src/sgml/query.sgml  |  14
-rw-r--r--  doc/src/sgml/ref/create_database.sgml  |  45
-rw-r--r--  doc/src/sgml/ref/createdb.sgml  |  3
-rw-r--r--  src/backend/access/transam/xlogreader.c  |  20
-rw-r--r--  src/backend/commands/trigger.c  |  74
-rw-r--r--  src/backend/executor/execExprInterp.c  |  1
-rw-r--r--  src/backend/executor/execReplication.c  |  4
-rw-r--r--  src/backend/executor/nodeModifyTable.c  |  6
-rw-r--r--  src/backend/main/main.c  |  16
-rw-r--r--  src/backend/storage/lmgr/generate-lwlocknames.pl  |  3
-rw-r--r--  src/backend/storage/lmgr/lwlock.c  |  4
-rw-r--r--  src/backend/tcop/pquery.c  |  25
-rw-r--r--  src/backend/utils/adt/bytea.c  |  61
-rw-r--r--  src/backend/utils/adt/selfuncs.c  |  18
-rw-r--r--  src/backend/utils/init/postinit.c  |  11
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c  |  15
-rw-r--r--  src/bin/pg_dump/pg_dump.c  |  113
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl  |  4
-rw-r--r--  src/include/commands/trigger.h  |  6
-rw-r--r--  src/interfaces/libpq/fe-connect.c  |  6
-rw-r--r--  src/interfaces/libpq/libpq-int.h  |  3
-rw-r--r--  src/test/isolation/expected/merge-match-recheck.out  |  27
-rw-r--r--  src/test/isolation/specs/merge-match-recheck.spec  |  22
-rw-r--r--  src/test/modules/Makefile  |  1
-rw-r--r--  src/test/modules/meson.build  |  1
-rw-r--r--  src/test/modules/test_binaryheap/.gitignore  |  4
-rw-r--r--  src/test/modules/test_binaryheap/Makefile  |  24
-rw-r--r--  src/test/modules/test_binaryheap/expected/test_binaryheap.out  |  12
-rw-r--r--  src/test/modules/test_binaryheap/meson.build  |  33
-rw-r--r--  src/test/modules/test_binaryheap/sql/test_binaryheap.sql  |  8
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap--1.0.sql  |  7
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap.c  |  275
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap.control  |  5
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm  |  26
-rw-r--r--  src/test/recovery/meson.build  |  1
-rw-r--r--  src/test/recovery/t/027_stream_regress.pl  |  11
-rw-r--r--  src/test/recovery/t/046_checkpoint_logical_slot.pl  |  142
-rw-r--r--  src/test/recovery/t/047_checkpoint_physical_slot.pl  |  7
-rw-r--r--  src/test/regress/expected/compression.out  |  235
-rw-r--r--  src/test/regress/expected/compression_1.out  |  360
-rw-r--r--  src/test/regress/expected/compression_lz4.out  |  249
-rw-r--r--  src/test/regress/expected/compression_lz4_1.out  |  7
-rw-r--r--  src/test/regress/expected/constraints.out  |  4
-rw-r--r--  src/test/regress/expected/strings.out  |  12
-rw-r--r--  src/test/regress/parallel_schedule  |  2
-rw-r--r--  src/test/regress/sql/compression.sql  |  84
-rw-r--r--  src/test/regress/sql/compression_lz4.sql  |  129
-rw-r--r--  src/test/regress/sql/constraints.sql  |  6
-rw-r--r--  src/test/regress/sql/strings.sql  |  2
55 files changed, 1415 insertions, 834 deletions
diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c
index 9351835b5e4..e5b5e1a5f51 100644
--- a/contrib/postgres_fdw/deparse.c
+++ b/contrib/postgres_fdw/deparse.c
@@ -161,6 +161,7 @@ static void deparseDistinctExpr(DistinctExpr *node, deparse_expr_cxt *context);
static void deparseScalarArrayOpExpr(ScalarArrayOpExpr *node,
deparse_expr_cxt *context);
static void deparseRelabelType(RelabelType *node, deparse_expr_cxt *context);
+static void deparseArrayCoerceExpr(ArrayCoerceExpr *node, deparse_expr_cxt *context);
static void deparseBoolExpr(BoolExpr *node, deparse_expr_cxt *context);
static void deparseNullTest(NullTest *node, deparse_expr_cxt *context);
static void deparseCaseExpr(CaseExpr *node, deparse_expr_cxt *context);
@@ -702,6 +703,34 @@ foreign_expr_walker(Node *node,
state = FDW_COLLATE_UNSAFE;
}
break;
+ case T_ArrayCoerceExpr:
+ {
+ ArrayCoerceExpr *e = (ArrayCoerceExpr *) node;
+
+ /*
+ * Recurse to input subexpression.
+ */
+ if (!foreign_expr_walker((Node *) e->arg,
+ glob_cxt, &inner_cxt, case_arg_cxt))
+ return false;
+
+ /*
+ * T_ArrayCoerceExpr must not introduce a collation not
+ * derived from an input foreign Var (same logic as for a
+ * function).
+ */
+ collation = e->resultcollid;
+ if (collation == InvalidOid)
+ state = FDW_COLLATE_NONE;
+ else if (inner_cxt.state == FDW_COLLATE_SAFE &&
+ collation == inner_cxt.collation)
+ state = FDW_COLLATE_SAFE;
+ else if (collation == DEFAULT_COLLATION_OID)
+ state = FDW_COLLATE_NONE;
+ else
+ state = FDW_COLLATE_UNSAFE;
+ }
+ break;
case T_BoolExpr:
{
BoolExpr *b = (BoolExpr *) node;
@@ -2919,6 +2948,9 @@ deparseExpr(Expr *node, deparse_expr_cxt *context)
case T_RelabelType:
deparseRelabelType((RelabelType *) node, context);
break;
+ case T_ArrayCoerceExpr:
+ deparseArrayCoerceExpr((ArrayCoerceExpr *) node, context);
+ break;
case T_BoolExpr:
deparseBoolExpr((BoolExpr *) node, context);
break;
@@ -3508,6 +3540,24 @@ deparseRelabelType(RelabelType *node, deparse_expr_cxt *context)
}
/*
+ * Deparse an ArrayCoerceExpr (array-type conversion) node.
+ */
+static void
+deparseArrayCoerceExpr(ArrayCoerceExpr *node, deparse_expr_cxt *context)
+{
+ deparseExpr(node->arg, context);
+
+ /*
+	 * It makes no difference how we deparse an explicit cast, but if we
+	 * omit an implicit cast from the query, it is more user-friendly.
+ */
+ if (node->coerceformat != COERCE_IMPLICIT_CAST)
+ appendStringInfo(context->buf, "::%s",
+ deparse_type_name(node->resulttype,
+ node->resulttypmod));
+}
+
+/*
* Deparse a BoolExpr node.
*/
static void
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 2185b42bb4f..ff2b30cc912 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -1180,6 +1180,27 @@ SELECT * FROM ft1 WHERE CASE c3 COLLATE "C" WHEN c6 THEN true ELSE c3 < 'bar' EN
Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
(4 rows)
+-- Test array type conversion pushdown
+SET plan_cache_mode = force_generic_plan;
+PREPARE s(varchar[]) AS SELECT count(*) FROM ft2 WHERE c6 = ANY ($1);
+EXPLAIN (VERBOSE, COSTS OFF)
+EXECUTE s(ARRAY['1','2']);
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Foreign Scan
+ Output: (count(*))
+ Relations: Aggregate on (public.ft2)
+ Remote SQL: SELECT count(*) FROM "S 1"."T 1" WHERE ((c6 = ANY ($1::character varying[])))
+(4 rows)
+
+EXECUTE s(ARRAY['1','2']);
+ count
+-------
+ 200
+(1 row)
+
+DEALLOCATE s;
+RESET plan_cache_mode;
-- a regconfig constant referring to this text search configuration
-- is initially unshippable
CREATE TEXT SEARCH CONFIGURATION public.custom_search
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index e534b40de3c..7267732f569 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -458,6 +458,15 @@ SELECT * FROM ft1 WHERE CASE c3 WHEN c6 THEN true ELSE c3 < 'bar' END;
EXPLAIN (VERBOSE, COSTS OFF)
SELECT * FROM ft1 WHERE CASE c3 COLLATE "C" WHEN c6 THEN true ELSE c3 < 'bar' END;
+-- Test array type conversion pushdown
+SET plan_cache_mode = force_generic_plan;
+PREPARE s(varchar[]) AS SELECT count(*) FROM ft2 WHERE c6 = ANY ($1);
+EXPLAIN (VERBOSE, COSTS OFF)
+EXECUTE s(ARRAY['1','2']);
+EXECUTE s(ARRAY['1','2']);
+DEALLOCATE s;
+RESET plan_cache_mode;
+
-- a regconfig constant referring to this text search configuration
-- is initially unshippable
CREATE TEXT SEARCH CONFIGURATION public.custom_search
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index aa5b8772436..0d23bc1b122 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -3158,7 +3158,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<structfield>datcollate</structfield> <type>text</type>
</para>
<para>
- LC_COLLATE for this database
+ LC_COLLATE for this database (ignored unless <structfield>datlocprovider</structfield> is <literal>c</literal>)
</para></entry>
</row>
diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml
index 5a0e97f6f31..59b27c3c370 100644
--- a/doc/src/sgml/charset.sgml
+++ b/doc/src/sgml/charset.sgml
@@ -100,7 +100,7 @@ initdb --locale=sv_SE
<tbody>
<row>
<entry><envar>LC_COLLATE</envar></entry>
- <entry>String sort order</entry>
+ <entry>String sort order (ignored unless the provider is <literal>libc</literal>)</entry>
</row>
<row>
<entry><envar>LC_CTYPE</envar></entry>
diff --git a/doc/src/sgml/pgoverexplain.sgml b/doc/src/sgml/pgoverexplain.sgml
index 21930fbd3bd..377ddc8139e 100644
--- a/doc/src/sgml/pgoverexplain.sgml
+++ b/doc/src/sgml/pgoverexplain.sgml
@@ -8,7 +8,7 @@
</indexterm>
<para>
- The <filename>pg_overexplain</filename> extends <command>EXPLAIN</command>
+ The <filename>pg_overexplain</filename> module extends <command>EXPLAIN</command>
with new options that provide additional output. It is mostly intended to
assist with debugging of and development of the planner, rather than for
general use. Since this module displays internal details of planner data
@@ -17,6 +17,21 @@
often as) those data structures change.
</para>
+ <para>
+ To use it, simply load it into the server. You can load it into an
+ individual session:
+
+<programlisting>
+LOAD 'pg_overexplain';
+</programlisting>
+
+ You can also preload it into some or all sessions by including
+ <literal>pg_overexplain</literal> in
+ <xref linkend="guc-session-preload-libraries"/> or
+ <xref linkend="guc-shared-preload-libraries"/> in
+ <filename>postgresql.conf</filename>.
+ </para>
+
<sect2 id="pgoverexplain-debug">
<title>EXPLAIN (DEBUG)</title>
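As an illustration of the usage described in the documentation above (a minimal sketch, assuming the module is installed; the exact fields printed by the DEBUG option depend on the server version):

    LOAD 'pg_overexplain';
    EXPLAIN (DEBUG) SELECT 1;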
diff --git a/doc/src/sgml/query.sgml b/doc/src/sgml/query.sgml
index 727a0cb185f..b190f28d41e 100644
--- a/doc/src/sgml/query.sgml
+++ b/doc/src/sgml/query.sgml
@@ -264,8 +264,18 @@ COPY weather FROM '/home/user/weather.txt';
where the file name for the source file must be available on the
machine running the backend process, not the client, since the backend process
- reads the file directly. You can read more about the
- <command>COPY</command> command in <xref linkend="sql-copy"/>.
+   reads the file directly. The data inserted above into the weather table
+   could also be inserted from a file containing the following (values are
+   separated by a tab character):
+
+<programlisting>
+San Francisco 46 50 0.25 1994-11-27
+San Francisco 43 57 0.0 1994-11-29
+Hayward 37 54 \N 1994-11-29
+</programlisting>
+
+ You can read more about the <command>COPY</command> command in
+ <xref linkend="sql-copy"/>.
</para>
</sect1>
diff --git a/doc/src/sgml/ref/create_database.sgml b/doc/src/sgml/ref/create_database.sgml
index 4da8aeebb50..3544b15efda 100644
--- a/doc/src/sgml/ref/create_database.sgml
+++ b/doc/src/sgml/ref/create_database.sgml
@@ -150,12 +150,12 @@ CREATE DATABASE <replaceable class="parameter">name</replaceable>
<para>
Sets the default collation order and character classification in the
new database. Collation affects the sort order applied to strings,
- e.g., in queries with <literal>ORDER BY</literal>, as well as the order used in indexes
- on text columns. Character classification affects the categorization
- of characters, e.g., lower, upper, and digit. Also sets the
- associated aspects of the operating system environment,
- <literal>LC_COLLATE</literal> and <literal>LC_CTYPE</literal>. The
- default is the same setting as the template database. See <xref
+ e.g., in queries with <literal>ORDER BY</literal>, as well as the
+ order used in indexes on text columns. Character classification
+ affects the categorization of characters, e.g., lower, upper, and
+ digit. Also sets the <literal>LC_CTYPE</literal> aspect of the
+ operating system environment. The default is the same setting as the
+ template database. See <xref
linkend="collation-managing-create-libc"/> and <xref
linkend="collation-managing-create-icu"/> for details.
</para>
@@ -189,17 +189,16 @@ CREATE DATABASE <replaceable class="parameter">name</replaceable>
<term><replaceable class="parameter">lc_collate</replaceable></term>
<listitem>
<para>
- Sets <literal>LC_COLLATE</literal> in the database server's operating
- system environment. The default is the setting of <xref
- linkend="create-database-locale"/> if specified, otherwise the same
- setting as the template database. See below for additional
- restrictions.
+ If <xref linkend="create-database-locale-provider"/> is
+ <literal>libc</literal>, sets the default collation order to use in
+ the new database, overriding the setting <xref
+ linkend="create-database-locale"/>. Otherwise, this setting is
+ ignored.
</para>
<para>
- If <xref linkend="create-database-locale-provider"/> is
- <literal>libc</literal>, also sets the default collation order to use
- in the new database, overriding the setting <xref
- linkend="create-database-locale"/>.
+ The default is the setting of <xref linkend="create-database-locale"/>
+ if specified, otherwise the same setting as the template database.
+ See below for additional restrictions.
</para>
</listitem>
</varlistentry>
@@ -208,16 +207,18 @@ CREATE DATABASE <replaceable class="parameter">name</replaceable>
<listitem>
<para>
Sets <literal>LC_CTYPE</literal> in the database server's operating
- system environment. The default is the setting of <xref
- linkend="create-database-locale"/> if specified, otherwise the same
- setting as the template database. See below for additional
- restrictions.
+ system environment.
</para>
<para>
If <xref linkend="create-database-locale-provider"/> is
- <literal>libc</literal>, also sets the default character
- classification to use in the new database, overriding the setting
- <xref linkend="create-database-locale"/>.
+ <literal>libc</literal>, sets the default character classification to
+ use in the new database, overriding the setting <xref
+ linkend="create-database-locale"/>.
+ </para>
+ <para>
+ The default is the setting of <xref linkend="create-database-locale"/>
+ if specified, otherwise the same setting as the template database.
+ See below for additional restrictions.
</para>
</listitem>
</varlistentry>
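An illustrative sketch of the rules described above (hypothetical database name; the libc locale must exist on the server, and template0 is used so the locale may differ from the template's):

    CREATE DATABASE mydb
        TEMPLATE = template0
        LOCALE_PROVIDER = libc
        LC_COLLATE = 'sv_SE.UTF-8'
        LC_CTYPE = 'sv_SE.UTF-8';

With a non-libc provider, LC_COLLATE is ignored for collation behavior, as the updated documentation above describes.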
diff --git a/doc/src/sgml/ref/createdb.sgml b/doc/src/sgml/ref/createdb.sgml
index 5c4e0465ed9..2ccbe13f390 100644
--- a/doc/src/sgml/ref/createdb.sgml
+++ b/doc/src/sgml/ref/createdb.sgml
@@ -136,7 +136,8 @@ PostgreSQL documentation
<term><option>--lc-collate=<replaceable class="parameter">locale</replaceable></option></term>
<listitem>
<para>
- Specifies the LC_COLLATE setting to be used in this database.
+ Specifies the LC_COLLATE setting to be used in this database (ignored
+ unless the locale provider is <literal>libc</literal>).
</para>
</listitem>
</varlistentry>
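The command-line equivalent, as a hedged sketch (hypothetical database name and locale):

    createdb --template=template0 --locale-provider=libc \
        --lc-collate=sv_SE.UTF-8 --lc-ctype=sv_SE.UTF-8 mydb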
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index ac1f801b1eb..dcc8d4f9c1b 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -723,11 +723,12 @@ restart:
/* Calculate pointer to beginning of next page */
targetPagePtr += XLOG_BLCKSZ;
- /* Wait for the next page to become available */
- readOff = ReadPageInternal(state, targetPagePtr,
- Min(total_len - gotlen + SizeOfXLogShortPHD,
- XLOG_BLCKSZ));
-
+ /*
+ * Read the page header before processing the record data, so we
+	 * can handle the case where the previous record ended up being a
+	 * partial one.
+ */
+ readOff = ReadPageInternal(state, targetPagePtr, SizeOfXLogShortPHD);
if (readOff == XLREAD_WOULDBLOCK)
return XLREAD_WOULDBLOCK;
else if (readOff < 0)
@@ -776,6 +777,15 @@ restart:
goto err;
}
+ /* Wait for the next page to become available */
+ readOff = ReadPageInternal(state, targetPagePtr,
+ Min(total_len - gotlen + SizeOfXLogShortPHD,
+ XLOG_BLCKSZ));
+ if (readOff == XLREAD_WOULDBLOCK)
+ return XLREAD_WOULDBLOCK;
+ else if (readOff < 0)
+ goto err;
+
/* Append the continuation from this page to the buffer */
pageHeaderSize = XLogPageHeaderSize(pageHeader);
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 67f8e70f9c1..7dc121f73f1 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -80,6 +80,7 @@ static bool GetTupleForTrigger(EState *estate,
ItemPointer tid,
LockTupleMode lockmode,
TupleTableSlot *oldslot,
+ bool do_epq_recheck,
TupleTableSlot **epqslot,
TM_Result *tmresultp,
TM_FailureData *tmfdp);
@@ -2693,7 +2694,8 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
HeapTuple fdw_trigtuple,
TupleTableSlot **epqslot,
TM_Result *tmresult,
- TM_FailureData *tmfd)
+ TM_FailureData *tmfd,
+ bool is_merge_delete)
{
TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
@@ -2708,9 +2710,17 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
{
TupleTableSlot *epqslot_candidate = NULL;
+ /*
+ * Get a copy of the on-disk tuple we are planning to delete. In
+ * general, if the tuple has been concurrently updated, we should
+ * recheck it using EPQ. However, if this is a MERGE DELETE action,
+ * we skip this EPQ recheck and leave it to the caller (it must do
+ * additional rechecking, and might end up executing a different
+ * action entirely).
+ */
if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
- LockTupleExclusive, slot, &epqslot_candidate,
- tmresult, tmfd))
+ LockTupleExclusive, slot, !is_merge_delete,
+ &epqslot_candidate, tmresult, tmfd))
return false;
/*
@@ -2800,6 +2810,7 @@ ExecARDeleteTriggers(EState *estate,
tupleid,
LockTupleExclusive,
slot,
+ false,
NULL,
NULL,
NULL);
@@ -2944,7 +2955,8 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
HeapTuple fdw_trigtuple,
TupleTableSlot *newslot,
TM_Result *tmresult,
- TM_FailureData *tmfd)
+ TM_FailureData *tmfd,
+ bool is_merge_update)
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
@@ -2965,10 +2977,17 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
{
TupleTableSlot *epqslot_candidate = NULL;
- /* get a copy of the on-disk tuple we are planning to update */
+ /*
+ * Get a copy of the on-disk tuple we are planning to update. In
+ * general, if the tuple has been concurrently updated, we should
+ * recheck it using EPQ. However, if this is a MERGE UPDATE action,
+ * we skip this EPQ recheck and leave it to the caller (it must do
+ * additional rechecking, and might end up executing a different
+ * action entirely).
+ */
if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
- lockmode, oldslot, &epqslot_candidate,
- tmresult, tmfd))
+ lockmode, oldslot, !is_merge_update,
+ &epqslot_candidate, tmresult, tmfd))
return false; /* cancel the update action */
/*
@@ -3142,6 +3161,7 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
tupleid,
LockTupleExclusive,
oldslot,
+ false,
NULL,
NULL,
NULL);
@@ -3298,6 +3318,7 @@ GetTupleForTrigger(EState *estate,
ItemPointer tid,
LockTupleMode lockmode,
TupleTableSlot *oldslot,
+ bool do_epq_recheck,
TupleTableSlot **epqslot,
TM_Result *tmresultp,
TM_FailureData *tmfdp)
@@ -3357,29 +3378,30 @@ GetTupleForTrigger(EState *estate,
if (tmfd.traversed)
{
/*
- * Recheck the tuple using EPQ. For MERGE, we leave this
- * to the caller (it must do additional rechecking, and
- * might end up executing a different action entirely).
+ * Recheck the tuple using EPQ, if requested. Otherwise,
+ * just return that it was concurrently updated.
*/
- if (estate->es_plannedstmt->commandType == CMD_MERGE)
+ if (do_epq_recheck)
{
- if (tmresultp)
- *tmresultp = TM_Updated;
- return false;
+ *epqslot = EvalPlanQual(epqstate,
+ relation,
+ relinfo->ri_RangeTableIndex,
+ oldslot);
+
+ /*
+ * If PlanQual failed for updated tuple - we must not
+ * process this tuple!
+ */
+ if (TupIsNull(*epqslot))
+ {
+ *epqslot = NULL;
+ return false;
+ }
}
-
- *epqslot = EvalPlanQual(epqstate,
- relation,
- relinfo->ri_RangeTableIndex,
- oldslot);
-
- /*
- * If PlanQual failed for updated tuple - we must not
- * process this tuple!
- */
- if (TupIsNull(*epqslot))
+ else
{
- *epqslot = NULL;
+ if (tmresultp)
+ *tmresultp = TM_Updated;
return false;
}
}
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 8a72b5e70a4..1a37737d4a2 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -5228,7 +5228,6 @@ ExecEvalJsonCoercionFinish(ExprState *state, ExprEvalStep *op)
* JsonBehavior expression.
*/
jsestate->escontext.error_occurred = false;
- jsestate->escontext.error_occurred = false;
jsestate->escontext.details_wanted = true;
}
}
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index 53ddd25c42d..f262e7a66f7 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -670,7 +670,7 @@ ExecSimpleRelationUpdate(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_TrigDesc->trig_update_before_row)
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
- tid, NULL, slot, NULL, NULL))
+ tid, NULL, slot, NULL, NULL, false))
skip_tuple = true; /* "do nothing" */
}
@@ -746,7 +746,7 @@ ExecSimpleRelationDelete(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_TrigDesc->trig_delete_before_row)
{
skip_tuple = !ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
- tid, NULL, NULL, NULL, NULL);
+ tid, NULL, NULL, NULL, NULL, false);
}
if (!skip_tuple)
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 54da8e7995b..7c6c2c1f6e4 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1474,7 +1474,8 @@ ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
return ExecBRDeleteTriggers(context->estate, context->epqstate,
resultRelInfo, tupleid, oldtuple,
- epqreturnslot, result, &context->tmfd);
+ epqreturnslot, result, &context->tmfd,
+ context->mtstate->operation == CMD_MERGE);
}
return true;
@@ -2117,7 +2118,8 @@ ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
return ExecBRUpdateTriggers(context->estate, context->epqstate,
resultRelInfo, tupleid, oldtuple, slot,
- result, &context->tmfd);
+ result, &context->tmfd,
+ context->mtstate->operation == CMD_MERGE);
}
return true;
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 7d63cf94a6b..bdcb5e4f261 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -125,13 +125,17 @@ main(int argc, char *argv[])
set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("postgres"));
/*
- * In the postmaster, absorb the environment values for LC_COLLATE and
- * LC_CTYPE. Individual backends will change these later to settings
- * taken from pg_database, but the postmaster cannot do that. If we leave
- * these set to "C" then message localization might not work well in the
- * postmaster.
+ * Collation is handled by pg_locale.c, and the behavior is dependent on
+ * the provider. strcoll(), etc., should not be called directly.
+ */
+ init_locale("LC_COLLATE", LC_COLLATE, "C");
+
+ /*
+ * In the postmaster, absorb the environment value for LC_CTYPE.
+ * Individual backends will change it later to pg_database.datctype, but
+ * the postmaster cannot do that. If we leave it set to "C" then message
+ * localization might not work well in the postmaster.
*/
- init_locale("LC_COLLATE", LC_COLLATE, "");
init_locale("LC_CTYPE", LC_CTYPE, "");
/*
diff --git a/src/backend/storage/lmgr/generate-lwlocknames.pl b/src/backend/storage/lmgr/generate-lwlocknames.pl
index 4441b7cba0c..c7a6720440d 100644
--- a/src/backend/storage/lmgr/generate-lwlocknames.pl
+++ b/src/backend/storage/lmgr/generate-lwlocknames.pl
@@ -10,7 +10,6 @@ use Getopt::Long;
my $output_path = '.';
my $lastlockidx = -1;
-my $continue = "\n";
GetOptions('outdir:s' => \$output_path);
@@ -102,10 +101,8 @@ while (<$lwlocklist>)
while ($lastlockidx < $lockidx - 1)
{
++$lastlockidx;
- $continue = ",\n";
}
$lastlockidx = $lockidx;
- $continue = ",\n";
# Add a "Lock" suffix to each lock name, as the C code depends on that
printf $h "#define %-32s (&MainLWLockArray[$lockidx].lock)\n",
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 46f44bc4511..2d43bf2cc13 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -170,8 +170,8 @@ static const char *const BuiltinTrancheNames[] = {
[LWTRANCHE_DSM_REGISTRY_DSA] = "DSMRegistryDSA",
[LWTRANCHE_DSM_REGISTRY_HASH] = "DSMRegistryHash",
[LWTRANCHE_COMMITTS_SLRU] = "CommitTsSLRU",
- [LWTRANCHE_MULTIXACTOFFSET_SLRU] = "MultixactOffsetSLRU",
- [LWTRANCHE_MULTIXACTMEMBER_SLRU] = "MultixactMemberSLRU",
+ [LWTRANCHE_MULTIXACTOFFSET_SLRU] = "MultiXactOffsetSLRU",
+ [LWTRANCHE_MULTIXACTMEMBER_SLRU] = "MultiXactMemberSLRU",
[LWTRANCHE_NOTIFY_SLRU] = "NotifySLRU",
[LWTRANCHE_SERIAL_SLRU] = "SerialSLRU",
[LWTRANCHE_SUBTRANS_SLRU] = "SubtransSLRU",
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index d1593f38b35..08791b8f75e 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -1350,24 +1350,15 @@ PortalRunMulti(Portal portal,
PopActiveSnapshot();
/*
- * If a query completion data was supplied, use it. Otherwise use the
- * portal's query completion data.
- *
- * Exception: Clients expect INSERT/UPDATE/DELETE tags to have counts, so
- * fake them with zeros. This can happen with DO INSTEAD rules if there
- * is no replacement query of the same type as the original. We print "0
- * 0" here because technically there is no query of the matching tag type,
- * and printing a non-zero count for a different query type seems wrong,
- * e.g. an INSERT that does an UPDATE instead should not print "0 1" if
- * one row was updated. See QueryRewrite(), step 3, for details.
+ * If a command tag was requested and we did not fill in a run-time-
+ * determined tag above, copy the parse-time tag from the Portal. (There
+ * might not be any tag there either, in edge cases such as empty prepared
+ * statements. That's OK.)
*/
- if (qc && qc->commandTag == CMDTAG_UNKNOWN)
- {
- if (portal->qc.commandTag != CMDTAG_UNKNOWN)
- CopyQueryCompletion(qc, &portal->qc);
- /* If the caller supplied a qc, we should have set it by now. */
- Assert(qc->commandTag != CMDTAG_UNKNOWN);
- }
+ if (qc &&
+ qc->commandTag == CMDTAG_UNKNOWN &&
+ portal->qc.commandTag != CMDTAG_UNKNOWN)
+ CopyQueryCompletion(qc, &portal->qc);
}
/*
diff --git a/src/backend/utils/adt/bytea.c b/src/backend/utils/adt/bytea.c
index 2e539c2504e..6e7b914c563 100644
--- a/src/backend/utils/adt/bytea.c
+++ b/src/backend/utils/adt/bytea.c
@@ -182,27 +182,21 @@ bytea_overlay(bytea *t1, bytea *t2, int sp, int sl)
*
* Non-printable characters must be passed as '\nnn' (octal) and are
* converted to internal form. '\' must be passed as '\\'.
- * ereport(ERROR, ...) if bad form.
- *
- * BUGS:
- * The input is scanned twice.
- * The error checking of input is minimal.
*/
Datum
byteain(PG_FUNCTION_ARGS)
{
char *inputText = PG_GETARG_CSTRING(0);
Node *escontext = fcinfo->context;
+ size_t len = strlen(inputText);
+ size_t bc;
char *tp;
char *rp;
- int bc;
bytea *result;
/* Recognize hex input */
if (inputText[0] == '\\' && inputText[1] == 'x')
{
- size_t len = strlen(inputText);
-
bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */
result = palloc(bc);
bc = hex_decode_safe(inputText + 2, len - 2, VARDATA(result),
@@ -213,33 +207,7 @@ byteain(PG_FUNCTION_ARGS)
}
/* Else, it's the traditional escaped style */
- for (bc = 0, tp = inputText; *tp != '\0'; bc++)
- {
- if (tp[0] != '\\')
- tp++;
- else if ((tp[0] == '\\') &&
- (tp[1] >= '0' && tp[1] <= '3') &&
- (tp[2] >= '0' && tp[2] <= '7') &&
- (tp[3] >= '0' && tp[3] <= '7'))
- tp += 4;
- else if ((tp[0] == '\\') &&
- (tp[1] == '\\'))
- tp += 2;
- else
- {
- /*
- * one backslash, not followed by another or ### valid octal
- */
- ereturn(escontext, (Datum) 0,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type %s", "bytea")));
- }
- }
-
- bc += VARHDRSZ;
-
- result = (bytea *) palloc(bc);
- SET_VARSIZE(result, bc);
+ result = (bytea *) palloc(len + VARHDRSZ); /* maximum possible length */
tp = inputText;
rp = VARDATA(result);
@@ -247,21 +215,21 @@ byteain(PG_FUNCTION_ARGS)
{
if (tp[0] != '\\')
*rp++ = *tp++;
- else if ((tp[0] == '\\') &&
- (tp[1] >= '0' && tp[1] <= '3') &&
+ else if ((tp[1] >= '0' && tp[1] <= '3') &&
(tp[2] >= '0' && tp[2] <= '7') &&
(tp[3] >= '0' && tp[3] <= '7'))
{
- bc = VAL(tp[1]);
- bc <<= 3;
- bc += VAL(tp[2]);
- bc <<= 3;
- *rp++ = bc + VAL(tp[3]);
+ int v;
+
+ v = VAL(tp[1]);
+ v <<= 3;
+ v += VAL(tp[2]);
+ v <<= 3;
+ *rp++ = v + VAL(tp[3]);
tp += 4;
}
- else if ((tp[0] == '\\') &&
- (tp[1] == '\\'))
+ else if (tp[1] == '\\')
{
*rp++ = '\\';
tp += 2;
@@ -269,7 +237,7 @@ byteain(PG_FUNCTION_ARGS)
else
{
/*
- * We should never get here. The first pass should not allow it.
+ * one backslash, not followed by another or ### valid octal
*/
ereturn(escontext, (Datum) 0,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
@@ -277,6 +245,9 @@ byteain(PG_FUNCTION_ARGS)
}
}
+ bc = rp - VARDATA(result); /* actual length */
+ SET_VARSIZE(result, bc + VARHDRSZ);
+
PG_RETURN_BYTEA_P(result);
}
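A few illustrative inputs for the traditional escaped bytea format handled by the rewritten single-pass loop above (assuming standard_conforming_strings is on, so the backslashes reach byteain() unchanged, and the default hex output format):

    SELECT '\047'::bytea;   -- valid octal escape, one byte: \x27
    SELECT '\\'::bytea;     -- escaped backslash, one byte: \x5c
    SELECT '\47'::bytea;    -- ERROR: invalid input syntax for type bytea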
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index ce6a626eba2..17fbfa9b410 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -3798,18 +3798,25 @@ estimate_multivariate_bucketsize(PlannerInfo *root, RelOptInfo *inner,
List *hashclauses,
Selectivity *innerbucketsize)
{
- List *clauses = list_copy(hashclauses);
- List *otherclauses = NIL;
- double ndistinct = 1.0;
+ List *clauses;
+ List *otherclauses;
+ double ndistinct;
if (list_length(hashclauses) <= 1)
-
+ {
/*
* Nothing to do for a single clause. Could we employ univariate
* extended stat here?
*/
return hashclauses;
+ }
+ /* "clauses" is the list of hashclauses we've not dealt with yet */
+ clauses = list_copy(hashclauses);
+ /* "otherclauses" holds clauses we are going to return to caller */
+ otherclauses = NIL;
+ /* current estimate of ndistinct */
+ ndistinct = 1.0;
while (clauses != NIL)
{
ListCell *lc;
@@ -3874,12 +3881,13 @@ estimate_multivariate_bucketsize(PlannerInfo *root, RelOptInfo *inner,
group_rel = root->simple_rel_array[relid];
}
else if (group_relid != relid)
-
+ {
/*
* Being in the group forming state we don't need other
* clauses.
*/
continue;
+ }
/*
* We're going to add the new clause to the varinfos list. We
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index c86ceefda94..641e535a73c 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -417,12 +417,11 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect
datum = SysCacheGetAttrNotNull(DATABASEOID, tup, Anum_pg_database_datctype);
ctype = TextDatumGetCString(datum);
- if (pg_perm_setlocale(LC_COLLATE, collate) == NULL)
- ereport(FATAL,
- (errmsg("database locale is incompatible with operating system"),
- errdetail("The database was initialized with LC_COLLATE \"%s\", "
- " which is not recognized by setlocale().", collate),
- errhint("Recreate the database with another locale or install the missing locale.")));
+ /*
+	 * Historically, we set LC_COLLATE from datcollate as well. That's no
+ * longer necessary because all collation behavior is handled through
+ * pg_locale_t.
+ */
if (pg_perm_setlocale(LC_CTYPE, ctype) == NULL)
ereport(FATAL,
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 197c1295d93..30e0da31aa3 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -31,6 +31,8 @@
#endif
#include "catalog/pg_class_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
+#include "catalog/pg_shdepend_d.h"
#include "common/string.h"
#include "compress_io.h"
#include "dumputils.h"
@@ -2974,6 +2976,19 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
int res = REQ_SCHEMA | REQ_DATA;
RestoreOptions *ropt = AH->public.ropt;
+ /*
+ * For binary upgrade mode, dump pg_largeobject_metadata and the
+ * associated pg_shdepend rows. This is faster to restore than the
+ * equivalent set of large object commands. We can only do this for
+ * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+ * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+ */
+ if (ropt->binary_upgrade && AH->public.remoteVersion >= 120000 &&
+ strcmp(te->desc, "TABLE DATA") == 0 &&
+ (te->catalogId.oid == LargeObjectMetadataRelationId ||
+ te->catalogId.oid == SharedDependRelationId))
+ return REQ_DATA;
+
/* These items are treated specially */
if (strcmp(te->desc, "ENCODING") == 0 ||
strcmp(te->desc, "STDSTRINGS") == 0 ||
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 1937997ea67..604fc109416 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -49,8 +49,10 @@
#include "catalog/pg_class_d.h"
#include "catalog/pg_default_acl_d.h"
#include "catalog/pg_largeobject_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
#include "catalog/pg_proc_d.h"
#include "catalog/pg_publication_d.h"
+#include "catalog/pg_shdepend_d.h"
#include "catalog/pg_subscription_d.h"
#include "catalog/pg_type_d.h"
#include "common/connect.h"
@@ -209,6 +211,12 @@ static int nbinaryUpgradeClassOids = 0;
static SequenceItem *sequences = NULL;
static int nsequences = 0;
+/*
+ * For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use
+ * as a dependency for pg_shdepend and any large object comments/seclabels.
+ */
+static DumpId lo_metadata_dumpId;
+
/* Maximum number of relations to fetch in a fetchAttributeStats() call. */
#define MAX_ATTR_STATS_RELS 64
@@ -1086,6 +1094,36 @@ main(int argc, char **argv)
getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
/*
+ * For binary upgrade mode, dump pg_largeobject_metadata and the
+ * associated pg_shdepend rows. This is faster to restore than the
+ * equivalent set of large object commands. We can only do this for
+ * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+ * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+ */
+ if (dopt.binary_upgrade && fout->remoteVersion >= 120000)
+ {
+ TableInfo *lo_metadata = findTableByOid(LargeObjectMetadataRelationId);
+ TableInfo *shdepend = findTableByOid(SharedDependRelationId);
+
+ makeTableDataInfo(&dopt, lo_metadata);
+ makeTableDataInfo(&dopt, shdepend);
+
+ /*
+ * Save pg_largeobject_metadata's dump ID for use as a dependency for
+ * pg_shdepend and any large object comments/seclabels.
+ */
+ lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId;
+ addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId);
+
+ /*
+ * Only dump large object shdepend rows for this database.
+ */
+ shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
+ "AND dbid = (SELECT oid FROM pg_database "
+ " WHERE datname = current_database())";
+ }
+
+ /*
* In binary-upgrade mode, we do not have to worry about the actual LO
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
@@ -3924,10 +3962,37 @@ getLOs(Archive *fout)
* as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
- * pg_largeobject_metadata, after the dump is restored.
+ * pg_largeobject_metadata, after the dump is restored. In versions
+ * before v12, this is done via proper large object commands. In
+ * newer versions, we dump the content of pg_largeobject_metadata and
+ * any associated pg_shdepend rows, which is faster to restore. (On
+ * <v12, pg_largeobject_metadata was created WITH OIDS, so the OID
+ * column is hidden and won't be dumped.)
*/
if (dopt->binary_upgrade)
- loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+ {
+ if (fout->remoteVersion >= 120000)
+ {
+ /*
+ * We should've saved pg_largeobject_metadata's dump ID before
+ * this point.
+ */
+ Assert(lo_metadata_dumpId);
+
+ loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION);
+
+ /*
+ * Mark the large object as dependent on
+ * pg_largeobject_metadata so that any large object
+ * comments/seclables are dumped after it.
+ */
+ loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
+ loinfo->dobj.dependencies[0] = lo_metadata_dumpId;
+ loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1;
+ }
+ else
+ loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+ }
/*
* Create a "BLOBS" data item for the group, too. This is just a
@@ -9039,8 +9104,20 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
if (tbinfo->relkind == RELKIND_SEQUENCE)
continue;
- /* Don't bother with uninteresting tables, either */
- if (!tbinfo->interesting)
+ /*
+ * Don't bother with uninteresting tables, either. For binary
+ * upgrades, this is bypassed for pg_largeobject_metadata and
+		 * pg_shdepend so that the column names are collected for the
+ * corresponding COPY commands. Restoring the data for those catalogs
+ * is faster than restoring the equivalent set of large object
+ * commands. We can only do this for upgrades from v12 and newer; in
+ * older versions, pg_largeobject_metadata was created WITH OIDS, so
+ * the OID column is hidden and won't be dumped.
+ */
+ if (!tbinfo->interesting &&
+ !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+ (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+ tbinfo->dobj.catId.oid == SharedDependRelationId)))
continue;
/* OK, we need info for this table */
@@ -9244,7 +9321,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
pg_fatal("unrecognized table OID %u", attrelid);
/* cross-check that we only got requested tables */
if (tbinfo->relkind == RELKIND_SEQUENCE ||
- !tbinfo->interesting)
+ (!tbinfo->interesting &&
+ !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+ (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+ tbinfo->dobj.catId.oid == SharedDependRelationId))))
pg_fatal("unexpected column data for table \"%s\"",
tbinfo->dobj.name);
@@ -12583,8 +12663,13 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
for (i = 0; i < tyinfo->nDomChecks; i++)
{
ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
- PQExpBuffer conprefix = createPQExpBuffer();
+ PQExpBuffer conprefix;
+
+ /* but only if the constraint itself was dumped here */
+ if (domcheck->separate)
+ continue;
+ conprefix = createPQExpBuffer();
appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
fmtId(domcheck->dobj.name));
@@ -18488,6 +18573,22 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
.section = SECTION_POST_DATA,
.createStmt = q->data,
.dropStmt = delq->data));
+
+ if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+ {
+ PQExpBuffer conprefix = createPQExpBuffer();
+ char *qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
+
+ appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
+ fmtId(coninfo->dobj.name));
+
+ dumpComment(fout, conprefix->data, qtypname,
+ tyinfo->dobj.namespace->dobj.name,
+ tyinfo->rolname,
+ coninfo->dobj.catId, 0, tyinfo->dobj.dumpId);
+ destroyPQExpBuffer(conprefix);
+ free(qtypname);
+ }
}
}
else
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 2485d8f360e..d8330e2bd17 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -1087,6 +1087,7 @@ my %tests = (
test_schema_plus_large_objects => 1,
},
unlike => {
+ binary_upgrade => 1,
no_large_objects => 1,
no_owner => 1,
schema_only => 1,
@@ -1605,6 +1606,7 @@ my %tests = (
test_schema_plus_large_objects => 1,
},
unlike => {
+ binary_upgrade => 1,
schema_only => 1,
schema_only_with_statistics => 1,
no_large_objects => 1,
@@ -4612,9 +4614,9 @@ my %tests = (
no_schema => 1,
section_data => 1,
test_schema_plus_large_objects => 1,
- binary_upgrade => 1,
},
unlike => {
+ binary_upgrade => 1,
no_large_objects => 1,
no_privs => 1,
schema_only => 1,
diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h
index 2ed2c4bb378..cfd7daa20ed 100644
--- a/src/include/commands/trigger.h
+++ b/src/include/commands/trigger.h
@@ -213,7 +213,8 @@ extern bool ExecBRDeleteTriggers(EState *estate,
HeapTuple fdw_trigtuple,
TupleTableSlot **epqslot,
TM_Result *tmresult,
- TM_FailureData *tmfd);
+ TM_FailureData *tmfd,
+ bool is_merge_delete);
extern void ExecARDeleteTriggers(EState *estate,
ResultRelInfo *relinfo,
ItemPointer tupleid,
@@ -235,7 +236,8 @@ extern bool ExecBRUpdateTriggers(EState *estate,
HeapTuple fdw_trigtuple,
TupleTableSlot *newslot,
TM_Result *tmresult,
- TM_FailureData *tmfd);
+ TM_FailureData *tmfd,
+ bool is_merge_update);
extern void ExecARUpdateTriggers(EState *estate,
ResultRelInfo *relinfo,
ResultRelInfo *src_partinfo,
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 2a2b10d5a29..afa85d9fca9 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -7574,10 +7574,12 @@ PQport(const PGconn *conn)
if (!conn)
return NULL;
- if (conn->connhost != NULL)
+ if (conn->connhost != NULL &&
+ conn->connhost[conn->whichhost].port != NULL &&
+ conn->connhost[conn->whichhost].port[0] != '\0')
return conn->connhost[conn->whichhost].port;
- return "";
+ return DEF_PGPORT_STR;
}
/*
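A hedged client-side sketch of the behavior change above: when no port is specified in the connection parameters, PQport() now reports the compiled-in default port instead of an empty string (connection string and output are illustrative):

    #include <stdio.h>
    #include <libpq-fe.h>

    int
    main(void)
    {
        /* no port given; PQport() falls back to the default */
        PGconn *conn = PQconnectdb("host=localhost dbname=postgres");

        if (PQstatus(conn) == CONNECTION_OK)
            printf("port: %s\n", PQport(conn));   /* e.g. "5432" */

        PQfinish(conn);
        return 0;
    }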
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index 70c28f2ffca..a701c25038a 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -357,7 +357,8 @@ typedef struct pg_conn_host
pg_conn_host_type type; /* type of host address */
char *host; /* host name or socket path */
char *hostaddr; /* host numeric IP address */
- char *port; /* port number (always provided) */
+ char *port; /* port number (if NULL or empty, use
+ * DEF_PGPORT[_STR]) */
char *password; /* password for this host, read from the
* password file; NULL if not sought or not
* found in password file. */
diff --git a/src/test/isolation/expected/merge-match-recheck.out b/src/test/isolation/expected/merge-match-recheck.out
index 9a44a595927..90300f1db5a 100644
--- a/src/test/isolation/expected/merge-match-recheck.out
+++ b/src/test/isolation/expected/merge-match-recheck.out
@@ -241,19 +241,28 @@ starting permutation: update_bal1_tg merge_bal_tg c2 select1_tg c1
s2: NOTICE: Update: (1,160,s1,setup) -> (1,50,s1,"setup updated by update_bal1_tg")
step update_bal1_tg: UPDATE target_tg t SET balance = 50, val = t.val || ' updated by update_bal1_tg' WHERE t.key = 1;
step merge_bal_tg:
- MERGE INTO target_tg t
- USING (SELECT 1 as key) s
- ON s.key = t.key
- WHEN MATCHED AND balance < 100 THEN
- UPDATE SET balance = balance * 2, val = t.val || ' when1'
- WHEN MATCHED AND balance < 200 THEN
- UPDATE SET balance = balance * 4, val = t.val || ' when2'
- WHEN MATCHED AND balance < 300 THEN
- UPDATE SET balance = balance * 8, val = t.val || ' when3';
+ WITH t AS (
+ MERGE INTO target_tg t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ UPDATE SET balance = balance * 4, val = t.val || ' when2'
+ WHEN MATCHED AND balance < 300 THEN
+ UPDATE SET balance = balance * 8, val = t.val || ' when3'
+ RETURNING t.*
+ )
+ SELECT * FROM t;
<waiting ...>
step c2: COMMIT;
s1: NOTICE: Update: (1,50,s1,"setup updated by update_bal1_tg") -> (1,100,s1,"setup updated by update_bal1_tg when1")
step merge_bal_tg: <... completed>
+key|balance|status|val
+---+-------+------+-------------------------------------
+ 1| 100|s1 |setup updated by update_bal1_tg when1
+(1 row)
+
step select1_tg: SELECT * FROM target_tg;
key|balance|status|val
---+-------+------+-------------------------------------
diff --git a/src/test/isolation/specs/merge-match-recheck.spec b/src/test/isolation/specs/merge-match-recheck.spec
index 26266b8c297..15226e40c9e 100644
--- a/src/test/isolation/specs/merge-match-recheck.spec
+++ b/src/test/isolation/specs/merge-match-recheck.spec
@@ -99,15 +99,19 @@ step "merge_bal_pa"
}
step "merge_bal_tg"
{
- MERGE INTO target_tg t
- USING (SELECT 1 as key) s
- ON s.key = t.key
- WHEN MATCHED AND balance < 100 THEN
- UPDATE SET balance = balance * 2, val = t.val || ' when1'
- WHEN MATCHED AND balance < 200 THEN
- UPDATE SET balance = balance * 4, val = t.val || ' when2'
- WHEN MATCHED AND balance < 300 THEN
- UPDATE SET balance = balance * 8, val = t.val || ' when3';
+ WITH t AS (
+ MERGE INTO target_tg t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ UPDATE SET balance = balance * 4, val = t.val || ' when2'
+ WHEN MATCHED AND balance < 300 THEN
+ UPDATE SET balance = balance * 8, val = t.val || ' when3'
+ RETURNING t.*
+ )
+ SELECT * FROM t;
}
step "merge_delete"
diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile
index aa1d27bbed3..7d3d3d52b45 100644
--- a/src/test/modules/Makefile
+++ b/src/test/modules/Makefile
@@ -15,6 +15,7 @@ SUBDIRS = \
plsample \
spgist_name_ops \
test_aio \
+ test_binaryheap \
test_bloomfilter \
test_copy_callbacks \
test_custom_rmgrs \
diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build
index 9de0057bd1d..dd5cd065ba1 100644
--- a/src/test/modules/meson.build
+++ b/src/test/modules/meson.build
@@ -14,6 +14,7 @@ subdir('plsample')
subdir('spgist_name_ops')
subdir('ssl_passphrase_callback')
subdir('test_aio')
+subdir('test_binaryheap')
subdir('test_bloomfilter')
subdir('test_copy_callbacks')
subdir('test_custom_rmgrs')
diff --git a/src/test/modules/test_binaryheap/.gitignore b/src/test/modules/test_binaryheap/.gitignore
new file mode 100644
index 00000000000..5dcb3ff9723
--- /dev/null
+++ b/src/test/modules/test_binaryheap/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_binaryheap/Makefile b/src/test/modules/test_binaryheap/Makefile
new file mode 100644
index 00000000000..d310fbc9e88
--- /dev/null
+++ b/src/test/modules/test_binaryheap/Makefile
@@ -0,0 +1,24 @@
+# src/test/modules/test_binaryheap/Makefile
+
+MODULE_big = test_binaryheap
+OBJS = \
+ $(WIN32RES) \
+ test_binaryheap.o
+
+PGFILEDESC = "test_binaryheap - test code for binaryheap"
+
+EXTENSION = test_binaryheap
+DATA = test_binaryheap--1.0.sql
+
+REGRESS = test_binaryheap
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_binaryheap
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_binaryheap/expected/test_binaryheap.out b/src/test/modules/test_binaryheap/expected/test_binaryheap.out
new file mode 100644
index 00000000000..16ce07875e3
--- /dev/null
+++ b/src/test/modules/test_binaryheap/expected/test_binaryheap.out
@@ -0,0 +1,12 @@
+CREATE EXTENSION test_binaryheap;
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_binaryheap();
+ test_binaryheap
+-----------------
+
+(1 row)
+
diff --git a/src/test/modules/test_binaryheap/meson.build b/src/test/modules/test_binaryheap/meson.build
new file mode 100644
index 00000000000..816a43c93e9
--- /dev/null
+++ b/src/test/modules/test_binaryheap/meson.build
@@ -0,0 +1,33 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+test_binaryheap_sources = files(
+ 'test_binaryheap.c',
+)
+
+if host_system == 'windows'
+ test_binaryheap_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'test_binaryheap',
+ '--FILEDESC', 'test_binaryheap - test code for binaryheap',])
+endif
+
+test_binaryheap = shared_module('test_binaryheap',
+ test_binaryheap_sources,
+ kwargs: pg_test_mod_args,
+)
+test_install_libs += test_binaryheap
+
+test_install_data += files(
+ 'test_binaryheap.control',
+ 'test_binaryheap--1.0.sql',
+)
+
+tests += {
+ 'name': 'test_binaryheap',
+ 'sd': meson.current_source_dir(),
+ 'bd': meson.current_build_dir(),
+ 'regress': {
+ 'sql': [
+ 'test_binaryheap',
+ ],
+ },
+}
diff --git a/src/test/modules/test_binaryheap/sql/test_binaryheap.sql b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql
new file mode 100644
index 00000000000..8439545815b
--- /dev/null
+++ b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql
@@ -0,0 +1,8 @@
+CREATE EXTENSION test_binaryheap;
+
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_binaryheap();
diff --git a/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql
new file mode 100644
index 00000000000..cddceeee603
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql
@@ -0,0 +1,7 @@
+/* src/test/modules/test_binaryheap/test_binaryheap--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_binaryheap" to load this file. \quit
+
+CREATE FUNCTION test_binaryheap() RETURNS VOID
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_binaryheap/test_binaryheap.c b/src/test/modules/test_binaryheap/test_binaryheap.c
new file mode 100644
index 00000000000..583dae1da30
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap.c
@@ -0,0 +1,275 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_binaryheap.c
+ * Test correctness of binary heap implementation.
+ *
+ * Copyright (c) 2025, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_binaryheap/test_binaryheap.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "common/int.h"
+#include "common/pg_prng.h"
+#include "fmgr.h"
+#include "lib/binaryheap.h"
+
+PG_MODULE_MAGIC;
+
+/*
+ * Test binaryheap_comparator for max-heap of integers.
+ */
+static int
+int_cmp(Datum a, Datum b, void *arg)
+{
+ return pg_cmp_s32(DatumGetInt32(a), DatumGetInt32(b));
+}
+
+/*
+ * Loops through all nodes and returns the maximum value.
+ */
+static int
+get_max_from_heap(binaryheap *heap)
+{
+ int max = -1;
+
+ for (int i = 0; i < binaryheap_size(heap); i++)
+ max = Max(max, DatumGetInt32(binaryheap_get_node(heap, i)));
+
+ return max;
+}
+
+/*
+ * Generate a random permutation of the integers 0..size-1.
+ */
+static int *
+get_permutation(int size)
+{
+ int *permutation = (int *) palloc(size * sizeof(int));
+
+ permutation[0] = 0;
+
+ /*
+ * This is the "inside-out" variant of the Fisher-Yates shuffle algorithm.
+ * Notionally, we append each new value to the array and then swap it with
+ * a randomly-chosen array element (possibly including itself, else we
+ * fail to generate permutations with the last integer last). The swap
+ * step can be optimized by combining it with the insertion.
+ */
+ for (int i = 1; i < size; i++)
+ {
+ int j = pg_prng_uint64_range(&pg_global_prng_state, 0, i);
+
+ if (j < i) /* avoid fetching undefined data if j=i */
+ permutation[i] = permutation[j];
+ permutation[j] = i;
+ }
+
+ return permutation;
+}
+
+/*
+ * Ensure that the heap property holds for the given heap, i.e., each parent is
+ * greater than or equal to its children.
+ */
+static void
+verify_heap_property(binaryheap *heap)
+{
+ for (int i = 0; i < binaryheap_size(heap); i++)
+ {
+ int left = 2 * i + 1;
+ int right = 2 * i + 2;
+ int parent_val = DatumGetInt32(binaryheap_get_node(heap, i));
+
+ if (left < binaryheap_size(heap) &&
+ parent_val < DatumGetInt32(binaryheap_get_node(heap, left)))
+ elog(ERROR, "parent node less than left child");
+
+ if (right < binaryheap_size(heap) &&
+ parent_val < DatumGetInt32(binaryheap_get_node(heap, right)))
+ elog(ERROR, "parent node less than right child");
+ }
+}
+
+/*
+ * Check correctness of basic operations.
+ */
+static void
+test_basic(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "new heap not empty");
+ if (binaryheap_size(heap) != 0)
+ elog(ERROR, "wrong size for new heap");
+
+ for (int i = 0; i < size; i++)
+ {
+ binaryheap_add(heap, Int32GetDatum(permutation[i]));
+ verify_heap_property(heap);
+ }
+
+ if (binaryheap_empty(heap))
+ elog(ERROR, "heap empty after adding values");
+ if (binaryheap_size(heap) != size)
+ elog(ERROR, "wrong size for heap after adding values");
+
+ if (DatumGetInt32(binaryheap_first(heap)) != get_max_from_heap(heap))
+ elog(ERROR, "incorrect root node after adding values");
+
+ for (int i = 0; i < size; i++)
+ {
+ int expected = get_max_from_heap(heap);
+ int actual = DatumGetInt32(binaryheap_remove_first(heap));
+
+ if (actual != expected)
+ elog(ERROR, "incorrect root node after removing root");
+ verify_heap_property(heap);
+ }
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "heap not empty after removing all nodes");
+}
+
+/*
+ * Test building heap after unordered additions.
+ */
+static void
+test_build(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add_unordered(heap, Int32GetDatum(permutation[i]));
+
+ if (binaryheap_size(heap) != size)
+ elog(ERROR, "wrong size for heap after unordered additions");
+
+ binaryheap_build(heap);
+ verify_heap_property(heap);
+}
+
+/*
+ * Test removing nodes.
+ */
+static void
+test_remove_node(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+ int remove_count = pg_prng_uint64_range(&pg_global_prng_state,
+ 0, size - 1);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(permutation[i]));
+
+ for (int i = 0; i < remove_count; i++)
+ {
+ int idx = pg_prng_uint64_range(&pg_global_prng_state,
+ 0, binaryheap_size(heap) - 1);
+
+ binaryheap_remove_node(heap, idx);
+ verify_heap_property(heap);
+ }
+
+ if (binaryheap_size(heap) != size - remove_count)
+ elog(ERROR, "wrong size after removing nodes");
+}
+
+/*
+ * Test replacing the root node.
+ */
+static void
+test_replace_first(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(i));
+
+ /*
+ * Replace root with a value smaller than everything in the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(-1));
+ verify_heap_property(heap);
+
+ /*
+ * Replace root with a value in the middle of the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(size / 2));
+ verify_heap_property(heap);
+
+ /*
+ * Replace root with a larger value than everything in the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(size + 1));
+ verify_heap_property(heap);
+}
+
+/*
+ * Test duplicate values.
+ */
+static void
+test_duplicates(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int dup = pg_prng_uint64_range(&pg_global_prng_state, 0, size - 1);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(dup));
+
+ for (int i = 0; i < size; i++)
+ {
+ if (DatumGetInt32(binaryheap_remove_first(heap)) != dup)
+ elog(ERROR, "unexpected value in heap with duplicates");
+ }
+}
+
+/*
+ * Test resetting.
+ */
+static void
+test_reset(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(i));
+
+ binaryheap_reset(heap);
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "heap not empty after resetting");
+}
+
+/*
+ * SQL-callable entry point to perform all tests.
+ */
+PG_FUNCTION_INFO_V1(test_binaryheap);
+
+Datum
+test_binaryheap(PG_FUNCTION_ARGS)
+{
+ static const int test_sizes[] = {1, 2, 3, 10, 100, 1000};
+
+ for (int i = 0; i < sizeof(test_sizes) / sizeof(int); i++)
+ {
+ int size = test_sizes[i];
+
+ test_basic(size);
+ test_build(size);
+ test_remove_node(size);
+ test_replace_first(size);
+ test_duplicates(size);
+ test_reset(size);
+ }
+
+ PG_RETURN_VOID();
+}
diff --git a/src/test/modules/test_binaryheap/test_binaryheap.control b/src/test/modules/test_binaryheap/test_binaryheap.control
new file mode 100644
index 00000000000..dd0785e05bd
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap.control
@@ -0,0 +1,5 @@
+# test_binaryheap extension
+comment = 'Test code for binaryheap'
+default_version = '1.0'
+module_pathname = '$libdir/test_binaryheap'
+relocatable = true
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 301766d2ed9..61f68e0cc2e 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -290,6 +290,32 @@ sub connstr
=pod
+=item $node->is_alive()
+
+Check if the node is alive, using pg_isready.
+Returns 1 if successful, 0 on failure.
+
+=cut
+
+sub is_alive
+{
+ my ($self) = @_;
+ local %ENV = $self->_get_env();
+
+ my $ret = PostgreSQL::Test::Utils::system_log(
+ 'pg_isready',
+ '--host' => $self->host,
+ '--port' => $self->port);
+
+ if ($ret != 0)
+ {
+ return 0;
+ }
+ return 1;
+}
+
+=pod
+
=item $node->raw_connect()
Open a raw TCP or Unix domain socket connection to the server. This is
diff --git a/src/test/recovery/meson.build b/src/test/recovery/meson.build
index 6e78ff1a030..52993c32dbb 100644
--- a/src/test/recovery/meson.build
+++ b/src/test/recovery/meson.build
@@ -54,6 +54,7 @@ tests += {
't/043_no_contrecord_switch.pl',
't/044_invalidate_inactive_slots.pl',
't/045_archive_restartpoint.pl',
+ 't/046_checkpoint_logical_slot.pl',
't/047_checkpoint_physical_slot.pl',
't/048_vacuum_horizon_floor.pl'
],
diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl
index 83def062d11..5d2c06ba06e 100644
--- a/src/test/recovery/t/027_stream_regress.pl
+++ b/src/test/recovery/t/027_stream_regress.pl
@@ -81,7 +81,14 @@ my $rc =
. "--max-concurrent-tests=20 "
. "--inputdir=../regress "
. "--outputdir=\"$outputdir\"");
-if ($rc != 0)
+
+# Regression diffs are only meaningful if both the primary and the standby
+# are still alive after a regression test failure.  If either node has
+# crashed, dumping the diffs would only bloat the logs with output from
+# queries that could not run.
+my $primary_alive = $node_primary->is_alive;
+my $standby_alive = $node_standby_1->is_alive;
+if ($rc != 0 && $primary_alive && $standby_alive)
{
# Dump out the regression diffs file, if there is one
my $diffs = "$outputdir/regression.diffs";
@@ -93,6 +100,8 @@ if ($rc != 0)
}
}
is($rc, 0, 'regression tests pass');
+is($primary_alive, 1, 'primary alive after regression test run');
+is($standby_alive, 1, 'standby alive after regression test run');
# Clobber all sequences with their next value, so that we don't have
# differences between nodes due to caching.
diff --git a/src/test/recovery/t/046_checkpoint_logical_slot.pl b/src/test/recovery/t/046_checkpoint_logical_slot.pl
new file mode 100644
index 00000000000..4fd709e3a03
--- /dev/null
+++ b/src/test/recovery/t/046_checkpoint_logical_slot.pl
@@ -0,0 +1,142 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+#
+# This test verifies the case where a logical slot is advanced during a
+# checkpoint. The test checks that the logical slot's restart_lsn still refers
+# to an existing WAL segment after an immediate restart.
+#
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+if ($ENV{enable_injection_points} ne 'yes')
+{
+ plan skip_all => 'Injection points not supported by this build';
+}
+
+my ($node, $result);
+
+$node = PostgreSQL::Test::Cluster->new('mike');
+$node->init;
+$node->append_conf('postgresql.conf', "wal_level = 'logical'");
+$node->start;
+
+# Check if the extension injection_points is available, as it may be
+# possible that this script is run with installcheck, where the module
+# would not be installed by default.
+if (!$node->check_extension('injection_points'))
+{
+ plan skip_all => 'Extension injection_points not installed';
+}
+
+$node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
+
+# Create the two slots we'll need.
+$node->safe_psql('postgres',
+ q{select pg_create_logical_replication_slot('slot_logical', 'test_decoding')}
+);
+$node->safe_psql('postgres',
+ q{select pg_create_physical_replication_slot('slot_physical', true)});
+
+# Advance both slots to the current position just to have everything "valid".
+$node->safe_psql('postgres',
+ q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null)}
+);
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run checkpoint to flush current state to disk and set a baseline.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Generate some transactions to get RUNNING_XACTS.
+my $xacts = $node->background_psql('postgres');
+$xacts->query_until(
+ qr/run_xacts/,
+ q(\echo run_xacts
+SELECT 1 \watch 0.1
+\q
+));
+
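+# Produce a batch of WAL segments so that the later checkpoints have old
+# WAL they could remove.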
+$node->advance_wal(20);
+
+# Run another checkpoint to set a new restore LSN.
+$node->safe_psql('postgres', q{checkpoint});
+
+$node->advance_wal(20);
+
+# Run another checkpoint, this time in the background, and make it wait
+# on the injection point so that the checkpoint stops right before
+# removing old WAL segments.
+note('starting checkpoint');
+
+my $checkpoint = $node->background_psql('postgres');
+$checkpoint->query_safe(
+ q(select injection_points_attach('checkpoint-before-old-wal-removal','wait'))
+);
+$checkpoint->query_until(
+ qr/starting_checkpoint/,
+ q(\echo starting_checkpoint
+checkpoint;
+\q
+));
+
+# Wait until the checkpoint stops right before removing WAL segments.
+note('waiting for injection_point');
+$node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal');
+note('injection_point is reached');
+
+# Try to advance the logical slot, but make it stop when it moves to the next
+# WAL segment (this has to happen in the background, too).
+my $logical = $node->background_psql('postgres');
+$logical->query_safe(
+ q{select injection_points_attach('logical-replication-slot-advance-segment','wait');}
+);
+$logical->query_until(
+ qr/get_changes/,
+ q(
+\echo get_changes
+select count(*) from pg_logical_slot_get_changes('slot_logical', null, null) \watch 1
+\q
+));
+
+# Wait until the slot's restart_lsn points to the next WAL segment.
+note('waiting for injection_point');
+$node->wait_for_event('client backend',
+ 'logical-replication-slot-advance-segment');
+note('injection_point is reached');
+
+# OK, we're in the right situation: time to advance the physical slot, which
+# recalculates the required LSN, and then unblock the checkpoint, which
+# removes the WAL still needed by the logical slot.
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Generate a long WAL record, spanning at least two pages, for the follow-up
+# post-recovery check.
+$node->safe_psql('postgres',
+ q{select pg_logical_emit_message(false, '', repeat('123456789', 1000))});
+
+# Continue the checkpoint and wait for its completion.
+my $log_offset = -s $node->logfile;
+$node->safe_psql('postgres',
+ q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+$node->wait_for_log(qr/checkpoint complete/, $log_offset);
+
+# Abruptly stop the server.
+$node->stop('immediate');
+
+$node->start;
+
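+# The logical slot should still be usable: reading changes from it must not
+# fail after the restart.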
+eval {
+ $node->safe_psql('postgres',
+ q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null);}
+ );
+};
+is($@, '', "Logical slot still valid");
+
+done_testing();
diff --git a/src/test/recovery/t/047_checkpoint_physical_slot.pl b/src/test/recovery/t/047_checkpoint_physical_slot.pl
index a1332b5d44c..9e98383e30e 100644
--- a/src/test/recovery/t/047_checkpoint_physical_slot.pl
+++ b/src/test/recovery/t/047_checkpoint_physical_slot.pl
@@ -94,9 +94,11 @@ $node->safe_psql('postgres',
q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
);
-# Continue the checkpoint.
+# Continue the checkpoint and wait for its completion.
+my $log_offset = -s $node->logfile;
$node->safe_psql('postgres',
q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+$node->wait_for_log(qr/checkpoint complete/, $log_offset);
my $restart_lsn_old = $node->safe_psql('postgres',
q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
@@ -104,8 +106,7 @@ my $restart_lsn_old = $node->safe_psql('postgres',
chomp($restart_lsn_old);
note("restart lsn before stop: $restart_lsn_old");
-# Abruptly stop the server (1 second should be enough for the checkpoint
-# to finish; it would be better).
+# Abruptly stop the server.
$node->stop('immediate');
$node->start;
diff --git a/src/test/regress/expected/compression.out b/src/test/regress/expected/compression.out
index 4dd9ee7200d..09f198149aa 100644
--- a/src/test/regress/expected/compression.out
+++ b/src/test/regress/expected/compression.out
@@ -1,3 +1,7 @@
+-- Default set of tests for TOAST compression, independent of the compression
+-- methods supported by the build.
+CREATE SCHEMA pglz;
+SET search_path TO pglz, public;
\set HIDE_TOAST_COMPRESSION false
-- ensure we get stable results regardless of installation's default
SET default_toast_compression = 'pglz';
@@ -6,21 +10,13 @@ CREATE TABLE cmdata(f1 text COMPRESSION pglz);
CREATE INDEX idx ON cmdata(f1);
INSERT INTO cmdata VALUES(repeat('1234567890', 1000));
\d+ cmdata
- Table "public.cmdata"
+ Table "pglz.cmdata"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | text | | | | extended | pglz | |
Indexes:
"idx" btree (f1)
-CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4);
-INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
-\d+ cmdata1
- Table "public.cmdata1"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | lz4 | |
-
-- verify stored compression method in the data
SELECT pg_column_compression(f1) FROM cmdata;
pg_column_compression
@@ -28,12 +24,6 @@ SELECT pg_column_compression(f1) FROM cmdata;
pglz
(1 row)
-SELECT pg_column_compression(f1) FROM cmdata1;
- pg_column_compression
------------------------
- lz4
-(1 row)
-
-- decompress data slice
SELECT SUBSTR(f1, 200, 5) FROM cmdata;
substr
@@ -41,16 +31,10 @@ SELECT SUBSTR(f1, 200, 5) FROM cmdata;
01234
(1 row)
-SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
- substr
-----------------------------------------------------
- 01234567890123456789012345678901234567890123456789
-(1 row)
-
-- copy with table creation
SELECT * INTO cmmove1 FROM cmdata;
\d+ cmmove1
- Table "public.cmmove1"
+ Table "pglz.cmmove1"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | text | | | | extended | | |
@@ -61,45 +45,9 @@ SELECT pg_column_compression(f1) FROM cmmove1;
pglz
(1 row)
--- copy to existing table
-CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
-INSERT INTO cmmove3 SELECT * FROM cmdata;
-INSERT INTO cmmove3 SELECT * FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove3;
- pg_column_compression
------------------------
- pglz
- lz4
-(2 rows)
-
--- test LIKE INCLUDING COMPRESSION
-CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | lz4 | |
-
-DROP TABLE cmdata2;
-- try setting compression for incompressible data type
CREATE TABLE cmdata2 (f1 int COMPRESSION pglz);
ERROR: column data type integer does not support compression
--- update using datum from different table
-CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
-INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- lz4
-(1 row)
-
-- test externally stored compressed data
CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS
'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
@@ -111,21 +59,6 @@ SELECT pg_column_compression(f1) FROM cmdata2;
pglz
(1 row)
-INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
-SELECT pg_column_compression(f1) FROM cmdata1;
- pg_column_compression
------------------------
- lz4
- lz4
-(2 rows)
-
-SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
- substr
---------
- 01234
- 79026
-(2 rows)
-
SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
substr
--------
@@ -136,21 +69,21 @@ DROP TABLE cmdata2;
--test column type update varlena/non-varlena
CREATE TABLE cmdata2 (f1 int);
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | integer | | | | plain | | |
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | character varying | | | | extended | | |
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | integer | | | | plain | | |
@@ -160,14 +93,14 @@ ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer;
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | character varying | | | | extended | pglz | |
ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | character varying | | | | plain | pglz | |
@@ -179,164 +112,47 @@ SELECT pg_column_compression(f1) FROM cmdata2;
(1 row)
--- test compression with materialized view
-CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
-\d+ compressmv
- Materialized view "public.compressmv"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- x | text | | | | extended | | |
-View definition:
- SELECT f1 AS x
- FROM cmdata1;
-
-SELECT pg_column_compression(f1) FROM cmdata1;
- pg_column_compression
------------------------
- lz4
- lz4
-(2 rows)
-
-SELECT pg_column_compression(x) FROM compressmv;
- pg_column_compression
------------------------
- lz4
- lz4
-(2 rows)
-
--- test compression with partition
-CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
-CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
-CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
-ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
- pg_column_compression
------------------------
- lz4
-(1 row)
-
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-- test compression with inheritance
-CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error
-NOTICE: merging multiple inherited definitions of column "f1"
-ERROR: column "f1" has a compression method conflict
-DETAIL: pglz versus lz4
-CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error
-NOTICE: merging column "f1" with inherited definition
-ERROR: column "f1" has a compression method conflict
-DETAIL: pglz versus lz4
CREATE TABLE cmdata3(f1 text);
CREATE TABLE cminh() INHERITS (cmdata, cmdata3);
NOTICE: merging multiple inherited definitions of column "f1"
-- test default_toast_compression GUC
+-- suppress build-dependent details
+\set VERBOSITY terse
SET default_toast_compression = '';
ERROR: invalid value for parameter "default_toast_compression": ""
-HINT: Available values: pglz, lz4.
SET default_toast_compression = 'I do not exist compression';
ERROR: invalid value for parameter "default_toast_compression": "I do not exist compression"
-HINT: Available values: pglz, lz4.
-SET default_toast_compression = 'lz4';
SET default_toast_compression = 'pglz';
--- test alter compression method
-ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4;
-INSERT INTO cmdata VALUES (repeat('123456789', 4004));
-\d+ cmdata
- Table "public.cmdata"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | lz4 | |
-Indexes:
- "idx" btree (f1)
-Child tables: cminh
-
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- lz4
-(2 rows)
-
+\set VERBOSITY default
ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | character varying | | | | plain | | |
--- test alter compression method for materialized views
-ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
-\d+ compressmv
- Materialized view "public.compressmv"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- x | text | | | | extended | lz4 | |
-View definition:
- SELECT f1 AS x
- FROM cmdata1;
-
--- test alter compression method for partitioned tables
-ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
-ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
--- new data should be compressed with the current compression method
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
- pg_column_compression
------------------------
- lz4
- pglz
-(2 rows)
-
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
- pglz
- lz4
-(2 rows)
-
+DROP TABLE cmdata2;
-- VACUUM FULL does not recompress
SELECT pg_column_compression(f1) FROM cmdata;
pg_column_compression
-----------------------
pglz
- lz4
-(2 rows)
+(1 row)
VACUUM FULL cmdata;
SELECT pg_column_compression(f1) FROM cmdata;
pg_column_compression
-----------------------
pglz
- lz4
-(2 rows)
+(1 row)
--- test expression index
-DROP TABLE cmdata2;
-CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
-CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
-generate_series(1, 50) g), VERSION());
-- check data is ok
SELECT length(f1) FROM cmdata;
length
--------
10000
- 36036
-(2 rows)
-
-SELECT length(f1) FROM cmdata1;
- length
---------
- 10040
- 12449
-(2 rows)
+(1 row)
SELECT length(f1) FROM cmmove1;
length
@@ -344,19 +160,6 @@ SELECT length(f1) FROM cmmove1;
10000
(1 row)
-SELECT length(f1) FROM cmmove2;
- length
---------
- 10040
-(1 row)
-
-SELECT length(f1) FROM cmmove3;
- length
---------
- 10000
- 10040
-(2 rows)
-
CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails
ERROR: invalid compression method "i_do_not_exist_compression"
CREATE TABLE badcompresstbl (a text);
diff --git a/src/test/regress/expected/compression_1.out b/src/test/regress/expected/compression_1.out
deleted file mode 100644
index 7bd7642b4b9..00000000000
--- a/src/test/regress/expected/compression_1.out
+++ /dev/null
@@ -1,360 +0,0 @@
-\set HIDE_TOAST_COMPRESSION false
--- ensure we get stable results regardless of installation's default
-SET default_toast_compression = 'pglz';
--- test creating table with compression method
-CREATE TABLE cmdata(f1 text COMPRESSION pglz);
-CREATE INDEX idx ON cmdata(f1);
-INSERT INTO cmdata VALUES(repeat('1234567890', 1000));
-\d+ cmdata
- Table "public.cmdata"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | pglz | |
-Indexes:
- "idx" btree (f1)
-
-CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4);
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
-ERROR: relation "cmdata1" does not exist
-LINE 1: INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
- ^
-\d+ cmdata1
--- verify stored compression method in the data
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-SELECT pg_column_compression(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmdata1;
- ^
--- decompress data slice
-SELECT SUBSTR(f1, 200, 5) FROM cmdata;
- substr
---------
- 01234
-(1 row)
-
-SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
- ^
--- copy with table creation
-SELECT * INTO cmmove1 FROM cmdata;
-\d+ cmmove1
- Table "public.cmmove1"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | | |
-
-SELECT pg_column_compression(f1) FROM cmmove1;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
--- copy to existing table
-CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
-INSERT INTO cmmove3 SELECT * FROM cmdata;
-INSERT INTO cmmove3 SELECT * FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: INSERT INTO cmmove3 SELECT * FROM cmdata1;
- ^
-SELECT pg_column_compression(f1) FROM cmmove3;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
--- test LIKE INCLUDING COMPRESSION
-CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
-ERROR: relation "cmdata1" does not exist
-LINE 1: CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
- ^
-\d+ cmdata2
-DROP TABLE cmdata2;
-ERROR: table "cmdata2" does not exist
--- try setting compression for incompressible data type
-CREATE TABLE cmdata2 (f1 int COMPRESSION pglz);
-ERROR: column data type integer does not support compression
--- update using datum from different table
-CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
-INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
- ^
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
--- test externally stored compressed data
-CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS
-'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
-CREATE TABLE cmdata2 (f1 text COMPRESSION pglz);
-INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000);
-SELECT pg_column_compression(f1) FROM cmdata2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
-ERROR: relation "cmdata1" does not exist
-LINE 1: INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
- ^
-SELECT pg_column_compression(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmdata1;
- ^
-SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
- ^
-SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
- substr
---------
- 79026
-(1 row)
-
-DROP TABLE cmdata2;
---test column type update varlena/non-varlena
-CREATE TABLE cmdata2 (f1 int);
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | integer | | | | plain | | |
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | character varying | | | | extended | | |
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | integer | | | | plain | | |
-
---changing column storage should not impact the compression method
---but the data should not be compressed
-ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
-ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | character varying | | | | extended | pglz | |
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | character varying | | | | plain | pglz | |
-
-INSERT INTO cmdata2 VALUES (repeat('123456789', 800));
-SELECT pg_column_compression(f1) FROM cmdata2;
- pg_column_compression
------------------------
-
-(1 row)
-
--- test compression with materialized view
-CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: ...TE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
- ^
-\d+ compressmv
-SELECT pg_column_compression(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmdata1;
- ^
-SELECT pg_column_compression(x) FROM compressmv;
-ERROR: relation "compressmv" does not exist
-LINE 1: SELECT pg_column_compression(x) FROM compressmv;
- ^
--- test compression with partition
-CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
-ERROR: relation "cmpart" does not exist
-CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
-ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
-ERROR: relation "cmpart" does not exist
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004));
- ^
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004));
- ^
-SELECT pg_column_compression(f1) FROM cmpart1;
-ERROR: relation "cmpart1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmpart1;
- ^
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
-(0 rows)
-
--- test compression with inheritance
-CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error
-ERROR: relation "cmdata1" does not exist
-CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error
-NOTICE: merging column "f1" with inherited definition
-ERROR: column "f1" has a compression method conflict
-DETAIL: pglz versus lz4
-CREATE TABLE cmdata3(f1 text);
-CREATE TABLE cminh() INHERITS (cmdata, cmdata3);
-NOTICE: merging multiple inherited definitions of column "f1"
--- test default_toast_compression GUC
-SET default_toast_compression = '';
-ERROR: invalid value for parameter "default_toast_compression": ""
-HINT: Available values: pglz.
-SET default_toast_compression = 'I do not exist compression';
-ERROR: invalid value for parameter "default_toast_compression": "I do not exist compression"
-HINT: Available values: pglz.
-SET default_toast_compression = 'lz4';
-ERROR: invalid value for parameter "default_toast_compression": "lz4"
-HINT: Available values: pglz.
-SET default_toast_compression = 'pglz';
--- test alter compression method
-ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4;
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-INSERT INTO cmdata VALUES (repeat('123456789', 4004));
-\d+ cmdata
- Table "public.cmdata"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | pglz | |
-Indexes:
- "idx" btree (f1)
-Child tables: cminh
-
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- pglz
-(2 rows)
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | character varying | | | | plain | | |
-
--- test alter compression method for materialized views
-ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
-ERROR: relation "compressmv" does not exist
-\d+ compressmv
--- test alter compression method for partitioned tables
-ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
-ERROR: relation "cmpart1" does not exist
-ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
--- new data should be compressed with the current compression method
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004));
- ^
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004));
- ^
-SELECT pg_column_compression(f1) FROM cmpart1;
-ERROR: relation "cmpart1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmpart1;
- ^
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
-(0 rows)
-
--- VACUUM FULL does not recompress
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- pglz
-(2 rows)
-
-VACUUM FULL cmdata;
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- pglz
-(2 rows)
-
--- test expression index
-DROP TABLE cmdata2;
-CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-ERROR: relation "cmdata2" does not exist
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
-generate_series(1, 50) g), VERSION());
-ERROR: relation "cmdata2" does not exist
-LINE 1: INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEX...
- ^
--- check data is ok
-SELECT length(f1) FROM cmdata;
- length
---------
- 10000
- 36036
-(2 rows)
-
-SELECT length(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT length(f1) FROM cmdata1;
- ^
-SELECT length(f1) FROM cmmove1;
- length
---------
- 10000
-(1 row)
-
-SELECT length(f1) FROM cmmove2;
- length
---------
- 10040
-(1 row)
-
-SELECT length(f1) FROM cmmove3;
- length
---------
- 10000
-(1 row)
-
-CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails
-ERROR: invalid compression method "i_do_not_exist_compression"
-CREATE TABLE badcompresstbl (a text);
-ALTER TABLE badcompresstbl ALTER a SET COMPRESSION I_Do_Not_Exist_Compression; -- fails
-ERROR: invalid compression method "i_do_not_exist_compression"
-DROP TABLE badcompresstbl;
-\set HIDE_TOAST_COMPRESSION true
diff --git a/src/test/regress/expected/compression_lz4.out b/src/test/regress/expected/compression_lz4.out
new file mode 100644
index 00000000000..068dd7c3674
--- /dev/null
+++ b/src/test/regress/expected/compression_lz4.out
@@ -0,0 +1,249 @@
+-- Tests for TOAST compression with lz4
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+ name = 'default_toast_compression' \gset
+\if :skip_test
+ \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+ \quit
+\endif
+CREATE SCHEMA lz4;
+SET search_path TO lz4, public;
+\set HIDE_TOAST_COMPRESSION false
+-- Ensure we get stable results regardless of the installation's default.
+-- We rely on this GUC value for a few tests.
+SET default_toast_compression = 'pglz';
+-- test creating table with compression method
+CREATE TABLE cmdata_pglz(f1 text COMPRESSION pglz);
+CREATE INDEX idx ON cmdata_pglz(f1);
+INSERT INTO cmdata_pglz VALUES(repeat('1234567890', 1000));
+\d+ cmdata
+CREATE TABLE cmdata_lz4(f1 TEXT COMPRESSION lz4);
+INSERT INTO cmdata_lz4 VALUES(repeat('1234567890', 1004));
+\d+ cmdata1
+-- verify stored compression method in the data
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- decompress data slice
+SELECT SUBSTR(f1, 200, 5) FROM cmdata_pglz;
+ substr
+--------
+ 01234
+(1 row)
+
+SELECT SUBSTR(f1, 2000, 50) FROM cmdata_lz4;
+ substr
+----------------------------------------------------
+ 01234567890123456789012345678901234567890123456789
+(1 row)
+
+-- copy with table creation
+SELECT * INTO cmmove1 FROM cmdata_lz4;
+\d+ cmmove1
+ Table "lz4.cmmove1"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ f1 | text | | | | extended | | |
+
+SELECT pg_column_compression(f1) FROM cmmove1;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- test LIKE INCLUDING COMPRESSION. The GUC default_toast_compression
+-- has no effect; the compression method is taken from the table being copied.
+CREATE TABLE cmdata2 (LIKE cmdata_lz4 INCLUDING COMPRESSION);
+\d+ cmdata2
+ Table "lz4.cmdata2"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ f1 | text | | | | extended | lz4 | |
+
+DROP TABLE cmdata2;
+-- copy to existing table
+CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
+INSERT INTO cmmove3 SELECT * FROM cmdata_pglz;
+INSERT INTO cmmove3 SELECT * FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove3;
+ pg_column_compression
+-----------------------
+ pglz
+ lz4
+(2 rows)
+
+-- update using datum from different table with LZ4 data.
+CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
+INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
+SELECT pg_column_compression(f1) FROM cmmove2;
+ pg_column_compression
+-----------------------
+ pglz
+(1 row)
+
+UPDATE cmmove2 SET f1 = cmdata_lz4.f1 FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove2;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- test externally stored compressed data
+CREATE OR REPLACE FUNCTION large_val_lz4() RETURNS TEXT LANGUAGE SQL AS
+'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
+CREATE TABLE cmdata2 (f1 text COMPRESSION lz4);
+INSERT INTO cmdata2 SELECT large_val_lz4() || repeat('a', 4000);
+SELECT pg_column_compression(f1) FROM cmdata2;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
+ substr
+--------
+ 79026
+(1 row)
+
+DROP TABLE cmdata2;
+DROP FUNCTION large_val_lz4;
+-- test compression with materialized view
+CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata_lz4;
+\d+ compressmv
+ Materialized view "lz4.compressmv"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ x | text | | | | extended | | |
+View definition:
+ SELECT f1 AS x
+ FROM cmdata_lz4;
+
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+SELECT pg_column_compression(x) FROM compressmv;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- test compression with partition
+CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
+CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
+ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+SELECT pg_column_compression(f1) FROM cmpart2;
+ pg_column_compression
+-----------------------
+ pglz
+(1 row)
+
+-- test compression with inheritance
+CREATE TABLE cminh() INHERITS(cmdata_pglz, cmdata_lz4); -- error
+NOTICE: merging multiple inherited definitions of column "f1"
+ERROR: column "f1" has a compression method conflict
+DETAIL: pglz versus lz4
+CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata_pglz); -- error
+NOTICE: merging column "f1" with inherited definition
+ERROR: column "f1" has a compression method conflict
+DETAIL: pglz versus lz4
+CREATE TABLE cmdata3(f1 text);
+CREATE TABLE cminh() INHERITS (cmdata_pglz, cmdata3);
+NOTICE: merging multiple inherited definitions of column "f1"
+-- test default_toast_compression GUC
+SET default_toast_compression = 'lz4';
+-- test alter compression method
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION lz4;
+INSERT INTO cmdata_pglz VALUES (repeat('123456789', 4004));
+\d+ cmdata
+SELECT pg_column_compression(f1) FROM cmdata_pglz;
+ pg_column_compression
+-----------------------
+ pglz
+ lz4
+(2 rows)
+
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION pglz;
+-- test alter compression method for materialized views
+ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
+\d+ compressmv
+ Materialized view "lz4.compressmv"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ x | text | | | | extended | lz4 | |
+View definition:
+ SELECT f1 AS x
+ FROM cmdata_lz4;
+
+-- test alter compression method for partitioned tables
+ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
+ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
+-- new data should be compressed with the current compression method
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+ pg_column_compression
+-----------------------
+ lz4
+ pglz
+(2 rows)
+
+SELECT pg_column_compression(f1) FROM cmpart2;
+ pg_column_compression
+-----------------------
+ pglz
+ lz4
+(2 rows)
+
+-- test expression index
+CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
+CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
+INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
+generate_series(1, 50) g), VERSION());
+-- check data is ok
+SELECT length(f1) FROM cmdata_pglz;
+ length
+--------
+ 10000
+ 36036
+(2 rows)
+
+SELECT length(f1) FROM cmdata_lz4;
+ length
+--------
+ 10040
+(1 row)
+
+SELECT length(f1) FROM cmmove1;
+ length
+--------
+ 10040
+(1 row)
+
+SELECT length(f1) FROM cmmove2;
+ length
+--------
+ 10040
+(1 row)
+
+SELECT length(f1) FROM cmmove3;
+ length
+--------
+ 10000
+ 10040
+(2 rows)
+
+\set HIDE_TOAST_COMPRESSION true
diff --git a/src/test/regress/expected/compression_lz4_1.out b/src/test/regress/expected/compression_lz4_1.out
new file mode 100644
index 00000000000..198056fa224
--- /dev/null
+++ b/src/test/regress/expected/compression_lz4_1.out
@@ -0,0 +1,7 @@
+-- Tests for TOAST compression with lz4
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+ name = 'default_toast_compression' \gset
+\if :skip_test
+ \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+*** skipping TOAST tests with lz4 (not supported) ***
+ \quit
diff --git a/src/test/regress/expected/constraints.out b/src/test/regress/expected/constraints.out
index ccea883cffd..3590d3274f0 100644
--- a/src/test/regress/expected/constraints.out
+++ b/src/test/regress/expected/constraints.out
@@ -1701,3 +1701,7 @@ DROP TABLE constraint_comments_tbl;
DROP DOMAIN constraint_comments_dom;
DROP ROLE regress_constraint_comments;
DROP ROLE regress_constraint_comments_noaccess;
+-- Leave some constraints for the pg_upgrade test to pick up
+CREATE DOMAIN constraint_comments_dom AS int;
+ALTER DOMAIN constraint_comments_dom ADD CONSTRAINT inv_ck CHECK (value > 0) NOT VALID;
+COMMENT ON CONSTRAINT inv_ck ON DOMAIN constraint_comments_dom IS 'comment on invalid constraint';
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 788844abd20..1bfd33de3f3 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -236,6 +236,12 @@ SELECT E'De\\678dBeEf'::bytea;
ERROR: invalid input syntax for type bytea
LINE 1: SELECT E'De\\678dBeEf'::bytea;
^
+SELECT E'DeAd\\\\BeEf'::bytea;
+ bytea
+----------------------
+ \x446541645c42654566
+(1 row)
+
SELECT reverse(''::bytea);
reverse
---------
@@ -291,6 +297,12 @@ SELECT E'De\\123dBeEf'::bytea;
DeSdBeEf
(1 row)
+SELECT E'DeAd\\\\BeEf'::bytea;
+ bytea
+------------
+ DeAd\\BeEf
+(1 row)
+
-- Test non-error-throwing API too
SELECT pg_input_is_valid(E'\\xDeAdBeE', 'bytea');
pg_input_is_valid
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index a424be2a6bf..fbffc67ae60 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -123,7 +123,7 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion tr
# The stats test resets stats, so nothing else needing stats access can be in
# this group.
# ----------
-test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize stats predicate numa
+test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression compression_lz4 memoize stats predicate numa
# event_trigger depends on create_am and cannot run concurrently with
# any test that runs DDL
diff --git a/src/test/regress/sql/compression.sql b/src/test/regress/sql/compression.sql
index 490595fcfb2..ce5ea37a660 100644
--- a/src/test/regress/sql/compression.sql
+++ b/src/test/regress/sql/compression.sql
@@ -1,3 +1,8 @@
+-- Default set of tests for TOAST compression, independent of the compression
+-- methods supported by the build.
+
+CREATE SCHEMA pglz;
+SET search_path TO pglz, public;
\set HIDE_TOAST_COMPRESSION false
-- ensure we get stable results regardless of installation's default
@@ -8,53 +13,27 @@ CREATE TABLE cmdata(f1 text COMPRESSION pglz);
CREATE INDEX idx ON cmdata(f1);
INSERT INTO cmdata VALUES(repeat('1234567890', 1000));
\d+ cmdata
-CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4);
-INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
-\d+ cmdata1
-- verify stored compression method in the data
SELECT pg_column_compression(f1) FROM cmdata;
-SELECT pg_column_compression(f1) FROM cmdata1;
-- decompress data slice
SELECT SUBSTR(f1, 200, 5) FROM cmdata;
-SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
-- copy with table creation
SELECT * INTO cmmove1 FROM cmdata;
\d+ cmmove1
SELECT pg_column_compression(f1) FROM cmmove1;
--- copy to existing table
-CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
-INSERT INTO cmmove3 SELECT * FROM cmdata;
-INSERT INTO cmmove3 SELECT * FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove3;
-
--- test LIKE INCLUDING COMPRESSION
-CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
-\d+ cmdata2
-DROP TABLE cmdata2;
-
-- try setting compression for incompressible data type
CREATE TABLE cmdata2 (f1 int COMPRESSION pglz);
--- update using datum from different table
-CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
-INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
-SELECT pg_column_compression(f1) FROM cmmove2;
-UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove2;
-
-- test externally stored compressed data
CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS
'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
CREATE TABLE cmdata2 (f1 text COMPRESSION pglz);
INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000);
SELECT pg_column_compression(f1) FROM cmdata2;
-INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
-SELECT pg_column_compression(f1) FROM cmdata1;
-SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
DROP TABLE cmdata2;
@@ -76,76 +55,31 @@ ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain;
INSERT INTO cmdata2 VALUES (repeat('123456789', 800));
SELECT pg_column_compression(f1) FROM cmdata2;
--- test compression with materialized view
-CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
-\d+ compressmv
-SELECT pg_column_compression(f1) FROM cmdata1;
-SELECT pg_column_compression(x) FROM compressmv;
-
--- test compression with partition
-CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
-CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
-CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
-
-ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
-SELECT pg_column_compression(f1) FROM cmpart2;
-
-- test compression with inheritance
-CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error
-CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error
CREATE TABLE cmdata3(f1 text);
CREATE TABLE cminh() INHERITS (cmdata, cmdata3);
-- test default_toast_compression GUC
+-- suppress build-dependent details
+\set VERBOSITY terse
SET default_toast_compression = '';
SET default_toast_compression = 'I do not exist compression';
-SET default_toast_compression = 'lz4';
SET default_toast_compression = 'pglz';
-
--- test alter compression method
-ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4;
-INSERT INTO cmdata VALUES (repeat('123456789', 4004));
-\d+ cmdata
-SELECT pg_column_compression(f1) FROM cmdata;
+\set VERBOSITY default
ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default;
\d+ cmdata2
--- test alter compression method for materialized views
-ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
-\d+ compressmv
-
--- test alter compression method for partitioned tables
-ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
-ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
-
--- new data should be compressed with the current compression method
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
-SELECT pg_column_compression(f1) FROM cmpart2;
+DROP TABLE cmdata2;
-- VACUUM FULL does not recompress
SELECT pg_column_compression(f1) FROM cmdata;
VACUUM FULL cmdata;
SELECT pg_column_compression(f1) FROM cmdata;
--- test expression index
-DROP TABLE cmdata2;
-CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
-CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
-generate_series(1, 50) g), VERSION());
-
-- check data is ok
SELECT length(f1) FROM cmdata;
-SELECT length(f1) FROM cmdata1;
SELECT length(f1) FROM cmmove1;
-SELECT length(f1) FROM cmmove2;
-SELECT length(f1) FROM cmmove3;
CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails
CREATE TABLE badcompresstbl (a text);
diff --git a/src/test/regress/sql/compression_lz4.sql b/src/test/regress/sql/compression_lz4.sql
new file mode 100644
index 00000000000..3849f8618de
--- /dev/null
+++ b/src/test/regress/sql/compression_lz4.sql
@@ -0,0 +1,129 @@
+-- Tests for TOAST compression with lz4
+
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+ name = 'default_toast_compression' \gset
+\if :skip_test
+ \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+ \quit
+\endif
+
+CREATE SCHEMA lz4;
+SET search_path TO lz4, public;
+
+\set HIDE_TOAST_COMPRESSION false
+
+-- Ensure we get stable results regardless of the installation's default.
+-- We rely on this GUC value for a few tests.
+SET default_toast_compression = 'pglz';
+
+-- test creating table with compression method
+CREATE TABLE cmdata_pglz(f1 text COMPRESSION pglz);
+CREATE INDEX idx ON cmdata_pglz(f1);
+INSERT INTO cmdata_pglz VALUES(repeat('1234567890', 1000));
+\d+ cmdata
+CREATE TABLE cmdata_lz4(f1 TEXT COMPRESSION lz4);
+INSERT INTO cmdata_lz4 VALUES(repeat('1234567890', 1004));
+\d+ cmdata1
+
+-- verify stored compression method in the data
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+
+-- decompress data slice
+SELECT SUBSTR(f1, 200, 5) FROM cmdata_pglz;
+SELECT SUBSTR(f1, 2000, 50) FROM cmdata_lz4;
+
+-- copy with table creation
+SELECT * INTO cmmove1 FROM cmdata_lz4;
+\d+ cmmove1
+SELECT pg_column_compression(f1) FROM cmmove1;
+
+-- test LIKE INCLUDING COMPRESSION. The GUC default_toast_compression
+-- has no effect; the compression method is taken from the table being copied.
+CREATE TABLE cmdata2 (LIKE cmdata_lz4 INCLUDING COMPRESSION);
+\d+ cmdata2
+DROP TABLE cmdata2;
+
+-- copy to existing table
+CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
+INSERT INTO cmmove3 SELECT * FROM cmdata_pglz;
+INSERT INTO cmmove3 SELECT * FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove3;
+
+-- update using datum from different table with LZ4 data.
+CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
+INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
+SELECT pg_column_compression(f1) FROM cmmove2;
+UPDATE cmmove2 SET f1 = cmdata_lz4.f1 FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove2;
+
+-- test externally stored compressed data
+CREATE OR REPLACE FUNCTION large_val_lz4() RETURNS TEXT LANGUAGE SQL AS
+'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
+CREATE TABLE cmdata2 (f1 text COMPRESSION lz4);
+INSERT INTO cmdata2 SELECT large_val_lz4() || repeat('a', 4000);
+SELECT pg_column_compression(f1) FROM cmdata2;
+SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
+DROP TABLE cmdata2;
+DROP FUNCTION large_val_lz4;
+
+-- test compression with materialized view
+CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata_lz4;
+\d+ compressmv
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+SELECT pg_column_compression(x) FROM compressmv;
+
+-- test compression with partition
+CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
+CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
+
+ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+SELECT pg_column_compression(f1) FROM cmpart2;
+
+-- test compression with inheritance
+CREATE TABLE cminh() INHERITS(cmdata_pglz, cmdata_lz4); -- error
+CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata_pglz); -- error
+CREATE TABLE cmdata3(f1 text);
+CREATE TABLE cminh() INHERITS (cmdata_pglz, cmdata3);
+
+-- test default_toast_compression GUC
+SET default_toast_compression = 'lz4';
+
+-- test alter compression method
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION lz4;
+INSERT INTO cmdata_pglz VALUES (repeat('123456789', 4004));
+\d+ cmdata
+SELECT pg_column_compression(f1) FROM cmdata_pglz;
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION pglz;
+
+-- test alter compression method for materialized views
+ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
+\d+ compressmv
+
+-- test alter compression method for partitioned tables
+ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
+ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
+
+-- new data should be compressed with the current compression method
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+SELECT pg_column_compression(f1) FROM cmpart2;
+
+-- test expression index
+CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
+CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
+INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
+generate_series(1, 50) g), VERSION());
+
+-- check data is ok
+SELECT length(f1) FROM cmdata_pglz;
+SELECT length(f1) FROM cmdata_lz4;
+SELECT length(f1) FROM cmmove1;
+SELECT length(f1) FROM cmmove2;
+SELECT length(f1) FROM cmmove3;
+
+\set HIDE_TOAST_COMPRESSION true
diff --git a/src/test/regress/sql/constraints.sql b/src/test/regress/sql/constraints.sql
index 7487723ab84..1f6dc8fd69f 100644
--- a/src/test/regress/sql/constraints.sql
+++ b/src/test/regress/sql/constraints.sql
@@ -1043,3 +1043,9 @@ DROP DOMAIN constraint_comments_dom;
DROP ROLE regress_constraint_comments;
DROP ROLE regress_constraint_comments_noaccess;
+
+-- Leave some constraints for the pg_upgrade test to pick up
+CREATE DOMAIN constraint_comments_dom AS int;
+
+ALTER DOMAIN constraint_comments_dom ADD CONSTRAINT inv_ck CHECK (value > 0) NOT VALID;
+COMMENT ON CONSTRAINT inv_ck ON DOMAIN constraint_comments_dom IS 'comment on invalid constraint';
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index 2577a42987d..92c445c2439 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -76,6 +76,7 @@ SELECT E'De\\000dBeEf'::bytea;
SELECT E'De\123dBeEf'::bytea;
SELECT E'De\\123dBeEf'::bytea;
SELECT E'De\\678dBeEf'::bytea;
+SELECT E'DeAd\\\\BeEf'::bytea;
SELECT reverse(''::bytea);
SELECT reverse('\xaa'::bytea);
@@ -88,6 +89,7 @@ SELECT E'\\xDe00BeEf'::bytea;
SELECT E'DeAdBeEf'::bytea;
SELECT E'De\\000dBeEf'::bytea;
SELECT E'De\\123dBeEf'::bytea;
+SELECT E'DeAd\\\\BeEf'::bytea;
-- Test non-error-throwing API too
SELECT pg_input_is_valid(E'\\xDeAdBeE', 'bytea');