Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/transam/xlogreader.c                        |  20
-rw-r--r--  src/backend/commands/trigger.c                                 |  74
-rw-r--r--  src/backend/executor/execExprInterp.c                          |   1
-rw-r--r--  src/backend/executor/execReplication.c                         |   4
-rw-r--r--  src/backend/executor/nodeModifyTable.c                         |   6
-rw-r--r--  src/backend/storage/lmgr/generate-lwlocknames.pl               |   3
-rw-r--r--  src/backend/tcop/pquery.c                                      |  25
-rw-r--r--  src/backend/utils/adt/bytea.c                                  |  61
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c                           |  15
-rw-r--r--  src/bin/pg_dump/pg_dump.c                                      |  90
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl                               |   4
-rw-r--r--  src/include/commands/trigger.h                                 |   6
-rw-r--r--  src/interfaces/libpq/fe-connect.c                              |   6
-rw-r--r--  src/interfaces/libpq/libpq-int.h                               |   3
-rw-r--r--  src/test/isolation/expected/merge-match-recheck.out            |  27
-rw-r--r--  src/test/isolation/specs/merge-match-recheck.spec              |  22
-rw-r--r--  src/test/modules/Makefile                                      |   1
-rw-r--r--  src/test/modules/meson.build                                   |   1
-rw-r--r--  src/test/modules/test_binaryheap/.gitignore                    |   4
-rw-r--r--  src/test/modules/test_binaryheap/Makefile                      |  24
-rw-r--r--  src/test/modules/test_binaryheap/expected/test_binaryheap.out  |  12
-rw-r--r--  src/test/modules/test_binaryheap/meson.build                   |  33
-rw-r--r--  src/test/modules/test_binaryheap/sql/test_binaryheap.sql       |   8
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap--1.0.sql      |   7
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap.c             | 275
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap.control      |   5
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm                       |  26
-rw-r--r--  src/test/recovery/meson.build                                  |   1
-rw-r--r--  src/test/recovery/t/027_stream_regress.pl                      |  11
-rw-r--r--  src/test/recovery/t/046_checkpoint_logical_slot.pl             | 142
-rw-r--r--  src/test/recovery/t/047_checkpoint_physical_slot.pl            |   7
-rw-r--r--  src/test/regress/expected/strings.out                          |  12
-rw-r--r--  src/test/regress/sql/strings.sql                               |   2
33 files changed, 804 insertions, 134 deletions
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index ac1f801b1eb..dcc8d4f9c1b 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -723,11 +723,12 @@ restart:
/* Calculate pointer to beginning of next page */
targetPagePtr += XLOG_BLCKSZ;
- /* Wait for the next page to become available */
- readOff = ReadPageInternal(state, targetPagePtr,
- Min(total_len - gotlen + SizeOfXLogShortPHD,
- XLOG_BLCKSZ));
-
+ /*
+ * Read the page header before processing the record data, so we
+ * can handle the case where the previous record ended up being a
+ * partial one.
+ */
+ readOff = ReadPageInternal(state, targetPagePtr, SizeOfXLogShortPHD);
if (readOff == XLREAD_WOULDBLOCK)
return XLREAD_WOULDBLOCK;
else if (readOff < 0)
@@ -776,6 +777,15 @@ restart:
goto err;
}
+ /* Wait for the next page to become available */
+ readOff = ReadPageInternal(state, targetPagePtr,
+ Min(total_len - gotlen + SizeOfXLogShortPHD,
+ XLOG_BLCKSZ));
+ if (readOff == XLREAD_WOULDBLOCK)
+ return XLREAD_WOULDBLOCK;
+ else if (readOff < 0)
+ goto err;
+
/* Append the continuation from this page to the buffer */
pageHeaderSize = XLogPageHeaderSize(pageHeader);
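
The reordering is easier to follow outside the diff. Below is a minimal sketch of the new read sequence, assuming hypothetical read_page() and header_is_valid() stubs in place of ReadPageInternal() and the header validation the real code performs between the two reads; SHORT_PHD and BLOCK_SZ are illustrative stand-ins for SizeOfXLogShortPHD and XLOG_BLCKSZ.

#include <stdbool.h>
#include <stddef.h>

#define SHORT_PHD  24			/* stand-in for SizeOfXLogShortPHD */
#define BLOCK_SZ   8192			/* stand-in for XLOG_BLCKSZ */

/* Hypothetical stubs; the real code calls ReadPageInternal(). */
extern int	read_page(void *state, size_t ptr, size_t req_len);
extern bool header_is_valid(void *state, size_t ptr);

static int
read_continuation(void *state, size_t page_ptr, size_t bytes_left)
{
	int			readOff;
	size_t		want = bytes_left + SHORT_PHD;

	/* First fetch only the short page header... */
	readOff = read_page(state, page_ptr, SHORT_PHD);
	if (readOff < 0)
		return readOff;			/* error or would-block */

	/* ...validate it, so a partial previous record is detected early... */
	if (!header_is_valid(state, page_ptr))
		return -1;

	/* ...and only then wait for the data the record still needs. */
	return read_page(state, page_ptr, want < BLOCK_SZ ? want : BLOCK_SZ);
}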
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 67f8e70f9c1..7dc121f73f1 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -80,6 +80,7 @@ static bool GetTupleForTrigger(EState *estate,
ItemPointer tid,
LockTupleMode lockmode,
TupleTableSlot *oldslot,
+ bool do_epq_recheck,
TupleTableSlot **epqslot,
TM_Result *tmresultp,
TM_FailureData *tmfdp);
@@ -2693,7 +2694,8 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
HeapTuple fdw_trigtuple,
TupleTableSlot **epqslot,
TM_Result *tmresult,
- TM_FailureData *tmfd)
+ TM_FailureData *tmfd,
+ bool is_merge_delete)
{
TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
@@ -2708,9 +2710,17 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
{
TupleTableSlot *epqslot_candidate = NULL;
+ /*
+ * Get a copy of the on-disk tuple we are planning to delete. In
+ * general, if the tuple has been concurrently updated, we should
+ * recheck it using EPQ. However, if this is a MERGE DELETE action,
+ * we skip this EPQ recheck and leave it to the caller (it must do
+ * additional rechecking, and might end up executing a different
+ * action entirely).
+ */
if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
- LockTupleExclusive, slot, &epqslot_candidate,
- tmresult, tmfd))
+ LockTupleExclusive, slot, !is_merge_delete,
+ &epqslot_candidate, tmresult, tmfd))
return false;
/*
@@ -2800,6 +2810,7 @@ ExecARDeleteTriggers(EState *estate,
tupleid,
LockTupleExclusive,
slot,
+ false,
NULL,
NULL,
NULL);
@@ -2944,7 +2955,8 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
HeapTuple fdw_trigtuple,
TupleTableSlot *newslot,
TM_Result *tmresult,
- TM_FailureData *tmfd)
+ TM_FailureData *tmfd,
+ bool is_merge_update)
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
@@ -2965,10 +2977,17 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
{
TupleTableSlot *epqslot_candidate = NULL;
- /* get a copy of the on-disk tuple we are planning to update */
+ /*
+ * Get a copy of the on-disk tuple we are planning to update. In
+ * general, if the tuple has been concurrently updated, we should
+ * recheck it using EPQ. However, if this is a MERGE UPDATE action,
+ * we skip this EPQ recheck and leave it to the caller (it must do
+ * additional rechecking, and might end up executing a different
+ * action entirely).
+ */
if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
- lockmode, oldslot, &epqslot_candidate,
- tmresult, tmfd))
+ lockmode, oldslot, !is_merge_update,
+ &epqslot_candidate, tmresult, tmfd))
return false; /* cancel the update action */
/*
@@ -3142,6 +3161,7 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
tupleid,
LockTupleExclusive,
oldslot,
+ false,
NULL,
NULL,
NULL);
@@ -3298,6 +3318,7 @@ GetTupleForTrigger(EState *estate,
ItemPointer tid,
LockTupleMode lockmode,
TupleTableSlot *oldslot,
+ bool do_epq_recheck,
TupleTableSlot **epqslot,
TM_Result *tmresultp,
TM_FailureData *tmfdp)
@@ -3357,29 +3378,30 @@ GetTupleForTrigger(EState *estate,
if (tmfd.traversed)
{
/*
- * Recheck the tuple using EPQ. For MERGE, we leave this
- * to the caller (it must do additional rechecking, and
- * might end up executing a different action entirely).
+ * Recheck the tuple using EPQ, if requested. Otherwise,
+ * just return that it was concurrently updated.
*/
- if (estate->es_plannedstmt->commandType == CMD_MERGE)
+ if (do_epq_recheck)
{
- if (tmresultp)
- *tmresultp = TM_Updated;
- return false;
+ *epqslot = EvalPlanQual(epqstate,
+ relation,
+ relinfo->ri_RangeTableIndex,
+ oldslot);
+
+ /*
+ * If PlanQual failed for updated tuple - we must not
+ * process this tuple!
+ */
+ if (TupIsNull(*epqslot))
+ {
+ *epqslot = NULL;
+ return false;
+ }
}
-
- *epqslot = EvalPlanQual(epqstate,
- relation,
- relinfo->ri_RangeTableIndex,
- oldslot);
-
- /*
- * If PlanQual failed for updated tuple - we must not
- * process this tuple!
- */
- if (TupIsNull(*epqslot))
+ else
{
- *epqslot = NULL;
+ if (tmresultp)
+ *tmresultp = TM_Updated;
return false;
}
}
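
Condensed, the new branch in GetTupleForTrigger() behaves as in the sketch below; the types and the callback are simplified stand-ins rather than the executor's real ones, and only the TM_Updated outcome and the EPQ recheck mirror the patch.

#include <stdbool.h>
#include <stddef.h>

typedef enum { TM_OK, TM_UPDATED } tm_result;

/* Hypothetical recheck callback standing in for EvalPlanQual(). */
typedef void *(*epq_fn) (void);

static bool
handle_concurrent_update(bool do_epq_recheck, epq_fn epq,
						 tm_result *resultp, void **epqslot)
{
	if (do_epq_recheck)
	{
		/* Re-evaluate the plan quals against the updated tuple version. */
		*epqslot = epq();
		return *epqslot != NULL;	/* NULL: tuple no longer qualifies */
	}

	/* MERGE caller: report the concurrent update and let it recheck. */
	if (resultp)
		*resultp = TM_UPDATED;
	return false;
}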
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 8a72b5e70a4..1a37737d4a2 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -5228,7 +5228,6 @@ ExecEvalJsonCoercionFinish(ExprState *state, ExprEvalStep *op)
* JsonBehavior expression.
*/
jsestate->escontext.error_occurred = false;
- jsestate->escontext.error_occurred = false;
jsestate->escontext.details_wanted = true;
}
}
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index 53ddd25c42d..f262e7a66f7 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -670,7 +670,7 @@ ExecSimpleRelationUpdate(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_TrigDesc->trig_update_before_row)
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
- tid, NULL, slot, NULL, NULL))
+ tid, NULL, slot, NULL, NULL, false))
skip_tuple = true; /* "do nothing" */
}
@@ -746,7 +746,7 @@ ExecSimpleRelationDelete(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_TrigDesc->trig_delete_before_row)
{
skip_tuple = !ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
- tid, NULL, NULL, NULL, NULL);
+ tid, NULL, NULL, NULL, NULL, false);
}
if (!skip_tuple)
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 54da8e7995b..7c6c2c1f6e4 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1474,7 +1474,8 @@ ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
return ExecBRDeleteTriggers(context->estate, context->epqstate,
resultRelInfo, tupleid, oldtuple,
- epqreturnslot, result, &context->tmfd);
+ epqreturnslot, result, &context->tmfd,
+ context->mtstate->operation == CMD_MERGE);
}
return true;
@@ -2117,7 +2118,8 @@ ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
return ExecBRUpdateTriggers(context->estate, context->epqstate,
resultRelInfo, tupleid, oldtuple, slot,
- result, &context->tmfd);
+ result, &context->tmfd,
+ context->mtstate->operation == CMD_MERGE);
}
return true;
diff --git a/src/backend/storage/lmgr/generate-lwlocknames.pl b/src/backend/storage/lmgr/generate-lwlocknames.pl
index 4441b7cba0c..c7a6720440d 100644
--- a/src/backend/storage/lmgr/generate-lwlocknames.pl
+++ b/src/backend/storage/lmgr/generate-lwlocknames.pl
@@ -10,7 +10,6 @@ use Getopt::Long;
my $output_path = '.';
my $lastlockidx = -1;
-my $continue = "\n";
GetOptions('outdir:s' => \$output_path);
@@ -102,10 +101,8 @@ while (<$lwlocklist>)
while ($lastlockidx < $lockidx - 1)
{
++$lastlockidx;
- $continue = ",\n";
}
$lastlockidx = $lockidx;
- $continue = ",\n";
# Add a "Lock" suffix to each lock name, as the C code depends on that
printf $h "#define %-32s (&MainLWLockArray[$lockidx].lock)\n",
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index d1593f38b35..08791b8f75e 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -1350,24 +1350,15 @@ PortalRunMulti(Portal portal,
PopActiveSnapshot();
/*
- * If a query completion data was supplied, use it. Otherwise use the
- * portal's query completion data.
- *
- * Exception: Clients expect INSERT/UPDATE/DELETE tags to have counts, so
- * fake them with zeros. This can happen with DO INSTEAD rules if there
- * is no replacement query of the same type as the original. We print "0
- * 0" here because technically there is no query of the matching tag type,
- * and printing a non-zero count for a different query type seems wrong,
- * e.g. an INSERT that does an UPDATE instead should not print "0 1" if
- * one row was updated. See QueryRewrite(), step 3, for details.
+ * If a command tag was requested and we did not fill in a run-time-
+ * determined tag above, copy the parse-time tag from the Portal. (There
+ * might not be any tag there either, in edge cases such as empty prepared
+ * statements. That's OK.)
*/
- if (qc && qc->commandTag == CMDTAG_UNKNOWN)
- {
- if (portal->qc.commandTag != CMDTAG_UNKNOWN)
- CopyQueryCompletion(qc, &portal->qc);
- /* If the caller supplied a qc, we should have set it by now. */
- Assert(qc->commandTag != CMDTAG_UNKNOWN);
- }
+ if (qc &&
+ qc->commandTag == CMDTAG_UNKNOWN &&
+ portal->qc.commandTag != CMDTAG_UNKNOWN)
+ CopyQueryCompletion(qc, &portal->qc);
}
/*
diff --git a/src/backend/utils/adt/bytea.c b/src/backend/utils/adt/bytea.c
index 2e539c2504e..6e7b914c563 100644
--- a/src/backend/utils/adt/bytea.c
+++ b/src/backend/utils/adt/bytea.c
@@ -182,27 +182,21 @@ bytea_overlay(bytea *t1, bytea *t2, int sp, int sl)
*
* Non-printable characters must be passed as '\nnn' (octal) and are
* converted to internal form. '\' must be passed as '\\'.
- * ereport(ERROR, ...) if bad form.
- *
- * BUGS:
- * The input is scanned twice.
- * The error checking of input is minimal.
*/
Datum
byteain(PG_FUNCTION_ARGS)
{
char *inputText = PG_GETARG_CSTRING(0);
Node *escontext = fcinfo->context;
+ size_t len = strlen(inputText);
+ size_t bc;
char *tp;
char *rp;
- int bc;
bytea *result;
/* Recognize hex input */
if (inputText[0] == '\\' && inputText[1] == 'x')
{
- size_t len = strlen(inputText);
-
bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */
result = palloc(bc);
bc = hex_decode_safe(inputText + 2, len - 2, VARDATA(result),
@@ -213,33 +207,7 @@ byteain(PG_FUNCTION_ARGS)
}
/* Else, it's the traditional escaped style */
- for (bc = 0, tp = inputText; *tp != '\0'; bc++)
- {
- if (tp[0] != '\\')
- tp++;
- else if ((tp[0] == '\\') &&
- (tp[1] >= '0' && tp[1] <= '3') &&
- (tp[2] >= '0' && tp[2] <= '7') &&
- (tp[3] >= '0' && tp[3] <= '7'))
- tp += 4;
- else if ((tp[0] == '\\') &&
- (tp[1] == '\\'))
- tp += 2;
- else
- {
- /*
- * one backslash, not followed by another or ### valid octal
- */
- ereturn(escontext, (Datum) 0,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type %s", "bytea")));
- }
- }
-
- bc += VARHDRSZ;
-
- result = (bytea *) palloc(bc);
- SET_VARSIZE(result, bc);
+ result = (bytea *) palloc(len + VARHDRSZ); /* maximum possible length */
tp = inputText;
rp = VARDATA(result);
@@ -247,21 +215,21 @@ byteain(PG_FUNCTION_ARGS)
{
if (tp[0] != '\\')
*rp++ = *tp++;
- else if ((tp[0] == '\\') &&
- (tp[1] >= '0' && tp[1] <= '3') &&
+ else if ((tp[1] >= '0' && tp[1] <= '3') &&
(tp[2] >= '0' && tp[2] <= '7') &&
(tp[3] >= '0' && tp[3] <= '7'))
{
- bc = VAL(tp[1]);
- bc <<= 3;
- bc += VAL(tp[2]);
- bc <<= 3;
- *rp++ = bc + VAL(tp[3]);
+ int v;
+
+ v = VAL(tp[1]);
+ v <<= 3;
+ v += VAL(tp[2]);
+ v <<= 3;
+ *rp++ = v + VAL(tp[3]);
tp += 4;
}
- else if ((tp[0] == '\\') &&
- (tp[1] == '\\'))
+ else if (tp[1] == '\\')
{
*rp++ = '\\';
tp += 2;
@@ -269,7 +237,7 @@ byteain(PG_FUNCTION_ARGS)
else
{
/*
- * We should never get here. The first pass should not allow it.
+ * one backslash, not followed by another or ### valid octal
*/
ereturn(escontext, (Datum) 0,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
@@ -277,6 +245,9 @@ byteain(PG_FUNCTION_ARGS)
}
}
+ bc = rp - VARDATA(result); /* actual length */
+ SET_VARSIZE(result, bc + VARHDRSZ);
+
PG_RETURN_BYTEA_P(result);
}
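
The one-pass structure is easy to reproduce standalone. This sketch borrows nothing from the backend beyond the digit-value trick of its VAL() macro: it over-allocates to the input length (decoding never grows the data) and records the actual length at the end, the same shape the patch gives byteain().

#include <stdlib.h>
#include <string.h>

#define OCTVAL(c)  ((c) - '0')

/* Decode bytea's traditional escape format in one pass; NULL on bad input. */
static unsigned char *
decode_escaped(const char *in, size_t *out_len)
{
	size_t		len = strlen(in);
	unsigned char *buf = malloc(len > 0 ? len : 1);	/* maximum possible */
	unsigned char *rp = buf;
	const char *tp = in;

	while (*tp != '\0')
	{
		if (tp[0] != '\\')
			*rp++ = (unsigned char) *tp++;
		else if (tp[1] >= '0' && tp[1] <= '3' &&
				 tp[2] >= '0' && tp[2] <= '7' &&
				 tp[3] >= '0' && tp[3] <= '7')
		{
			/* Three octal digits after the backslash. */
			*rp++ = (unsigned char) ((OCTVAL(tp[1]) << 6) |
									 (OCTVAL(tp[2]) << 3) |
									 OCTVAL(tp[3]));
			tp += 4;
		}
		else if (tp[1] == '\\')
		{
			*rp++ = '\\';
			tp += 2;
		}
		else
		{
			/* one backslash, not followed by another or valid octal */
			free(buf);
			return NULL;
		}
	}
	*out_len = (size_t) (rp - buf);	/* actual length */
	return buf;
}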
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 197c1295d93..30e0da31aa3 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -31,6 +31,8 @@
#endif
#include "catalog/pg_class_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
+#include "catalog/pg_shdepend_d.h"
#include "common/string.h"
#include "compress_io.h"
#include "dumputils.h"
@@ -2974,6 +2976,19 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
int res = REQ_SCHEMA | REQ_DATA;
RestoreOptions *ropt = AH->public.ropt;
+ /*
+ * For binary upgrade mode, dump pg_largeobject_metadata and the
+ * associated pg_shdepend rows. This is faster to restore than the
+ * equivalent set of large object commands. We can only do this for
+ * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+ * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+ */
+ if (ropt->binary_upgrade && AH->public.remoteVersion >= 120000 &&
+ strcmp(te->desc, "TABLE DATA") == 0 &&
+ (te->catalogId.oid == LargeObjectMetadataRelationId ||
+ te->catalogId.oid == SharedDependRelationId))
+ return REQ_DATA;
+
/* These items are treated specially */
if (strcmp(te->desc, "ENCODING") == 0 ||
strcmp(te->desc, "STDSTRINGS") == 0 ||
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index c6226175528..604fc109416 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -49,8 +49,10 @@
#include "catalog/pg_class_d.h"
#include "catalog/pg_default_acl_d.h"
#include "catalog/pg_largeobject_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
#include "catalog/pg_proc_d.h"
#include "catalog/pg_publication_d.h"
+#include "catalog/pg_shdepend_d.h"
#include "catalog/pg_subscription_d.h"
#include "catalog/pg_type_d.h"
#include "common/connect.h"
@@ -209,6 +211,12 @@ static int nbinaryUpgradeClassOids = 0;
static SequenceItem *sequences = NULL;
static int nsequences = 0;
+/*
+ * For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use
+ * as a dependency for pg_shdepend and any large object comments/seclabels.
+ */
+static DumpId lo_metadata_dumpId;
+
/* Maximum number of relations to fetch in a fetchAttributeStats() call. */
#define MAX_ATTR_STATS_RELS 64
@@ -1086,6 +1094,36 @@ main(int argc, char **argv)
getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
/*
+ * For binary upgrade mode, dump pg_largeobject_metadata and the
+ * associated pg_shdepend rows. This is faster to restore than the
+ * equivalent set of large object commands. We can only do this for
+ * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+ * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+ */
+ if (dopt.binary_upgrade && fout->remoteVersion >= 120000)
+ {
+ TableInfo *lo_metadata = findTableByOid(LargeObjectMetadataRelationId);
+ TableInfo *shdepend = findTableByOid(SharedDependRelationId);
+
+ makeTableDataInfo(&dopt, lo_metadata);
+ makeTableDataInfo(&dopt, shdepend);
+
+ /*
+ * Save pg_largeobject_metadata's dump ID for use as a dependency for
+ * pg_shdepend and any large object comments/seclabels.
+ */
+ lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId;
+ addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId);
+
+ /*
+ * Only dump large object shdepend rows for this database.
+ */
+ shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
+ "AND dbid = (SELECT oid FROM pg_database "
+ " WHERE datname = current_database())";
+ }
+
+ /*
* In binary-upgrade mode, we do not have to worry about the actual LO
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
@@ -3924,10 +3962,37 @@ getLOs(Archive *fout)
* as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
- * pg_largeobject_metadata, after the dump is restored.
+ * pg_largeobject_metadata, after the dump is restored. In versions
+ * before v12, this is done via proper large object commands. In
+ * newer versions, we dump the content of pg_largeobject_metadata and
+ * any associated pg_shdepend rows, which is faster to restore. (On
+ * <v12, pg_largeobject_metadata was created WITH OIDS, so the OID
+ * column is hidden and won't be dumped.)
*/
if (dopt->binary_upgrade)
- loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+ {
+ if (fout->remoteVersion >= 120000)
+ {
+ /*
+ * We should've saved pg_largeobject_metadata's dump ID before
+ * this point.
+ */
+ Assert(lo_metadata_dumpId);
+
+ loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION);
+
+ /*
+ * Mark the large object as dependent on
+ * pg_largeobject_metadata so that any large object
+ * comments/seclables are dumped after it.
+ */
+ loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
+ loinfo->dobj.dependencies[0] = lo_metadata_dumpId;
+ loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1;
+ }
+ else
+ loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+ }
/*
* Create a "BLOBS" data item for the group, too. This is just a
@@ -9039,8 +9104,20 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
if (tbinfo->relkind == RELKIND_SEQUENCE)
continue;
- /* Don't bother with uninteresting tables, either */
- if (!tbinfo->interesting)
+ /*
+ * Don't bother with uninteresting tables, either. For binary
+ * upgrades, this is bypassed for pg_largeobject_metadata and
+ * pg_shdepend so that the column names are collected for the
+ * corresponding COPY commands. Restoring the data for those catalogs
+ * is faster than restoring the equivalent set of large object
+ * commands. We can only do this for upgrades from v12 and newer; in
+ * older versions, pg_largeobject_metadata was created WITH OIDS, so
+ * the OID column is hidden and won't be dumped.
+ */
+ if (!tbinfo->interesting &&
+ !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+ (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+ tbinfo->dobj.catId.oid == SharedDependRelationId)))
continue;
/* OK, we need info for this table */
@@ -9244,7 +9321,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
pg_fatal("unrecognized table OID %u", attrelid);
/* cross-check that we only got requested tables */
if (tbinfo->relkind == RELKIND_SEQUENCE ||
- !tbinfo->interesting)
+ (!tbinfo->interesting &&
+ !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+ (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+ tbinfo->dobj.catId.oid == SharedDependRelationId))))
pg_fatal("unexpected column data for table \"%s\"",
tbinfo->dobj.name);
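
For reference, the filter condition installed above selects exactly the large-object dependency rows (owners and grantees) belonging to the current database; run by hand, the equivalent query would look like this (illustrative only):

SELECT classid, objid, refobjid::regrole AS role
FROM pg_shdepend
WHERE classid = 'pg_largeobject'::regclass
  AND dbid = (SELECT oid FROM pg_database
              WHERE datname = current_database());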
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 2485d8f360e..d8330e2bd17 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -1087,6 +1087,7 @@ my %tests = (
test_schema_plus_large_objects => 1,
},
unlike => {
+ binary_upgrade => 1,
no_large_objects => 1,
no_owner => 1,
schema_only => 1,
@@ -1605,6 +1606,7 @@ my %tests = (
test_schema_plus_large_objects => 1,
},
unlike => {
+ binary_upgrade => 1,
schema_only => 1,
schema_only_with_statistics => 1,
no_large_objects => 1,
@@ -4612,9 +4614,9 @@ my %tests = (
no_schema => 1,
section_data => 1,
test_schema_plus_large_objects => 1,
- binary_upgrade => 1,
},
unlike => {
+ binary_upgrade => 1,
no_large_objects => 1,
no_privs => 1,
schema_only => 1,
diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h
index 2ed2c4bb378..cfd7daa20ed 100644
--- a/src/include/commands/trigger.h
+++ b/src/include/commands/trigger.h
@@ -213,7 +213,8 @@ extern bool ExecBRDeleteTriggers(EState *estate,
HeapTuple fdw_trigtuple,
TupleTableSlot **epqslot,
TM_Result *tmresult,
- TM_FailureData *tmfd);
+ TM_FailureData *tmfd,
+ bool is_merge_delete);
extern void ExecARDeleteTriggers(EState *estate,
ResultRelInfo *relinfo,
ItemPointer tupleid,
@@ -235,7 +236,8 @@ extern bool ExecBRUpdateTriggers(EState *estate,
HeapTuple fdw_trigtuple,
TupleTableSlot *newslot,
TM_Result *tmresult,
- TM_FailureData *tmfd);
+ TM_FailureData *tmfd,
+ bool is_merge_update);
extern void ExecARUpdateTriggers(EState *estate,
ResultRelInfo *relinfo,
ResultRelInfo *src_partinfo,
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 2a2b10d5a29..afa85d9fca9 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -7574,10 +7574,12 @@ PQport(const PGconn *conn)
if (!conn)
return NULL;
- if (conn->connhost != NULL)
+ if (conn->connhost != NULL &&
+ conn->connhost[conn->whichhost].port != NULL &&
+ conn->connhost[conn->whichhost].port[0] != '\0')
return conn->connhost[conn->whichhost].port;
- return "";
+ return DEF_PGPORT_STR;
}
/*
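
A small libpq client shows the behavioral change; PQconnectdb() and PQport() are the real API, and the printed value assumes no port is specified in the conninfo or environment. Build with something like "cc demo.c -lpq".

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	/* Empty conninfo: rely on built-in defaults for host, port, etc. */
	PGconn	   *conn = PQconnectdb("");

	/* Previously returned ""; now the compiled-in default, normally "5432". */
	printf("PQport: %s\n", PQport(conn));

	PQfinish(conn);
	return 0;
}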
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index 70c28f2ffca..a701c25038a 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -357,7 +357,8 @@ typedef struct pg_conn_host
pg_conn_host_type type; /* type of host address */
char *host; /* host name or socket path */
char *hostaddr; /* host numeric IP address */
- char *port; /* port number (always provided) */
+ char *port; /* port number (if NULL or empty, use
+ * DEF_PGPORT[_STR]) */
char *password; /* password for this host, read from the
* password file; NULL if not sought or not
* found in password file. */
diff --git a/src/test/isolation/expected/merge-match-recheck.out b/src/test/isolation/expected/merge-match-recheck.out
index 9a44a595927..90300f1db5a 100644
--- a/src/test/isolation/expected/merge-match-recheck.out
+++ b/src/test/isolation/expected/merge-match-recheck.out
@@ -241,19 +241,28 @@ starting permutation: update_bal1_tg merge_bal_tg c2 select1_tg c1
s2: NOTICE: Update: (1,160,s1,setup) -> (1,50,s1,"setup updated by update_bal1_tg")
step update_bal1_tg: UPDATE target_tg t SET balance = 50, val = t.val || ' updated by update_bal1_tg' WHERE t.key = 1;
step merge_bal_tg:
- MERGE INTO target_tg t
- USING (SELECT 1 as key) s
- ON s.key = t.key
- WHEN MATCHED AND balance < 100 THEN
- UPDATE SET balance = balance * 2, val = t.val || ' when1'
- WHEN MATCHED AND balance < 200 THEN
- UPDATE SET balance = balance * 4, val = t.val || ' when2'
- WHEN MATCHED AND balance < 300 THEN
- UPDATE SET balance = balance * 8, val = t.val || ' when3';
+ WITH t AS (
+ MERGE INTO target_tg t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ UPDATE SET balance = balance * 4, val = t.val || ' when2'
+ WHEN MATCHED AND balance < 300 THEN
+ UPDATE SET balance = balance * 8, val = t.val || ' when3'
+ RETURNING t.*
+ )
+ SELECT * FROM t;
<waiting ...>
step c2: COMMIT;
s1: NOTICE: Update: (1,50,s1,"setup updated by update_bal1_tg") -> (1,100,s1,"setup updated by update_bal1_tg when1")
step merge_bal_tg: <... completed>
+key|balance|status|val
+---+-------+------+-------------------------------------
+ 1| 100|s1 |setup updated by update_bal1_tg when1
+(1 row)
+
step select1_tg: SELECT * FROM target_tg;
key|balance|status|val
---+-------+------+-------------------------------------
diff --git a/src/test/isolation/specs/merge-match-recheck.spec b/src/test/isolation/specs/merge-match-recheck.spec
index 26266b8c297..15226e40c9e 100644
--- a/src/test/isolation/specs/merge-match-recheck.spec
+++ b/src/test/isolation/specs/merge-match-recheck.spec
@@ -99,15 +99,19 @@ step "merge_bal_pa"
}
step "merge_bal_tg"
{
- MERGE INTO target_tg t
- USING (SELECT 1 as key) s
- ON s.key = t.key
- WHEN MATCHED AND balance < 100 THEN
- UPDATE SET balance = balance * 2, val = t.val || ' when1'
- WHEN MATCHED AND balance < 200 THEN
- UPDATE SET balance = balance * 4, val = t.val || ' when2'
- WHEN MATCHED AND balance < 300 THEN
- UPDATE SET balance = balance * 8, val = t.val || ' when3';
+ WITH t AS (
+ MERGE INTO target_tg t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ UPDATE SET balance = balance * 4, val = t.val || ' when2'
+ WHEN MATCHED AND balance < 300 THEN
+ UPDATE SET balance = balance * 8, val = t.val || ' when3'
+ RETURNING t.*
+ )
+ SELECT * FROM t;
}
step "merge_delete"
diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile
index aa1d27bbed3..7d3d3d52b45 100644
--- a/src/test/modules/Makefile
+++ b/src/test/modules/Makefile
@@ -15,6 +15,7 @@ SUBDIRS = \
plsample \
spgist_name_ops \
test_aio \
+ test_binaryheap \
test_bloomfilter \
test_copy_callbacks \
test_custom_rmgrs \
diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build
index 9de0057bd1d..dd5cd065ba1 100644
--- a/src/test/modules/meson.build
+++ b/src/test/modules/meson.build
@@ -14,6 +14,7 @@ subdir('plsample')
subdir('spgist_name_ops')
subdir('ssl_passphrase_callback')
subdir('test_aio')
+subdir('test_binaryheap')
subdir('test_bloomfilter')
subdir('test_copy_callbacks')
subdir('test_custom_rmgrs')
diff --git a/src/test/modules/test_binaryheap/.gitignore b/src/test/modules/test_binaryheap/.gitignore
new file mode 100644
index 00000000000..5dcb3ff9723
--- /dev/null
+++ b/src/test/modules/test_binaryheap/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_binaryheap/Makefile b/src/test/modules/test_binaryheap/Makefile
new file mode 100644
index 00000000000..d310fbc9e88
--- /dev/null
+++ b/src/test/modules/test_binaryheap/Makefile
@@ -0,0 +1,24 @@
+# src/test/modules/test_binaryheap/Makefile
+
+MODULE_big = test_binaryheap
+OBJS = \
+ $(WIN32RES) \
+ test_binaryheap.o
+
+PGFILEDESC = "test_binaryheap - test code for binaryheap"
+
+EXTENSION = test_binaryheap
+DATA = test_binaryheap--1.0.sql
+
+REGRESS = test_binaryheap
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_binaryheap
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_binaryheap/expected/test_binaryheap.out b/src/test/modules/test_binaryheap/expected/test_binaryheap.out
new file mode 100644
index 00000000000..16ce07875e3
--- /dev/null
+++ b/src/test/modules/test_binaryheap/expected/test_binaryheap.out
@@ -0,0 +1,12 @@
+CREATE EXTENSION test_binaryheap;
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_binaryheap();
+ test_binaryheap
+-----------------
+
+(1 row)
+
diff --git a/src/test/modules/test_binaryheap/meson.build b/src/test/modules/test_binaryheap/meson.build
new file mode 100644
index 00000000000..816a43c93e9
--- /dev/null
+++ b/src/test/modules/test_binaryheap/meson.build
@@ -0,0 +1,33 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+test_binaryheap_sources = files(
+ 'test_binaryheap.c',
+)
+
+if host_system == 'windows'
+ test_binaryheap_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'test_binaryheap',
+ '--FILEDESC', 'test_binaryheap - test code for binaryheap',])
+endif
+
+test_binaryheap = shared_module('test_binaryheap',
+ test_binaryheap_sources,
+ kwargs: pg_test_mod_args,
+)
+test_install_libs += test_binaryheap
+
+test_install_data += files(
+ 'test_binaryheap.control',
+ 'test_binaryheap--1.0.sql',
+)
+
+tests += {
+ 'name': 'test_binaryheap',
+ 'sd': meson.current_source_dir(),
+ 'bd': meson.current_build_dir(),
+ 'regress': {
+ 'sql': [
+ 'test_binaryheap',
+ ],
+ },
+}
diff --git a/src/test/modules/test_binaryheap/sql/test_binaryheap.sql b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql
new file mode 100644
index 00000000000..8439545815b
--- /dev/null
+++ b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql
@@ -0,0 +1,8 @@
+CREATE EXTENSION test_binaryheap;
+
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_binaryheap();
diff --git a/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql
new file mode 100644
index 00000000000..cddceeee603
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql
@@ -0,0 +1,7 @@
+/* src/test/modules/test_binaryheap/test_binaryheap--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_binaryheap" to load this file. \quit
+
+CREATE FUNCTION test_binaryheap() RETURNS VOID
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_binaryheap/test_binaryheap.c b/src/test/modules/test_binaryheap/test_binaryheap.c
new file mode 100644
index 00000000000..583dae1da30
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap.c
@@ -0,0 +1,275 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_binaryheap.c
+ * Test correctness of binary heap implementation.
+ *
+ * Copyright (c) 2025, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_binaryheap/test_binaryheap.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "common/int.h"
+#include "common/pg_prng.h"
+#include "fmgr.h"
+#include "lib/binaryheap.h"
+
+PG_MODULE_MAGIC;
+
+/*
+ * Test binaryheap_comparator for max-heap of integers.
+ */
+static int
+int_cmp(Datum a, Datum b, void *arg)
+{
+ return pg_cmp_s32(DatumGetInt32(a), DatumGetInt32(b));
+}
+
+/*
+ * Loops through all nodes and returns the maximum value.
+ */
+static int
+get_max_from_heap(binaryheap *heap)
+{
+ int max = -1;
+
+ for (int i = 0; i < binaryheap_size(heap); i++)
+ max = Max(max, DatumGetInt32(binaryheap_get_node(heap, i)));
+
+ return max;
+}
+
+/*
+ * Generate a random permutation of the integers 0..size-1.
+ */
+static int *
+get_permutation(int size)
+{
+ int *permutation = (int *) palloc(size * sizeof(int));
+
+ permutation[0] = 0;
+
+ /*
+ * This is the "inside-out" variant of the Fisher-Yates shuffle algorithm.
+ * Notionally, we append each new value to the array and then swap it with
+ * a randomly-chosen array element (possibly including itself, else we
+ * fail to generate permutations with the last integer last). The swap
+ * step can be optimized by combining it with the insertion.
+ */
+ for (int i = 1; i < size; i++)
+ {
+ int j = pg_prng_uint64_range(&pg_global_prng_state, 0, i);
+
+ if (j < i) /* avoid fetching undefined data if j=i */
+ permutation[i] = permutation[j];
+ permutation[j] = i;
+ }
+
+ return permutation;
+}
+
+/*
+ * Ensure that the heap property holds for the given heap, i.e., each parent is
+ * greater than or equal to its children.
+ */
+static void
+verify_heap_property(binaryheap *heap)
+{
+ for (int i = 0; i < binaryheap_size(heap); i++)
+ {
+ int left = 2 * i + 1;
+ int right = 2 * i + 2;
+ int parent_val = DatumGetInt32(binaryheap_get_node(heap, i));
+
+ if (left < binaryheap_size(heap) &&
+ parent_val < DatumGetInt32(binaryheap_get_node(heap, left)))
+ elog(ERROR, "parent node less than left child");
+
+ if (right < binaryheap_size(heap) &&
+ parent_val < DatumGetInt32(binaryheap_get_node(heap, right)))
+ elog(ERROR, "parent node less than right child");
+ }
+}
+
+/*
+ * Check correctness of basic operations.
+ */
+static void
+test_basic(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "new heap not empty");
+ if (binaryheap_size(heap) != 0)
+ elog(ERROR, "wrong size for new heap");
+
+ for (int i = 0; i < size; i++)
+ {
+ binaryheap_add(heap, Int32GetDatum(permutation[i]));
+ verify_heap_property(heap);
+ }
+
+ if (binaryheap_empty(heap))
+ elog(ERROR, "heap empty after adding values");
+ if (binaryheap_size(heap) != size)
+ elog(ERROR, "wrong size for heap after adding values");
+
+ if (DatumGetInt32(binaryheap_first(heap)) != get_max_from_heap(heap))
+ elog(ERROR, "incorrect root node after adding values");
+
+ for (int i = 0; i < size; i++)
+ {
+ int expected = get_max_from_heap(heap);
+ int actual = DatumGetInt32(binaryheap_remove_first(heap));
+
+ if (actual != expected)
+ elog(ERROR, "incorrect root node after removing root");
+ verify_heap_property(heap);
+ }
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "heap not empty after removing all nodes");
+}
+
+/*
+ * Test building heap after unordered additions.
+ */
+static void
+test_build(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add_unordered(heap, Int32GetDatum(permutation[i]));
+
+ if (binaryheap_size(heap) != size)
+ elog(ERROR, "wrong size for heap after unordered additions");
+
+ binaryheap_build(heap);
+ verify_heap_property(heap);
+}
+
+/*
+ * Test removing nodes.
+ */
+static void
+test_remove_node(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+ int remove_count = pg_prng_uint64_range(&pg_global_prng_state,
+ 0, size - 1);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(permutation[i]));
+
+ for (int i = 0; i < remove_count; i++)
+ {
+ int idx = pg_prng_uint64_range(&pg_global_prng_state,
+ 0, binaryheap_size(heap) - 1);
+
+ binaryheap_remove_node(heap, idx);
+ verify_heap_property(heap);
+ }
+
+ if (binaryheap_size(heap) != size - remove_count)
+ elog(ERROR, "wrong size after removing nodes");
+}
+
+/*
+ * Test replacing the root node.
+ */
+static void
+test_replace_first(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(i));
+
+ /*
+ * Replace root with a value smaller than everything in the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(-1));
+ verify_heap_property(heap);
+
+ /*
+ * Replace root with a value in the middle of the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(size / 2));
+ verify_heap_property(heap);
+
+ /*
+ * Replace root with a larger value than everything in the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(size + 1));
+ verify_heap_property(heap);
+}
+
+/*
+ * Test duplicate values.
+ */
+static void
+test_duplicates(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int dup = pg_prng_uint64_range(&pg_global_prng_state, 0, size - 1);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(dup));
+
+ for (int i = 0; i < size; i++)
+ {
+ if (DatumGetInt32(binaryheap_remove_first(heap)) != dup)
+ elog(ERROR, "unexpected value in heap with duplicates");
+ }
+}
+
+/*
+ * Test resetting.
+ */
+static void
+test_reset(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(i));
+
+ binaryheap_reset(heap);
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "heap not empty after resetting");
+}
+
+/*
+ * SQL-callable entry point to perform all tests.
+ */
+PG_FUNCTION_INFO_V1(test_binaryheap);
+
+Datum
+test_binaryheap(PG_FUNCTION_ARGS)
+{
+ static const int test_sizes[] = {1, 2, 3, 10, 100, 1000};
+
+ for (int i = 0; i < sizeof(test_sizes) / sizeof(int); i++)
+ {
+ int size = test_sizes[i];
+
+ test_basic(size);
+ test_build(size);
+ test_remove_node(size);
+ test_replace_first(size);
+ test_duplicates(size);
+ test_reset(size);
+ }
+
+ PG_RETURN_VOID();
+}
diff --git a/src/test/modules/test_binaryheap/test_binaryheap.control b/src/test/modules/test_binaryheap/test_binaryheap.control
new file mode 100644
index 00000000000..dd0785e05bd
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap.control
@@ -0,0 +1,5 @@
+# test_binaryheap extension
+comment = 'Test code for binaryheap'
+default_version = '1.0'
+module_pathname = '$libdir/test_binaryheap'
+relocatable = true
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 301766d2ed9..61f68e0cc2e 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -290,6 +290,32 @@ sub connstr
=pod
+=item $node->is_alive()
+
+Check if the node is alive, using pg_isready.
+Returns 1 if successful, 0 on failure.
+
+=cut
+
+sub is_alive
+{
+ my ($self) = @_;
+ local %ENV = $self->_get_env();
+
+ my $ret = PostgreSQL::Test::Utils::system_log(
+ 'pg_isready',
+ '--host' => $self->host,
+ '--port' => $self->port);
+
+ if ($ret != 0)
+ {
+ return 0;
+ }
+ return 1;
+}
+
+=pod
+
=item $node->raw_connect()
Open a raw TCP or Unix domain socket connection to the server. This is
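
Typical use from a TAP test is straightforward; this hypothetical snippet simply asserts liveness after running a workload:

use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;

my $node = PostgreSQL::Test::Cluster->new('liveness_demo');
$node->init;
$node->start;

# ... run the workload under test ...

# pg_isready-based check: 1 if the server accepts connections, 0 if not.
is($node->is_alive, 1, 'node is still alive');

done_testing();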
diff --git a/src/test/recovery/meson.build b/src/test/recovery/meson.build
index 6e78ff1a030..52993c32dbb 100644
--- a/src/test/recovery/meson.build
+++ b/src/test/recovery/meson.build
@@ -54,6 +54,7 @@ tests += {
't/043_no_contrecord_switch.pl',
't/044_invalidate_inactive_slots.pl',
't/045_archive_restartpoint.pl',
+ 't/046_checkpoint_logical_slot.pl',
't/047_checkpoint_physical_slot.pl',
't/048_vacuum_horizon_floor.pl'
],
diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl
index 83def062d11..5d2c06ba06e 100644
--- a/src/test/recovery/t/027_stream_regress.pl
+++ b/src/test/recovery/t/027_stream_regress.pl
@@ -81,7 +81,14 @@ my $rc =
. "--max-concurrent-tests=20 "
. "--inputdir=../regress "
. "--outputdir=\"$outputdir\"");
-if ($rc != 0)
+
+# Regression diffs are only meaningful if both the primary and the standby
+# are still alive after a regression test failure. A crash would merely
+# bloat the logs, mostly with errors from queries that could not run.
+my $primary_alive = $node_primary->is_alive;
+my $standby_alive = $node_standby_1->is_alive;
+if ($rc != 0 && $primary_alive && $standby_alive)
{
# Dump out the regression diffs file, if there is one
my $diffs = "$outputdir/regression.diffs";
@@ -93,6 +100,8 @@ if ($rc != 0)
}
}
is($rc, 0, 'regression tests pass');
+is($primary_alive, 1, 'primary alive after regression test run');
+is($standby_alive, 1, 'standby alive after regression test run');
# Clobber all sequences with their next value, so that we don't have
# differences between nodes due to caching.
diff --git a/src/test/recovery/t/046_checkpoint_logical_slot.pl b/src/test/recovery/t/046_checkpoint_logical_slot.pl
new file mode 100644
index 00000000000..4fd709e3a03
--- /dev/null
+++ b/src/test/recovery/t/046_checkpoint_logical_slot.pl
@@ -0,0 +1,142 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+#
+# This test verifies the case where a logical slot is advanced during a
+# checkpoint. The test checks that the logical slot's restart_lsn still refers
+# to an existing WAL segment after an immediate restart.
+#
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+if ($ENV{enable_injection_points} ne 'yes')
+{
+ plan skip_all => 'Injection points not supported by this build';
+}
+
+my ($node, $result);
+
+$node = PostgreSQL::Test::Cluster->new('mike');
+$node->init;
+$node->append_conf('postgresql.conf', "wal_level = 'logical'");
+$node->start;
+
+# Check if the extension injection_points is available, as this script
+# may be run with installcheck, where the module would not be installed
+# by default.
+if (!$node->check_extension('injection_points'))
+{
+ plan skip_all => 'Extension injection_points not installed';
+}
+
+$node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
+
+# Create the two slots we'll need.
+$node->safe_psql('postgres',
+ q{select pg_create_logical_replication_slot('slot_logical', 'test_decoding')}
+);
+$node->safe_psql('postgres',
+ q{select pg_create_physical_replication_slot('slot_physical', true)});
+
+# Advance both slots to the current position just to have everything "valid".
+$node->safe_psql('postgres',
+ q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null)}
+);
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run checkpoint to flush current state to disk and set a baseline.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Generate some transactions to get RUNNING_XACTS.
+my $xacts = $node->background_psql('postgres');
+$xacts->query_until(
+ qr/run_xacts/,
+ q(\echo run_xacts
+SELECT 1 \watch 0.1
+\q
+));
+
+$node->advance_wal(20);
+
+# Run another checkpoint to set a new restore LSN.
+$node->safe_psql('postgres', q{checkpoint});
+
+$node->advance_wal(20);
+
+# Run another checkpoint, this time in the background, and make it wait
+# on the injection point so that the checkpoint stops right before
+# removing old WAL segments.
+note('starting checkpoint');
+
+my $checkpoint = $node->background_psql('postgres');
+$checkpoint->query_safe(
+ q(select injection_points_attach('checkpoint-before-old-wal-removal','wait'))
+);
+$checkpoint->query_until(
+ qr/starting_checkpoint/,
+ q(\echo starting_checkpoint
+checkpoint;
+\q
+));
+
+# Wait until the checkpoint stops right before removing WAL segments.
+note('waiting for injection_point');
+$node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal');
+note('injection_point is reached');
+
+# Try to advance the logical slot, but make it stop when it moves to the next
+# WAL segment (this has to happen in the background, too).
+my $logical = $node->background_psql('postgres');
+$logical->query_safe(
+ q{select injection_points_attach('logical-replication-slot-advance-segment','wait');}
+);
+$logical->query_until(
+ qr/get_changes/,
+ q(
+\echo get_changes
+select count(*) from pg_logical_slot_get_changes('slot_logical', null, null) \watch 1
+\q
+));
+
+# Wait until the slot's restart_lsn points to the next WAL segment.
+note('waiting for injection_point');
+$node->wait_for_event('client backend',
+ 'logical-replication-slot-advance-segment');
+note('injection_point is reached');
+
+# OK, we're in the right situation: time to advance the physical slot, which
+# recalculates the required LSN, and then unblock the checkpoint, which
+# removes the WAL still needed by the logical slot.
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Generate a long WAL record, spanning at least two pages, for the follow-up
+# post-recovery check.
+$node->safe_psql('postgres',
+ q{select pg_logical_emit_message(false, '', repeat('123456789', 1000))});
+
+# Continue the checkpoint and wait for its completion.
+my $log_offset = -s $node->logfile;
+$node->safe_psql('postgres',
+ q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+$node->wait_for_log(qr/checkpoint complete/, $log_offset);
+
+# Abruptly stop the server.
+$node->stop('immediate');
+
+$node->start;
+
+eval {
+ $node->safe_psql('postgres',
+ q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null);}
+ );
+};
+is($@, '', "Logical slot still valid");
+
+done_testing();
diff --git a/src/test/recovery/t/047_checkpoint_physical_slot.pl b/src/test/recovery/t/047_checkpoint_physical_slot.pl
index a1332b5d44c..9e98383e30e 100644
--- a/src/test/recovery/t/047_checkpoint_physical_slot.pl
+++ b/src/test/recovery/t/047_checkpoint_physical_slot.pl
@@ -94,9 +94,11 @@ $node->safe_psql('postgres',
q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
);
-# Continue the checkpoint.
+# Continue the checkpoint and wait for its completion.
+my $log_offset = -s $node->logfile;
$node->safe_psql('postgres',
q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+$node->wait_for_log(qr/checkpoint complete/, $log_offset);
my $restart_lsn_old = $node->safe_psql('postgres',
q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
@@ -104,8 +106,7 @@ my $restart_lsn_old = $node->safe_psql('postgres',
chomp($restart_lsn_old);
note("restart lsn before stop: $restart_lsn_old");
-# Abruptly stop the server (1 second should be enough for the checkpoint
-# to finish; it would be better).
+# Abruptly stop the server.
$node->stop('immediate');
$node->start;
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 788844abd20..1bfd33de3f3 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -236,6 +236,12 @@ SELECT E'De\\678dBeEf'::bytea;
ERROR: invalid input syntax for type bytea
LINE 1: SELECT E'De\\678dBeEf'::bytea;
^
+SELECT E'DeAd\\\\BeEf'::bytea;
+ bytea
+----------------------
+ \x446541645c42654566
+(1 row)
+
SELECT reverse(''::bytea);
reverse
---------
@@ -291,6 +297,12 @@ SELECT E'De\\123dBeEf'::bytea;
DeSdBeEf
(1 row)
+SELECT E'DeAd\\\\BeEf'::bytea;
+ bytea
+------------
+ DeAd\\BeEf
+(1 row)
+
-- Test non-error-throwing API too
SELECT pg_input_is_valid(E'\\xDeAdBeE', 'bytea');
pg_input_is_valid
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index 2577a42987d..92c445c2439 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -76,6 +76,7 @@ SELECT E'De\\000dBeEf'::bytea;
SELECT E'De\123dBeEf'::bytea;
SELECT E'De\\123dBeEf'::bytea;
SELECT E'De\\678dBeEf'::bytea;
+SELECT E'DeAd\\\\BeEf'::bytea;
SELECT reverse(''::bytea);
SELECT reverse('\xaa'::bytea);
@@ -88,6 +89,7 @@ SELECT E'\\xDe00BeEf'::bytea;
SELECT E'DeAdBeEf'::bytea;
SELECT E'De\\000dBeEf'::bytea;
SELECT E'De\\123dBeEf'::bytea;
+SELECT E'DeAd\\\\BeEf'::bytea;
-- Test non-error-throwing API too
SELECT pg_input_is_valid(E'\\xDeAdBeE', 'bytea');